From ef98add0c5afdf29a065c99bbf8afaf129e872d0 Mon Sep 17 00:00:00 2001 From: David Gwilliam Date: Fri, 20 Mar 2026 04:41:23 -0700 Subject: [PATCH] feat(integration): Complete feature rewrite with pipeline architecture, effects system, and display improvements Major changes: - Pipeline architecture with capability-based dependency resolution - Effects plugin system with performance monitoring - Display abstraction with multiple backends (terminal, null, websocket) - Camera system for viewport scrolling - Sensor framework for real-time input - Command-and-control system via ntfy - WebSocket display backend for browser clients - Comprehensive test suite and documentation Issue #48: ADR for preset scripting language included This commit consolidates 110 individual commits into a single feature integration that can be reviewed and tested before further refinement. --- .gitignore | 5 +- .../skills/mainline-architecture/SKILL.md | 97 + .opencode/skills/mainline-display/SKILL.md | 163 ++ .opencode/skills/mainline-effects/SKILL.md | 113 + .opencode/skills/mainline-presets/SKILL.md | 103 + .opencode/skills/mainline-sensors/SKILL.md | 136 ++ .opencode/skills/mainline-sources/SKILL.md | 87 + AGENTS.md | 463 ++++- README.md | 160 +- TODO.md | 27 + client/editor.html | 313 +++ client/index.html | 369 ++++ cmdline.py | 6 + docs/ARCHITECTURE.md | 153 ++ ...Renderer + ntfy Message Queue for ESP32.md | 0 docs/PIPELINE.md | 223 ++ .../Refactor mainline.md | 79 +- .../klubhaus-doorbell-hardware.md | 0 .../adr-preset-scripting-language.md | 217 ++ .../2026-03-16-color-scheme-implementation.md | 894 -------- .../plans/2026-03-19-figment-mode.md | 1110 ---------- .../specs/2026-03-15-readme-update-design.md | 145 -- .../specs/2026-03-16-code-scroll-design.md | 154 -- .../specs/2026-03-16-color-scheme-design.md | 299 --- .../specs/2026-03-19-figment-mode-design.md | 308 --- effects_plugins/figment.py | 200 -- effects_plugins/glitch.py | 37 - engine/__init__.py | 9 + engine/app.py | 441 
+--- engine/app/__init__.py | 34 + engine/app/main.py | 457 ++++ engine/app/pipeline_runner.py | 852 ++++++++ engine/benchmark.py | 73 + engine/camera.py | 473 +++++ engine/canvas.py | 186 ++ engine/config.py | 66 +- engine/controller.py | 68 - engine/data_sources/__init__.py | 12 + engine/data_sources/checkerboard.py | 60 + engine/data_sources/pipeline_introspection.py | 312 +++ engine/data_sources/sources.py | 490 +++++ engine/display.py | 102 - engine/display/__init__.py | 290 +++ engine/display/backends/multi.py | 50 + engine/display/backends/null.py | 183 ++ engine/display/backends/pygame.py | 369 ++++ engine/display/backends/replay.py | 122 ++ engine/display/backends/terminal.py | 133 ++ engine/display/backends/websocket.py | 464 +++++ engine/display/renderer.py | 280 +++ engine/display/streaming.py | 268 +++ engine/effects/__init__.py | 18 +- engine/effects/chain.py | 20 +- engine/effects/controller.py | 9 +- engine/effects/legacy.py | 39 + .../effects/plugins}/__init__.py | 4 +- engine/effects/plugins/afterimage.py | 122 ++ engine/effects/plugins/border.py | 105 + engine/effects/plugins/crop.py | 42 + .../effects/plugins}/fade.py | 8 +- .../effects/plugins}/firehose.py | 6 +- engine/effects/plugins/glitch.py | 52 + engine/effects/plugins/hud.py | 102 + engine/effects/plugins/motionblur.py | 119 ++ .../effects/plugins}/noise.py | 9 +- engine/effects/plugins/tint.py | 99 + engine/effects/types.py | 221 +- engine/emitters.py | 25 - engine/events.py | 10 - engine/fetch.py | 157 +- engine/fetch_code.py | 67 - engine/figment_render.py | 90 - engine/figment_trigger.py | 36 - engine/fixtures/headlines.json | 1 + engine/interfaces/__init__.py | 73 + engine/layers.py | 356 ---- engine/mic.py | 96 - engine/pipeline/__init__.py | 106 + engine/pipeline/adapters.py | 50 + engine/pipeline/adapters/__init__.py | 44 + engine/pipeline/adapters/camera.py | 219 ++ engine/pipeline/adapters/data_source.py | 143 ++ engine/pipeline/adapters/display.py | 93 + 
engine/pipeline/adapters/effect_plugin.py | 117 ++ engine/pipeline/adapters/factory.py | 38 + engine/pipeline/adapters/transform.py | 293 +++ engine/pipeline/controller.py | 1055 ++++++++++ engine/pipeline/core.py | 321 +++ engine/pipeline/params.py | 150 ++ .../pipeline/pipeline_introspection_demo.py | 300 +++ engine/pipeline/preset_loader.py | 280 +++ engine/pipeline/presets.py | 237 +++ engine/pipeline/registry.py | 189 ++ engine/pipeline/stages/framebuffer.py | 174 ++ engine/pipeline/ui.py | 674 ++++++ engine/pipeline/validation.py | 221 ++ engine/render/__init__.py | 37 + engine/{render.py => render/blocks.py} | 170 +- engine/render/gradient.py | 82 + engine/scroll.py | 161 -- engine/sensors/__init__.py | 203 ++ engine/sensors/mic.py | 145 ++ engine/sensors/oscillator.py | 161 ++ engine/sensors/pipeline_metrics.py | 114 + engine/themes.py | 60 - ...of-mexico-antique-cultures-svgrepo-com.svg | 32 - figments/mayan-mask-of-mexico-svgrepo-com.svg | 60 - .../mayan-symbol-of-mexico-svgrepo-com.svg | 110 - fonts/Kapiler.otf | Bin 76120 -> 0 bytes fonts/Kapiler.ttf | Bin 118964 -> 0 bytes hk.pkl | 3 + mise.toml | 70 +- opencode-instructions.md | 1 + presets.toml | 125 ++ pyproject.toml | 19 +- requirements-dev.txt | 4 - requirements.txt | 4 - scripts/demo_hot_rebuild.py | 222 ++ scripts/demo_image_oscilloscope.py | 378 ++++ scripts/demo_oscillator_simple.py | 137 ++ scripts/demo_oscilloscope.py | 204 ++ scripts/demo_oscilloscope_mod.py | 380 ++++ scripts/demo_oscilloscope_pipeline.py | 411 ++++ scripts/oscillator_data_export.py | 111 + scripts/pipeline_demo.py | 509 +++++ scripts/render-diagrams.py | 49 + scripts/validate-diagrams.py | 64 + test_ui_simple.py | 56 + tests/acceptance_report.py | 473 +++++ tests/conftest.py | 36 + tests/e2e/test_web_client.py | 133 ++ tests/fixtures/test.svg | 3 - tests/kitty_test.py | 31 + tests/test_acceptance.py | 290 +++ tests/test_adapters.py | 345 +++ tests/test_app.py | 215 ++ tests/test_benchmark.py | 380 ++++ 
tests/test_border_effect.py | 111 + tests/test_camera.py | 68 + tests/test_camera_acceptance.py | 826 ++++++++ tests/test_controller.py | 117 -- tests/test_crop_effect.py | 99 + tests/test_data_sources.py | 220 ++ tests/test_display.py | 356 +++- tests/test_effects_controller.py | 124 ++ tests/test_emitters.py | 69 - tests/test_fetch.py | 234 +++ tests/test_fetch_code.py | 35 - tests/test_figment.py | 151 -- tests/test_figment_overlay.py | 64 - tests/test_figment_render.py | 52 - tests/test_figment_trigger.py | 40 - tests/test_framebuffer_acceptance.py | 195 ++ tests/test_framebuffer_stage.py | 237 +++ tests/test_glitch_effect.py | 240 +++ tests/test_hud.py | 106 + tests/test_layers.py | 96 - tests/test_mic.py | 149 -- tests/test_ntfy_integration.py | 131 ++ tests/test_performance_regression.py | 185 ++ tests/test_pipeline.py | 1844 +++++++++++++++++ tests/test_pipeline_e2e.py | 552 +++++ tests/test_pipeline_introspection.py | 171 ++ tests/test_pipeline_introspection_demo.py | 167 ++ tests/test_pipeline_metrics_sensor.py | 113 + tests/test_pipeline_mutation_commands.py | 259 +++ tests/test_pipeline_rebuild.py | 405 ++++ tests/test_render.py | 301 --- tests/test_sensors.py | 473 +++++ tests/test_streaming.py | 223 ++ tests/test_themes.py | 169 -- tests/test_tint_acceptance.py | 206 ++ tests/test_tint_effect.py | 125 ++ tests/test_translate.py | 115 + tests/test_ui_panel.py | 184 ++ tests/test_viewport_filter_performance.py | 252 +++ tests/test_vis_offset.py | 31 + tests/test_websocket.py | 395 ++++ tests/test_websocket_e2e.py | 78 + 179 files changed, 27649 insertions(+), 6552 deletions(-) create mode 100644 .opencode/skills/mainline-architecture/SKILL.md create mode 100644 .opencode/skills/mainline-display/SKILL.md create mode 100644 .opencode/skills/mainline-effects/SKILL.md create mode 100644 .opencode/skills/mainline-presets/SKILL.md create mode 100644 .opencode/skills/mainline-sensors/SKILL.md create mode 100644 .opencode/skills/mainline-sources/SKILL.md create 
mode 100644 TODO.md create mode 100644 client/editor.html create mode 100644 client/index.html create mode 100644 docs/ARCHITECTURE.md rename Mainline Renderer + ntfy Message Queue for ESP32.md => docs/Mainline Renderer + ntfy Message Queue for ESP32.md (100%) create mode 100644 docs/PIPELINE.md rename Refactor mainline.md => docs/Refactor mainline.md (98%) rename klubhaus-doorbell-hardware.md => docs/klubhaus-doorbell-hardware.md (100%) create mode 100644 docs/proposals/adr-preset-scripting-language.md delete mode 100644 docs/superpowers/plans/2026-03-16-color-scheme-implementation.md delete mode 100644 docs/superpowers/plans/2026-03-19-figment-mode.md delete mode 100644 docs/superpowers/specs/2026-03-15-readme-update-design.md delete mode 100644 docs/superpowers/specs/2026-03-16-code-scroll-design.md delete mode 100644 docs/superpowers/specs/2026-03-16-color-scheme-design.md delete mode 100644 docs/superpowers/specs/2026-03-19-figment-mode-design.md delete mode 100644 effects_plugins/figment.py delete mode 100644 effects_plugins/glitch.py create mode 100644 engine/app/__init__.py create mode 100644 engine/app/main.py create mode 100644 engine/app/pipeline_runner.py create mode 100644 engine/benchmark.py create mode 100644 engine/camera.py create mode 100644 engine/canvas.py delete mode 100644 engine/controller.py create mode 100644 engine/data_sources/__init__.py create mode 100644 engine/data_sources/checkerboard.py create mode 100644 engine/data_sources/pipeline_introspection.py create mode 100644 engine/data_sources/sources.py delete mode 100644 engine/display.py create mode 100644 engine/display/__init__.py create mode 100644 engine/display/backends/multi.py create mode 100644 engine/display/backends/null.py create mode 100644 engine/display/backends/pygame.py create mode 100644 engine/display/backends/replay.py create mode 100644 engine/display/backends/terminal.py create mode 100644 engine/display/backends/websocket.py create mode 100644 
engine/display/renderer.py create mode 100644 engine/display/streaming.py rename {effects_plugins => engine/effects/plugins}/__init__.py (84%) create mode 100644 engine/effects/plugins/afterimage.py create mode 100644 engine/effects/plugins/border.py create mode 100644 engine/effects/plugins/crop.py rename {effects_plugins => engine/effects/plugins}/fade.py (87%) rename {effects_plugins => engine/effects/plugins}/firehose.py (95%) create mode 100644 engine/effects/plugins/glitch.py create mode 100644 engine/effects/plugins/hud.py create mode 100644 engine/effects/plugins/motionblur.py rename {effects_plugins => engine/effects/plugins}/noise.py (78%) create mode 100644 engine/effects/plugins/tint.py delete mode 100644 engine/emitters.py delete mode 100644 engine/fetch_code.py delete mode 100644 engine/figment_render.py delete mode 100644 engine/figment_trigger.py create mode 100644 engine/fixtures/headlines.json create mode 100644 engine/interfaces/__init__.py delete mode 100644 engine/layers.py delete mode 100644 engine/mic.py create mode 100644 engine/pipeline/__init__.py create mode 100644 engine/pipeline/adapters.py create mode 100644 engine/pipeline/adapters/__init__.py create mode 100644 engine/pipeline/adapters/camera.py create mode 100644 engine/pipeline/adapters/data_source.py create mode 100644 engine/pipeline/adapters/display.py create mode 100644 engine/pipeline/adapters/effect_plugin.py create mode 100644 engine/pipeline/adapters/factory.py create mode 100644 engine/pipeline/adapters/transform.py create mode 100644 engine/pipeline/controller.py create mode 100644 engine/pipeline/core.py create mode 100644 engine/pipeline/params.py create mode 100644 engine/pipeline/pipeline_introspection_demo.py create mode 100644 engine/pipeline/preset_loader.py create mode 100644 engine/pipeline/presets.py create mode 100644 engine/pipeline/registry.py create mode 100644 engine/pipeline/stages/framebuffer.py create mode 100644 engine/pipeline/ui.py create mode 100644 
engine/pipeline/validation.py create mode 100644 engine/render/__init__.py rename engine/{render.py => render/blocks.py} (61%) create mode 100644 engine/render/gradient.py delete mode 100644 engine/scroll.py create mode 100644 engine/sensors/__init__.py create mode 100644 engine/sensors/mic.py create mode 100644 engine/sensors/oscillator.py create mode 100644 engine/sensors/pipeline_metrics.py delete mode 100644 engine/themes.py delete mode 100644 figments/animal-head-symbol-of-mexico-antique-cultures-svgrepo-com.svg delete mode 100644 figments/mayan-mask-of-mexico-svgrepo-com.svg delete mode 100644 figments/mayan-symbol-of-mexico-svgrepo-com.svg delete mode 100644 fonts/Kapiler.otf delete mode 100644 fonts/Kapiler.ttf create mode 100644 opencode-instructions.md create mode 100644 presets.toml delete mode 100644 requirements-dev.txt delete mode 100644 requirements.txt create mode 100644 scripts/demo_hot_rebuild.py create mode 100644 scripts/demo_image_oscilloscope.py create mode 100644 scripts/demo_oscillator_simple.py create mode 100644 scripts/demo_oscilloscope.py create mode 100644 scripts/demo_oscilloscope_mod.py create mode 100644 scripts/demo_oscilloscope_pipeline.py create mode 100644 scripts/oscillator_data_export.py create mode 100644 scripts/pipeline_demo.py create mode 100644 scripts/render-diagrams.py create mode 100644 scripts/validate-diagrams.py create mode 100644 test_ui_simple.py create mode 100644 tests/acceptance_report.py create mode 100644 tests/conftest.py create mode 100644 tests/e2e/test_web_client.py delete mode 100644 tests/fixtures/test.svg create mode 100644 tests/kitty_test.py create mode 100644 tests/test_acceptance.py create mode 100644 tests/test_adapters.py create mode 100644 tests/test_app.py create mode 100644 tests/test_benchmark.py create mode 100644 tests/test_border_effect.py create mode 100644 tests/test_camera.py create mode 100644 tests/test_camera_acceptance.py delete mode 100644 tests/test_controller.py create mode 100644 
tests/test_crop_effect.py create mode 100644 tests/test_data_sources.py delete mode 100644 tests/test_emitters.py create mode 100644 tests/test_fetch.py delete mode 100644 tests/test_fetch_code.py delete mode 100644 tests/test_figment.py delete mode 100644 tests/test_figment_overlay.py delete mode 100644 tests/test_figment_render.py delete mode 100644 tests/test_figment_trigger.py create mode 100644 tests/test_framebuffer_acceptance.py create mode 100644 tests/test_framebuffer_stage.py create mode 100644 tests/test_glitch_effect.py create mode 100644 tests/test_hud.py delete mode 100644 tests/test_layers.py delete mode 100644 tests/test_mic.py create mode 100644 tests/test_ntfy_integration.py create mode 100644 tests/test_performance_regression.py create mode 100644 tests/test_pipeline.py create mode 100644 tests/test_pipeline_e2e.py create mode 100644 tests/test_pipeline_introspection.py create mode 100644 tests/test_pipeline_introspection_demo.py create mode 100644 tests/test_pipeline_metrics_sensor.py create mode 100644 tests/test_pipeline_mutation_commands.py create mode 100644 tests/test_pipeline_rebuild.py delete mode 100644 tests/test_render.py create mode 100644 tests/test_sensors.py create mode 100644 tests/test_streaming.py delete mode 100644 tests/test_themes.py create mode 100644 tests/test_tint_acceptance.py create mode 100644 tests/test_tint_effect.py create mode 100644 tests/test_translate.py create mode 100644 tests/test_ui_panel.py create mode 100644 tests/test_viewport_filter_performance.py create mode 100644 tests/test_vis_offset.py create mode 100644 tests/test_websocket.py create mode 100644 tests/test_websocket_e2e.py diff --git a/.gitignore b/.gitignore index 573af53..855e84e 100644 --- a/.gitignore +++ b/.gitignore @@ -9,4 +9,7 @@ htmlcov/ .coverage .pytest_cache/ *.egg-info/ -.DS_Store +coverage.xml +*.dot +*.png +test-reports/ diff --git a/.opencode/skills/mainline-architecture/SKILL.md b/.opencode/skills/mainline-architecture/SKILL.md new 
file mode 100644 index 0000000..25117c8 --- /dev/null +++ b/.opencode/skills/mainline-architecture/SKILL.md @@ -0,0 +1,97 @@ +--- +name: mainline-architecture +description: Pipeline stages, capability resolution, and core architecture patterns +compatibility: opencode +metadata: + audience: developers + source_type: codebase +--- + +## What This Skill Covers + +This skill covers Mainline's pipeline architecture - the Stage-based system for dependency resolution, data flow, and component composition. + +## Key Concepts + +### Stage Class (engine/pipeline/core.py) + +The `Stage` ABC is the foundation. All pipeline components inherit from it: + +```python +class Stage(ABC): + name: str + category: str # "source", "effect", "overlay", "display", "camera" + optional: bool = False + + @property + def capabilities(self) -> set[str]: + """What this stage provides (e.g., 'source.headlines')""" + return set() + + @property + def dependencies(self) -> set[str]: + """What this stage needs (e.g., {'source'})""" + return set() +``` + +### Capability-Based Dependencies + +The Pipeline resolves dependencies using **prefix matching**: +- `"source"` matches `"source.headlines"`, `"source.poetry"`, etc. +- `"camera.state"` matches the camera state capability +- This allows flexible composition without hardcoding specific stage names + +### Minimum Capabilities + +The pipeline requires these minimum capabilities to function: +- `"source"` - Data source capability +- `"render.output"` - Rendered content capability +- `"display.output"` - Display output capability +- `"camera.state"` - Camera state for viewport filtering + +These are automatically injected if missing (auto-injection). 
+ +### DataType Enum + +PureData-style data types for inlet/outlet validation: +- `SOURCE_ITEMS`: List[SourceItem] - raw items from sources +- `ITEM_TUPLES`: List[tuple] - (title, source, timestamp) tuples +- `TEXT_BUFFER`: List[str] - rendered ANSI buffer +- `RAW_TEXT`: str - raw text strings +- `PIL_IMAGE`: PIL Image object + +### Pipeline Execution + +The Pipeline (engine/pipeline/controller.py): +1. Collects all stages from StageRegistry +2. Resolves dependencies using prefix matching +3. Executes stages in dependency order +4. Handles errors for non-optional stages + +### Canvas & Camera + +- **Canvas** (`engine/canvas.py`): 2D rendering surface with dirty region tracking +- **Camera** (`engine/camera.py`): Viewport controller for scrolling content + +Canvas tracks dirty regions automatically when content is written via `put_region`, `put_text`, `fill`, enabling partial buffer updates. + +## Adding New Stages + +1. Create a class inheriting from `Stage` +2. Define `capabilities` and `dependencies` properties +3. Implement required abstract methods +4. 
Register in StageRegistry or use as adapter + +## Common Patterns + +- Use adapters (engine/pipeline/adapters.py) to wrap existing components as stages +- Set `optional=True` for stages that can fail gracefully +- Use `stage_type` and `render_order` for execution ordering +- Clock stages update state independently of data flow + +## Sources + +- engine/pipeline/core.py - Stage base class +- engine/pipeline/controller.py - Pipeline implementation +- engine/pipeline/adapters/ - Stage adapters +- docs/PIPELINE.md - Pipeline documentation diff --git a/.opencode/skills/mainline-display/SKILL.md b/.opencode/skills/mainline-display/SKILL.md new file mode 100644 index 0000000..bd1dd2a --- /dev/null +++ b/.opencode/skills/mainline-display/SKILL.md @@ -0,0 +1,163 @@ +--- +name: mainline-display +description: Display backend implementation and the Display protocol +compatibility: opencode +metadata: + audience: developers + source_type: codebase +--- + +## What This Skill Covers + +This skill covers Mainline's display backend system - how to implement new display backends and how the Display protocol works. + +## Key Concepts + +### Display Protocol + +All backends implement a common Display protocol (in `engine/display/__init__.py`): + +```python +class Display(Protocol): + width: int + height: int + + def init(self, width: int, height: int, reuse: bool = False) -> None: + """Initialize the display""" + ... + + def show(self, buf: list[str], border: bool = False) -> None: + """Display the buffer""" + ... + + def clear(self) -> None: + """Clear the display""" + ... + + def cleanup(self) -> None: + """Clean up resources""" + ... + + def get_dimensions(self) -> tuple[int, int]: + """Return (width, height)""" + ... 
+``` + +### DisplayRegistry + +Discovers and manages backends: + +```python +from engine.display import DisplayRegistry +display = DisplayRegistry.create("terminal") # or "websocket", "null", "multi" +``` + +### Available Backends + +| Backend | File | Description | +|---------|------|-------------| +| terminal | backends/terminal.py | ANSI terminal output | +| websocket | backends/websocket.py | Web browser via WebSocket | +| null | backends/null.py | Headless for testing | +| multi | backends/multi.py | Forwards to multiple displays | +| moderngl | backends/moderngl.py | GPU-accelerated OpenGL rendering (optional) | + +### WebSocket Backend + +- WebSocket server: port 8765 +- HTTP server: port 8766 (serves client/index.html) +- Client has ANSI color parsing and fullscreen support + +### Multi Backend + +Forwards to multiple displays simultaneously - useful for `terminal + websocket`. + +## Adding a New Backend + +1. Create `engine/display/backends/my_backend.py` +2. Implement the Display protocol methods +3. Register in `engine/display/__init__.py`'s `DisplayRegistry` + +Required methods: +- `init(width: int, height: int, reuse: bool = False)` - Initialize display +- `show(buf: list[str], border: bool = False)` - Display buffer +- `clear()` - Clear screen +- `cleanup()` - Clean up resources +- `get_dimensions() -> tuple[int, int]` - Get terminal dimensions + +Optional methods: +- `title(text: str)` - Set window title +- `cursor(show: bool)` - Control cursor + +## Usage + +```bash +python mainline.py --display terminal # default +python mainline.py --display websocket +python mainline.py --display moderngl # GPU-accelerated (requires moderngl) +``` + +## Common Bugs and Patterns + +### BorderMode.OFF Enum Bug + +**Problem**: `BorderMode.OFF` has enum value `1` (not `0`), and Python enums are always truthy. 
+ +**Incorrect Code**: +```python +if border: + buffer = render_border(buffer, width, height, fps, frame_time) +``` + +**Correct Code**: +```python +from engine.display import BorderMode +if border and border != BorderMode.OFF: + buffer = render_border(buffer, width, height, fps, frame_time) +``` + +**Why**: Checking `if border:` evaluates to `True` even when `border == BorderMode.OFF` because enum members are always truthy in Python. + +### Context Type Mismatch + +**Problem**: `PipelineContext` and `EffectContext` have different APIs for storing data. + +- `PipelineContext`: Uses `set()`/`get()` for services +- `EffectContext`: Uses `set_state()`/`get_state()` for state + +**Pattern for Passing Data**: +```python +# In pipeline setup (uses PipelineContext) +ctx.set("pipeline_order", pipeline.execution_order) + +# In EffectPluginStage (must copy to EffectContext) +effect_ctx.set_state("pipeline_order", ctx.get("pipeline_order")) +``` + +### Terminal Display ANSI Patterns + +**Screen Clearing**: +```python +output = "\033[H\033[J" + "".join(buffer) +``` + +**Cursor Positioning** (used by HUD effect): +- `\033[row;colH` - Move cursor to row, column +- Example: `\033[1;1H` - Move to row 1, column 1 + +**Key Insight**: Terminal display joins buffer lines WITHOUT newlines, relying on ANSI cursor positioning codes to move the cursor to the correct location for each line. + +### EffectPluginStage Context Copying + +**Problem**: When effects need access to pipeline services (like `pipeline_order`), they must be copied from `PipelineContext` to `EffectContext`. + +**Pattern**: +```python +# In EffectPluginStage.process() +# Copy pipeline_order from PipelineContext services to EffectContext state +pipeline_order = ctx.get("pipeline_order") +if pipeline_order: + effect_ctx.set_state("pipeline_order", pipeline_order) +``` + +This ensures effects can access `ctx.get_state("pipeline_order")` in their process method. 
diff --git a/.opencode/skills/mainline-effects/SKILL.md b/.opencode/skills/mainline-effects/SKILL.md new file mode 100644 index 0000000..403440c --- /dev/null +++ b/.opencode/skills/mainline-effects/SKILL.md @@ -0,0 +1,113 @@ +--- +name: mainline-effects +description: How to add new effect plugins to Mainline's effect system +compatibility: opencode +metadata: + audience: developers + source_type: codebase +--- + +## What This Skill Covers + +This skill covers Mainline's effect plugin system - how to create, configure, and integrate visual effects into the pipeline. + +## Key Concepts + +### EffectPlugin ABC (engine/effects/types.py) + +All effects must inherit from `EffectPlugin` and implement: + +```python +class EffectPlugin(ABC): + name: str + config: EffectConfig + param_bindings: dict[str, dict[str, str | float]] = {} + supports_partial_updates: bool = False + + @abstractmethod + def process(self, buf: list[str], ctx: EffectContext) -> list[str]: + """Process buffer with effect applied""" + ... + + @abstractmethod + def configure(self, config: EffectConfig) -> None: + """Configure the effect""" + ... +``` + +### EffectContext + +Passed to every effect's process method: + +```python +@dataclass +class EffectContext: + terminal_width: int + terminal_height: int + scroll_cam: int + ticker_height: int + camera_x: int = 0 + mic_excess: float = 0.0 + grad_offset: float = 0.0 + frame_number: int = 0 + has_message: bool = False + items: list = field(default_factory=list) + _state: dict[str, Any] = field(default_factory=dict) +``` + +Access sensor values via `ctx.get_sensor_value("sensor_name")`. 
+ +### EffectConfig + +Configuration dataclass: + +```python +@dataclass +class EffectConfig: + enabled: bool = True + intensity: float = 1.0 + params: dict[str, Any] = field(default_factory=dict) +``` + +### Partial Updates + +For performance optimization, set `supports_partial_updates = True` and implement `process_partial`: + +```python +class MyEffect(EffectPlugin): + supports_partial_updates = True + + def process_partial(self, buf, ctx, partial: PartialUpdate) -> list[str]: + # Only process changed regions + ... +``` + +## Adding a New Effect + +1. Create file in `effects_plugins/my_effect.py` +2. Inherit from `EffectPlugin` +3. Implement `process()` and `configure()` +4. Add to `effects_plugins/__init__.py` (runtime discovery via issubclass checks) + +## Param Bindings + +Declarative sensor-to-param mappings: + +```python +param_bindings = { + "intensity": {"sensor": "mic", "transform": "linear"}, + "rate": {"sensor": "oscillator", "transform": "exponential"}, +} +``` + +Transforms: `linear`, `exponential`, `threshold` + +## Effect Chain + +Effects are chained via `engine/effects/chain.py` - processes each effect in order, passing output to next. + +## Existing Effects + +See `effects_plugins/`: +- noise.py, fade.py, glitch.py, firehose.py +- border.py, crop.py, tint.py, hud.py diff --git a/.opencode/skills/mainline-presets/SKILL.md b/.opencode/skills/mainline-presets/SKILL.md new file mode 100644 index 0000000..2f882b2 --- /dev/null +++ b/.opencode/skills/mainline-presets/SKILL.md @@ -0,0 +1,103 @@ +--- +name: mainline-presets +description: Creating pipeline presets in TOML format for Mainline +compatibility: opencode +metadata: + audience: developers + source_type: codebase +--- + +## What This Skill Covers + +This skill covers how to create pipeline presets in TOML format for Mainline's rendering pipeline. + +## Key Concepts + +### Preset Loading Order + +Presets are loaded from multiple locations (later overrides earlier): +1. 
Built-in: `engine/presets.toml` +2. User config: `~/.config/mainline/presets.toml` +3. Local override: `./presets.toml` + +### PipelinePreset Dataclass + +```python +@dataclass +class PipelinePreset: + name: str + description: str = "" + source: str = "headlines" # Data source + display: str = "terminal" # Display backend + camera: str = "scroll" # Camera mode + effects: list[str] = field(default_factory=list) + border: bool = False +``` + +### TOML Format + +```toml +[presets.my-preset] +description = "My custom pipeline" +source = "headlines" +display = "terminal" +camera = "scroll" +effects = ["noise", "fade"] +border = true +``` + +## Creating a Preset + +### Option 1: User Config + +Create/edit `~/.config/mainline/presets.toml`: + +```toml +[presets.my-cool-preset] +description = "Noise and glitch effects" +source = "headlines" +display = "terminal" +effects = ["noise", "glitch"] +``` + +### Option 2: Local Override + +Create `./presets.toml` in project root: + +```toml +[presets.dev-inspect] +description = "Pipeline introspection for development" +source = "headlines" +display = "terminal" +effects = ["hud"] +``` + +### Option 3: Built-in + +Edit `engine/presets.toml` (requires PR to repository). 
+ +## Available Sources + +- `headlines` - RSS news feeds +- `poetry` - Literature mode +- `pipeline-inspect` - Live DAG visualization + +## Available Displays + +- `terminal` - ANSI terminal +- `websocket` - Web browser +- `null` - Headless +- `moderngl` - GPU-accelerated (optional) + +## Available Effects + +See `effects_plugins/`: +- noise, fade, glitch, firehose +- border, crop, tint, hud + +## Validation Functions + +Use these from `engine/pipeline/presets.py`: +- `validate_preset()` - Validate preset structure +- `validate_signal_path()` - Detect circular dependencies +- `generate_preset_toml()` - Generate skeleton preset diff --git a/.opencode/skills/mainline-sensors/SKILL.md b/.opencode/skills/mainline-sensors/SKILL.md new file mode 100644 index 0000000..3362ded --- /dev/null +++ b/.opencode/skills/mainline-sensors/SKILL.md @@ -0,0 +1,136 @@ +--- +name: mainline-sensors +description: Sensor framework for real-time input in Mainline +compatibility: opencode +metadata: + audience: developers + source_type: codebase +--- + +## What This Skill Covers + +This skill covers Mainline's sensor framework - how to use, create, and integrate sensors for real-time input. + +## Key Concepts + +### Sensor Base Class (engine/sensors/__init__.py) + +```python +class Sensor(ABC): + name: str + unit: str = "" + + @property + def available(self) -> bool: + """Whether sensor is currently available""" + return True + + @abstractmethod + def read(self) -> SensorValue | None: + """Read current sensor value""" + ... 
+ + def start(self) -> None: + """Initialize sensor (optional)""" + pass + + def stop(self) -> None: + """Clean up sensor (optional)""" + pass +``` + +### SensorValue Dataclass + +```python +@dataclass +class SensorValue: + sensor_name: str + value: float + timestamp: float + unit: str = "" +``` + +### SensorRegistry + +Discovers and manages sensors globally: + +```python +from engine.sensors import SensorRegistry +registry = SensorRegistry() +sensor = registry.get("mic") +``` + +### SensorStage + +Pipeline adapter that provides sensor values to effects: + +```python +from engine.pipeline.adapters import SensorStage +stage = SensorStage(sensor_name="mic") +``` + +## Built-in Sensors + +| Sensor | File | Description | +|--------|------|-------------| +| MicSensor | sensors/mic.py | Microphone input (RMS dB) | +| OscillatorSensor | sensors/oscillator.py | Test sine wave generator | +| PipelineMetricsSensor | sensors/pipeline_metrics.py | FPS, frame time, etc. | + +## Param Bindings + +Effects declare sensor-to-param mappings: + +```python +class GlitchEffect(EffectPlugin): + param_bindings = { + "intensity": {"sensor": "mic", "transform": "linear"}, + } +``` + +### Transform Functions + +- `linear` - Direct mapping to param range +- `exponential` - Exponential scaling +- `threshold` - Binary on/off + +## Adding a New Sensor + +1. Create `engine/sensors/my_sensor.py` +2. Inherit from `Sensor` ABC +3. Implement required methods +4. Register in `SensorRegistry` + +Example: +```python +class MySensor(Sensor): + name = "my-sensor" + unit = "units" + + def read(self) -> SensorValue | None: + return SensorValue( + sensor_name=self.name, + value=self._read_hardware(), + timestamp=time.time(), + unit=self.unit + ) +``` + +## Using Sensors in Effects + +Access sensor values via EffectContext: + +```python +def process(self, buf, ctx): + mic_level = ctx.get_sensor_value("mic") + if mic_level and mic_level > 0.5: + # Apply intense effect + ... 
+``` + +Or via param_bindings (automatic): + +```python +# If intensity is bound to "mic", it's automatically +# available in self.config.intensity +``` diff --git a/.opencode/skills/mainline-sources/SKILL.md b/.opencode/skills/mainline-sources/SKILL.md new file mode 100644 index 0000000..118ac58 --- /dev/null +++ b/.opencode/skills/mainline-sources/SKILL.md @@ -0,0 +1,87 @@ +--- +name: mainline-sources +description: Adding new RSS feeds and data sources to Mainline +compatibility: opencode +metadata: + audience: developers + source_type: codebase +--- + +## What This Skill Covers + +This skill covers how to add new data sources (RSS feeds, poetry) to Mainline. + +## Key Concepts + +### Feeds Dictionary (engine/sources.py) + +All feeds are defined in a simple dictionary: + +```python +FEEDS = { + "Feed Name": "https://example.com/feed.xml", + # Category comments help organize: + # Science & Technology + # Economics & Business + # World & Politics + # Culture & Ideas +} +``` + +### Poetry Sources + +Project Gutenberg URLs for public domain literature: + +```python +POETRY_SOURCES = { + "Author Name": "https://www.gutenberg.org/cache/epub/1234/pg1234.txt", +} +``` + +### Language & Script Mapping + +The sources.py also contains language/script detection mappings used for auto-translation and font selection. + +## Adding a New RSS Feed + +1. Edit `engine/sources.py` +2. Add entry to `FEEDS` dict under appropriate category: + ```python + "My Feed": "https://example.com/feed.xml", + ``` +3. The feed will be automatically discovered on next run + +### Feed Requirements + +- Must be valid RSS or Atom XML +- Should have `<item>` elements for items +- Must be HTTP/HTTPS accessible + +## Adding Poetry Sources + +1. Edit `engine/sources.py` +2. 
Add to `POETRY_SOURCES` dict: + ```python + "Author": "https://www.gutenberg.org/cache/epub/XXXX/pgXXXX.txt", + ``` + +### Poetry Requirements + +- Plain text (UTF-8) +- Project Gutenberg format preferred +- No DRM-protected sources + +## Data Flow + +Feeds are fetched via `engine/fetch.py`: +- `fetch_feed(url)` - Fetches and parses RSS/Atom +- Results cached for fast restarts +- Filtered via `engine/filter.py` for content cleaning + +## Categories + +Organize new feeds by category using comments: +- Science & Technology +- Economics & Business +- World & Politics +- Culture & Ideas diff --git a/AGENTS.md b/AGENTS.md index 21d7e2c..849ebb3 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -4,88 +4,208 @@ This project uses: - **mise** (mise.jdx.dev) - tool version manager and task runner -- **hk** (hk.jdx.dev) - git hook manager - **uv** - fast Python package installer -- **ruff** - linter and formatter -- **pytest** - test runner +- **ruff** - linter and formatter (line-length 88, target Python 3.10) +- **pytest** - test runner with strict marker enforcement ### Setup ```bash -# Install dependencies -mise run install - -# Or equivalently: -uv sync +mise run install # Install dependencies +# Or: uv sync --all-extras # includes mic, websocket support ``` ### Available Commands ```bash -mise run test # Run tests -mise run test-v # Run tests verbose -mise run test-cov # Run tests with coverage report -mise run lint # Run ruff linter -mise run lint-fix # Run ruff with auto-fix -mise run format # Run ruff formatter -mise run ci # Full CI pipeline (sync + test + coverage) +# Testing +mise run test # Run all tests +mise run test-cov # Run tests with coverage report +pytest tests/test_foo.py::TestClass::test_method # Run single test + +# Linting & Formatting +mise run lint # Run ruff linter +mise run lint-fix # Run ruff with auto-fix +mise run format # Run ruff formatter + +# CI +mise run ci # Full CI pipeline (topics-init + lint + test-cov) ``` -## Git Hooks - -**At the start of 
every agent session**, verify hooks are installed: ```bash -ls -la .git/hooks/pre-commit +# Run a specific test function +pytest tests/test_eventbus.py::TestEventBusInit::test_init_creates_empty_subscribers + +# Run all tests in a file +pytest tests/test_eventbus.py + +# Run tests matching a pattern +pytest -k "test_subscribe" ``` -If hooks are not installed, install them with: +### Git Hooks +Install hooks at start of session: ```bash -hk init --mise -mise run pre-commit +ls -la .git/hooks/pre-commit # Verify installed +hk init --mise # Install if missing +mise run pre-commit # Run manually ``` -The project uses hk configured in `hk.pkl`: -- **pre-commit**: runs ruff-format and ruff (with auto-fix) -- **pre-push**: runs ruff check +## Code Style Guidelines + +### Imports (three sections, alphabetical within each) + +```python +# 1. Standard library +import os +import threading +from abc import ABC, abstractmethod +from collections import defaultdict +from collections.abc import Callable +from dataclasses import dataclass, field +from typing import Any + +# 2. Third-party +# (none in this example) + +# 3. Local project +from engine.events import EventType +``` + +### Type Hints + +- Use type hints for all function signatures (parameters and return) +- Use `|` for unions (Python 3.10+): `EventType | None` +- Use `dict[K, V]`, `list[V]` (generic syntax): `dict[str, list[int]]` +- Use `Callable[[ArgType], ReturnType]` for callbacks + +```python +def subscribe(self, event_type: EventType, callback: Callable[[Any], None]) -> None: + ... 
+ +def get_sensor_value(self, sensor_name: str) -> float | None: + return self._state.get(f"sensor.{sensor_name}") +``` + +### Naming Conventions + +- **Classes**: `PascalCase` (e.g., `EventBus`, `EffectPlugin`) +- **Functions/methods**: `snake_case` (e.g., `get_event_bus`, `process_partial`) +- **Constants**: `SCREAMING_SNAKE_CASE` (e.g., `CURSOR_OFF`) +- **Private methods**: `_snake_case` prefix (e.g., `_initialize`) +- **Type variables**: `PascalCase` (e.g., `T`, `EffectT`) + +### Dataclasses + +Use `@dataclass` for simple data containers: + +```python +@dataclass +class EffectContext: + terminal_width: int + terminal_height: int + scroll_cam: int + ticker_height: int = 0 + _state: dict[str, Any] = field(default_factory=dict, repr=False) +``` + +### Abstract Base Classes + +Use ABC for interface enforcement: + +```python +class EffectPlugin(ABC): + name: str + config: EffectConfig + + @abstractmethod + def process(self, buf: list[str], ctx: EffectContext) -> list[str]: + ... + + @abstractmethod + def configure(self, config: EffectConfig) -> None: + ... 
+``` + +### Error Handling + +- Catch specific exceptions, not bare `Exception` +- Use `try/except` with fallbacks for optional features +- Silent pass in event callbacks to prevent one handler from breaking others + +```python +# Good: specific exception +try: + term_size = os.get_terminal_size() +except OSError: + term_width = 80 + +# Good: silent pass in callbacks +for callback in callbacks: + try: + callback(event) + except Exception: + pass +``` + +### Thread Safety + +Use locks for shared state: + +```python +class EventBus: + def __init__(self): + self._lock = threading.Lock() + + def publish(self, event_type: EventType, event: Any = None) -> None: + with self._lock: + callbacks = list(self._subscribers.get(event_type, [])) +``` + +### Comments + +- **DO NOT ADD comments** unless explicitly required +- Let code be self-documenting with good naming +- Use docstrings only for public APIs or complex logic + +### Testing Patterns + +Follow pytest conventions: + +```python +class TestEventBusSubscribe: + """Tests for EventBus.subscribe method.""" + + def test_subscribe_adds_callback(self): + """subscribe() adds a callback for an event type.""" + bus = EventBus() + def callback(e): + return None + bus.subscribe(EventType.NTFY_MESSAGE, callback) + assert bus.subscriber_count(EventType.NTFY_MESSAGE) == 1 +``` + +- Use classes to group related tests (`Test<ClassName>`, `Test<method_name>`) +- Test docstrings follow `"<method>() <action>"` pattern +- Use descriptive assertion messages via pytest behavior ## Workflow Rules ### Before Committing -1. **Always run the test suite** - never commit code that fails tests: - ```bash - mise run test - ``` - -2. **Always run the linter**: - ```bash - mise run lint - ``` - -3. **Fix any lint errors** before committing (or let the pre-commit hook handle it). - -4. **Review your changes** using `git diff` to understand what will be committed. +1. Run tests: `mise run test` +2. Run linter: `mise run lint` +3. 
Review changes: `git diff` ### On Failing Tests -When tests fail, **determine whether it's an out-of-date test or a correctly failing test**: - -- **Out-of-date test**: The test was written for old behavior that has legitimately changed. Update the test to match the new expected behavior. - -- **Correctly failing test**: The test correctly identifies a broken contract. Fix the implementation, not the test. +- **Out-of-date test**: Update test to match new expected behavior +- **Correctly failing test**: Fix implementation, not the test **Never** modify a test to make it pass without understanding why it failed. -### Code Review - -Before committing significant changes: -- Run `git diff` to review all changes -- Ensure new code follows existing patterns in the codebase -- Check that type hints are added for new functions -- Verify that tests exist for new functionality - ## Testing Tests live in `tests/` and follow the pattern `test_*.py`. @@ -102,9 +222,244 @@ mise run test-cov The project uses pytest with strict marker enforcement. Test configuration is in `pyproject.toml` under `[tool.pytest.ini_options]`. 
+### Test Coverage Strategy + +Current coverage: 56% (463 tests) + +Key areas with lower coverage (acceptable for now): +- **app.py** (8%): Main entry point - integration heavy, requires terminal +- **scroll.py** (10%): Terminal-dependent rendering logic (unused) + +Key areas with good coverage: +- **display/backends/null.py** (95%): Easy to test headlessly +- **display/backends/terminal.py** (96%): Uses mocking +- **display/backends/multi.py** (100%): Simple forwarding logic +- **effects/performance.py** (99%): Pure Python logic +- **eventbus.py** (96%): Simple event system +- **effects/controller.py** (95%): Effects command handling + +Areas needing more tests: +- **websocket.py** (48%): Network I/O, hard to test in CI +- **ntfy.py** (50%): Network I/O, hard to test in CI +- **mic.py** (61%): Audio I/O, hard to test in CI + +Note: Terminal-dependent modules (scroll, layers render) are harder to test in CI. +Performance regression tests are in `tests/test_benchmark.py` with `@pytest.mark.benchmark`. 
+ ## Architecture Notes -- **ntfy.py** and **mic.py** are standalone modules with zero internal dependencies +- **ntfy.py** - standalone notification poller with zero internal dependencies +- **sensors/** - Sensor framework (MicSensor, OscillatorSensor) for real-time input - **eventbus.py** provides thread-safe event publishing for decoupled communication -- **controller.py** coordinates ntfy/mic monitoring -- The render pipeline: fetch → render → effects → scroll → terminal output +- **effects/** - plugin architecture with performance monitoring +- The new pipeline architecture: source → render → effects → display + +#### Canvas & Camera + +- **Canvas** (`engine/canvas.py`): 2D rendering surface with dirty region tracking +- **Camera** (`engine/camera.py`): Viewport controller for scrolling content + +The Canvas tracks dirty regions automatically when content is written (via `put_region`, `put_text`, `fill`), enabling partial buffer updates for optimized effect processing. + +### Pipeline Architecture + +The new Stage-based pipeline architecture provides capability-based dependency resolution: + +- **Stage** (`engine/pipeline/core.py`): Base class for pipeline stages +- **Pipeline** (`engine/pipeline/controller.py`): Executes stages with capability-based dependency resolution +- **PipelineConfig** (`engine/pipeline/controller.py`): Configuration for pipeline instance +- **StageRegistry** (`engine/pipeline/registry.py`): Discovers and registers stages +- **Stage Adapters** (`engine/pipeline/adapters.py`): Wraps existing components as stages + +#### Pipeline Configuration + +The `PipelineConfig` dataclass configures pipeline behavior: + +```python +@dataclass +class PipelineConfig: + source: str = "headlines" # Data source identifier + display: str = "terminal" # Display backend identifier + camera: str = "vertical" # Camera mode identifier + effects: list[str] = field(default_factory=list) # List of effect names + enable_metrics: bool = True # Enable performance 
metrics +``` + +**Available sources**: `headlines`, `poetry`, `empty`, `list`, `image`, `metrics`, `cached`, `transform`, `composite`, `pipeline-inspect` +**Available displays**: `terminal`, `null`, `replay`, `websocket`, `pygame`, `moderngl`, `multi` +**Available camera modes**: `FEED`, `SCROLL`, `HORIZONTAL`, `OMNI`, `FLOATING`, `BOUNCE`, `RADIAL` + +#### Capability-Based Dependencies + +Stages declare capabilities (what they provide) and dependencies (what they need). The Pipeline resolves dependencies using prefix matching: +- `"source"` matches `"source.headlines"`, `"source.poetry"`, etc. +- `"camera.state"` matches the camera state capability +- This allows flexible composition without hardcoding specific stage names + +#### Minimum Capabilities + +The pipeline requires these minimum capabilities to function: +- `"source"` - Data source capability +- `"render.output"` - Rendered content capability +- `"display.output"` - Display output capability +- `"camera.state"` - Camera state for viewport filtering + +These are automatically injected if missing by the `ensure_minimum_capabilities()` method. + +#### Sensor Framework + +- **Sensor** (`engine/sensors/__init__.py`): Base class for real-time input sensors +- **SensorRegistry**: Discovers available sensors +- **SensorStage**: Pipeline adapter that provides sensor values to effects +- **MicSensor** (`engine/sensors/mic.py`): Self-contained microphone input +- **OscillatorSensor** (`engine/sensors/oscillator.py`): Test sensor for development +- **PipelineMetricsSensor** (`engine/sensors/pipeline_metrics.py`): Exposes pipeline metrics as sensor values + +Sensors support param bindings to drive effect parameters in real-time. 
+ +#### Pipeline Introspection + +- **PipelineIntrospectionSource** (`engine/data_sources/pipeline_introspection.py`): Renders live ASCII visualization of pipeline DAG with metrics +- **PipelineIntrospectionDemo** (`engine/pipeline/pipeline_introspection_demo.py`): 3-phase demo controller for effect animation + +Preset: `pipeline-inspect` - Live pipeline introspection with DAG and performance metrics + +#### Partial Update Support + +Effect plugins can opt-in to partial buffer updates for performance optimization: +- Set `supports_partial_updates = True` on the effect class +- Implement `process_partial(buf, ctx, partial)` method +- The `PartialUpdate` dataclass indicates which regions changed + +### Preset System + +Presets use TOML format (no external dependencies): + +- Built-in: `engine/presets.toml` +- User config: `~/.config/mainline/presets.toml` +- Local override: `./presets.toml` + +- **Preset loader** (`engine/pipeline/preset_loader.py`): Loads and validates presets +- **PipelinePreset** (`engine/pipeline/presets.py`): Dataclass for preset configuration + +Functions: +- `validate_preset()` - Validate preset structure +- `validate_signal_path()` - Detect circular dependencies +- `generate_preset_toml()` - Generate skeleton preset + +### Display System + +- **Display abstraction** (`engine/display/`): swap display backends via the Display protocol + - `display/backends/terminal.py` - ANSI terminal output + - `display/backends/websocket.py` - broadcasts to web clients via WebSocket + - `display/backends/null.py` - headless display for testing + - `display/backends/multi.py` - forwards to multiple displays simultaneously + - `display/backends/moderngl.py` - GPU-accelerated OpenGL rendering (optional) + - `display/__init__.py` - DisplayRegistry for backend discovery + +- **WebSocket display** (`engine/display/backends/websocket.py`): real-time frame broadcasting to web browsers + - WebSocket server on port 8765 + - HTTP server on port 8766 (serves HTML client) 
+ - Client at `client/index.html` with ANSI color parsing and fullscreen support + +- **Display modes** (`--display` flag): + - `terminal` - Default ANSI terminal output + - `websocket` - Web browser display (requires websockets package) + - `moderngl` - GPU-accelerated rendering (requires moderngl package) + +### Effect Plugin System + +- **EffectPlugin ABC** (`engine/effects/types.py`): abstract base class for effects + - All effects must inherit from EffectPlugin and implement `process()` and `configure()` + - Runtime discovery via `effects_plugins/__init__.py` using `issubclass()` checks + +- **EffectRegistry** (`engine/effects/registry.py`): manages registered effects +- **EffectChain** (`engine/effects/chain.py`): chains effects in pipeline order + +### Command & Control + +- C&C uses separate ntfy topics for commands and responses +- `NTFY_CC_CMD_TOPIC` - commands from cmdline.py +- `NTFY_CC_RESP_TOPIC` - responses back to cmdline.py +- Effects controller handles `/effects` commands (list, on/off, intensity, reorder, stats) + +### Pipeline Documentation + +The rendering pipeline is documented in `docs/PIPELINE.md` using Mermaid diagrams. + +**IMPORTANT**: When making significant architectural changes to the rendering pipeline (new layers, effects, display backends), update `docs/PIPELINE.md` to reflect the changes: +1. Edit `docs/PIPELINE.md` with the new architecture +2. If adding new SVG diagrams, render them manually using an external tool (e.g., Mermaid Live Editor) +3. 
Commit both the markdown and any new diagram files + +### Pipeline Mutation API + +The Pipeline class supports dynamic mutation during runtime via the mutation API: + +**Core Methods:** +- `add_stage(name, stage, initialize=True)` - Add a stage to the pipeline +- `remove_stage(name, cleanup=True)` - Remove a stage and rebuild execution order +- `replace_stage(name, new_stage, preserve_state=True)` - Replace a stage with another +- `swap_stages(name1, name2)` - Swap two stages +- `move_stage(name, after=None, before=None)` - Move a stage in execution order +- `enable_stage(name)` - Enable a stage +- `disable_stage(name)` - Disable a stage + +**New Methods (Issue #35):** +- `cleanup_stage(name)` - Clean up specific stage without removing it +- `remove_stage_safe(name, cleanup=True)` - Alias for remove_stage that explicitly rebuilds +- `can_hot_swap(name)` - Check if a stage can be safely hot-swapped + - Returns False for stages that provide minimum capabilities as sole provider + - Returns True for swappable stages + +**WebSocket Commands:** +Commands can be sent via WebSocket to mutate the pipeline at runtime: +```json +{"action": "remove_stage", "stage": "stage_name"} +{"action": "swap_stages", "stage1": "name1", "stage2": "name2"} +{"action": "enable_stage", "stage": "stage_name"} +{"action": "disable_stage", "stage": "stage_name"} +{"action": "cleanup_stage", "stage": "stage_name"} +{"action": "can_hot_swap", "stage": "stage_name"} +``` + +**Implementation Files:** +- `engine/pipeline/controller.py` - Pipeline class with mutation methods +- `engine/app/pipeline_runner.py` - `_handle_pipeline_mutation()` function +- `engine/pipeline/ui.py` - execute_command() with docstrings +- `tests/test_pipeline_mutation_commands.py` - Integration tests + +## Skills Library + +A skills library MCP server (`skills`) is available for capturing and tracking learned knowledge. Skills are stored in `~/.skills/`. + +### Workflow + +**Before starting work:** +1. 
Run `local_skills_list_skills` to see available skills +2. Use `local_skills_peek_skill({name: "skill-name"})` to preview relevant skills +3. Use `local_skills_skill_slice({name: "skill-name", query: "your question"})` to get relevant sections + +**While working:** +- If a skill was wrong or incomplete: `local_skills_update_skill` → `local_skills_record_assessment` → `local_skills_report_outcome({quality: 1})` +- If a skill worked correctly: `local_skills_report_outcome({quality: 4})` (normal) or `quality: 5` (perfect) + +**End of session:** +- Run `local_skills_reflect_on_session({context_summary: "what you did"})` to identify new skills to capture +- Use `local_skills_create_skill` to add new skills +- Use `local_skills_record_assessment` to score them + +### Useful Tools +- `local_skills_review_stale_skills()` - Skills due for review (negative days_until_due) +- `local_skills_skills_report()` - Overview of entire collection +- `local_skills_validate_skill({name: "skill-name"})` - Load skill for review with sources + +### Agent Skills + +This project also has Agent Skills (SKILL.md files) in `.opencode/skills/`. Use the `skill` tool to load them: +- `skill({name: "mainline-architecture"})` - Pipeline stages, capability resolution +- `skill({name: "mainline-effects"})` - How to add new effect plugins +- `skill({name: "mainline-display"})` - Display backend implementation +- `skill({name: "mainline-sources"})` - Adding new RSS feeds +- `skill({name: "mainline-presets"})` - Creating pipeline presets +- `skill({name: "mainline-sensors"})` - Sensor framework usage diff --git a/README.md b/README.md index 0dcbd3f..e445746 100644 --- a/README.md +++ b/README.md @@ -2,34 +2,7 @@ > *Digital consciousness stream. Matrix aesthetic · THX-1138 hue.* -A full-screen terminal news ticker that renders live global headlines in large OTF-font block characters with selectable color gradients (Verdant Green, Molten Orange, or Violet Purple). 
Headlines auto-translate into the native script of their subject region. Ambient mic input warps the glitch rate in real time. A `--poetry` mode replaces the feed with public-domain literary passages. Live messages can be pushed to the display over [ntfy.sh](https://ntfy.sh). **Figment mode** overlays flickery, theme-colored SVG glyphs on the running stream at timed intervals — controllable from any input source via an extensible trigger protocol. - ---- - -## Contents - -- [Using](#using) - - [Run](#run) - - [Config](#config) - - [Display Modes](#display-modes) - - [Feeds](#feeds) - - [Fonts](#fonts) - - [ntfy.sh](#ntfysh) - - [Figment Mode](#figment-mode) - - [Command & Control](#command--control-cc) -- [Internals](#internals) - - [How it works](#how-it-works) - - [Architecture](#architecture) -- [Development](#development) - - [Setup](#setup) - - [Tasks](#tasks) - - [Testing](#testing) - - [Linting](#linting) -- [Roadmap](#roadmap) - - [Performance](#performance) - - [Graphics](#graphics) - - [Cyberpunk Vibes](#cyberpunk-vibes) - - [Extensibility](#extensibility) +A full-screen terminal news ticker that renders live global headlines in large OTF-font block characters with a white-hot → deep green gradient. Headlines auto-translate into the native script of their subject region. Ambient mic input warps the glitch rate in real time. A `--poetry` mode replaces the feed with public-domain literary passages. Live messages can be pushed to the display over [ntfy.sh](https://ntfy.sh). 
--- @@ -42,11 +15,7 @@ python3 mainline.py # news stream python3 mainline.py --poetry # literary consciousness mode python3 mainline.py -p # same python3 mainline.py --firehose # dense rapid-fire headline mode -python3 mainline.py --figment # enable periodic SVG glyph overlays -python3 mainline.py --figment-interval 30 # figment every 30 seconds (default: 60) python3 mainline.py --display websocket # web browser display only -python3 mainline.py --display both # terminal + web browser -python3 mainline.py --refresh # force re-fetch (bypass cache) python3 mainline.py --no-font-picker # skip interactive font picker python3 mainline.py --font-file path.otf # use a specific font file python3 mainline.py --font-dir ~/fonts # scan a different font folder @@ -98,7 +67,6 @@ All constants live in `engine/config.py`: | `FRAME_DT` | `0.05` | Frame interval in seconds (20 FPS) | | `FIREHOSE_H` | `12` | Firehose zone height (terminal rows) | | `GRAD_SPEED` | `0.08` | Gradient sweep speed | -| `FIGMENT_INTERVAL` | `60` | Seconds between figment appearances (set by `--figment-interval`) | ### Display Modes @@ -106,8 +74,7 @@ Mainline supports multiple display backends: - **Terminal** (`--display terminal`): ANSI terminal output (default) - **WebSocket** (`--display websocket`): Stream to web browser clients -- **Sixel** (`--display sixel`): Sixel graphics in supported terminals (iTerm2, mintty) -- **Both** (`--display both`): Terminal + WebSocket simultaneously +- **ModernGL** (`--display moderngl`): GPU-accelerated rendering (optional) WebSocket mode serves a web client at http://localhost:8766 with ANSI color support and fullscreen mode. @@ -135,56 +102,20 @@ To push a message: curl -d "Body text" -H "Title: Alert title" https://ntfy.sh/your_topic ``` -Update `NTFY_TOPIC` in `engine/config.py` to point at your own topic. 
- -### Figment Mode - -Figment mode periodically overlays a full-screen SVG glyph on the running ticker — flickering through a reveal → hold (strobe) → dissolve cycle, colored with a randomly selected theme gradient. - -**Enable it** with the `--figment` flag: - -```bash -uv run mainline.py --figment # glyph every 60 seconds (default) -uv run mainline.py --figment --figment-interval 30 # every 30 seconds -``` - -**Figment assets** live in `figments/` — drop any `.svg` file there and it will be picked up automatically. The bundled set contains Mayan and Aztec glyphs. Figments are selected randomly, avoiding immediate repeats, and rasterized into half-block terminal art at display time. - -**Triggering manually** — any object with a `poll() -> FigmentCommand | None` method satisfies the `FigmentTrigger` protocol and can be passed to the plugin: - -```python -from engine.figment_trigger import FigmentAction, FigmentCommand - -class MyTrigger: - def poll(self): - if some_condition: - return FigmentCommand(action=FigmentAction.TRIGGER) - return None -``` - -Built-in commands: `TRIGGER`, `SET_INTENSITY`, `SET_INTERVAL`, `SET_COLOR`, `STOP`. 
- -**System dependency:** Figment mode requires the Cairo C library (`brew install cairo` on macOS) in addition to the `figment` extras group: - -```bash -uv sync --extra figment # adds cairosvg -``` - --- ## Internals ### How it works -- On launch, the font picker scans `fonts/` and presents a live-rendered TUI for face selection; `--no-font-picker` skips directly to stream -- Feeds are fetched and filtered on startup (sports and vapid content stripped); results are cached to `.mainline_cache_news.json` / `.mainline_cache_poetry.json` for fast restarts -- Headlines are rasterized via Pillow with 4× SSAA into half-block characters (`▀▄█ `) at the configured font size -- The ticker uses a sweeping white-hot → deep green gradient; ntfy messages use a complementary white-hot → magenta/maroon gradient to distinguish them visually -- Subject-region detection runs a regex pass on each headline; matches trigger a Google Translate call and font swap to the appropriate script (CJK, Arabic, Devanagari, etc.) 
using macOS system fonts -- The mic stream runs in a background thread, feeding RMS dB into the glitch probability calculation each frame -- The viewport scrolls through a virtual canvas of pre-rendered blocks; fade zones at top and bottom dissolve characters probabilistically -- An ntfy.sh SSE stream runs in a background thread for messages and C&C commands; incoming messages interrupt the scroll and render full-screen until dismissed or expired -- Figment mode rasterizes SVGs via cairosvg → PIL → greyscale → half-block encode, then overlays them with ANSI cursor-positioning commands between the effect chain and the ntfy message layer +- On launch, the font picker scans `fonts/` and presents a live-rendered TUI for face selection +- Feeds are fetched and filtered on startup; results are cached for fast restarts +- Headlines are rasterized via Pillow with 4× SSAA into half-block characters +- The ticker uses a sweeping white-hot → deep green gradient +- Subject-region detection triggers Google Translate and font swap for non-Latin scripts +- The mic stream runs in a background thread, feeding RMS dB into glitch probability +- The viewport scrolls through pre-rendered blocks with fade zones +- An ntfy.sh SSE stream runs in a background thread for messages and C&C commands ### Architecture @@ -205,40 +136,32 @@ engine/ controller.py handles /effects commands performance.py performance monitoring legacy.py legacy functional effects - fetch.py RSS/Gutenberg fetching + cache load/save + effects_plugins/ effect plugin implementations + noise.py noise effect + fade.py fade effect + glitch.py glitch effect + firehose.py firehose effect + fetch.py RSS/Gutenberg fetching + cache ntfy.py NtfyPoller — standalone, zero internal deps mic.py MicMonitor — standalone, graceful fallback scroll.py stream() frame loop + message rendering - viewport.py terminal dimension tracking (tw/th) + viewport.py terminal dimension tracking frame.py scroll step calculation, timing - layers.py 
ticker zone, firehose, message + figment overlay rendering - figment_render.py SVG → cairosvg → PIL → half-block rasterizer with cache - figment_trigger.py FigmentTrigger protocol, FigmentAction enum, FigmentCommand - eventbus.py thread-safe event publishing for decoupled communication + layers.py ticker zone, firehose, message overlay + eventbus.py thread-safe event publishing events.py event types and definitions - controller.py coordinates ntfy/mic monitoring and event publishing - emitters.py background emitters for ntfy and mic - types.py type definitions and dataclasses - themes.py THEME_REGISTRY — gradient color definitions + controller.py coordinates ntfy/mic monitoring + emitters.py background emitters + types.py type definitions display/ Display backend system __init__.py DisplayRegistry, get_monitor backends/ terminal.py ANSI terminal display websocket.py WebSocket server for browser clients - sixel.py Sixel graphics (pure Python) - null.py headless display for testing - multi.py forwards to multiple displays + null.py headless display for testing + multi.py forwards to multiple displays + moderngl.py GPU-accelerated OpenGL rendering benchmark.py performance benchmarking tool - -effects_plugins/ - __init__.py plugin discovery (ABC issubclass scan) - noise.py NoiseEffect — random character noise - glitch.py GlitchEffect — horizontal glitch bars - fade.py FadeEffect — edge fade zones - firehose.py FirehoseEffect — dense bottom ticker strip - figment.py FigmentEffect — periodic SVG glyph overlay (state machine) - -figments/ SVG assets for figment mode ``` --- @@ -250,15 +173,11 @@ figments/ SVG assets for figment mode Requires Python 3.10+ and [uv](https://docs.astral.sh/uv/). 
```bash -uv sync # minimal (no mic, no figment) -uv sync --extra mic # with mic support (sounddevice + numpy) -uv sync --extra figment # with figment mode (cairosvg + system Cairo) -uv sync --all-extras # all optional features +uv sync # minimal (no mic) +uv sync --all-extras # with mic support uv sync --all-extras --group dev # full dev environment ``` -Figment mode also requires the Cairo C library: `brew install cairo` (macOS). - ### Tasks With [mise](https://mise.jdx.dev/): @@ -273,9 +192,7 @@ mise run format # ruff format mise run run # terminal display mise run run-websocket # web display only -mise run run-sixel # sixel graphics -mise run run-both # terminal + web -mise run run-client # both + open browser +mise run run-client # terminal + web mise run cmd # C&C command interface mise run cmd-stats # watch effects stats @@ -288,8 +205,6 @@ mise run topics-init # initialize ntfy topics ### Testing -Tests live in `tests/` and cover `config`, `filter`, `mic`, `ntfy`, `sources`, `terminal`, and the full figment pipeline (`figment_render`, `figment_trigger`, `figment`, `figment_overlay`). Figment tests are automatically skipped if Cairo is not installed. - ```bash uv run pytest uv run pytest --cov=engine --cov-report=term-missing @@ -333,19 +248,12 @@ Pre-commit hooks run lint automatically via `hk`. 
- Parallax secondary column ### Cyberpunk Vibes -- **Figment intensity wiring** — `config.intensity` currently stored but not yet applied to reveal/dissolve speed or strobe frequency -- **ntfy figment trigger** — built-in `NtfyFigmentTrigger` that listens on a dedicated topic to fire figments on demand -- **Keyword watch list** — highlight or strobe any headline matching tracked terms (names, topics, tickers) -- **Breaking interrupt** — full-screen flash + synthesized blip when a high-priority keyword hits -- **Live data overlay** — secondary ticker strip at screen edge: BTC price, ISS position, geomagnetic index -- **Theme switcher** — `--amber` (phosphor), `--ice` (electric cyan), `--red` (alert state) palette modes via CLI flag -- **Persona modes** — `--surveillance`, `--oracle`, `--underground` as feed presets with matching color themes and boot copy -- **Synthesized audio** — short static bursts tied to glitch events, independent of mic input - -### Extensibility -- **serve.py** — HTTP server that imports `engine.render` and `engine.fetch` directly to stream 1-bit bitmaps to an ESP32 display -- **Rust port** — `ntfy.py` and `render.py` are the natural first targets; clear module boundaries make incremental porting viable +- Keyword watch list with strobe effects +- Breaking interrupt with synthesized audio +- Live data overlay (BTC, ISS position) +- Theme switcher (amber, ice, red) +- Persona modes (surveillance, oracle, underground) --- -*Python 3.10+. Primary display font is user-selectable via bundled `fonts/` picker.* +*Python 3.10+. 
Primary display font is user-selectable via bundled `fonts/` picker.* \ No newline at end of file diff --git a/TODO.md b/TODO.md new file mode 100644 index 0000000..d9e0b01 --- /dev/null +++ b/TODO.md @@ -0,0 +1,27 @@ +# Tasks + +## Documentation Updates +- [x] Remove references to removed display backends (sixel, kitty) from all documentation +- [x] Remove references to deprecated "both" display mode +- [x] Update AGENTS.md to reflect current architecture and remove merge conflicts +- [x] Update Agent Skills (.opencode/skills/) to match current codebase +- [x] Update docs/ARCHITECTURE.md to remove SixelDisplay references +- [x] Verify ModernGL backend is properly documented and registered +- [ ] Update docs/PIPELINE.md to reflect Stage-based architecture (outdated legacy flowchart) [#41](https://git.notsosm.art/david/Mainline/issues/41) + +## Code & Features +- [ ] Check if luminance implementation exists for shade/tint effects (see [#26](https://git.notsosm.art/david/Mainline/issues/26) related: need to verify render/blocks.py has luminance calculation) +- [x] Add entropy/chaos score metadata to effects for auto-categorization and intensity control [#32](https://git.notsosm.art/david/Mainline/issues/32) (closed - completed) +- [ ] Finish ModernGL display backend: integrate window system, implement glyph caching, add event handling, and support border modes [#42](https://git.notsosm.art/david/Mainline/issues/42) +- [x] Integrate UIPanel with pipeline: register stages, link parameter schemas, handle events, implement hot-reload. +- [x] Move cached fixture headlines to engine/fixtures/headlines.json and update default source to use fixture. +- [x] Add interactive UI panel for pipeline configuration (right-side panel) with stage toggles and param sliders. +- [x] Enumerate all effect plugin parameters automatically for UI control (intensity, decay, etc.) 
+- [ ] Implement pipeline hot-rebuild when stage toggles or params change, preserving camera and display state [#43](https://git.notsosm.art/david/Mainline/issues/43) + +## Gitea Issues Tracking +- [#37](https://git.notsosm.art/david/Mainline/issues/37): Refactor app.py and adapter.py for better maintainability +- [#35](https://git.notsosm.art/david/Mainline/issues/35): Epic: Pipeline Mutation API for Stage Hot-Swapping +- [#34](https://git.notsosm.art/david/Mainline/issues/34): Improve benchmarking system and performance tests +- [#33](https://git.notsosm.art/david/Mainline/issues/33): Add web-based pipeline editor UI +- [#26](https://git.notsosm.art/david/Mainline/issues/26): Add Streaming display backend diff --git a/client/editor.html b/client/editor.html new file mode 100644 index 0000000..1dc1356 --- /dev/null +++ b/client/editor.html @@ -0,0 +1,313 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="UTF-8"> + <meta name="viewport" content="width=device-width, initial-scale=1.0"> + <title>Mainline Pipeline Editor + + + + +
+

Pipeline

+
+
+ + +
+
+
Disconnected
+ + + + diff --git a/client/index.html b/client/index.html new file mode 100644 index 0000000..0cb12c1 --- /dev/null +++ b/client/index.html @@ -0,0 +1,369 @@ + + + + + + Mainline Terminal + + + +
+ +
+
+ + + + +
+
Connecting...
+ + + + diff --git a/cmdline.py b/cmdline.py index 9ee9ba6..15048a1 100644 --- a/cmdline.py +++ b/cmdline.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# -*- coding: utf-8 -*- """ Command-line utility for interacting with mainline via ntfy. @@ -20,6 +21,11 @@ C&C works like a serial port: 3. Cmdline polls for response """ +import os + +os.environ["FORCE_COLOR"] = "1" +os.environ["TERM"] = "xterm-256color" + import argparse import json import sys diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md new file mode 100644 index 0000000..ebe71f6 --- /dev/null +++ b/docs/ARCHITECTURE.md @@ -0,0 +1,153 @@ +# Mainline Architecture Diagrams + +> These diagrams use Mermaid. Render with: `npx @mermaid-js/mermaid-cli -i ARCHITECTURE.md` or view in GitHub/GitLab/Notion. + +## Class Hierarchy (Mermaid) + +```mermaid +classDiagram + class Stage { + <> + +str name + +set[str] capabilities + +set[str] dependencies + +process(data, ctx) Any + } + + Stage <|-- DataSourceStage + Stage <|-- CameraStage + Stage <|-- FontStage + Stage <|-- ViewportFilterStage + Stage <|-- EffectPluginStage + Stage <|-- DisplayStage + Stage <|-- SourceItemsToBufferStage + Stage <|-- PassthroughStage + Stage <|-- ImageToTextStage + Stage <|-- CanvasStage + + class EffectPlugin { + <> + +str name + +EffectConfig config + +process(buf, ctx) list[str] + +configure(config) None + } + + EffectPlugin <|-- NoiseEffect + EffectPlugin <|-- FadeEffect + EffectPlugin <|-- GlitchEffect + EffectPlugin <|-- FirehoseEffect + EffectPlugin <|-- CropEffect + EffectPlugin <|-- TintEffect + + class Display { + <> + +int width + +int height + +init(width, height, reuse) + +show(buffer, border) + +clear() None + +cleanup() None + } + + Display <|.. TerminalDisplay + Display <|.. NullDisplay + Display <|.. PygameDisplay + Display <|.. 
WebSocketDisplay
+
+    class Camera {
+        +int viewport_width
+        +int viewport_height
+        +CameraMode mode
+        +apply(buffer, width, height) list[str]
+    }
+
+    class Pipeline {
+        +dict[str, Stage] stages
+        +PipelineContext context
+        +execute(data) StageResult
+    }
+
+    Pipeline --> Stage
+    Stage --> Display
+```
+
+## Data Flow (Mermaid)
+
+```mermaid
+flowchart LR
+    DataSource[Data Source] --> DataSourceStage
+    DataSourceStage --> FontStage
+    FontStage --> CameraStage
+    CameraStage --> EffectStages
+    EffectStages --> DisplayStage
+    DisplayStage --> TerminalDisplay
+    DisplayStage --> BrowserWebSocket
+    DisplayStage --> PygameDisplay
+    DisplayStage --> NullDisplay
+```
+
+## Effect Chain (Mermaid)
+
+```mermaid
+flowchart LR
+    InputBuffer --> NoiseEffect
+    NoiseEffect --> FadeEffect
+    FadeEffect --> GlitchEffect
+    GlitchEffect --> FirehoseEffect
+    FirehoseEffect --> Output
+```
+
+> **Note:** Each effect must preserve buffer dimensions (line count and visible width).
+
+## Stage Capabilities
+
+```mermaid
+flowchart TB
+    subgraph "Capability Resolution"
+        D[DataSource<br/>
provides: source.*] + C[Camera
provides: render.output] + E[Effects
provides: render.effect] + DIS[Display
provides: display.output] + end +``` + +--- + +## Legacy ASCII Diagrams + +### Stage Inheritance +``` +Stage(ABC) +├── DataSourceStage +├── CameraStage +├── FontStage +├── ViewportFilterStage +├── EffectPluginStage +├── DisplayStage +├── SourceItemsToBufferStage +├── PassthroughStage +├── ImageToTextStage +└── CanvasStage +``` + +### Display Backends +``` +Display(Protocol) +├── TerminalDisplay +├── NullDisplay +├── PygameDisplay +├── WebSocketDisplay +└── MultiDisplay +``` + +### Camera Modes +``` +Camera +├── FEED # Static view +├── SCROLL # Horizontal scroll +├── VERTICAL # Vertical scroll +├── HORIZONTAL # Same as scroll +├── OMNI # Omnidirectional +├── FLOATING # Floating particles +└── BOUNCE # Bouncing camera diff --git a/Mainline Renderer + ntfy Message Queue for ESP32.md b/docs/Mainline Renderer + ntfy Message Queue for ESP32.md similarity index 100% rename from Mainline Renderer + ntfy Message Queue for ESP32.md rename to docs/Mainline Renderer + ntfy Message Queue for ESP32.md diff --git a/docs/PIPELINE.md b/docs/PIPELINE.md new file mode 100644 index 0000000..b759289 --- /dev/null +++ b/docs/PIPELINE.md @@ -0,0 +1,223 @@ +# Mainline Pipeline + +## Architecture Overview + +The Mainline pipeline uses a **Stage-based architecture** with **capability-based dependency resolution**. Stages declare capabilities (what they provide) and dependencies (what they need), and the Pipeline resolves dependencies using prefix matching. 
+ +``` +Source Stage → Render Stage → Effect Stages → Display Stage + ↓ +Camera Stage (provides camera.state capability) +``` + +### Capability-Based Dependency Resolution + +Stages declare capabilities and dependencies: +- **Capabilities**: What the stage provides (e.g., `source`, `render.output`, `display.output`, `camera.state`) +- **Dependencies**: What the stage needs (e.g., `source`, `render.output`, `camera.state`) + +The Pipeline resolves dependencies using **prefix matching**: +- `"source"` matches `"source.headlines"`, `"source.poetry"`, etc. +- `"camera.state"` matches the camera state capability provided by `CameraClockStage` +- This allows flexible composition without hardcoding specific stage names + +### Minimum Capabilities + +The pipeline requires these minimum capabilities to function: +- `"source"` - Data source capability (provides raw items) +- `"render.output"` - Rendered content capability +- `"display.output"` - Display output capability +- `"camera.state"` - Camera state for viewport filtering + +These are automatically injected if missing by the `ensure_minimum_capabilities()` method. 
+ +### Stage Registry + +The `StageRegistry` discovers and registers stages automatically: +- Scans `engine/stages/` for stage implementations +- Registers stages by their declared capabilities +- Enables runtime stage discovery and composition + +## Stage-Based Pipeline Flow + +```mermaid +flowchart TD + subgraph Stages["Stage Pipeline"] + subgraph SourceStage["Source Stage (provides: source.*)"] + Headlines[HeadlinesSource] + Poetry[PoetrySource] + Pipeline[PipelineSource] + end + + subgraph RenderStage["Render Stage (provides: render.*)"] + Render[RenderStage] + Canvas[Canvas] + Camera[Camera] + end + + subgraph EffectStages["Effect Stages (provides: effect.*)"] + Noise[NoiseEffect] + Fade[FadeEffect] + Glitch[GlitchEffect] + Firehose[FirehoseEffect] + Hud[HudEffect] + end + + subgraph DisplayStage["Display Stage (provides: display.*)"] + Terminal[TerminalDisplay] + Pygame[PygameDisplay] + WebSocket[WebSocketDisplay] + Null[NullDisplay] + end + end + + subgraph Capabilities["Capability Map"] + SourceCaps["source.headlines
source.poetry
source.pipeline"] + RenderCaps["render.output
render.canvas"] + EffectCaps["effect.noise
effect.fade
effect.glitch"] + DisplayCaps["display.output
display.terminal"] + end + + SourceStage --> RenderStage + RenderStage --> EffectStages + EffectStages --> DisplayStage + + SourceStage --> SourceCaps + RenderStage --> RenderCaps + EffectStages --> EffectCaps + DisplayStage --> DisplayCaps + + style SourceStage fill:#f9f,stroke:#333 + style RenderStage fill:#bbf,stroke:#333 + style EffectStages fill:#fbf,stroke:#333 + style DisplayStage fill:#bfb,stroke:#333 +``` + +## Stage Adapters + +Existing components are wrapped as Stages via adapters: + +### Source Stage Adapter +- Wraps `HeadlinesDataSource`, `PoetryDataSource`, etc. +- Provides `source.*` capabilities +- Fetches data and outputs to pipeline buffer + +### Render Stage Adapter +- Wraps `StreamController`, `Camera`, `render_ticker_zone` +- Provides `render.output` capability +- Processes content and renders to canvas + +### Effect Stage Adapter +- Wraps `EffectChain` and individual effect plugins +- Provides `effect.*` capabilities +- Applies visual effects to rendered content + +### Display Stage Adapter +- Wraps `TerminalDisplay`, `PygameDisplay`, etc. 
+- Provides `display.*` capabilities +- Outputs final buffer to display backend + +## Pipeline Mutation API + +The Pipeline supports dynamic mutation during runtime: + +### Core Methods +- `add_stage(name, stage, initialize=True)` - Add a stage +- `remove_stage(name, cleanup=True)` - Remove a stage and rebuild execution order +- `replace_stage(name, new_stage, preserve_state=True)` - Replace a stage +- `swap_stages(name1, name2)` - Swap two stages +- `move_stage(name, after=None, before=None)` - Move a stage in execution order +- `enable_stage(name)` / `disable_stage(name)` - Enable/disable stages + +### Safety Checks +- `can_hot_swap(name)` - Check if a stage can be safely hot-swapped +- `cleanup_stage(name)` - Clean up specific stage without removing it + +### WebSocket Commands +The mutation API is accessible via WebSocket for remote control: +```json +{"action": "remove_stage", "stage": "stage_name"} +{"action": "swap_stages", "stage1": "name1", "stage2": "name2"} +{"action": "enable_stage", "stage": "stage_name"} +{"action": "cleanup_stage", "stage": "stage_name"} +``` + +## Camera Modes + +The Camera supports the following modes: + +- **FEED**: Single item view (static or rapid cycling) +- **SCROLL**: Smooth vertical scrolling (movie credits style) +- **HORIZONTAL**: Left/right movement +- **OMNI**: Combination of vertical and horizontal +- **FLOATING**: Sinusoidal/bobbing motion +- **BOUNCE**: DVD-style bouncing off edges +- **RADIAL**: Polar coordinate scanning (radar sweep) + +Note: Camera state is provided by `CameraClockStage` (capability: `camera.state`) which updates independently of data flow. The `CameraStage` applies viewport transformations (capability: `camera`). 
+ +## Animation & Presets + +```mermaid +flowchart LR + subgraph Preset["Preset"] + PP[PipelineParams] + AC[AnimationController] + end + + subgraph AnimationController["AnimationController"] + Clock[Clock] + Events[Events] + Triggers[Triggers] + end + + subgraph Triggers["Trigger Types"] + TIME[TIME] + FRAME[FRAME] + CYCLE[CYCLE] + COND[CONDITION] + MANUAL[MANUAL] + end + + PP --> AC + Clock --> AC + Events --> AC + Triggers --> Events +``` + +## Camera Modes State Diagram + +```mermaid +stateDiagram-v2 + [*] --> Vertical + Vertical --> Horizontal: mode change + Horizontal --> Omni: mode change + Omni --> Floating: mode change + Floating --> Trace: mode change + Trace --> Vertical: mode change + + state Vertical { + [*] --> ScrollUp + ScrollUp --> ScrollUp: +y each frame + } + + state Horizontal { + [*] --> ScrollLeft + ScrollLeft --> ScrollLeft: +x each frame + } + + state Omni { + [*] --> Diagonal + Diagonal --> Diagonal: +x, +y each frame + } + + state Floating { + [*] --> Bobbing + Bobbing --> Bobbing: sin(time) for x,y + } + + state Trace { + [*] --> FollowPath + FollowPath --> FollowPath: node by node + } +``` diff --git a/Refactor mainline.md b/docs/Refactor mainline.md similarity index 98% rename from Refactor mainline.md rename to docs/Refactor mainline.md index 467c590..76bc82e 100644 --- a/Refactor mainline.md +++ b/docs/Refactor mainline.md @@ -1,11 +1,18 @@ -# Refactor mainline\.py into modular package +# + +Refactor mainline\.py into modular package + ## Problem + `mainline.py` is a single 1085\-line file with ~10 interleaved concerns\. 
This prevents: + * Reusing the ntfy doorbell interrupt in other visualizers * Importing the render pipeline from `serve.py` \(future ESP32 HTTP server\) * Testing any concern in isolation * Porting individual layers to Rust independently + ## Target structure + ```warp-runnable-command mainline.py # thin entrypoint: venv bootstrap → engine.app.main() engine/ @@ -23,8 +30,11 @@ engine/ scroll.py # stream() frame loop + message rendering app.py # main(), TITLE art, boot sequence, signal handler ``` + The package is named `engine/` to avoid a naming conflict with the `mainline.py` entrypoint\. + ## Module dependency graph + ```warp-runnable-command config ← (nothing) sources ← (nothing) @@ -39,64 +49,92 @@ mic ← (nothing — sounddevice only) scroll ← config, terminal, render, effects, ntfy, mic app ← everything above ``` + Critical property: **ntfy\.py and mic\.py have zero internal dependencies**, making ntfy reusable by any visualizer\. + ## Module details + ### mainline\.py \(entrypoint — slimmed down\) + Keeps only the venv bootstrap \(lines 10\-38\) which must run before any third\-party imports\. After bootstrap, delegates to `engine.app.main()`\. 
+ ### engine/config\.py + From current mainline\.py: + * `HEADLINE_LIMIT`, `FEED_TIMEOUT`, `MIC_THRESHOLD_DB` \(lines 55\-57\) * `MODE`, `FIREHOSE` CLI flag parsing \(lines 58\-59\) * `NTFY_TOPIC`, `NTFY_POLL_INTERVAL`, `MESSAGE_DISPLAY_SECS` \(lines 62\-64\) * `_FONT_PATH`, `_FONT_SZ`, `_RENDER_H` \(lines 147\-150\) * `_SCROLL_DUR`, `_FRAME_DT`, `FIREHOSE_H` \(lines 505\-507\) * `GLITCH`, `KATA` glyph tables \(lines 143\-144\) + ### engine/sources\.py + Pure data, no logic: + * `FEEDS` dict \(lines 102\-140\) * `POETRY_SOURCES` dict \(lines 67\-80\) * `SOURCE_LANGS` dict \(lines 258\-266\) * `_LOCATION_LANGS` dict \(lines 269\-289\) * `_SCRIPT_FONTS` dict \(lines 153\-165\) * `_NO_UPPER` set \(line 167\) + ### engine/terminal\.py + ANSI primitives and terminal I/O: + * All ANSI constants: `RST`, `BOLD`, `DIM`, `G_HI`, `G_MID`, `G_LO`, `G_DIM`, `W_COOL`, `W_DIM`, `W_GHOST`, `C_DIM`, `CLR`, `CURSOR_OFF`, `CURSOR_ON` \(lines 83\-99\) * `tw()`, `th()` \(lines 223\-234\) * `type_out()`, `slow_print()`, `boot_ln()` \(lines 355\-386\) + ### engine/filter\.py + * `_Strip` HTML parser class \(lines 205\-214\) * `strip_tags()` \(lines 217\-220\) * `_SKIP_RE` compiled regex \(lines 322\-346\) * `_skip()` predicate \(lines 349\-351\) + ### engine/translate\.py + * `_TRANSLATE_CACHE` \(line 291\) * `_detect_location_language()` \(lines 294\-300\) — imports `_LOCATION_LANGS` from sources * `_translate_headline()` \(lines 303\-319\) + ### engine/render\.py + The OTF→terminal pipeline\. This is exactly what `serve.py` will import to produce 1\-bit bitmaps for the ESP32\. 
+ * `_GRAD_COLS` gradient table \(lines 169\-182\) * `_font()`, `_font_for_lang()` with lazy\-load \+ cache \(lines 185\-202\) * `_render_line()` — OTF text → half\-block terminal rows \(lines 567\-605\) * `_big_wrap()` — word\-wrap \+ render \(lines 608\-636\) * `_lr_gradient()` — apply left→right color gradient \(lines 639\-656\) * `_make_block()` — composite: translate → render → colorize a headline \(lines 718\-756\)\. Imports from translate, sources\. + ### engine/effects\.py + Visual effects applied during the frame loop: + * `noise()` \(lines 237\-245\) * `glitch_bar()` \(lines 248\-252\) * `_fade_line()` — probabilistic character dissolve \(lines 659\-680\) * `_vis_trunc()` — ANSI\-aware width truncation \(lines 683\-701\) * `_firehose_line()` \(lines 759\-801\) — imports config\.MODE, sources\.FEEDS/POETRY\_SOURCES * `_next_headline()` — pool management \(lines 704\-715\) + ### engine/fetch\.py + * `fetch_feed()` \(lines 390\-396\) * `fetch_all()` \(lines 399\-426\) — imports filter\.\_skip, filter\.strip\_tags, terminal\.boot\_ln * `_fetch_gutenberg()` \(lines 429\-456\) * `fetch_poetry()` \(lines 459\-472\) * `_cache_path()`, `_load_cache()`, `_save_cache()` \(lines 476\-501\) + ### engine/ntfy\.py — standalone, reusable + Refactored from the current globals \+ thread \(lines 531\-564\) and the message rendering section of `stream()` \(lines 845\-909\) into a class: + ```python class NtfyPoller: def __init__(self, topic_url, poll_interval=15, display_secs=30): @@ -108,8 +146,10 @@ class NtfyPoller: def dismiss(self): """Manually dismiss current message.""" ``` + Dependencies: `urllib.request`, `json`, `threading`, `time` — all stdlib\. No internal imports\. 
Other visualizers use it like: + ```python from engine.ntfy import NtfyPoller poller = NtfyPoller("https://ntfy.sh/my_topic/json?since=20s&poll=1") @@ -120,8 +160,11 @@ if msg: title, body, ts = msg render_my_message(title, body) # visualizer-specific ``` + ### engine/mic\.py — standalone + Refactored from the current globals \(lines 508\-528\) into a class: + ```python class MicMonitor: def __init__(self, threshold_db=50): @@ -137,41 +180,75 @@ class MicMonitor: def excess(self) -> float: """dB above threshold (clamped to 0).""" ``` + Dependencies: `sounddevice`, `numpy` \(both optional — graceful fallback\)\. + ### engine/scroll\.py + The `stream()` function \(lines 804\-990\)\. Receives its dependencies via arguments or imports: + * `stream(items, ntfy_poller, mic_monitor, config)` or similar * Message rendering \(lines 855\-909\) stays here since it's terminal\-display\-specific — a different visualizer would render messages differently + ### engine/app\.py + The orchestrator: + * `TITLE` ASCII art \(lines 994\-1001\) * `main()` \(lines 1004\-1084\): CLI handling, signal setup, boot animation, fetch, wire up ntfy/mic/scroll + ## Execution order + ### Step 1: Create engine/ package skeleton + Create `engine/__init__.py` and all empty module files\. + ### Step 2: Extract pure data modules \(zero\-dep\) + Move constants and data dicts into `config.py`, `sources.py`\. These have no logic dependencies\. + ### Step 3: Extract terminal\.py + Move ANSI codes and terminal I/O helpers\. No internal deps\. + ### Step 4: Extract filter\.py and translate\.py + Both are small, self\-contained\. translate imports from sources\. + ### Step 5: Extract render\.py + Font loading \+ the OTF→half\-block pipeline\. Imports from config, terminal, sources\. This is the module `serve.py` will later import\. + ### Step 6: Extract effects\.py + Visual effects\. Imports from config, terminal, sources\. + ### Step 7: Extract fetch\.py + Feed/Gutenberg fetching \+ caching\. 
Imports from config, sources, filter, terminal\. + ### Step 8: Extract ntfy\.py and mic\.py + Refactor globals\+threads into classes\. Zero internal deps\. + ### Step 9: Extract scroll\.py + The frame loop\. Last to extract because it depends on everything above\. + ### Step 10: Extract app\.py + The `main()` function, boot sequence, signal handler\. Wire up all modules\. + ### Step 11: Slim down mainline\.py + Keep only venv bootstrap \+ `from engine.app import main; main()`\. + ### Step 12: Verify + Run `python3 mainline.py`, `python3 mainline.py --poetry`, and `python3 mainline.py --firehose` to confirm identical behavior\. No behavioral changes in this refactor\. + ## What this enables + * **serve\.py** \(future\): `from engine.render import _render_line, _big_wrap` \+ `from engine.fetch import fetch_all` — imports the pipeline directly * **Other visualizers**: `from engine.ntfy import NtfyPoller` — doorbell feature with no coupling to mainline's scroll engine * **Rust port**: Clear boundaries for what to port first \(ntfy client, render pipeline\) vs what stays in Python \(fetching, caching — the server side\) diff --git a/klubhaus-doorbell-hardware.md b/docs/klubhaus-doorbell-hardware.md similarity index 100% rename from klubhaus-doorbell-hardware.md rename to docs/klubhaus-doorbell-hardware.md diff --git a/docs/proposals/adr-preset-scripting-language.md b/docs/proposals/adr-preset-scripting-language.md new file mode 100644 index 0000000..fe72118 --- /dev/null +++ b/docs/proposals/adr-preset-scripting-language.md @@ -0,0 +1,217 @@ +# ADR: Preset Scripting Language for Mainline + +## Status: Draft + +## Context + +We need to evaluate whether to add a scripting language for authoring presets in Mainline, replacing or augmenting the current TOML-based preset system. The goals are: + +1. **Expressiveness**: More powerful than TOML for describing dynamic, procedural, or dataflow-based presets +2. 
**Live coding**: Support hot-reloading of presets during runtime (like TidalCycles or Sonic Pi) +3. **Testing**: Include assertion language to package tests alongside presets +4. **Toolchain**: Consider packaging and build processes + +### Current State + +The current preset system uses TOML files (`presets.toml`) with a simple structure: + +```toml +[presets.demo-base] +description = "Demo: Base preset for effect hot-swapping" +source = "headlines" +display = "terminal" +camera = "feed" +effects = [] # Demo script will add/remove effects dynamically +camera_speed = 0.1 +viewport_width = 80 +viewport_height = 24 +``` + +This is declarative and static. It cannot express: +- Conditional logic based on runtime state +- Dataflow between pipeline stages +- Procedural generation of stage configurations +- Assertions or validation of preset behavior + +### Problems with TOML + +- No way to express dependencies between effects or stages +- Cannot describe temporal/animated behavior +- No support for sensor bindings or parametric animations +- Static configuration cannot adapt to runtime conditions +- No built-in testing/assertion mechanism + +## Approaches + +### 1. Visual Dataflow Language (PureData-style) + +Inspired by Pure Data (Pd), Max/MSP, and TouchDesigner: + +**Pros:** +- Intuitive for creative coding and live performance +- Strong model for real-time parameter modulation +- Matches the "patcher" paradigm already seen in pipeline architecture +- Rich ecosystem of visual programming tools + +**Cons:** +- Complex to implement from scratch +- Requires dedicated GUI editor +- Harder to version control (binary/graph formats) +- Mermaid diagrams alone aren't sufficient for this + +**Tools to explore:** +- libpd (Pure Data bindings for other languages) +- Node-based frameworks (node-red, various DSP tools) +- TouchDesigner-like approaches + +### 2. 
Textual DSL (TidalCycles-style) + +Domain-specific language focused on pattern transformation: + +**Pros:** +- Lightweight, fast iteration +- Easy to version control (text files) +- Can express complex patterns with minimal syntax +- Proven in livecoding community + +**Cons:** +- Learning curve for non-programmers +- Less visual than PureData approach + +**Example (hypothetical):** +``` +preset my-show { + source: headlines + + every 8s { + effect noise: intensity = (0.5 <-> 1.0) + } + + on mic.level > 0.7 { + effect glitch: intensity += 0.2 + } +} +``` + +### 3. Embed Existing Language + +Embed Lua, Python, or JavaScript: + +**Pros:** +- Full power of general-purpose language +- Existing tooling, testing frameworks +- Easy to integrate (many embeddable interpreters) + +**Cons:** +- Security concerns with running user code +- May be overkill for simple presets +- Testing/assertion system must be built on top + +**Tools:** +- Lua (lightweight, fast) +- Python (rich ecosystem, but heavier) +- QuickJS (small, embeddable JS) + +### 4. 
Hybrid Approach + +Visual editor generates textual DSL that compiles to Python: + +**Pros:** +- Best of both worlds +- Can start with simple DSL and add editor later + +**Cons:** +- More complex initial implementation + +## Requirements Analysis + +### Must Have +- [ ] Express pipeline stage configurations (source, effects, camera, display) +- [ ] Support parameter bindings to sensors +- [ ] Hot-reloading during runtime +- [ ] Integration with existing Pipeline architecture + +### Should Have +- [ ] Basic assertion language for testing +- [ ] Ability to define custom abstractions/modules +- [ ] Version control friendly (text-based) + +### Could Have +- [ ] Visual node-based editor +- [ ] Real-time visualization of dataflow +- [ ] MIDI/OSC support for external controllers + +## User Stories (Proposed) + +### Spike Stories (Investigation) + +**Story 1: Evaluate DSL Parsing Tools** +> As a developer, I want to understand the available Python DSL parsing libraries (Lark, parsy, pyparsing) so that I can choose the right tool for implementing a preset DSL. +> +> **Acceptance**: Document pros/cons of 3+ parsing libraries with small proof-of-concept experiments + +**Story 2: Research Livecoding Languages** +> As a developer, I want to understand how TidalCycles, Sonic Pi, and PureData handle hot-reloading and pattern generation so that I can apply similar techniques to Mainline. +> +> **Acceptance**: Document key architectural patterns from 2+ livecoding systems + +**Story 3: Prototype Textual DSL** +> As a preset author, I want to write presets in a simple textual DSL that supports basic conditionals and sensor bindings. +> +> **Acceptance**: Create a prototype DSL that can parse a sample preset and convert to PipelineConfig + +**Story 4: Investigate Assertion/Testing Approaches** +> As a quality engineer, I want to include assertions with presets so that preset behavior can be validated automatically. 
+> +> **Acceptance**: Survey testing patterns in livecoding and propose assertion syntax + +### Implementation Stories (Future) + +**Story 5: Implement Core DSL Parser** +> As a preset author, I want to write presets in a textual DSL that supports sensors, conditionals, and parameter bindings. +> +> **Acceptance**: DSL parser handles the core syntax, produces valid PipelineConfig + +**Story 6: Hot-Reload System** +> As a performer, I want to edit preset files and see changes reflected in real-time without restarting. +> +> **Acceptance**: File watcher + pipeline mutation API integration works + +**Story 7: Assertion Language** +> As a preset author, I want to include assertions that validate sensor values or pipeline state. +> +> **Acceptance**: Assertions can run as part of preset execution and report pass/fail + +**Story 8: Toolchain/Packaging** +> As a preset distributor, I want to package presets with dependencies for easy sharing. +> +> **Acceptance**: Can create, build, and install a preset package + +## Decision + +**Recommend: Start with textual DSL approach (Option 2/4)** + +Rationale: +- Lowest barrier to entry (text files, version control) +- Can evolve to hybrid later if visual editor is needed +- Strong precedents in livecoding community (TidalCycles, Sonic Pi) +- Enables hot-reloading naturally +- Assertion language can be part of the DSL syntax + +**Not recommending Mermaid**: Mermaid is excellent for documentation and visualization, but it's a diagramming tool, not a programming language. It cannot express the logic, conditionals, and sensor bindings we need. + +## Next Steps + +1. Execute Spike Stories 1-4 to reduce uncertainty +2. Create minimal viable DSL syntax +3. Prototype hot-reloading with existing preset system +4. 
Evaluate whether visual editor adds sufficient value to warrant complexity + +## References + +- Pure Data: https://puredata.info/ +- TidalCycles: https://tidalcycles.org/ +- Sonic Pi: https://sonic-pi.net/ +- Lark parser: https://lark-parser.readthedocs.io/ +- Mainline Pipeline Architecture: `engine/pipeline/` +- Current Presets: `presets.toml` diff --git a/docs/superpowers/plans/2026-03-16-color-scheme-implementation.md b/docs/superpowers/plans/2026-03-16-color-scheme-implementation.md deleted file mode 100644 index c08017f..0000000 --- a/docs/superpowers/plans/2026-03-16-color-scheme-implementation.md +++ /dev/null @@ -1,894 +0,0 @@ -# Color Scheme Switcher Implementation Plan - -> **For agentic workers:** REQUIRED: Use superpowers:subagent-driven-development (if subagents available) or superpowers:executing-plans to implement this plan. Steps use checkbox (`- [ ]`) syntax for tracking. - -**Goal:** Implement interactive color theme picker at startup that lets users choose between green, orange, or purple gradients with complementary message queue colors. - -**Architecture:** New `themes.py` data module defines Theme class and THEME_REGISTRY. Config adds `ACTIVE_THEME` global set by picker. Render functions read from active theme instead of hardcoded constants. App adds picker UI that mirrors font picker pattern. 
- -**Tech Stack:** Python 3.10+, ANSI 256-color codes, existing terminal I/O utilities - ---- - -## File Structure - -| File | Purpose | Change Type | -|------|---------|------------| -| `engine/themes.py` | Theme class, THEME_REGISTRY, color codes | Create | -| `engine/config.py` | ACTIVE_THEME global, set_active_theme() | Modify | -| `engine/render.py` | Replace GRAD_COLS/MSG_GRAD_COLS with config lookup | Modify | -| `engine/scroll.py` | Update message gradient call | Modify | -| `engine/app.py` | pick_color_theme(), call in main() | Modify | -| `tests/test_themes.py` | Theme class and registry unit tests | Create | - ---- - -## Chunk 1: Theme Data Module - -### Task 1: Create themes.py with Theme class and registry - -**Files:** -- Create: `engine/themes.py` -- Test: `tests/test_themes.py` - -- [ ] **Step 1: Write failing test for Theme class** - -Create `tests/test_themes.py`: - -```python -"""Test color themes and registry.""" -from engine.themes import Theme, THEME_REGISTRY, get_theme - - -def test_theme_construction(): - """Theme stores name and gradient lists.""" - main = ["\033[1;38;5;231m"] * 12 - msg = ["\033[1;38;5;225m"] * 12 - theme = Theme(name="Test Green", main_gradient=main, message_gradient=msg) - - assert theme.name == "Test Green" - assert theme.main_gradient == main - assert theme.message_gradient == msg - - -def test_gradient_length(): - """Each gradient must have exactly 12 ANSI codes.""" - for theme_id, theme in THEME_REGISTRY.items(): - assert len(theme.main_gradient) == 12, f"{theme_id} main gradient wrong length" - assert len(theme.message_gradient) == 12, f"{theme_id} message gradient wrong length" - - -def test_theme_registry_has_three_themes(): - """Registry contains green, orange, purple.""" - assert len(THEME_REGISTRY) == 3 - assert "green" in THEME_REGISTRY - assert "orange" in THEME_REGISTRY - assert "purple" in THEME_REGISTRY - - -def test_get_theme_valid(): - """get_theme returns Theme object for valid ID.""" - theme = 
get_theme("green") - assert isinstance(theme, Theme) - assert theme.name == "Verdant Green" - - -def test_get_theme_invalid(): - """get_theme raises KeyError for invalid ID.""" - with pytest.raises(KeyError): - get_theme("invalid_theme") - - -def test_green_theme_unchanged(): - """Green theme uses original green → magenta colors.""" - green_theme = get_theme("green") - # First color should be white (bold) - assert green_theme.main_gradient[0] == "\033[1;38;5;231m" - # Last deep green - assert green_theme.main_gradient[9] == "\033[38;5;22m" - # Message gradient is magenta - assert green_theme.message_gradient[9] == "\033[38;5;89m" -``` - -Run: `pytest tests/test_themes.py -v` -Expected: FAIL (module doesn't exist) - -- [ ] **Step 2: Create themes.py with Theme class and finalized gradients** - -Create `engine/themes.py`: - -```python -"""Color theme definitions and registry.""" -from typing import Optional - - -class Theme: - """Encapsulates a color scheme: name, main gradient, message gradient.""" - - def __init__(self, name: str, main_gradient: list[str], message_gradient: list[str]): - """Initialize theme with display name and gradient lists. 
- - Args: - name: Display name (e.g., "Verdant Green") - main_gradient: List of 12 ANSI 256-color codes (white → primary color) - message_gradient: List of 12 ANSI codes (white → complementary color) - """ - self.name = name - self.main_gradient = main_gradient - self.message_gradient = message_gradient - - -# ─── FINALIZED GRADIENTS ────────────────────────────────────────────────── -# Each gradient: white → primary/complementary, 12 steps total -# Format: "\033[;m" where color is 38;5; - -_GREEN_MAIN = [ - "\033[1;38;5;231m", # white (bold) - "\033[1;38;5;195m", # pale white-tint - "\033[38;5;123m", # bright cyan - "\033[38;5;118m", # bright lime - "\033[38;5;82m", # lime - "\033[38;5;46m", # bright green - "\033[38;5;40m", # green - "\033[38;5;34m", # medium green - "\033[38;5;28m", # dark green - "\033[38;5;22m", # deep green - "\033[2;38;5;22m", # dim deep green - "\033[2;38;5;235m", # near black -] - -_GREEN_MESSAGE = [ - "\033[1;38;5;231m", # white (bold) - "\033[1;38;5;225m", # pale pink-white - "\033[38;5;219m", # bright pink - "\033[38;5;213m", # hot pink - "\033[38;5;207m", # magenta - "\033[38;5;201m", # bright magenta - "\033[38;5;165m", # orchid-red - "\033[38;5;161m", # ruby-magenta - "\033[38;5;125m", # dark magenta - "\033[38;5;89m", # deep maroon-magenta - "\033[2;38;5;89m", # dim deep maroon-magenta - "\033[2;38;5;235m", # near black -] - -_ORANGE_MAIN = [ - "\033[1;38;5;231m", # white (bold) - "\033[1;38;5;215m", # pale orange-white - "\033[38;5;209m", # bright orange - "\033[38;5;208m", # vibrant orange - "\033[38;5;202m", # orange - "\033[38;5;166m", # dark orange - "\033[38;5;130m", # burnt orange - "\033[38;5;94m", # rust - "\033[38;5;58m", # dark rust - "\033[38;5;94m", # rust (hold) - "\033[2;38;5;94m", # dim rust - "\033[2;38;5;235m", # near black -] - -_ORANGE_MESSAGE = [ - "\033[1;38;5;231m", # white (bold) - "\033[1;38;5;195m", # pale cyan-white - "\033[38;5;33m", # bright blue - "\033[38;5;27m", # blue - "\033[38;5;21m", # deep blue - 
"\033[38;5;21m", # deep blue (hold) - "\033[38;5;21m", # deep blue (hold) - "\033[38;5;18m", # navy - "\033[38;5;18m", # navy (hold) - "\033[38;5;18m", # navy (hold) - "\033[2;38;5;18m", # dim navy - "\033[2;38;5;235m", # near black -] - -_PURPLE_MAIN = [ - "\033[1;38;5;231m", # white (bold) - "\033[1;38;5;225m", # pale purple-white - "\033[38;5;177m", # bright purple - "\033[38;5;171m", # vibrant purple - "\033[38;5;165m", # purple - "\033[38;5;135m", # medium purple - "\033[38;5;129m", # purple - "\033[38;5;93m", # dark purple - "\033[38;5;57m", # deep purple - "\033[38;5;57m", # deep purple (hold) - "\033[2;38;5;57m", # dim deep purple - "\033[2;38;5;235m", # near black -] - -_PURPLE_MESSAGE = [ - "\033[1;38;5;231m", # white (bold) - "\033[1;38;5;226m", # pale yellow-white - "\033[38;5;226m", # bright yellow - "\033[38;5;220m", # yellow - "\033[38;5;220m", # yellow (hold) - "\033[38;5;184m", # dark yellow - "\033[38;5;184m", # dark yellow (hold) - "\033[38;5;178m", # olive-yellow - "\033[38;5;178m", # olive-yellow (hold) - "\033[38;5;172m", # golden - "\033[2;38;5;172m", # dim golden - "\033[2;38;5;235m", # near black -] - -# ─── THEME REGISTRY ─────────────────────────────────────────────────────── - -THEME_REGISTRY = { - "green": Theme( - name="Verdant Green", - main_gradient=_GREEN_MAIN, - message_gradient=_GREEN_MESSAGE, - ), - "orange": Theme( - name="Molten Orange", - main_gradient=_ORANGE_MAIN, - message_gradient=_ORANGE_MESSAGE, - ), - "purple": Theme( - name="Violet Purple", - main_gradient=_PURPLE_MAIN, - message_gradient=_PURPLE_MESSAGE, - ), -} - - -def get_theme(theme_id: str) -> Theme: - """Retrieve a theme by ID. - - Args: - theme_id: One of "green", "orange", "purple" - - Returns: - Theme object - - Raises: - KeyError: If theme_id not found in registry - """ - if theme_id not in THEME_REGISTRY: - raise KeyError(f"Unknown theme: {theme_id}. 
Available: {list(THEME_REGISTRY.keys())}") - return THEME_REGISTRY[theme_id] -``` - -- [ ] **Step 3: Run tests to verify they pass** - -Run: `pytest tests/test_themes.py -v` -Expected: PASS (all 6 tests) - -- [ ] **Step 4: Commit** - -```bash -git add engine/themes.py tests/test_themes.py -git commit -m "feat: create Theme class and registry with finalized color gradients - -- Define Theme class to encapsulate name and main/message gradients -- Create THEME_REGISTRY with green, orange, purple themes -- Each gradient has 12 ANSI 256-color codes finalized -- Complementary color pairs: green/magenta, orange/blue, purple/yellow -- Add get_theme() lookup with error handling -- Add comprehensive unit tests" -``` - ---- - -## Chunk 2: Config Integration - -### Task 2: Add ACTIVE_THEME global and set_active_theme() to config.py - -**Files:** -- Modify: `engine/config.py:1-30` -- Test: `tests/test_config.py` (expand existing) - -- [ ] **Step 1: Write failing tests for config changes** - -Add to `tests/test_config.py`: - -```python -def test_active_theme_initially_none(): - """ACTIVE_THEME is None before initialization.""" - # This test may fail if config is already initialized - # We'll set it to None first for testing - import engine.config - engine.config.ACTIVE_THEME = None - assert engine.config.ACTIVE_THEME is None - - -def test_set_active_theme_green(): - """set_active_theme('green') sets ACTIVE_THEME to green theme.""" - from engine.config import set_active_theme - from engine.themes import get_theme - - set_active_theme("green") - - assert config.ACTIVE_THEME is not None - assert config.ACTIVE_THEME.name == "Verdant Green" - assert config.ACTIVE_THEME == get_theme("green") - - -def test_set_active_theme_default(): - """set_active_theme() with no args defaults to green.""" - from engine.config import set_active_theme - - set_active_theme() - - assert config.ACTIVE_THEME.name == "Verdant Green" - - -def test_set_active_theme_invalid(): - """set_active_theme() with 
invalid ID raises KeyError.""" - from engine.config import set_active_theme - - with pytest.raises(KeyError): - set_active_theme("invalid") -``` - -Run: `pytest tests/test_config.py -v` -Expected: FAIL (functions don't exist yet) - -- [ ] **Step 2: Add ACTIVE_THEME global and set_active_theme() to config.py** - -Edit `engine/config.py`, add after line 30 (after `_resolve_font_path` function): - -```python -# ─── COLOR THEME ────────────────────────────────────────────────────────── -ACTIVE_THEME = None # set by set_active_theme() after picker - - -def set_active_theme(theme_id: str = "green"): - """Set the active color theme. Defaults to 'green' if not specified. - - Args: - theme_id: One of "green", "orange", "purple" - - Raises: - KeyError: If theme_id is invalid - """ - global ACTIVE_THEME - from engine import themes - ACTIVE_THEME = themes.get_theme(theme_id) -``` - -- [ ] **Step 3: Remove hardcoded GRAD_COLS and MSG_GRAD_COLS from render.py** - -Edit `engine/render.py`, find and delete lines 20-49 (the hardcoded gradient arrays): - -```python -# DELETED: -# GRAD_COLS = [...] -# MSG_GRAD_COLS = [...] 
-``` - -- [ ] **Step 4: Run tests to verify they pass** - -Run: `pytest tests/test_config.py::test_active_theme_initially_none -v` -Run: `pytest tests/test_config.py::test_set_active_theme_green -v` -Run: `pytest tests/test_config.py::test_set_active_theme_default -v` -Run: `pytest tests/test_config.py::test_set_active_theme_invalid -v` - -Expected: PASS (all 4 new tests) - -- [ ] **Step 5: Verify existing config tests still pass** - -Run: `pytest tests/test_config.py -v` - -Expected: PASS (all existing + new tests) - -- [ ] **Step 6: Commit** - -```bash -git add engine/config.py tests/test_config.py -git commit -m "feat: add ACTIVE_THEME global and set_active_theme() to config - -- Add ACTIVE_THEME global (initialized to None) -- Add set_active_theme(theme_id) function with green default -- Remove hardcoded GRAD_COLS and MSG_GRAD_COLS (move to themes.py) -- Add comprehensive tests for theme setting" -``` - ---- - -## Chunk 3: Render Pipeline Integration - -### Task 3: Update render.py to use config.ACTIVE_THEME - -**Files:** -- Modify: `engine/render.py:15-220` -- Test: `tests/test_render.py` (expand existing) - -- [ ] **Step 1: Write failing test for lr_gradient with theme** - -Add to `tests/test_render.py`: - -```python -def test_lr_gradient_uses_active_theme(monkeypatch): - """lr_gradient uses config.ACTIVE_THEME when cols=None.""" - from engine import config, render - from engine.themes import get_theme - - # Set orange theme - config.set_active_theme("orange") - - # Create simple rows - rows = ["test row"] - result = render.lr_gradient(rows, offset=0, cols=None) - - # Result should start with first color from orange main gradient - assert result[0].startswith("\033[1;38;5;231m") # white (same for all) - - -def test_lr_gradient_fallback_when_no_theme(monkeypatch): - """lr_gradient uses fallback when ACTIVE_THEME is None.""" - from engine import config, render - - # Clear active theme - config.ACTIVE_THEME = None - - rows = ["test row"] - result = 
render.lr_gradient(rows, offset=0, cols=None) - - # Should not crash and should return something - assert result is not None - assert len(result) > 0 - - -def test_default_green_gradient_length(): - """_default_green_gradient returns 12 colors.""" - from engine import render - - colors = render._default_green_gradient() - assert len(colors) == 12 -``` - -Run: `pytest tests/test_render.py::test_lr_gradient_uses_active_theme -v` -Expected: FAIL (function signature doesn't match) - -- [ ] **Step 2: Update lr_gradient() to use config.ACTIVE_THEME** - -Edit `engine/render.py`, find the `lr_gradient()` function (around line 194) and update it: - -```python -def lr_gradient(rows, offset, cols=None): - """ - Render rows through a left-to-right color sweep. - - Args: - rows: List of text rows to colorize - offset: Gradient position offset (for animation) - cols: Optional list of color codes. If None, uses active theme. - - Returns: - List of colorized rows - """ - if cols is None: - from engine import config - cols = ( - config.ACTIVE_THEME.main_gradient - if config.ACTIVE_THEME - else _default_green_gradient() - ) - - # ... rest of function unchanged ... 
-``` - -- [ ] **Step 3: Add _default_green_gradient() fallback function** - -Add to `engine/render.py` before `lr_gradient()`: - -```python -def _default_green_gradient(): - """Fallback green gradient (original colors) for initialization.""" - return [ - "\033[1;38;5;231m", # white (bold) - "\033[1;38;5;195m", # pale white-tint - "\033[38;5;123m", # bright cyan - "\033[38;5;118m", # bright lime - "\033[38;5;82m", # lime - "\033[38;5;46m", # bright green - "\033[38;5;40m", # green - "\033[38;5;34m", # medium green - "\033[38;5;28m", # dark green - "\033[38;5;22m", # deep green - "\033[2;38;5;22m", # dim deep green - "\033[2;38;5;235m", # near black - ] - - -def _default_magenta_gradient(): - """Fallback magenta gradient (original message colors) for initialization.""" - return [ - "\033[1;38;5;231m", # white (bold) - "\033[1;38;5;225m", # pale pink-white - "\033[38;5;219m", # bright pink - "\033[38;5;213m", # hot pink - "\033[38;5;207m", # magenta - "\033[38;5;201m", # bright magenta - "\033[38;5;165m", # orchid-red - "\033[38;5;161m", # ruby-magenta - "\033[38;5;125m", # dark magenta - "\033[38;5;89m", # deep maroon-magenta - "\033[2;38;5;89m", # dim deep maroon-magenta - "\033[2;38;5;235m", # near black - ] -``` - -- [ ] **Step 4: Run tests to verify they pass** - -Run: `pytest tests/test_render.py::test_lr_gradient_uses_active_theme -v` -Run: `pytest tests/test_render.py::test_lr_gradient_fallback_when_no_theme -v` -Run: `pytest tests/test_render.py::test_default_green_gradient_length -v` - -Expected: PASS (all 3 new tests) - -- [ ] **Step 5: Run full render test suite** - -Run: `pytest tests/test_render.py -v` - -Expected: PASS (existing tests may need adjustment for mocking) - -- [ ] **Step 6: Commit** - -```bash -git add engine/render.py tests/test_render.py -git commit -m "feat: update lr_gradient to use config.ACTIVE_THEME - -- Update lr_gradient(cols=None) to check config.ACTIVE_THEME -- Add _default_green_gradient() and _default_magenta_gradient() 
fallbacks -- Fallback used when ACTIVE_THEME is None (non-interactive init) -- Add tests for theme-aware and fallback gradient rendering" -``` - ---- - -## Chunk 4: Message Gradient Integration - -### Task 4: Update scroll.py to use message gradient from config - -**Files:** -- Modify: `engine/scroll.py:85-95` -- Test: existing `tests/test_scroll.py` - -- [ ] **Step 1: Locate message gradient calls in scroll.py** - -Run: `grep -n "MSG_GRAD_COLS\|lr_gradient_opposite" /Users/genejohnson/Dev/mainline/engine/scroll.py` - -Expected: Should find line(s) where `MSG_GRAD_COLS` or similar is used - -- [ ] **Step 2: Update scroll.py to use theme message gradient** - -Edit `engine/scroll.py`, find the line that uses message gradients (around line 89 based on spec) and update: - -Old code: -```python -# Some variation of: -rows = lr_gradient(rows, offset, MSG_GRAD_COLS) -``` - -New code: -```python -from engine import config -msg_cols = ( - config.ACTIVE_THEME.message_gradient - if config.ACTIVE_THEME - else render._default_magenta_gradient() -) -rows = lr_gradient(rows, offset, msg_cols) -``` - -Or use the helper approach (create `msg_gradient()` in render.py): - -```python -def msg_gradient(rows, offset): - """Apply message (ntfy) gradient using theme complementary colors.""" - from engine import config - cols = ( - config.ACTIVE_THEME.message_gradient - if config.ACTIVE_THEME - else _default_magenta_gradient() - ) - return lr_gradient(rows, offset, cols) -``` - -Then in scroll.py: -```python -rows = render.msg_gradient(rows, offset) -``` - -- [ ] **Step 3: Run existing scroll tests** - -Run: `pytest tests/test_scroll.py -v` - -Expected: PASS (existing functionality unchanged) - -- [ ] **Step 4: Commit** - -```bash -git add engine/scroll.py engine/render.py -git commit -m "feat: update scroll.py to use theme message gradient - -- Replace MSG_GRAD_COLS reference with config.ACTIVE_THEME.message_gradient -- Use fallback magenta gradient when theme not initialized -- Ensure 
ntfy messages render in complementary color from selected theme" -``` - ---- - -## Chunk 5: Color Picker UI - -### Task 5: Create pick_color_theme() function in app.py - -**Files:** -- Modify: `engine/app.py:1-300` -- Test: manual/integration (interactive) - -- [ ] **Step 1: Write helper functions for color picker UI** - -Edit `engine/app.py`, add before `pick_font_face()` function: - -```python -def _draw_color_picker(themes_list, selected): - """Draw the color theme picker menu.""" - import sys - from engine.terminal import CLR, W_GHOST, G_HI, G_DIM, tw - - print(CLR, end="") - print() - print(f" {G_HI}▼ COLOR THEME{W_GHOST} ─ ↑/↓ or j/k to move, Enter/q to select{G_DIM}") - print(f" {W_GHOST}{'─' * (tw() - 4)}\n") - - for i, (theme_id, theme) in enumerate(themes_list): - prefix = " ▶ " if i == selected else " " - color = G_HI if i == selected else "" - reset = "" if i == selected else W_GHOST - print(f"{prefix}{color}{theme.name}{reset}") - - print() -``` - -- [ ] **Step 2: Create pick_color_theme() function** - -Edit `engine/app.py`, add after helper function: - -```python -def pick_color_theme(): - """Interactive color theme picker. 
Defaults to 'green' if not TTY.""" - import sys - import termios - import tty - from engine import config, themes - - # Non-interactive fallback: use green - if not sys.stdin.isatty(): - config.set_active_theme("green") - return - - themes_list = list(themes.THEME_REGISTRY.items()) - selected = 0 - - fd = sys.stdin.fileno() - old_settings = termios.tcgetattr(fd) - try: - tty.setcbreak(fd) - while True: - _draw_color_picker(themes_list, selected) - key = _read_picker_key() - if key == "up": - selected = max(0, selected - 1) - elif key == "down": - selected = min(len(themes_list) - 1, selected + 1) - elif key == "enter": - break - elif key == "interrupt": - raise KeyboardInterrupt - finally: - termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) - - selected_theme_id = themes_list[selected][0] - config.set_active_theme(selected_theme_id) - - theme_name = themes_list[selected][1].name - print(f" {G_DIM}> using {theme_name}{RST}") - time.sleep(0.8) - print(CLR, end="") - print(CURSOR_OFF, end="") - print() -``` - -- [ ] **Step 3: Update main() to call pick_color_theme() before pick_font_face()** - -Edit `engine/app.py`, find the `main()` function and locate where `pick_font_face()` is called (around line 265). Add before it: - -```python -def main(): - # ... existing signal handler setup ... - - pick_color_theme() # NEW LINE - before font picker - pick_font_face() - - # ... rest of main unchanged ... 
-``` - -- [ ] **Step 4: Manual test - run in interactive terminal** - -Run: `python3 mainline.py` - -Expected: -- See color theme picker menu before font picker -- Can navigate with ↑/↓ or j/k -- Can select with Enter or q -- Selected theme applies to scrolling headlines -- Can select different themes and see colors change - -- [ ] **Step 5: Manual test - run in non-interactive environment** - -Run: `echo "" | python3 mainline.py` - -Expected: -- No color picker menu shown -- Defaults to green theme -- App runs without error - -- [ ] **Step 6: Commit** - -```bash -git add engine/app.py -git commit -m "feat: add pick_color_theme() UI and integration - -- Create _draw_color_picker() to render menu -- Create pick_color_theme() function mirroring font picker pattern -- Integrate into main() before font picker -- Fallback to green theme in non-interactive environments -- Support arrow keys and j/k navigation" -``` - ---- - -## Chunk 6: Integration & Validation - -### Task 6: End-to-end testing and cleanup - -**Files:** -- Test: All modified files -- Verify: App functionality - -- [ ] **Step 1: Run full test suite** - -Run: `pytest tests/ -v` - -Expected: PASS (all tests, including new ones) - -- [ ] **Step 2: Run linter** - -Run: `ruff check engine/ mainline.py` - -Expected: No errors (fix any style issues) - -- [ ] **Step 3: Manual integration test - green theme** - -Run: `python3 mainline.py` - -Then select "Verdant Green" from picker. - -Expected: -- Headlines render in green → deep green -- ntfy messages render in magenta gradient -- Both work correctly during streaming - -- [ ] **Step 4: Manual integration test - orange theme** - -Run: `python3 mainline.py` - -Then select "Molten Orange" from picker. 
- -Expected: -- Headlines render in orange → deep orange -- ntfy messages render in blue gradient -- Colors are visually distinct from green - -- [ ] **Step 5: Manual integration test - purple theme** - -Run: `python3 mainline.py` - -Then select "Violet Purple" from picker. - -Expected: -- Headlines render in purple → deep purple -- ntfy messages render in yellow gradient -- Colors are visually distinct from green and orange - -- [ ] **Step 6: Test poetry mode with color picker** - -Run: `python3 mainline.py --poetry` - -Then select "orange" from picker. - -Expected: -- Poetry mode works with color picker -- Colors apply to poetry rendering - -- [ ] **Step 7: Test code mode with color picker** - -Run: `python3 mainline.py --code` - -Then select "purple" from picker. - -Expected: -- Code mode works with color picker -- Colors apply to code rendering - -- [ ] **Step 8: Verify acceptance criteria** - -✓ Color picker displays 3 theme options at startup -✓ Selection applies to all headline and message gradients -✓ Boot UI (title, status) uses hardcoded green (not theme) -✓ Scrolling headlines and ntfy messages use theme gradients -✓ No persistence between runs (each run picks fresh) -✓ Non-TTY environments default to green without error -✓ Architecture supports future random/animation modes -✓ All gradient color codes finalized with no TBD values - -- [ ] **Step 9: Final commit** - -```bash -git add -A -git commit -m "feat: color scheme switcher implementation complete - -Closes color-pick feature with: -- Three selectable color themes (green, orange, purple) -- Interactive menu at startup (mirrors font picker UI) -- Complementary colors for ntfy message queue -- Fallback to green in non-interactive environments -- All tests passing, manual validation complete" -``` - -- [ ] **Step 10: Create feature branch PR summary** - -``` -## Color Scheme Switcher - -Implements interactive color theme selection for Mainline news ticker. 
- -### What's New -- 3 color themes: Verdant Green, Molten Orange, Violet Purple -- Interactive picker at startup (↑/↓ or j/k, Enter to select) -- Complementary gradients for ntfy messages (magenta, blue, yellow) -- Fresh theme selection each run (no persistence) - -### Files Changed -- `engine/themes.py` (new) -- `engine/config.py` (ACTIVE_THEME, set_active_theme) -- `engine/render.py` (theme-aware gradients) -- `engine/scroll.py` (message gradient integration) -- `engine/app.py` (pick_color_theme UI) -- `tests/test_themes.py` (new theme tests) -- `README.md` (documentation) - -### Acceptance Criteria -All met. App fully tested and ready for merge. -``` - ---- - -## Testing Checklist - -- [ ] Unit tests: `pytest tests/test_themes.py -v` -- [ ] Unit tests: `pytest tests/test_config.py -v` -- [ ] Unit tests: `pytest tests/test_render.py -v` -- [ ] Full suite: `pytest tests/ -v` -- [ ] Linting: `ruff check engine/ mainline.py` -- [ ] Manual: Green theme selection -- [ ] Manual: Orange theme selection -- [ ] Manual: Purple theme selection -- [ ] Manual: Poetry mode with colors -- [ ] Manual: Code mode with colors -- [ ] Manual: Non-TTY fallback - ---- - -## Notes - -- `themes.py` is data-only; never import config or render to prevent cycles -- `ACTIVE_THEME` initialized to None; guaranteed non-None before stream() via pick_color_theme() -- Font picker UI remains hardcoded green; title/subtitle use G_HI/G_DIM constants (not theme) -- Message gradients use complementary colors; lookup in scroll.py -- Each gradient has 12 colors; verify length in tests -- No persistence; fresh picker each run diff --git a/docs/superpowers/plans/2026-03-19-figment-mode.md b/docs/superpowers/plans/2026-03-19-figment-mode.md deleted file mode 100644 index 0d5c9c7..0000000 --- a/docs/superpowers/plans/2026-03-19-figment-mode.md +++ /dev/null @@ -1,1110 +0,0 @@ -# Figment Mode Implementation Plan - -> **For agentic workers:** REQUIRED: Use superpowers:subagent-driven-development (if subagents 
available) or superpowers:executing-plans to implement this plan. Steps use checkbox (`- [ ]`) syntax for tracking. - -**Goal:** Add a periodic full-screen SVG glyph overlay ("figment mode") that renders flickery, theme-colored half-block art on top of the running news ticker. - -**Architecture:** Hybrid EffectPlugin + overlay. `FigmentEffect` (effect plugin) owns the lifecycle, timer, and state machine. `render_figment_overlay()` (in layers.py) handles ANSI overlay rendering. `engine/figment_render.py` handles SVG→half-block rasterization. `engine/figment_trigger.py` defines the extensible input protocol. - -**Tech Stack:** Python 3.10+, cairosvg (SVG→PNG), Pillow (image processing), existing effect plugin system (ABC-based), existing theme gradients. - -**Spec:** `docs/superpowers/specs/2026-03-19-figment-mode-design.md` - ---- - -## Chunk 1: Foundation - -### Task 1: Merge main and add cairosvg dependency - -The `feat/figment` branch is behind `main` by 2 commits (the ABC plugin migration). Must merge first so `EffectPlugin` is ABC-based. - -**Files:** -- Modify: `pyproject.toml:28-38` - -- [ ] **Step 1: Merge main into feat/figment** - -```bash -git merge main -``` - -Expected: Fast-forward or clean merge. No conflicts (branch only added docs). - -- [ ] **Step 2: Add cairosvg optional dependency** - -In `pyproject.toml`, add a `figment` extras group after the `mic` group (line 32): - -```toml -figment = [ - "cairosvg>=2.7.0", -] -``` - -- [ ] **Step 3: Sync dependencies** - -```bash -uv sync --all-extras -``` - -Expected: cairosvg installs successfully. 
- -- [ ] **Step 4: Verify cairosvg works** - -```bash -uv run python -c "import cairosvg; print('cairosvg OK')" -``` - -Expected: prints `cairosvg OK` - -- [ ] **Step 5: Commit** - -```bash -git add pyproject.toml uv.lock -git commit -m "build: add cairosvg optional dependency for figment mode" -``` - ---- - -### Task 2: Test fixture SVG and event types - -**Files:** -- Create: `tests/fixtures/test.svg` -- Modify: `engine/events.py:12-21` (add FIGMENT_TRIGGER), `engine/events.py:62-68` (add FigmentTriggerEvent) - -- [ ] **Step 1: Create minimal test SVG** - -Create `tests/fixtures/test.svg` — a simple 100x100 black rectangle on white: - -```xml - - - -``` - -- [ ] **Step 2: Add FIGMENT_TRIGGER event type** - -In `engine/events.py`, add to the `EventType` enum (after `STREAM_END = auto()` at line 20): - -```python - FIGMENT_TRIGGER = auto() -``` - -And add the event dataclass at the end of the file (after `StreamEvent`): - -```python -@dataclass -class FigmentTriggerEvent: - """Event emitted when a figment is triggered.""" - - action: str - value: float | str | None = None - timestamp: datetime | None = None -``` - -- [ ] **Step 3: Run existing tests to verify no breakage** - -```bash -uv run pytest tests/test_events.py -v -``` - -Expected: All existing event tests pass. 
- -- [ ] **Step 4: Commit** - -```bash -git add tests/fixtures/test.svg engine/events.py -git commit -m "feat(figment): add test fixture SVG and FIGMENT_TRIGGER event type" -``` - ---- - -### Task 3: Trigger protocol and command types - -**Files:** -- Create: `engine/figment_trigger.py` -- Create: `tests/test_figment_trigger.py` - -- [ ] **Step 1: Write failing tests for FigmentCommand and FigmentAction** - -Create `tests/test_figment_trigger.py`: - -```python -"""Tests for engine.figment_trigger module.""" - -from enum import Enum - -from engine.figment_trigger import FigmentAction, FigmentCommand - - -class TestFigmentAction: - def test_is_enum(self): - assert issubclass(FigmentAction, Enum) - - def test_has_trigger(self): - assert FigmentAction.TRIGGER.value == "trigger" - - def test_has_set_intensity(self): - assert FigmentAction.SET_INTENSITY.value == "set_intensity" - - def test_has_set_interval(self): - assert FigmentAction.SET_INTERVAL.value == "set_interval" - - def test_has_set_color(self): - assert FigmentAction.SET_COLOR.value == "set_color" - - def test_has_stop(self): - assert FigmentAction.STOP.value == "stop" - - -class TestFigmentCommand: - def test_trigger_command(self): - cmd = FigmentCommand(action=FigmentAction.TRIGGER) - assert cmd.action == FigmentAction.TRIGGER - assert cmd.value is None - - def test_set_intensity_command(self): - cmd = FigmentCommand(action=FigmentAction.SET_INTENSITY, value=0.8) - assert cmd.value == 0.8 - - def test_set_color_command(self): - cmd = FigmentCommand(action=FigmentAction.SET_COLOR, value="orange") - assert cmd.value == "orange" -``` - -- [ ] **Step 2: Run test to verify it fails** - -```bash -uv run pytest tests/test_figment_trigger.py -v -``` - -Expected: FAIL — `ModuleNotFoundError: No module named 'engine.figment_trigger'` - -- [ ] **Step 3: Write FigmentTrigger protocol, FigmentAction, FigmentCommand** - -Create `engine/figment_trigger.py`: - -```python -""" -Figment trigger protocol and command types. 
- -Defines the extensible input abstraction for triggering figment displays -from any control surface (ntfy, MQTT, serial, etc.). -""" - -from __future__ import annotations - -from dataclasses import dataclass -from enum import Enum -from typing import Protocol - - -class FigmentAction(Enum): - TRIGGER = "trigger" - SET_INTENSITY = "set_intensity" - SET_INTERVAL = "set_interval" - SET_COLOR = "set_color" - STOP = "stop" - - -@dataclass -class FigmentCommand: - action: FigmentAction - value: float | str | None = None - - -class FigmentTrigger(Protocol): - """Protocol for figment trigger sources. - - Any input source (ntfy, MQTT, serial) can implement this - to trigger and control figment displays. - """ - - def poll(self) -> FigmentCommand | None: ... -``` - -- [ ] **Step 4: Run tests to verify they pass** - -```bash -uv run pytest tests/test_figment_trigger.py -v -``` - -Expected: All 8 tests pass. - -- [ ] **Step 5: Commit** - -```bash -git add engine/figment_trigger.py tests/test_figment_trigger.py -git commit -m "feat(figment): add trigger protocol and command types" -``` - ---- - -## Chunk 2: SVG Rasterization - -### Task 4: SVG to half-block rasterizer - -**Files:** -- Create: `engine/figment_render.py` -- Create: `tests/test_figment_render.py` - -- [ ] **Step 1: Write failing tests for rasterize_svg** - -Create `tests/test_figment_render.py`: - -```python -"""Tests for engine.figment_render module.""" - -import os - -from engine.figment_render import rasterize_svg - -FIXTURE_SVG = os.path.join(os.path.dirname(__file__), "fixtures", "test.svg") - - -class TestRasterizeSvg: - def test_returns_list_of_strings(self): - rows = rasterize_svg(FIXTURE_SVG, 40, 20) - assert isinstance(rows, list) - assert all(isinstance(r, str) for r in rows) - - def test_output_height_matches_terminal_height(self): - rows = rasterize_svg(FIXTURE_SVG, 40, 20) - assert len(rows) == 20 - - def test_output_contains_block_characters(self): - rows = rasterize_svg(FIXTURE_SVG, 40, 20) - 
all_chars = "".join(rows) - block_chars = {"█", "▀", "▄"} - assert any(ch in all_chars for ch in block_chars) - - def test_different_sizes_produce_different_output(self): - rows_small = rasterize_svg(FIXTURE_SVG, 20, 10) - rows_large = rasterize_svg(FIXTURE_SVG, 80, 40) - assert len(rows_small) == 10 - assert len(rows_large) == 40 - - def test_nonexistent_file_raises(self): - import pytest - with pytest.raises(Exception): - rasterize_svg("/nonexistent/file.svg", 40, 20) - - -class TestRasterizeCache: - def test_cache_returns_same_result(self): - rows1 = rasterize_svg(FIXTURE_SVG, 40, 20) - rows2 = rasterize_svg(FIXTURE_SVG, 40, 20) - assert rows1 == rows2 - - def test_cache_invalidated_by_size_change(self): - rows1 = rasterize_svg(FIXTURE_SVG, 40, 20) - rows2 = rasterize_svg(FIXTURE_SVG, 60, 30) - assert len(rows1) != len(rows2) -``` - -- [ ] **Step 2: Run tests to verify they fail** - -```bash -uv run pytest tests/test_figment_render.py -v -``` - -Expected: FAIL — `ModuleNotFoundError: No module named 'engine.figment_render'` - -- [ ] **Step 3: Implement rasterize_svg** - -Create `engine/figment_render.py`: - -```python -""" -SVG to half-block terminal art rasterization. - -Pipeline: SVG -> cairosvg -> PIL -> greyscale threshold -> half-block encode. -Follows the same pixel-pair approach as engine/render.py for OTF fonts. -""" - -from __future__ import annotations - -from io import BytesIO - -import cairosvg -from PIL import Image - -_cache: dict[tuple[str, int, int], list[str]] = {} - - -def rasterize_svg(svg_path: str, width: int, height: int) -> list[str]: - """Convert SVG file to list of half-block terminal rows (uncolored). - - Args: - svg_path: Path to SVG file. - width: Target terminal width in columns. - height: Target terminal height in rows. - - Returns: - List of strings, one per terminal row, containing block characters. 
- """ - cache_key = (svg_path, width, height) - if cache_key in _cache: - return _cache[cache_key] - - # SVG -> PNG in memory - png_bytes = cairosvg.svg2png( - url=svg_path, - output_width=width, - output_height=height * 2, # 2 pixel rows per terminal row - ) - - # PNG -> greyscale PIL image - img = Image.open(BytesIO(png_bytes)).convert("L") - img = img.resize((width, height * 2), Image.Resampling.LANCZOS) - - data = img.tobytes() - pix_w = width - pix_h = height * 2 - threshold = 80 - - # Half-block encode: walk pixel pairs - rows: list[str] = [] - for y in range(0, pix_h, 2): - row: list[str] = [] - for x in range(pix_w): - top = data[y * pix_w + x] > threshold - bot = data[(y + 1) * pix_w + x] > threshold if y + 1 < pix_h else False - if top and bot: - row.append("█") - elif top: - row.append("▀") - elif bot: - row.append("▄") - else: - row.append(" ") - rows.append("".join(row)) - - _cache[cache_key] = rows - return rows - - -def clear_cache() -> None: - """Clear the rasterization cache (e.g., on terminal resize).""" - _cache.clear() -``` - -- [ ] **Step 4: Run tests to verify they pass** - -```bash -uv run pytest tests/test_figment_render.py -v -``` - -Expected: All 7 tests pass. - -- [ ] **Step 5: Commit** - -```bash -git add engine/figment_render.py tests/test_figment_render.py -git commit -m "feat(figment): add SVG to half-block rasterization pipeline" -``` - ---- - -## Chunk 3: FigmentEffect Plugin - -### Task 5: FigmentEffect state machine and lifecycle - -This is the core plugin. It manages the timer, SVG selection, state machine, and exposes `get_figment_state()`. 
- -**Files:** -- Create: `effects_plugins/figment.py` -- Create: `tests/test_figment.py` - -- [ ] **Step 1: Write failing tests for FigmentState, FigmentPhase, and state machine** - -Create `tests/test_figment.py`: - -```python -"""Tests for the FigmentEffect plugin.""" - -import os -from enum import Enum -from unittest.mock import patch - -import pytest - -from effects_plugins.figment import FigmentEffect, FigmentPhase, FigmentState -from engine.effects.types import EffectConfig, EffectContext - - -FIXTURE_SVG = os.path.join( - os.path.dirname(__file__), "fixtures", "test.svg" -) -FIGMENTS_DIR = os.path.join(os.path.dirname(__file__), "fixtures") - - -class TestFigmentPhase: - def test_is_enum(self): - assert issubclass(FigmentPhase, Enum) - - def test_has_all_phases(self): - assert hasattr(FigmentPhase, "REVEAL") - assert hasattr(FigmentPhase, "HOLD") - assert hasattr(FigmentPhase, "DISSOLVE") - - -class TestFigmentState: - def test_creation(self): - state = FigmentState( - phase=FigmentPhase.REVEAL, - progress=0.5, - rows=["█▀▄", " █ "], - gradient=[46, 40, 34, 28, 22, 22, 34, 40, 46, 82, 118, 231], - center_row=5, - center_col=10, - ) - assert state.phase == FigmentPhase.REVEAL - assert state.progress == 0.5 - assert len(state.rows) == 2 - - -class TestFigmentEffectInit: - def test_name(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - assert effect.name == "figment" - - def test_default_config(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - assert effect.config.enabled is False - assert effect.config.intensity == 1.0 - assert effect.config.params["interval_secs"] == 60 - assert effect.config.params["display_secs"] == 4.5 - - def test_process_is_noop(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - buf = ["line1", "line2"] - ctx = EffectContext( - terminal_width=80, - terminal_height=24, - scroll_cam=0, - ticker_height=20, - ) - result = effect.process(buf, ctx) - assert result == buf - assert result is buf - - def 
test_configure(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - new_cfg = EffectConfig(enabled=True, intensity=0.5) - effect.configure(new_cfg) - assert effect.config.enabled is True - assert effect.config.intensity == 0.5 - - -class TestFigmentStateMachine: - def test_idle_initially(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - effect.config.enabled = True - state = effect.get_figment_state(0, 80, 24) - # Timer hasn't fired yet, should be None (idle) - assert state is None - - def test_trigger_starts_reveal(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - effect.config.enabled = True - effect.trigger(80, 24) - state = effect.get_figment_state(1, 80, 24) - assert state is not None - assert state.phase == FigmentPhase.REVEAL - - def test_full_cycle(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - effect.config.enabled = True - effect.config.params["display_secs"] = 0.15 # 3 phases x 0.05s - - effect.trigger(40, 20) - - # Advance through reveal (30 frames at 0.05s = 1.5s, but we shrunk it) - # With display_secs=0.15, each phase is 0.05s = 1 frame - state = effect.get_figment_state(1, 40, 20) - assert state is not None - assert state.phase == FigmentPhase.REVEAL - - # Advance enough frames to get through all phases - last_state = None - for frame in range(2, 100): - state = effect.get_figment_state(frame, 40, 20) - if state is None: - break - last_state = state - - # Should have completed the full cycle back to idle - assert state is None - - def test_timer_fires_at_interval(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - effect.config.enabled = True - effect.config.params["interval_secs"] = 0.1 # 2 frames at 20fps - - # Frame 0: idle - state = effect.get_figment_state(0, 40, 20) - assert state is None - - # Advance past interval (0.1s = 2 frames) - state = effect.get_figment_state(1, 40, 20) - state = effect.get_figment_state(2, 40, 20) - state = effect.get_figment_state(3, 40, 20) - # Timer should have fired 
by now - assert state is not None - - -class TestFigmentEdgeCases: - def test_empty_figment_dir(self, tmp_path): - effect = FigmentEffect(figment_dir=str(tmp_path)) - effect.config.enabled = True - effect.trigger(40, 20) - state = effect.get_figment_state(1, 40, 20) - # No SVGs available — should stay idle - assert state is None - - def test_missing_figment_dir(self): - effect = FigmentEffect(figment_dir="/nonexistent/path") - effect.config.enabled = True - effect.trigger(40, 20) - state = effect.get_figment_state(1, 40, 20) - assert state is None - - def test_disabled_ignores_trigger(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - effect.config.enabled = False - effect.trigger(80, 24) - state = effect.get_figment_state(1, 80, 24) - assert state is None -``` - -- [ ] **Step 2: Run tests to verify they fail** - -```bash -uv run pytest tests/test_figment.py -v -``` - -Expected: FAIL — `ImportError` - -- [ ] **Step 3: Implement FigmentEffect** - -Create `effects_plugins/figment.py`: - -```python -""" -Figment effect plugin — periodic SVG glyph overlay. - -Owns the figment lifecycle: timer, SVG selection, state machine. -Delegates rendering to render_figment_overlay() in engine/layers.py. - -Named FigmentEffect (not FigmentPlugin) to match the *Effect discovery -convention in effects_plugins/__init__.py. - -NOT added to the EffectChain order — process() is a no-op. The overlay -rendering is handled by scroll.py calling get_figment_state(). 
-""" - -from __future__ import annotations - -import random -from dataclasses import dataclass -from enum import Enum, auto -from pathlib import Path - -from engine import config -from engine.effects.types import EffectConfig, EffectContext, EffectPlugin -from engine.figment_render import rasterize_svg -from engine.figment_trigger import FigmentAction, FigmentCommand, FigmentTrigger -from engine.themes import THEME_REGISTRY - - -class FigmentPhase(Enum): - REVEAL = auto() - HOLD = auto() - DISSOLVE = auto() - - -@dataclass -class FigmentState: - phase: FigmentPhase - progress: float - rows: list[str] - gradient: list[int] - center_row: int - center_col: int - - -class FigmentEffect(EffectPlugin): - name = "figment" - config = EffectConfig( - enabled=False, - intensity=1.0, - params={ - "interval_secs": 60, - "display_secs": 4.5, - "figment_dir": "figments", - }, - ) - - def __init__(self, figment_dir: str | None = None, triggers: list[FigmentTrigger] | None = None): - self.config = EffectConfig( - enabled=False, - intensity=1.0, - params={ - "interval_secs": 60, - "display_secs": 4.5, - "figment_dir": figment_dir or "figments", - }, - ) - self._triggers = triggers or [] - self._phase: FigmentPhase | None = None - self._progress: float = 0.0 - self._rows: list[str] = [] - self._gradient: list[int] = [] - self._center_row: int = 0 - self._center_col: int = 0 - self._timer: float = 0.0 - self._last_svg: str | None = None - self._svg_files: list[str] = [] - self._scan_svgs() - - def _scan_svgs(self) -> None: - figment_dir = Path(self.config.params["figment_dir"]) - if figment_dir.is_dir(): - self._svg_files = sorted(str(p) for p in figment_dir.glob("*.svg")) - - def process(self, buf: list[str], ctx: EffectContext) -> list[str]: - return buf - - def configure(self, cfg: EffectConfig) -> None: - self.config = cfg - self._scan_svgs() - - def trigger(self, w: int, h: int) -> None: - """Manually trigger a figment display.""" - if not self._svg_files: - return - - # Pick a 
random SVG, avoid repeating - candidates = [s for s in self._svg_files if s != self._last_svg] - if not candidates: - candidates = self._svg_files - svg_path = random.choice(candidates) - self._last_svg = svg_path - - # Rasterize - try: - self._rows = rasterize_svg(svg_path, w, h) - except Exception: - return - - # Pick random theme gradient - theme_key = random.choice(list(THEME_REGISTRY.keys())) - self._gradient = THEME_REGISTRY[theme_key].main_gradient - - # Center in viewport - figment_h = len(self._rows) - figment_w = max((len(r) for r in self._rows), default=0) - self._center_row = max(0, (h - figment_h) // 2) - self._center_col = max(0, (w - figment_w) // 2) - - # Start reveal phase - self._phase = FigmentPhase.REVEAL - self._progress = 0.0 - - def get_figment_state(self, frame_number: int, w: int, h: int) -> FigmentState | None: - """Tick the state machine and return current state, or None if idle.""" - if not self.config.enabled: - return None - - # Poll triggers - for trig in self._triggers: - cmd = trig.poll() - if cmd is not None: - self._handle_command(cmd, w, h) - - # Tick timer when idle - if self._phase is None: - self._timer += config.FRAME_DT - interval = self.config.params.get("interval_secs", 60) - if self._timer >= interval: - self._timer = 0.0 - self.trigger(w, h) - - # Tick animation - if self._phase is not None: - display_secs = self.config.params.get("display_secs", 4.5) - phase_duration = display_secs / 3.0 - self._progress += config.FRAME_DT / phase_duration - - if self._progress >= 1.0: - self._progress = 0.0 - if self._phase == FigmentPhase.REVEAL: - self._phase = FigmentPhase.HOLD - elif self._phase == FigmentPhase.HOLD: - self._phase = FigmentPhase.DISSOLVE - elif self._phase == FigmentPhase.DISSOLVE: - self._phase = None - return None - - return FigmentState( - phase=self._phase, - progress=self._progress, - rows=self._rows, - gradient=self._gradient, - center_row=self._center_row, - center_col=self._center_col, - ) - - return None - 
- def _handle_command(self, cmd: FigmentCommand, w: int, h: int) -> None: - if cmd.action == FigmentAction.TRIGGER: - self.trigger(w, h) - elif cmd.action == FigmentAction.SET_INTENSITY and isinstance(cmd.value, (int, float)): - self.config.intensity = float(cmd.value) - elif cmd.action == FigmentAction.SET_INTERVAL and isinstance(cmd.value, (int, float)): - self.config.params["interval_secs"] = float(cmd.value) - elif cmd.action == FigmentAction.SET_COLOR and isinstance(cmd.value, str): - if cmd.value in THEME_REGISTRY: - self._gradient = THEME_REGISTRY[cmd.value].main_gradient - elif cmd.action == FigmentAction.STOP: - self._phase = None - self._progress = 0.0 -``` - -- [ ] **Step 4: Run tests to verify they pass** - -```bash -uv run pytest tests/test_figment.py -v -``` - -Expected: All tests pass. - -- [ ] **Step 5: Verify plugin discovery finds FigmentEffect** - -```bash -uv run python -c " -from engine.effects.registry import EffectRegistry, set_registry -set_registry(EffectRegistry()) -from effects_plugins import discover_plugins -plugins = discover_plugins() -print('Discovered:', list(plugins.keys())) -assert 'figment' in plugins, 'FigmentEffect not discovered!' -print('OK') -" -``` - -Expected: Prints `Discovered: ['noise', 'glitch', 'fade', 'firehose', 'figment']` and `OK`. 
- -- [ ] **Step 6: Commit** - -```bash -git add effects_plugins/figment.py tests/test_figment.py -git commit -m "feat(figment): add FigmentEffect plugin with state machine and timer" -``` - ---- - -## Chunk 4: Overlay Rendering and Scroll Integration - -### Task 6: Figment overlay renderer in layers.py - -**Files:** -- Modify: `engine/layers.py:1-4` (add import), append `render_figment_overlay()` function -- Create: `tests/test_figment_overlay.py` - -- [ ] **Step 1: Write failing tests for render_figment_overlay** - -Create `tests/test_figment_overlay.py`: - -```python -"""Tests for render_figment_overlay in engine.layers.""" - -from effects_plugins.figment import FigmentPhase, FigmentState -from engine.layers import render_figment_overlay - - -def _make_state(phase=FigmentPhase.HOLD, progress=0.5): - return FigmentState( - phase=phase, - progress=progress, - rows=["█▀▄ █", " ▄█▀ ", "█ █"], - gradient=[46, 40, 34, 28, 22, 22, 34, 40, 46, 82, 118, 231], - center_row=10, - center_col=37, - ) - - -class TestRenderFigmentOverlay: - def test_returns_list_of_strings(self): - state = _make_state() - result = render_figment_overlay(state, 80, 24) - assert isinstance(result, list) - assert all(isinstance(s, str) for s in result) - - def test_contains_ansi_positioning(self): - state = _make_state() - result = render_figment_overlay(state, 80, 24) - # Should contain cursor positioning escape codes - assert any("\033[" in s for s in result) - - def test_reveal_phase_partial(self): - state = _make_state(phase=FigmentPhase.REVEAL, progress=0.0) - result = render_figment_overlay(state, 80, 24) - # At progress 0.0, very few cells should be visible - # Result should still be a valid list - assert isinstance(result, list) - - def test_hold_phase_full(self): - state = _make_state(phase=FigmentPhase.HOLD, progress=0.5) - result = render_figment_overlay(state, 80, 24) - # During hold, content should be present - assert len(result) > 0 - - def test_dissolve_phase(self): - state = 
_make_state(phase=FigmentPhase.DISSOLVE, progress=0.9) - result = render_figment_overlay(state, 80, 24) - # At high dissolve progress, most cells are gone - assert isinstance(result, list) - - def test_empty_rows(self): - state = FigmentState( - phase=FigmentPhase.HOLD, - progress=0.5, - rows=[], - gradient=[46] * 12, - center_row=0, - center_col=0, - ) - result = render_figment_overlay(state, 80, 24) - assert result == [] -``` - -- [ ] **Step 2: Run tests to verify they fail** - -```bash -uv run pytest tests/test_figment_overlay.py -v -``` - -Expected: FAIL — `ImportError: cannot import name 'render_figment_overlay' from 'engine.layers'` - -- [ ] **Step 3: Implement render_figment_overlay** - -Add to the end of `engine/layers.py` (after `get_effect_chain()`): - -```python -def render_figment_overlay( - figment_state, - w: int, - h: int, -) -> list[str]: - """Render figment overlay as ANSI cursor-positioning commands. - - Args: - figment_state: FigmentState with phase, progress, rows, gradient, centering. - w: terminal width - h: terminal height - - Returns: - List of ANSI strings to append to display buffer. 
- """ - from engine.render import lr_gradient, _color_codes_to_ansi - - rows = figment_state.rows - if not rows: - return [] - - phase = figment_state.phase - progress = figment_state.progress - gradient = figment_state.gradient - center_row = figment_state.center_row - center_col = figment_state.center_col - - cols = _color_codes_to_ansi(gradient) - - # Determine cell visibility based on phase - # Build a visibility mask for non-space cells - cell_positions = [] - for r_idx, row in enumerate(rows): - for c_idx, ch in enumerate(row): - if ch != " ": - cell_positions.append((r_idx, c_idx)) - - n_cells = len(cell_positions) - if n_cells == 0: - return [] - - # Use a deterministic seed so the reveal/dissolve pattern is stable per-figment - rng = random.Random(hash(tuple(rows[0][:10])) if rows[0] else 42) - shuffled = list(cell_positions) - rng.shuffle(shuffled) - - # Phase-dependent visibility - from effects_plugins.figment import FigmentPhase - - if phase == FigmentPhase.REVEAL: - visible_count = int(n_cells * progress) - visible = set(shuffled[:visible_count]) - elif phase == FigmentPhase.HOLD: - visible = set(cell_positions) - # Strobe: dim some cells periodically - if int(progress * 20) % 3 == 0: - # Dim ~30% of cells for strobe effect - dim_count = int(n_cells * 0.3) - visible -= set(shuffled[:dim_count]) - elif phase == FigmentPhase.DISSOLVE: - remaining_count = int(n_cells * (1.0 - progress)) - visible = set(shuffled[:remaining_count]) - else: - visible = set(cell_positions) - - # Build overlay commands - overlay: list[str] = [] - n_cols = len(cols) - max_x = max((len(r.rstrip()) for r in rows if r.strip()), default=1) - - for r_idx, row in enumerate(rows): - scr_row = center_row + r_idx + 1 # 1-indexed - if scr_row < 1 or scr_row > h: - continue - - line_buf: list[str] = [] - has_content = False - - for c_idx, ch in enumerate(row): - scr_col = center_col + c_idx + 1 - if scr_col < 1 or scr_col > w: - continue - - if ch != " " and (r_idx, c_idx) in visible: - # 
Apply gradient color - shifted = (c_idx / max(max_x - 1, 1)) % 1.0 - idx = min(round(shifted * (n_cols - 1)), n_cols - 1) - line_buf.append(f"{cols[idx]}{ch}{RST}") - has_content = True - else: - line_buf.append(" ") - - if has_content: - # Trim trailing spaces - line_str = "".join(line_buf).rstrip() - if line_str.strip(): - overlay.append( - f"\033[{scr_row};{center_col + 1}H{line_str}{RST}" - ) - - return overlay -``` - -- [ ] **Step 4: Run tests to verify they pass** - -```bash -uv run pytest tests/test_figment_overlay.py -v -``` - -Expected: All 6 tests pass. - -- [ ] **Step 5: Commit** - -```bash -git add engine/layers.py tests/test_figment_overlay.py -git commit -m "feat(figment): add render_figment_overlay() to layers.py" -``` - ---- - -### Task 7: Scroll loop integration - -**Files:** -- Modify: `engine/scroll.py:18-24` (add import), `engine/scroll.py:30` (setup), `engine/scroll.py:125-127` (frame loop) - -- [ ] **Step 1: Add figment import and setup to stream()** - -In `engine/scroll.py`, add the import for `render_figment_overlay` to the existing layers import block (line 18-24): - -```python -from engine.layers import ( - apply_glitch, - process_effects, - render_firehose, - render_figment_overlay, - render_message_overlay, - render_ticker_zone, -) -``` - -Then add the figment setup inside `stream()`, after the `frame_number = 0` line (line 54): - -```python - # Figment overlay (optional — requires cairosvg) - try: - from effects_plugins.figment import FigmentEffect - from engine.effects.registry import get_registry - - _fg_plugin = get_registry().get("figment") - figment = _fg_plugin if isinstance(_fg_plugin, FigmentEffect) else None - except ImportError: - figment = None -``` - -- [ ] **Step 2: Add figment overlay to frame loop** - -In the frame loop, insert the figment overlay block between the effects processing (line 120) and the message overlay (line 126). 
Insert after the `else:` block at line 124: - -```python - # Figment overlay (between effects and ntfy message) - if figment and figment.config.enabled: - figment_state = figment.get_figment_state(frame_number, w, h) - if figment_state is not None: - figment_buf = render_figment_overlay(figment_state, w, h) - buf.extend(figment_buf) -``` - -- [ ] **Step 3: Run full test suite** - -```bash -uv run pytest tests/ -v -``` - -Expected: All tests pass (existing + new). The 3 pre-existing `warmup_topics` failures remain. - -- [ ] **Step 4: Commit** - -```bash -git add engine/scroll.py -git commit -m "feat(figment): integrate figment overlay into scroll loop" -``` - ---- - -### Task 8: Run lint and final verification - -- [ ] **Step 1: Run ruff linter** - -```bash -uv run ruff check . -``` - -Fix any issues found. - -- [ ] **Step 2: Run ruff formatter** - -```bash -uv run ruff format . -``` - -- [ ] **Step 3: Run full test suite one more time** - -```bash -uv run pytest tests/ -v -``` - -Expected: All tests pass (except the 3 pre-existing `warmup_topics` failures). - -- [ ] **Step 4: Commit any lint/format fixes** - -```bash -git add -u -git commit -m "style: apply ruff formatting to figment modules" -``` - -(Skip this commit if ruff made no changes.) diff --git a/docs/superpowers/specs/2026-03-15-readme-update-design.md b/docs/superpowers/specs/2026-03-15-readme-update-design.md deleted file mode 100644 index 1af12e3..0000000 --- a/docs/superpowers/specs/2026-03-15-readme-update-design.md +++ /dev/null @@ -1,145 +0,0 @@ -# README Update Design — 2026-03-15 - -## Goal - -Restructure and expand `README.md` to: -1. Align with the current codebase (Python 3.10+, uv/mise/pytest/ruff toolchain, 6 new fonts) -2. Add extensibility-focused content (`Extending` section) -3. Add developer workflow coverage (`Development` section) -4. 
Improve navigability via top-level grouping (Approach C) - ---- - -## Proposed Structure - -``` -# MAINLINE -> tagline + description - -## Using - ### Run - ### Config - ### Feeds - ### Fonts - ### ntfy.sh - -## Internals - ### How it works - ### Architecture - -## Extending - ### NtfyPoller - ### MicMonitor - ### Render pipeline - -## Development - ### Setup - ### Tasks - ### Testing - ### Linting - -## Roadmap - ---- -*footer* -``` - ---- - -## Section-by-section design - -### Using - -All existing content preserved verbatim. Two changes: -- **Run**: add `uv run mainline.py` as an alternative invocation; expand bootstrap note to mention `uv sync` / `uv sync --all-extras` -- **ntfy.sh**: remove `NtfyPoller` reuse code example (moves to Extending); keep push instructions and topic config - -Subsections moved into Using (currently standalone): -- `Feeds` — it's configuration, not a concept -- `ntfy.sh` (usage half) - -### Internals - -All existing content preserved verbatim. One change: -- **Architecture**: append `tests/` directory listing to the module tree - -### Extending - -Entirely new section. 
Three subsections: - -**NtfyPoller** -- Minimal working import + usage example -- Note: stdlib only dependencies - -```python -from engine.ntfy import NtfyPoller - -poller = NtfyPoller("https://ntfy.sh/my_topic/json?since=20s&poll=1") -poller.start() - -# in your render loop: -msg = poller.get_active_message() # → (title, body, timestamp) or None -if msg: - title, body, ts = msg - render_my_message(title, body) # visualizer-specific -``` - -**MicMonitor** -- Minimal working import + usage example -- Note: sounddevice/numpy optional, degrades gracefully - -```python -from engine.mic import MicMonitor - -mic = MicMonitor(threshold_db=50) -if mic.start(): # returns False if sounddevice unavailable - excess = mic.excess # dB above threshold, clamped to 0 - db = mic.db # raw RMS dB level -``` - -**Render pipeline** -- Brief prose about `engine.render` as importable pipeline -- Minimal sketch of serve.py / ESP32 usage pattern -- Reference to `Mainline Renderer + ntfy Message Queue for ESP32.md` - -### Development - -Entirely new section. Four subsections: - -**Setup** -- Hard requirements: Python 3.10+, uv -- `uv sync` / `uv sync --all-extras` / `uv sync --group dev` - -**Tasks** (via mise) -- `mise run test`, `test-cov`, `lint`, `lint-fix`, `format`, `run`, `run-poetry`, `run-firehose` - -**Testing** -- Tests in `tests/` covering config, filter, mic, ntfy, sources, terminal -- `uv run pytest` and `uv run pytest --cov=engine --cov-report=term-missing` - -**Linting** -- `uv run ruff check` and `uv run ruff format` -- Note: pre-commit hooks run lint via `hk` - -### Roadmap - -Existing `## Ideas / Future` content preserved verbatim. Only change: rename heading to `## Roadmap`. - -### Footer - -Update `Python 3.9+` → `Python 3.10+`. 
- ---- - -## Files changed - -- `README.md` — restructured and expanded as above -- No other files - ---- - -## What is not changing - -- All existing prose, examples, and config table values — preserved verbatim where retained -- The Ideas/Future content — kept intact under the new Roadmap heading -- The cyberpunk voice and terse style of the existing README diff --git a/docs/superpowers/specs/2026-03-16-code-scroll-design.md b/docs/superpowers/specs/2026-03-16-code-scroll-design.md deleted file mode 100644 index 719db19..0000000 --- a/docs/superpowers/specs/2026-03-16-code-scroll-design.md +++ /dev/null @@ -1,154 +0,0 @@ -# Code Scroll Mode — Design Spec - -**Date:** 2026-03-16 -**Branch:** feat/code-scroll -**Status:** Approved - ---- - -## Overview - -Add a `--code` CLI flag that puts MAINLINE into "source consciousness" mode. Instead of RSS headlines or poetry stanzas, the program's own source code scrolls upward as large OTF half-block characters with the standard white-hot → deep green gradient. Each scroll item is one non-blank, non-comment line from `engine/*.py`, attributed to its enclosing function/class scope and dotted module path. - ---- - -## Goals - -- Mirror the existing `--poetry` mode pattern as closely as possible -- Zero new runtime dependencies (stdlib `ast` and `pathlib` only) -- No changes to `scroll.py` or the render pipeline -- The item tuple shape `(text, src, ts)` is unchanged - ---- - -## New Files - -### `engine/fetch_code.py` - -Single public function `fetch_code()` that returns `(items, line_count, 0)`. - -**Algorithm:** - -1. Glob `engine/*.py` in sorted order -2. For each file: - a. Read source text - b. `ast.parse(source)` → build a `{line_number: scope_label}` map by walking all `FunctionDef`, `AsyncFunctionDef`, and `ClassDef` nodes. Each node covers its full line range. Inner scopes override outer ones. - c. Iterate source lines (1-indexed). Skip if: - - The stripped line is empty - - The stripped line starts with `#` - d. 
For each kept line emit: - - `text` = `line.rstrip()` (preserve indentation for readability in the big render) - - `src` = scope label from the AST map, e.g. `stream()` for functions, `MicMonitor` for classes, `` for top-level lines - - `ts` = dotted module path derived from filename, e.g. `engine/scroll.py` → `engine.scroll` -3. Return `(items, len(items), 0)` - -**Scope label rules:** -- `FunctionDef` / `AsyncFunctionDef` → `name()` -- `ClassDef` → `name` (no parens) -- No enclosing node → `` - -**Dependencies:** `ast`, `pathlib` — stdlib only. - ---- - -## Modified Files - -### `engine/config.py` - -Extend `MODE` detection to recognise `--code`: - -```python -MODE = ( - "poetry" if "--poetry" in sys.argv or "-p" in sys.argv - else "code" if "--code" in sys.argv - else "news" -) -``` - -### `engine/app.py` - -**Subtitle line** — extend the subtitle dict: - -```python -_subtitle = { - "poetry": "literary consciousness stream", - "code": "source consciousness stream", -}.get(config.MODE, "digital consciousness stream") -``` - -**Boot sequence** — add `elif config.MODE == "code":` branch after the poetry branch: - -```python -elif config.MODE == "code": - from engine.fetch_code import fetch_code - slow_print(" > INITIALIZING SOURCE ARRAY...\n") - time.sleep(0.2) - print() - items, line_count, _ = fetch_code() - print() - print(f" {G_DIM}>{RST} {G_MID}{line_count} LINES ACQUIRED{RST}") -``` - -No cache save/load — local source files are read instantly and change only on disk writes. - ---- - -## Data Flow - -``` -engine/*.py (sorted) - │ - ▼ -fetch_code() - │ ast.parse → scope map - │ filter blank + comment lines - │ emit (line, scope(), engine.module) - ▼ -items: List[Tuple[str, str, str]] - │ - ▼ -stream(items, ntfy, mic) ← unchanged - │ - ▼ -next_headline() shuffles + recycles automatically -``` - ---- - -## Error Handling - -- If a file fails to `ast.parse` (malformed source), fall back to `` scope for all lines in that file — do not crash. 
-- If `engine/` contains no `.py` files (shouldn't happen in practice), `fetch_code()` returns an empty list; `app.py`'s existing `if not items:` guard handles this. - ---- - -## Testing - -New file: `tests/test_fetch_code.py` - -| Test | Assertion | -|------|-----------| -| `test_items_are_tuples` | Every item from `fetch_code()` is a 3-tuple of strings | -| `test_blank_and_comment_lines_excluded` | No item text is empty; no item text (stripped) starts with `#` | -| `test_module_path_format` | Every `ts` field matches pattern `engine\.\w+` | - -No mocking — tests read the real engine source files, keeping them honest against actual content. - ---- - -## CLI - -```bash -python3 mainline.py --code # source consciousness mode -uv run mainline.py --code -``` - -Compatible with all existing flags (`--no-font-picker`, `--font-file`, `--firehose`, etc.). - ---- - -## Out of Scope - -- Syntax highlighting / token-aware coloring (can be added later) -- `--code-dir` flag for pointing at arbitrary directories (YAGNI) -- Caching code items to disk diff --git a/docs/superpowers/specs/2026-03-16-color-scheme-design.md b/docs/superpowers/specs/2026-03-16-color-scheme-design.md deleted file mode 100644 index 56120b5..0000000 --- a/docs/superpowers/specs/2026-03-16-color-scheme-design.md +++ /dev/null @@ -1,299 +0,0 @@ -# Color Scheme Switcher Design - -**Date:** 2026-03-16 -**Status:** Revised after review -**Scope:** Interactive color theme selection for Mainline news ticker - ---- - -## Overview - -Mainline currently renders news headlines with a fixed white-hot → deep green gradient. This feature adds an interactive theme picker at startup that lets users choose between three precise color schemes (green, orange, purple), each with complementary message queue colors. - -The implementation uses a dedicated `Theme` class to encapsulate gradients and metadata, enabling future extensions like random rotation, animation, or additional themes without architectural changes. 
- ---- - -## Requirements - -**Functional:** -1. User selects a color theme from an interactive menu at startup (green, orange, or purple) -2. Main headline gradient uses the selected primary color (white → color) -3. Message queue (ntfy) gradient uses the precise complementary color (white → opposite) -4. Selection is fresh each run (no persistence) -5. Design supports future "random rotation" mode without refactoring - -**Complementary colors (precise opposites):** -- Green (38;5;22) → Magenta (38;5;89) *(current, unchanged)* -- Orange (38;5;208) → Blue (38;5;21) -- Purple (38;5;129) → Yellow (38;5;226) - -**Non-functional:** -- Reuse the existing font picker pattern for UI consistency -- Zero runtime overhead during streaming (theme lookup happens once at startup) -- **Boot UI (title, subtitle, status lines) use hardcoded green color constants (G_HI, G_DIM, G_MID); only scrolling headlines and ntfy messages use theme gradients** -- Font picker UI remains hardcoded green for visual continuity - ---- - -## Architecture - -### New Module: `engine/themes.py` - -**Data-only module:** Contains Theme class, THEME_REGISTRY, and get_theme() function. **Imports only typing; does NOT import config or render** to prevent circular dependencies. - -```python -class Theme: - """Encapsulates a color scheme: name, main gradient, message gradient.""" - - def __init__(self, name: str, main_gradient: list[str], message_gradient: list[str]): - self.name = name - self.main_gradient = main_gradient # white → primary color - self.message_gradient = message_gradient # white → complementary -``` - -**Theme Registry:** -Three instances registered by ID: `"green"`, `"orange"`, `"purple"` (IDs match menu labels for clarity). 
- -Each gradient is a list of 12 ANSI 256-color codes matching the current green gradient: -``` -[ - "\033[1;38;5;231m", # white (bold) - "\033[1;38;5;195m", # pale white-tint - "\033[38;5;123m", # bright cyan - "\033[38;5;118m", # bright lime - "\033[38;5;82m", # lime - "\033[38;5;46m", # bright color - "\033[38;5;40m", # color - "\033[38;5;34m", # medium color - "\033[38;5;28m", # dark color - "\033[38;5;22m", # deep color - "\033[2;38;5;22m", # dim deep color - "\033[2;38;5;235m", # near black -] -``` - -**Finalized color codes:** - -**Green (primary: 22, complementary: 89)** — unchanged from current -- Main: `[231, 195, 123, 118, 82, 46, 40, 34, 28, 22, 22(dim), 235]` -- Messages: `[231, 225, 219, 213, 207, 201, 165, 161, 125, 89, 89(dim), 235]` - -**Orange (primary: 208, complementary: 21)** -- Main: `[231, 215, 209, 208, 202, 166, 130, 94, 58, 94, 94(dim), 235]` -- Messages: `[231, 195, 33, 27, 21, 21, 21, 18, 18, 18, 18(dim), 235]` - -**Purple (primary: 129, complementary: 226)** -- Main: `[231, 225, 177, 171, 165, 135, 129, 93, 57, 57, 57(dim), 235]` -- Messages: `[231, 226, 226, 220, 220, 184, 184, 178, 178, 172, 172(dim), 235]` - -**Public API:** -- `get_theme(theme_id: str) -> Theme` — lookup by ID, raises KeyError if not found -- `THEME_REGISTRY` — dict of all available themes (for picker) - ---- - -### Modified: `engine/config.py` - -**New globals:** -```python -ACTIVE_THEME = None # set by set_active_theme() after picker; guaranteed non-None during stream() -``` - -**New function:** -```python -def set_active_theme(theme_id: str = "green"): - """Set the active theme. 
Defaults to 'green' if not specified.""" - global ACTIVE_THEME - from engine import themes - ACTIVE_THEME = themes.get_theme(theme_id) -``` - -**Behavior:** -- Called by `app.pick_color_theme()` with user selection -- Has default fallback to "green" for non-interactive environments (CI, testing, piped stdin) -- Guarantees `ACTIVE_THEME` is set before any render functions are called - -**Removal:** -- Delete hardcoded `GRAD_COLS` and `MSG_GRAD_COLS` constants - ---- - -### Modified: `engine/render.py` - -**Updated gradient access in existing functions:** - -Current pattern (will be removed): -```python -GRAD_COLS = [...] # hardcoded green -MSG_GRAD_COLS = [...] # hardcoded magenta -``` - -New pattern — update `lr_gradient()` function: -```python -def lr_gradient(rows, offset, cols=None): - if cols is None: - from engine import config - cols = (config.ACTIVE_THEME.main_gradient - if config.ACTIVE_THEME - else _default_green_gradient()) - # ... rest of function unchanged -``` - -**Define fallback:** -```python -def _default_green_gradient(): - """Fallback green gradient (current colors).""" - return [ - "\033[1;38;5;231m", "\033[1;38;5;195m", "\033[38;5;123m", - "\033[38;5;118m", "\033[38;5;82m", "\033[38;5;46m", - "\033[38;5;40m", "\033[38;5;34m", "\033[38;5;28m", - "\033[38;5;22m", "\033[2;38;5;22m", "\033[2;38;5;235m", - ] -``` - -**Message gradient handling:** - -The existing code (scroll.py line 89) calls `lr_gradient()` with `MSG_GRAD_COLS`. 
Change this call to: -```python -# Instead of: lr_gradient(rows, offset, MSG_GRAD_COLS) -# Use: -from engine import config -cols = (config.ACTIVE_THEME.message_gradient - if config.ACTIVE_THEME - else _default_magenta_gradient()) -lr_gradient(rows, offset, cols) -``` - -or define a helper: -```python -def msg_gradient(rows, offset): - """Apply message (ntfy) gradient using theme complementary colors.""" - from engine import config - cols = (config.ACTIVE_THEME.message_gradient - if config.ACTIVE_THEME - else _default_magenta_gradient()) - return lr_gradient(rows, offset, cols) -``` - ---- - -### Modified: `engine/app.py` - -**New function: `pick_color_theme()`** - -Mirrors `pick_font_face()` pattern: - -```python -def pick_color_theme(): - """Interactive color theme picker. Defaults to 'green' if not TTY.""" - import sys - from engine import config, themes - - # Non-interactive fallback: use default - if not sys.stdin.isatty(): - config.set_active_theme("green") - return - - # Interactive picker (similar to font picker) - themes_list = list(themes.THEME_REGISTRY.items()) - selected = 0 - - # ... render menu, handle arrow keys j/k, ↑/↓ ... - # ... on Enter, call config.set_active_theme(themes_list[selected][0]) ... -``` - -**Placement in `main()`:** -```python -def main(): - # ... signal handler setup ... - pick_color_theme() # NEW — before title/subtitle - pick_font_face() - # ... rest of boot sequence, title/subtitle use hardcoded G_HI/G_DIM ... -``` - -**Important:** The title and subtitle render with hardcoded `G_HI`/`G_DIM` constants, not theme gradients. This is intentional for visual consistency with the font picker menu. 
- ---- - -## Data Flow - -``` -User starts: mainline.py - ↓ -main() called - ↓ -pick_color_theme() - → If TTY: display menu, read input, call config.set_active_theme(user_choice) - → If not TTY: silently call config.set_active_theme("green") - ↓ -pick_font_face() — renders in hardcoded green UI colors - ↓ -Boot messages (title, status) — all use hardcoded G_HI/G_DIM (not theme gradients) - ↓ -stream() — headlines + ntfy messages use config.ACTIVE_THEME gradients - ↓ -On exit: no persistence -``` - ---- - -## Implementation Notes - -### Initialization Guarantee -`config.ACTIVE_THEME` is guaranteed to be non-None before `stream()` is called because: -1. `pick_color_theme()` always sets it (either interactively or via fallback) -2. It's called before any rendering happens -3. Default fallback ensures non-TTY environments don't crash - -### Module Independence -`themes.py` is a pure data module with no imports of `config` or `render`. This prevents circular dependencies and allows it to be imported by multiple consumers without side effects. - -### Color Code Finalization -All three gradient sequences (green, orange, purple main + complementary) are now finalized with specific ANSI codes. No TBD placeholders remain. - -### Theme ID Naming -IDs are `"green"`, `"orange"`, `"purple"` — matching the menu labels exactly for clarity. - -### Terminal Resize Handling -The `pick_color_theme()` function mirrors `pick_font_face()`, which does not handle terminal resizing during the picker display. If the terminal is resized while the picker menu is shown, the menu redraw may be incomplete; pressing any key (arrow, j/k, q) continues normally. This is acceptable because: -1. The picker completes quickly (< 5 seconds typical interaction) -2. Once a theme is selected, the menu closes and rendering begins -3. 
The streaming phase (`stream()`) is resilient to terminal resizing and auto-reflows to new dimensions - -No special resize handling is needed for the color picker beyond what exists for the font picker. - -### Testing Strategy -1. **Unit tests** (`tests/test_themes.py`): - - Verify Theme class construction - - Test THEME_REGISTRY lookup (valid and invalid IDs) - - Confirm gradient lists have correct length (12) - -2. **Integration tests** (`tests/test_render.py`): - - Mock `config.ACTIVE_THEME` to each theme - - Verify `lr_gradient()` uses correct colors - - Verify fallback works when `ACTIVE_THEME` is None - -3. **Existing tests:** - - Render tests that check gradient output will need to mock `config.ACTIVE_THEME` - - Use pytest fixtures to set theme per test case - ---- - -## Files Changed -- `engine/themes.py` (new) -- `engine/config.py` (add `ACTIVE_THEME`, `set_active_theme()`) -- `engine/render.py` (replace GRAD_COLS/MSG_GRAD_COLS references with config lookups) -- `engine/app.py` (add `pick_color_theme()`, call in main) -- `tests/test_themes.py` (new unit tests) -- `tests/test_render.py` (update mocking strategy) - -## Acceptance Criteria -1. ✓ Color picker displays 3 theme options at startup -2. ✓ Selection applies to all headline and message gradients -3. ✓ Boot UI (title, status) uses hardcoded green (not theme) -4. ✓ Scrolling headlines and ntfy messages use theme gradients -5. ✓ No persistence between runs -6. ✓ Non-TTY environments default to green without error -7. ✓ Architecture supports future random/animation modes -8. 
✓ All gradient color codes finalized with no TBD values diff --git a/docs/superpowers/specs/2026-03-19-figment-mode-design.md b/docs/superpowers/specs/2026-03-19-figment-mode-design.md deleted file mode 100644 index c6bd3f9..0000000 --- a/docs/superpowers/specs/2026-03-19-figment-mode-design.md +++ /dev/null @@ -1,308 +0,0 @@ -# Figment Mode Design Spec - -> Periodic full-screen SVG glyph overlay with flickery animation, theme-aware coloring, and extensible physical device control. - -## Overview - -Figment mode displays a randomly selected SVG from the `figments/` directory as a flickery, glitchy half-block terminal overlay on top of the running ticker. It appears once per minute (configurable), holds for ~4.5 seconds with a three-phase animation (progressive reveal, strobing hold, dissolve), then fades back to the ticker. Colors are randomly chosen from the existing theme gradients. - -The feature is designed for extensibility: a generic input protocol allows MQTT, ntfy, serial, or any other control surface to trigger figments and adjust parameters in real time. - -## Goals - -- Display SVG figments as half-block terminal art overlaid on the running ticker -- Three-phase animation: progressive reveal, strobing hold, dissolve -- Random color from existing theme gradients (green, orange, purple) -- Configurable interval and duration via C&C -- Extensible input abstraction for physical device control (MQTT, serial, etc.) - -## Out of Scope - -- Multi-figment simultaneous display (one at a time) -- SVG animation support (static SVGs only; animation comes from the overlay phases) -- Custom color palettes beyond existing themes -- MQTT and serial adapters (v1 ships with ntfy C&C only; protocol is ready for future adapters) - -## Architecture: Hybrid Plugin + Overlay - -The figment is an **EffectPlugin** for lifecycle, discovery, and configuration, but delegates rendering to a **layers-style overlay helper**. 
This avoids stretching the `EffectPlugin.process()` contract (which transforms line buffers) while still benefiting from the plugin system for C&C, auto-discovery, and config management. - -**Important**: The plugin class is named `FigmentEffect` (not `FigmentPlugin`) to match the `*Effect` naming convention required by `discover_plugins()` in `effects_plugins/__init__.py`. The plugin is **not** added to the `EffectChain` order list — its `process()` is a no-op that returns the buffer unchanged. The chain only processes effects that transform buffers (noise, fade, glitch, firehose). Figment's rendering happens via the overlay path in `scroll.py`, outside the chain. - -### Component Diagram - -``` - +-------------------+ - | FigmentTrigger | (Protocol) - | - NtfyTrigger | (v1) - | - MqttTrigger | (future) - | - SerialTrigger | (future) - +--------+----------+ - | - | FigmentCommand - v -+------------------+ +-----------------+ +----------------------+ -| figment_render |<---| FigmentEffect |--->| render_figment_ | -| .py | | (EffectPlugin) | | overlay() in | -| | | | | layers.py | -| SVG -> PIL -> | | Timer, state | | | -| half-block cache | | machine, SVG | | ANSI cursor-position | -| | | selection | | commands for overlay | -+------------------+ +-----------------+ +----------------------+ - | - | get_figment_state() - v - +-------------------+ - | scroll.py | - +-------------------+ -``` - -## Section 1: SVG Rasterization - -**File: `engine/figment_render.py`** - -Reuses the same PIL-based half-block encoding that `engine/render.py` uses for OTF fonts. - -### Pipeline - -1. **Load**: `cairosvg.svg2png()` converts SVG to PNG bytes in memory (no temp files) -2. **Resize**: PIL scales to fit terminal — width = `tw()`, height = `th() * 2` pixels (each terminal row encodes 2 pixel rows via half-blocks) -3. **Threshold**: Convert to greyscale ("L" mode), apply binary threshold to get visible/not-visible -4. **Half-block encode**: Walk pixel pairs top-to-bottom. 
For each 2-row pair, emit `█` (both lit), `▀` (top only), `▄` (bottom only), or space (neither) -5. **Cache**: Results cached per `(svg_path, terminal_width, terminal_height)` — invalidated on terminal resize - -### Dependency - -`cairosvg` added as an optional dependency in `pyproject.toml` (like `sounddevice`). If `cairosvg` is not installed, the `FigmentEffect` class will fail to import, and `discover_plugins()` will silently skip it (the existing `except Exception: pass` in discovery handles this). The plugin simply won't appear in the registry. - -### Key Function - -```python -def rasterize_svg(svg_path: str, width: int, height: int) -> list[str]: - """Convert SVG file to list of half-block terminal rows (uncolored).""" -``` - -## Section 2: Figment Overlay Rendering - -**Integration point: `engine/layers.py`** - -New function following the `render_message_overlay()` pattern. - -### FigmentState Dataclass - -Defined in `effects_plugins/figment.py`, passed between the plugin and the overlay renderer: - -```python -@dataclass -class FigmentState: - phase: FigmentPhase # enum: REVEAL, HOLD, DISSOLVE - progress: float # 0.0 to 1.0 within current phase - rows: list[str] # rasterized half-block rows (uncolored) - gradient: list[int] # 12-color ANSI 256 gradient from chosen theme - center_row: int # top row for centering in viewport - center_col: int # left column for centering in viewport -``` - -### Function Signature - -```python -def render_figment_overlay(figment_state: FigmentState, w: int, h: int) -> list[str]: - """Return ANSI cursor-positioning commands for the current figment frame.""" -``` - -### Animation Phases (~4.5 seconds total) - -Progress advances each frame as: `progress += config.FRAME_DT / phase_duration`. At 20 FPS (FRAME_DT=0.05s), a 1.5s phase takes 30 frames to complete. - -| Phase | Duration | Behavior | -|-------|----------|----------| -| **Reveal** | ~1.5s | Progressive scanline fill. 
Each frame, a percentage of the figment's non-empty cells become visible in random block order. Intensity scales reveal speed. | -| **Hold** | ~1.5s | Full figment visible. Strobes between full brightness and dimmed/partial visibility every few frames. Intensity scales strobe frequency. | -| **Dissolve** | ~1.5s | Inverse of reveal. Cells randomly drop out, replaced by spaces. Intensity scales dissolve speed. | - -### Color - -A random theme gradient is selected from `THEME_REGISTRY` at trigger time. Applied via `lr_gradient()` — the same function that colors headlines and messages. - -### Positioning - -Figment is centered in the viewport. Each visible row is an ANSI `\033[row;colH` command appended to the buffer, identical to how the message overlay works. - -## Section 3: FigmentEffect (Effect Plugin) - -**File: `effects_plugins/figment.py`** - -An `EffectPlugin(ABC)` subclass named `FigmentEffect` to match the `*Effect` discovery convention. - -### Chain Exclusion - -`FigmentEffect` is registered in the `EffectRegistry` (for C&C access and config management) but is **not** added to the `EffectChain` order list. Its `process()` returns the buffer unchanged. The `enabled` flag is checked directly by `scroll.py` when deciding whether to call `get_figment_state()`, not by the chain. - -### Responsibilities - -- **Timer**: Tracks elapsed time via `config.FRAME_DT` accumulation. At the configured interval (default 60s), triggers a new figment. -- **SVG selection**: Randomly picks from `figments/*.svg`. Avoids repeating the last shown. -- **State machine**: `idle -> reveal -> hold -> dissolve -> idle`. Tracks phase progress (0.0 to 1.0). -- **Color selection**: Picks a random theme key (`"green"`, `"orange"`, `"purple"`) at trigger time. -- **Rasterization**: Calls `rasterize_svg()` on trigger, caches result for the display duration. 
- -### State Machine - -``` -idle ──(timer fires or trigger received)──> reveal -reveal ──(progress >= 1.0)──> hold -hold ──(progress >= 1.0)──> dissolve -dissolve ──(progress >= 1.0)──> idle -``` - -### Interface - -The `process()` method returns the buffer unchanged (no-op). The plugin exposes state via: - -```python -def get_figment_state(self, frame_number: int) -> FigmentState | None: - """Tick the state machine and return current state, or None if idle.""" -``` - -This mirrors the `ntfy_poller.get_active_message()` pattern. - -### Scroll Loop Access - -`scroll.py` imports `FigmentEffect` directly and uses `isinstance()` to safely downcast from the registry: - -```python -from effects_plugins.figment import FigmentEffect - -plugin = registry.get("figment") -figment = plugin if isinstance(plugin, FigmentEffect) else None -``` - -This is a one-time setup check, not per-frame. If `cairosvg` is missing, the import is wrapped in a try/except and `figment` stays `None`. - -### EffectConfig - -- `enabled`: bool (default `False` — opt-in) -- `intensity`: float — scales strobe frequency and reveal/dissolve speed -- `params`: - - `interval_secs`: 60 (time between figments) - - `display_secs`: 4.5 (total animation duration) - - `figment_dir`: "figments" (SVG source directory) - -Controllable via C&C: `/effects figment on`, `/effects figment intensity 0.7`. - -## Section 4: Input Abstraction (FigmentTrigger) - -**File: `engine/figment_trigger.py`** - -### Protocol - -```python -class FigmentTrigger(Protocol): - def poll(self) -> FigmentCommand | None: ... -``` - -### FigmentCommand - -```python -class FigmentAction(Enum): - TRIGGER = "trigger" - SET_INTENSITY = "set_intensity" - SET_INTERVAL = "set_interval" - SET_COLOR = "set_color" - STOP = "stop" - -@dataclass -class FigmentCommand: - action: FigmentAction - value: float | str | None = None -``` - -Uses an enum for consistency with `EventType` in `engine/events.py`. 
- -### Adapters - -| Adapter | Transport | Dependency | Status | -|---------|-----------|------------|--------| -| `NtfyTrigger` | Existing C&C ntfy topic | None (reuses ntfy) | v1 | -| `MqttTrigger` | MQTT broker | `paho-mqtt` (optional) | Future | -| `SerialTrigger` | USB serial | `pyserial` (optional) | Future | - -**NtfyTrigger v1**: Subscribes as a callback on the existing `NtfyPoller`. Parses messages with a `/figment` prefix (e.g., `/figment trigger`, `/figment intensity 0.8`). This is separate from the `/effects figment on` C&C path — the trigger protocol allows external devices to send commands without knowing the effects controller API. - -### Integration - -The `FigmentEffect` accepts a list of triggers. Each frame, it polls all triggers and acts on commands. Triggers are optional — if none are configured, the plugin runs on its internal timer alone. - -### EventBus Bridge - -A new `FIGMENT_TRIGGER` variant is added to the `EventType` enum in `engine/events.py`, with a corresponding `FigmentTriggerEvent` dataclass. Triggers publish to the EventBus for other components to react (logging, multi-display sync). - -## Section 5: Scroll Loop Integration - -Minimal change to `engine/scroll.py`: - -```python -# In stream() setup (with safe import): -try: - from effects_plugins.figment import FigmentEffect - _plugin = registry.get("figment") - figment = _plugin if isinstance(_plugin, FigmentEffect) else None -except ImportError: - figment = None - -# In frame loop, after effects processing, before ntfy message overlay: -if figment and figment.config.enabled: - figment_state = figment.get_figment_state(frame_number) - if figment_state is not None: - figment_overlay = render_figment_overlay(figment_state, w, h) - buf.extend(figment_overlay) -``` - -### Overlay Priority - -Figment overlay appends **after** effects processing but **before** the ntfy message overlay. 
This means: -- Ntfy messages always appear on top of figments (higher priority) -- Existing glitch/noise effects run over the ticker underneath the figment - -Note: If more overlay types are added in the future, a priority-based overlay system should replace the current positional ordering. - -## Section 6: Error Handling - -| Scenario | Behavior | -|----------|----------| -| `cairosvg` not installed | `FigmentEffect` fails to import; `discover_plugins()` silently skips it; `scroll.py` import guard sets `figment = None` | -| `figments/` directory missing | Plugin logs warning at startup, stays in permanent `idle` state | -| `figments/` contains zero `.svg` files | Same as above: warning, permanent `idle` | -| Malformed SVG | `cairosvg` raises exception; plugin catches it, skips that SVG, picks another. If all SVGs fail, enters permanent `idle` with warning | -| Terminal resize during animation | Re-rasterize on next frame using new dimensions. Cache miss triggers fresh rasterization. Animation phase/progress are preserved; only the rendered rows update | - -## Section 7: File Summary - -### New Files - -| File | Purpose | -|------|---------| -| `effects_plugins/figment.py` | FigmentEffect — lifecycle, timer, state machine, SVG selection, FigmentState/FigmentPhase | -| `engine/figment_render.py` | SVG to half-block rasterization pipeline | -| `engine/figment_trigger.py` | FigmentTrigger protocol, FigmentAction enum, FigmentCommand, NtfyTrigger adapter | -| `figments/` | SVG source directory (ships with sample SVGs) | -| `tests/test_figment.py` | FigmentEffect lifecycle, state machine transitions, timer | -| `tests/test_figment_render.py` | SVG rasterization, caching, edge cases | -| `tests/test_figment_trigger.py` | FigmentCommand parsing, NtfyTrigger adapter | -| `tests/fixtures/test.svg` | Minimal SVG for deterministic rasterization tests | - -### Modified Files - -| File | Change | -|------|--------| -| `engine/scroll.py` | Figment overlay integration (setup + 
per-frame block) | -| `engine/layers.py` | Add `render_figment_overlay()` function | -| `engine/events.py` | Add `FIGMENT_TRIGGER` to `EventType` enum, add `FigmentTriggerEvent` dataclass | -| `pyproject.toml` | Add `cairosvg` as optional dependency | - -## Testing Strategy - -- **Unit**: State machine transitions (idle→reveal→hold→dissolve→idle), timer accuracy (fires at interval_secs), SVG rasterization output dimensions, FigmentCommand parsing, FigmentAction enum coverage -- **Integration**: Plugin discovery (verify `FigmentEffect` is found by `discover_plugins()`), overlay rendering with mock terminal dimensions, C&C command handling via `/effects figment on` -- **Edge cases**: Missing figments dir, empty dir, malformed SVG, cairosvg unavailable, terminal resize mid-animation -- **Fixture**: Minimal `test.svg` (simple rectangle) for deterministic rasterization tests diff --git a/effects_plugins/figment.py b/effects_plugins/figment.py deleted file mode 100644 index bf9ca14..0000000 --- a/effects_plugins/figment.py +++ /dev/null @@ -1,200 +0,0 @@ -""" -Figment effect plugin — periodic SVG glyph overlay. - -Owns the figment lifecycle: timer, SVG selection, state machine. -Delegates rendering to render_figment_overlay() in engine/layers.py. - -Named FigmentEffect (not FigmentPlugin) to match the *Effect discovery -convention in effects_plugins/__init__.py. - -NOT added to the EffectChain order — process() is a no-op. The overlay -rendering is handled by scroll.py calling get_figment_state(). 
-""" - -from __future__ import annotations - -import random -from dataclasses import dataclass -from enum import Enum, auto -from pathlib import Path - -from engine import config -from engine.effects.types import EffectConfig, EffectContext, EffectPlugin -from engine.figment_render import rasterize_svg -from engine.figment_trigger import FigmentAction, FigmentCommand, FigmentTrigger -from engine.themes import THEME_REGISTRY - - -class FigmentPhase(Enum): - REVEAL = auto() - HOLD = auto() - DISSOLVE = auto() - - -@dataclass -class FigmentState: - phase: FigmentPhase - progress: float - rows: list[str] - gradient: list[int] - center_row: int - center_col: int - - -class FigmentEffect(EffectPlugin): - name = "figment" - config = EffectConfig( - enabled=False, - intensity=1.0, - params={ - "interval_secs": 60, - "display_secs": 4.5, - "figment_dir": "figments", - }, - ) - - def __init__( - self, - figment_dir: str | None = None, - triggers: list[FigmentTrigger] | None = None, - ): - self.config = EffectConfig( - enabled=False, - intensity=1.0, - params={ - "interval_secs": 60, - "display_secs": 4.5, - "figment_dir": figment_dir or "figments", - }, - ) - self._triggers = triggers or [] - self._phase: FigmentPhase | None = None - self._progress: float = 0.0 - self._rows: list[str] = [] - self._gradient: list[int] = [] - self._center_row: int = 0 - self._center_col: int = 0 - self._timer: float = 0.0 - self._last_svg: str | None = None - self._svg_files: list[str] = [] - self._scan_svgs() - - def _scan_svgs(self) -> None: - figment_dir = Path(self.config.params["figment_dir"]) - if figment_dir.is_dir(): - self._svg_files = sorted(str(p) for p in figment_dir.glob("*.svg")) - - def process(self, buf: list[str], ctx: EffectContext) -> list[str]: - return buf - - def configure(self, cfg: EffectConfig) -> None: - # Preserve figment_dir if the new config doesn't supply one - figment_dir = cfg.params.get( - "figment_dir", self.config.params.get("figment_dir", "figments") - ) - 
self.config = cfg - if "figment_dir" not in self.config.params: - self.config.params["figment_dir"] = figment_dir - self._scan_svgs() - - def trigger(self, w: int, h: int) -> None: - """Manually trigger a figment display.""" - if not self._svg_files: - return - - # Pick a random SVG, avoid repeating - candidates = [s for s in self._svg_files if s != self._last_svg] - if not candidates: - candidates = self._svg_files - svg_path = random.choice(candidates) - self._last_svg = svg_path - - # Rasterize - try: - self._rows = rasterize_svg(svg_path, w, h) - except Exception: - return - - # Pick random theme gradient - theme_key = random.choice(list(THEME_REGISTRY.keys())) - self._gradient = THEME_REGISTRY[theme_key].main_gradient - - # Center in viewport - figment_h = len(self._rows) - figment_w = max((len(r) for r in self._rows), default=0) - self._center_row = max(0, (h - figment_h) // 2) - self._center_col = max(0, (w - figment_w) // 2) - - # Start reveal phase - self._phase = FigmentPhase.REVEAL - self._progress = 0.0 - - def get_figment_state( - self, frame_number: int, w: int, h: int - ) -> FigmentState | None: - """Tick the state machine and return current state, or None if idle.""" - if not self.config.enabled: - return None - - # Poll triggers - for trig in self._triggers: - cmd = trig.poll() - if cmd is not None: - self._handle_command(cmd, w, h) - - # Tick timer when idle - if self._phase is None: - self._timer += config.FRAME_DT - interval = self.config.params.get("interval_secs", 60) - if self._timer >= interval: - self._timer = 0.0 - self.trigger(w, h) - - # Tick animation — snapshot current phase/progress, then advance - if self._phase is not None: - # Capture the state at the start of this frame - current_phase = self._phase - current_progress = self._progress - - # Advance for next frame - display_secs = self.config.params.get("display_secs", 4.5) - phase_duration = display_secs / 3.0 - self._progress += config.FRAME_DT / phase_duration - - if 
self._progress >= 1.0: - self._progress = 0.0 - if self._phase == FigmentPhase.REVEAL: - self._phase = FigmentPhase.HOLD - elif self._phase == FigmentPhase.HOLD: - self._phase = FigmentPhase.DISSOLVE - elif self._phase == FigmentPhase.DISSOLVE: - self._phase = None - - return FigmentState( - phase=current_phase, - progress=current_progress, - rows=self._rows, - gradient=self._gradient, - center_row=self._center_row, - center_col=self._center_col, - ) - - return None - - def _handle_command(self, cmd: FigmentCommand, w: int, h: int) -> None: - if cmd.action == FigmentAction.TRIGGER: - self.trigger(w, h) - elif cmd.action == FigmentAction.SET_INTENSITY and isinstance( - cmd.value, (int, float) - ): - self.config.intensity = float(cmd.value) - elif cmd.action == FigmentAction.SET_INTERVAL and isinstance( - cmd.value, (int, float) - ): - self.config.params["interval_secs"] = float(cmd.value) - elif cmd.action == FigmentAction.SET_COLOR and isinstance(cmd.value, str): - if cmd.value in THEME_REGISTRY: - self._gradient = THEME_REGISTRY[cmd.value].main_gradient - elif cmd.action == FigmentAction.STOP: - self._phase = None - self._progress = 0.0 diff --git a/effects_plugins/glitch.py b/effects_plugins/glitch.py deleted file mode 100644 index eb54cba..0000000 --- a/effects_plugins/glitch.py +++ /dev/null @@ -1,37 +0,0 @@ -import random - -from engine import config -from engine.effects.types import EffectConfig, EffectContext, EffectPlugin -from engine.terminal import C_DIM, DIM, G_DIM, G_LO, RST - - -class GlitchEffect(EffectPlugin): - name = "glitch" - config = EffectConfig(enabled=True, intensity=1.0) - - def process(self, buf: list[str], ctx: EffectContext) -> list[str]: - if not buf: - return buf - result = list(buf) - intensity = self.config.intensity - - glitch_prob = 0.32 + min(0.9, ctx.mic_excess * 0.16) - glitch_prob = glitch_prob * intensity - n_hits = 4 + int(ctx.mic_excess / 2) - n_hits = int(n_hits * intensity) - - if random.random() < glitch_prob: - for _ in 
range(min(n_hits, len(result))): - gi = random.randint(0, len(result) - 1) - scr_row = gi + 1 - result[gi] = f"\033[{scr_row};1H{self._glitch_bar(ctx.terminal_width)}" - return result - - def _glitch_bar(self, w: int) -> str: - c = random.choice(["░", "▒", "─", "\xc2"]) - n = random.randint(3, w // 2) - o = random.randint(0, w - n) - return " " * o + f"{G_LO}{DIM}" + c * n + RST - - def configure(self, cfg: EffectConfig) -> None: - self.config = cfg diff --git a/engine/__init__.py b/engine/__init__.py index 63f007f..a305edb 100644 --- a/engine/__init__.py +++ b/engine/__init__.py @@ -1 +1,10 @@ # engine — modular internals for mainline + +# Import submodules to make them accessible via engine. +# This is required for unittest.mock.patch to work with "engine.." +# strings and for direct attribute access on the engine package. +import engine.config # noqa: F401 +import engine.fetch # noqa: F401 +import engine.filter # noqa: F401 +import engine.sources # noqa: F401 +import engine.terminal # noqa: F401 diff --git a/engine/app.py b/engine/app.py index 8ba2803..6fc9e6e 100644 --- a/engine/app.py +++ b/engine/app.py @@ -1,437 +1,14 @@ """ -Application orchestrator — boot sequence, signal handling, main loop wiring. +Application orchestrator — pipeline mode entry point. + +This module provides the main entry point for the application. +The implementation has been refactored into the engine.app package. 
""" -import atexit -import os -import signal -import sys -import termios -import time -import tty +# Re-export from the new package structure +from engine.app import main, run_pipeline_mode, run_pipeline_mode_direct -from engine import config, render, themes -from engine.fetch import fetch_all, fetch_poetry, load_cache, save_cache -from engine.mic import MicMonitor -from engine.ntfy import NtfyPoller -from engine.scroll import stream -from engine.terminal import ( - CLR, - CURSOR_OFF, - CURSOR_ON, - G_DIM, - G_HI, - G_MID, - RST, - W_DIM, - W_GHOST, - boot_ln, - slow_print, - tw, -) +__all__ = ["main", "run_pipeline_mode", "run_pipeline_mode_direct"] -TITLE = [ - " ███╗ ███╗ █████╗ ██╗███╗ ██╗██╗ ██╗███╗ ██╗███████╗", - " ████╗ ████║██╔══██╗██║████╗ ██║██║ ██║████╗ ██║██╔════╝", - " ██╔████╔██║███████║██║██╔██╗ ██║██║ ██║██╔██╗ ██║█████╗ ", - " ██║╚██╔╝██║██╔══██║██║██║╚██╗██║██║ ██║██║╚██╗██║██╔══╝ ", - " ██║ ╚═╝ ██║██║ ██║██║██║ ╚████║███████╗██║██║ ╚████║███████╗", - " ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝╚══════╝╚═╝╚═╝ ╚═══╝╚══════╝", -] - - -def _read_picker_key(): - ch = sys.stdin.read(1) - if ch == "\x03": - return "interrupt" - if ch in ("\r", "\n"): - return "enter" - if ch == "\x1b": - c1 = sys.stdin.read(1) - if c1 != "[": - return None - c2 = sys.stdin.read(1) - if c2 == "A": - return "up" - if c2 == "B": - return "down" - return None - if ch in ("k", "K"): - return "up" - if ch in ("j", "J"): - return "down" - if ch in ("q", "Q"): - return "enter" - return None - - -def _draw_color_picker(themes_list, selected): - """Draw the color theme picker menu. 
- - Args: - themes_list: List of (theme_id, Theme) tuples from THEME_REGISTRY.items() - selected: Index of currently selected theme (0-2) - """ - print(CLR, end="") - print() - - print( - f" {G_HI}▼ COLOR THEME{RST} {W_GHOST}─ ↑/↓ or j/k to move, Enter/q to select{RST}" - ) - print(f" {W_GHOST}{'─' * (tw() - 4)}{RST}\n") - - for i, (theme_id, theme) in enumerate(themes_list): - prefix = " ▶ " if i == selected else " " - color = G_HI if i == selected else "" - reset = "" if i == selected else W_GHOST - print(f"{prefix}{color}{theme.name}{reset}") - - print() - - -def _normalize_preview_rows(rows): - """Trim shared left padding and trailing spaces for stable on-screen previews.""" - non_empty = [r for r in rows if r.strip()] - if not non_empty: - return [""] - left_pad = min(len(r) - len(r.lstrip(" ")) for r in non_empty) - out = [] - for row in rows: - if left_pad < len(row): - out.append(row[left_pad:].rstrip()) - else: - out.append(row.rstrip()) - return out - - -def _draw_font_picker(faces, selected): - w = tw() - h = 24 - try: - h = os.get_terminal_size().lines - except Exception: - pass - - max_preview_w = max(24, w - 8) - header_h = 6 - footer_h = 3 - preview_h = max(4, min(config.RENDER_H + 2, max(4, h // 2))) - visible = max(1, h - header_h - preview_h - footer_h) - top = max(0, selected - (visible // 2)) - bottom = min(len(faces), top + visible) - top = max(0, bottom - visible) - - print(CLR, end="") - print(CURSOR_OFF, end="") - print() - print(f" {G_HI}FONT PICKER{RST}") - print(f" {W_GHOST}{'─' * (w - 4)}{RST}") - print(f" {W_DIM}{config.FONT_DIR[:max_preview_w]}{RST}") - print(f" {W_GHOST}↑/↓ move · Enter select · q accept current{RST}") - print() - - for pos in range(top, bottom): - face = faces[pos] - active = pos == selected - pointer = "▶" if active else " " - color = G_HI if active else W_DIM - print( - f" {color}{pointer} {face['name']}{RST}{W_GHOST} · {face['file_name']}{RST}" - ) - - if top > 0: - print(f" {W_GHOST}… {top} above{RST}") - if 
bottom < len(faces): - print(f" {W_GHOST}… {len(faces) - bottom} below{RST}") - - print() - print(f" {W_GHOST}{'─' * (w - 4)}{RST}") - print( - f" {W_DIM}Preview: {faces[selected]['name']} · {faces[selected]['file_name']}{RST}" - ) - preview_rows = faces[selected]["preview_rows"][:preview_h] - for row in preview_rows: - shown = row[:max_preview_w] - print(f" {shown}") - - -def pick_color_theme(): - """Interactive color theme picker. Defaults to 'green' if not TTY. - - Displays a menu of available themes and lets user select with arrow keys. - Non-interactive environments (piped stdin, CI) silently default to green. - """ - # Non-interactive fallback - if not sys.stdin.isatty(): - config.set_active_theme("green") - return - - # Interactive picker - themes_list = list(themes.THEME_REGISTRY.items()) - selected = 0 - - fd = sys.stdin.fileno() - old_settings = termios.tcgetattr(fd) - try: - tty.setcbreak(fd) - while True: - _draw_color_picker(themes_list, selected) - key = _read_picker_key() - if key == "up": - selected = max(0, selected - 1) - elif key == "down": - selected = min(len(themes_list) - 1, selected + 1) - elif key == "enter": - break - elif key == "interrupt": - raise KeyboardInterrupt - finally: - termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) - - selected_theme_id = themes_list[selected][0] - config.set_active_theme(selected_theme_id) - - theme_name = themes_list[selected][1].name - print(f" {G_DIM}> using {theme_name}{RST}") - time.sleep(0.8) - print(CLR, end="") - print(CURSOR_OFF, end="") - print() - - -def pick_font_face(): - """Interactive startup picker for selecting a face from repo OTF files.""" - if not config.FONT_PICKER: - return - - font_files = config.list_repo_font_files() - if not font_files: - print(CLR, end="") - print(CURSOR_OFF, end="") - print() - print(f" {G_HI}FONT PICKER{RST}") - print(f" {W_GHOST}{'─' * (tw() - 4)}{RST}") - print(f" {G_DIM}> no .otf/.ttf/.ttc files found in: {config.FONT_DIR}{RST}") - print(f" {W_GHOST}> 
add font files to the fonts folder, then rerun{RST}") - time.sleep(1.8) - sys.exit(1) - - prepared = [] - for font_path in font_files: - try: - faces = render.list_font_faces(font_path, max_faces=64) - except Exception: - fallback = os.path.splitext(os.path.basename(font_path))[0] - faces = [{"index": 0, "name": fallback}] - for face in faces: - idx = face["index"] - name = face["name"] - file_name = os.path.basename(font_path) - try: - fnt = render.load_font_face(font_path, idx) - rows = _normalize_preview_rows(render.render_line(name, fnt)) - except Exception: - rows = ["(preview unavailable)"] - prepared.append( - { - "font_path": font_path, - "font_index": idx, - "name": name, - "file_name": file_name, - "preview_rows": rows, - } - ) - - if not prepared: - print(CLR, end="") - print(CURSOR_OFF, end="") - print() - print(f" {G_HI}FONT PICKER{RST}") - print(f" {W_GHOST}{'─' * (tw() - 4)}{RST}") - print(f" {G_DIM}> no readable font faces found in: {config.FONT_DIR}{RST}") - time.sleep(1.8) - sys.exit(1) - - def _same_path(a, b): - try: - return os.path.samefile(a, b) - except Exception: - return os.path.abspath(a) == os.path.abspath(b) - - selected = next( - ( - i - for i, f in enumerate(prepared) - if _same_path(f["font_path"], config.FONT_PATH) - and f["font_index"] == config.FONT_INDEX - ), - 0, - ) - - if not sys.stdin.isatty(): - selected_font = prepared[selected] - config.set_font_selection( - font_path=selected_font["font_path"], - font_index=selected_font["font_index"], - ) - render.clear_font_cache() - print( - f" {G_DIM}> using {selected_font['name']} ({selected_font['file_name']}){RST}" - ) - time.sleep(0.8) - print(CLR, end="") - print(CURSOR_OFF, end="") - print() - return - - fd = sys.stdin.fileno() - old_settings = termios.tcgetattr(fd) - try: - tty.setcbreak(fd) - while True: - _draw_font_picker(prepared, selected) - key = _read_picker_key() - if key == "up": - selected = max(0, selected - 1) - elif key == "down": - selected = min(len(prepared) - 
1, selected + 1) - elif key == "enter": - break - elif key == "interrupt": - raise KeyboardInterrupt - finally: - termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) - - selected_font = prepared[selected] - config.set_font_selection( - font_path=selected_font["font_path"], - font_index=selected_font["font_index"], - ) - render.clear_font_cache() - print( - f" {G_DIM}> using {selected_font['name']} ({selected_font['file_name']}){RST}" - ) - time.sleep(0.8) - print(CLR, end="") - print(CURSOR_OFF, end="") - print() - - -def main(): - atexit.register(lambda: print(CURSOR_ON, end="", flush=True)) - - def handle_sigint(*_): - print(f"\n\n {G_DIM}> SIGNAL LOST{RST}") - print(f" {W_GHOST}> connection terminated{RST}\n") - sys.exit(0) - - signal.signal(signal.SIGINT, handle_sigint) - - w = tw() - print(CLR, end="") - print(CURSOR_OFF, end="") - pick_color_theme() - pick_font_face() - w = tw() - print() - time.sleep(0.4) - - for ln in TITLE: - print(f"{G_HI}{ln}{RST}") - time.sleep(0.07) - - print() - _subtitle = { - "poetry": "literary consciousness stream", - "code": "source consciousness stream", - }.get(config.MODE, "digital consciousness stream") - print(f" {W_DIM}v0.1 · {_subtitle}{RST}") - print(f" {W_GHOST}{'─' * (w - 4)}{RST}") - print() - time.sleep(0.4) - - cached = load_cache() if "--refresh" not in sys.argv else None - if cached: - items = cached - boot_ln("Cache", f"LOADED [{len(items)} SIGNALS]", True) - elif config.MODE == "poetry": - slow_print(" > INITIALIZING LITERARY CORPUS...\n") - time.sleep(0.2) - print() - items, linked, failed = fetch_poetry() - print() - print( - f" {G_DIM}>{RST} {G_MID}{linked} TEXTS LOADED{RST} {W_GHOST}· {failed} DARK{RST}" - ) - print(f" {G_DIM}>{RST} {G_MID}{len(items)} STANZAS ACQUIRED{RST}") - save_cache(items) - elif config.MODE == "code": - from engine.fetch_code import fetch_code - - slow_print(" > INITIALIZING SOURCE ARRAY...\n") - time.sleep(0.2) - print() - items, line_count, _ = fetch_code() - print() - print(f" 
{G_DIM}>{RST} {G_MID}{line_count} LINES ACQUIRED{RST}") - else: - slow_print(" > INITIALIZING FEED ARRAY...\n") - time.sleep(0.2) - print() - items, linked, failed = fetch_all() - print() - print( - f" {G_DIM}>{RST} {G_MID}{linked} SOURCES LINKED{RST} {W_GHOST}· {failed} DARK{RST}" - ) - print(f" {G_DIM}>{RST} {G_MID}{len(items)} SIGNALS ACQUIRED{RST}") - save_cache(items) - - if not items: - print(f"\n {W_DIM}> NO SIGNAL — check network{RST}") - sys.exit(1) - - print() - mic = MicMonitor(threshold_db=config.MIC_THRESHOLD_DB) - mic_ok = mic.start() - if mic.available: - boot_ln( - "Microphone", - "ACTIVE" - if mic_ok - else "OFFLINE · check System Settings → Privacy → Microphone", - bool(mic_ok), - ) - - ntfy = NtfyPoller( - config.NTFY_TOPIC, - reconnect_delay=config.NTFY_RECONNECT_DELAY, - display_secs=config.MESSAGE_DISPLAY_SECS, - ) - ntfy_ok = ntfy.start() - boot_ln("ntfy", "LISTENING" if ntfy_ok else "OFFLINE", ntfy_ok) - - if config.FIREHOSE: - boot_ln("Firehose", "ENGAGED", True) - - if config.FIGMENT: - try: - from effects_plugins.figment import FigmentEffect # noqa: F401 - - boot_ln("Figment", f"ARMED [{config.FIGMENT_INTERVAL}s interval]", True) - except (ImportError, OSError): - boot_ln("Figment", "UNAVAILABLE — run: brew install cairo", False) - - time.sleep(0.4) - slow_print(" > STREAMING...\n") - time.sleep(0.2) - print(f" {W_GHOST}{'─' * (w - 4)}{RST}") - print() - time.sleep(0.4) - - stream(items, ntfy, mic) - - print() - print(f" {W_GHOST}{'─' * (tw() - 4)}{RST}") - print(f" {G_DIM}> {config.HEADLINE_LIMIT} SIGNALS PROCESSED{RST}") - print(f" {W_GHOST}> end of stream{RST}") - print() +if __name__ == "__main__": + main() diff --git a/engine/app/__init__.py b/engine/app/__init__.py new file mode 100644 index 0000000..9f5cf65 --- /dev/null +++ b/engine/app/__init__.py @@ -0,0 +1,34 @@ +""" +Application orchestrator — pipeline mode entry point. 
+ +This package contains the main application logic for the pipeline mode, +including pipeline construction, UI controller setup, and the main render loop. +""" + +# Re-export from engine for backward compatibility with tests +# Re-export effects plugins for backward compatibility with tests +import engine.effects.plugins as effects_plugins +from engine import config + +# Re-export display registry for backward compatibility with tests +from engine.display import DisplayRegistry + +# Re-export fetch functions for backward compatibility with tests +from engine.fetch import fetch_all, fetch_poetry, load_cache +from engine.pipeline import list_presets + +from .main import main, run_pipeline_mode_direct +from .pipeline_runner import run_pipeline_mode + +__all__ = [ + "config", + "list_presets", + "main", + "run_pipeline_mode", + "run_pipeline_mode_direct", + "fetch_all", + "fetch_poetry", + "load_cache", + "DisplayRegistry", + "effects_plugins", +] diff --git a/engine/app/main.py b/engine/app/main.py new file mode 100644 index 0000000..483686b --- /dev/null +++ b/engine/app/main.py @@ -0,0 +1,457 @@ +""" +Main entry point and CLI argument parsing for the application. 
+""" + +import sys +import time + +from engine import config +from engine.display import BorderMode, DisplayRegistry +from engine.effects import get_registry +from engine.fetch import fetch_all, fetch_all_fast, fetch_poetry, load_cache, save_cache +from engine.pipeline import ( + Pipeline, + PipelineConfig, + PipelineContext, + list_presets, +) +from engine.pipeline.adapters import ( + CameraStage, + DataSourceStage, + EffectPluginStage, + create_stage_from_display, + create_stage_from_effect, +) +from engine.pipeline.params import PipelineParams +from engine.pipeline.ui import UIConfig, UIPanel +from engine.pipeline.validation import validate_pipeline_config + +try: + from engine.display.backends.websocket import WebSocketDisplay +except ImportError: + WebSocketDisplay = None + +from .pipeline_runner import run_pipeline_mode + + +def main(): + """Main entry point - all modes now use presets or CLI construction.""" + if config.PIPELINE_DIAGRAM: + try: + from engine.pipeline import generate_pipeline_diagram + except ImportError: + print("Error: pipeline diagram not available") + return + print(generate_pipeline_diagram()) + return + + # Check for direct pipeline construction flags + if "--pipeline-source" in sys.argv: + # Construct pipeline directly from CLI args + run_pipeline_mode_direct() + return + + preset_name = None + + if config.PRESET: + preset_name = config.PRESET + elif config.PIPELINE_MODE: + preset_name = config.PIPELINE_PRESET + else: + preset_name = "demo" + + available = list_presets() + if preset_name not in available: + print(f"Error: Unknown preset '{preset_name}'") + print(f"Available presets: {', '.join(available)}") + sys.exit(1) + + run_pipeline_mode(preset_name) + + +def run_pipeline_mode_direct(): + """Construct and run a pipeline directly from CLI arguments. 
+ + Usage: + python -m engine.app --pipeline-source headlines --pipeline-effects noise,fade --display null + python -m engine.app --pipeline-source fixture --pipeline-effects glitch --pipeline-ui --display null + + Flags: + --pipeline-source : Headlines, fixture, poetry, empty, pipeline-inspect + --pipeline-effects : Comma-separated list (noise, fade, glitch, firehose, hud, tint, border, crop) + --pipeline-camera : scroll, feed, horizontal, omni, floating, bounce + --pipeline-display : terminal, pygame, websocket, null, multi:term,pygame + --pipeline-ui: Enable UI panel (BorderMode.UI) + --pipeline-border : off, simple, ui + """ + import engine.effects.plugins as effects_plugins + from engine.camera import Camera + from engine.data_sources.pipeline_introspection import PipelineIntrospectionSource + from engine.data_sources.sources import EmptyDataSource, ListDataSource + from engine.pipeline.adapters import ( + FontStage, + ViewportFilterStage, + ) + + # Discover and register all effect plugins + effects_plugins.discover_plugins() + + # Parse CLI arguments + source_name = None + effect_names = [] + camera_type = None + display_name = None + ui_enabled = False + border_mode = BorderMode.OFF + source_items = None + allow_unsafe = False + viewport_width = None + viewport_height = None + + i = 1 + argv = sys.argv + while i < len(argv): + arg = argv[i] + if arg == "--pipeline-source" and i + 1 < len(argv): + source_name = argv[i + 1] + i += 2 + elif arg == "--pipeline-effects" and i + 1 < len(argv): + effect_names = [e.strip() for e in argv[i + 1].split(",") if e.strip()] + i += 2 + elif arg == "--pipeline-camera" and i + 1 < len(argv): + camera_type = argv[i + 1] + i += 2 + elif arg == "--viewport" and i + 1 < len(argv): + vp = argv[i + 1] + try: + viewport_width, viewport_height = map(int, vp.split("x")) + except ValueError: + print("Error: Invalid viewport format. 
Use WxH (e.g., 40x15)") + sys.exit(1) + i += 2 + elif arg == "--pipeline-display" and i + 1 < len(argv): + display_name = argv[i + 1] + i += 2 + elif arg == "--pipeline-ui": + ui_enabled = True + i += 1 + elif arg == "--pipeline-border" and i + 1 < len(argv): + mode = argv[i + 1] + if mode == "simple": + border_mode = True + elif mode == "ui": + border_mode = BorderMode.UI + else: + border_mode = False + i += 2 + elif arg == "--allow-unsafe": + allow_unsafe = True + i += 1 + else: + i += 1 + + if not source_name: + print("Error: --pipeline-source is required") + print( + "Usage: python -m engine.app --pipeline-source [--pipeline-effects ] ..." + ) + sys.exit(1) + + print(" \033[38;5;245mDirect pipeline construction\033[0m") + print(f" Source: {source_name}") + print(f" Effects: {effect_names}") + print(f" Camera: {camera_type}") + print(f" Display: {display_name}") + print(f" UI Enabled: {ui_enabled}") + + # Create initial config and params + params = PipelineParams() + params.source = source_name + params.camera_mode = camera_type if camera_type is not None else "" + params.effect_order = effect_names + params.border = border_mode + + # Create minimal config for validation + config_obj = PipelineConfig( + source=source_name, + display=display_name or "", # Will be filled by validation + camera=camera_type if camera_type is not None else "", + effects=effect_names, + ) + + # Run MVP validation + result = validate_pipeline_config(config_obj, params, allow_unsafe=allow_unsafe) + + if result.warnings and not allow_unsafe: + print(" \033[38;5;226mWarning: MVP validation found issues:\033[0m") + for warning in result.warnings: + print(f" - {warning}") + + if result.changes: + print(" \033[38;5;226mApplied MVP defaults:\033[0m") + for change in result.changes: + print(f" {change}") + + if not result.valid: + print( + " \033[38;5;196mPipeline configuration invalid and could not be fixed\033[0m" + ) + sys.exit(1) + + # Show MVP summary + print(" \033[38;5;245mMVP 
Configuration:\033[0m") + print(f" Source: {result.config.source}") + print(f" Display: {result.config.display}") + print(f" Camera: {result.config.camera or 'static (none)'}") + print(f" Effects: {result.config.effects if result.config.effects else 'none'}") + print(f" Border: {result.params.border}") + + # Load source items + if source_name == "headlines": + cached = load_cache() + if cached: + source_items = cached + else: + source_items = fetch_all_fast() + if source_items: + import threading + + def background_fetch(): + full_items, _, _ = fetch_all() + save_cache(full_items) + + background_thread = threading.Thread( + target=background_fetch, daemon=True + ) + background_thread.start() + elif source_name == "fixture": + source_items = load_cache() + if not source_items: + print(" \033[38;5;196mNo fixture cache available\033[0m") + sys.exit(1) + elif source_name == "poetry": + source_items, _, _ = fetch_poetry() + elif source_name == "empty" or source_name == "pipeline-inspect": + source_items = [] + else: + print(f" \033[38;5;196mUnknown source: {source_name}\033[0m") + sys.exit(1) + + if source_items is not None: + print(f" \033[38;5;82mLoaded {len(source_items)} items\033[0m") + + # Set border mode + if ui_enabled: + border_mode = BorderMode.UI + + # Build pipeline using validated config and params + params = result.params + params.viewport_width = viewport_width if viewport_width is not None else 80 + params.viewport_height = viewport_height if viewport_height is not None else 24 + + ctx = PipelineContext() + ctx.params = params + + # Create display using validated display name + display_name = result.config.display or "terminal" # Default to terminal if empty + display = DisplayRegistry.create(display_name) + if not display: + print(f" \033[38;5;196mFailed to create display: {display_name}\033[0m") + sys.exit(1) + display.init(0, 0) + + # Create pipeline using validated config + pipeline = Pipeline(config=result.config, context=ctx) + + # Add stages + # 
Source stage + if source_name == "pipeline-inspect": + introspection_source = PipelineIntrospectionSource( + pipeline=None, + viewport_width=params.viewport_width, + viewport_height=params.viewport_height, + ) + pipeline.add_stage( + "source", DataSourceStage(introspection_source, name="pipeline-inspect") + ) + elif source_name == "empty": + empty_source = EmptyDataSource( + width=params.viewport_width, height=params.viewport_height + ) + pipeline.add_stage("source", DataSourceStage(empty_source, name="empty")) + else: + list_source = ListDataSource(source_items, name=source_name) + pipeline.add_stage("source", DataSourceStage(list_source, name=source_name)) + + # Add viewport filter and font for headline sources + if source_name in ["headlines", "poetry", "fixture"]: + pipeline.add_stage( + "viewport_filter", ViewportFilterStage(name="viewport-filter") + ) + pipeline.add_stage("font", FontStage(name="font")) + else: + # Fallback to simple conversion for other sources + from engine.pipeline.adapters import SourceItemsToBufferStage + + pipeline.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer")) + + # Add camera + speed = getattr(params, "camera_speed", 1.0) + camera = None + if camera_type == "feed": + camera = Camera.feed(speed=speed) + elif camera_type == "scroll": + camera = Camera.scroll(speed=speed) + elif camera_type == "horizontal": + camera = Camera.horizontal(speed=speed) + elif camera_type == "omni": + camera = Camera.omni(speed=speed) + elif camera_type == "floating": + camera = Camera.floating(speed=speed) + elif camera_type == "bounce": + camera = Camera.bounce(speed=speed) + + if camera: + pipeline.add_stage("camera", CameraStage(camera, name=camera_type)) + + # Add effects + effect_registry = get_registry() + for effect_name in effect_names: + effect = effect_registry.get(effect_name) + if effect: + pipeline.add_stage( + f"effect_{effect_name}", create_stage_from_effect(effect, effect_name) + ) + + # Add display + 
pipeline.add_stage("display", create_stage_from_display(display, display_name)) + + pipeline.build() + + if not pipeline.initialize(): + print(" \033[38;5;196mFailed to initialize pipeline\033[0m") + sys.exit(1) + + # Create UI panel if border mode is UI + ui_panel = None + if params.border == BorderMode.UI: + ui_panel = UIPanel(UIConfig(panel_width=24, start_with_preset_picker=True)) + # Enable raw mode for terminal input if supported + if hasattr(display, "set_raw_mode"): + display.set_raw_mode(True) + for stage in pipeline.stages.values(): + if isinstance(stage, EffectPluginStage): + effect = stage._effect + enabled = effect.config.enabled if hasattr(effect, "config") else True + stage_control = ui_panel.register_stage(stage, enabled=enabled) + stage_control.effect = effect # type: ignore[attr-defined] + + if ui_panel.stages: + first_stage = next(iter(ui_panel.stages)) + ui_panel.select_stage(first_stage) + ctrl = ui_panel.stages[first_stage] + if hasattr(ctrl, "effect"): + effect = ctrl.effect + if hasattr(effect, "config"): + config = effect.config + try: + import dataclasses + + if dataclasses.is_dataclass(config): + for field_name, field_obj in dataclasses.fields(config): + if field_name == "enabled": + continue + value = getattr(config, field_name, None) + if value is not None: + ctrl.params[field_name] = value + ctrl.param_schema[field_name] = { + "type": type(value).__name__, + "min": 0 + if isinstance(value, (int, float)) + else None, + "max": 1 if isinstance(value, float) else None, + "step": 0.1 if isinstance(value, float) else 1, + } + except Exception: + pass + + # Run pipeline loop + from engine.display import render_ui_panel + + ctx.set("display", display) + ctx.set("items", source_items) + ctx.set("pipeline", pipeline) + ctx.set("pipeline_order", pipeline.execution_order) + + current_width = params.viewport_width + current_height = params.viewport_height + + # Only get dimensions from display if viewport wasn't explicitly set + if "--viewport" not 
in sys.argv and hasattr(display, "get_dimensions"): + current_width, current_height = display.get_dimensions() + params.viewport_width = current_width + params.viewport_height = current_height + + print(" \033[38;5;82mStarting pipeline...\033[0m") + print(" \033[38;5;245mPress Ctrl+C to exit\033[0m\n") + + try: + frame = 0 + while True: + params.frame_number = frame + ctx.params = params + + result = pipeline.execute(source_items) + if not result.success: + error_msg = f" ({result.error})" if result.error else "" + print(f" \033[38;5;196mPipeline execution failed{error_msg}\033[0m") + break + + # Render with UI panel + if ui_panel is not None: + buf = render_ui_panel( + result.data, current_width, current_height, ui_panel + ) + display.show(buf, border=False) + else: + display.show(result.data, border=border_mode) + + # Handle keyboard events if UI is enabled + if ui_panel is not None: + # Try pygame first + if hasattr(display, "_pygame"): + try: + import pygame + + for event in pygame.event.get(): + if event.type == pygame.KEYDOWN: + ui_panel.process_key_event(event.key, event.mod) + except (ImportError, Exception): + pass + # Try terminal input + elif hasattr(display, "get_input_keys"): + try: + keys = display.get_input_keys() + for key in keys: + ui_panel.process_key_event(key, 0) + except Exception: + pass + + # Check for quit request + if hasattr(display, "is_quit_requested") and display.is_quit_requested(): + if hasattr(display, "clear_quit_request"): + display.clear_quit_request() + raise KeyboardInterrupt() + + time.sleep(1 / 60) + frame += 1 + + except KeyboardInterrupt: + pipeline.cleanup() + display.cleanup() + print("\n \033[38;5;245mPipeline stopped\033[0m") + return + + pipeline.cleanup() + display.cleanup() + print("\n \033[38;5;245mPipeline stopped\033[0m") diff --git a/engine/app/pipeline_runner.py b/engine/app/pipeline_runner.py new file mode 100644 index 0000000..95bf161 --- /dev/null +++ b/engine/app/pipeline_runner.py @@ -0,0 +1,852 @@ +""" 
+Pipeline runner - handles preset-based pipeline construction and execution. +""" + +import sys +import time +from typing import Any + +from engine.display import BorderMode, DisplayRegistry +from engine.effects import get_registry +from engine.fetch import fetch_all, fetch_all_fast, fetch_poetry, load_cache, save_cache +from engine.pipeline import Pipeline, PipelineConfig, PipelineContext, get_preset +from engine.pipeline.adapters import ( + EffectPluginStage, + SourceItemsToBufferStage, + create_stage_from_display, + create_stage_from_effect, +) +from engine.pipeline.ui import UIConfig, UIPanel + +try: + from engine.display.backends.websocket import WebSocketDisplay +except ImportError: + WebSocketDisplay = None + + +def _handle_pipeline_mutation(pipeline: Pipeline, command: dict) -> bool: + """Handle pipeline mutation commands from WebSocket or other external control. + + Args: + pipeline: The pipeline to mutate + command: Command dictionary with 'action' and other parameters + + Returns: + True if command was successfully handled, False otherwise + """ + action = command.get("action") + + if action == "add_stage": + # For now, this just returns True to acknowledge the command + # In a full implementation, we'd need to create the appropriate stage + print(f" [Pipeline] add_stage command received: {command}") + return True + + elif action == "remove_stage": + stage_name = command.get("stage") + if stage_name: + result = pipeline.remove_stage(stage_name) + print(f" [Pipeline] Removed stage '{stage_name}': {result is not None}") + return result is not None + + elif action == "replace_stage": + stage_name = command.get("stage") + # For now, this just returns True to acknowledge the command + print(f" [Pipeline] replace_stage command received: {command}") + return True + + elif action == "swap_stages": + stage1 = command.get("stage1") + stage2 = command.get("stage2") + if stage1 and stage2: + result = pipeline.swap_stages(stage1, stage2) + print(f" [Pipeline] Swapped 
stages '{stage1}' and '{stage2}': {result}") + return result + + elif action == "move_stage": + stage_name = command.get("stage") + after = command.get("after") + before = command.get("before") + if stage_name: + result = pipeline.move_stage(stage_name, after, before) + print(f" [Pipeline] Moved stage '{stage_name}': {result}") + return result + + elif action == "enable_stage": + stage_name = command.get("stage") + if stage_name: + result = pipeline.enable_stage(stage_name) + print(f" [Pipeline] Enabled stage '{stage_name}': {result}") + return result + + elif action == "disable_stage": + stage_name = command.get("stage") + if stage_name: + result = pipeline.disable_stage(stage_name) + print(f" [Pipeline] Disabled stage '{stage_name}': {result}") + return result + + elif action == "cleanup_stage": + stage_name = command.get("stage") + if stage_name: + pipeline.cleanup_stage(stage_name) + print(f" [Pipeline] Cleaned up stage '{stage_name}'") + return True + + elif action == "can_hot_swap": + stage_name = command.get("stage") + if stage_name: + can_swap = pipeline.can_hot_swap(stage_name) + print(f" [Pipeline] Can hot-swap '{stage_name}': {can_swap}") + return True + + return False + + +def run_pipeline_mode(preset_name: str = "demo"): + """Run using the new unified pipeline architecture.""" + import engine.effects.plugins as effects_plugins + from engine.effects import PerformanceMonitor, set_monitor + + print(" \033[1;38;5;46mPIPELINE MODE\033[0m") + print(" \033[38;5;245mUsing unified pipeline architecture\033[0m") + + effects_plugins.discover_plugins() + + monitor = PerformanceMonitor() + set_monitor(monitor) + + preset = get_preset(preset_name) + if not preset: + print(f" \033[38;5;196mUnknown preset: {preset_name}\033[0m") + sys.exit(1) + + print(f" \033[38;5;245mPreset: {preset.name} - {preset.description}\033[0m") + + params = preset.to_params() + # Use preset viewport if available, else default to 80x24 + params.viewport_width = getattr(preset, 
"viewport_width", 80) + params.viewport_height = getattr(preset, "viewport_height", 24) + + if "--viewport" in sys.argv: + idx = sys.argv.index("--viewport") + if idx + 1 < len(sys.argv): + vp = sys.argv[idx + 1] + try: + params.viewport_width, params.viewport_height = map(int, vp.split("x")) + except ValueError: + print("Error: Invalid viewport format. Use WxH (e.g., 40x15)") + sys.exit(1) + + pipeline = Pipeline(config=preset.to_config()) + + print(" \033[38;5;245mFetching content...\033[0m") + + # Handle special sources that don't need traditional fetching + introspection_source = None + if preset.source == "pipeline-inspect": + items = [] + print(" \033[38;5;245mUsing pipeline introspection source\033[0m") + elif preset.source == "empty": + items = [] + print(" \033[38;5;245mUsing empty source (no content)\033[0m") + elif preset.source == "fixture": + items = load_cache() + if not items: + print(" \033[38;5;196mNo fixture cache available\033[0m") + sys.exit(1) + print(f" \033[38;5;82mLoaded {len(items)} items from fixture\033[0m") + else: + cached = load_cache() + if cached: + items = cached + print(f" \033[38;5;82mLoaded {len(items)} items from cache\033[0m") + elif preset.source == "poetry": + items, _, _ = fetch_poetry() + else: + items = fetch_all_fast() + if items: + print( + f" \033[38;5;82mFast start: {len(items)} items from first 5 sources\033[0m" + ) + + import threading + + def background_fetch(): + full_items, _, _ = fetch_all() + save_cache(full_items) + + background_thread = threading.Thread(target=background_fetch, daemon=True) + background_thread.start() + + if not items: + print(" \033[38;5;196mNo content available\033[0m") + sys.exit(1) + + print(f" \033[38;5;82mLoaded {len(items)} items\033[0m") + + # CLI --display flag takes priority over preset + # Check if --display was explicitly provided + display_name = preset.display + if "--display" in sys.argv: + idx = sys.argv.index("--display") + if idx + 1 < len(sys.argv): + display_name = 
sys.argv[idx + 1] + + display = DisplayRegistry.create(display_name) + if not display and not display_name.startswith("multi"): + print(f" \033[38;5;196mFailed to create display: {display_name}\033[0m") + sys.exit(1) + + # Handle multi display (format: "multi:terminal,pygame") + if not display and display_name.startswith("multi"): + parts = display_name[6:].split( + "," + ) # "multi:terminal,pygame" -> ["terminal", "pygame"] + display = DisplayRegistry.create_multi(parts) + if not display: + print(f" \033[38;5;196mFailed to create multi display: {parts}\033[0m") + sys.exit(1) + + if not display: + print(f" \033[38;5;196mFailed to create display: {display_name}\033[0m") + sys.exit(1) + + display.init(0, 0) + + # Determine if we need UI controller for WebSocket or border=UI + need_ui_controller = False + web_control_active = False + if WebSocketDisplay and isinstance(display, WebSocketDisplay): + need_ui_controller = True + web_control_active = True + elif isinstance(params.border, BorderMode) and params.border == BorderMode.UI: + need_ui_controller = True + + effect_registry = get_registry() + + # Create source stage based on preset source type + if preset.source == "pipeline-inspect": + from engine.data_sources.pipeline_introspection import ( + PipelineIntrospectionSource, + ) + from engine.pipeline.adapters import DataSourceStage + + introspection_source = PipelineIntrospectionSource( + pipeline=None, # Will be set after pipeline.build() + viewport_width=80, + viewport_height=24, + ) + pipeline.add_stage( + "source", DataSourceStage(introspection_source, name="pipeline-inspect") + ) + elif preset.source == "empty": + from engine.data_sources.sources import EmptyDataSource + from engine.pipeline.adapters import DataSourceStage + + empty_source = EmptyDataSource(width=80, height=24) + pipeline.add_stage("source", DataSourceStage(empty_source, name="empty")) + else: + from engine.data_sources.sources import ListDataSource + from engine.pipeline.adapters import 
DataSourceStage + + list_source = ListDataSource(items, name=preset.source) + pipeline.add_stage("source", DataSourceStage(list_source, name=preset.source)) + + # Add camera state update stage if specified in preset (must run before viewport filter) + camera = None + if preset.camera: + from engine.camera import Camera + from engine.pipeline.adapters import CameraClockStage, CameraStage + + speed = getattr(preset, "camera_speed", 1.0) + if preset.camera == "feed": + camera = Camera.feed(speed=speed) + elif preset.camera == "scroll": + camera = Camera.scroll(speed=speed) + elif preset.camera == "vertical": + camera = Camera.scroll(speed=speed) # Backwards compat + elif preset.camera == "horizontal": + camera = Camera.horizontal(speed=speed) + elif preset.camera == "omni": + camera = Camera.omni(speed=speed) + elif preset.camera == "floating": + camera = Camera.floating(speed=speed) + elif preset.camera == "bounce": + camera = Camera.bounce(speed=speed) + elif preset.camera == "radial": + camera = Camera.radial(speed=speed) + elif preset.camera == "static" or preset.camera == "": + # Static camera: no movement, but provides camera_y=0 for viewport filter + camera = Camera.scroll(speed=0.0) # Speed 0 = no movement + camera.set_canvas_size(200, 200) + + if camera: + # Add camera update stage to ensure camera_y is available for viewport filter + pipeline.add_stage( + "camera_update", CameraClockStage(camera, name="camera-clock") + ) + + # Add FontStage for headlines/poetry (default for demo) + if preset.source in ["headlines", "poetry"]: + from engine.pipeline.adapters import FontStage, ViewportFilterStage + + # Add viewport filter to prevent rendering all items + pipeline.add_stage( + "viewport_filter", ViewportFilterStage(name="viewport-filter") + ) + pipeline.add_stage("font", FontStage(name="font")) + else: + # Fallback to simple conversion for other sources + pipeline.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer")) + + # Add camera stage if 
specified in preset (after font/render stage) + if camera: + pipeline.add_stage("camera", CameraStage(camera, name=preset.camera)) + + for effect_name in preset.effects: + effect = effect_registry.get(effect_name) + if effect: + pipeline.add_stage( + f"effect_{effect_name}", create_stage_from_effect(effect, effect_name) + ) + + pipeline.add_stage("display", create_stage_from_display(display, display_name)) + + pipeline.build() + + # For pipeline-inspect, set the pipeline after build to avoid circular dependency + if introspection_source is not None: + introspection_source.set_pipeline(pipeline) + + if not pipeline.initialize(): + print(" \033[38;5;196mFailed to initialize pipeline\033[0m") + sys.exit(1) + + # Initialize UI panel if needed (border mode or WebSocket control) + ui_panel = None + render_ui_panel_in_terminal = False + + if need_ui_controller: + from engine.display import render_ui_panel + + ui_panel = UIPanel(UIConfig(panel_width=24, start_with_preset_picker=True)) + + # Determine if we should render UI panel in terminal + # Only render if border mode is UI (not for WebSocket-only mode) + render_ui_panel_in_terminal = ( + isinstance(params.border, BorderMode) and params.border == BorderMode.UI + ) + + # Enable raw mode for terminal input if supported + if hasattr(display, "set_raw_mode"): + display.set_raw_mode(True) + + # Register effect plugin stages from pipeline for UI control + for stage in pipeline.stages.values(): + if isinstance(stage, EffectPluginStage): + effect = stage._effect + enabled = effect.config.enabled if hasattr(effect, "config") else True + stage_control = ui_panel.register_stage(stage, enabled=enabled) + # Store reference to effect for easier access + stage_control.effect = effect # type: ignore[attr-defined] + + # Select first stage by default + if ui_panel.stages: + first_stage = next(iter(ui_panel.stages)) + ui_panel.select_stage(first_stage) + # Populate param schema from EffectConfig if it's a dataclass + ctrl = 
ui_panel.stages[first_stage] + if hasattr(ctrl, "effect"): + effect = ctrl.effect + if hasattr(effect, "config"): + config = effect.config + # Try to get fields via dataclasses if available + try: + import dataclasses + + if dataclasses.is_dataclass(config): + for field_name, field_obj in dataclasses.fields(config): + if field_name == "enabled": + continue + value = getattr(config, field_name, None) + if value is not None: + ctrl.params[field_name] = value + ctrl.param_schema[field_name] = { + "type": type(value).__name__, + "min": 0 + if isinstance(value, (int, float)) + else None, + "max": 1 if isinstance(value, float) else None, + "step": 0.1 if isinstance(value, float) else 1, + } + except Exception: + pass # No dataclass fields, skip param UI + + # Set up callback for stage toggles + def on_stage_toggled(stage_name: str, enabled: bool): + """Update the actual stage's enabled state when UI toggles.""" + stage = pipeline.get_stage(stage_name) + if stage: + # Set stage enabled flag for pipeline execution + stage._enabled = enabled + # Also update effect config if it's an EffectPluginStage + if isinstance(stage, EffectPluginStage): + stage._effect.config.enabled = enabled + + # Broadcast state update if WebSocket is active + if web_control_active and isinstance(display, WebSocketDisplay): + state = display._get_state_snapshot() + if state: + display.broadcast_state(state) + + ui_panel.set_event_callback("stage_toggled", on_stage_toggled) + + # Set up callback for parameter changes + def on_param_changed(stage_name: str, param_name: str, value: Any): + """Update the effect config when UI adjusts a parameter.""" + stage = pipeline.get_stage(stage_name) + if stage and isinstance(stage, EffectPluginStage): + effect = stage._effect + if hasattr(effect, "config"): + setattr(effect.config, param_name, value) + # Mark effect as needing reconfiguration if it has a configure method + if hasattr(effect, "configure"): + try: + effect.configure(effect.config) + except 
Exception: + pass # Ignore reconfiguration errors + + # Broadcast state update if WebSocket is active + if web_control_active and isinstance(display, WebSocketDisplay): + state = display._get_state_snapshot() + if state: + display.broadcast_state(state) + + ui_panel.set_event_callback("param_changed", on_param_changed) + + # Set up preset list and handle preset changes + from engine.pipeline import list_presets + + ui_panel.set_presets(list_presets(), preset_name) + + # Connect WebSocket to UI panel for remote control + if web_control_active and isinstance(display, WebSocketDisplay): + display.set_controller(ui_panel) + + def handle_websocket_command(command: dict) -> None: + """Handle commands from WebSocket clients.""" + action = command.get("action") + + # Handle pipeline mutation commands directly + if action in ( + "add_stage", + "remove_stage", + "replace_stage", + "swap_stages", + "move_stage", + "enable_stage", + "disable_stage", + "cleanup_stage", + "can_hot_swap", + ): + result = _handle_pipeline_mutation(pipeline, command) + if result: + state = display._get_state_snapshot() + if state: + display.broadcast_state(state) + return + + # Handle UI panel commands + if ui_panel.execute_command(command): + # Broadcast updated state after command execution + state = display._get_state_snapshot() + if state: + display.broadcast_state(state) + + display.set_command_callback(handle_websocket_command) + + def on_preset_changed(preset_name: str): + """Handle preset change from UI - rebuild pipeline.""" + nonlocal \ + pipeline, \ + display, \ + items, \ + params, \ + ui_panel, \ + current_width, \ + current_height, \ + web_control_active, \ + render_ui_panel_in_terminal + + print(f" \033[38;5;245mSwitching to preset: {preset_name}\033[0m") + + # Save current UI panel state before rebuild + ui_state = ui_panel.save_state() if ui_panel else None + + try: + # Clean up old pipeline + pipeline.cleanup() + + # Get new preset + new_preset = get_preset(preset_name) + if not 
new_preset: + print(f" \033[38;5;196mUnknown preset: {preset_name}\033[0m") + return + + # Update params for new preset + params = new_preset.to_params() + params.viewport_width = current_width + params.viewport_height = current_height + + # Reconstruct pipeline configuration + new_config = PipelineConfig( + source=new_preset.source, + display=new_preset.display, + camera=new_preset.camera, + effects=new_preset.effects, + ) + + # Create new pipeline instance + pipeline = Pipeline(config=new_config, context=PipelineContext()) + + # Re-add stages (similar to initial construction) + # Source stage + if new_preset.source == "pipeline-inspect": + from engine.data_sources.pipeline_introspection import ( + PipelineIntrospectionSource, + ) + from engine.pipeline.adapters import DataSourceStage + + introspection_source = PipelineIntrospectionSource( + pipeline=None, + viewport_width=current_width, + viewport_height=current_height, + ) + pipeline.add_stage( + "source", + DataSourceStage(introspection_source, name="pipeline-inspect"), + ) + elif new_preset.source == "empty": + from engine.data_sources.sources import EmptyDataSource + from engine.pipeline.adapters import DataSourceStage + + empty_source = EmptyDataSource( + width=current_width, height=current_height + ) + pipeline.add_stage( + "source", DataSourceStage(empty_source, name="empty") + ) + elif new_preset.source == "fixture": + items = load_cache() + if not items: + print(" \033[38;5;196mNo fixture cache available\033[0m") + return + from engine.data_sources.sources import ListDataSource + from engine.pipeline.adapters import DataSourceStage + + list_source = ListDataSource(items, name="fixture") + pipeline.add_stage( + "source", DataSourceStage(list_source, name="fixture") + ) + else: + # Fetch or use cached items + cached = load_cache() + if cached: + items = cached + elif new_preset.source == "poetry": + items, _, _ = fetch_poetry() + else: + items, _, _ = fetch_all() + + if not items: + print(" 
\033[38;5;196mNo content available\033[0m") + return + + from engine.data_sources.sources import ListDataSource + from engine.pipeline.adapters import DataSourceStage + + list_source = ListDataSource(items, name=new_preset.source) + pipeline.add_stage( + "source", DataSourceStage(list_source, name=new_preset.source) + ) + + # Add viewport filter and font for headline/poetry sources + if new_preset.source in ["headlines", "poetry", "fixture"]: + from engine.pipeline.adapters import FontStage, ViewportFilterStage + + pipeline.add_stage( + "viewport_filter", ViewportFilterStage(name="viewport-filter") + ) + pipeline.add_stage("font", FontStage(name="font")) + + # Add camera if specified + if new_preset.camera: + from engine.camera import Camera + from engine.pipeline.adapters import CameraClockStage, CameraStage + + speed = getattr(new_preset, "camera_speed", 1.0) + camera = None + cam_type = new_preset.camera + if cam_type == "feed": + camera = Camera.feed(speed=speed) + elif cam_type == "scroll" or cam_type == "vertical": + camera = Camera.scroll(speed=speed) + elif cam_type == "horizontal": + camera = Camera.horizontal(speed=speed) + elif cam_type == "omni": + camera = Camera.omni(speed=speed) + elif cam_type == "floating": + camera = Camera.floating(speed=speed) + elif cam_type == "bounce": + camera = Camera.bounce(speed=speed) + elif cam_type == "radial": + camera = Camera.radial(speed=speed) + elif cam_type == "static" or cam_type == "": + # Static camera: no movement, but provides camera_y=0 for viewport filter + camera = Camera.scroll(speed=0.0) + camera.set_canvas_size(200, 200) + + if camera: + # Add camera update stage to ensure camera_y is available for viewport filter + pipeline.add_stage( + "camera_update", + CameraClockStage(camera, name="camera-clock"), + ) + pipeline.add_stage("camera", CameraStage(camera, name=cam_type)) + + # Add effects + effect_registry = get_registry() + for effect_name in new_preset.effects: + effect = 
effect_registry.get(effect_name) + if effect: + pipeline.add_stage( + f"effect_{effect_name}", + create_stage_from_effect(effect, effect_name), + ) + + # Add display (respect CLI override) + display_name = new_preset.display + if "--display" in sys.argv: + idx = sys.argv.index("--display") + if idx + 1 < len(sys.argv): + display_name = sys.argv[idx + 1] + + new_display = DisplayRegistry.create(display_name) + if not new_display and not display_name.startswith("multi"): + print( + f" \033[38;5;196mFailed to create display: {display_name}\033[0m" + ) + return + + if not new_display and display_name.startswith("multi"): + parts = display_name[6:].split(",") + new_display = DisplayRegistry.create_multi(parts) + if not new_display: + print( + f" \033[38;5;196mFailed to create multi display: {parts}\033[0m" + ) + return + + if not new_display: + print( + f" \033[38;5;196mFailed to create display: {display_name}\033[0m" + ) + return + + new_display.init(0, 0) + + pipeline.add_stage( + "display", create_stage_from_display(new_display, display_name) + ) + + pipeline.build() + + # Set pipeline for introspection source if needed + if ( + new_preset.source == "pipeline-inspect" + and introspection_source is not None + ): + introspection_source.set_pipeline(pipeline) + + if not pipeline.initialize(): + print(" \033[38;5;196mFailed to initialize pipeline\033[0m") + return + + # Replace global references with new pipeline and display + display = new_display + + # Reinitialize UI panel with new effect stages + # Update web_control_active for new display + web_control_active = WebSocketDisplay is not None and isinstance( + display, WebSocketDisplay + ) + # Update render_ui_panel_in_terminal + render_ui_panel_in_terminal = ( + isinstance(params.border, BorderMode) + and params.border == BorderMode.UI + ) + + if need_ui_controller: + ui_panel = UIPanel( + UIConfig(panel_width=24, start_with_preset_picker=True) + ) + for stage in pipeline.stages.values(): + if isinstance(stage, 
EffectPluginStage): + effect = stage._effect + enabled = ( + effect.config.enabled + if hasattr(effect, "config") + else True + ) + stage_control = ui_panel.register_stage( + stage, enabled=enabled + ) + stage_control.effect = effect # type: ignore[attr-defined] + + # Restore UI panel state if it was saved + if ui_state: + ui_panel.restore_state(ui_state) + + if ui_panel.stages: + first_stage = next(iter(ui_panel.stages)) + ui_panel.select_stage(first_stage) + ctrl = ui_panel.stages[first_stage] + if hasattr(ctrl, "effect"): + effect = ctrl.effect + if hasattr(effect, "config"): + config = effect.config + try: + import dataclasses + + if dataclasses.is_dataclass(config): + for field_name, field_obj in dataclasses.fields( + config + ): + if field_name == "enabled": + continue + value = getattr(config, field_name, None) + if value is not None: + ctrl.params[field_name] = value + ctrl.param_schema[field_name] = { + "type": type(value).__name__, + "min": 0 + if isinstance(value, (int, float)) + else None, + "max": 1 + if isinstance(value, float) + else None, + "step": 0.1 + if isinstance(value, float) + else 1, + } + except Exception: + pass + + # Reconnect WebSocket to UI panel if needed + if web_control_active and isinstance(display, WebSocketDisplay): + display.set_controller(ui_panel) + + def handle_websocket_command(command: dict) -> None: + """Handle commands from WebSocket clients.""" + if ui_panel.execute_command(command): + # Broadcast updated state after command execution + state = display._get_state_snapshot() + if state: + display.broadcast_state(state) + + display.set_command_callback(handle_websocket_command) + + # Broadcast initial state after preset change + state = display._get_state_snapshot() + if state: + display.broadcast_state(state) + + print(f" \033[38;5;82mPreset switched to {preset_name}\033[0m") + + except Exception as e: + print(f" \033[38;5;196mError switching preset: {e}\033[0m") + + ui_panel.set_event_callback("preset_changed", 
on_preset_changed) + + print(" \033[38;5;82mStarting pipeline...\033[0m") + print(" \033[38;5;245mPress Ctrl+C to exit\033[0m\n") + + ctx = pipeline.context + ctx.params = params + ctx.set("display", display) + ctx.set("items", items) + ctx.set("pipeline", pipeline) + ctx.set("pipeline_order", pipeline.execution_order) + ctx.set("camera_y", 0) + + current_width = params.viewport_width + current_height = params.viewport_height + + # Only get dimensions from display if viewport wasn't explicitly set + if "--viewport" not in sys.argv and hasattr(display, "get_dimensions"): + current_width, current_height = display.get_dimensions() + params.viewport_width = current_width + params.viewport_height = current_height + + try: + frame = 0 + while True: + params.frame_number = frame + ctx.params = params + + result = pipeline.execute(items) + if result.success: + # Handle UI panel compositing if enabled + if ui_panel is not None and render_ui_panel_in_terminal: + from engine.display import render_ui_panel + + buf = render_ui_panel( + result.data, + current_width, + current_height, + ui_panel, + fps=params.fps if hasattr(params, "fps") else 60.0, + frame_time=0.0, + ) + # Render with border=OFF since we already added borders + display.show(buf, border=False) + # Handle pygame events for UI + if display_name == "pygame": + import pygame + + for event in pygame.event.get(): + if event.type == pygame.KEYDOWN: + ui_panel.process_key_event(event.key, event.mod) + # If space toggled stage, we could rebuild here (TODO) + else: + # Normal border handling + show_border = ( + params.border if isinstance(params.border, bool) else False + ) + display.show(result.data, border=show_border) + + if hasattr(display, "is_quit_requested") and display.is_quit_requested(): + if hasattr(display, "clear_quit_request"): + display.clear_quit_request() + raise KeyboardInterrupt() + + if "--viewport" not in sys.argv and hasattr(display, "get_dimensions"): + new_w, new_h = display.get_dimensions() + if 
new_w != current_width or new_h != current_height: + current_width, current_height = new_w, new_h + params.viewport_width = current_width + params.viewport_height = current_height + + time.sleep(1 / 60) + frame += 1 + + except KeyboardInterrupt: + pipeline.cleanup() + display.cleanup() + print("\n \033[38;5;245mPipeline stopped\033[0m") + return + + pipeline.cleanup() + display.cleanup() + print("\n \033[38;5;245mPipeline stopped\033[0m") diff --git a/engine/benchmark.py b/engine/benchmark.py new file mode 100644 index 0000000..14788ca --- /dev/null +++ b/engine/benchmark.py @@ -0,0 +1,73 @@ +""" +Benchmark module for performance testing. + +Usage: + python -m engine.benchmark # Run all benchmarks + python -m engine.benchmark --hook # Run benchmarks in hook mode (for CI) + python -m engine.benchmark --displays null --iterations 20 +""" + +import argparse +import sys + + +def main(): + parser = argparse.ArgumentParser(description="Run performance benchmarks") + parser.add_argument( + "--hook", + action="store_true", + help="Run in hook mode (fail on regression)", + ) + parser.add_argument( + "--displays", + default="null", + help="Comma-separated list of displays to benchmark", + ) + parser.add_argument( + "--iterations", + type=int, + default=100, + help="Number of iterations per benchmark", + ) + args = parser.parse_args() + + # Run pytest with benchmark markers + pytest_args = [ + "-v", + "-m", + "benchmark", + ] + + if args.hook: + # Hook mode: stricter settings + pytest_args.extend( + [ + "--benchmark-only", + "--benchmark-compare", + "--benchmark-compare-fail=min:5%", # Fail if >5% slower + ] + ) + + # Add display filter if specified + if args.displays: + pytest_args.extend(["-k", args.displays]) + + # Add iterations + if args.iterations: + # Set environment variable for benchmark tests + import os + + os.environ["BENCHMARK_ITERATIONS"] = str(args.iterations) + + # Run pytest + import subprocess + + result = subprocess.run( + [sys.executable, "-m", "pytest", 
"tests/test_benchmark.py"] + pytest_args, + cwd=None, # Current directory + ) + sys.exit(result.returncode) + + +if __name__ == "__main__": + main() diff --git a/engine/camera.py b/engine/camera.py new file mode 100644 index 0000000..b443e1b --- /dev/null +++ b/engine/camera.py @@ -0,0 +1,473 @@ +""" +Camera system for viewport scrolling. + +Provides abstraction for camera motion in different modes: +- Vertical: traditional upward scroll +- Horizontal: left/right movement +- Omni: combination of both +- Floating: sinusoidal/bobbing motion + +The camera defines a visible viewport into a larger Canvas. +""" + +import math +from collections.abc import Callable +from dataclasses import dataclass, field +from enum import Enum, auto + + +class CameraMode(Enum): + FEED = auto() # Single item view (static or rapid cycling) + SCROLL = auto() # Smooth vertical scrolling (movie credits style) + HORIZONTAL = auto() + OMNI = auto() + FLOATING = auto() + BOUNCE = auto() + RADIAL = auto() # Polar coordinates (r, theta) for radial scanning + + +@dataclass +class CameraViewport: + """Represents the visible viewport.""" + + x: int + y: int + width: int + height: int + + +@dataclass +class Camera: + """Camera for viewport scrolling. + + The camera defines a visible viewport into a Canvas. + It can be smaller than the canvas to allow scrolling, + and supports zoom to scale the view. 
+ + Attributes: + x: Current horizontal offset (positive = scroll left) + y: Current vertical offset (positive = scroll up) + mode: Current camera mode + speed: Base scroll speed + zoom: Zoom factor (1.0 = 100%, 2.0 = 200% zoom out) + canvas_width: Width of the canvas being viewed + canvas_height: Height of the canvas being viewed + custom_update: Optional custom update function + """ + + x: int = 0 + y: int = 0 + mode: CameraMode = CameraMode.FEED + speed: float = 1.0 + zoom: float = 1.0 + canvas_width: int = 200 # Larger than viewport for scrolling + canvas_height: int = 200 + custom_update: Callable[["Camera", float], None] | None = None + _x_float: float = field(default=0.0, repr=False) + _y_float: float = field(default=0.0, repr=False) + _time: float = field(default=0.0, repr=False) + + @property + def w(self) -> int: + """Shorthand for viewport_width.""" + return self.viewport_width + + def set_speed(self, speed: float) -> None: + """Set the camera scroll speed dynamically. + + This allows camera speed to be modulated during runtime + via PipelineParams or directly. + + Args: + speed: New speed value (0.0 = stopped, >0 = movement) + """ + self.speed = max(0.0, speed) + + @property + def h(self) -> int: + """Shorthand for viewport_height.""" + return self.viewport_height + + @property + def viewport_width(self) -> int: + """Get the visible viewport width. + + This is the canvas width divided by zoom. + """ + return max(1, int(self.canvas_width / self.zoom)) + + @property + def viewport_height(self) -> int: + """Get the visible viewport height. + + This is the canvas height divided by zoom. + """ + return max(1, int(self.canvas_height / self.zoom)) + + def get_viewport(self, viewport_height: int | None = None) -> CameraViewport: + """Get the current viewport bounds. 
+ + Args: + viewport_height: Optional viewport height to use instead of camera's viewport_height + + Returns: + CameraViewport with position and size (clamped to canvas bounds) + """ + vw = self.viewport_width + vh = viewport_height if viewport_height is not None else self.viewport_height + + clamped_x = max(0, min(self.x, self.canvas_width - vw)) + clamped_y = max(0, min(self.y, self.canvas_height - vh)) + + return CameraViewport( + x=clamped_x, + y=clamped_y, + width=vw, + height=vh, + ) + + return CameraViewport( + x=clamped_x, + y=clamped_y, + width=vw, + height=vh, + ) + + def set_zoom(self, zoom: float) -> None: + """Set the zoom factor. + + Args: + zoom: Zoom factor (1.0 = 100%, 2.0 = zoomed out 2x, 0.5 = zoomed in 2x) + """ + self.zoom = max(0.1, min(10.0, zoom)) + + def update(self, dt: float) -> None: + """Update camera position based on mode. + + Args: + dt: Delta time in seconds + """ + self._time += dt + + if self.custom_update: + self.custom_update(self, dt) + return + + if self.mode == CameraMode.FEED: + self._update_feed(dt) + elif self.mode == CameraMode.SCROLL: + self._update_scroll(dt) + elif self.mode == CameraMode.HORIZONTAL: + self._update_horizontal(dt) + elif self.mode == CameraMode.OMNI: + self._update_omni(dt) + elif self.mode == CameraMode.FLOATING: + self._update_floating(dt) + elif self.mode == CameraMode.BOUNCE: + self._update_bounce(dt) + elif self.mode == CameraMode.RADIAL: + self._update_radial(dt) + + # Bounce mode handles its own bounds checking + if self.mode != CameraMode.BOUNCE: + self._clamp_to_bounds() + + def _clamp_to_bounds(self) -> None: + """Clamp camera position to stay within canvas bounds. + + Only clamps if the viewport is smaller than the canvas. + If viewport equals canvas (no scrolling needed), allows any position + for backwards compatibility with original behavior. 
+ """ + vw = self.viewport_width + vh = self.viewport_height + + # Only clamp if there's room to scroll + if vw < self.canvas_width: + self.x = max(0, min(self.x, self.canvas_width - vw)) + if vh < self.canvas_height: + self.y = max(0, min(self.y, self.canvas_height - vh)) + + def _update_feed(self, dt: float) -> None: + """Feed mode: rapid scrolling (1 row per frame at speed=1.0).""" + self.y += int(self.speed * dt * 60) + + def _update_scroll(self, dt: float) -> None: + """Scroll mode: smooth vertical scrolling with float accumulation.""" + self._y_float += self.speed * dt * 60 + self.y = int(self._y_float) + + def _update_horizontal(self, dt: float) -> None: + self.x += int(self.speed * dt * 60) + + def _update_omni(self, dt: float) -> None: + speed = self.speed * dt * 60 + self.y += int(speed) + self.x += int(speed * 0.5) + + def _update_floating(self, dt: float) -> None: + base = self.speed * 30 + self.y = int(math.sin(self._time * 2) * base) + self.x = int(math.cos(self._time * 1.5) * base * 0.5) + + def _update_bounce(self, dt: float) -> None: + """Bouncing DVD-style camera that bounces off canvas edges.""" + vw = self.viewport_width + vh = self.viewport_height + + # Initialize direction if not set + if not hasattr(self, "_bounce_dx"): + self._bounce_dx = 1 + self._bounce_dy = 1 + + # Calculate max positions + max_x = max(0, self.canvas_width - vw) + max_y = max(0, self.canvas_height - vh) + + # Move + move_speed = self.speed * dt * 60 + + # Bounce off edges - reverse direction when hitting bounds + self.x += int(move_speed * self._bounce_dx) + self.y += int(move_speed * self._bounce_dy) + + # Bounce horizontally + if self.x <= 0: + self.x = 0 + self._bounce_dx = 1 + elif self.x >= max_x: + self.x = max_x + self._bounce_dx = -1 + + # Bounce vertically + if self.y <= 0: + self.y = 0 + self._bounce_dy = 1 + elif self.y >= max_y: + self.y = max_y + self._bounce_dy = -1 + + def _update_radial(self, dt: float) -> None: + """Radial camera mode: polar coordinate 
scrolling (r, theta). + + The camera rotates around the center of the canvas while optionally + moving outward/inward along rays. This enables: + - Radar sweep animations + - Pendulum view oscillation + - Spiral scanning motion + + Uses polar coordinates internally: + - _r_float: radial distance from center (accumulates smoothly) + - _theta_float: angle in radians (accumulates smoothly) + - Updates x, y based on conversion from polar to Cartesian + """ + # Initialize radial state if needed + if not hasattr(self, "_r_float"): + self._r_float = 0.0 + self._theta_float = 0.0 + + # Update angular position (rotation around center) + # Speed controls rotation rate + theta_speed = self.speed * dt * 1.0 # radians per second + self._theta_float += theta_speed + + # Update radial position (inward/outward from center) + # Can be modulated by external sensor + if hasattr(self, "_radial_input"): + r_input = self._radial_input + else: + # Default: slow outward drift + r_input = 0.0 + + r_speed = self.speed * dt * 20.0 # pixels per second + self._r_float += r_input + r_speed * 0.01 + + # Clamp radial position to canvas bounds + max_r = min(self.canvas_width, self.canvas_height) / 2 + self._r_float = max(0.0, min(self._r_float, max_r)) + + # Convert polar to Cartesian, centered at canvas center + center_x = self.canvas_width / 2 + center_y = self.canvas_height / 2 + + self.x = int(center_x + self._r_float * math.cos(self._theta_float)) + self.y = int(center_y + self._r_float * math.sin(self._theta_float)) + + # Clamp to canvas bounds + self._clamp_to_bounds() + + def set_radial_input(self, value: float) -> None: + """Set radial input for sensor-driven radius modulation. + + Args: + value: Sensor value (0-1) that modulates radial distance + """ + self._radial_input = value * 10.0 # Scale to reasonable pixel range + + def set_radial_angle(self, angle: float) -> None: + """Set radial angle directly (for OSC integration). 
+ + Args: + angle: Angle in radians (0 to 2π) + """ + self._theta_float = angle + + def reset(self) -> None: + """Reset camera position and state.""" + self.x = 0 + self.y = 0 + self._time = 0.0 + self.zoom = 1.0 + # Reset bounce direction state + if hasattr(self, "_bounce_dx"): + self._bounce_dx = 1 + self._bounce_dy = 1 + # Reset radial state + if hasattr(self, "_r_float"): + self._r_float = 0.0 + self._theta_float = 0.0 + + def set_canvas_size(self, width: int, height: int) -> None: + """Set the canvas size and clamp position if needed. + + Args: + width: New canvas width + height: New canvas height + """ + self.canvas_width = width + self.canvas_height = height + self._clamp_to_bounds() + + def apply( + self, buffer: list[str], viewport_width: int, viewport_height: int | None = None + ) -> list[str]: + """Apply camera viewport to a text buffer. + + Slices the buffer based on camera position (x, y) and viewport dimensions. + Handles ANSI escape codes correctly for colored/styled text. 
+
+        Args:
+            buffer: List of strings representing lines of text
+            viewport_width: Width of the visible viewport in characters
+            viewport_height: Height of the visible viewport (overrides camera's viewport_height if provided)
+
+        Returns:
+            Sliced buffer containing only the visible lines and columns
+        """
+        from engine.effects.legacy import vis_offset, vis_trunc
+        import re  # hoisted out of the per-line loop below
+
+        if not buffer:
+            return buffer
+
+        # Get current viewport bounds (clamped to canvas size)
+        viewport = self.get_viewport(viewport_height)
+
+        # Use provided viewport_height if given, otherwise use camera's viewport
+        vh = viewport_height if viewport_height is not None else viewport.height
+
+        # Vertical slice: extract lines that fit in viewport height
+        start_y = viewport.y
+        end_y = min(viewport.y + vh, len(buffer))
+
+        if start_y >= len(buffer):
+            # Scrolled past end of buffer, return empty viewport
+            return [""] * vh
+
+        vertical_slice = buffer[start_y:end_y]
+
+        # Horizontal slice: apply horizontal offset and truncate to width
+        horizontal_slice = []
+        for line in vertical_slice:
+            # Apply horizontal offset (skip first x characters, handling ANSI)
+            offset_line = vis_offset(line, viewport.x)
+            # Truncate to viewport width (handling ANSI)
+            truncated_line = vis_trunc(offset_line, viewport_width)
+
+            # Pad line to full viewport width to prevent ghosting when panning
+            # Skip padding for empty lines to preserve intentional blank lines
+            # (ANSI color escapes are stripped before measuring visible width)
+            visible_len = len(re.sub(r"\x1b\[[0-9;]*m", "", truncated_line))
+            if visible_len < viewport_width and visible_len > 0:
+                truncated_line += " " * (viewport_width - visible_len)
+
+            horizontal_slice.append(truncated_line)
+
+        # Pad with empty lines if needed to fill viewport height
+        while len(horizontal_slice) < vh:
+            horizontal_slice.append("")
+
+        return horizontal_slice
+
+    @classmethod
+    def feed(cls, speed: float = 1.0) -> "Camera":
+        """Create a feed camera (rapid single-item scrolling, 1 row/frame at speed=1.0)."""
+        return cls(mode=CameraMode.FEED,
speed=speed, canvas_height=200) + + @classmethod + def scroll(cls, speed: float = 0.5) -> "Camera": + """Create a smooth scrolling camera (movie credits style). + + Uses float accumulation for sub-integer speeds. + Sets canvas_width=0 so it matches viewport_width for proper text wrapping. + """ + return cls( + mode=CameraMode.SCROLL, speed=speed, canvas_width=0, canvas_height=200 + ) + + @classmethod + def vertical(cls, speed: float = 1.0) -> "Camera": + """Deprecated: Use feed() or scroll() instead.""" + return cls(mode=CameraMode.FEED, speed=speed, canvas_height=200) + + @classmethod + def horizontal(cls, speed: float = 1.0) -> "Camera": + """Create a horizontal scrolling camera.""" + return cls(mode=CameraMode.HORIZONTAL, speed=speed, canvas_width=200) + + @classmethod + def omni(cls, speed: float = 1.0) -> "Camera": + """Create an omnidirectional scrolling camera.""" + return cls( + mode=CameraMode.OMNI, speed=speed, canvas_width=200, canvas_height=200 + ) + + @classmethod + def floating(cls, speed: float = 1.0) -> "Camera": + """Create a floating/bobbing camera.""" + return cls( + mode=CameraMode.FLOATING, speed=speed, canvas_width=200, canvas_height=200 + ) + + @classmethod + def bounce(cls, speed: float = 1.0) -> "Camera": + """Create a bouncing DVD-style camera that bounces off canvas edges.""" + return cls( + mode=CameraMode.BOUNCE, speed=speed, canvas_width=200, canvas_height=200 + ) + + @classmethod + def radial(cls, speed: float = 1.0) -> "Camera": + """Create a radial camera (polar coordinate scanning). + + The camera rotates around the center of the canvas with smooth angular motion. + Enables radar sweep, pendulum view, and spiral scanning animations. 
+ + Args: + speed: Rotation speed (higher = faster rotation) + + Returns: + Camera configured for radial polar coordinate scanning + """ + cam = cls( + mode=CameraMode.RADIAL, speed=speed, canvas_width=200, canvas_height=200 + ) + # Initialize radial state + cam._r_float = 0.0 + cam._theta_float = 0.0 + return cam + + @classmethod + def custom(cls, update_fn: Callable[["Camera", float], None]) -> "Camera": + """Create a camera with custom update function.""" + return cls(custom_update=update_fn) diff --git a/engine/canvas.py b/engine/canvas.py new file mode 100644 index 0000000..9341223 --- /dev/null +++ b/engine/canvas.py @@ -0,0 +1,186 @@ +""" +Canvas - 2D surface for rendering. + +The Canvas represents a full rendered surface that can be larger than the display. +The Camera then defines the visible viewport into this canvas. +""" + +from dataclasses import dataclass + + +@dataclass +class CanvasRegion: + """A rectangular region on the canvas.""" + + x: int + y: int + width: int + height: int + + def is_valid(self) -> bool: + """Check if region has positive dimensions.""" + return self.width > 0 and self.height > 0 + + def rows(self) -> set[int]: + """Return set of row indices in this region.""" + return set(range(self.y, self.y + self.height)) + + +class Canvas: + """2D canvas for rendering content. + + The canvas is a 2D grid of cells that can hold text content. + It can be larger than the visible viewport (display). 
+ + Attributes: + width: Total width in characters + height: Total height in characters + """ + + def __init__(self, width: int = 80, height: int = 24): + self.width = width + self.height = height + self._grid: list[list[str]] = [ + [" " for _ in range(width)] for _ in range(height) + ] + self._dirty_regions: list[CanvasRegion] = [] # Track dirty regions + + def clear(self) -> None: + """Clear the entire canvas.""" + self._grid = [[" " for _ in range(self.width)] for _ in range(self.height)] + self._dirty_regions = [CanvasRegion(0, 0, self.width, self.height)] + + def mark_dirty(self, x: int, y: int, width: int, height: int) -> None: + """Mark a region as dirty (caller declares what they changed).""" + self._dirty_regions.append(CanvasRegion(x, y, width, height)) + + def get_dirty_regions(self) -> list[CanvasRegion]: + """Get all dirty regions and clear the set.""" + regions = self._dirty_regions + self._dirty_regions = [] + return regions + + def get_dirty_rows(self) -> set[int]: + """Get union of all dirty rows.""" + rows: set[int] = set() + for region in self._dirty_regions: + rows.update(region.rows()) + return rows + + def is_dirty(self) -> bool: + """Check if any region is dirty.""" + return len(self._dirty_regions) > 0 + + def get_region(self, x: int, y: int, width: int, height: int) -> list[list[str]]: + """Get a rectangular region from the canvas. + + Args: + x: Left position + y: Top position + width: Region width + height: Region height + + Returns: + 2D list of characters (height rows, width columns) + """ + region: list[list[str]] = [] + for py in range(y, y + height): + row: list[str] = [] + for px in range(x, x + width): + if 0 <= py < self.height and 0 <= px < self.width: + row.append(self._grid[py][px]) + else: + row.append(" ") + region.append(row) + return region + + def get_region_flat(self, x: int, y: int, width: int, height: int) -> list[str]: + """Get a rectangular region as flat list of lines. 
+ + Args: + x: Left position + y: Top position + width: Region width + height: Region height + + Returns: + List of strings (one per row) + """ + region = self.get_region(x, y, width, height) + return ["".join(row) for row in region] + + def put_region(self, x: int, y: int, content: list[list[str]]) -> None: + """Put content into a rectangular region on the canvas. + + Args: + x: Left position + y: Top position + content: 2D list of characters to place + """ + height = len(content) if content else 0 + width = len(content[0]) if height > 0 else 0 + + for py, row in enumerate(content): + for px, char in enumerate(row): + canvas_x = x + px + canvas_y = y + py + if 0 <= canvas_y < self.height and 0 <= canvas_x < self.width: + self._grid[canvas_y][canvas_x] = char + + if width > 0 and height > 0: + self.mark_dirty(x, y, width, height) + + def put_text(self, x: int, y: int, text: str) -> None: + """Put a single line of text at position. + + Args: + x: Left position + y: Row position + text: Text to place + """ + text_len = len(text) + for i, char in enumerate(text): + canvas_x = x + i + if 0 <= canvas_x < self.width and 0 <= y < self.height: + self._grid[y][canvas_x] = char + + if text_len > 0: + self.mark_dirty(x, y, text_len, 1) + + def fill(self, x: int, y: int, width: int, height: int, char: str = " ") -> None: + """Fill a rectangular region with a character. + + Args: + x: Left position + y: Top position + width: Region width + height: Region height + char: Character to fill with + """ + for py in range(y, y + height): + for px in range(x, x + width): + if 0 <= py < self.height and 0 <= px < self.width: + self._grid[py][px] = char + + if width > 0 and height > 0: + self.mark_dirty(x, y, width, height) + + def resize(self, width: int, height: int) -> None: + """Resize the canvas. 
+
+        Args:
+            width: New width
+            height: New height
+        """
+        if width == self.width and height == self.height:
+            return
+
+        new_grid: list[list[str]] = [[" " for _ in range(width)] for _ in range(height)]
+
+        for py in range(min(self.height, height)):
+            for px in range(min(self.width, width)):
+                new_grid[py][px] = self._grid[py][px]
+
+        self.width = width
+        self.height = height
+        self._grid = new_grid
diff --git a/engine/config.py b/engine/config.py
index 8ca8191..f7f86a3 100644
--- a/engine/config.py
+++ b/engine/config.py
@@ -105,6 +105,8 @@ class Config:
     firehose: bool = False
 
     ntfy_topic: str = "https://ntfy.sh/klubhaus_terminal_mainline/json"
+    ntfy_cc_cmd_topic: str = "https://ntfy.sh/klubhaus_terminal_mainline_cc_cmd/json"
+    ntfy_cc_resp_topic: str = "https://ntfy.sh/klubhaus_terminal_mainline_cc_resp/json"
     ntfy_reconnect_delay: int = 5
     message_display_secs: int = 30
@@ -127,6 +129,10 @@ class Config:
     script_fonts: dict[str, str] = field(default_factory=_get_platform_font_paths)
 
+    display: str = "pygame"
+    websocket: bool = False
+    websocket_port: int = 8765
+
     @classmethod
     def from_args(cls, argv: list[str] | None = None) -> "Config":
         """Create Config from CLI arguments (or custom argv for testing)."""
@@ -148,6 +154,8 @@ class Config:
             mode="poetry" if "--poetry" in argv or "-p" in argv else "news",
             firehose="--firehose" in argv,
             ntfy_topic="https://ntfy.sh/klubhaus_terminal_mainline/json",
+            ntfy_cc_cmd_topic="https://ntfy.sh/klubhaus_terminal_mainline_cc_cmd/json",
+            ntfy_cc_resp_topic="https://ntfy.sh/klubhaus_terminal_mainline_cc_resp/json",
             ntfy_reconnect_delay=5,
             message_display_secs=30,
             font_dir=font_dir,
@@ -164,6 +172,9 @@ class Config:
             glitch_glyphs="░▒▓█▌▐╌╍╎╏┃┆┇┊┋",
             kata_glyphs="ハミヒーウシナモニサワツオリアホテマケメエカキムユラセネスタヌヘ",
             script_fonts=_get_platform_font_paths(),
+            display=_arg_value("--display", argv) or "pygame",  # fallback matches Config.display default and module-level DISPLAY
+            websocket="--websocket" in argv,
+            websocket_port=_arg_int("--websocket-port", 8765, argv),
         )
 
 
@@ -188,19 +199,13 @@ def set_config(config:
Config) -> None: HEADLINE_LIMIT = 1000 FEED_TIMEOUT = 10 MIC_THRESHOLD_DB = 50 # dB above which glitches intensify -MODE = ( - "poetry" - if "--poetry" in sys.argv or "-p" in sys.argv - else "code" - if "--code" in sys.argv - else "news" -) +MODE = "poetry" if "--poetry" in sys.argv or "-p" in sys.argv else "news" FIREHOSE = "--firehose" in sys.argv -FIGMENT = "--figment" in sys.argv -FIGMENT_INTERVAL = _arg_int("--figment-interval", 60) # seconds between appearances # ─── NTFY MESSAGE QUEUE ────────────────────────────────── NTFY_TOPIC = "https://ntfy.sh/klubhaus_terminal_mainline/json" +NTFY_CC_CMD_TOPIC = "https://ntfy.sh/klubhaus_terminal_mainline_cc_cmd/json" +NTFY_CC_RESP_TOPIC = "https://ntfy.sh/klubhaus_terminal_mainline_cc_resp/json" NTFY_RECONNECT_DELAY = 5 # seconds before reconnecting after a dropped stream MESSAGE_DISPLAY_SECS = 30 # how long a message holds the screen @@ -231,6 +236,26 @@ GRAD_SPEED = 0.08 # gradient traversal speed (cycles/sec, ~12s full sweep) GLITCH = "░▒▓█▌▐╌╍╎╏┃┆┇┊┋" KATA = "ハミヒーウシナモニサワツオリアホテマケメエカキムユラセネスタヌヘ" +# ─── WEBSOCKET ───────────────────────────────────────────── +DISPLAY = _arg_value("--display", sys.argv) or "pygame" +WEBSOCKET = "--websocket" in sys.argv +WEBSOCKET_PORT = _arg_int("--websocket-port", 8765) + +# ─── DEMO MODE ──────────────────────────────────────────── +DEMO = "--demo" in sys.argv +DEMO_EFFECT_DURATION = 5.0 # seconds per effect +PIPELINE_DEMO = "--pipeline-demo" in sys.argv + +# ─── PIPELINE MODE (new unified architecture) ───────────── +PIPELINE_MODE = "--pipeline" in sys.argv +PIPELINE_PRESET = _arg_value("--pipeline-preset", sys.argv) or "demo" + +# ─── PRESET MODE ──────────────────────────────────────────── +PRESET = _arg_value("--preset", sys.argv) + +# ─── PIPELINE DIAGRAM ──────────────────────────────────── +PIPELINE_DIAGRAM = "--pipeline-diagram" in sys.argv + def set_font_selection(font_path=None, font_index=None): """Set runtime primary font selection.""" @@ -239,26 +264,3 @@ def 
set_font_selection(font_path=None, font_index=None): FONT_PATH = _resolve_font_path(font_path) if font_index is not None: FONT_INDEX = max(0, int(font_index)) - - -# ─── THEME MANAGEMENT ───────────────────────────────────────── -ACTIVE_THEME = None - - -def set_active_theme(theme_id: str = "green"): - """Set the active theme by ID. - - Args: - theme_id: Theme identifier ("green", "orange", or "purple") - Defaults to "green" - - Raises: - KeyError: If theme_id is not in the theme registry - - Side Effects: - Sets the ACTIVE_THEME global variable - """ - global ACTIVE_THEME - from engine import themes - - ACTIVE_THEME = themes.get_theme(theme_id) diff --git a/engine/controller.py b/engine/controller.py deleted file mode 100644 index e6e2e3d..0000000 --- a/engine/controller.py +++ /dev/null @@ -1,68 +0,0 @@ -""" -Stream controller - manages input sources and orchestrates the render stream. -""" - -from engine.config import Config, get_config -from engine.eventbus import EventBus -from engine.events import EventType, StreamEvent -from engine.mic import MicMonitor -from engine.ntfy import NtfyPoller -from engine.scroll import stream - - -class StreamController: - """Controls the stream lifecycle - initializes sources and runs the stream.""" - - def __init__(self, config: Config | None = None, event_bus: EventBus | None = None): - self.config = config or get_config() - self.event_bus = event_bus - self.mic: MicMonitor | None = None - self.ntfy: NtfyPoller | None = None - - def initialize_sources(self) -> tuple[bool, bool]: - """Initialize microphone and ntfy sources. 
- - Returns: - (mic_ok, ntfy_ok) - success status for each source - """ - self.mic = MicMonitor(threshold_db=self.config.mic_threshold_db) - mic_ok = self.mic.start() if self.mic.available else False - - self.ntfy = NtfyPoller( - self.config.ntfy_topic, - reconnect_delay=self.config.ntfy_reconnect_delay, - display_secs=self.config.message_display_secs, - ) - ntfy_ok = self.ntfy.start() - - return bool(mic_ok), ntfy_ok - - def run(self, items: list) -> None: - """Run the stream with initialized sources.""" - if self.mic is None or self.ntfy is None: - self.initialize_sources() - - if self.event_bus: - self.event_bus.publish( - EventType.STREAM_START, - StreamEvent( - event_type=EventType.STREAM_START, - headline_count=len(items), - ), - ) - - stream(items, self.ntfy, self.mic) - - if self.event_bus: - self.event_bus.publish( - EventType.STREAM_END, - StreamEvent( - event_type=EventType.STREAM_END, - headline_count=len(items), - ), - ) - - def cleanup(self) -> None: - """Clean up resources.""" - if self.mic: - self.mic.stop() diff --git a/engine/data_sources/__init__.py b/engine/data_sources/__init__.py new file mode 100644 index 0000000..2f5493c --- /dev/null +++ b/engine/data_sources/__init__.py @@ -0,0 +1,12 @@ +""" +Data source implementations for the pipeline architecture. 
+ +Import directly from submodules: + from engine.data_sources.sources import DataSource, SourceItem, HeadlinesDataSource + from engine.data_sources.pipeline_introspection import PipelineIntrospectionSource +""" + +# Re-export for convenience +from engine.data_sources.sources import ImageItem, SourceItem + +__all__ = ["ImageItem", "SourceItem"] diff --git a/engine/data_sources/checkerboard.py b/engine/data_sources/checkerboard.py new file mode 100644 index 0000000..48326f2 --- /dev/null +++ b/engine/data_sources/checkerboard.py @@ -0,0 +1,60 @@ +"""Checkerboard data source for visual pattern generation.""" + +from engine.data_sources.sources import DataSource, SourceItem + + +class CheckerboardDataSource(DataSource): + """Data source that generates a checkerboard pattern. + + Creates a grid of alternating characters, useful for testing motion effects + and camera movement. The pattern is static; movement comes from camera panning. + """ + + def __init__( + self, + width: int = 200, + height: int = 200, + square_size: int = 10, + char_a: str = "#", + char_b: str = " ", + ): + """Initialize checkerboard data source. 
+ + Args: + width: Total pattern width in characters + height: Total pattern height in lines + square_size: Size of each checker square in characters + char_a: Character for "filled" squares (default: '#') + char_b: Character for "empty" squares (default: ' ') + """ + self.width = width + self.height = height + self.square_size = square_size + self.char_a = char_a + self.char_b = char_b + + @property + def name(self) -> str: + return "checkerboard" + + @property + def is_dynamic(self) -> bool: + return False + + def fetch(self) -> list[SourceItem]: + """Generate the checkerboard pattern as a single SourceItem.""" + lines = [] + for y in range(self.height): + line_chars = [] + for x in range(self.width): + # Determine which square this position belongs to + square_x = x // self.square_size + square_y = y // self.square_size + # Alternate pattern based on parity of square coordinates + if (square_x + square_y) % 2 == 0: + line_chars.append(self.char_a) + else: + line_chars.append(self.char_b) + lines.append("".join(line_chars)) + content = "\n".join(lines) + return [SourceItem(content=content, source="checkerboard", timestamp="0")] diff --git a/engine/data_sources/pipeline_introspection.py b/engine/data_sources/pipeline_introspection.py new file mode 100644 index 0000000..b7c372d --- /dev/null +++ b/engine/data_sources/pipeline_introspection.py @@ -0,0 +1,312 @@ +""" +Pipeline introspection source - Renders live visualization of pipeline DAG and metrics. 
+
+This DataSource introspects one or more Pipeline instances and renders
+an ASCII visualization showing:
+- Stage DAG with signal flow connections
+- Per-stage execution times
+- Sparkline of frame times
+- Stage breakdown bars
+
+Example:
+    source = PipelineIntrospectionSource(pipeline=my_pipeline)
+    items = source.fetch()  # Returns ASCII visualization
+"""
+
+from typing import TYPE_CHECKING
+
+from engine.data_sources.sources import DataSource, SourceItem
+
+if TYPE_CHECKING:
+    from engine.pipeline.controller import Pipeline
+
+
+SPARKLINE_CHARS = " ▁▂▃▄▅▆▇█"
+BAR_CHARS = " ▁▂▃▄▅▆▇█"
+
+
+class PipelineIntrospectionSource(DataSource):
+    """Data source that renders live pipeline introspection visualization.
+
+    Renders:
+    - DAG of stages with signal flow
+    - Per-stage execution times
+    - Sparkline of frame history
+    - Stage breakdown bars
+    """
+
+    def __init__(
+        self,
+        pipeline: "Pipeline | None" = None,
+        viewport_width: int = 100,
+        viewport_height: int = 35,
+    ):
+        self._pipeline = pipeline  # May be None initially, set later via set_pipeline()
+        self.viewport_width = viewport_width
+        self.viewport_height = viewport_height
+        self.frame = 0
+        self._ready = pipeline is not None  # ready immediately when a pipeline is injected
+
+    def set_pipeline(self, pipeline: "Pipeline") -> None:
+        """Set the pipeline to introspect (call after pipeline is built)."""
+        self._pipeline = [pipeline]  # Wrap in list for iteration
+        self._ready = True
+
+    @property
+    def ready(self) -> bool:
+        """Check if source is ready to fetch."""
+        return self._ready
+
+    @property
+    def name(self) -> str:
+        return "pipeline-inspect"
+
+    @property
+    def is_dynamic(self) -> bool:
+        return True
+
+    @property
+    def inlet_types(self) -> set:
+        from engine.pipeline.core import DataType
+
+        return {DataType.NONE}
+
+    @property
+    def outlet_types(self) -> set:
+        from engine.pipeline.core import DataType
+
+        return {DataType.SOURCE_ITEMS}
+
+    def add_pipeline(self, pipeline: "Pipeline") -> None:
+        """Add a pipeline to visualize."""
+        if self._pipeline is None:
+ self._pipeline = [pipeline] + elif isinstance(self._pipeline, list): + self._pipeline.append(pipeline) + else: + self._pipeline = [self._pipeline, pipeline] + self._ready = True + + def remove_pipeline(self, pipeline: "Pipeline") -> None: + """Remove a pipeline from visualization.""" + if self._pipeline is None: + return + elif isinstance(self._pipeline, list): + self._pipeline = [p for p in self._pipeline if p is not pipeline] + if not self._pipeline: + self._pipeline = None + self._ready = False + elif self._pipeline is pipeline: + self._pipeline = None + self._ready = False + + def fetch(self) -> list[SourceItem]: + """Fetch the introspection visualization.""" + if not self._ready: + # Return a placeholder until ready + return [ + SourceItem( + content="Initializing...", + source="pipeline-inspect", + timestamp="init", + ) + ] + + lines = self._render() + self.frame += 1 + content = "\n".join(lines) + return [ + SourceItem( + content=content, source="pipeline-inspect", timestamp=f"f{self.frame}" + ) + ] + + def get_items(self) -> list[SourceItem]: + return self.fetch() + + def _render(self) -> list[str]: + """Render the full visualization.""" + lines: list[str] = [] + + # Header + lines.extend(self._render_header()) + + # Render pipeline(s) if ready + if self._ready and self._pipeline: + pipelines = ( + self._pipeline if isinstance(self._pipeline, list) else [self._pipeline] + ) + for pipeline in pipelines: + lines.extend(self._render_pipeline(pipeline)) + + # Footer with sparkline + lines.extend(self._render_footer()) + + return lines + + @property + def _pipelines(self) -> list: + """Return pipelines as a list for iteration.""" + if self._pipeline is None: + return [] + elif isinstance(self._pipeline, list): + return self._pipeline + else: + return [self._pipeline] + + def _render_header(self) -> list[str]: + """Render the header with frame info and metrics summary.""" + lines: list[str] = [] + + if not self._pipeline: + return ["PIPELINE INTROSPECTION"] + + 
# Get aggregate metrics
+        total_ms = 0.0
+        fps = 0.0
+        frame_count = 0
+
+        for pipeline in self._pipelines:
+            try:
+                metrics = pipeline.get_metrics_summary()
+                if metrics and "error" not in metrics:
+                    # Get avg_ms from pipeline metrics
+                    pipeline_avg = metrics.get("pipeline", {}).get("avg_ms", 0)
+                    total_ms = max(total_ms, pipeline_avg)
+                    # Calculate FPS from avg_ms
+                    if pipeline_avg > 0:
+                        fps = max(fps, 1000.0 / pipeline_avg)
+                    frame_count = max(frame_count, metrics.get("frame_count", 0))
+            except Exception:
+                pass
+
+        header = f"PIPELINE INTROSPECTION -- frame: {self.frame} -- avg: {total_ms:.1f}ms -- fps: {fps:.1f}"
+        lines.append(header)
+
+        return lines
+
+    def _render_pipeline(self, pipeline: "Pipeline") -> list[str]:
+        """Render a single pipeline's DAG."""
+        lines: list[str] = []
+
+        stages = pipeline.stages
+        execution_order = pipeline.execution_order
+
+        if not stages:
+            lines.append("  (no stages)")
+            return lines
+
+        # Build stage info (fetch metrics once; the summary is identical per stage)
+        try:
+            metrics = pipeline.get_metrics_summary()
+        except Exception:
+            metrics = {}
+        stage_infos: list[dict] = []
+        for name in execution_order:
+            stage = stages.get(name)
+            if not stage:
+                continue
+
+            stage_ms = metrics.get("stages", {}).get(name, {}).get("avg_ms", 0.0)
+
+            stage_infos.append(
+                {
+                    "name": name,
+                    "category": stage.category,
+                    "ms": stage_ms,
+                }
+            )
+
+        # Calculate total time for percentages
+        total_time = sum(s["ms"] for s in stage_infos) or 1.0
+
+        # Render DAG - group by category
+        lines.append("")
+        lines.append("  Signal Flow:")
+
+        # Group stages by category for display
+        categories: dict[str, list[dict]] = {}
+        for info in stage_infos:
+            cat = info["category"]
+            if cat not in categories:
+                categories[cat] = []
+            categories[cat].append(info)
+
+        # Render categories in order
+        cat_order = ["source", "render", "effect", "overlay", "display", "system"]
+
+        for cat in cat_order:
+            if cat not in categories:
+                continue
+
+            cat_stages = categories[cat]
+            cat_names = [s["name"] for s in cat_stages]
lines.append(f" {cat}: {' → '.join(cat_names)}") + + # Render timing breakdown + lines.append("") + lines.append(" Stage Timings:") + + for info in stage_infos: + name = info["name"] + ms = info["ms"] + pct = (ms / total_time) * 100 + bar = self._render_bar(pct, 20) + lines.append(f" {name:12s} {ms:6.2f}ms {bar} {pct:5.1f}%") + + lines.append("") + + return lines + + def _render_footer(self) -> list[str]: + """Render the footer with sparkline.""" + lines: list[str] = [] + + # Get frame history from first pipeline + pipelines = self._pipelines + if pipelines: + try: + frame_times = pipelines[0].get_frame_times() + except Exception: + frame_times = [] + else: + frame_times = [] + + if frame_times: + sparkline = self._render_sparkline(frame_times[-60:], 50) + lines.append(f" Frame Time History (last {len(frame_times[-60:])} frames)") + lines.append(f" {sparkline}") + else: + lines.append(" Frame Time History") + lines.append(" (collecting data...)") + + lines.append("") + + return lines + + def _render_bar(self, percentage: float, width: int) -> str: + """Render a horizontal bar for percentage.""" + filled = int((percentage / 100.0) * width) + bar = "█" * filled + "░" * (width - filled) + return bar + + def _render_sparkline(self, values: list[float], width: int) -> str: + """Render a sparkline from values.""" + if not values: + return " " * width + + min_val = min(values) + max_val = max(values) + range_val = max_val - min_val or 1.0 + + result = [] + for v in values[-width:]: + normalized = (v - min_val) / range_val + idx = int(normalized * (len(SPARKLINE_CHARS) - 1)) + idx = max(0, min(idx, len(SPARKLINE_CHARS) - 1)) + result.append(SPARKLINE_CHARS[idx]) + + # Pad to width + while len(result) < width: + result.insert(0, " ") + return "".join(result[:width]) diff --git a/engine/data_sources/sources.py b/engine/data_sources/sources.py new file mode 100644 index 0000000..f1717ee --- /dev/null +++ b/engine/data_sources/sources.py @@ -0,0 +1,490 @@ +""" +Data sources 
for the pipeline architecture. + +This module contains all DataSource implementations: +- DataSource: Abstract base class +- SourceItem, ImageItem: Data containers +- HeadlinesDataSource, PoetryDataSource, ImageDataSource: Concrete sources +- SourceRegistry: Registry for source discovery +""" + +from abc import ABC, abstractmethod +from collections.abc import Callable +from dataclasses import dataclass +from typing import Any + + +@dataclass +class SourceItem: + """A single item from a data source.""" + + content: str + source: str + timestamp: str + metadata: dict[str, Any] | None = None + + +@dataclass +class ImageItem: + """An image item from a data source - wraps a PIL Image.""" + + image: Any # PIL Image + source: str + timestamp: str + path: str | None = None # File path or URL if applicable + metadata: dict[str, Any] | None = None + + +class DataSource(ABC): + """Abstract base class for data sources. + + Static sources: Data fetched once and cached. Safe to call fetch() multiple times. + Dynamic sources: Data changes over time. fetch() should be idempotent. + """ + + @property + @abstractmethod + def name(self) -> str: + """Display name for this source.""" + ... + + @property + def is_dynamic(self) -> bool: + """Whether this source updates dynamically while the app runs. Default False.""" + return False + + @abstractmethod + def fetch(self) -> list[SourceItem]: + """Fetch fresh data from the source. Must be idempotent.""" + ... + + def get_items(self) -> list[SourceItem]: + """Get current items. Default implementation returns cached fetch results.""" + if not hasattr(self, "_items") or self._items is None: + self._items = self.fetch() + return self._items + + def refresh(self) -> list[SourceItem]: + """Force refresh - clear cache and fetch fresh data.""" + self._items = self.fetch() + return self._items + + def stream(self): + """Optional: Yield items continuously. 
Override for streaming sources."""
+        raise NotImplementedError
+
+    def __init__(self):  # was __post_init__, which never runs (DataSource is not a dataclass)
+        self._items: list[SourceItem] | None = None
+
+
+class HeadlinesDataSource(DataSource):
+    """Data source for RSS feed headlines."""
+
+    @property
+    def name(self) -> str:
+        return "headlines"
+
+    def fetch(self) -> list[SourceItem]:
+        from engine.fetch import fetch_all
+
+        items, _, _ = fetch_all()
+        return [SourceItem(content=t, source=s, timestamp=ts) for t, s, ts in items]
+
+
+class EmptyDataSource(DataSource):
+    """Empty data source that produces blank lines for testing.
+
+    Useful for testing display borders, effects, and other pipeline
+    components without needing actual content.
+    """
+
+    def __init__(self, width: int = 80, height: int = 24):
+        self.width = width
+        self.height = height
+
+    @property
+    def name(self) -> str:
+        return "empty"
+
+    @property
+    def is_dynamic(self) -> bool:
+        return False
+
+    def fetch(self) -> list[SourceItem]:
+        # Return empty lines as content
+        content = "\n".join([" " * self.width for _ in range(self.height)])
+        return [SourceItem(content=content, source="empty", timestamp="0")]
+
+
+class ListDataSource(DataSource):
+    """Data source that wraps a pre-fetched list of items.
+
+    Used for bootstrap loading when items are already available in memory.
+    This is a simple wrapper for already-fetched data.
+ """ + + def __init__(self, items, name: str = "list"): + self._raw_items = items # Store raw items separately + self._items = None # Cache for converted SourceItem objects + self._name = name + + @property + def name(self) -> str: + return self._name + + @property + def is_dynamic(self) -> bool: + return False + + def fetch(self) -> list[SourceItem]: + # Convert tuple items to SourceItem if needed + result = [] + for item in self._raw_items: + if isinstance(item, SourceItem): + result.append(item) + elif isinstance(item, tuple) and len(item) >= 3: + # Assume (content, source, timestamp) tuple format + result.append( + SourceItem(content=item[0], source=item[1], timestamp=str(item[2])) + ) + else: + # Fallback: treat as string content + result.append( + SourceItem(content=str(item), source="list", timestamp="0") + ) + return result + + +class PoetryDataSource(DataSource): + """Data source for Poetry DB.""" + + @property + def name(self) -> str: + return "poetry" + + def fetch(self) -> list[SourceItem]: + from engine.fetch import fetch_poetry + + items, _, _ = fetch_poetry() + return [SourceItem(content=t, source=s, timestamp=ts) for t, s, ts in items] + + +class ImageDataSource(DataSource): + """Data source that loads PNG images from file paths or URLs. + + Supports: + - Local file paths (e.g., /path/to/image.png) + - URLs (e.g., https://example.com/image.png) + + Yields ImageItem objects containing PIL Image objects that can be + converted to text buffers by an ImageToTextTransform stage. 
+ """ + + def __init__( + self, + path: str | list[str] | None = None, + urls: str | list[str] | None = None, + ): + """ + Args: + path: Single path or list of paths to PNG files + urls: Single URL or list of URLs to PNG images + """ + self._paths = [path] if isinstance(path, str) else (path or []) + self._urls = [urls] if isinstance(urls, str) else (urls or []) + self._images: list[ImageItem] = [] + self._load_images() + + def _load_images(self) -> None: + """Load all images from paths and URLs.""" + from datetime import datetime + from io import BytesIO + from urllib.request import urlopen + + timestamp = datetime.now().isoformat() + + for path in self._paths: + try: + from PIL import Image + + img = Image.open(path) + if img.mode != "RGBA": + img = img.convert("RGBA") + self._images.append( + ImageItem( + image=img, + source=f"file:{path}", + timestamp=timestamp, + path=path, + ) + ) + except Exception: + pass + + for url in self._urls: + try: + from PIL import Image + + with urlopen(url) as response: + img = Image.open(BytesIO(response.read())) + if img.mode != "RGBA": + img = img.convert("RGBA") + self._images.append( + ImageItem( + image=img, + source=f"url:{url}", + timestamp=timestamp, + path=url, + ) + ) + except Exception: + pass + + @property + def name(self) -> str: + return "image" + + @property + def is_dynamic(self) -> bool: + return False # Static images, not updating + + def fetch(self) -> list[ImageItem]: + """Return loaded images as ImageItem list.""" + return self._images + + def get_items(self) -> list[ImageItem]: + """Return current image items.""" + return self._images + + +class MetricsDataSource(DataSource): + """Data source that renders live pipeline metrics as ASCII art. + + Wraps a Pipeline and displays active stages with their average execution + time and approximate FPS impact. Updates lazily when camera is about to + focus on a new node (frame % 15 == 12). 
+ """ + + def __init__( + self, + pipeline: Any, + viewport_width: int = 80, + viewport_height: int = 24, + ): + self.pipeline = pipeline + self.viewport_width = viewport_width + self.viewport_height = viewport_height + self.frame = 0 + self._cached_metrics: dict | None = None + + @property + def name(self) -> str: + return "metrics" + + @property + def is_dynamic(self) -> bool: + return True + + def fetch(self) -> list[SourceItem]: + if self.frame % 15 == 12: + self._cached_metrics = None + + if self._cached_metrics is None: + self._cached_metrics = self._fetch_metrics() + + buffer = self._render_metrics(self._cached_metrics) + self.frame += 1 + content = "\n".join(buffer) + return [ + SourceItem(content=content, source="metrics", timestamp=f"f{self.frame}") + ] + + def _fetch_metrics(self) -> dict: + if hasattr(self.pipeline, "get_metrics_summary"): + metrics = self.pipeline.get_metrics_summary() + if "error" not in metrics: + return metrics + return {"stages": {}, "pipeline": {"avg_ms": 0}} + + def _render_metrics(self, metrics: dict) -> list[str]: + stages = metrics.get("stages", {}) + + if not stages: + return self._render_empty() + + active_stages = { + name: stats for name, stats in stages.items() if stats.get("avg_ms", 0) > 0 + } + + if not active_stages: + return self._render_empty() + + total_avg = sum(s["avg_ms"] for s in active_stages.values()) + if total_avg == 0: + total_avg = 1 + + lines: list[str] = [] + lines.append("═" * self.viewport_width) + lines.append(" PIPELINE METRICS ".center(self.viewport_width, "─")) + lines.append("─" * self.viewport_width) + + header = f"{'STAGE':<20} {'AVG_MS':>8} {'FPS %':>8}" + lines.append(header) + lines.append("─" * self.viewport_width) + + for name, stats in sorted(active_stages.items()): + avg_ms = stats.get("avg_ms", 0) + fps_impact = (avg_ms / 16.67) * 100 if avg_ms > 0 else 0 + + row = f"{name:<20} {avg_ms:>7.2f} {fps_impact:>7.1f}%" + lines.append(row[: self.viewport_width]) + + lines.append("─" * 
self.viewport_width) + total_row = ( + f"{'TOTAL':<20} {total_avg:>7.2f} {(total_avg / 16.67) * 100:>7.1f}%" + ) + lines.append(total_row[: self.viewport_width]) + lines.append("─" * self.viewport_width) + lines.append( + f" Frame:{self.frame:04d} Cache:{'HIT' if self._cached_metrics else 'MISS'}" + ) + + while len(lines) < self.viewport_height: + lines.append(" " * self.viewport_width) + + return lines[: self.viewport_height] + + def _render_empty(self) -> list[str]: + lines = [" " * self.viewport_width for _ in range(self.viewport_height)] + msg = "No metrics available" + y = self.viewport_height // 2 + x = (self.viewport_width - len(msg)) // 2 + lines[y] = " " * x + msg + " " * (self.viewport_width - x - len(msg)) + return lines + + def get_items(self) -> list[SourceItem]: + return self.fetch() + + +class CachedDataSource(DataSource): + """Data source that wraps another source with caching.""" + + def __init__(self, source: DataSource, max_items: int = 100): + self.source = source + self.max_items = max_items + + @property + def name(self) -> str: + return f"cached:{self.source.name}" + + def fetch(self) -> list[SourceItem]: + items = self.source.fetch() + return items[: self.max_items] + + def get_items(self) -> list[SourceItem]: + if not hasattr(self, "_items") or self._items is None: + self._items = self.fetch() + return self._items + + +class TransformDataSource(DataSource): + """Data source that transforms items from another source. + + Applies optional filter and map functions to each item. + This enables chaining: source → transform → transformed output. 
class TransformDataSource(DataSource):
    """Data source that transforms items from another source.

    Applies an optional filter and an optional map function to each item.
    This enables chaining: source → transform → transformed output.

    Args:
        source: The source to fetch items from
        filter_fn: Optional function(item: SourceItem) -> bool
        map_fn: Optional function(item: SourceItem) -> SourceItem
    """

    def __init__(
        self,
        source: DataSource,
        filter_fn: Callable[[SourceItem], bool] | None = None,
        map_fn: Callable[[SourceItem], SourceItem] | None = None,
    ):
        self.source = source
        self.filter_fn = filter_fn
        self.map_fn = map_fn

    @property
    def name(self) -> str:
        return f"transform:{self.source.name}"

    def fetch(self) -> list[SourceItem]:
        """Fetch from the wrapped source, then filter, then map."""
        fetched = self.source.fetch()
        if self.filter_fn:
            fetched = [entry for entry in fetched if self.filter_fn(entry)]
        if self.map_fn:
            fetched = [self.map_fn(entry) for entry in fetched]
        return fetched


class CompositeDataSource(DataSource):
    """Concatenates the items of several sources, in list order."""

    def __init__(self, sources: list[DataSource]):
        self.sources = sources

    @property
    def name(self) -> str:
        return "composite"

    def fetch(self) -> list[SourceItem]:
        combined: list[SourceItem] = []
        for child in self.sources:
            combined.extend(child.fetch())
        return combined


class SourceRegistry:
    """Name → DataSource registry with a configurable default source."""

    def __init__(self):
        self._sources: dict[str, DataSource] = {}
        self._default: str | None = None

    def register(self, source: DataSource, default: bool = False) -> None:
        """Add a source; the first one registered becomes the default."""
        self._sources[source.name] = source
        if default or self._default is None:
            self._default = source.name

    def get(self, name: str) -> DataSource | None:
        """Look up a source by name (None if unknown)."""
        return self._sources.get(name)

    def list_all(self) -> dict[str, DataSource]:
        """Shallow copy of the registered name → source mapping."""
        return dict(self._sources)

    def default(self) -> DataSource | None:
        """Return the default source, if any has been registered."""
        return self._sources.get(self._default) if self._default else None

    def create_headlines(self) -> HeadlinesDataSource:
        return HeadlinesDataSource()

    def create_poetry(self) -> PoetryDataSource:
        return PoetryDataSource()


# Process-wide registry singleton, created lazily by get_source_registry().
_global_registry: SourceRegistry | None = None


def get_source_registry() -> SourceRegistry:
    """Return the process-wide SourceRegistry, creating it on first use."""
    global _global_registry
    if _global_registry is None:
        _global_registry = SourceRegistry()
    return _global_registry


def init_default_sources() -> SourceRegistry:
    """Initialize the default source registry with standard sources."""
    registry = get_source_registry()
    registry.register(HeadlinesDataSource(), default=True)
    registry.register(PoetryDataSource())
    return registry
- - -def get_monitor(): - """Get the performance monitor.""" - try: - from engine.effects.performance import get_monitor as _get_monitor - - return _get_monitor() - except Exception: - return None - - -class TerminalDisplay: - """ANSI terminal display backend.""" - - def __init__(self): - self.width = 80 - self.height = 24 - - def init(self, width: int, height: int) -> None: - from engine.terminal import CURSOR_OFF - - self.width = width - self.height = height - print(CURSOR_OFF, end="", flush=True) - - def show(self, buffer: list[str]) -> None: - import sys - - t0 = time.perf_counter() - sys.stdout.buffer.write("".join(buffer).encode()) - sys.stdout.flush() - elapsed_ms = (time.perf_counter() - t0) * 1000 - - monitor = get_monitor() - if monitor: - chars_in = sum(len(line) for line in buffer) - monitor.record_effect("terminal_display", elapsed_ms, chars_in, chars_in) - - def clear(self) -> None: - from engine.terminal import CLR - - print(CLR, end="", flush=True) - - def cleanup(self) -> None: - from engine.terminal import CURSOR_ON - - print(CURSOR_ON, end="", flush=True) - - -class NullDisplay: - """Headless/null display - discards all output.""" - - def init(self, width: int, height: int) -> None: - self.width = width - self.height = height - - def show(self, buffer: list[str]) -> None: - monitor = get_monitor() - if monitor: - t0 = time.perf_counter() - chars_in = sum(len(line) for line in buffer) - elapsed_ms = (time.perf_counter() - t0) * 1000 - monitor.record_effect("null_display", elapsed_ms, chars_in, chars_in) - - def clear(self) -> None: - pass - - def cleanup(self) -> None: - pass diff --git a/engine/display/__init__.py b/engine/display/__init__.py new file mode 100644 index 0000000..bbf9a30 --- /dev/null +++ b/engine/display/__init__.py @@ -0,0 +1,290 @@ +""" +Display backend system with registry pattern. + +Allows swapping output backends via the Display protocol. +Supports auto-discovery of display backends. 
+""" + +from enum import Enum, auto +from typing import Protocol + +# Optional backend - requires moderngl package +try: + from engine.display.backends.moderngl import ModernGLDisplay + + _MODERNGL_AVAILABLE = True +except ImportError: + ModernGLDisplay = None + _MODERNGL_AVAILABLE = False + +from engine.display.backends.multi import MultiDisplay +from engine.display.backends.null import NullDisplay +from engine.display.backends.pygame import PygameDisplay +from engine.display.backends.replay import ReplayDisplay +from engine.display.backends.terminal import TerminalDisplay +from engine.display.backends.websocket import WebSocketDisplay + + +class BorderMode(Enum): + """Border rendering modes for displays.""" + + OFF = auto() # No border + SIMPLE = auto() # Traditional border with FPS/frame time + UI = auto() # Right-side UI panel with interactive controls + + +class Display(Protocol): + """Protocol for display backends. + + Required attributes: + - width: int + - height: int + + Required methods (duck typing - actual signatures may vary): + - init(width, height, reuse=False) + - show(buffer, border=False) + - clear() + - cleanup() + - get_dimensions() -> (width, height) + + Optional attributes (for UI mode): + - ui_panel: UIPanel instance (set by app when border=UI) + + Optional methods: + - is_quit_requested() -> bool + - clear_quit_request() -> None + """ + + width: int + height: int + + +class DisplayRegistry: + """Registry for display backends with auto-discovery.""" + + _backends: dict[str, type[Display]] = {} + _initialized = False + + @classmethod + def register(cls, name: str, backend_class: type[Display]) -> None: + cls._backends[name.lower()] = backend_class + + @classmethod + def get(cls, name: str) -> type[Display] | None: + return cls._backends.get(name.lower()) + + @classmethod + def list_backends(cls) -> list[str]: + return list(cls._backends.keys()) + + @classmethod + def create(cls, name: str, **kwargs) -> Display | None: + cls.initialize() + 
backend_class = cls.get(name) + if backend_class: + return backend_class(**kwargs) + return None + + @classmethod + def initialize(cls) -> None: + if cls._initialized: + return + cls.register("terminal", TerminalDisplay) + cls.register("null", NullDisplay) + cls.register("replay", ReplayDisplay) + cls.register("websocket", WebSocketDisplay) + cls.register("pygame", PygameDisplay) + if _MODERNGL_AVAILABLE: + cls.register("moderngl", ModernGLDisplay) # type: ignore[arg-type] + cls._initialized = True + + @classmethod + def create_multi(cls, names: list[str]) -> MultiDisplay | None: + displays = [] + for name in names: + backend = cls.create(name) + if backend: + displays.append(backend) + else: + return None + if not displays: + return None + return MultiDisplay(displays) + + +def get_monitor(): + """Get the performance monitor.""" + try: + from engine.effects.performance import get_monitor as _get_monitor + + return _get_monitor() + except Exception: + return None + + +def _strip_ansi(s: str) -> str: + """Strip ANSI escape sequences from string for length calculation.""" + import re + + return re.sub(r"\x1b\[[0-9;]*[a-zA-Z]", "", s) + + +def _render_simple_border( + buf: list[str], width: int, height: int, fps: float = 0.0, frame_time: float = 0.0 +) -> list[str]: + """Render a traditional border around the buffer.""" + if not buf or width < 3 or height < 3: + return buf + + inner_w = width - 2 + inner_h = height - 2 + + cropped = [] + for i in range(min(inner_h, len(buf))): + line = buf[i] + visible_len = len(_strip_ansi(line)) + if visible_len > inner_w: + cropped.append(line[:inner_w]) + else: + cropped.append(line + " " * (inner_w - visible_len)) + + while len(cropped) < inner_h: + cropped.append(" " * inner_w) + + if fps > 0: + fps_str = f" FPS:{fps:.0f}" + if len(fps_str) < inner_w: + right_len = inner_w - len(fps_str) + top_border = "┌" + "─" * right_len + fps_str + "┐" + else: + top_border = "┌" + "─" * inner_w + "┐" + else: + top_border = "┌" + "─" * 
inner_w + "┐" + + if frame_time > 0: + ft_str = f" {frame_time:.1f}ms" + if len(ft_str) < inner_w: + right_len = inner_w - len(ft_str) + bottom_border = "└" + "─" * right_len + ft_str + "┘" + else: + bottom_border = "└" + "─" * inner_w + "┘" + else: + bottom_border = "└" + "─" * inner_w + "┘" + + result = [top_border] + for line in cropped: + if len(line) < inner_w: + line = line + " " * (inner_w - len(line)) + elif len(line) > inner_w: + line = line[:inner_w] + result.append("│" + line + "│") + result.append(bottom_border) + + return result + + +def render_ui_panel( + buf: list[str], + width: int, + height: int, + ui_panel, + fps: float = 0.0, + frame_time: float = 0.0, +) -> list[str]: + """Render buffer with a right-side UI panel.""" + from engine.pipeline.ui import UIPanel + + if not isinstance(ui_panel, UIPanel): + return _render_simple_border(buf, width, height, fps, frame_time) + + panel_width = min(ui_panel.config.panel_width, width - 4) + main_width = width - panel_width - 1 + + panel_lines = ui_panel.render(panel_width, height) + + main_buf = buf[: height - 2] + main_result = _render_simple_border( + main_buf, main_width + 2, height, fps, frame_time + ) + + combined = [] + for i in range(height): + if i < len(main_result): + main_line = main_result[i] + if len(main_line) >= 2: + main_content = ( + main_line[1:-1] if main_line[-1] in "│┌┐└┘" else main_line[1:] + ) + main_content = main_content.ljust(main_width)[:main_width] + else: + main_content = " " * main_width + else: + main_content = " " * main_width + + panel_idx = i + panel_line = ( + panel_lines[panel_idx][:panel_width].ljust(panel_width) + if panel_idx < len(panel_lines) + else " " * panel_width + ) + + separator = "│" if 0 < i < height - 1 else "┼" if i == 0 else "┴" + combined.append(main_content + separator + panel_line) + + return combined + + +def render_border( + buf: list[str], + width: int, + height: int, + fps: float = 0.0, + frame_time: float = 0.0, + border_mode: BorderMode | bool = 
def render_border(
    buf: list[str],
    width: int,
    height: int,
    fps: float = 0.0,
    frame_time: float = 0.0,
    border_mode: BorderMode | bool = BorderMode.SIMPLE,
) -> list[str]:
    """Render a border or UI panel around the buffer.

    Args:
        buf: Input buffer
        width: Display width
        height: Display height
        fps: FPS shown on the top border
        frame_time: Frame time shown on the bottom border
        border_mode: Border rendering mode; booleans are accepted for
            backward compatibility (True → SIMPLE, False → OFF)

    Returns:
        Buffer with border/panel applied (unchanged for BorderMode.OFF)
    """
    # Accept legacy boolean flags.
    if isinstance(border_mode, bool):
        border_mode = BorderMode.SIMPLE if border_mode else BorderMode.OFF

    if border_mode is BorderMode.OFF:
        return buf

    # BorderMode.UI needs a UIPanel injected by the display; this function
    # takes no panel argument, so displays that own a panel call
    # render_ui_panel directly and UI degrades to the simple border here.
    return _render_simple_border(buf, width, height, fps, frame_time)


__all__ = [
    "Display",
    "DisplayRegistry",
    "get_monitor",
    "render_border",
    "render_ui_panel",
    "BorderMode",
    "TerminalDisplay",
    "NullDisplay",
    "ReplayDisplay",
    "WebSocketDisplay",
    "MultiDisplay",
    "PygameDisplay",
]

if _MODERNGL_AVAILABLE:
    __all__.append("ModernGLDisplay")
+ + Args: + width: Terminal width in characters + height: Terminal height in rows + reuse: If True, use reuse mode for child displays + """ + self.width = width + self.height = height + for d in self.displays: + d.init(width, height, reuse=reuse) + + def show(self, buffer: list[str], border: bool = False) -> None: + for d in self.displays: + d.show(buffer, border=border) + + def clear(self) -> None: + for d in self.displays: + d.clear() + + def get_dimensions(self) -> tuple[int, int]: + """Get dimensions from the first child display that supports it.""" + for d in self.displays: + if hasattr(d, "get_dimensions"): + return d.get_dimensions() + return (self.width, self.height) + + def cleanup(self) -> None: + for d in self.displays: + d.cleanup() diff --git a/engine/display/backends/null.py b/engine/display/backends/null.py new file mode 100644 index 0000000..835644f --- /dev/null +++ b/engine/display/backends/null.py @@ -0,0 +1,183 @@ +""" +Null/headless display backend. +""" + +import json +import time +from pathlib import Path +from typing import Any + + +class NullDisplay: + """Headless/null display - discards all output. + + This display does nothing - useful for headless benchmarking + or when no display output is needed. Captures last buffer + for testing purposes. Supports frame recording for replay + and file export/import. + """ + + width: int = 80 + height: int = 24 + _last_buffer: list[str] | None = None + + def __init__(self): + self._last_buffer = None + self._is_recording = False + self._recorded_frames: list[dict[str, Any]] = [] + self._frame_count = 0 + + def init(self, width: int, height: int, reuse: bool = False) -> None: + """Initialize display with dimensions. 
+ + Args: + width: Terminal width in characters + height: Terminal height in rows + reuse: Ignored for NullDisplay (no resources to reuse) + """ + self.width = width + self.height = height + self._last_buffer = None + + def show(self, buffer: list[str], border: bool = False) -> None: + import sys + + from engine.display import get_monitor, render_border + + fps = 0.0 + frame_time = 0.0 + monitor = get_monitor() + if monitor: + stats = monitor.get_stats() + avg_ms = stats.get("pipeline", {}).get("avg_ms", 0) if stats else 0 + frame_count = stats.get("frame_count", 0) if stats else 0 + if avg_ms and frame_count > 0: + fps = 1000.0 / avg_ms + frame_time = avg_ms + + if border: + buffer = render_border(buffer, self.width, self.height, fps, frame_time) + + self._last_buffer = buffer + + if self._is_recording: + self._recorded_frames.append( + { + "frame_number": self._frame_count, + "buffer": buffer, + "width": self.width, + "height": self.height, + } + ) + + if self._frame_count <= 5 or self._frame_count % 10 == 0: + sys.stdout.write("\n" + "=" * 80 + "\n") + sys.stdout.write( + f"Frame {self._frame_count} (buffer height: {len(buffer)})\n" + ) + sys.stdout.write("=" * 80 + "\n") + for i, line in enumerate(buffer[:30]): + sys.stdout.write(f"{i:2}: {line}\n") + if len(buffer) > 30: + sys.stdout.write(f"... ({len(buffer) - 30} more lines)\n") + sys.stdout.flush() + + if monitor: + t0 = time.perf_counter() + chars_in = sum(len(line) for line in buffer) + elapsed_ms = (time.perf_counter() - t0) * 1000 + monitor.record_effect("null_display", elapsed_ms, chars_in, chars_in) + + self._frame_count += 1 + + def start_recording(self) -> None: + """Begin recording frames.""" + self._is_recording = True + self._recorded_frames = [] + + def stop_recording(self) -> None: + """Stop recording frames.""" + self._is_recording = False + + def get_frames(self) -> list[list[str]]: + """Get recorded frames as list of buffers. 
+ + Returns: + List of buffers, each buffer is a list of strings (lines) + """ + return [frame["buffer"] for frame in self._recorded_frames] + + def get_recorded_data(self) -> list[dict[str, Any]]: + """Get full recorded data including metadata. + + Returns: + List of frame dicts with 'frame_number', 'buffer', 'width', 'height' + """ + return self._recorded_frames + + def clear_recording(self) -> None: + """Clear recorded frames.""" + self._recorded_frames = [] + + def save_recording(self, filepath: str | Path) -> None: + """Save recorded frames to a JSON file. + + Args: + filepath: Path to save the recording + """ + path = Path(filepath) + data = { + "version": 1, + "display": "null", + "width": self.width, + "height": self.height, + "frame_count": len(self._recorded_frames), + "frames": self._recorded_frames, + } + path.write_text(json.dumps(data, indent=2)) + + def load_recording(self, filepath: str | Path) -> list[dict[str, Any]]: + """Load recorded frames from a JSON file. + + Args: + filepath: Path to load the recording from + + Returns: + List of frame dicts + """ + path = Path(filepath) + data = json.loads(path.read_text()) + self._recorded_frames = data.get("frames", []) + self.width = data.get("width", 80) + self.height = data.get("height", 24) + return self._recorded_frames + + def replay_frames(self) -> list[list[str]]: + """Get frames for replay. + + Returns: + List of buffers for replay + """ + return self.get_frames() + + def clear(self) -> None: + pass + + def cleanup(self) -> None: + pass + + def get_dimensions(self) -> tuple[int, int]: + """Get current dimensions. 
+ + Returns: + (width, height) in character cells + """ + return (self.width, self.height) + + def is_quit_requested(self) -> bool: + """Check if quit was requested (optional protocol method).""" + return False + + def clear_quit_request(self) -> None: + """Clear quit request (optional protocol method).""" + pass diff --git a/engine/display/backends/pygame.py b/engine/display/backends/pygame.py new file mode 100644 index 0000000..989bab1 --- /dev/null +++ b/engine/display/backends/pygame.py @@ -0,0 +1,369 @@ +""" +Pygame display backend - renders to a native application window. +""" + +import time + +from engine.display.renderer import parse_ansi + + +class PygameDisplay: + """Pygame display backend - renders to native window. + + Supports reuse mode - when reuse=True, skips SDL initialization + and reuses the existing pygame window from a previous instance. + """ + + width: int = 80 + window_width: int = 800 + window_height: int = 600 + + def __init__( + self, + cell_width: int = 10, + cell_height: int = 18, + window_width: int = 800, + window_height: int = 600, + target_fps: float = 30.0, + ): + self.width = 80 + self.height = 24 + self.cell_width = cell_width + self.cell_height = cell_height + self.window_width = window_width + self.window_height = window_height + self.target_fps = target_fps + self._initialized = False + self._pygame = None + self._screen = None + self._font = None + self._resized = False + self._quit_requested = False + self._last_frame_time = 0.0 + self._frame_period = 1.0 / target_fps if target_fps > 0 else 0 + self._glyph_cache = {} + + def _get_font_path(self) -> str | None: + """Get font path for rendering.""" + import os + import sys + from pathlib import Path + + env_font = os.environ.get("MAINLINE_PYGAME_FONT") + if env_font and os.path.exists(env_font): + return env_font + + def search_dir(base_path: str) -> str | None: + if not os.path.exists(base_path): + return None + if os.path.isfile(base_path): + return base_path + for font_file 
in Path(base_path).rglob("*"): + if font_file.suffix.lower() in (".ttf", ".otf", ".ttc"): + name = font_file.stem.lower() + if "geist" in name and ("nerd" in name or "mono" in name): + return str(font_file) + return None + + search_dirs = [] + if sys.platform == "darwin": + search_dirs.append(os.path.expanduser("~/Library/Fonts/")) + elif sys.platform == "win32": + search_dirs.append( + os.path.expanduser("~\\AppData\\Local\\Microsoft\\Windows\\Fonts\\") + ) + else: + search_dirs.extend( + [ + os.path.expanduser("~/.local/share/fonts/"), + os.path.expanduser("~/.fonts/"), + "/usr/share/fonts/", + ] + ) + + for search_dir_path in search_dirs: + found = search_dir(search_dir_path) + if found: + return found + + return None + + def init(self, width: int, height: int, reuse: bool = False) -> None: + """Initialize display with dimensions. + + Args: + width: Terminal width in characters + height: Terminal height in rows + reuse: If True, attach to existing pygame window instead of creating new + """ + self.width = width + self.height = height + + try: + import pygame + except ImportError: + return + + if reuse and PygameDisplay._pygame_initialized: + self._pygame = pygame + self._initialized = True + return + + pygame.init() + pygame.display.set_caption("Mainline") + + self._screen = pygame.display.set_mode( + (self.window_width, self.window_height), + pygame.RESIZABLE, + ) + self._pygame = pygame + PygameDisplay._pygame_initialized = True + + # Calculate character dimensions from actual window size + self.width = max(1, self.window_width // self.cell_width) + self.height = max(1, self.window_height // self.cell_height) + + font_path = self._get_font_path() + if font_path: + try: + self._font = pygame.font.Font(font_path, self.cell_height - 2) + except Exception: + self._font = pygame.font.SysFont("monospace", self.cell_height - 2) + else: + self._font = pygame.font.SysFont("monospace", self.cell_height - 2) + + # Check if font supports box-drawing characters; if not, 
try to find one + self._use_fallback_border = False + if self._font: + try: + # Test rendering some key box-drawing characters + test_chars = ["┌", "─", "┐", "│", "└", "┘"] + for ch in test_chars: + surf = self._font.render(ch, True, (255, 255, 255)) + # If surface is empty (width=0 or all black), font lacks glyph + if surf.get_width() == 0: + raise ValueError("Missing glyph") + except Exception: + # Font doesn't support box-drawing, will use line drawing fallback + self._use_fallback_border = True + + self._initialized = True + + def show(self, buffer: list[str], border: bool = False) -> None: + if not self._initialized or not self._pygame: + return + + t0 = time.perf_counter() + + for event in self._pygame.event.get(): + if event.type == self._pygame.QUIT: + self._quit_requested = True + elif event.type == self._pygame.KEYDOWN: + if event.key in (self._pygame.K_ESCAPE, self._pygame.K_c): + if event.key == self._pygame.K_c and not ( + event.mod & self._pygame.KMOD_LCTRL + or event.mod & self._pygame.KMOD_RCTRL + ): + continue + self._quit_requested = True + elif event.type == self._pygame.VIDEORESIZE: + self.window_width = event.w + self.window_height = event.h + self.width = max(1, self.window_width // self.cell_width) + self.height = max(1, self.window_height // self.cell_height) + self._resized = True + + # FPS limiting - skip frame if we're going too fast + if self._frame_period > 0: + now = time.perf_counter() + elapsed = now - self._last_frame_time + if elapsed < self._frame_period: + return # Skip this frame + self._last_frame_time = now + + # Get metrics for border display + fps = 0.0 + frame_time = 0.0 + from engine.display import get_monitor + + monitor = get_monitor() + if monitor: + stats = monitor.get_stats() + avg_ms = stats.get("pipeline", {}).get("avg_ms", 0) if stats else 0 + frame_count = stats.get("frame_count", 0) if stats else 0 + if avg_ms and frame_count > 0: + fps = 1000.0 / avg_ms + frame_time = avg_ms + + self._screen.fill((0, 0, 0)) + + 
# If border requested but font lacks box-drawing glyphs, use graphical fallback + if border and self._use_fallback_border: + self._draw_fallback_border(fps, frame_time) + # Adjust content area to fit inside border + content_offset_x = self.cell_width + content_offset_y = self.cell_height + self.window_width - 2 * self.cell_width + self.window_height - 2 * self.cell_height + else: + # Normal rendering (with or without text border) + content_offset_x = 0 + content_offset_y = 0 + + if border: + from engine.display import render_border + + buffer = render_border(buffer, self.width, self.height, fps, frame_time) + + blit_list = [] + + for row_idx, line in enumerate(buffer[: self.height]): + if row_idx >= self.height: + break + + tokens = parse_ansi(line) + x_pos = content_offset_x + + for text, fg, bg, _bold in tokens: + if not text: + continue + + # Use None as key for no background + bg_key = bg if bg != (0, 0, 0) else None + cache_key = (text, fg, bg_key) + + if cache_key not in self._glyph_cache: + # Render and cache + if bg_key is not None: + self._glyph_cache[cache_key] = self._font.render( + text, True, fg, bg_key + ) + else: + self._glyph_cache[cache_key] = self._font.render(text, True, fg) + + surface = self._glyph_cache[cache_key] + blit_list.append( + (surface, (x_pos, content_offset_y + row_idx * self.cell_height)) + ) + x_pos += self._font.size(text)[0] + + self._screen.blits(blit_list) + + # Draw fallback border using graphics if needed + if border and self._use_fallback_border: + self._draw_fallback_border(fps, frame_time) + + self._pygame.display.flip() + + elapsed_ms = (time.perf_counter() - t0) * 1000 + + if monitor: + chars_in = sum(len(line) for line in buffer) + monitor.record_effect("pygame_display", elapsed_ms, chars_in, chars_in) + + def _draw_fallback_border(self, fps: float, frame_time: float) -> None: + """Draw border using pygame graphics primitives instead of text.""" + if not self._screen or not self._pygame: + return + + # Colors + 
border_color = (0, 255, 0) # Green (like terminal border) + text_color = (255, 255, 255) + + # Calculate dimensions + x1 = 0 + y1 = 0 + x2 = self.window_width - 1 + y2 = self.window_height - 1 + + # Draw outer rectangle + self._pygame.draw.rect( + self._screen, border_color, (x1, y1, x2 - x1 + 1, y2 - y1 + 1), 1 + ) + + # Draw top border with FPS + if fps > 0: + fps_text = f" FPS:{fps:.0f}" + else: + fps_text = "" + # We need to render this text with a fallback font that has basic ASCII + # Use system font which should have these characters + try: + font = self._font # May not have box chars but should have alphanumeric + text_surf = font.render(fps_text, True, text_color, (0, 0, 0)) + text_rect = text_surf.get_rect() + # Position on top border, right-aligned + text_x = x2 - text_rect.width - 5 + text_y = y1 + 2 + self._screen.blit(text_surf, (text_x, text_y)) + except Exception: + pass + + # Draw bottom border with frame time + if frame_time > 0: + ft_text = f" {frame_time:.1f}ms" + try: + ft_surf = self._font.render(ft_text, True, text_color, (0, 0, 0)) + ft_rect = ft_surf.get_rect() + ft_x = x2 - ft_rect.width - 5 + ft_y = y2 - ft_rect.height - 2 + self._screen.blit(ft_surf, (ft_x, ft_y)) + except Exception: + pass + + def clear(self) -> None: + if self._screen and self._pygame: + self._screen.fill((0, 0, 0)) + self._pygame.display.flip() + + def get_dimensions(self) -> tuple[int, int]: + """Get current terminal dimensions based on window size. 
+ + Returns: + (width, height) in character cells + """ + # Query actual window size and recalculate character cells + if self._screen and self._pygame: + try: + w, h = self._screen.get_size() + if w != self.window_width or h != self.window_height: + self.window_width = w + self.window_height = h + self.width = max(1, w // self.cell_width) + self.height = max(1, h // self.cell_height) + except Exception: + pass + return self.width, self.height + + def cleanup(self, quit_pygame: bool = True) -> None: + """Cleanup display resources. + + Args: + quit_pygame: If True, quit pygame entirely. Set to False when + reusing the display to avoid closing shared window. + """ + if quit_pygame and self._pygame: + self._pygame.quit() + PygameDisplay._pygame_initialized = False + + @classmethod + def reset_state(cls) -> None: + """Reset pygame state - useful for testing.""" + cls._pygame_initialized = False + + def is_quit_requested(self) -> bool: + """Check if user requested quit (Ctrl+C, Ctrl+Q, or Escape). + + Returns True if the user pressed Ctrl+C, Ctrl+Q, or Escape. + The main loop should check this and raise KeyboardInterrupt. + """ + return self._quit_requested + + def clear_quit_request(self) -> bool: + """Clear the quit request flag after handling. + + Returns the previous quit request state. + """ + was_requested = self._quit_requested + self._quit_requested = False + return was_requested diff --git a/engine/display/backends/replay.py b/engine/display/backends/replay.py new file mode 100644 index 0000000..4076ffe --- /dev/null +++ b/engine/display/backends/replay.py @@ -0,0 +1,122 @@ +""" +Replay display backend - plays back recorded frames. +""" + +from typing import Any + + +class ReplayDisplay: + """Replay display - plays back recorded frames. + + This display reads frames from a recording (list of frame data) + and yields them sequentially, useful for testing and demo purposes. 
+ """ + + width: int = 80 + height: int = 24 + + def __init__(self): + self._frames: list[dict[str, Any]] = [] + self._current_frame = 0 + self._playback_index = 0 + self._loop = False + + def init(self, width: int, height: int, reuse: bool = False) -> None: + """Initialize display with dimensions. + + Args: + width: Terminal width in characters + height: Terminal height in rows + reuse: Ignored for ReplayDisplay + """ + self.width = width + self.height = height + + def set_frames(self, frames: list[dict[str, Any]]) -> None: + """Set frames to replay. + + Args: + frames: List of frame dicts with 'buffer', 'width', 'height' + """ + self._frames = frames + self._current_frame = 0 + self._playback_index = 0 + + def set_loop(self, loop: bool) -> None: + """Set loop playback mode. + + Args: + loop: True to loop, False to stop at end + """ + self._loop = loop + + def show(self, buffer: list[str], border: bool = False) -> None: + """Display a frame (ignored in replay mode). + + Args: + buffer: Buffer to display (ignored) + border: Border flag (ignored) + """ + pass + + def get_next_frame(self) -> list[str] | None: + """Get the next frame in the recording. + + Returns: + Buffer list of strings, or None if playback is done + """ + if not self._frames: + return None + + if self._playback_index >= len(self._frames): + if self._loop: + self._playback_index = 0 + else: + return None + + frame = self._frames[self._playback_index] + self._playback_index += 1 + return frame.get("buffer") + + def reset(self) -> None: + """Reset playback to the beginning.""" + self._playback_index = 0 + + def seek(self, index: int) -> None: + """Seek to a specific frame. + + Args: + index: Frame index to seek to + """ + if 0 <= index < len(self._frames): + self._playback_index = index + + def is_finished(self) -> bool: + """Check if playback is finished. 
+ + Returns: + True if at end of frames and not looping + """ + return not self._loop and self._playback_index >= len(self._frames) + + def clear(self) -> None: + pass + + def cleanup(self) -> None: + pass + + def get_dimensions(self) -> tuple[int, int]: + """Get current dimensions. + + Returns: + (width, height) in character cells + """ + return (self.width, self.height) + + def is_quit_requested(self) -> bool: + """Check if quit was requested (optional protocol method).""" + return False + + def clear_quit_request(self) -> None: + """Clear quit request (optional protocol method).""" + pass diff --git a/engine/display/backends/terminal.py b/engine/display/backends/terminal.py new file mode 100644 index 0000000..a699e43 --- /dev/null +++ b/engine/display/backends/terminal.py @@ -0,0 +1,133 @@ +""" +ANSI terminal display backend. +""" + +import os + + +class TerminalDisplay: + """ANSI terminal display backend. + + Renders buffer to stdout using ANSI escape codes. + Supports reuse - when reuse=True, skips re-initializing terminal state. + Auto-detects terminal dimensions on init. + """ + + width: int = 80 + height: int = 24 + _initialized: bool = False + + def __init__(self, target_fps: float = 30.0): + self.target_fps = target_fps + self._frame_period = 1.0 / target_fps if target_fps > 0 else 0 + self._last_frame_time = 0.0 + self._cached_dimensions: tuple[int, int] | None = None + + def init(self, width: int, height: int, reuse: bool = False) -> None: + """Initialize display with dimensions. + + If width/height are not provided (0/None), auto-detects terminal size. + Otherwise uses provided dimensions or falls back to terminal size + if the provided dimensions exceed terminal capacity. 
+ + Args: + width: Desired terminal width (0 = auto-detect) + height: Desired terminal height (0 = auto-detect) + reuse: If True, skip terminal re-initialization + """ + from engine.terminal import CURSOR_OFF + + # Auto-detect terminal size (handle case where no terminal) + try: + term_size = os.get_terminal_size() + term_width = term_size.columns + term_height = term_size.lines + except OSError: + # No terminal available (e.g., in tests) + term_width = width if width > 0 else 80 + term_height = height if height > 0 else 24 + + # Use provided dimensions if valid, otherwise use terminal size + if width > 0 and height > 0: + self.width = min(width, term_width) + self.height = min(height, term_height) + else: + self.width = term_width + self.height = term_height + + if not reuse or not self._initialized: + print(CURSOR_OFF, end="", flush=True) + self._initialized = True + + def get_dimensions(self) -> tuple[int, int]: + """Get current terminal dimensions. + + Returns cached dimensions to avoid querying terminal every frame, + which can cause inconsistent results. Dimensions are only refreshed + when they actually change. + + Returns: + (width, height) in character cells + """ + try: + term_size = os.get_terminal_size() + new_dims = (term_size.columns, term_size.lines) + except OSError: + new_dims = (self.width, self.height) + + # Only update cached dimensions if they actually changed + if self._cached_dimensions is None or self._cached_dimensions != new_dims: + self._cached_dimensions = new_dims + self.width = new_dims[0] + self.height = new_dims[1] + + return self._cached_dimensions + + def show(self, buffer: list[str], border: bool = False) -> None: + import sys + + from engine.display import get_monitor, render_border + + # Note: Frame rate limiting is handled by the caller (e.g., FrameTimer). + # This display renders every frame it receives. 
+ + # Get metrics for border display + fps = 0.0 + frame_time = 0.0 + monitor = get_monitor() + if monitor: + stats = monitor.get_stats() + avg_ms = stats.get("pipeline", {}).get("avg_ms", 0) if stats else 0 + frame_count = stats.get("frame_count", 0) if stats else 0 + if avg_ms and frame_count > 0: + fps = 1000.0 / avg_ms + frame_time = avg_ms + + # Apply border if requested + from engine.display import BorderMode + + if border and border != BorderMode.OFF: + buffer = render_border(buffer, self.width, self.height, fps, frame_time) + + # Write buffer with cursor home + erase down to avoid flicker + output = "\033[H\033[J" + "".join(buffer) + sys.stdout.buffer.write(output.encode()) + sys.stdout.flush() + + def clear(self) -> None: + from engine.terminal import CLR + + print(CLR, end="", flush=True) + + def cleanup(self) -> None: + from engine.terminal import CURSOR_ON + + print(CURSOR_ON, end="", flush=True) + + def is_quit_requested(self) -> bool: + """Check if quit was requested (optional protocol method).""" + return False + + def clear_quit_request(self) -> None: + """Clear quit request (optional protocol method).""" + pass diff --git a/engine/display/backends/websocket.py b/engine/display/backends/websocket.py new file mode 100644 index 0000000..b159cfd --- /dev/null +++ b/engine/display/backends/websocket.py @@ -0,0 +1,464 @@ +""" +WebSocket display backend - broadcasts frame buffer to connected web clients. 
+ +Supports streaming protocols: +- Full frame (JSON) - default for compatibility +- Binary streaming - efficient binary protocol +- Diff streaming - only sends changed lines + +TODO: Transform to a true streaming backend with: +- Proper WebSocket message streaming (currently sends full buffer each frame) +- Connection pooling and backpressure handling +- Binary protocol for efficiency (instead of JSON) +- Client management with proper async handling +- Mark for deprecation if replaced by a new streaming implementation + +Current implementation: Simple broadcast of text frames to all connected clients. +""" + +import asyncio +import base64 +import json +import threading +import time +from enum import IntFlag + +from engine.display.streaming import ( + MessageType, + compress_frame, + compute_diff, + encode_binary_message, + encode_diff_message, +) + + +class StreamingMode(IntFlag): + """Streaming modes for WebSocket display.""" + + JSON = 0x01 # Full JSON frames (default, compatible) + BINARY = 0x02 # Binary compression + DIFF = 0x04 # Differential updates + + +try: + import websockets +except ImportError: + websockets = None + + +def get_monitor(): + """Get the performance monitor.""" + try: + from engine.effects.performance import get_monitor as _get_monitor + + return _get_monitor() + except Exception: + return None + + +class WebSocketDisplay: + """WebSocket display backend - broadcasts to HTML Canvas clients.""" + + width: int = 80 + height: int = 24 + + def __init__( + self, + host: str = "0.0.0.0", + port: int = 8765, + http_port: int = 8766, + streaming_mode: StreamingMode = StreamingMode.JSON, + ): + self.host = host + self.port = port + self.http_port = http_port + self.width = 80 + self.height = 24 + self._clients: set = set() + self._server_running = False + self._http_running = False + self._server_thread: threading.Thread | None = None + self._http_thread: threading.Thread | None = None + self._available = True + self._max_clients = 10 + 
self._client_connected_callback = None + self._client_disconnected_callback = None + self._command_callback = None + self._controller = None # Reference to UI panel or pipeline controller + self._frame_delay = 0.0 + self._httpd = None # HTTP server instance + + # Streaming configuration + self._streaming_mode = streaming_mode + self._last_buffer: list[str] = [] + self._client_capabilities: dict = {} # Track client capabilities + + try: + import websockets as _ws + + self._available = _ws is not None + except ImportError: + self._available = False + + def is_available(self) -> bool: + """Check if WebSocket support is available.""" + return self._available + + def init(self, width: int, height: int, reuse: bool = False) -> None: + """Initialize display with dimensions and start server. + + Args: + width: Terminal width in characters + height: Terminal height in rows + reuse: If True, skip starting servers (assume already running) + """ + self.width = width + self.height = height + + if not reuse or not self._server_running: + self.start_server() + self.start_http_server() + + def show(self, buffer: list[str], border: bool = False) -> None: + """Broadcast buffer to all connected clients using streaming protocol.""" + t0 = time.perf_counter() + + # Get metrics for border display + fps = 0.0 + frame_time = 0.0 + monitor = get_monitor() + if monitor: + stats = monitor.get_stats() + avg_ms = stats.get("pipeline", {}).get("avg_ms", 0) if stats else 0 + frame_count = stats.get("frame_count", 0) if stats else 0 + if avg_ms and frame_count > 0: + fps = 1000.0 / avg_ms + frame_time = avg_ms + + # Apply border if requested + if border: + from engine.display import render_border + + buffer = render_border(buffer, self.width, self.height, fps, frame_time) + + if not self._clients: + self._last_buffer = buffer + return + + # Send to each client based on their capabilities + disconnected = set() + for client in list(self._clients): + try: + client_id = id(client) + client_mode = 
self._client_capabilities.get( + client_id, StreamingMode.JSON + ) + + if client_mode & StreamingMode.DIFF: + self._send_diff_frame(client, buffer) + elif client_mode & StreamingMode.BINARY: + self._send_binary_frame(client, buffer) + else: + self._send_json_frame(client, buffer) + except Exception: + disconnected.add(client) + + for client in disconnected: + self._clients.discard(client) + if self._client_disconnected_callback: + self._client_disconnected_callback(client) + + self._last_buffer = buffer + + elapsed_ms = (time.perf_counter() - t0) * 1000 + if monitor: + chars_in = sum(len(line) for line in buffer) + monitor.record_effect("websocket_display", elapsed_ms, chars_in, chars_in) + + def _send_json_frame(self, client, buffer: list[str]) -> None: + """Send frame as JSON.""" + frame_data = { + "type": "frame", + "width": self.width, + "height": self.height, + "lines": buffer, + } + message = json.dumps(frame_data) + asyncio.run(client.send(message)) + + def _send_binary_frame(self, client, buffer: list[str]) -> None: + """Send frame as compressed binary.""" + compressed = compress_frame(buffer) + message = encode_binary_message( + MessageType.FULL_FRAME, self.width, self.height, compressed + ) + encoded = base64.b64encode(message).decode("utf-8") + asyncio.run(client.send(encoded)) + + def _send_diff_frame(self, client, buffer: list[str]) -> None: + """Send frame as diff.""" + diff = compute_diff(self._last_buffer, buffer) + + if not diff.changed_lines: + return + + diff_payload = encode_diff_message(diff) + message = encode_binary_message( + MessageType.DIFF_FRAME, self.width, self.height, diff_payload + ) + encoded = base64.b64encode(message).decode("utf-8") + asyncio.run(client.send(encoded)) + + def set_streaming_mode(self, mode: StreamingMode) -> None: + """Set the default streaming mode for new clients.""" + self._streaming_mode = mode + + def get_streaming_mode(self) -> StreamingMode: + """Get the current streaming mode.""" + return 
self._streaming_mode + + def clear(self) -> None: + """Broadcast clear command to all clients.""" + if self._clients: + clear_data = {"type": "clear"} + message = json.dumps(clear_data) + for client in list(self._clients): + try: + asyncio.run(client.send(message)) + except Exception: + pass + + def cleanup(self) -> None: + """Stop the servers.""" + self.stop_server() + self.stop_http_server() + + async def _websocket_handler(self, websocket): + """Handle WebSocket connections.""" + if len(self._clients) >= self._max_clients: + await websocket.close() + return + + self._clients.add(websocket) + if self._client_connected_callback: + self._client_connected_callback(websocket) + + try: + async for message in websocket: + try: + data = json.loads(message) + msg_type = data.get("type") + + if msg_type == "resize": + self.width = data.get("width", 80) + self.height = data.get("height", 24) + elif msg_type == "command" and self._command_callback: + # Forward commands to the pipeline controller + command = data.get("command", {}) + self._command_callback(command) + elif msg_type == "state_request": + # Send current state snapshot + state = self._get_state_snapshot() + if state: + response = {"type": "state", "state": state} + await websocket.send(json.dumps(response)) + except json.JSONDecodeError: + pass + except Exception: + pass + finally: + self._clients.discard(websocket) + if self._client_disconnected_callback: + self._client_disconnected_callback(websocket) + + async def _run_websocket_server(self): + """Run the WebSocket server.""" + if not websockets: + return + async with websockets.serve(self._websocket_handler, self.host, self.port): + while self._server_running: + await asyncio.sleep(0.1) + + async def _run_http_server(self): + """Run simple HTTP server for the client.""" + import os + from http.server import HTTPServer, SimpleHTTPRequestHandler + + # Find the project root by locating 'engine' directory in the path + websocket_file = os.path.abspath(__file__) 
+        parts = websocket_file.split(os.sep)
+        if "engine" in parts:
+            engine_idx = parts.index("engine")
+            project_root = os.sep.join(parts[:engine_idx])
+            client_dir = os.path.join(project_root, "client")
+        else:
+            # Fallback: go up 4 levels from websocket.py
+            # websocket.py: .../engine/display/backends/websocket.py
+            # We need: .../client
+            client_dir = os.path.join(
+                os.path.dirname(
+                    os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
+                ),
+                "client",
+            )
+
+        class Handler(SimpleHTTPRequestHandler):
+            def __init__(self, *args, **kwargs):
+                super().__init__(*args, directory=client_dir, **kwargs)
+
+            def log_message(self, format, *args):
+                pass
+
+        httpd = HTTPServer((self.host, self.http_port), Handler)
+        # Store reference for shutdown
+        self._httpd = httpd
+        # Serve requests continuously
+        httpd.serve_forever()
+
+    def _run_async(self, coro):
+        """Run coroutine in background."""
+        try:
+            asyncio.run(coro)
+        except Exception as e:
+            print(f"WebSocket async error: {e}")
+
+    def start_server(self):
+        """Start the WebSocket server in a background thread."""
+        if not self._available:
+            return
+        if self._server_thread is not None:
+            return
+
+        self._server_running = True
+        self._server_thread = threading.Thread(
+            target=self._run_async, args=(self._run_websocket_server(),), daemon=True
+        )
+        self._server_thread.start()
+
+    def stop_server(self):
+        """Stop the WebSocket server."""
+        self._server_running = False
+        self._server_thread = None
+
+    def start_http_server(self):
+        """Start the HTTP server in a background thread."""
+        if not self._available:
+            return
+        if self._http_thread is not None:
+            return
+
+        self._http_running = True
+
+        # NOTE(review): removed duplicate `self._http_running = True` assignment.
+        self._http_thread = threading.Thread(
+            target=self._run_async, args=(self._run_http_server(),), daemon=True
+        )
+        self._http_thread.start()
+
+    def stop_http_server(self):
+        """Stop the HTTP server."""
+        self._http_running = False
+        if hasattr(self, "_httpd") and self._httpd:
+            self._httpd.shutdown()
+ self._http_thread = None + + def client_count(self) -> int: + """Return number of connected clients.""" + return len(self._clients) + + def get_ws_port(self) -> int: + """Return WebSocket port.""" + return self.port + + def get_http_port(self) -> int: + """Return HTTP port.""" + return self.http_port + + def set_frame_delay(self, delay: float) -> None: + """Set delay between frames in seconds.""" + self._frame_delay = delay + + def get_frame_delay(self) -> float: + """Get delay between frames.""" + return self._frame_delay + + def set_client_connected_callback(self, callback) -> None: + """Set callback for client connections.""" + self._client_connected_callback = callback + + def set_client_disconnected_callback(self, callback) -> None: + """Set callback for client disconnections.""" + self._client_disconnected_callback = callback + + def set_command_callback(self, callback) -> None: + """Set callback for incoming command messages from clients.""" + self._command_callback = callback + + def set_controller(self, controller) -> None: + """Set controller (UI panel or pipeline) for state queries and command execution.""" + self._controller = controller + + def broadcast_state(self, state: dict) -> None: + """Broadcast state update to all connected clients. 
+ + Args: + state: Dictionary containing state data to send to clients + """ + if not self._clients: + return + + message = json.dumps({"type": "state", "state": state}) + + disconnected = set() + for client in list(self._clients): + try: + asyncio.run(client.send(message)) + except Exception: + disconnected.add(client) + + for client in disconnected: + self._clients.discard(client) + if self._client_disconnected_callback: + self._client_disconnected_callback(client) + + def _get_state_snapshot(self) -> dict | None: + """Get current state snapshot from controller.""" + if not self._controller: + return None + + try: + # Expect controller to have methods we need + state = {} + + # Get stages info if UIPanel + if hasattr(self._controller, "stages"): + state["stages"] = { + name: { + "enabled": ctrl.enabled, + "params": ctrl.params, + "selected": ctrl.selected, + } + for name, ctrl in self._controller.stages.items() + } + + # Get current preset + if hasattr(self._controller, "_current_preset"): + state["preset"] = self._controller._current_preset + if hasattr(self._controller, "_presets"): + state["presets"] = self._controller._presets + + # Get selected stage + if hasattr(self._controller, "selected_stage"): + state["selected_stage"] = self._controller.selected_stage + + return state + except Exception: + return None + + def get_dimensions(self) -> tuple[int, int]: + """Get current dimensions. + + Returns: + (width, height) in character cells + """ + return (self.width, self.height) diff --git a/engine/display/renderer.py b/engine/display/renderer.py new file mode 100644 index 0000000..81017c0 --- /dev/null +++ b/engine/display/renderer.py @@ -0,0 +1,280 @@ +""" +Shared display rendering utilities. + +Provides common functionality for displays that render text to images +(Pygame, Sixel, Kitty displays). 
+""" + +from typing import Any + +ANSI_COLORS = { + 0: (0, 0, 0), + 1: (205, 49, 49), + 2: (13, 188, 121), + 3: (229, 229, 16), + 4: (36, 114, 200), + 5: (188, 63, 188), + 6: (17, 168, 205), + 7: (229, 229, 229), + 8: (102, 102, 102), + 9: (241, 76, 76), + 10: (35, 209, 139), + 11: (245, 245, 67), + 12: (59, 142, 234), + 13: (214, 112, 214), + 14: (41, 184, 219), + 15: (255, 255, 255), +} + + +def parse_ansi( + text: str, +) -> list[tuple[str, tuple[int, int, int], tuple[int, int, int], bool]]: + """Parse ANSI escape sequences into text tokens with colors. + + Args: + text: Text containing ANSI escape sequences + + Returns: + List of (text, fg_rgb, bg_rgb, bold) tuples + """ + tokens = [] + current_text = "" + fg = (204, 204, 204) + bg = (0, 0, 0) + bold = False + i = 0 + + ANSI_COLORS_4BIT = { + 0: (0, 0, 0), + 1: (205, 49, 49), + 2: (13, 188, 121), + 3: (229, 229, 16), + 4: (36, 114, 200), + 5: (188, 63, 188), + 6: (17, 168, 205), + 7: (229, 229, 229), + 8: (102, 102, 102), + 9: (241, 76, 76), + 10: (35, 209, 139), + 11: (245, 245, 67), + 12: (59, 142, 234), + 13: (214, 112, 214), + 14: (41, 184, 219), + 15: (255, 255, 255), + } + + while i < len(text): + char = text[i] + + if char == "\x1b" and i + 1 < len(text) and text[i + 1] == "[": + if current_text: + tokens.append((current_text, fg, bg, bold)) + current_text = "" + + i += 2 + code = "" + while i < len(text): + c = text[i] + if c.isalpha(): + break + code += c + i += 1 + + if code: + codes = code.split(";") + for c in codes: + if c == "0": + fg = (204, 204, 204) + bg = (0, 0, 0) + bold = False + elif c == "1": + bold = True + elif c == "22": + bold = False + elif c == "39": + fg = (204, 204, 204) + elif c == "49": + bg = (0, 0, 0) + elif c.isdigit(): + color_idx = int(c) + if color_idx in ANSI_COLORS_4BIT: + fg = ANSI_COLORS_4BIT[color_idx] + elif 30 <= color_idx <= 37: + fg = ANSI_COLORS_4BIT.get(color_idx - 30, fg) + elif 40 <= color_idx <= 47: + bg = ANSI_COLORS_4BIT.get(color_idx - 40, bg) + elif 90 <= 
color_idx <= 97:
+                            fg = ANSI_COLORS_4BIT.get(color_idx - 90 + 8, fg)
+                        elif 100 <= color_idx <= 107:
+                            bg = ANSI_COLORS_4BIT.get(color_idx - 100 + 8, bg)
+                    elif c.startswith("38;5;"):
+                        idx = int(c.split(";")[-1])
+                        if idx < 256:
+                            if idx < 16:
+                                fg = ANSI_COLORS_4BIT.get(idx, fg)
+                            elif idx < 232:
+                                c_idx = idx - 16
+                                fg = (  # 6x6x6 cube: r = c//36, g = (c//6)%6, b = c%6
+                                    (c_idx // 36) * 51,
+                                    ((c_idx // 6) % 6) * 51,
+                                    (c_idx % 6) * 51,
+                                )
+                            else:
+                                gray = (idx - 232) * 10 + 8
+                                fg = (gray, gray, gray)
+                    elif c.startswith("48;5;"):
+                        idx = int(c.split(";")[-1])
+                        if idx < 256:
+                            if idx < 16:
+                                bg = ANSI_COLORS_4BIT.get(idx, bg)
+                            elif idx < 232:
+                                c_idx = idx - 16
+                                bg = (  # 6x6x6 cube decode, matches fg branch above
+                                    (c_idx // 36) * 51,
+                                    ((c_idx // 6) % 6) * 51,
+                                    (c_idx % 6) * 51,
+                                )
+                            else:
+                                gray = (idx - 232) * 10 + 8
+                                bg = (gray, gray, gray)
+            i += 1
+        else:
+            current_text += char
+            i += 1
+
+    if current_text:
+        tokens.append((current_text, fg, bg, bold))
+
+    return tokens if tokens else [("", fg, bg, bold)]
+
+
+def get_default_font_path() -> str | None:
+    """Get the path to a default monospace font."""
+    import os
+    import sys
+    from pathlib import Path
+
+    def search_dir(base_path: str) -> str | None:
+        if not os.path.exists(base_path):
+            return None
+        if os.path.isfile(base_path):
+            return base_path
+        for font_file in Path(base_path).rglob("*"):
+            if font_file.suffix.lower() in (".ttf", ".otf", ".ttc"):
+                name = font_file.stem.lower()
+                if "geist" in name and ("nerd" in name or "mono" in name):
+                    return str(font_file)
+                if "mono" in name or "courier" in name or "terminal" in name:
+                    return str(font_file)
+        return None
+
+    search_dirs = []
+    if sys.platform == "darwin":
+        search_dirs.extend(
+            [
+                os.path.expanduser("~/Library/Fonts/"),
+                "/System/Library/Fonts/",
+            ]
+        )
+    elif sys.platform == "win32":
+        search_dirs.extend(
+            [
+                os.path.expanduser("~\\AppData\\Local\\Microsoft\\Windows\\Fonts\\"),
+                "C:\\Windows\\Fonts\\",
+            ]
+        )
+    else:
+        search_dirs.extend(
+            [
+                os.path.expanduser("~/.local/share/fonts/"),
+                
os.path.expanduser("~/.fonts/"), + "/usr/share/fonts/", + ] + ) + + for search_dir_path in search_dirs: + found = search_dir(search_dir_path) + if found: + return found + + if sys.platform != "win32": + try: + import subprocess + + for pattern in ["monospace", "DejaVuSansMono", "LiberationMono"]: + result = subprocess.run( + ["fc-match", "-f", "%{file}", pattern], + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode == 0 and result.stdout.strip(): + font_file = result.stdout.strip() + if os.path.exists(font_file): + return font_file + except Exception: + pass + + return None + + +def render_to_pil( + buffer: list[str], + width: int, + height: int, + cell_width: int = 10, + cell_height: int = 18, + font_path: str | None = None, +) -> Any: + """Render buffer to a PIL Image. + + Args: + buffer: List of text lines to render + width: Terminal width in characters + height: Terminal height in rows + cell_width: Width of each character cell in pixels + cell_height: Height of each character cell in pixels + font_path: Path to TTF/OTF font file (optional) + + Returns: + PIL Image object + """ + from PIL import Image, ImageDraw, ImageFont + + img_width = width * cell_width + img_height = height * cell_height + + img = Image.new("RGBA", (img_width, img_height), (0, 0, 0, 255)) + draw = ImageDraw.Draw(img) + + if font_path: + try: + font = ImageFont.truetype(font_path, cell_height - 2) + except Exception: + font = ImageFont.load_default() + else: + font = ImageFont.load_default() + + for row_idx, line in enumerate(buffer[:height]): + if row_idx >= height: + break + + tokens = parse_ansi(line) + x_pos = 0 + y_pos = row_idx * cell_height + + for text, fg, bg, _bold in tokens: + if not text: + continue + + if bg != (0, 0, 0): + bbox = draw.textbbox((x_pos, y_pos), text, font=font) + draw.rectangle(bbox, fill=(*bg, 255)) + + draw.text((x_pos, y_pos), text, fill=(*fg, 255), font=font) + + if font: + x_pos += draw.textlength(text, font=font) + + return img diff 
--git a/engine/display/streaming.py b/engine/display/streaming.py new file mode 100644 index 0000000..54d08a6 --- /dev/null +++ b/engine/display/streaming.py @@ -0,0 +1,268 @@ +""" +Streaming protocol utilities for efficient frame transmission. + +Provides: +- Frame differencing: Only send changed lines +- Run-length encoding: Compress repeated lines +- Binary encoding: Compact message format +""" + +import json +import zlib +from dataclasses import dataclass +from enum import IntEnum + + +class MessageType(IntEnum): + """Message types for streaming protocol.""" + + FULL_FRAME = 1 + DIFF_FRAME = 2 + STATE = 3 + CLEAR = 4 + PING = 5 + PONG = 6 + + +@dataclass +class FrameDiff: + """Represents a diff between two frames.""" + + width: int + height: int + changed_lines: list[tuple[int, str]] # (line_index, content) + + +def compute_diff(old_buffer: list[str], new_buffer: list[str]) -> FrameDiff: + """Compute differences between old and new buffer. + + Args: + old_buffer: Previous frame buffer + new_buffer: Current frame buffer + + Returns: + FrameDiff with only changed lines + """ + height = len(new_buffer) + changed_lines = [] + + for i, line in enumerate(new_buffer): + if i >= len(old_buffer) or line != old_buffer[i]: + changed_lines.append((i, line)) + + return FrameDiff( + width=len(new_buffer[0]) if new_buffer else 0, + height=height, + changed_lines=changed_lines, + ) + + +def encode_rle(lines: list[tuple[int, str]]) -> list[tuple[int, str, int]]: + """Run-length encode consecutive identical lines. 
+
+    Args:
+        lines: List of (index, content) tuples (must be sorted by index)
+
+    Returns:
+        List of (start_index, content, run_length) tuples
+    """
+    if not lines:
+        return []
+
+    encoded = []
+    start_idx = lines[0][0]
+    current_line = lines[0][1]
+    current_rle = 1
+
+    for idx, line in lines[1:]:
+        if line == current_line and idx == start_idx + current_rle:  # merge contiguous runs only
+            current_rle += 1
+        else:
+            encoded.append((start_idx, current_line, current_rle))
+            start_idx = idx
+            current_line = line
+            current_rle = 1
+
+    encoded.append((start_idx, current_line, current_rle))
+    return encoded
+
+
+def decode_rle(encoded: list[tuple[int, str, int]]) -> list[tuple[int, str]]:
+    """Decode run-length encoded lines.
+
+    Args:
+        encoded: List of (start_index, content, run_length) tuples
+
+    Returns:
+        List of (index, content) tuples
+    """
+    result = []
+    for start_idx, line, rle in encoded:
+        for i in range(rle):
+            result.append((start_idx + i, line))
+    return result
+
+
+def compress_frame(buffer: list[str], level: int = 6) -> bytes:
+    """Compress a frame buffer using zlib.
+
+    Args:
+        buffer: Frame buffer (list of lines)
+        level: Compression level (0-9)
+
+    Returns:
+        Compressed bytes
+    """
+    content = "\n".join(buffer)
+    return zlib.compress(content.encode("utf-8"), level)
+
+
+def decompress_frame(data: bytes, height: int) -> list[str]:
+    """Decompress a frame buffer.
+
+    Args:
+        data: Compressed bytes
+        height: Number of lines in original buffer
+
+    Returns:
+        Frame buffer (list of lines)
+    """
+    content = zlib.decompress(data).decode("utf-8")
+    lines = content.split("\n")
+    if len(lines) > height:
+        lines = lines[:height]
+    while len(lines) < height:
+        lines.append("")
+    return lines
+
+
+def encode_binary_message(
+    msg_type: MessageType, width: int, height: int, payload: bytes
+) -> bytes:
+    """Encode a binary message.
+ + Message format: + - 1 byte: message type + - 2 bytes: width (uint16) + - 2 bytes: height (uint16) + - 4 bytes: payload length (uint32) + - N bytes: payload + + Args: + msg_type: Message type + width: Frame width + height: Frame height + payload: Message payload + + Returns: + Encoded binary message + """ + import struct + + header = struct.pack("!BHHI", msg_type.value, width, height, len(payload)) + return header + payload + + +def decode_binary_message(data: bytes) -> tuple[MessageType, int, int, bytes]: + """Decode a binary message. + + Args: + data: Binary message data + + Returns: + Tuple of (msg_type, width, height, payload) + """ + import struct + + msg_type_val, width, height, payload_len = struct.unpack("!BHHI", data[:9]) + payload = data[9 : 9 + payload_len] + return MessageType(msg_type_val), width, height, payload + + +def encode_diff_message(diff: FrameDiff, use_rle: bool = True) -> bytes: + """Encode a diff message for transmission. + + Args: + diff: Frame diff + use_rle: Whether to use run-length encoding + + Returns: + Encoded diff payload + """ + + if use_rle: + encoded_lines = encode_rle(diff.changed_lines) + data = [[idx, line, rle] for idx, line, rle in encoded_lines] + else: + data = [[idx, line] for idx, line in diff.changed_lines] + + payload = json.dumps(data).encode("utf-8") + return payload + + +def decode_diff_message(payload: bytes, use_rle: bool = True) -> list[tuple[int, str]]: + """Decode a diff message. + + Args: + payload: Encoded diff payload + use_rle: Whether run-length encoding was used + + Returns: + List of (line_index, content) tuples + """ + + data = json.loads(payload.decode("utf-8")) + + if use_rle: + return decode_rle([(idx, line, rle) for idx, line, rle in data]) + else: + return [(idx, line) for idx, line in data] + + +def should_use_diff( + old_buffer: list[str], new_buffer: list[str], threshold: float = 0.3 +) -> bool: + """Determine if diff or full frame is more efficient. 
+ + Args: + old_buffer: Previous frame + new_buffer: Current frame + threshold: Max changed ratio to use diff (0.0-1.0) + + Returns: + True if diff is more efficient + """ + if not old_buffer or not new_buffer: + return False + + diff = compute_diff(old_buffer, new_buffer) + total_lines = len(new_buffer) + changed_ratio = len(diff.changed_lines) / total_lines if total_lines > 0 else 1.0 + + return changed_ratio <= threshold + + +def apply_diff(old_buffer: list[str], diff: FrameDiff) -> list[str]: + """Apply a diff to an old buffer to get the new buffer. + + Args: + old_buffer: Previous frame buffer + diff: Frame diff to apply + + Returns: + New frame buffer + """ + new_buffer = list(old_buffer) + + for line_idx, content in diff.changed_lines: + if line_idx < len(new_buffer): + new_buffer[line_idx] = content + else: + while len(new_buffer) < line_idx: + new_buffer.append("") + new_buffer.append(content) + + while len(new_buffer) < diff.height: + new_buffer.append("") + + return new_buffer[: diff.height] diff --git a/engine/effects/__init__.py b/engine/effects/__init__.py index 923d361..4ee702d 100644 --- a/engine/effects/__init__.py +++ b/engine/effects/__init__.py @@ -6,18 +6,17 @@ from engine.effects.legacy import ( glitch_bar, next_headline, noise, + vis_offset, vis_trunc, ) from engine.effects.performance import PerformanceMonitor, get_monitor, set_monitor from engine.effects.registry import EffectRegistry, get_registry, set_registry -from engine.effects.types import EffectConfig, EffectContext, PipelineConfig - - -def get_effect_chain(): - from engine.layers import get_effect_chain as _chain - - return _chain() - +from engine.effects.types import ( + EffectConfig, + EffectContext, + PipelineConfig, + create_effect_context, +) __all__ = [ "EffectChain", @@ -25,9 +24,9 @@ __all__ = [ "EffectConfig", "EffectContext", "PipelineConfig", + "create_effect_context", "get_registry", "set_registry", - "get_effect_chain", "get_monitor", "set_monitor", 
"PerformanceMonitor", @@ -39,4 +38,5 @@ __all__ = [ "noise", "next_headline", "vis_trunc", + "vis_offset", ] diff --git a/engine/effects/chain.py b/engine/effects/chain.py index c687266..bb20587 100644 --- a/engine/effects/chain.py +++ b/engine/effects/chain.py @@ -2,7 +2,7 @@ import time from engine.effects.performance import PerformanceMonitor, get_monitor from engine.effects.registry import EffectRegistry -from engine.effects.types import EffectContext +from engine.effects.types import EffectContext, PartialUpdate class EffectChain: @@ -51,6 +51,18 @@ class EffectChain: frame_number = ctx.frame_number monitor.start_frame(frame_number) + # Get dirty regions from canvas via context (set by CanvasStage) + dirty_rows = ctx.get_state("canvas.dirty_rows") + + # Create PartialUpdate for effects that support it + full_buffer = dirty_rows is None or len(dirty_rows) == 0 + partial = PartialUpdate( + rows=None, + cols=None, + dirty=dirty_rows, + full_buffer=full_buffer, + ) + frame_start = time.perf_counter() result = list(buf) for name in self._order: @@ -59,7 +71,11 @@ class EffectChain: chars_in = sum(len(line) for line in result) effect_start = time.perf_counter() try: - result = plugin.process(result, ctx) + # Use process_partial if supported, otherwise fall back to process + if getattr(plugin, "supports_partial_updates", False): + result = plugin.process_partial(result, ctx, partial) + else: + result = plugin.process(result, ctx) except Exception: plugin.config.enabled = False elapsed = time.perf_counter() - effect_start diff --git a/engine/effects/controller.py b/engine/effects/controller.py index 3e72881..8f9141f 100644 --- a/engine/effects/controller.py +++ b/engine/effects/controller.py @@ -6,14 +6,7 @@ _effect_chain_ref = None def _get_effect_chain(): global _effect_chain_ref - if _effect_chain_ref is not None: - return _effect_chain_ref - try: - from engine.layers import get_effect_chain as _chain - - return _chain() - except Exception: - return None + return 
def vis_offset(s, offset):
    """Skip the first `offset` *visible* characters of s.

    ANSI CSI escape sequences never count toward the offset; sequences that
    fall entirely inside the skipped prefix are discarded from the output.
    """
    if offset <= 0:
        return s

    out = []
    seen = 0          # visible characters consumed so far
    pos = 0
    dropping = True   # still inside the skipped prefix
    n = len(s)

    while pos < n:
        if s[pos] == "\033" and pos + 1 < n and s[pos + 1] == "[":
            # CSI sequence: scan forward to the terminating letter.
            end = pos + 2
            while end < n and not s[end].isalpha():
                end += 1
            if not dropping:
                out.append(s[pos : end + 1])
            # Escape codes inside the skipped prefix are dropped entirely.
            pos = end + 1
        elif dropping:
            if seen >= offset:
                dropping = False
                out.append(s[pos])
            seen += 1
            pos += 1
        else:
            out.append(s[pos])
            pos += 1

    return "".join(out)
"""Afterimage effect using previous frame."""

from engine.effects.types import EffectConfig, EffectContext, EffectPlugin


class AfterimageEffect(EffectPlugin):
    """Show a faint ghost of the previous frame.

    This effect requires a FrameBufferStage to be present in the pipeline.
    It shows a dimmed version of the previous frame super-imposed on the
    current frame.

    Attributes:
        name: "afterimage"
        config: EffectConfig with intensity parameter (0.0-1.0)
        param_bindings: Optional sensor bindings for intensity modulation

    Example:
        >>> effect = AfterimageEffect()
        >>> effect.configure(EffectConfig(intensity=0.3))
        >>> result = effect.process(buffer, ctx)
    """

    name = "afterimage"
    config: EffectConfig = EffectConfig(enabled=True, intensity=0.3)
    param_bindings: dict[str, dict[str, str | float]] = {}
    supports_partial_updates = False

    def process(self, buf: list[str], ctx: EffectContext) -> list[str]:
        """Apply afterimage effect using the previous frame.

        Args:
            buf: Current text buffer (list of strings)
            ctx: Effect context with access to framebuffer history

        Returns:
            Buffer with ghost of previous frame overlaid
        """
        if not buf:
            return buf

        # Locate frame history published by a FrameBufferStage via context state.
        history = None
        for key in ctx.state:
            if key.startswith("framebuffer.") and key.endswith(".history"):
                history = ctx.state[key]
                break

        # Index 0 is the current frame and index 1 the previous one, so at
        # least two entries are required. (The previous `len(history) < 1`
        # guard could never supply a usable previous frame on its own.)
        if not history or len(history) < 2:
            return buf

        # Effective intensity: explicit param overrides the config default.
        intensity = self.config.params.get("intensity", self.config.intensity)
        intensity = max(0.0, min(1.0, intensity))
        if intensity <= 0.0:
            return buf

        prev_frame = history[1]
        if not prev_frame:
            return buf

        # Blend current and previous frames inside the viewport only.
        viewport_height = ctx.terminal_height - ctx.ticker_height
        result = []

        for row in range(len(buf)):
            if row >= viewport_height:
                result.append(buf[row])
                continue

            current_line = buf[row]
            prev_line = prev_frame[row] if row < len(prev_frame) else ""

            if not prev_line:
                result.append(current_line)
                continue

            result.append(self._blend_lines(current_line, prev_line, intensity))

        return result

    def _blend_lines(self, current: str, previous: str, intensity: float) -> str:
        """Blend current and previous line with given intensity.

        For text with ANSI codes, true blending is complex. This simplified
        placeholder picks one line per band: intensity > 0.7 keeps the current
        line, intensity < 0.3 shows the previous line (undimmed for now), and
        the middle band falls back to the current line.

        A more sophisticated implementation would:
        1. Parse ANSI color codes from both lines
        2. Blend RGB values based on intensity
        3. Reconstruct the line with blended colors
        """
        if current == previous:
            return current

        if intensity > 0.7:
            return current
        elif intensity < 0.3:
            # Show previous frame; a real implementation would dim its colors.
            return previous
        else:
            # Placeholder for proper blending in the medium band.
            return current

    def configure(self, config: EffectConfig) -> None:
        """Configure the effect."""
        self.config = config


class BorderEffect(EffectPlugin):
    """Simple border effect for terminal display.

    Draws a border around the buffer and optionally displays
    performance metrics in the border corners.

    Internally crops to display dimensions to ensure border fits.
    """

    name = "border"
    config = EffectConfig(enabled=True, intensity=1.0)

    def process(self, buf: list[str], ctx: EffectContext) -> list[str]:
        """Wrap the buffer in a box-drawing border, embedding FPS and
        frame-time metrics in the top/bottom edges when available."""
        if not buf:
            return buf

        display_w = ctx.terminal_width
        display_h = ctx.terminal_height

        # If dimensions are reasonable, crop first so the border fits exactly;
        # the content area loses 2 columns/rows to the border characters.
        if display_w >= 10 and display_h >= 3:
            buf = self._crop_to_size(buf, display_w - 2, display_h - 2)
            w = display_w
            h = display_h
        else:
            # Fall back to the buffer's own dimensions.
            h = len(buf)
            w = max(len(line) for line in buf) if buf else 0

        if w < 3 or h < 3:
            return buf

        inner_w = w - 2

        # Pull pipeline metrics (average frame time) from context, if published.
        fps = 0.0
        frame_time = 0.0
        metrics = ctx.get_state("metrics")
        if metrics:
            avg_ms = metrics.get("avg_ms")
            frame_count = metrics.get("frame_count", 0)
            if avg_ms and frame_count > 0:
                fps = 1000.0 / avg_ms
                frame_time = avg_ms

        # Top border with FPS readout right-aligned when it fits.
        if fps > 0:
            fps_str = f" FPS:{fps:.0f}"
            if len(fps_str) < inner_w:
                top_border = "┌" + "─" * (inner_w - len(fps_str)) + fps_str + "┐"
            else:
                top_border = "┌" + "─" * inner_w + "┐"
        else:
            top_border = "┌" + "─" * inner_w + "┐"

        # Bottom border with frame-time readout when it fits.
        if frame_time > 0:
            ft_str = f" {frame_time:.1f}ms"
            if len(ft_str) < inner_w:
                bottom_border = "└" + "─" * (inner_w - len(ft_str)) + ft_str + "┘"
            else:
                bottom_border = "└" + "─" * inner_w + "┘"
        else:
            bottom_border = "└" + "─" * inner_w + "┘"

        # Left/right border characters around each content line.
        # NOTE(review): len() counts ANSI escape bytes as width here, so lines
        # carrying color codes may be truncated/padded short — confirm intended.
        result = [top_border]
        for line in buf[: h - 2]:
            if len(line) >= inner_w:
                result.append("│" + line[:inner_w] + "│")
            else:
                result.append("│" + line + " " * (inner_w - len(line)) + "│")

        result.append(bottom_border)

        return result

    def _crop_to_size(self, buf: list[str], w: int, h: int) -> list[str]:
        """Crop buffer to fit within w x h."""
        result = []
        for i in range(min(h, len(buf))):
            line = buf[i]
            if len(line) > w:
                result.append(line[:w])
            else:
                result.append(line + " " * (w - len(line)))

        # Pad with empty lines if needed (for border)
        while len(result) < h:
            result.append(" " * w)

        return result

    def configure(self, config: EffectConfig) -> None:
        self.config = config


class CropEffect(EffectPlugin):
    """Crop effect that crops the input buffer to fit the display.

    This ensures the output buffer matches the actual display dimensions,
    useful when the source produces a buffer larger than the viewport.
    """

    name = "crop"
    config = EffectConfig(enabled=True, intensity=1.0)

    def process(self, buf: list[str], ctx: EffectContext) -> list[str]:
        """Crop/pad the buffer to exactly the display's width and height."""
        if not buf:
            return buf

        # Fall back to the buffer's own dimensions when the context reports a
        # non-positive size.
        w = (
            ctx.terminal_width
            if ctx.terminal_width > 0
            else max(len(line) for line in buf)
        )
        h = ctx.terminal_height if ctx.terminal_height > 0 else len(buf)

        # Truncate long lines and pad short ones to exactly w columns.
        # NOTE(review): len() counts ANSI escape bytes as width — confirm.
        result = []
        for i in range(min(h, len(buf))):
            line = buf[i]
            if len(line) > w:
                result.append(line[:w])
            else:
                result.append(line + " " * (w - len(line)))

        # Pad with blank lines up to the display height.
        while len(result) < h:
            result.append(" " * w)

        return result

    def configure(self, config: EffectConfig) -> None:
        self.config = config
import random
import re

from engine.effects.types import (
    EffectConfig,
    EffectContext,
    EffectPlugin,
    PartialUpdate,
)
from engine.terminal import DIM, G_LO, RST

# Compiled once at import: matches CSI-style ANSI escape sequences.
_ANSI_RE = re.compile(r"\x1b\[[0-9;]*[a-zA-Z]")


class GlitchEffect(EffectPlugin):
    """Randomly replace whole lines with glitch bars.

    Probability and hit count scale with mic excess and configured intensity.
    """

    name = "glitch"
    config = EffectConfig(enabled=True, intensity=1.0, entropy=0.8)

    def process(self, buf: list[str], ctx: EffectContext) -> list[str]:
        if not buf:
            return buf
        result = list(buf)
        intensity = self.config.intensity

        glitch_prob = 0.32 + min(0.9, ctx.mic_excess * 0.16)
        glitch_prob = glitch_prob * intensity
        n_hits = 4 + int(ctx.mic_excess / 2)
        n_hits = int(n_hits * intensity)

        if random.random() < glitch_prob:
            # Visible length of each line (ANSI escapes stripped) so the
            # replacement bar keeps the original visual footprint.
            original_lengths = [len(_ANSI_RE.sub("", line)) for line in result]
            for _ in range(min(n_hits, len(result))):
                gi = random.randint(0, len(result) - 1)
                # (A no-op bare `result[gi]` expression from the original
                # was removed here.)
                result[gi] = self._glitch_bar(original_lengths[gi])
        return result

    def _glitch_bar(self, target_len: int) -> str:
        """Build a colored glitch bar with visible width target_len."""
        # NOTE(review): "\xc2" is a bare Latin-1 'Â' — confirm it is
        # intentional glitch junk rather than a truncated UTF-8 sequence.
        c = random.choice(["░", "▒", "─", "\xc2"])
        n = random.randint(3, max(3, target_len // 2))
        o = random.randint(0, max(0, target_len - n))

        glitch_chars = c * n
        trailing_spaces = max(0, target_len - o - n)

        glitch_part = f"{G_LO}{DIM}" + glitch_chars + RST
        return " " * o + glitch_part + " " * trailing_spaces

    def configure(self, config: EffectConfig) -> None:
        self.config = config


class HudEffect(EffectPlugin):
    """Overlay a three-row HUD (title/FPS, effect bar, pipeline order)."""

    name = "hud"
    config = EffectConfig(enabled=True, intensity=1.0)
    supports_partial_updates = True  # Enable partial update optimization

    # Cache of last HUD content to detect changes.
    # NOTE(review): currently never written or read — confirm before relying on it.
    _last_hud_content: tuple | None = None

    def process_partial(
        self, buf: list[str], ctx: EffectContext, partial: PartialUpdate
    ) -> list[str]:
        """Skip the HUD redraw entirely when none of its rows (0-2) are dirty."""
        # If full buffer requested, process normally
        if partial.full_buffer:
            return self.process(buf, ctx)

        if partial.dirty:
            hud_rows = {0, 1, 2}
            if not (partial.dirty & hud_rows):
                return buf  # Nothing for HUD to do

        # Proceed with full processing
        return self.process(buf, ctx)

    def process(self, buf: list[str], ctx: EffectContext) -> list[str]:
        result = list(buf)

        # Metrics come from the pipeline context first (first-class citizen);
        # fall back to the global monitor for backwards compatibility.
        metrics = ctx.get_state("metrics")
        if not metrics:
            from engine.effects.performance import get_monitor

            monitor = get_monitor()
            if monitor:
                stats = monitor.get_stats()
                if stats and "pipeline" in stats:
                    metrics = stats

        fps = 0.0
        frame_time = 0.0
        if metrics:
            if "error" in metrics:
                pass  # No metrics available yet
            elif "pipeline" in metrics:
                frame_time = metrics["pipeline"].get("avg_ms", 0.0)
                frame_count = metrics.get("frame_count", 0)
                if frame_count > 0 and frame_time > 0:
                    fps = 1000.0 / frame_time
            elif "avg_ms" in metrics:
                # Direct metrics format
                frame_time = metrics.get("avg_ms", 0.0)
                frame_count = metrics.get("frame_count", 0)
                if frame_count > 0 and frame_time > 0:
                    fps = 1000.0 / frame_time

        effect_name = self.config.params.get("display_effect", "none")
        effect_intensity = self.config.params.get("display_intensity", 0.0)

        hud_lines = []
        hud_lines.append(
            f"\033[1;1H\033[38;5;46mMAINLINE DEMO\033[0m \033[38;5;245m|\033[0m \033[38;5;39mFPS: {fps:.1f}\033[0m \033[38;5;245m|\033[0m \033[38;5;208m{frame_time:.1f}ms\033[0m"
        )

        # Intensity bar: filled/empty cells over a fixed 20-cell width.
        bar_width = 20
        filled = int(bar_width * effect_intensity)
        bar = (
            "\033[38;5;82m"
            + "█" * filled
            + "\033[38;5;240m"
            + "░" * (bar_width - filled)
            + "\033[0m"
        )
        hud_lines.append(
            f"\033[2;1H\033[38;5;45mEFFECT:\033[0m \033[1;38;5;227m{effect_name:12s}\033[0m \033[38;5;245m|\033[0m {bar} \033[38;5;245m|\033[0m \033[38;5;219m{effect_intensity * 100:.0f}%\033[0m"
        )

        # Get pipeline order from context
        pipeline_order = ctx.get_state("pipeline_order")
        pipeline_str = ",".join(pipeline_order) if pipeline_order else "(none)"
        hud_lines.append(f"\033[3;1H\033[38;5;44mPIPELINE:\033[0m {pipeline_str}")

        # Overlay HUD lines onto rows 0-2 of the buffer.
        for i, line in enumerate(hud_lines):
            if i < len(result):
                result[i] = line
            else:
                result.append(line)

        return result

    def configure(self, config: EffectConfig) -> None:
        self.config = config


class MotionBlurEffect(EffectPlugin):
    """Apply motion blur by blending current frame with previous frames.

    This effect requires a FrameBufferStage to be present in the pipeline.
    The framebuffer provides frame history which is blended with the current
    frame based on intensity.

    Attributes:
        name: "motionblur"
        config: EffectConfig with intensity parameter (0.0-1.0)
        param_bindings: Optional sensor bindings for intensity modulation

    Example:
        >>> effect = MotionBlurEffect()
        >>> effect.configure(EffectConfig(intensity=0.5))
        >>> result = effect.process(buffer, ctx)
    """

    name = "motionblur"
    config: EffectConfig = EffectConfig(enabled=True, intensity=0.5)
    param_bindings: dict[str, dict[str, str | float]] = {}
    supports_partial_updates = False

    def process(self, buf: list[str], ctx: EffectContext) -> list[str]:
        """Apply motion blur by blending with previous frames.

        Args:
            buf: Current text buffer (list of strings)
            ctx: Effect context with access to framebuffer history

        Returns:
            Blended buffer with motion blur effect applied
        """
        if not buf:
            return buf

        # Locate frame history published by a FrameBufferStage via context state.
        history = None
        for key in ctx.state:
            if key.startswith("framebuffer.") and key.endswith(".history"):
                history = ctx.state[key]
                break

        if not history:
            # No framebuffer available, return unchanged
            return buf

        # Effective intensity: explicit param overrides the config default.
        intensity = self.config.params.get("intensity", self.config.intensity)
        intensity = max(0.0, min(1.0, intensity))
        if intensity <= 0.0:
            return buf

        # Decay factor controls how quickly older frames fade out.
        decay = self.config.params.get("decay", 0.7)

        result = []
        viewport_height = ctx.terminal_height - ctx.ticker_height

        # Cap the number of blended frames for performance.
        max_frames = min(len(history), 5)

        for row in range(len(buf)):
            if row >= viewport_height:
                # Beyond viewport, just copy
                result.append(buf[row])
                continue

            # Start with the current frame, then fold in historical rows with
            # exponentially decaying weights. (An unused weight_sum accumulator
            # from the original was removed.)
            blended = buf[row]
            if max_frames > 0 and intensity > 0:
                for i in range(max_frames):
                    frame_weight = intensity * (decay**i)
                    if frame_weight < 0.01:  # Skip negligible weights
                        break

                    hist_row = history[i][row] if row < len(history[i]) else ""
                    blended = self._blend_strings(blended, hist_row, frame_weight)

            result.append(blended)

        return result

    def _blend_strings(self, current: str, historical: str, weight: float) -> str:
        """Blend two strings with given weight.

        This is a simplified blending that works with ANSI codes. For proper
        blending we'd need to parse colors; for now the heavier-weighted line
        wins outright: weight > 0.5 keeps current, otherwise historical.
        """
        if current == historical:
            return current
        return current if weight > 0.5 else historical

    def configure(self, config: EffectConfig) -> None:
        """Configure the effect."""
        self.config = config
from engine.effects.types import EffectConfig, EffectContext, EffectPlugin


class TintEffect(EffectPlugin):
    """Tint effect that applies an RGB color overlay to the buffer.

    Uses ANSI escape codes to tint text with the specified RGB values.
    Supports transparency (0-100%) for blending.

    Inlets:
        - r: Red component (0-255)
        - g: Green component (0-255)
        - b: Blue component (0-255)
        - a: Alpha/transparency (0.0-1.0, where 0.0 = fully transparent)
    """

    name = "tint"
    config = EffectConfig(enabled=True, intensity=1.0)

    # Inlet/outlet types for PureData-style typing.
    @property
    def inlet_types(self) -> set:
        from engine.pipeline.core import DataType

        return {DataType.TEXT_BUFFER}

    @property
    def outlet_types(self) -> set:
        from engine.pipeline.core import DataType

        return {DataType.TEXT_BUFFER}

    def process(self, buf: list[str], ctx: EffectContext) -> list[str]:
        if not buf:
            return buf

        # Tint values come from effect params (possibly sensor-bound).
        r = self.config.params.get("r", 255)
        g = self.config.params.get("g", 255)
        b = self.config.params.get("b", 255)
        a = self.config.params.get("a", 0.3)  # Default 30% tint

        # Clamp values to valid ranges.
        r = max(0, min(255, int(r)))
        g = max(0, min(255, int(g)))
        b = max(0, min(255, int(b)))
        a = max(0.0, min(1.0, float(a)))

        if a <= 0:
            return buf

        # Convert RGB to an ANSI 256-color palette code.
        ansi_color = self._rgb_to_ansi256(r, g, b)

        result = []
        for line in buf:
            if not line.strip():
                result.append(line)
                continue

            # Wrap the whole line in the tint color. (The original had two
            # identical branches for colored vs. plain lines; inner ANSI codes
            # simply override the tint from where they appear.)
            result.append(f"\033[38;5;{ansi_color}m{line}\033[0m")

        return result

    def _rgb_to_ansi256(self, r: int, g: int, b: int) -> int:
        """Convert RGB (0-255 each) to ANSI 256 color code."""
        if r == g == b == 0:
            return 16
        if r == g == b == 255:
            return 231

        # Grayscale ramp: codes 232-255 cover luminance levels 8, 18, ..., 238.
        gray_idx = min(23, int((0.299 * r + 0.587 * g + 0.114 * b) / 255 * 24))
        gray_code = 232 + gray_idx
        gray_level = 8 + gray_idx * 10

        # 6x6x6 color cube: codes 16-231.
        ri = min(5, int(r / 51))
        gi = min(5, int(g / 51))
        bi = min(5, int(b / 51))
        color_code = 16 + 36 * ri + 6 * gi + bi

        # Pick whichever candidate is closer in RGB space. (The original
        # compared the red component against the grayscale *palette index*
        # — a value in 232-255 — which made the distance test meaningless.)
        gray_dist = (
            (r - gray_level) ** 2 + (g - gray_level) ** 2 + (b - gray_level) ** 2
        ) ** 0.5
        color_dist = (
            (r - ri * 51) ** 2 + (g - gi * 51) ** 2 + (b - bi * 51) ** 2
        ) ** 0.5

        return gray_code if gray_dist < color_dist else color_code

    def configure(self, config: EffectConfig) -> None:
        self.config = config
+ +EffectPlugin Architecture: +- Uses ABC (Abstract Base Class) for interface enforcement +- Runtime discovery via directory scanning (effects_plugins/) +- Configuration via EffectConfig dataclass +- Context passed through EffectContext dataclass + +Plugin System Research (see AGENTS.md for references): +- VST: Standardized audio interfaces, chaining, presets (FXP/FXB) +- Python Entry Points: Namespace packages, importlib.metadata discovery +- Shadertoy: Shader-based with uniforms as context + +Current gaps vs industry patterns: +- No preset save/load system +- No external plugin distribution via entry points +- No plugin metadata (version, author, description) +""" + from abc import ABC, abstractmethod from dataclasses import dataclass, field from typing import Any +@dataclass +class PartialUpdate: + """Represents a partial buffer update for optimized rendering. + + Instead of processing the full buffer every frame, effects that support + partial updates can process only changed regions. + + Attributes: + rows: Row indices that changed (None = all rows) + cols: Column range that changed (None = full width) + dirty: Set of dirty row indices + """ + + rows: tuple[int, int] | None = None # (start, end) inclusive + cols: tuple[int, int] | None = None # (start, end) inclusive + dirty: set[int] | None = None # Set of dirty row indices + full_buffer: bool = True # If True, process entire buffer + + @dataclass class EffectContext: + """Context passed to effect plugins during processing. + + Contains terminal dimensions, camera state, frame info, and real-time sensor values. + """ + terminal_width: int terminal_height: int scroll_cam: int @@ -15,24 +59,140 @@ class EffectContext: frame_number: int = 0 has_message: bool = False items: list = field(default_factory=list) + _state: dict[str, Any] = field(default_factory=dict, repr=False) + + def compute_entropy(self, effect_name: str, data: Any) -> float: + """Compute entropy score for an effect based on its output. 
+ + Args: + effect_name: Name of the effect + data: Processed buffer or effect-specific data + + Returns: + Entropy score 0.0-1.0 representing visual chaos + """ + # Default implementation: use effect name as seed for deterministic randomness + # Better implementations can analyze actual buffer content + import hashlib + + data_str = str(data)[:100] if data else "" + hash_val = hashlib.md5(f"{effect_name}:{data_str}".encode()).hexdigest() + # Convert hash to float 0.0-1.0 + entropy = int(hash_val[:8], 16) / 0xFFFFFFFF + return min(max(entropy, 0.0), 1.0) + + def get_sensor_value(self, sensor_name: str) -> float | None: + """Get a sensor value from context state. + + Args: + sensor_name: Name of the sensor (e.g., "mic", "camera") + + Returns: + Sensor value as float, or None if not available. + """ + return self._state.get(f"sensor.{sensor_name}") + + def set_state(self, key: str, value: Any) -> None: + """Set a state value in the context.""" + self._state[key] = value + + def get_state(self, key: str, default: Any = None) -> Any: + """Get a state value from the context.""" + return self._state.get(key, default) + + @property + def state(self) -> dict[str, Any]: + """Get the state dictionary for direct access by effects.""" + return self._state @dataclass class EffectConfig: enabled: bool = True intensity: float = 1.0 + entropy: float = 0.0 # Visual chaos metric (0.0 = calm, 1.0 = chaotic) params: dict[str, Any] = field(default_factory=dict) class EffectPlugin(ABC): + """Abstract base class for effect plugins. 
+ + Subclasses must define: + - name: str - unique identifier for the effect + - config: EffectConfig - current configuration + + Optional class attribute: + - param_bindings: dict - Declarative sensor-to-param bindings + Example: + param_bindings = { + "intensity": {"sensor": "mic", "transform": "linear"}, + "rate": {"sensor": "mic", "transform": "exponential"}, + } + + And implement: + - process(buf, ctx) -> list[str] + - configure(config) -> None + + Effect Behavior with ticker_height=0: + - NoiseEffect: Returns buffer unchanged (no ticker to apply noise to) + - FadeEffect: Returns buffer unchanged (no ticker to fade) + - GlitchEffect: Processes normally (doesn't depend on ticker_height) + - FirehoseEffect: Returns buffer unchanged if no items in context + + Effects should handle missing or zero context values gracefully by + returning the input buffer unchanged rather than raising errors. + + The param_bindings system enables PureData-style signal routing: + - Sensors emit values that effects can bind to + - Transform functions map sensor values to param ranges + - Effects read bound values from context.state["sensor.{name}"] + """ + name: str config: EffectConfig + param_bindings: dict[str, dict[str, str | float]] = {} + supports_partial_updates: bool = False # Override in subclasses for optimization @abstractmethod - def process(self, buf: list[str], ctx: EffectContext) -> list[str]: ... + def process(self, buf: list[str], ctx: EffectContext) -> list[str]: + """Process the buffer with this effect applied. + + Args: + buf: List of lines to process + ctx: Effect context with terminal state + + Returns: + Processed buffer (may be same object or new list) + """ + ... + + def process_partial( + self, buf: list[str], ctx: EffectContext, partial: PartialUpdate + ) -> list[str]: + """Process a partial buffer for optimized rendering. + + Override this in subclasses that support partial updates for performance. 
+ Default implementation falls back to full buffer processing. + + Args: + buf: List of lines to process + ctx: Effect context with terminal state + partial: PartialUpdate indicating which regions changed + + Returns: + Processed buffer (may be same object or new list) + """ + # Default: fall back to full processing + return self.process(buf, ctx) @abstractmethod - def configure(self, config: EffectConfig) -> None: ... + def configure(self, config: EffectConfig) -> None: + """Configure the effect with new settings. + + Args: + config: New configuration to apply + """ + ... def create_effect_context( @@ -40,7 +200,6 @@ def create_effect_context( terminal_height: int = 24, scroll_cam: int = 0, ticker_height: int = 0, - camera_x: int = 0, mic_excess: float = 0.0, grad_offset: float = 0.0, frame_number: int = 0, @@ -53,7 +212,6 @@ def create_effect_context( terminal_height=terminal_height, scroll_cam=scroll_cam, ticker_height=ticker_height, - camera_x=camera_x, mic_excess=mic_excess, grad_offset=grad_offset, frame_number=frame_number, @@ -66,3 +224,58 @@ def create_effect_context( class PipelineConfig: order: list[str] = field(default_factory=list) effects: dict[str, EffectConfig] = field(default_factory=dict) + + +def apply_param_bindings( + effect: "EffectPlugin", + ctx: EffectContext, +) -> EffectConfig: + """Apply sensor bindings to effect config. + + This resolves param_bindings declarations by reading sensor values + from the context and applying transform functions. + + Args: + effect: The effect with param_bindings to apply + ctx: EffectContext containing sensor values + + Returns: + Modified EffectConfig with sensor-driven values applied. 
+ """ + import copy + + if not effect.param_bindings: + return effect.config + + config = copy.deepcopy(effect.config) + + for param_name, binding in effect.param_bindings.items(): + sensor_name: str = binding.get("sensor", "") + transform: str = binding.get("transform", "linear") + + if not sensor_name: + continue + + sensor_value = ctx.get_sensor_value(sensor_name) + if sensor_value is None: + continue + + if transform == "linear": + applied_value: float = sensor_value + elif transform == "exponential": + applied_value = sensor_value**2 + elif transform == "threshold": + threshold = float(binding.get("threshold", 0.5)) + applied_value = 1.0 if sensor_value > threshold else 0.0 + elif transform == "inverse": + applied_value = 1.0 - sensor_value + else: + applied_value = sensor_value + + config.params[f"{param_name}_sensor"] = applied_value + + if param_name == "intensity": + base_intensity = effect.config.intensity + config.intensity = base_intensity * (0.5 + applied_value * 0.5) + + return config diff --git a/engine/emitters.py b/engine/emitters.py deleted file mode 100644 index 6d6a5a1..0000000 --- a/engine/emitters.py +++ /dev/null @@ -1,25 +0,0 @@ -""" -Event emitter protocols - abstract interfaces for event-producing components. -""" - -from collections.abc import Callable -from typing import Any, Protocol - - -class EventEmitter(Protocol): - """Protocol for components that emit events.""" - - def subscribe(self, callback: Callable[[Any], None]) -> None: ... - def unsubscribe(self, callback: Callable[[Any], None]) -> None: ... - - -class Startable(Protocol): - """Protocol for components that can be started.""" - - def start(self) -> Any: ... - - -class Stoppable(Protocol): - """Protocol for components that can be stopped.""" - - def stop(self) -> None: ... 
diff --git a/engine/events.py b/engine/events.py index 61fcfc0..d686285 100644 --- a/engine/events.py +++ b/engine/events.py @@ -18,7 +18,6 @@ class EventType(Enum): NTFY_MESSAGE = auto() STREAM_START = auto() STREAM_END = auto() - FIGMENT_TRIGGER = auto() @dataclass @@ -66,12 +65,3 @@ class StreamEvent: event_type: EventType headline_count: int = 0 timestamp: datetime | None = None - - -@dataclass -class FigmentTriggerEvent: - """Event emitted when a figment is triggered.""" - - action: str - value: float | str | None = None - timestamp: datetime | None = None diff --git a/engine/fetch.py b/engine/fetch.py index 5d6f9bb..08ba4b1 100644 --- a/engine/fetch.py +++ b/engine/fetch.py @@ -7,6 +7,7 @@ import json import pathlib import re import urllib.request +from concurrent.futures import ThreadPoolExecutor, as_completed from datetime import datetime from typing import Any @@ -17,54 +18,98 @@ from engine.filter import skip, strip_tags from engine.sources import FEEDS, POETRY_SOURCES from engine.terminal import boot_ln -# Type alias for headline items HeadlineTuple = tuple[str, str, str] +DEFAULT_MAX_WORKERS = 10 +FAST_START_SOURCES = 5 +FAST_START_TIMEOUT = 3 -# ─── SINGLE FEED ────────────────────────────────────────── -def fetch_feed(url: str) -> Any | None: - """Fetch and parse a single RSS feed URL.""" + +def fetch_feed(url: str) -> tuple[str, Any] | tuple[None, None]: + """Fetch and parse a single RSS feed URL. 
Returns (url, feed) tuple.""" try: req = urllib.request.Request(url, headers={"User-Agent": "mainline/0.1"}) - resp = urllib.request.urlopen(req, timeout=config.FEED_TIMEOUT) - return feedparser.parse(resp.read()) + timeout = FAST_START_TIMEOUT if url in _fast_start_urls else config.FEED_TIMEOUT + resp = urllib.request.urlopen(req, timeout=timeout) + return (url, feedparser.parse(resp.read())) except Exception: - return None + return (url, None) + + +def _parse_feed(feed: Any, src: str) -> list[HeadlineTuple]: + """Parse a feed and return list of headline tuples.""" + items = [] + if feed is None or (feed.bozo and not feed.entries): + return items + + for e in feed.entries: + t = strip_tags(e.get("title", "")) + if not t or skip(t): + continue + pub = e.get("published_parsed") or e.get("updated_parsed") + try: + ts = datetime(*pub[:6]).strftime("%H:%M") if pub else "——:——" + except Exception: + ts = "——:——" + items.append((t, src, ts)) + return items + + +def fetch_all_fast() -> list[HeadlineTuple]: + """Fetch only the first N sources for fast startup.""" + global _fast_start_urls + _fast_start_urls = set(list(FEEDS.values())[:FAST_START_SOURCES]) + + items: list[HeadlineTuple] = [] + with ThreadPoolExecutor(max_workers=FAST_START_SOURCES) as executor: + futures = { + executor.submit(fetch_feed, url): src + for src, url in list(FEEDS.items())[:FAST_START_SOURCES] + } + for future in as_completed(futures): + src = futures[future] + url, feed = future.result() + if feed is None or (feed.bozo and not feed.entries): + boot_ln(src, "DARK", False) + continue + parsed = _parse_feed(feed, src) + if parsed: + items.extend(parsed) + boot_ln(src, f"LINKED [{len(parsed)}]", True) + else: + boot_ln(src, "EMPTY", False) + return items -# ─── ALL RSS FEEDS ──────────────────────────────────────── def fetch_all() -> tuple[list[HeadlineTuple], int, int]: - """Fetch all RSS feeds and return items, linked count, failed count.""" + """Fetch all RSS feeds concurrently and return items, 
linked count, failed count.""" + global _fast_start_urls + _fast_start_urls = set() + items: list[HeadlineTuple] = [] linked = failed = 0 - for src, url in FEEDS.items(): - feed = fetch_feed(url) - if feed is None or (feed.bozo and not feed.entries): - boot_ln(src, "DARK", False) - failed += 1 - continue - n = 0 - for e in feed.entries: - t = strip_tags(e.get("title", "")) - if not t or skip(t): + + with ThreadPoolExecutor(max_workers=DEFAULT_MAX_WORKERS) as executor: + futures = {executor.submit(fetch_feed, url): src for src, url in FEEDS.items()} + for future in as_completed(futures): + src = futures[future] + url, feed = future.result() + if feed is None or (feed.bozo and not feed.entries): + boot_ln(src, "DARK", False) + failed += 1 continue - pub = e.get("published_parsed") or e.get("updated_parsed") - try: - ts = datetime(*pub[:6]).strftime("%H:%M") if pub else "——:——" - except Exception: - ts = "——:——" - items.append((t, src, ts)) - n += 1 - if n: - boot_ln(src, f"LINKED [{n}]", True) - linked += 1 - else: - boot_ln(src, "EMPTY", False) - failed += 1 + parsed = _parse_feed(feed, src) + if parsed: + items.extend(parsed) + boot_ln(src, f"LINKED [{len(parsed)}]", True) + linked += 1 + else: + boot_ln(src, "EMPTY", False) + failed += 1 + return items, linked, failed -# ─── PROJECT GUTENBERG ──────────────────────────────────── def _fetch_gutenberg(url: str, label: str) -> list[HeadlineTuple]: """Download and parse stanzas/passages from a Project Gutenberg text.""" try: @@ -76,23 +121,21 @@ def _fetch_gutenberg(url: str, label: str) -> list[HeadlineTuple]: .replace("\r\n", "\n") .replace("\r", "\n") ) - # Strip PG boilerplate m = re.search(r"\*\*\*\s*START OF[^\n]*\n", text) if m: text = text[m.end() :] m = re.search(r"\*\*\*\s*END OF", text) if m: text = text[: m.start()] - # Split on blank lines into stanzas/passages blocks = re.split(r"\n{2,}", text.strip()) items = [] for blk in blocks: - blk = " ".join(blk.split()) # flatten to one line + blk = " 
".join(blk.split()) if len(blk) < 20 or len(blk) > 280: continue - if blk.isupper(): # skip all-caps headers + if blk.isupper(): continue - if re.match(r"^[IVXLCDM]+\.?\s*$", blk): # roman numerals + if re.match(r"^[IVXLCDM]+\.?\s*$", blk): continue items.append((blk, label, "")) return items @@ -100,28 +143,35 @@ def _fetch_gutenberg(url: str, label: str) -> list[HeadlineTuple]: return [] -def fetch_poetry(): - """Fetch all poetry/literature sources.""" +def fetch_poetry() -> tuple[list[HeadlineTuple], int, int]: + """Fetch all poetry/literature sources concurrently.""" items = [] linked = failed = 0 - for label, url in POETRY_SOURCES.items(): - stanzas = _fetch_gutenberg(url, label) - if stanzas: - boot_ln(label, f"LOADED [{len(stanzas)}]", True) - items.extend(stanzas) - linked += 1 - else: - boot_ln(label, "DARK", False) - failed += 1 + + with ThreadPoolExecutor(max_workers=DEFAULT_MAX_WORKERS) as executor: + futures = { + executor.submit(_fetch_gutenberg, url, label): label + for label, url in POETRY_SOURCES.items() + } + for future in as_completed(futures): + label = futures[future] + stanzas = future.result() + if stanzas: + boot_ln(label, f"LOADED [{len(stanzas)}]", True) + items.extend(stanzas) + linked += 1 + else: + boot_ln(label, "DARK", False) + failed += 1 + return items, linked, failed -# ─── CACHE ──────────────────────────────────────────────── -_CACHE_DIR = pathlib.Path(__file__).resolve().parent.parent +_cache_dir = pathlib.Path(__file__).resolve().parent / "fixtures" def _cache_path(): - return _CACHE_DIR / f".mainline_cache_{config.MODE}.json" + return _cache_dir / "headlines.json" def load_cache(): @@ -143,3 +193,6 @@ def save_cache(items): _cache_path().write_text(json.dumps({"items": items})) except Exception: pass + + +_fast_start_urls: set = set() diff --git a/engine/fetch_code.py b/engine/fetch_code.py deleted file mode 100644 index 3d1160f..0000000 --- a/engine/fetch_code.py +++ /dev/null @@ -1,67 +0,0 @@ -""" -Source code feed — reads 
engine/*.py and emits non-blank, non-comment lines -as scroll items. Used by --code mode. -Depends on: nothing (stdlib only). -""" - -import ast -from pathlib import Path - -_ENGINE_DIR = Path(__file__).resolve().parent - - -def _scope_map(source: str) -> dict[int, str]: - """Return {line_number: scope_label} for every line in source. - - Nodes are sorted by range size descending so inner scopes overwrite - outer ones, guaranteeing the narrowest enclosing scope wins. - """ - try: - tree = ast.parse(source) - except SyntaxError: - return {} - - nodes = [] - for node in ast.walk(tree): - if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)): - end = getattr(node, "end_lineno", node.lineno) - span = end - node.lineno - nodes.append((span, node)) - - # Largest range first → inner scopes overwrite on second pass - nodes.sort(key=lambda x: x[0], reverse=True) - - scope = {} - for _, node in nodes: - end = getattr(node, "end_lineno", node.lineno) - if isinstance(node, ast.ClassDef): - label = node.name - else: - label = f"{node.name}()" - for ln in range(node.lineno, end + 1): - scope[ln] = label - - return scope - - -def fetch_code(): - """Read engine/*.py and return (items, line_count, 0). - - Each item is (text, src, ts) where: - text = the code line (rstripped, indentation preserved) - src = enclosing function/class name, e.g. 'stream()' or '' - ts = dotted module path, e.g. 
'engine.scroll' - """ - items = [] - for path in sorted(_ENGINE_DIR.glob("*.py")): - module = f"engine.{path.stem}" - source = path.read_text(encoding="utf-8") - scope = _scope_map(source) - for lineno, raw in enumerate(source.splitlines(), start=1): - stripped = raw.strip() - if not stripped or stripped.startswith("#"): - continue - label = scope.get(lineno, "") - items.append((raw.rstrip(), label, module)) - - return items, len(items), 0 diff --git a/engine/figment_render.py b/engine/figment_render.py deleted file mode 100644 index 0b9e0ea..0000000 --- a/engine/figment_render.py +++ /dev/null @@ -1,90 +0,0 @@ -""" -SVG to half-block terminal art rasterization. - -Pipeline: SVG -> cairosvg -> PIL -> greyscale threshold -> half-block encode. -Follows the same pixel-pair approach as engine/render.py for OTF fonts. -""" - -from __future__ import annotations - -import os -import sys -from io import BytesIO - -# cairocffi (used by cairosvg) calls dlopen() to find the Cairo C library. -# On macOS with Homebrew, Cairo lives in /opt/homebrew/lib (Apple Silicon) or -# /usr/local/lib (Intel), which are not in dyld's default search path. -# Setting DYLD_LIBRARY_PATH before the import directs dlopen() to those paths. -if sys.platform == "darwin" and not os.environ.get("DYLD_LIBRARY_PATH"): - for _brew_lib in ("/opt/homebrew/lib", "/usr/local/lib"): - if os.path.exists(os.path.join(_brew_lib, "libcairo.2.dylib")): - os.environ["DYLD_LIBRARY_PATH"] = _brew_lib - break - -import cairosvg -from PIL import Image - -_cache: dict[tuple[str, int, int], list[str]] = {} - - -def rasterize_svg(svg_path: str, width: int, height: int) -> list[str]: - """Convert SVG file to list of half-block terminal rows (uncolored). - - Args: - svg_path: Path to SVG file. - width: Target terminal width in columns. - height: Target terminal height in rows. - - Returns: - List of strings, one per terminal row, containing block characters. 
- """ - cache_key = (svg_path, width, height) - if cache_key in _cache: - return _cache[cache_key] - - # SVG -> PNG in memory - png_bytes = cairosvg.svg2png( - url=svg_path, - output_width=width, - output_height=height * 2, # 2 pixel rows per terminal row - ) - - # PNG -> greyscale PIL image - # Composite RGBA onto white background so transparent areas become white (255) - # and drawn pixels retain their luminance values. - img_rgba = Image.open(BytesIO(png_bytes)).convert("RGBA") - img_rgba = img_rgba.resize((width, height * 2), Image.Resampling.LANCZOS) - background = Image.new("RGBA", img_rgba.size, (255, 255, 255, 255)) - background.paste(img_rgba, mask=img_rgba.split()[3]) - img = background.convert("L") - - data = img.tobytes() - pix_w = width - pix_h = height * 2 - # White (255) = empty space, dark (< threshold) = filled pixel - threshold = 128 - - # Half-block encode: walk pixel pairs - rows: list[str] = [] - for y in range(0, pix_h, 2): - row: list[str] = [] - for x in range(pix_w): - top = data[y * pix_w + x] < threshold - bot = data[(y + 1) * pix_w + x] < threshold if y + 1 < pix_h else False - if top and bot: - row.append("█") - elif top: - row.append("▀") - elif bot: - row.append("▄") - else: - row.append(" ") - rows.append("".join(row)) - - _cache[cache_key] = rows - return rows - - -def clear_cache() -> None: - """Clear the rasterization cache (e.g., on terminal resize).""" - _cache.clear() diff --git a/engine/figment_trigger.py b/engine/figment_trigger.py deleted file mode 100644 index d3aac9c..0000000 --- a/engine/figment_trigger.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Figment trigger protocol and command types. - -Defines the extensible input abstraction for triggering figment displays -from any control surface (ntfy, MQTT, serial, etc.). 
-""" - -from __future__ import annotations - -from dataclasses import dataclass -from enum import Enum -from typing import Protocol - - -class FigmentAction(Enum): - TRIGGER = "trigger" - SET_INTENSITY = "set_intensity" - SET_INTERVAL = "set_interval" - SET_COLOR = "set_color" - STOP = "stop" - - -@dataclass -class FigmentCommand: - action: FigmentAction - value: float | str | None = None - - -class FigmentTrigger(Protocol): - """Protocol for figment trigger sources. - - Any input source (ntfy, MQTT, serial) can implement this - to trigger and control figment displays. - """ - - def poll(self) -> FigmentCommand | None: ... diff --git a/engine/fixtures/headlines.json b/engine/fixtures/headlines.json new file mode 100644 index 0000000..4bcab08 --- /dev/null +++ b/engine/fixtures/headlines.json @@ -0,0 +1 @@ +{"items": []} \ No newline at end of file diff --git a/engine/interfaces/__init__.py b/engine/interfaces/__init__.py new file mode 100644 index 0000000..2d8879f --- /dev/null +++ b/engine/interfaces/__init__.py @@ -0,0 +1,73 @@ +""" +Core interfaces for the mainline pipeline architecture. + +This module provides all abstract base classes and protocols that define +the contracts between pipeline components: + +- Stage: Base class for pipeline components (imported from pipeline.core) +- DataSource: Abstract data providers (imported from data_sources.sources) +- EffectPlugin: Visual effects interface (imported from effects.types) +- Sensor: Real-time input interface (imported from sensors) +- Display: Output backend protocol (imported from display) + +This module provides a centralized import location for all interfaces. 
+""" + +from engine.data_sources.sources import ( + DataSource, + ImageItem, + SourceItem, +) +from engine.display import Display +from engine.effects.types import ( + EffectConfig, + EffectContext, + EffectPlugin, + PartialUpdate, + PipelineConfig, + apply_param_bindings, + create_effect_context, +) +from engine.pipeline.core import ( + DataType, + Stage, + StageConfig, + StageError, + StageResult, + create_stage_error, +) +from engine.sensors import ( + Sensor, + SensorStage, + SensorValue, + create_sensor_stage, +) + +__all__ = [ + # Stage interfaces + "DataType", + "Stage", + "StageConfig", + "StageError", + "StageResult", + "create_stage_error", + # Data source interfaces + "DataSource", + "ImageItem", + "SourceItem", + # Effect interfaces + "EffectConfig", + "EffectContext", + "EffectPlugin", + "PartialUpdate", + "PipelineConfig", + "apply_param_bindings", + "create_effect_context", + # Sensor interfaces + "Sensor", + "SensorStage", + "SensorValue", + "create_sensor_stage", + # Display protocol + "Display", +] diff --git a/engine/layers.py b/engine/layers.py deleted file mode 100644 index a3cc0d5..0000000 --- a/engine/layers.py +++ /dev/null @@ -1,356 +0,0 @@ -""" -Layer compositing — message overlay, ticker zone, firehose, noise. -Depends on: config, render, effects. -""" - -import random -import re -import time -from datetime import datetime - -from engine import config -from engine.effects import ( - EffectChain, - EffectContext, - fade_line, - firehose_line, - glitch_bar, - noise, - vis_trunc, -) -from engine.render import big_wrap, lr_gradient, msg_gradient -from engine.terminal import RST, W_COOL - -MSG_META = "\033[38;5;245m" -MSG_BORDER = "\033[2;38;5;37m" - - -def render_message_overlay( - msg: tuple[str, str, float] | None, - w: int, - h: int, - msg_cache: tuple, -) -> tuple[list[str], tuple]: - """Render ntfy message overlay. 
- - Args: - msg: (title, body, timestamp) or None - w: terminal width - h: terminal height - msg_cache: (cache_key, rendered_rows) for caching - - Returns: - (list of ANSI strings, updated cache) - """ - overlay = [] - if msg is None: - return overlay, msg_cache - - m_title, m_body, m_ts = msg - display_text = m_body or m_title or "(empty)" - display_text = re.sub(r"\s+", " ", display_text.upper()) - - cache_key = (display_text, w) - if msg_cache[0] != cache_key: - msg_rows = big_wrap(display_text, w - 4) - msg_cache = (cache_key, msg_rows) - else: - msg_rows = msg_cache[1] - - msg_rows = msg_gradient(msg_rows, (time.monotonic() * config.GRAD_SPEED) % 1.0) - - elapsed_s = int(time.monotonic() - m_ts) - remaining = max(0, config.MESSAGE_DISPLAY_SECS - elapsed_s) - ts_str = datetime.now().strftime("%H:%M:%S") - panel_h = len(msg_rows) + 2 - panel_top = max(0, (h - panel_h) // 2) - - row_idx = 0 - for mr in msg_rows: - ln = vis_trunc(mr, w) - overlay.append(f"\033[{panel_top + row_idx + 1};1H {ln}\033[0m\033[K") - row_idx += 1 - - meta_parts = [] - if m_title and m_title != m_body: - meta_parts.append(m_title) - meta_parts.append(f"ntfy \u00b7 {ts_str} \u00b7 {remaining}s") - meta = ( - " " + " \u00b7 ".join(meta_parts) - if len(meta_parts) > 1 - else " " + meta_parts[0] - ) - overlay.append(f"\033[{panel_top + row_idx + 1};1H{MSG_META}{meta}\033[0m\033[K") - row_idx += 1 - - bar = "\u2500" * (w - 4) - overlay.append(f"\033[{panel_top + row_idx + 1};1H {MSG_BORDER}{bar}\033[0m\033[K") - - return overlay, msg_cache - - -def render_ticker_zone( - active: list, - scroll_cam: int, - ticker_h: int, - w: int, - noise_cache: dict, - grad_offset: float, -) -> tuple[list[str], dict]: - """Render the ticker scroll zone. 
- - Args: - active: list of (content_rows, color, canvas_y, meta_idx) - scroll_cam: camera position (viewport top) - ticker_h: height of ticker zone - w: terminal width - noise_cache: dict of cy -> noise string - grad_offset: gradient animation offset - - Returns: - (list of ANSI strings, updated noise_cache) - """ - buf = [] - top_zone = max(1, int(ticker_h * 0.25)) - bot_zone = max(1, int(ticker_h * 0.10)) - - def noise_at(cy): - if cy not in noise_cache: - noise_cache[cy] = noise(w) if random.random() < 0.15 else None - return noise_cache[cy] - - for r in range(ticker_h): - scr_row = r + 1 - cy = scroll_cam + r - top_f = min(1.0, r / top_zone) if top_zone > 0 else 1.0 - bot_f = min(1.0, (ticker_h - 1 - r) / bot_zone) if bot_zone > 0 else 1.0 - row_fade = min(top_f, bot_f) - drawn = False - - for content, hc, by, midx in active: - cr = cy - by - if 0 <= cr < len(content): - raw = content[cr] - if cr != midx: - colored = lr_gradient([raw], grad_offset)[0] - else: - colored = raw - ln = vis_trunc(colored, w) - if row_fade < 1.0: - ln = fade_line(ln, row_fade) - - if cr == midx: - buf.append(f"\033[{scr_row};1H{W_COOL}{ln}{RST}\033[K") - elif ln.strip(): - buf.append(f"\033[{scr_row};1H{ln}{RST}\033[K") - else: - buf.append(f"\033[{scr_row};1H\033[K") - drawn = True - break - - if not drawn: - n = noise_at(cy) - if row_fade < 1.0 and n: - n = fade_line(n, row_fade) - if n: - buf.append(f"\033[{scr_row};1H{n}") - else: - buf.append(f"\033[{scr_row};1H\033[K") - - return buf, noise_cache - - -def apply_glitch( - buf: list[str], - ticker_buf_start: int, - mic_excess: float, - w: int, -) -> list[str]: - """Apply glitch effect to ticker buffer. 
- - Args: - buf: current buffer - ticker_buf_start: index where ticker starts in buffer - mic_excess: mic level above threshold - w: terminal width - - Returns: - Updated buffer with glitches applied - """ - glitch_prob = 0.32 + min(0.9, mic_excess * 0.16) - n_hits = 4 + int(mic_excess / 2) - ticker_buf_len = len(buf) - ticker_buf_start - - if random.random() < glitch_prob and ticker_buf_len > 0: - for _ in range(min(n_hits, ticker_buf_len)): - gi = random.randint(0, ticker_buf_len - 1) - scr_row = gi + 1 - buf[ticker_buf_start + gi] = f"\033[{scr_row};1H{glitch_bar(w)}" - - return buf - - -def render_firehose(items: list, w: int, fh: int, h: int) -> list[str]: - """Render firehose strip at bottom of screen.""" - buf = [] - if fh > 0: - for fr in range(fh): - scr_row = h - fh + fr + 1 - fline = firehose_line(items, w) - buf.append(f"\033[{scr_row};1H{fline}\033[K") - return buf - - -_effect_chain = None - - -def init_effects() -> None: - """Initialize effect plugins and chain.""" - global _effect_chain - from engine.effects import EffectChain, get_registry - - registry = get_registry() - - import effects_plugins - - effects_plugins.discover_plugins() - - chain = EffectChain(registry) - chain.set_order(["noise", "fade", "glitch", "firehose"]) - _effect_chain = chain - - -def process_effects( - buf: list[str], - w: int, - h: int, - scroll_cam: int, - ticker_h: int, - mic_excess: float, - grad_offset: float, - frame_number: int, - has_message: bool, - items: list, -) -> list[str]: - """Process buffer through effect chain.""" - if _effect_chain is None: - init_effects() - - ctx = EffectContext( - terminal_width=w, - terminal_height=h, - scroll_cam=scroll_cam, - ticker_height=ticker_h, - mic_excess=mic_excess, - grad_offset=grad_offset, - frame_number=frame_number, - has_message=has_message, - items=items, - ) - return _effect_chain.process(buf, ctx) - - -def get_effect_chain() -> EffectChain | None: - """Get the effect chain instance.""" - global _effect_chain - if 
_effect_chain is None: - init_effects() - return _effect_chain - - -def render_figment_overlay( - figment_state, - w: int, - h: int, -) -> list[str]: - """Render figment overlay as ANSI cursor-positioning commands. - - Args: - figment_state: FigmentState with phase, progress, rows, gradient, centering. - w: terminal width - h: terminal height - - Returns: - List of ANSI strings to append to display buffer. - """ - from engine.render import _color_codes_to_ansi - - rows = figment_state.rows - if not rows: - return [] - - phase = figment_state.phase - progress = figment_state.progress - gradient = figment_state.gradient - center_row = figment_state.center_row - center_col = figment_state.center_col - - cols = _color_codes_to_ansi(gradient) - - # Build a list of non-space cell positions - cell_positions = [] - for r_idx, row in enumerate(rows): - for c_idx, ch in enumerate(row): - if ch != " ": - cell_positions.append((r_idx, c_idx)) - - n_cells = len(cell_positions) - if n_cells == 0: - return [] - - # Use a deterministic seed so the reveal/dissolve pattern is stable per-figment - rng = random.Random(hash(tuple(rows[0][:10])) if rows[0] else 42) - shuffled = list(cell_positions) - rng.shuffle(shuffled) - - # Phase-dependent visibility - from effects_plugins.figment import FigmentPhase - - if phase == FigmentPhase.REVEAL: - visible_count = int(n_cells * progress) - visible = set(shuffled[:visible_count]) - elif phase == FigmentPhase.HOLD: - visible = set(cell_positions) - # Strobe: dim some cells periodically - if int(progress * 20) % 3 == 0: - dim_count = int(n_cells * 0.3) - visible -= set(shuffled[:dim_count]) - elif phase == FigmentPhase.DISSOLVE: - remaining_count = int(n_cells * (1.0 - progress)) - visible = set(shuffled[:remaining_count]) - else: - visible = set(cell_positions) - - # Build overlay commands - overlay: list[str] = [] - n_cols = len(cols) - max_x = max((len(r.rstrip()) for r in rows if r.strip()), default=1) - - for r_idx, row in enumerate(rows): 
- scr_row = center_row + r_idx + 1 # 1-indexed - if scr_row < 1 or scr_row > h: - continue - - line_buf: list[str] = [] - has_content = False - - for c_idx, ch in enumerate(row): - scr_col = center_col + c_idx + 1 - if scr_col < 1 or scr_col > w: - continue - - if ch != " " and (r_idx, c_idx) in visible: - # Apply gradient color - shifted = (c_idx / max(max_x - 1, 1)) % 1.0 - idx = min(round(shifted * (n_cols - 1)), n_cols - 1) - line_buf.append(f"{cols[idx]}{ch}{RST}") - has_content = True - else: - line_buf.append(" ") - - if has_content: - line_str = "".join(line_buf).rstrip() - if line_str.strip(): - overlay.append(f"\033[{scr_row};{center_col + 1}H{line_str}{RST}") - - return overlay diff --git a/engine/mic.py b/engine/mic.py deleted file mode 100644 index c72a440..0000000 --- a/engine/mic.py +++ /dev/null @@ -1,96 +0,0 @@ -""" -Microphone input monitor — standalone, no internal dependencies. -Gracefully degrades if sounddevice/numpy are unavailable. -""" - -import atexit -from collections.abc import Callable -from datetime import datetime - -try: - import numpy as _np - import sounddevice as _sd - - _HAS_MIC = True -except Exception: - _HAS_MIC = False - - -from engine.events import MicLevelEvent - - -class MicMonitor: - """Background mic stream that exposes current RMS dB level.""" - - def __init__(self, threshold_db=50): - self.threshold_db = threshold_db - self._db = -99.0 - self._stream = None - self._subscribers: list[Callable[[MicLevelEvent], None]] = [] - - @property - def available(self): - """True if sounddevice is importable.""" - return _HAS_MIC - - @property - def db(self): - """Current RMS dB level.""" - return self._db - - @property - def excess(self): - """dB above threshold (clamped to 0).""" - return max(0.0, self._db - self.threshold_db) - - def subscribe(self, callback: Callable[[MicLevelEvent], None]) -> None: - """Register a callback to be called when mic level changes.""" - self._subscribers.append(callback) - - def unsubscribe(self, 
callback: Callable[[MicLevelEvent], None]) -> None: - """Remove a registered callback.""" - if callback in self._subscribers: - self._subscribers.remove(callback) - - def _emit(self, event: MicLevelEvent) -> None: - """Emit an event to all subscribers.""" - for cb in self._subscribers: - try: - cb(event) - except Exception: - pass - - def start(self): - """Start background mic stream. Returns True on success, False/None otherwise.""" - if not _HAS_MIC: - return None - - def _cb(indata, frames, t, status): - rms = float(_np.sqrt(_np.mean(indata**2))) - self._db = 20 * _np.log10(rms) if rms > 0 else -99.0 - if self._subscribers: - event = MicLevelEvent( - db_level=self._db, - excess_above_threshold=max(0.0, self._db - self.threshold_db), - timestamp=datetime.now(), - ) - self._emit(event) - - try: - self._stream = _sd.InputStream( - callback=_cb, channels=1, samplerate=44100, blocksize=2048 - ) - self._stream.start() - atexit.register(self.stop) - return True - except Exception: - return False - - def stop(self): - """Stop the mic stream if running.""" - if self._stream: - try: - self._stream.stop() - except Exception: - pass - self._stream = None diff --git a/engine/pipeline/__init__.py b/engine/pipeline/__init__.py new file mode 100644 index 0000000..ff03c3f --- /dev/null +++ b/engine/pipeline/__init__.py @@ -0,0 +1,106 @@ +""" +Unified Pipeline Architecture. 
+ +This module provides a clean, dependency-managed pipeline system: +- Stage: Base class for all pipeline components +- Pipeline: DAG-based execution orchestrator +- PipelineParams: Runtime configuration for animation +- PipelinePreset: Pre-configured pipeline configurations +- StageRegistry: Unified registration for all stage types + +The pipeline architecture supports: +- Sources: Data providers (headlines, poetry, pipeline viz) +- Effects: Post-processors (noise, fade, glitch, hud) +- Displays: Output backends (terminal, pygame, websocket) +- Cameras: Viewport controllers (vertical, horizontal, omni) + +Example: + from engine.pipeline import Pipeline, PipelineConfig, StageRegistry + + pipeline = Pipeline(PipelineConfig(source="headlines", display="terminal")) + pipeline.add_stage("source", StageRegistry.create("source", "headlines")) + pipeline.add_stage("display", StageRegistry.create("display", "terminal")) + pipeline.build().initialize() + + result = pipeline.execute(initial_data) +""" + +from engine.pipeline.controller import ( + Pipeline, + PipelineConfig, + PipelineRunner, + create_default_pipeline, + create_pipeline_from_params, +) +from engine.pipeline.core import ( + PipelineContext, + Stage, + StageConfig, + StageError, + StageResult, +) +from engine.pipeline.params import ( + DEFAULT_HEADLINE_PARAMS, + DEFAULT_PIPELINE_PARAMS, + DEFAULT_PYGAME_PARAMS, + PipelineParams, +) +from engine.pipeline.presets import ( + DEMO_PRESET, + FIREHOSE_PRESET, + PIPELINE_VIZ_PRESET, + POETRY_PRESET, + UI_PRESET, + WEBSOCKET_PRESET, + PipelinePreset, + create_preset_from_params, + get_preset, + list_presets, +) +from engine.pipeline.registry import ( + StageRegistry, + discover_stages, + register_camera, + register_display, + register_effect, + register_source, +) + +__all__ = [ + # Core + "Stage", + "StageConfig", + "StageError", + "StageResult", + "PipelineContext", + # Controller + "Pipeline", + "PipelineConfig", + "PipelineRunner", + "create_default_pipeline", + 
"create_pipeline_from_params", + # Params + "PipelineParams", + "DEFAULT_HEADLINE_PARAMS", + "DEFAULT_PIPELINE_PARAMS", + "DEFAULT_PYGAME_PARAMS", + # Presets + "PipelinePreset", + "PRESETS", + "DEMO_PRESET", + "POETRY_PRESET", + "PIPELINE_VIZ_PRESET", + "WEBSOCKET_PRESET", + "FIREHOSE_PRESET", + "UI_PRESET", + "get_preset", + "list_presets", + "create_preset_from_params", + # Registry + "StageRegistry", + "discover_stages", + "register_source", + "register_effect", + "register_display", + "register_camera", +] diff --git a/engine/pipeline/adapters.py b/engine/pipeline/adapters.py new file mode 100644 index 0000000..b12fd8d --- /dev/null +++ b/engine/pipeline/adapters.py @@ -0,0 +1,50 @@ +""" +Stage adapters - Bridge existing components to the Stage interface. + +This module provides adapters that wrap existing components +(EffectPlugin, Display, DataSource, Camera) as Stage implementations. + +DEPRECATED: This file is now a compatibility wrapper. +Use `engine.pipeline.adapters` package instead. 
+""" + +# Re-export from the new package structure for backward compatibility +from engine.pipeline.adapters import ( + # Adapter classes + CameraStage, + CanvasStage, + DataSourceStage, + DisplayStage, + EffectPluginStage, + FontStage, + ImageToTextStage, + PassthroughStage, + SourceItemsToBufferStage, + ViewportFilterStage, + # Factory functions + create_stage_from_camera, + create_stage_from_display, + create_stage_from_effect, + create_stage_from_font, + create_stage_from_source, +) + +__all__ = [ + # Adapter classes + "EffectPluginStage", + "DisplayStage", + "DataSourceStage", + "PassthroughStage", + "SourceItemsToBufferStage", + "CameraStage", + "ViewportFilterStage", + "FontStage", + "ImageToTextStage", + "CanvasStage", + # Factory functions + "create_stage_from_display", + "create_stage_from_effect", + "create_stage_from_source", + "create_stage_from_camera", + "create_stage_from_font", +] diff --git a/engine/pipeline/adapters/__init__.py b/engine/pipeline/adapters/__init__.py new file mode 100644 index 0000000..396ddd9 --- /dev/null +++ b/engine/pipeline/adapters/__init__.py @@ -0,0 +1,44 @@ +"""Stage adapters - Bridge existing components to the Stage interface. + +This module provides adapters that wrap existing components +(EffectPlugin, Display, DataSource, Camera) as Stage implementations. 
+""" + +from .camera import CameraClockStage, CameraStage +from .data_source import DataSourceStage, PassthroughStage, SourceItemsToBufferStage +from .display import DisplayStage +from .effect_plugin import EffectPluginStage +from .factory import ( + create_stage_from_camera, + create_stage_from_display, + create_stage_from_effect, + create_stage_from_font, + create_stage_from_source, +) +from .transform import ( + CanvasStage, + FontStage, + ImageToTextStage, + ViewportFilterStage, +) + +__all__ = [ + # Adapter classes + "EffectPluginStage", + "DisplayStage", + "DataSourceStage", + "PassthroughStage", + "SourceItemsToBufferStage", + "CameraStage", + "CameraClockStage", + "ViewportFilterStage", + "FontStage", + "ImageToTextStage", + "CanvasStage", + # Factory functions + "create_stage_from_display", + "create_stage_from_effect", + "create_stage_from_source", + "create_stage_from_camera", + "create_stage_from_font", +] diff --git a/engine/pipeline/adapters/camera.py b/engine/pipeline/adapters/camera.py new file mode 100644 index 0000000..7b25236 --- /dev/null +++ b/engine/pipeline/adapters/camera.py @@ -0,0 +1,219 @@ +"""Adapter for camera stage.""" + +import time +from typing import Any + +from engine.pipeline.core import DataType, PipelineContext, Stage + + +class CameraClockStage(Stage): + """Per-frame clock stage that updates camera state. + + This stage runs once per frame and updates the camera's internal state + (position, time). It makes camera_y/camera_x available to subsequent + stages via the pipeline context. + + Unlike other stages, this is a pure clock stage and doesn't process + data - it just updates camera state and passes data through unchanged. 
+ """ + + def __init__(self, camera, name: str = "camera-clock"): + self._camera = camera + self.name = name + self.category = "camera" + self.optional = False + self._last_frame_time: float | None = None + + @property + def stage_type(self) -> str: + return "camera" + + @property + def capabilities(self) -> set[str]: + # Provides camera state info only + # NOTE: Do NOT provide "source" as it conflicts with viewport_filter's "source.filtered" + return {"camera.state"} + + @property + def dependencies(self) -> set[str]: + # Clock stage - no dependencies (updates every frame regardless of data flow) + return set() + + @property + def inlet_types(self) -> set: + # Accept any data type - this is a pass-through stage + return {DataType.ANY} + + @property + def outlet_types(self) -> set: + # Pass through whatever was received + return {DataType.ANY} + + def process(self, data: Any, ctx: PipelineContext) -> Any: + """Update camera state and pass data through. + + This stage updates the camera's internal state (position, time) and + makes the updated camera_y/camera_x available to subsequent stages + via the pipeline context. + + The data is passed through unchanged - this stage only updates + camera state, it doesn't transform the data. 
+ """ + if data is None: + return data + + # Update camera speed from params if explicitly set (for dynamic modulation) + # Only update if camera_speed in params differs from the default (1.0) + # This preserves camera speed set during construction + if ( + ctx.params + and hasattr(ctx.params, "camera_speed") + and ctx.params.camera_speed != 1.0 + ): + self._camera.set_speed(ctx.params.camera_speed) + + current_time = time.perf_counter() + dt = 0.0 + if self._last_frame_time is not None: + dt = current_time - self._last_frame_time + self._camera.update(dt) + self._last_frame_time = current_time + + # Update context with current camera position + ctx.set_state("camera_y", self._camera.y) + ctx.set_state("camera_x", self._camera.x) + + # Pass data through unchanged + return data + + +class CameraStage(Stage): + """Adapter wrapping Camera as a Stage. + + This stage applies camera viewport transformation to the rendered buffer. + Camera state updates are handled by CameraClockStage. + """ + + def __init__(self, camera, name: str = "vertical"): + self._camera = camera + self.name = name + self.category = "camera" + self.optional = True + self._last_frame_time: float | None = None + + def save_state(self) -> dict[str, Any]: + """Save camera state for restoration after pipeline rebuild. 
+ + Returns: + Dictionary containing camera state that can be restored + """ + state = { + "x": self._camera.x, + "y": self._camera.y, + "mode": self._camera.mode.value + if hasattr(self._camera.mode, "value") + else self._camera.mode, + "speed": self._camera.speed, + "zoom": self._camera.zoom, + "canvas_width": self._camera.canvas_width, + "canvas_height": self._camera.canvas_height, + "_x_float": getattr(self._camera, "_x_float", 0.0), + "_y_float": getattr(self._camera, "_y_float", 0.0), + "_time": getattr(self._camera, "_time", 0.0), + } + # Save radial camera state if present + if hasattr(self._camera, "_r_float"): + state["_r_float"] = self._camera._r_float + if hasattr(self._camera, "_theta_float"): + state["_theta_float"] = self._camera._theta_float + if hasattr(self._camera, "_radial_input"): + state["_radial_input"] = self._camera._radial_input + return state + + def restore_state(self, state: dict[str, Any]) -> None: + """Restore camera state from saved state. + + Args: + state: Dictionary containing camera state from save_state() + """ + from engine.camera import CameraMode + + self._camera.x = state.get("x", 0) + self._camera.y = state.get("y", 0) + + # Restore mode - handle both enum value and direct enum + mode_value = state.get("mode", 0) + if isinstance(mode_value, int): + self._camera.mode = CameraMode(mode_value) + else: + self._camera.mode = mode_value + + self._camera.speed = state.get("speed", 1.0) + self._camera.zoom = state.get("zoom", 1.0) + self._camera.canvas_width = state.get("canvas_width", 200) + self._camera.canvas_height = state.get("canvas_height", 200) + + # Restore internal state + if hasattr(self._camera, "_x_float"): + self._camera._x_float = state.get("_x_float", 0.0) + if hasattr(self._camera, "_y_float"): + self._camera._y_float = state.get("_y_float", 0.0) + if hasattr(self._camera, "_time"): + self._camera._time = state.get("_time", 0.0) + + # Restore radial camera state if present + if hasattr(self._camera, "_r_float"): + 
self._camera._r_float = state.get("_r_float", 0.0) + if hasattr(self._camera, "_theta_float"): + self._camera._theta_float = state.get("_theta_float", 0.0) + if hasattr(self._camera, "_radial_input"): + self._camera._radial_input = state.get("_radial_input", 0.0) + + @property + def stage_type(self) -> str: + return "camera" + + @property + def capabilities(self) -> set[str]: + return {"camera"} + + @property + def dependencies(self) -> set[str]: + return {"render.output"} + + @property + def inlet_types(self) -> set: + return {DataType.TEXT_BUFFER} + + @property + def outlet_types(self) -> set: + return {DataType.TEXT_BUFFER} + + def process(self, data: Any, ctx: PipelineContext) -> Any: + """Apply camera transformation to items.""" + if data is None: + return data + + # Camera state is updated by CameraClockStage + # We only apply the viewport transformation here + + if hasattr(self._camera, "apply"): + viewport_width = ctx.params.viewport_width if ctx.params else 80 + viewport_height = ctx.params.viewport_height if ctx.params else 24 + + # Use filtered camera position if available (from ViewportFilterStage) + # This handles the case where the buffer has been filtered and starts at row 0 + filtered_camera_y = ctx.get("camera_y", self._camera.y) + + # Temporarily adjust camera position for filtering + original_y = self._camera.y + self._camera.y = filtered_camera_y + + try: + result = self._camera.apply(data, viewport_width, viewport_height) + finally: + # Restore original camera position + self._camera.y = original_y + + return result + return data diff --git a/engine/pipeline/adapters/data_source.py b/engine/pipeline/adapters/data_source.py new file mode 100644 index 0000000..04a59af --- /dev/null +++ b/engine/pipeline/adapters/data_source.py @@ -0,0 +1,143 @@ +""" +Stage adapters - Bridge existing components to the Stage interface. + +This module provides adapters that wrap existing components +(DataSource) as Stage implementations. 
+""" + +from typing import Any + +from engine.data_sources import SourceItem +from engine.pipeline.core import DataType, PipelineContext, Stage + + +class DataSourceStage(Stage): + """Adapter wrapping DataSource as a Stage.""" + + def __init__(self, data_source, name: str = "headlines"): + self._source = data_source + self.name = name + self.category = "source" + self.optional = False + + @property + def capabilities(self) -> set[str]: + return {f"source.{self.name}"} + + @property + def dependencies(self) -> set[str]: + return set() + + @property + def inlet_types(self) -> set: + return {DataType.NONE} # Sources don't take input + + @property + def outlet_types(self) -> set: + return {DataType.SOURCE_ITEMS} + + def process(self, data: Any, ctx: PipelineContext) -> Any: + """Fetch data from source.""" + if hasattr(self._source, "get_items"): + return self._source.get_items() + return data + + +class PassthroughStage(Stage): + """Simple stage that passes data through unchanged. + + Used for sources that already provide the data in the correct format + (e.g., pipeline introspection that outputs text directly). + """ + + def __init__(self, name: str = "passthrough"): + self.name = name + self.category = "render" + self.optional = True + + @property + def stage_type(self) -> str: + return "render" + + @property + def capabilities(self) -> set[str]: + return {"render.output"} + + @property + def dependencies(self) -> set[str]: + return {"source"} + + @property + def inlet_types(self) -> set: + return {DataType.SOURCE_ITEMS} + + @property + def outlet_types(self) -> set: + return {DataType.SOURCE_ITEMS} + + def process(self, data: Any, ctx: PipelineContext) -> Any: + """Pass data through unchanged.""" + return data + + +class SourceItemsToBufferStage(Stage): + """Convert SourceItem objects to text buffer. + + Takes a list of SourceItem objects and extracts their content, + splitting on newlines to create a proper text buffer for display. 
+ """ + + def __init__(self, name: str = "items-to-buffer"): + self.name = name + self.category = "render" + self.optional = True + + @property + def stage_type(self) -> str: + return "render" + + @property + def capabilities(self) -> set[str]: + return {"render.output"} + + @property + def dependencies(self) -> set[str]: + return {"source"} + + @property + def inlet_types(self) -> set: + return {DataType.SOURCE_ITEMS} + + @property + def outlet_types(self) -> set: + return {DataType.TEXT_BUFFER} + + def process(self, data: Any, ctx: PipelineContext) -> Any: + """Convert SourceItem list to text buffer.""" + if data is None: + return [] + + # If already a list of strings, return as-is + if isinstance(data, list) and data and isinstance(data[0], str): + return data + + # If it's a list of SourceItem, extract content + if isinstance(data, list): + result = [] + for item in data: + if isinstance(item, SourceItem): + # Split content by newline to get individual lines + lines = item.content.split("\n") + result.extend(lines) + elif hasattr(item, "content"): # Has content attribute + lines = str(item.content).split("\n") + result.extend(lines) + else: + result.append(str(item)) + return result + + # Single item + if isinstance(data, SourceItem): + return data.content.split("\n") + + return [str(data)] diff --git a/engine/pipeline/adapters/display.py b/engine/pipeline/adapters/display.py new file mode 100644 index 0000000..7fa885c --- /dev/null +++ b/engine/pipeline/adapters/display.py @@ -0,0 +1,93 @@ +"""Adapter wrapping Display as a Stage.""" + +from typing import Any + +from engine.pipeline.core import PipelineContext, Stage + + +class DisplayStage(Stage): + """Adapter wrapping Display as a Stage.""" + + def __init__(self, display, name: str = "terminal"): + self._display = display + self.name = name + self.category = "display" + self.optional = False + self._initialized = False + self._init_width = 80 + self._init_height = 24 + + def save_state(self) -> dict[str, 
Any]: + """Save display state for restoration after pipeline rebuild. + + Returns: + Dictionary containing display state that can be restored + """ + return { + "initialized": self._initialized, + "init_width": self._init_width, + "init_height": self._init_height, + "width": getattr(self._display, "width", 80), + "height": getattr(self._display, "height", 24), + } + + def restore_state(self, state: dict[str, Any]) -> None: + """Restore display state from saved state. + + Args: + state: Dictionary containing display state from save_state() + """ + self._initialized = state.get("initialized", False) + self._init_width = state.get("init_width", 80) + self._init_height = state.get("init_height", 24) + + # Restore display dimensions if the display supports it + if hasattr(self._display, "width"): + self._display.width = state.get("width", 80) + if hasattr(self._display, "height"): + self._display.height = state.get("height", 24) + + @property + def capabilities(self) -> set[str]: + return {"display.output"} + + @property + def dependencies(self) -> set[str]: + return {"render.output"} # Display needs rendered content + + @property + def inlet_types(self) -> set: + from engine.pipeline.core import DataType + + return {DataType.TEXT_BUFFER} # Display consumes rendered text + + @property + def outlet_types(self) -> set: + from engine.pipeline.core import DataType + + return {DataType.NONE} # Display is a terminal stage (no output) + + def init(self, ctx: PipelineContext) -> bool: + w = ctx.params.viewport_width if ctx.params else 80 + h = ctx.params.viewport_height if ctx.params else 24 + + # Try to reuse display if already initialized + reuse = self._initialized + result = self._display.init(w, h, reuse=reuse) + + # Update initialization state + if result is not False: + self._initialized = True + self._init_width = w + self._init_height = h + + return result is not False + + def process(self, data: Any, ctx: PipelineContext) -> Any: + """Output data to display.""" + if 
data is not None: + self._display.show(data) + return data + + def cleanup(self) -> None: + self._display.cleanup() diff --git a/engine/pipeline/adapters/effect_plugin.py b/engine/pipeline/adapters/effect_plugin.py new file mode 100644 index 0000000..2e42c95 --- /dev/null +++ b/engine/pipeline/adapters/effect_plugin.py @@ -0,0 +1,117 @@ +"""Adapter wrapping EffectPlugin as a Stage.""" + +from typing import Any + +from engine.pipeline.core import PipelineContext, Stage + + +class EffectPluginStage(Stage): + """Adapter wrapping EffectPlugin as a Stage. + + Supports capability-based dependencies through the dependencies parameter. + """ + + def __init__( + self, + effect_plugin, + name: str = "effect", + dependencies: set[str] | None = None, + ): + self._effect = effect_plugin + self.name = name + self.category = "effect" + self.optional = False + self._dependencies = dependencies or set() + + @property + def stage_type(self) -> str: + """Return stage_type based on effect name. + + HUD effects are overlays. + """ + if self.name == "hud": + return "overlay" + return self.category + + @property + def render_order(self) -> int: + """Return render_order based on effect type. + + HUD effects have high render_order to appear on top. + """ + if self.name == "hud": + return 100 # High order for overlays + return 0 + + @property + def is_overlay(self) -> bool: + """Return True for HUD effects. + + HUD is an overlay - it composes on top of the buffer + rather than transforming it for the next stage. 
+ """ + return self.name == "hud" + + @property + def capabilities(self) -> set[str]: + return {f"effect.{self.name}"} + + @property + def dependencies(self) -> set[str]: + return self._dependencies + + @property + def inlet_types(self) -> set: + from engine.pipeline.core import DataType + + return {DataType.TEXT_BUFFER} + + @property + def outlet_types(self) -> set: + from engine.pipeline.core import DataType + + return {DataType.TEXT_BUFFER} + + def process(self, data: Any, ctx: PipelineContext) -> Any: + """Process data through the effect.""" + if data is None: + return None + from engine.effects.types import EffectContext, apply_param_bindings + + w = ctx.params.viewport_width if ctx.params else 80 + h = ctx.params.viewport_height if ctx.params else 24 + frame = ctx.params.frame_number if ctx.params else 0 + + effect_ctx = EffectContext( + terminal_width=w, + terminal_height=h, + scroll_cam=0, + ticker_height=h, + camera_x=0, + mic_excess=0.0, + grad_offset=(frame * 0.01) % 1.0, + frame_number=frame, + has_message=False, + items=ctx.get("items", []), + ) + + # Copy sensor state from PipelineContext to EffectContext + for key, value in ctx.state.items(): + if key.startswith("sensor."): + effect_ctx.set_state(key, value) + + # Copy metrics from PipelineContext to EffectContext + if "metrics" in ctx.state: + effect_ctx.set_state("metrics", ctx.state["metrics"]) + + # Copy pipeline_order from PipelineContext services to EffectContext state + pipeline_order = ctx.get("pipeline_order") + if pipeline_order: + effect_ctx.set_state("pipeline_order", pipeline_order) + + # Apply sensor param bindings if effect has them + if hasattr(self._effect, "param_bindings") and self._effect.param_bindings: + bound_config = apply_param_bindings(self._effect, effect_ctx) + self._effect.configure(bound_config) + + return self._effect.process(data, effect_ctx) diff --git a/engine/pipeline/adapters/factory.py b/engine/pipeline/adapters/factory.py new file mode 100644 index 
0000000..983bdf5 --- /dev/null +++ b/engine/pipeline/adapters/factory.py @@ -0,0 +1,38 @@ +"""Factory functions for creating stage instances.""" + +from engine.pipeline.adapters.camera import CameraStage +from engine.pipeline.adapters.data_source import DataSourceStage +from engine.pipeline.adapters.display import DisplayStage +from engine.pipeline.adapters.effect_plugin import EffectPluginStage +from engine.pipeline.adapters.transform import FontStage + + +def create_stage_from_display(display, name: str = "terminal") -> DisplayStage: + """Create a DisplayStage from a display instance.""" + return DisplayStage(display, name=name) + + +def create_stage_from_effect(effect_plugin, name: str) -> EffectPluginStage: + """Create an EffectPluginStage from an effect plugin.""" + return EffectPluginStage(effect_plugin, name=name) + + +def create_stage_from_source(data_source, name: str = "headlines") -> DataSourceStage: + """Create a DataSourceStage from a data source.""" + return DataSourceStage(data_source, name=name) + + +def create_stage_from_camera(camera, name: str = "vertical") -> CameraStage: + """Create a CameraStage from a camera instance.""" + return CameraStage(camera, name=name) + + +def create_stage_from_font( + font_path: str | None = None, + font_size: int | None = None, + font_ref: str | None = "default", + name: str = "font", +) -> FontStage: + """Create a FontStage with specified font configuration.""" + # FontStage currently doesn't use these parameters but keeps them for compatibility + return FontStage(name=name) diff --git a/engine/pipeline/adapters/transform.py b/engine/pipeline/adapters/transform.py new file mode 100644 index 0000000..e1b6c08 --- /dev/null +++ b/engine/pipeline/adapters/transform.py @@ -0,0 +1,293 @@ +"""Adapters for transform stages (viewport, font, image, canvas).""" + +from typing import Any + +import engine.render +from engine.data_sources import SourceItem +from engine.pipeline.core import DataType, PipelineContext, Stage + + 
+def estimate_simple_height(text: str, width: int) -> int: + """Estimate height in terminal rows using simple word wrap. + + Uses conservative estimation suitable for headlines. + Each wrapped line is approximately 6 terminal rows (big block rendering). + """ + words = text.split() + if not words: + return 6 + + lines = 1 + current_len = 0 + for word in words: + word_len = len(word) + if current_len + word_len + 1 > width - 4: # -4 for margins + lines += 1 + current_len = word_len + else: + current_len += word_len + 1 + + return lines * 6 # 6 rows per line for big block rendering + + +class ViewportFilterStage(Stage): + """Filter items to viewport height based on rendered height.""" + + def __init__(self, name: str = "viewport-filter"): + self.name = name + self.category = "render" + self.optional = True + self._layout: list[int] = [] + + @property + def stage_type(self) -> str: + return "render" + + @property + def capabilities(self) -> set[str]: + return {"source.filtered"} + + @property + def dependencies(self) -> set[str]: + # Always requires camera.state for viewport filtering + # CameraUpdateStage provides this (auto-injected if missing) + return {"source", "camera.state"} + + @property + def inlet_types(self) -> set: + return {DataType.SOURCE_ITEMS} + + @property + def outlet_types(self) -> set: + return {DataType.SOURCE_ITEMS} + + def process(self, data: Any, ctx: PipelineContext) -> Any: + """Filter items to viewport height based on rendered height.""" + if data is None: + return data + + if not isinstance(data, list): + return data + + if not data: + return [] + + # Get viewport parameters from context + viewport_height = ctx.params.viewport_height if ctx.params else 24 + viewport_width = ctx.params.viewport_width if ctx.params else 80 + camera_y = ctx.get("camera_y", 0) + + # Estimate height for each item and cache layout + self._layout = [] + cumulative_heights = [] + current_height = 0 + + for item in data: + title = item.content if isinstance(item, 
SourceItem) else str(item) + # Use simple height estimation (not PIL-based) + estimated_height = estimate_simple_height(title, viewport_width) + self._layout.append(estimated_height) + current_height += estimated_height + cumulative_heights.append(current_height) + + # Find visible range based on camera_y and viewport_height + # camera_y is the scroll offset (how many rows are scrolled up) + start_y = camera_y + end_y = camera_y + viewport_height + + # Find start index (first item that intersects with visible range) + start_idx = 0 + start_item_y = 0 # Y position where the first visible item starts + for i, total_h in enumerate(cumulative_heights): + if total_h > start_y: + start_idx = i + # Calculate the Y position of the start of this item + if i > 0: + start_item_y = cumulative_heights[i - 1] + break + + # Find end index (first item that extends beyond visible range) + end_idx = len(data) + for i, total_h in enumerate(cumulative_heights): + if total_h >= end_y: + end_idx = i + 1 + break + + # Adjust camera_y for the filtered buffer + # The filtered buffer starts at row 0, but the camera position + # needs to be relative to where the first visible item starts + filtered_camera_y = camera_y - start_item_y + + # Update context with the filtered camera position + # This ensures CameraStage can correctly slice the filtered buffer + ctx.set_state("camera_y", filtered_camera_y) + ctx.set_state("camera_x", ctx.get("camera_x", 0)) # Keep camera_x unchanged + + # Return visible items + return data[start_idx:end_idx] + + +class FontStage(Stage): + """Render items using font.""" + + def __init__(self, name: str = "font"): + self.name = name + self.category = "render" + self.optional = False + + @property + def stage_type(self) -> str: + return "render" + + @property + def capabilities(self) -> set[str]: + return {"render.output"} + + @property + def stage_dependencies(self) -> set[str]: + # Must connect to viewport_filter stage to get filtered source + return 
{"viewport_filter"} + + @property + def dependencies(self) -> set[str]: + # Depend on source.filtered (provided by viewport_filter) + # This ensures we get the filtered/processed source, not raw source + return {"source.filtered"} + + @property + def inlet_types(self) -> set: + return {DataType.SOURCE_ITEMS} + + @property + def outlet_types(self) -> set: + return {DataType.TEXT_BUFFER} + + def process(self, data: Any, ctx: PipelineContext) -> Any: + """Render items to text buffer using font.""" + if data is None: + return [] + + if not isinstance(data, list): + return [str(data)] + + import os + + if os.environ.get("DEBUG_CAMERA"): + print(f"FontStage: input items={len(data)}") + + viewport_width = ctx.params.viewport_width if ctx.params else 80 + + result = [] + for item in data: + if isinstance(item, SourceItem): + title = item.content + src = item.source + ts = item.timestamp + content_lines, _, _ = engine.render.make_block( + title, src, ts, viewport_width + ) + result.extend(content_lines) + elif hasattr(item, "content"): + title = str(item.content) + content_lines, _, _ = engine.render.make_block( + title, "", "", viewport_width + ) + result.extend(content_lines) + else: + result.append(str(item)) + return result + + +class ImageToTextStage(Stage): + """Convert image items to text.""" + + def __init__(self, name: str = "image-to-text"): + self.name = name + self.category = "render" + self.optional = True + + @property + def stage_type(self) -> str: + return "render" + + @property + def capabilities(self) -> set[str]: + return {"render.output"} + + @property + def dependencies(self) -> set[str]: + return {"source"} + + @property + def inlet_types(self) -> set: + return {DataType.SOURCE_ITEMS} + + @property + def outlet_types(self) -> set: + return {DataType.TEXT_BUFFER} + + def process(self, data: Any, ctx: PipelineContext) -> Any: + """Convert image items to text representation.""" + if data is None: + return [] + + if not isinstance(data, list): + return 
[str(data)] + + result = [] + for item in data: + # Check if item is an image + if hasattr(item, "image_path") or hasattr(item, "image_data"): + # Placeholder: would normally render image to ASCII art + result.append(f"[Image: {getattr(item, 'image_path', 'data')}]") + elif isinstance(item, SourceItem): + result.extend(item.content.split("\n")) + else: + result.append(str(item)) + return result + + +class CanvasStage(Stage): + """Render items to canvas.""" + + def __init__(self, name: str = "canvas"): + self.name = name + self.category = "render" + self.optional = False + + @property + def stage_type(self) -> str: + return "render" + + @property + def capabilities(self) -> set[str]: + return {"render.output"} + + @property + def dependencies(self) -> set[str]: + return {"source"} + + @property + def inlet_types(self) -> set: + return {DataType.SOURCE_ITEMS} + + @property + def outlet_types(self) -> set: + return {DataType.TEXT_BUFFER} + + def process(self, data: Any, ctx: PipelineContext) -> Any: + """Render items to canvas.""" + if data is None: + return [] + + if not isinstance(data, list): + return [str(data)] + + # Simple canvas rendering + result = [] + for item in data: + if isinstance(item, SourceItem): + result.extend(item.content.split("\n")) + else: + result.append(str(item)) + return result diff --git a/engine/pipeline/controller.py b/engine/pipeline/controller.py new file mode 100644 index 0000000..bc857ce --- /dev/null +++ b/engine/pipeline/controller.py @@ -0,0 +1,1055 @@ +""" +Pipeline controller - DAG-based pipeline execution. + +The Pipeline class orchestrates stages in dependency order, handling +the complete render cycle from source to display. 
+""" + +import time +from dataclasses import dataclass, field +from typing import Any + +from engine.pipeline.core import PipelineContext, Stage, StageError, StageResult +from engine.pipeline.params import PipelineParams +from engine.pipeline.registry import StageRegistry + + +@dataclass +class PipelineConfig: + """Configuration for a pipeline instance.""" + + source: str = "headlines" + display: str = "terminal" + camera: str = "vertical" + effects: list[str] = field(default_factory=list) + enable_metrics: bool = True + + +@dataclass +class StageMetrics: + """Metrics for a single stage execution.""" + + name: str + duration_ms: float + chars_in: int = 0 + chars_out: int = 0 + + +@dataclass +class FrameMetrics: + """Metrics for a single frame through the pipeline.""" + + frame_number: int + total_ms: float + stages: list[StageMetrics] = field(default_factory=list) + + +class Pipeline: + """Main pipeline orchestrator. + + Manages the execution of all stages in dependency order, + handling initialization, processing, and cleanup. + + Supports dynamic mutation during runtime via the mutation API. 
+ """ + + def __init__( + self, + config: PipelineConfig | None = None, + context: PipelineContext | None = None, + ): + self.config = config or PipelineConfig() + self.context = context or PipelineContext() + self._stages: dict[str, Stage] = {} + self._execution_order: list[str] = [] + self._initialized = False + self._capability_map: dict[str, list[str]] = {} + + self._metrics_enabled = self.config.enable_metrics + self._frame_metrics: list[FrameMetrics] = [] + self._max_metrics_frames = 60 + + # Minimum capabilities required for pipeline to function + # NOTE: Research later - allow presets to override these defaults + self._minimum_capabilities: set[str] = { + "source", + "render.output", + "display.output", + "camera.state", # Always required for viewport filtering + } + self._current_frame_number = 0 + + def add_stage(self, name: str, stage: Stage, initialize: bool = True) -> "Pipeline": + """Add a stage to the pipeline. + + Args: + name: Unique name for the stage + stage: Stage instance to add + initialize: If True, initialize the stage immediately + + Returns: + Self for method chaining + """ + self._stages[name] = stage + if self._initialized and initialize: + stage.init(self.context) + return self + + def remove_stage(self, name: str, cleanup: bool = True) -> Stage | None: + """Remove a stage from the pipeline. + + Args: + name: Name of the stage to remove + cleanup: If True, call cleanup() on the removed stage + + Returns: + The removed stage, or None if not found + """ + stage = self._stages.pop(name, None) + if stage and cleanup: + try: + stage.cleanup() + except Exception: + pass + + # Rebuild execution order and capability map if stage was removed + if stage and self._initialized: + self._rebuild() + + return stage + + def remove_stage_safe(self, name: str, cleanup: bool = True) -> Stage | None: + """Remove a stage and rebuild execution order safely. + + This is an alias for remove_stage() that explicitly rebuilds + the execution order after removal. 
+ + Args: + name: Name of the stage to remove + cleanup: If True, call cleanup() on the removed stage + + Returns: + The removed stage, or None if not found + """ + return self.remove_stage(name, cleanup) + + def cleanup_stage(self, name: str) -> None: + """Clean up a specific stage without removing it. + + This is useful for stages that need to release resources + (like display connections) without being removed from the pipeline. + + Args: + name: Name of the stage to clean up + """ + stage = self._stages.get(name) + if stage: + try: + stage.cleanup() + except Exception: + pass + + def can_hot_swap(self, name: str) -> bool: + """Check if a stage can be safely hot-swapped. + + A stage can be hot-swapped if: + 1. It exists in the pipeline + 2. It's not required for basic pipeline function + 3. It doesn't have strict dependencies that can't be re-resolved + + Args: + name: Name of the stage to check + + Returns: + True if the stage can be hot-swapped, False otherwise + """ + # Check if stage exists + if name not in self._stages: + return False + + # Check if stage is a minimum capability provider + stage = self._stages[name] + stage_caps = stage.capabilities if hasattr(stage, "capabilities") else set() + minimum_caps = self._minimum_capabilities + + # If stage provides a minimum capability, it's more critical + # but still potentially swappable if another stage provides the same capability + for cap in stage_caps: + if cap in minimum_caps: + # Check if another stage provides this capability + providers = self._capability_map.get(cap, []) + # This stage is the sole provider - might be critical + # but still allow hot-swap if pipeline is not initialized + if len(providers) <= 1 and self._initialized: + return False + + return True + + def replace_stage( + self, name: str, new_stage: Stage, preserve_state: bool = True + ) -> Stage | None: + """Replace a stage in the pipeline with a new one. 
+ + Args: + name: Name of the stage to replace + new_stage: New stage instance + preserve_state: If True, copy relevant state from old stage + + Returns: + The old stage, or None if not found + """ + old_stage = self._stages.get(name) + if not old_stage: + return None + + if preserve_state: + self._copy_stage_state(old_stage, new_stage) + + old_stage.cleanup() + self._stages[name] = new_stage + new_stage.init(self.context) + + if self._initialized: + self._rebuild() + + return old_stage + + def swap_stages(self, name1: str, name2: str) -> bool: + """Swap two stages in the pipeline. + + Args: + name1: First stage name + name2: Second stage name + + Returns: + True if successful, False if either stage not found + """ + stage1 = self._stages.get(name1) + stage2 = self._stages.get(name2) + + if not stage1 or not stage2: + return False + + self._stages[name1] = stage2 + self._stages[name2] = stage1 + + if self._initialized: + self._rebuild() + + return True + + def move_stage( + self, name: str, after: str | None = None, before: str | None = None + ) -> bool: + """Move a stage's position in execution order. 
+ + Args: + name: Stage to move + after: Place this stage after this stage name + before: Place this stage before this stage name + + Returns: + True if successful, False if stage not found + """ + if name not in self._stages: + return False + + if not self._initialized: + return False + + current_order = list(self._execution_order) + if name not in current_order: + return False + + current_order.remove(name) + + if after and after in current_order: + idx = current_order.index(after) + 1 + current_order.insert(idx, name) + elif before and before in current_order: + idx = current_order.index(before) + current_order.insert(idx, name) + else: + current_order.append(name) + + self._execution_order = current_order + return True + + def _copy_stage_state(self, old_stage: Stage, new_stage: Stage) -> None: + """Copy relevant state from old stage to new stage during replacement. + + Args: + old_stage: The old stage being replaced + new_stage: The new stage + """ + if hasattr(old_stage, "_enabled"): + new_stage._enabled = old_stage._enabled + + # Preserve camera state + if hasattr(old_stage, "save_state") and hasattr(new_stage, "restore_state"): + try: + state = old_stage.save_state() + new_stage.restore_state(state) + except Exception: + # If state preservation fails, continue without it + pass + + def _rebuild(self) -> None: + """Rebuild execution order after mutation or auto-injection.""" + was_initialized = self._initialized + self._initialized = False + + self._capability_map = self._build_capability_map() + self._execution_order = self._resolve_dependencies() + + # Note: We intentionally DO NOT validate dependencies here. + # Mutation operations (remove/swap/move) might leave the pipeline + # temporarily invalid (e.g., removing a stage that others depend on). + # Validation is performed explicitly in build() or can be checked + # manually via validate_minimum_capabilities(). 
+ # try: + # self._validate_dependencies() + # self._validate_types() + # except StageError: + # pass + + # Restore initialized state + self._initialized = was_initialized + + def get_stage(self, name: str) -> Stage | None: + """Get a stage by name.""" + return self._stages.get(name) + + def enable_stage(self, name: str) -> bool: + """Enable a stage in the pipeline. + + Args: + name: Stage name to enable + + Returns: + True if successful, False if stage not found + """ + stage = self._stages.get(name) + if stage: + stage.set_enabled(True) + return True + return False + + def disable_stage(self, name: str) -> bool: + """Disable a stage in the pipeline. + + Args: + name: Stage name to disable + + Returns: + True if successful, False if stage not found + """ + stage = self._stages.get(name) + if stage: + stage.set_enabled(False) + return True + return False + + def get_stage_info(self, name: str) -> dict | None: + """Get detailed information about a stage. + + Args: + name: Stage name + + Returns: + Dictionary with stage information, or None if not found + """ + stage = self._stages.get(name) + if not stage: + return None + + return { + "name": name, + "category": stage.category, + "stage_type": stage.stage_type, + "enabled": stage.is_enabled(), + "optional": stage.optional, + "capabilities": list(stage.capabilities), + "dependencies": list(stage.dependencies), + "inlet_types": [dt.name for dt in stage.inlet_types], + "outlet_types": [dt.name for dt in stage.outlet_types], + "render_order": stage.render_order, + "is_overlay": stage.is_overlay, + } + + def get_pipeline_info(self) -> dict: + """Get comprehensive information about the pipeline. 
+ + Returns: + Dictionary with pipeline state + """ + return { + "stages": {name: self.get_stage_info(name) for name in self._stages}, + "execution_order": self._execution_order.copy(), + "initialized": self._initialized, + "stage_count": len(self._stages), + } + + @property + def minimum_capabilities(self) -> set[str]: + """Get minimum capabilities required for pipeline to function.""" + return self._minimum_capabilities + + @minimum_capabilities.setter + def minimum_capabilities(self, value: set[str]): + """Set minimum required capabilities. + + NOTE: Research later - allow presets to override these defaults + """ + self._minimum_capabilities = value + + def validate_minimum_capabilities(self) -> tuple[bool, list[str]]: + """Validate that all minimum capabilities are provided. + + Returns: + Tuple of (is_valid, missing_capabilities) + """ + missing = [] + for cap in self._minimum_capabilities: + if not self._find_stage_with_capability(cap): + missing.append(cap) + return len(missing) == 0, missing + + def ensure_minimum_capabilities(self) -> list[str]: + """Automatically inject MVP stages if minimum capabilities are missing. + + Auto-injection is always on, but defaults are trivial to override. 
+ Returns: + List of stages that were injected + """ + from engine.camera import Camera + from engine.data_sources.sources import EmptyDataSource + from engine.display import DisplayRegistry + from engine.pipeline.adapters import ( + CameraClockStage, + CameraStage, + DataSourceStage, + DisplayStage, + SourceItemsToBufferStage, + ) + + injected = [] + + # Check for source capability + if ( + not self._find_stage_with_capability("source") + and "source" not in self._stages + ): + empty_source = EmptyDataSource(width=80, height=24) + self.add_stage("source", DataSourceStage(empty_source, name="empty")) + injected.append("source") + + # Check for camera.state capability (must be BEFORE render to accept SOURCE_ITEMS) + camera = None + if not self._find_stage_with_capability("camera.state"): + # Inject static camera (trivial, no movement) + camera = Camera.scroll(speed=0.0) + camera.set_canvas_size(200, 200) + if "camera_update" not in self._stages: + self.add_stage( + "camera_update", CameraClockStage(camera, name="camera-clock") + ) + injected.append("camera_update") + + # Check for render capability + if ( + not self._find_stage_with_capability("render.output") + and "render" not in self._stages + ): + self.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer")) + injected.append("render") + + # Check for camera stage (must be AFTER render to accept TEXT_BUFFER) + if camera and "camera" not in self._stages: + self.add_stage("camera", CameraStage(camera, name="static")) + injected.append("camera") + + # Check for display capability + if ( + not self._find_stage_with_capability("display.output") + and "display" not in self._stages + ): + display = DisplayRegistry.create("terminal") + if display: + self.add_stage("display", DisplayStage(display, name="terminal")) + injected.append("display") + + # Rebuild pipeline if stages were injected + if injected: + self._rebuild() + + return injected + + def build(self, auto_inject: bool = True) -> "Pipeline": + 
"""Build execution order based on dependencies. + + Args: + auto_inject: If True, automatically inject MVP stages for missing capabilities + """ + self._capability_map = self._build_capability_map() + self._execution_order = self._resolve_dependencies() + + # Validate minimum capabilities and auto-inject if needed + if auto_inject: + is_valid, missing = self.validate_minimum_capabilities() + if not is_valid: + injected = self.ensure_minimum_capabilities() + if injected: + print( + f" \033[38;5;226mAuto-injected stages for missing capabilities: {injected}\033[0m" + ) + # Rebuild after auto-injection + self._capability_map = self._build_capability_map() + self._execution_order = self._resolve_dependencies() + + # Re-validate after injection attempt (whether anything was injected or not) + # If injection didn't run (injected empty), we still need to check if we're valid + # If injection ran but failed to fix (injected empty), we need to check + is_valid, missing = self.validate_minimum_capabilities() + if not is_valid: + raise StageError( + "build", + f"Auto-injection failed to provide minimum capabilities: {missing}", + ) + + self._validate_dependencies() + self._validate_types() + self._initialized = True + return self + + def _build_capability_map(self) -> dict[str, list[str]]: + """Build a map of capabilities to stage names. + + Returns: + Dict mapping capability -> list of stage names that provide it + """ + capability_map: dict[str, list[str]] = {} + for name, stage in self._stages.items(): + for cap in stage.capabilities: + if cap not in capability_map: + capability_map[cap] = [] + capability_map[cap].append(name) + return capability_map + + def _find_stage_with_capability(self, capability: str) -> str | None: + """Find a stage that provides the given capability. 
+ + Supports wildcard matching: + - "source" matches "source.headlines" (prefix match) + - "source.*" matches "source.headlines" + - "source.headlines" matches exactly + + Args: + capability: The capability to find + + Returns: + Stage name that provides the capability, or None if not found + """ + # Exact match + if capability in self._capability_map: + return self._capability_map[capability][0] + + # Prefix match (e.g., "source" -> "source.headlines") + for cap, stages in self._capability_map.items(): + if cap.startswith(capability + "."): + return stages[0] + + # Wildcard match (e.g., "source.*" -> "source.headlines") + if ".*" in capability: + prefix = capability[:-2] # Remove ".*" + for cap in self._capability_map: + if cap.startswith(prefix + "."): + return self._capability_map[cap][0] + + return None + + def _resolve_dependencies(self) -> list[str]: + """Resolve stage execution order using topological sort with capability matching.""" + ordered = [] + visited = set() + temp_mark = set() + + def visit(name: str) -> None: + if name in temp_mark: + raise StageError(name, "Circular dependency detected") + if name in visited: + return + + temp_mark.add(name) + stage = self._stages.get(name) + if stage: + # Handle capability-based dependencies + for dep in stage.dependencies: + # Find a stage that provides this capability + dep_stage_name = self._find_stage_with_capability(dep) + if dep_stage_name: + visit(dep_stage_name) + + # Handle direct stage dependencies + for stage_dep in stage.stage_dependencies: + if stage_dep in self._stages: + visit(stage_dep) + else: + # Stage dependency not found - this is an error + raise StageError( + name, + f"Missing stage dependency: '{stage_dep}' not found in pipeline", + ) + + temp_mark.remove(name) + visited.add(name) + ordered.append(name) + + for name in self._stages: + if name not in visited: + visit(name) + + return ordered + + def _validate_dependencies(self) -> None: + """Validate that all dependencies can be satisfied. 
+ + Raises StageError if any dependency cannot be resolved. + """ + missing: list[tuple[str, str]] = [] # (stage_name, capability) + + for name, stage in self._stages.items(): + for dep in stage.dependencies: + if not self._find_stage_with_capability(dep): + missing.append((name, dep)) + + if missing: + msgs = [f" - {stage} needs {cap}" for stage, cap in missing] + raise StageError( + "validation", + "Missing capabilities:\n" + "\n".join(msgs), + ) + + def _validate_types(self) -> None: + """Validate inlet/outlet types between connected stages. + + PureData-style type validation. Each stage declares its inlet_types + (what it accepts) and outlet_types (what it produces). This method + validates that connected stages have compatible types. + + Raises StageError if type mismatch is detected. + """ + from engine.pipeline.core import DataType + + errors: list[str] = [] + + for i, name in enumerate(self._execution_order): + stage = self._stages.get(name) + if not stage: + continue + + inlet_types = stage.inlet_types + + # Check against previous stage's outlet types + if i > 0: + prev_name = self._execution_order[i - 1] + prev_stage = self._stages.get(prev_name) + if prev_stage: + prev_outlets = prev_stage.outlet_types + + # Check if any outlet type is accepted by this inlet + compatible = ( + DataType.ANY in inlet_types + or DataType.ANY in prev_outlets + or bool(prev_outlets & inlet_types) + ) + + if not compatible: + errors.append( + f" - {name} (inlet: {inlet_types}) " + f"← {prev_name} (outlet: {prev_outlets})" + ) + + # Check display/sink stages (should accept TEXT_BUFFER) + if ( + stage.category == "display" + and DataType.TEXT_BUFFER not in inlet_types + and DataType.ANY not in inlet_types + ): + errors.append(f" - {name} is display but doesn't accept TEXT_BUFFER") + + if errors: + raise StageError( + "type_validation", + "Type mismatch in pipeline connections:\n" + "\n".join(errors), + ) + + def initialize(self) -> bool: + """Initialize all stages in execution 
order.""" + for name in self._execution_order: + stage = self._stages.get(name) + if stage and not stage.init(self.context) and not stage.optional: + return False + return True + + def execute(self, data: Any | None = None) -> StageResult: + """Execute the pipeline with the given input data. + + Pipeline execution: + 1. Execute all non-overlay stages in dependency order + 2. Apply overlay stages on top (sorted by render_order) + """ + import os + import sys + + debug = os.environ.get("MAINLINE_DEBUG_DATAFLOW") == "1" + + if debug: + print( + f"[PIPELINE.execute] Starting with data type: {type(data).__name__ if data else 'None'}", + file=sys.stderr, + flush=True, + ) + + if not self._initialized: + self.build() + + if not self._initialized: + return StageResult( + success=False, + data=None, + error="Pipeline not initialized", + ) + + current_data = data + frame_start = time.perf_counter() if self._metrics_enabled else 0 + stage_timings: list[StageMetrics] = [] + + # Separate overlay stages and display stage from regular stages + overlay_stages: list[tuple[int, Stage]] = [] + display_stage: Stage | None = None + regular_stages: list[str] = [] + + for name in self._execution_order: + stage = self._stages.get(name) + if not stage or not stage.is_enabled(): + continue + + # Check if this is the display stage - execute last + if stage.category == "display": + display_stage = stage + continue + + # Safely check is_overlay - handle MagicMock and other non-bool returns + try: + is_overlay = bool(getattr(stage, "is_overlay", False)) + except Exception: + is_overlay = False + + if is_overlay: + # Safely get render_order + try: + render_order = int(getattr(stage, "render_order", 0)) + except Exception: + render_order = 0 + overlay_stages.append((render_order, stage)) + else: + regular_stages.append(name) + + # Execute regular stages in dependency order (excluding display) + for name in regular_stages: + stage = self._stages.get(name) + if not stage or not stage.is_enabled(): 
+ continue + + stage_start = time.perf_counter() if self._metrics_enabled else 0 + + try: + if debug: + data_info = type(current_data).__name__ + if isinstance(current_data, list): + data_info += f"[{len(current_data)}]" + print( + f"[STAGE.{name}] Starting with: {data_info}", + file=sys.stderr, + flush=True, + ) + + current_data = stage.process(current_data, self.context) + + if debug: + data_info = type(current_data).__name__ + if isinstance(current_data, list): + data_info += f"[{len(current_data)}]" + print( + f"[STAGE.{name}] Completed, output: {data_info}", + file=sys.stderr, + flush=True, + ) + except Exception as e: + if debug: + print(f"[STAGE.{name}] ERROR: {e}", file=sys.stderr, flush=True) + if not stage.optional: + return StageResult( + success=False, + data=current_data, + error=str(e), + stage_name=name, + ) + continue + + if self._metrics_enabled: + stage_duration = (time.perf_counter() - stage_start) * 1000 + chars_in = len(str(data)) if data else 0 + chars_out = len(str(current_data)) if current_data else 0 + stage_timings.append( + StageMetrics( + name=name, + duration_ms=stage_duration, + chars_in=chars_in, + chars_out=chars_out, + ) + ) + + # Apply overlay stages (sorted by render_order) + overlay_stages.sort(key=lambda x: x[0]) + for render_order, stage in overlay_stages: + stage_start = time.perf_counter() if self._metrics_enabled else 0 + stage_name = f"[overlay]{stage.name}" + + try: + # Overlays receive current_data but don't pass their output to next stage + # Instead, their output is composited on top + overlay_output = stage.process(current_data, self.context) + # For now, we just let the overlay output pass through + # In a more sophisticated implementation, we'd composite it + if overlay_output is not None: + current_data = overlay_output + except Exception as e: + if not stage.optional: + return StageResult( + success=False, + data=current_data, + error=str(e), + stage_name=stage_name, + ) + + if self._metrics_enabled: + 
stage_duration = (time.perf_counter() - stage_start) * 1000 + chars_in = len(str(data)) if data else 0 + chars_out = len(str(current_data)) if current_data else 0 + stage_timings.append( + StageMetrics( + name=stage_name, + duration_ms=stage_duration, + chars_in=chars_in, + chars_out=chars_out, + ) + ) + + # Execute display stage LAST (after overlay stages) + # This ensures overlay effects like HUD are visible in the final output + if display_stage: + stage_start = time.perf_counter() if self._metrics_enabled else 0 + + try: + current_data = display_stage.process(current_data, self.context) + except Exception as e: + if not display_stage.optional: + return StageResult( + success=False, + data=current_data, + error=str(e), + stage_name=display_stage.name, + ) + + if self._metrics_enabled: + stage_duration = (time.perf_counter() - stage_start) * 1000 + chars_in = len(str(data)) if data else 0 + chars_out = len(str(current_data)) if current_data else 0 + stage_timings.append( + StageMetrics( + name=display_stage.name, + duration_ms=stage_duration, + chars_in=chars_in, + chars_out=chars_out, + ) + ) + + if self._metrics_enabled: + total_duration = (time.perf_counter() - frame_start) * 1000 + self._frame_metrics.append( + FrameMetrics( + frame_number=self._current_frame_number, + total_ms=total_duration, + stages=stage_timings, + ) + ) + + # Store metrics in context for other stages (like HUD) + # This makes metrics a first-class pipeline citizen + if self.context: + self.context.state["metrics"] = self.get_metrics_summary() + + if len(self._frame_metrics) > self._max_metrics_frames: + self._frame_metrics.pop(0) + self._current_frame_number += 1 + + return StageResult(success=True, data=current_data) + + def cleanup(self) -> None: + """Clean up all stages in reverse order.""" + for name in reversed(self._execution_order): + stage = self._stages.get(name) + if stage: + try: + stage.cleanup() + except Exception: + pass + self._stages.clear() + self._initialized = False + 
+ @property + def stages(self) -> dict[str, Stage]: + """Get all stages.""" + return self._stages.copy() + + @property + def execution_order(self) -> list[str]: + """Get execution order.""" + return self._execution_order.copy() + + def get_stage_names(self) -> list[str]: + """Get list of stage names.""" + return list(self._stages.keys()) + + def get_overlay_stages(self) -> list[Stage]: + """Get all overlay stages sorted by render_order.""" + overlays = [stage for stage in self._stages.values() if stage.is_overlay] + overlays.sort(key=lambda s: s.render_order) + return overlays + + def get_stage_type(self, name: str) -> str: + """Get the stage_type for a stage.""" + stage = self._stages.get(name) + return stage.stage_type if stage else "" + + def get_render_order(self, name: str) -> int: + """Get the render_order for a stage.""" + stage = self._stages.get(name) + return stage.render_order if stage else 0 + + def get_metrics_summary(self) -> dict: + """Get summary of collected metrics.""" + if not self._frame_metrics: + return {"error": "No metrics collected"} + + total_times = [f.total_ms for f in self._frame_metrics] + avg_total = sum(total_times) / len(total_times) + min_total = min(total_times) + max_total = max(total_times) + + stage_stats: dict[str, dict] = {} + for frame in self._frame_metrics: + for stage in frame.stages: + if stage.name not in stage_stats: + stage_stats[stage.name] = {"times": [], "total_chars": 0} + stage_stats[stage.name]["times"].append(stage.duration_ms) + stage_stats[stage.name]["total_chars"] += stage.chars_out + + for name, stats in stage_stats.items(): + times = stats["times"] + stats["avg_ms"] = sum(times) / len(times) + stats["min_ms"] = min(times) + stats["max_ms"] = max(times) + del stats["times"] + + return { + "frame_count": len(self._frame_metrics), + "pipeline": { + "avg_ms": avg_total, + "min_ms": min_total, + "max_ms": max_total, + }, + "stages": stage_stats, + } + + def reset_metrics(self) -> None: + """Reset collected 
metrics.""" + self._frame_metrics.clear() + self._current_frame_number = 0 + + def get_frame_times(self) -> list[float]: + """Get historical frame times for sparklines/charts.""" + return [f.total_ms for f in self._frame_metrics] + + +class PipelineRunner: + """High-level pipeline runner with animation support.""" + + def __init__( + self, + pipeline: Pipeline, + params: PipelineParams | None = None, + ): + self.pipeline = pipeline + self.params = params or PipelineParams() + self._running = False + + def start(self) -> bool: + """Start the pipeline.""" + self._running = True + return self.pipeline.initialize() + + def step(self, input_data: Any | None = None) -> Any: + """Execute one pipeline step.""" + self.params.frame_number += 1 + self.pipeline.context.params = self.params + result = self.pipeline.execute(input_data) + return result.data if result.success else None + + def stop(self) -> None: + """Stop and clean up the pipeline.""" + self._running = False + self.pipeline.cleanup() + + @property + def is_running(self) -> bool: + """Check if runner is active.""" + return self._running + + +def create_pipeline_from_params(params: PipelineParams) -> Pipeline: + """Create a pipeline from PipelineParams.""" + config = PipelineConfig( + source=params.source, + display=params.display, + camera=params.camera_mode, + effects=params.effect_order, + ) + return Pipeline(config=config) + + +def create_default_pipeline() -> Pipeline: + """Create a default pipeline with all standard components.""" + from engine.data_sources.sources import HeadlinesDataSource + from engine.pipeline.adapters import ( + DataSourceStage, + SourceItemsToBufferStage, + ) + + pipeline = Pipeline() + + # Add source stage (wrapped as Stage) + source = HeadlinesDataSource() + pipeline.add_stage("source", DataSourceStage(source, name="headlines")) + + # Add render stage to convert items to text buffer + pipeline.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer")) + + # Add display 
stage + display = StageRegistry.create("display", "terminal") + if display: + pipeline.add_stage("display", display) + + return pipeline.build() diff --git a/engine/pipeline/core.py b/engine/pipeline/core.py new file mode 100644 index 0000000..55ebf8c --- /dev/null +++ b/engine/pipeline/core.py @@ -0,0 +1,321 @@ +""" +Pipeline core - Unified Stage abstraction and PipelineContext. + +This module provides the foundation for a clean, dependency-managed pipeline: +- Stage: Base class for all pipeline components (sources, effects, displays, cameras) +- PipelineContext: Dependency injection context for runtime data exchange +- Capability system: Explicit capability declarations with duck-typing support +- DataType: PureData-style inlet/outlet typing for validation +""" + +from abc import ABC, abstractmethod +from collections.abc import Callable +from dataclasses import dataclass, field +from enum import Enum, auto +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from engine.pipeline.params import PipelineParams + + +class DataType(Enum): + """PureData-style data types for inlet/outlet validation. + + Each type represents a specific data format that flows through the pipeline. + This enables compile-time-like validation of connections. 
    Examples:
        SOURCE_ITEMS: List[SourceItem] - raw items from sources
        ITEM_TUPLES: List[tuple] - (title, source, timestamp) tuples
        TEXT_BUFFER: List[str] - rendered ANSI buffer for display
        RAW_TEXT: str - raw text strings
        PIL_IMAGE: PIL Image object
    """

    SOURCE_ITEMS = auto()  # List[SourceItem] - from DataSource
    ITEM_TUPLES = auto()  # List[tuple] - (title, source, ts)
    TEXT_BUFFER = auto()  # List[str] - ANSI buffer
    RAW_TEXT = auto()  # str - raw text
    PIL_IMAGE = auto()  # PIL Image object
    ANY = auto()  # Accepts any type
    NONE = auto()  # No data (terminator)


@dataclass
class StageConfig:
    """Configuration for a single stage."""

    name: str
    category: str
    enabled: bool = True
    optional: bool = False
    params: dict[str, Any] = field(default_factory=dict)


class Stage(ABC):
    """Abstract base class for all pipeline stages.

    A Stage is a single component in the rendering pipeline. Stages can be:
    - Sources: Data providers (headlines, poetry, pipeline viz)
    - Effects: Post-processors (noise, fade, glitch, hud)
    - Displays: Output backends (terminal, pygame, websocket)
    - Cameras: Viewport controllers (vertical, horizontal, omni)
    - Overlays: UI elements that compose on top (HUD)

    Stages declare:
    - capabilities: What they provide to other stages
    - dependencies: What they need from other stages
    - stage_type: Category of stage (source, effect, overlay, display)
    - render_order: Execution order within category
    - is_overlay: If True, output is composited on top, not passed downstream

    Duck-typing is supported: any class with the required methods can act as a Stage.
    """

    # Class-level annotations: concrete subclasses are expected to assign
    # these (see the adapter stages) - confirm, no default is given here.
    name: str
    category: str  # "source", "effect", "overlay", "display", "camera"
    optional: bool = False  # If True, pipeline continues even if stage fails

    @property
    def stage_type(self) -> str:
        """Category of stage for ordering.

        Valid values: "source", "effect", "overlay", "display", "camera"
        Defaults to category for backwards compatibility.
        """
        return self.category

    @property
    def render_order(self) -> int:
        """Execution order within stage_type group.

        Higher values execute later. Useful for ordering overlays
        or effects that need specific execution order.
        """
        return 0

    @property
    def is_overlay(self) -> bool:
        """If True, this stage's output is composited on top of the buffer.

        Overlay stages don't pass their output to the next stage.
        Instead, their output is layered on top of the final buffer.
        Use this for HUD, status displays, and similar UI elements.
        """
        return False

    @property
    def inlet_types(self) -> set[DataType]:
        """Return set of data types this stage accepts.

        PureData-style inlet typing. If the connected upstream stage's
        outlet_type is not in this set, the pipeline will raise an error.

        Examples:
        - Source stages: {DataType.NONE} (no input needed)
        - Transform stages: {DataType.ITEM_TUPLES, DataType.TEXT_BUFFER}
        - Display stages: {DataType.TEXT_BUFFER}
        """
        return {DataType.ANY}

    @property
    def outlet_types(self) -> set[DataType]:
        """Return set of data types this stage produces.

        PureData-style outlet typing. Downstream stages must accept
        this type in their inlet_types.

        Examples:
        - Source stages: {DataType.SOURCE_ITEMS}
        - Transform stages: {DataType.TEXT_BUFFER}
        - Display stages: {DataType.NONE} (consumes data)
        """
        return {DataType.ANY}

    @property
    def capabilities(self) -> set[str]:
        """Return set of capabilities this stage provides.

        Examples:
        - "source.headlines"
        - "effect.noise"
        - "display.output"
        - "camera"
        """
        return {f"{self.category}.{self.name}"}

    @property
    def dependencies(self) -> set[str]:
        """Return set of capability names this stage needs.

        Examples:
        - {"display.output"}
        - {"source.headlines"}
        - {"camera"}
        """
        return set()

    @property
    def stage_dependencies(self) -> set[str]:
        """Return set of stage names this stage must connect to directly.

        This allows explicit stage-to-stage dependencies, useful for enforcing
        pipeline structure when capability matching alone is insufficient.

        Examples:
        - {"viewport_filter"}  # Must connect to viewport_filter stage
        - {"camera_update"}  # Must connect to camera_update stage

        NOTE: These are stage names (as added to pipeline), not capabilities.
        """
        return set()

    def init(self, ctx: "PipelineContext") -> bool:
        """Initialize stage with pipeline context.

        Args:
            ctx: PipelineContext for accessing services

        Returns:
            True if initialization succeeded, False otherwise
        """
        return True

    @abstractmethod
    def process(self, data: Any, ctx: "PipelineContext") -> Any:
        """Process input data and return output.

        Args:
            data: Input data from previous stage (or initial data for first stage)
            ctx: PipelineContext for accessing services and state

        Returns:
            Processed data for next stage
        """
        ...

    def cleanup(self) -> None:  # noqa: B027
        """Clean up resources when pipeline shuts down."""
        pass

    def get_config(self) -> StageConfig:
        """Return current configuration of this stage."""
        return StageConfig(
            name=self.name,
            category=self.category,
            optional=self.optional,
        )

    def set_enabled(self, enabled: bool) -> None:
        """Enable or disable this stage."""
        self._enabled = enabled  # type: ignore[attr-defined]

    def is_enabled(self) -> bool:
        """Check if stage is enabled."""
        # Stages default to enabled until set_enabled() is first called
        return getattr(self, "_enabled", True)


@dataclass
class StageResult:
    """Result of stage processing, including success/failure info."""

    success: bool
    data: Any
    error: str | None = None
    stage_name: str = ""


class PipelineContext:
    """Dependency injection context passed through the pipeline.
+ + Provides: + - services: Named services (display, config, event_bus, etc.) + - state: Runtime state shared between stages + - params: PipelineParams for animation-driven config + + Services can be injected at construction time or lazily resolved. + """ + + def __init__( + self, + services: dict[str, Any] | None = None, + initial_state: dict[str, Any] | None = None, + ): + self.services: dict[str, Any] = services or {} + self.state: dict[str, Any] = initial_state or {} + self._params: PipelineParams | None = None + + # Lazy resolvers for common services + self._lazy_resolvers: dict[str, Callable[[], Any]] = { + "config": self._resolve_config, + "event_bus": self._resolve_event_bus, + } + + def _resolve_config(self) -> Any: + from engine.config import get_config + + return get_config() + + def _resolve_event_bus(self) -> Any: + from engine.eventbus import get_event_bus + + return get_event_bus() + + def get(self, key: str, default: Any = None) -> Any: + """Get a service or state value by key. + + First checks services, then state, then lazy resolution. 
+ """ + if key in self.services: + return self.services[key] + if key in self.state: + return self.state[key] + if key in self._lazy_resolvers: + try: + return self._lazy_resolvers[key]() + except Exception: + return default + return default + + def set(self, key: str, value: Any) -> None: + """Set a service or state value.""" + self.services[key] = value + + def set_state(self, key: str, value: Any) -> None: + """Set a runtime state value.""" + self.state[key] = value + + def get_state(self, key: str, default: Any = None) -> Any: + """Get a runtime state value.""" + return self.state.get(key, default) + + @property + def params(self) -> "PipelineParams | None": + """Get current pipeline params (for animation).""" + return self._params + + @params.setter + def params(self, value: "PipelineParams") -> None: + """Set pipeline params (from animation controller).""" + self._params = value + + def has_capability(self, capability: str) -> bool: + """Check if a capability is available.""" + return capability in self.services or capability in self._lazy_resolvers + + +class StageError(Exception): + """Raised when a stage fails to process.""" + + def __init__(self, stage_name: str, message: str, is_optional: bool = False): + self.stage_name = stage_name + self.message = message + self.is_optional = is_optional + super().__init__(f"Stage '{stage_name}' failed: {message}") + + +def create_stage_error( + stage_name: str, error: Exception, is_optional: bool = False +) -> StageError: + """Helper to create a StageError from an exception.""" + return StageError(stage_name, str(error), is_optional) diff --git a/engine/pipeline/params.py b/engine/pipeline/params.py new file mode 100644 index 0000000..4c00641 --- /dev/null +++ b/engine/pipeline/params.py @@ -0,0 +1,150 @@ +""" +Pipeline parameters - Runtime configuration layer for animation control. 
+ +PipelineParams is the target for AnimationController - animation events +modify these params, which the pipeline then applies to its stages. +""" + +from dataclasses import dataclass, field +from typing import Any + +try: + from engine.display import BorderMode +except ImportError: + BorderMode = object # Fallback for type checking + + +@dataclass +class PipelineParams: + """Runtime configuration for the pipeline. + + This is the canonical config object that AnimationController modifies. + Stages read from these params to adjust their behavior. + """ + + # Source config + source: str = "headlines" + source_refresh_interval: float = 60.0 + + # Display config + display: str = "terminal" + border: bool | BorderMode = False + + # Camera config + camera_mode: str = "vertical" + camera_speed: float = 1.0 # Default speed + camera_x: int = 0 # For horizontal scrolling + + # Effect config + effect_order: list[str] = field( + default_factory=lambda: ["noise", "fade", "glitch", "firehose"] + ) + effect_enabled: dict[str, bool] = field(default_factory=dict) + effect_intensity: dict[str, float] = field(default_factory=dict) + + # Animation-driven state (set by AnimationController) + pulse: float = 0.0 + current_effect: str | None = None + path_progress: float = 0.0 + + # Viewport + viewport_width: int = 80 + viewport_height: int = 24 + + # Firehose + firehose_enabled: bool = False + + # Runtime state + frame_number: int = 0 + fps: float = 60.0 + + def get_effect_config(self, name: str) -> tuple[bool, float]: + """Get (enabled, intensity) for an effect.""" + enabled = self.effect_enabled.get(name, True) + intensity = self.effect_intensity.get(name, 1.0) + return enabled, intensity + + def set_effect_config(self, name: str, enabled: bool, intensity: float) -> None: + """Set effect configuration.""" + self.effect_enabled[name] = enabled + self.effect_intensity[name] = intensity + + def is_effect_enabled(self, name: str) -> bool: + """Check if an effect is enabled.""" + if name 
not in self.effect_enabled: + return True # Default to enabled + return self.effect_enabled.get(name, True) + + def get_effect_intensity(self, name: str) -> float: + """Get effect intensity (0.0 to 1.0).""" + return self.effect_intensity.get(name, 1.0) + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary for serialization.""" + return { + "source": self.source, + "display": self.display, + "camera_mode": self.camera_mode, + "camera_speed": self.camera_speed, + "effect_order": self.effect_order, + "effect_enabled": self.effect_enabled.copy(), + "effect_intensity": self.effect_intensity.copy(), + "pulse": self.pulse, + "current_effect": self.current_effect, + "viewport_width": self.viewport_width, + "viewport_height": self.viewport_height, + "firehose_enabled": self.firehose_enabled, + } + + @classmethod + def from_dict(cls, data: dict[str, Any]) -> "PipelineParams": + """Create from dictionary.""" + params = cls() + for key, value in data.items(): + if hasattr(params, key): + setattr(params, key, value) + return params + + def copy(self) -> "PipelineParams": + """Create a copy of this params object.""" + params = PipelineParams() + params.source = self.source + params.display = self.display + params.camera_mode = self.camera_mode + params.camera_speed = self.camera_speed + params.camera_x = self.camera_x + params.effect_order = self.effect_order.copy() + params.effect_enabled = self.effect_enabled.copy() + params.effect_intensity = self.effect_intensity.copy() + params.pulse = self.pulse + params.current_effect = self.current_effect + params.path_progress = self.path_progress + params.viewport_width = self.viewport_width + params.viewport_height = self.viewport_height + params.firehose_enabled = self.firehose_enabled + params.frame_number = self.frame_number + params.fps = self.fps + return params + + +# Default params for different modes +DEFAULT_HEADLINE_PARAMS = PipelineParams( + source="headlines", + display="terminal", + camera_mode="vertical", + 
# ---- engine/pipeline/params.py (default presets) ----
DEFAULT_HEADLINE_PARAMS = PipelineParams(
    source="headlines",
    display="terminal",
    camera_mode="vertical",
    effect_order=["noise", "fade", "glitch", "firehose"],
)

DEFAULT_PYGAME_PARAMS = PipelineParams(
    source="headlines",
    display="pygame",
    camera_mode="vertical",
    effect_order=["noise", "fade", "glitch", "firehose"],
)

DEFAULT_PIPELINE_PARAMS = PipelineParams(
    source="pipeline",
    display="pygame",
    camera_mode="trace",
    effect_order=[],  # No effects for pipeline viz
)


# ==== patch boundary: new file engine/pipeline/pipeline_introspection_demo.py ====

"""
Pipeline introspection demo controller - 3-phase animation system.

Phase 1: Toggle each effect on/off one at a time (3s each, 1s gap)
Phase 2: LFO drives intensity default → max → min → default for each effect
Phase 3: All effects with shared LFO driving full waveform

This controller manages the animation and updates the pipeline accordingly.
"""

import time
from dataclasses import dataclass
from enum import Enum, auto
from typing import Any

from engine.effects import get_registry
from engine.sensors.oscillator import OscillatorSensor


class DemoPhase(Enum):
    """The three phases of the pipeline introspection demo."""

    PHASE_1_TOGGLE = auto()  # Toggle each effect on/off
    PHASE_2_LFO = auto()  # LFO drives intensity up/down
    PHASE_3_SHARED_LFO = auto()  # All effects with shared LFO


@dataclass
class PhaseState:
    """State for a single phase of the demo."""

    phase: DemoPhase
    start_time: float
    current_effect_index: int = 0
    effect_start_time: float = 0.0
    lfo_phase: float = 0.0  # 0.0 to 1.0


@dataclass
class DemoConfig:
    """Configuration for the demo animation."""

    effect_cycle_duration: float = 3.0  # seconds per effect
    gap_duration: float = 1.0  # seconds between effects
    lfo_duration: float = (
        4.0  # seconds for full LFO cycle (default → max → min → default)
    )
    phase_2_effect_duration: float = 4.0  # seconds per effect in phase 2
    phase_3_lfo_duration: float = 6.0  # seconds for full waveform in phase 3


class PipelineIntrospectionDemo:
    """Controller for the 3-phase pipeline introspection demo.

    Manages effect toggling and LFO modulation across the pipeline.
    """

    def __init__(
        self,
        pipeline: Any,
        effect_names: list[str] | None = None,
        config: DemoConfig | None = None,
    ):
        self._pipeline = pipeline
        self._config = config or DemoConfig()
        self._effect_names = effect_names or ["noise", "fade", "glitch", "firehose"]
        self._phase = DemoPhase.PHASE_1_TOGGLE
        self._phase_state = PhaseState(
            phase=DemoPhase.PHASE_1_TOGGLE,
            start_time=time.time(),
        )
        self._shared_oscillator: OscillatorSensor | None = None
        self._frame = 0

        # Register shared oscillator for phase 3
        self._shared_oscillator = OscillatorSensor(
            name="demo-lfo",
            waveform="sine",
            frequency=1.0 / self._config.phase_3_lfo_duration,
        )

    @property
    def phase(self) -> DemoPhase:
        return self._phase

    @property
    def phase_display(self) -> str:
        """Get a human-readable phase description."""
        phase_num = {
            DemoPhase.PHASE_1_TOGGLE: 1,
            DemoPhase.PHASE_2_LFO: 2,
            DemoPhase.PHASE_3_SHARED_LFO: 3,
        }
        return f"Phase {phase_num[self._phase]}"

    @property
    def effect_names(self) -> list[str]:
        return self._effect_names

    @property
    def shared_oscillator(self) -> OscillatorSensor | None:
        return self._shared_oscillator

    def update(self) -> dict[str, Any]:
        """Update the demo state and return current parameters.

        Returns:
            dict with current effect settings for the pipeline
        """
        self._frame += 1
        current_time = time.time()
        elapsed = current_time - self._phase_state.start_time

        # Phase transition logic
        phase_duration = self._get_phase_duration()
        if elapsed >= phase_duration:
            self._advance_phase()

        # Update based on current phase
        if self._phase == DemoPhase.PHASE_1_TOGGLE:
            return self._update_phase_1(current_time)
        elif self._phase == DemoPhase.PHASE_2_LFO:
            return self._update_phase_2(current_time)
        else:
            return self._update_phase_3(current_time)

    def _get_phase_duration(self) -> float:
        """Get duration of current phase in seconds."""
        if self._phase == DemoPhase.PHASE_1_TOGGLE:
            # Duration = (effect_time + gap) * num_effects + final_gap
            return (
                self._config.effect_cycle_duration + self._config.gap_duration
            ) * len(self._effect_names) + self._config.gap_duration
        elif self._phase == DemoPhase.PHASE_2_LFO:
            return self._config.phase_2_effect_duration * len(self._effect_names)
        else:
            # Phase 3 runs indefinitely
            return float("inf")

    def _advance_phase(self) -> None:
        """Advance to the next phase."""
        if self._phase == DemoPhase.PHASE_1_TOGGLE:
            self._phase = DemoPhase.PHASE_2_LFO
        elif self._phase == DemoPhase.PHASE_2_LFO:
            self._phase = DemoPhase.PHASE_3_SHARED_LFO
            # Start the shared oscillator
            if self._shared_oscillator:
                self._shared_oscillator.start()
        else:
            # Phase 3 loops indefinitely - reset for demo replay after long time
            self._phase = DemoPhase.PHASE_1_TOGGLE

        self._phase_state = PhaseState(
            phase=self._phase,
            start_time=time.time(),
        )

    def _update_phase_1(self, current_time: float) -> dict[str, Any]:
        """Phase 1: Toggle each effect on/off one at a time.

        BUGFIX: the original computed
        `effect_time = current_time - self._phase_state.effect_start_time`,
        but effect_start_time was never assigned anywhere (stayed at its 0.0
        default), so effect_time was epoch-sized and `in_gap` was always
        True — no effect ever toggled on during phase 1. We now derive the
        within-cycle time from the phase start with a modulo.
        """
        elapsed = current_time - self._phase_state.start_time
        cycle_time = self._config.effect_cycle_duration + self._config.gap_duration

        # Which effect slot are we in (may run one past the end during the
        # final gap)?
        raw_index = int(elapsed / cycle_time)
        effect_index = min(raw_index, len(self._effect_names) - 1)

        # Position within the current on/off cycle; the final trailing gap
        # (raw_index past the last effect) is always treated as a gap.
        effect_time = elapsed % cycle_time
        in_gap = (
            effect_time >= self._config.effect_cycle_duration
            or raw_index >= len(self._effect_names)
        )

        # Build effect states
        effect_states: dict[str, dict[str, Any]] = {}
        for i, name in enumerate(self._effect_names):
            if i < effect_index:
                # Past effects - leave at default
                effect_states[name] = {"enabled": False, "intensity": 0.5}
            elif i == effect_index:
                # Current effect - toggle on/off
                if in_gap:
                    effect_states[name] = {"enabled": False, "intensity": 0.5}
                else:
                    effect_states[name] = {"enabled": True, "intensity": 1.0}
            else:
                # Future effects - off
                effect_states[name] = {"enabled": False, "intensity": 0.5}

        # Apply to effect registry
        self._apply_effect_states(effect_states)

        return {
            "phase": "PHASE_1_TOGGLE",
            "phase_display": self.phase_display,
            "current_effect": self._effect_names[effect_index]
            if effect_index < len(self._effect_names)
            else None,
            "effect_states": effect_states,
            "frame": self._frame,
        }

    def _update_phase_2(self, current_time: float) -> dict[str, Any]:
        """Phase 2: LFO drives intensity default → max → min → default."""
        elapsed = current_time - self._phase_state.start_time
        effect_index = int(elapsed / self._config.phase_2_effect_duration)
        effect_index = min(effect_index, len(self._effect_names) - 1)

        # Calculate LFO position (0 → 1 → 0)
        effect_elapsed = elapsed % self._config.phase_2_effect_duration
        lfo_position = effect_elapsed / self._config.phase_2_effect_duration

        # LFO: 0 → 1 → 0 (triangle wave)
        if lfo_position < 0.5:
            lfo_value = lfo_position * 2  # 0 → 1
        else:
            lfo_value = 2 - lfo_position * 2  # 1 → 0

        # Map to intensity: 0.3 (default) → 1.0 (max) → 0.0 (min) → 0.3 (default)
        if lfo_position < 0.25:
            # 0.3 → 1.0
            intensity = 0.3 + (lfo_position / 0.25) * 0.7
        elif lfo_position < 0.75:
            # 1.0 → 0.0
            intensity = 1.0 - ((lfo_position - 0.25) / 0.5) * 1.0
        else:
            # 0.0 → 0.3
            intensity = ((lfo_position - 0.75) / 0.25) * 0.3

        # Build effect states
        effect_states: dict[str, dict[str, Any]] = {}
        for i, name in enumerate(self._effect_names):
            if i < effect_index:
                # Past effects - default
                effect_states[name] = {"enabled": True, "intensity": 0.5}
            elif i == effect_index:
                # Current effect - LFO modulated
                effect_states[name] = {"enabled": True, "intensity": intensity}
            else:
                # Future effects - off
                effect_states[name] = {"enabled": False, "intensity": 0.5}

        # Apply to effect registry
        self._apply_effect_states(effect_states)

        return {
            "phase": "PHASE_2_LFO",
            "phase_display": self.phase_display,
            "current_effect": self._effect_names[effect_index],
            "lfo_value": lfo_value,
            "intensity": intensity,
            "effect_states": effect_states,
            "frame": self._frame,
        }

    def _update_phase_3(self, current_time: float) -> dict[str, Any]:
        """Phase 3: All effects with shared LFO driving full waveform."""
        # Read shared oscillator
        lfo_value = 0.5  # Default
        if self._shared_oscillator:
            sensor_val = self._shared_oscillator.read()
            if sensor_val:
                lfo_value = sensor_val.value

        # All effects enabled with shared LFO
        effect_states: dict[str, dict[str, Any]] = {}
        for name in self._effect_names:
            effect_states[name] = {"enabled": True, "intensity": lfo_value}

        # Apply to effect registry
        self._apply_effect_states(effect_states)

        return {
            "phase": "PHASE_3_SHARED_LFO",
            "phase_display": self.phase_display,
            "lfo_value": lfo_value,
            "effect_states": effect_states,
            "frame": self._frame,
        }

    def _apply_effect_states(self, effect_states: dict[str, dict[str, Any]]) -> None:
        """Apply effect states to the effect registry (best-effort)."""
        try:
            registry = get_registry()
            for name, state in effect_states.items():
                effect = registry.get(name)
                if effect:
                    effect.config.enabled = state["enabled"]
                    effect.config.intensity = state["intensity"]
        except Exception:
            pass  # Silently fail if registry not available

    def cleanup(self) -> None:
        """Clean up resources."""
        if self._shared_oscillator:
            self._shared_oscillator.stop()

        # Reset all effects to default
        self._apply_effect_states(
            {name: {"enabled": False, "intensity": 0.5} for name in self._effect_names}
        )


# ==== patch boundary: new file engine/pipeline/preset_loader.py (index 0000000..a0db6f0) ====

"""
Preset loader - Loads presets from TOML files.

Supports:
- Built-in presets.toml in the package
- User overrides in ~/.config/mainline/presets.toml
- Local override in ./presets.toml
- Fallback DEFAULT_PRESET when loading fails
"""

import os
from pathlib import Path
from typing import Any

import tomllib

DEFAULT_PRESET: dict[str, Any] = {
    "description": "Default fallback preset",
    "source": "headlines",
    "display": "terminal",
    "camera": "vertical",
    "effects": [],
    "viewport": {"width": 80, "height": 24},
    "camera_speed": 1.0,
    "firehose_enabled": False,
}


def get_preset_paths() -> list[Path]:
    """Get list of preset file paths in load order (later overrides earlier)."""
    paths = []

    builtin = Path(__file__).parent.parent / "presets.toml"
    if builtin.exists():
        paths.append(builtin)

    user_config = Path(os.path.expanduser("~/.config/mainline/presets.toml"))
    if user_config.exists():
        paths.append(user_config)

    local = Path("presets.toml")
    if local.exists():
        paths.append(local)

    return paths


def load_presets() -> dict[str, Any]:
    """Load all presets, merging from multiple sources (best-effort per file)."""
    merged: dict[str, Any] = {"presets": {}, "sensors": {}, "effect_configs": {}}

    for path in get_preset_paths():
        try:
            with open(path, "rb") as f:
                data = tomllib.load(f)

            if "presets" in data:
                merged["presets"].update(data["presets"])

            if "sensors" in data:
                merged["sensors"].update(data["sensors"])

            if "effect_configs" in data:
                merged["effect_configs"].update(data["effect_configs"])

        except Exception as e:
            print(f"Warning: Failed to load presets from {path}: {e}")

    return merged
merged["sensors"].update(data["sensors"]) + + if "effect_configs" in data: + merged["effect_configs"].update(data["effect_configs"]) + + except Exception as e: + print(f"Warning: Failed to load presets from {path}: {e}") + + return merged + + +def get_preset(name: str) -> dict[str, Any] | None: + """Get a preset by name.""" + presets = load_presets() + return presets.get("presets", {}).get(name) + + +def list_preset_names() -> list[str]: + """List all available preset names.""" + presets = load_presets() + return list(presets.get("presets", {}).keys()) + + +def get_sensor_config(name: str) -> dict[str, Any] | None: + """Get sensor configuration by name.""" + sensors = load_presets() + return sensors.get("sensors", {}).get(name) + + +def get_effect_config(name: str) -> dict[str, Any] | None: + """Get effect configuration by name.""" + configs = load_presets() + return configs.get("effect_configs", {}).get(name) + + +def get_all_effect_configs() -> dict[str, Any]: + """Get all effect configurations.""" + configs = load_presets() + return configs.get("effect_configs", {}) + + +def get_preset_or_default(name: str) -> dict[str, Any]: + """Get a preset by name, or return DEFAULT_PRESET if not found.""" + preset = get_preset(name) + if preset is not None: + return preset + return DEFAULT_PRESET.copy() + + +def ensure_preset_available(name: str | None) -> dict[str, Any]: + """Ensure a preset is available, falling back to DEFAULT_PRESET.""" + if name is None: + return DEFAULT_PRESET.copy() + return get_preset_or_default(name) + + +class PresetValidationError(Exception): + """Raised when preset validation fails.""" + + +def validate_preset(preset: dict[str, Any]) -> list[str]: + """Validate a preset and return list of errors (empty if valid).""" + errors: list[str] = [] + + required_fields = ["source", "display", "effects"] + for field in required_fields: + if field not in preset: + errors.append(f"Missing required field: {field}") + + if "effects" in preset: + if not 
isinstance(preset["effects"], list): + errors.append("'effects' must be a list") + else: + for effect in preset["effects"]: + if not isinstance(effect, str): + errors.append( + f"Effect must be string, got {type(effect)}: {effect}" + ) + + if "viewport" in preset: + viewport = preset["viewport"] + if not isinstance(viewport, dict): + errors.append("'viewport' must be a dict") + else: + if "width" in viewport and not isinstance(viewport["width"], int): + errors.append("'viewport.width' must be an int") + if "height" in viewport and not isinstance(viewport["height"], int): + errors.append("'viewport.height' must be an int") + + return errors + + +def validate_signal_flow(stages: list[dict]) -> list[str]: + """Validate signal flow based on inlet/outlet types. + + This validates that the preset's stage configuration produces valid + data flow using the PureData-style type system. + + Args: + stages: List of stage configs with 'name', 'category', 'inlet_types', 'outlet_types' + + Returns: + List of errors (empty if valid) + """ + errors: list[str] = [] + + if not stages: + errors.append("Signal flow is empty") + return errors + + # Define expected types for each category + type_map = { + "source": {"inlet": "NONE", "outlet": "SOURCE_ITEMS"}, + "data": {"inlet": "ANY", "outlet": "SOURCE_ITEMS"}, + "transform": {"inlet": "SOURCE_ITEMS", "outlet": "TEXT_BUFFER"}, + "effect": {"inlet": "TEXT_BUFFER", "outlet": "TEXT_BUFFER"}, + "overlay": {"inlet": "TEXT_BUFFER", "outlet": "TEXT_BUFFER"}, + "camera": {"inlet": "TEXT_BUFFER", "outlet": "TEXT_BUFFER"}, + "display": {"inlet": "TEXT_BUFFER", "outlet": "NONE"}, + "render": {"inlet": "SOURCE_ITEMS", "outlet": "TEXT_BUFFER"}, + } + + # Check stage order and type compatibility + for i, stage in enumerate(stages): + category = stage.get("category", "unknown") + name = stage.get("name", f"stage_{i}") + + if category not in type_map: + continue # Skip unknown categories + + expected = type_map[category] + + # Check against previous 
stage + if i > 0: + prev = stages[i - 1] + prev_category = prev.get("category", "unknown") + if prev_category in type_map: + prev_outlet = type_map[prev_category]["outlet"] + inlet = expected["inlet"] + + # Validate type compatibility + if inlet != "ANY" and prev_outlet != "ANY" and inlet != prev_outlet: + errors.append( + f"Type mismatch at '{name}': " + f"expects {inlet} but previous stage outputs {prev_outlet}" + ) + + return errors + + +def validate_signal_path(stages: list[str]) -> list[str]: + """Validate signal path for circular dependencies and connectivity. + + Args: + stages: List of stage names in execution order + + Returns: + List of errors (empty if valid) + """ + errors: list[str] = [] + + if not stages: + errors.append("Signal path is empty") + return errors + + seen: set[str] = set() + for i, stage in enumerate(stages): + if stage in seen: + errors.append( + f"Circular dependency: '{stage}' appears multiple times at index {i}" + ) + seen.add(stage) + + return errors + + +def generate_preset_toml( + name: str, + source: str = "headlines", + display: str = "terminal", + effects: list[str] | None = None, + viewport_width: int = 80, + viewport_height: int = 24, + camera: str = "vertical", + camera_speed: float = 1.0, + firehose_enabled: bool = False, +) -> str: + """Generate a TOML preset skeleton with default values. 
+ + Args: + name: Preset name + source: Data source name + display: Display backend + effects: List of effect names + viewport_width: Viewport width in columns + viewport_height: Viewport height in rows + camera: Camera mode + camera_speed: Camera scroll speed + firehose_enabled: Enable firehose mode + + Returns: + TOML string for the preset + """ + + if effects is None: + effects = ["fade"] + + output = [] + output.append(f"[presets.{name}]") + output.append(f'description = "Auto-generated preset: {name}"') + output.append(f'source = "{source}"') + output.append(f'display = "{display}"') + output.append(f'camera = "{camera}"') + output.append(f"effects = {effects}") + output.append(f"viewport_width = {viewport_width}") + output.append(f"viewport_height = {viewport_height}") + output.append(f"camera_speed = {camera_speed}") + output.append(f"firehose_enabled = {str(firehose_enabled).lower()}") + + return "\n".join(output) diff --git a/engine/pipeline/presets.py b/engine/pipeline/presets.py new file mode 100644 index 0000000..9d1b3ca --- /dev/null +++ b/engine/pipeline/presets.py @@ -0,0 +1,237 @@ +""" +Pipeline presets - Pre-configured pipeline configurations. + +Provides PipelinePreset as a unified preset system. +Presets can be loaded from TOML files (presets.toml) or defined in code. + +Loading order: +1. Built-in presets.toml in the package +2. User config ~/.config/mainline/presets.toml +3. 
Local ./presets.toml (overrides earlier)
"""

from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any

from engine.display import BorderMode
from engine.pipeline.params import PipelineParams

if TYPE_CHECKING:
    from engine.pipeline.controller import PipelineConfig


def _load_toml_presets() -> dict[str, Any]:
    """Load presets from TOML file.

    Returns:
        Parsed preset data, or an empty dict when the loader module is
        unavailable or loading fails (best-effort: the built-in presets
        defined below are used as a fallback).
    """
    try:
        from engine.pipeline.preset_loader import load_presets

        return load_presets()
    except Exception:
        # Deliberate best-effort: any loader failure degrades to built-ins.
        return {}


# NOTE(review): name is historical — the data is loaded from TOML, not YAML.
_YAML_PRESETS = _load_toml_presets()


@dataclass
class PipelinePreset:
    """Pre-configured pipeline with stages and animation.

    A PipelinePreset packages:
    - Initial params: Starting configuration
    - Stages: List of stage configurations to create

    This is the new unified preset that works with the Pipeline class.
    """

    name: str
    description: str = ""
    source: str = "headlines"
    display: str = "terminal"
    camera: str = "scroll"
    effects: list[str] = field(default_factory=list)
    border: bool | BorderMode = (
        False  # Border mode: False=off, True=simple, BorderMode.UI for panel
    )
    # Extended fields for fine-tuning
    camera_speed: float = 1.0  # Camera movement speed
    viewport_width: int = 80  # Viewport width in columns
    viewport_height: int = 24  # Viewport height in rows
    source_items: list[dict[str, Any]] | None = None  # For ListDataSource
    enable_metrics: bool = True  # Enable performance metrics collection

    def to_params(self) -> PipelineParams:
        """Convert to PipelineParams (runtime configuration)."""
        from engine.display import BorderMode

        params = PipelineParams()
        params.source = self.source
        params.display = self.display
        # Collapse the border field: bools pass through unchanged,
        # BorderMode.UI is preserved, any other BorderMode degrades to False.
        params.border = (
            self.border
            if isinstance(self.border, bool)
            else BorderMode.UI
            if self.border == BorderMode.UI
            else False
        )
        params.camera_mode = self.camera
        params.effect_order = self.effects.copy()
        params.camera_speed = self.camera_speed
        # Note: viewport_width/height are read from PipelinePreset directly
        # in pipeline_runner.py, not from PipelineParams
        return params

    def to_config(self) -> "PipelineConfig":
        """Convert to PipelineConfig (static pipeline construction config).

        PipelineConfig is used once at pipeline initialization and contains
        the core settings that don't change during execution.
        """
        from engine.pipeline.controller import PipelineConfig

        return PipelineConfig(
            source=self.source,
            display=self.display,
            camera=self.camera,
            effects=self.effects.copy(),
            enable_metrics=self.enable_metrics,
        )

    @classmethod
    def from_yaml(cls, name: str, data: dict[str, Any]) -> "PipelinePreset":
        """Create a PipelinePreset from YAML data.

        NOTE(review): the fallback camera here is "vertical" while the
        dataclass default is "scroll" — confirm the divergence is intended.
        """
        return cls(
            name=name,
            description=data.get("description", ""),
            source=data.get("source", "headlines"),
            display=data.get("display", "terminal"),
            camera=data.get("camera", "vertical"),
            effects=data.get("effects", []),
            border=data.get("border", False),
            camera_speed=data.get("camera_speed", 1.0),
            viewport_width=data.get("viewport_width", 80),
            viewport_height=data.get("viewport_height", 24),
            source_items=data.get("source_items"),
            enable_metrics=data.get("enable_metrics", True),
        )


# Built-in presets (used as fallbacks when not overridden by TOML data).
DEMO_PRESET = PipelinePreset(
    name="demo",
    description="Demo mode with effect cycling and camera modes",
    source="headlines",
    display="pygame",
    camera="scroll",
    effects=["noise", "fade", "glitch", "firehose"],
)

UI_PRESET = PipelinePreset(
    name="ui",
    description="Interactive UI mode with right-side control panel",
    source="fixture",
    display="pygame",
    camera="scroll",
    effects=["noise", "fade", "glitch"],
    border=BorderMode.UI,
)

POETRY_PRESET = PipelinePreset(
    name="poetry",
    description="Poetry feed with subtle effects",
    source="poetry",
    display="pygame",
    camera="scroll",
    effects=["fade"],
)

PIPELINE_VIZ_PRESET = PipelinePreset(
    name="pipeline",
    description="Pipeline visualization mode",
    source="pipeline",
    display="terminal",
    camera="trace",
    effects=[],
)

WEBSOCKET_PRESET = PipelinePreset(
    name="websocket",
    description="WebSocket display mode",
    source="headlines",
    display="websocket",
    camera="scroll",
    effects=["noise", "fade", "glitch"],
)

FIREHOSE_PRESET = PipelinePreset(
    name="firehose",
    description="High-speed firehose mode",
    source="headlines",
    display="pygame",
    camera="scroll",
    effects=["noise", "fade", "glitch", "firehose"],
)

FIXTURE_PRESET = PipelinePreset(
    name="fixture",
    description="Use cached headline fixtures",
    source="fixture",
    display="pygame",
    camera="scroll",
    effects=["noise", "fade"],
    border=False,
)


# Build presets from YAML data
def _build_presets() -> dict[str, PipelinePreset]:
    """Build preset dictionary from all sources.

    TOML/YAML-defined presets take precedence; built-ins fill the gaps.
    """
    result = {}

    # Add YAML presets
    yaml_presets = _YAML_PRESETS.get("presets", {})
    for name, data in yaml_presets.items():
        result[name] = PipelinePreset.from_yaml(name, data)

    # Add built-in presets as fallback (if not in YAML)
    builtins = {
        "demo": DEMO_PRESET,
        "poetry": POETRY_PRESET,
        "pipeline": PIPELINE_VIZ_PRESET,
        "websocket": WEBSOCKET_PRESET,
        "firehose": FIREHOSE_PRESET,
        "ui": UI_PRESET,
        "fixture": FIXTURE_PRESET,
    }

    for name, preset in builtins.items():
        if name not in result:
            result[name] = preset

    return result


# Built once at import time; mutate via get/list helpers only.
PRESETS: dict[str, PipelinePreset] = _build_presets()


def get_preset(name: str) -> PipelinePreset | None:
    """Get a preset by name."""
    return PRESETS.get(name)


def list_presets() -> list[str]:
    """List all available preset names."""
    return list(PRESETS.keys())


def create_preset_from_params(
    params: PipelineParams, name: str = "custom"
) -> PipelinePreset:
    """Create a preset from PipelineParams.

    Args:
        params: Runtime pipeline parameters to snapshot.
        name: Name for the resulting preset (defaults to "custom").
    """
    return PipelinePreset(
        name=name,
        source=params.source,
        display=params.display,
        camera=params.camera_mode,
        # hasattr guard tolerates params objects without an effect_order.
        effects=params.effect_order.copy() if hasattr(params, "effect_order") else [],
    )
hasattr(params, "effect_order") else [], + ) diff --git a/engine/pipeline/registry.py b/engine/pipeline/registry.py new file mode 100644 index 0000000..6e9bcac --- /dev/null +++ b/engine/pipeline/registry.py @@ -0,0 +1,189 @@ +""" +Stage registry - Unified registration for all pipeline stages. + +Provides a single registry for sources, effects, displays, and cameras. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, TypeVar + +from engine.pipeline.core import Stage + +if TYPE_CHECKING: + from engine.pipeline.core import Stage + +T = TypeVar("T") + + +class StageRegistry: + """Unified registry for all pipeline stage types.""" + + _categories: dict[str, dict[str, type[Any]]] = {} + _discovered: bool = False + _instances: dict[str, Stage] = {} + + @classmethod + def register(cls, category: str, stage_class: type[Any]) -> None: + """Register a stage class in a category. + + Args: + category: Category name (source, effect, display, camera) + stage_class: Stage subclass to register + """ + if category not in cls._categories: + cls._categories[category] = {} + + key = getattr(stage_class, "__name__", stage_class.__class__.__name__) + cls._categories[category][key] = stage_class + + @classmethod + def get(cls, category: str, name: str) -> type[Any] | None: + """Get a stage class by category and name.""" + return cls._categories.get(category, {}).get(name) + + @classmethod + def list(cls, category: str) -> list[str]: + """List all stage names in a category.""" + return list(cls._categories.get(category, {}).keys()) + + @classmethod + def list_categories(cls) -> list[str]: + """List all registered categories.""" + return list(cls._categories.keys()) + + @classmethod + def create(cls, category: str, name: str, **kwargs) -> Stage | None: + """Create a stage instance by category and name.""" + stage_class = cls.get(category, name) + if stage_class: + return stage_class(**kwargs) + return None + + @classmethod + def create_instance(cls, stage: 
Stage | type[Stage], **kwargs) -> Stage: + """Create an instance from a stage class or return as-is.""" + if isinstance(stage, Stage): + return stage + if isinstance(stage, type) and issubclass(stage, Stage): + return stage(**kwargs) + raise TypeError(f"Expected Stage class or instance, got {type(stage)}") + + @classmethod + def register_instance(cls, name: str, stage: Stage) -> None: + """Register a stage instance by name.""" + cls._instances[name] = stage + + @classmethod + def get_instance(cls, name: str) -> Stage | None: + """Get a registered stage instance by name.""" + return cls._instances.get(name) + + +def discover_stages() -> None: + """Auto-discover and register all stage implementations.""" + if StageRegistry._discovered: + return + + # Import and register all stage implementations + try: + from engine.data_sources.sources import ( + HeadlinesDataSource, + PoetryDataSource, + ) + + StageRegistry.register("source", HeadlinesDataSource) + StageRegistry.register("source", PoetryDataSource) + + StageRegistry._categories["source"]["headlines"] = HeadlinesDataSource + StageRegistry._categories["source"]["poetry"] = PoetryDataSource + except ImportError: + pass + + # Register pipeline introspection source + try: + from engine.data_sources.pipeline_introspection import ( + PipelineIntrospectionSource, + ) + + StageRegistry.register("source", PipelineIntrospectionSource) + StageRegistry._categories["source"]["pipeline-inspect"] = ( + PipelineIntrospectionSource + ) + except ImportError: + pass + + try: + from engine.effects.types import EffectPlugin # noqa: F401 + except ImportError: + pass + + # Register buffer stages (framebuffer, etc.) 
+ try: + from engine.pipeline.stages.framebuffer import FrameBufferStage + + StageRegistry.register("effect", FrameBufferStage) + except ImportError: + pass + + # Register display stages + _register_display_stages() + + StageRegistry._discovered = True + + +def _register_display_stages() -> None: + """Register display backends as stages.""" + try: + from engine.display import DisplayRegistry + except ImportError: + return + + DisplayRegistry.initialize() + + for backend_name in DisplayRegistry.list_backends(): + factory = _DisplayStageFactory(backend_name) + StageRegistry._categories.setdefault("display", {})[backend_name] = factory + + +class _DisplayStageFactory: + """Factory that creates DisplayStage instances for a specific backend.""" + + def __init__(self, backend_name: str): + self._backend_name = backend_name + + def __call__(self): + from engine.display import DisplayRegistry + from engine.pipeline.adapters import DisplayStage + + display = DisplayRegistry.create(self._backend_name) + if display is None: + raise RuntimeError( + f"Failed to create display backend: {self._backend_name}" + ) + return DisplayStage(display, name=self._backend_name) + + @property + def __name__(self) -> str: + return self._backend_name.capitalize() + "Stage" + + +# Convenience functions +def register_source(stage_class: type[Stage]) -> None: + """Register a source stage.""" + StageRegistry.register("source", stage_class) + + +def register_effect(stage_class: type[Stage]) -> None: + """Register an effect stage.""" + StageRegistry.register("effect", stage_class) + + +def register_display(stage_class: type[Stage]) -> None: + """Register a display stage.""" + StageRegistry.register("display", stage_class) + + +def register_camera(stage_class: type[Stage]) -> None: + """Register a camera stage.""" + StageRegistry.register("camera", stage_class) diff --git a/engine/pipeline/stages/framebuffer.py b/engine/pipeline/stages/framebuffer.py new file mode 100644 index 0000000..790be34 --- 
"""
Frame buffer stage - stores previous frames for temporal effects.

Provides (per-instance, using instance name):
- framebuffer.{name}.history: list of previous buffers (most recent first)
- framebuffer.{name}.intensity_history: list of corresponding intensity maps
- framebuffer.{name}.current_intensity: intensity map for current frame

Capability: "framebuffer.history.{name}"
"""

import threading
from dataclasses import dataclass
from typing import Any

from engine.display import _strip_ansi
from engine.pipeline.core import DataType, PipelineContext, Stage


@dataclass
class FrameBufferConfig:
    """Configuration for FrameBufferStage."""

    history_depth: int = 2  # Number of previous frames to keep
    name: str = "default"  # Unique instance name for capability and context keys


class FrameBufferStage(Stage):
    """Stores frame history and computes intensity maps.

    Supports multiple instances with unique capabilities and context keys;
    each instance namespaces its context keys under "framebuffer.{name}".
    """

    name = "framebuffer"
    category = "effect"  # It's an effect that enriches context with frame history

    def __init__(
        self,
        config: FrameBufferConfig | None = None,
        history_depth: int = 2,
        name: str = "default",
    ):
        # An explicit config wins; otherwise one is built from the
        # convenience keyword arguments.
        self.config = config or FrameBufferConfig(
            history_depth=history_depth, name=name
        )
        # Guards the read-modify-write of the history lists in process().
        self._lock = threading.Lock()

    @property
    def capabilities(self) -> set[str]:
        # Capability string is per-instance so several framebuffers can coexist.
        return {f"framebuffer.history.{self.config.name}"}

    @property
    def dependencies(self) -> set[str]:
        # Depends on rendered output (since we want to capture final buffer)
        return {"render.output"}

    @property
    def inlet_types(self) -> set:
        return {DataType.TEXT_BUFFER}

    @property
    def outlet_types(self) -> set:
        return {DataType.TEXT_BUFFER}  # Pass through unchanged

    def init(self, ctx: PipelineContext) -> bool:
        """Initialize framebuffer state in context."""
        prefix = f"framebuffer.{self.config.name}"
        ctx.set(f"{prefix}.history", [])
        ctx.set(f"{prefix}.intensity_history", [])
        return True

    def process(self, data: Any, ctx: PipelineContext) -> Any:
        """Store frame in history and compute intensity.

        Args:
            data: Current text buffer (list[str])
            ctx: Pipeline context

        Returns:
            Same buffer (pass-through); non-list data is returned untouched.
        """
        if not isinstance(data, list):
            return data

        prefix = f"framebuffer.{self.config.name}"

        # Compute intensity map for current buffer (per-row, length = buffer rows)
        intensity_map = self._compute_buffer_intensity(data, len(data))

        # Store in context
        ctx.set(f"{prefix}.current_intensity", intensity_map)

        with self._lock:
            # Get existing histories. Note the fetched lists are mutated
            # in place, then trimmed *copies* are written back via ctx.set.
            history = ctx.get(f"{prefix}.history", [])
            intensity_hist = ctx.get(f"{prefix}.intensity_history", [])

            # Prepend current frame to history (most recent first).
            # data.copy() is a shallow copy; safe since rows are immutable str.
            history.insert(0, data.copy())
            intensity_hist.insert(0, intensity_map)

            # Trim to configured depth
            max_depth = self.config.history_depth
            ctx.set(f"{prefix}.history", history[:max_depth])
            ctx.set(f"{prefix}.intensity_history", intensity_hist[:max_depth])

        return data

    def _compute_buffer_intensity(
        self, buf: list[str], max_rows: int = 24
    ) -> list[float]:
        """Compute average intensity per row in buffer.

        Uses ANSI color if available; falls back to character density.

        Args:
            buf: Text buffer (list of strings)
            max_rows: Maximum number of rows to process (process() passes
                len(buf); the 24 default only applies to direct callers)

        Returns:
            List of intensity values (0.0-1.0) per row, padded with 0.0
            up to max_rows entries.
        """
        intensities = []
        # Limit to viewport height
        lines = buf[:max_rows]

        for line in lines:
            # Strip ANSI codes for length calc

            plain = _strip_ansi(line)
            if not plain:
                intensities.append(0.0)
                continue

            # Simple heuristic: ratio of non-space characters
            # More sophisticated version could parse ANSI RGB brightness
            filled = sum(1 for c in plain if c not in (" ", "\t"))
            total = len(plain)
            intensity = filled / total if total > 0 else 0.0
            intensities.append(max(0.0, min(1.0, intensity)))

        # Pad to max_rows if needed
        while len(intensities) < max_rows:
            intensities.append(0.0)

        return intensities

    def get_frame(
        self, index: int = 0, ctx: PipelineContext | None = None
    ) -> list[str] | None:
        """Get frame from history by index (0 = current, 1 = previous, etc)."""
        if ctx is None:
            return None
        prefix = f"framebuffer.{self.config.name}"
        history = ctx.get(f"{prefix}.history", [])
        if 0 <= index < len(history):
            return history[index]
        return None

    def get_intensity(
        self, index: int = 0, ctx: PipelineContext | None = None
    ) -> list[float] | None:
        """Get intensity map from history by index."""
        if ctx is None:
            return None
        prefix = f"framebuffer.{self.config.name}"
        intensity_hist = ctx.get(f"{prefix}.intensity_history", [])
        if 0 <= index < len(intensity_hist):
            return intensity_hist[index]
        return None

    def cleanup(self) -> None:
        """Cleanup resources."""
        pass
+ +Provides: +- Stage list with enable/disable toggles +- Parameter sliders for selected effect +- Keyboard/mouse interaction + +This module implements the right-side UI panel that appears in border="ui" mode. +""" + +from collections.abc import Callable +from dataclasses import dataclass, field +from typing import Any + + +@dataclass +class UIConfig: + """Configuration for the UI panel.""" + + panel_width: int = 24 # Characters wide + stage_list_height: int = 12 # Number of stages to show at once + param_height: int = 8 # Space for parameter controls + scroll_offset: int = 0 # Scroll position in stage list + start_with_preset_picker: bool = False # Show preset picker immediately + + +@dataclass +class StageControl: + """Represents a stage in the UI panel with its toggle state.""" + + name: str + stage_name: str # Actual pipeline stage name + category: str + enabled: bool = True + selected: bool = False + params: dict[str, Any] = field(default_factory=dict) # Current param values + param_schema: dict[str, dict] = field(default_factory=dict) # Param metadata + + def toggle(self) -> None: + """Toggle enabled state.""" + self.enabled = not self.enabled + + def get_param(self, name: str) -> Any: + """Get current parameter value.""" + return self.params.get(name) + + def set_param(self, name: str, value: Any) -> None: + """Set parameter value.""" + self.params[name] = value + + +class UIPanel: + """Interactive UI panel for pipeline configuration. + + Manages: + - Stage list with enable/disable checkboxes + - Parameter sliders for selected stage + - Keyboard/mouse event handling + - Scroll state for long stage lists + + The panel is rendered as a right border (panel_width characters wide) + alongside the main viewport. 
+ """ + + def __init__(self, config: UIConfig | None = None): + self.config = config or UIConfig() + self.stages: dict[str, StageControl] = {} # stage_name -> StageControl + self.scroll_offset = 0 + self.selected_stage: str | None = None + self._focused_param: str | None = None # For slider adjustment + self._callbacks: dict[str, Callable] = {} # Event callbacks + self._presets: list[str] = [] # Available preset names + self._current_preset: str = "" # Current preset name + self._show_preset_picker: bool = ( + config.start_with_preset_picker if config else False + ) # Picker overlay visible + self._show_panel: bool = True # UI panel visibility + self._preset_scroll_offset: int = 0 # Scroll in preset list + + def save_state(self) -> dict[str, Any]: + """Save UI panel state for restoration after pipeline rebuild. + + Returns: + Dictionary containing UI panel state that can be restored + """ + # Save stage control states (enabled, params, etc.) + stage_states = {} + for name, ctrl in self.stages.items(): + stage_states[name] = { + "enabled": ctrl.enabled, + "selected": ctrl.selected, + "params": dict(ctrl.params), # Copy params dict + } + + return { + "stage_states": stage_states, + "scroll_offset": self.scroll_offset, + "selected_stage": self.selected_stage, + "_focused_param": self._focused_param, + "_show_panel": self._show_panel, + "_show_preset_picker": self._show_preset_picker, + "_preset_scroll_offset": self._preset_scroll_offset, + } + + def restore_state(self, state: dict[str, Any]) -> None: + """Restore UI panel state from saved state. 
+ + Args: + state: Dictionary containing UI panel state from save_state() + """ + # Restore stage control states + stage_states = state.get("stage_states", {}) + for name, stage_state in stage_states.items(): + if name in self.stages: + ctrl = self.stages[name] + ctrl.enabled = stage_state.get("enabled", True) + ctrl.selected = stage_state.get("selected", False) + # Restore params + saved_params = stage_state.get("params", {}) + for param_name, param_value in saved_params.items(): + if param_name in ctrl.params: + ctrl.params[param_name] = param_value + + # Restore UI panel state + self.scroll_offset = state.get("scroll_offset", 0) + self.selected_stage = state.get("selected_stage") + self._focused_param = state.get("_focused_param") + self._show_panel = state.get("_show_panel", True) + self._show_preset_picker = state.get("_show_preset_picker", False) + self._preset_scroll_offset = state.get("_preset_scroll_offset", 0) + + def register_stage(self, stage: Any, enabled: bool = True) -> StageControl: + """Register a stage for UI control. 
+ + Args: + stage: Stage instance (must have .name, .category attributes) + enabled: Initial enabled state + + Returns: + The created StageControl instance + """ + control = StageControl( + name=stage.name, + stage_name=stage.name, + category=stage.category, + enabled=enabled, + ) + self.stages[stage.name] = control + return control + + def unregister_stage(self, stage_name: str) -> None: + """Remove a stage from UI control.""" + if stage_name in self.stages: + del self.stages[stage_name] + + def get_enabled_stages(self) -> list[str]: + """Get list of stage names that are currently enabled.""" + return [name for name, ctrl in self.stages.items() if ctrl.enabled] + + def select_stage(self, stage_name: str | None = None) -> None: + """Select a stage (for parameter editing).""" + if stage_name in self.stages: + self.selected_stage = stage_name + self.stages[stage_name].selected = True + # Deselect others + for name, ctrl in self.stages.items(): + if name != stage_name: + ctrl.selected = False + # Auto-focus first parameter when stage selected + if self.stages[stage_name].params: + self._focused_param = next(iter(self.stages[stage_name].params.keys())) + else: + self._focused_param = None + + def toggle_stage(self, stage_name: str) -> bool: + """Toggle a stage's enabled state. + + Returns: + New enabled state + """ + if stage_name in self.stages: + ctrl = self.stages[stage_name] + ctrl.enabled = not ctrl.enabled + return ctrl.enabled + return False + + def adjust_selected_param(self, delta: float) -> None: + """Adjust the currently focused parameter of selected stage. 
    def scroll_stages(self, delta: int) -> None:
        """Scroll the stage list, clamped to the valid offset range."""
        max_offset = max(0, len(self.stages) - self.config.stage_list_height)
        self.scroll_offset = max(0, min(max_offset, self.scroll_offset + delta))

    def render(self, width: int, height: int) -> list[str]:
        """Render the UI panel.

        Args:
            width: Total display width (panel uses last `panel_width` cols)
            height: Total display height

        Returns:
            List of strings, each of length `panel_width`, to overlay on right side
        """
        panel_width = min(
            self.config.panel_width, width - 4
        )  # Reserve at least 2 for main
        lines = []

        # If panel is hidden, render empty space
        if not self._show_panel:
            return [" " * panel_width for _ in range(height)]

        # If preset picker is active, render that overlay instead of normal panel
        if self._show_preset_picker:
            picker_lines = self._render_preset_picker(panel_width)
            # Pad to full panel height if needed
            while len(picker_lines) < height:
                picker_lines.append(" " * panel_width)
            return [
                line.ljust(panel_width)[:panel_width] for line in picker_lines[:height]
            ]

        # Header
        title_line = "┌" + "─" * (panel_width - 2) + "┐"
        lines.append(title_line)

        # Stage list section (occupies most of the panel)
        list_height = self.config.stage_list_height
        stage_names = list(self.stages.keys())
        for i in range(list_height):
            idx = i + self.scroll_offset
            if idx < len(stage_names):
                stage_name = stage_names[idx]
                ctrl = self.stages[stage_name]
                status = "✓" if ctrl.enabled else "✗"
                sel = ">" if ctrl.selected else " "
                # Truncate to fit panel (leave room for ">✓ " prefix and padding)
                max_name_len = panel_width - 5
                display_name = ctrl.name[:max_name_len]
                # NOTE(review): occupied rows end without a closing "│",
                # unlike the empty rows below — confirm this asymmetry is
                # intended (final ljust pads them with a space instead).
                line = f"│{sel}{status} {display_name:<{max_name_len}}"
                lines.append(line[:panel_width])
            else:
                lines.append("│" + " " * (panel_width - 2) + "│")

        # Separator
        lines.append("├" + "─" * (panel_width - 2) + "┤")

        # Parameter section (if stage selected)
        if self.selected_stage and self.selected_stage in self.stages:
            ctrl = self.stages[self.selected_stage]
            if ctrl.params:
                # Render each parameter as "name: [=====] value" with focus indicator
                for param_name, param_value in ctrl.params.items():
                    schema = ctrl.param_schema.get(param_name, {})
                    is_focused = param_name == self._focused_param
                    # Format value based on type.
                    # NOTE(review): the bool branch is unreachable — bool is
                    # an int subclass, so the int branch above captures it.
                    if isinstance(param_value, float):
                        val_str = f"{param_value:.2f}"
                    elif isinstance(param_value, int):
                        val_str = f"{param_value}"
                    elif isinstance(param_value, bool):
                        val_str = str(param_value)
                    else:
                        val_str = str(param_value)

                    # Build parameter line
                    if (
                        isinstance(param_value, (int, float))
                        and "min" in schema
                        and "max" in schema
                    ):
                        # Render as slider
                        min_val = schema["min"]
                        max_val = schema["max"]
                        # Normalize to 0-1 for bar length
                        if max_val != min_val:
                            ratio = (param_value - min_val) / (max_val - min_val)
                        else:
                            ratio = 0
                        bar_width = (
                            panel_width - len(param_name) - len(val_str) - 10
                        )  # approx space for "[] : ="
                        if bar_width < 1:
                            bar_width = 1
                        filled = int(round(ratio * bar_width))
                        bar = "[" + "=" * filled + " " * (bar_width - filled) + "]"
                        param_line = f"│ {param_name}: {bar} {val_str}"
                    else:
                        # Simple name=value
                        param_line = f"│ {param_name}={val_str}"

                    # Highlight focused parameter
                    if is_focused:
                        # Invert colors conceptually - for now use > prefix
                        param_line = "│> " + param_line[2:]

                    # Truncate to fit panel width
                    if len(param_line) > panel_width - 1:
                        param_line = param_line[: panel_width - 1]
                    lines.append(param_line + "│")
            else:
                lines.append("│ (no params)".ljust(panel_width - 1) + "│")
        else:
            lines.append("│ (select a stage)".ljust(panel_width - 1) + "│")

        # Info line before footer
        info_parts = []
        if self._current_preset:
            info_parts.append(f"Preset: {self._current_preset}")
        if self._presets:
            info_parts.append("[P] presets")
        info_str = " | ".join(info_parts) if info_parts else ""
        if info_str:
            padded = info_str.ljust(panel_width - 2)
            lines.append("│" + padded + "│")

        # Footer with instructions
        footer_line = self._render_footer(panel_width)
        lines.append(footer_line)

        # Ensure all lines are exactly panel_width
        return [line.ljust(panel_width)[:panel_width] for line in lines]
_render_footer(self, width: int) -> str: + """Render footer with key hints.""" + if width >= 40: + # Show preset name and key hints + preset_info = ( + f"Preset: {self._current_preset}" if self._current_preset else "" + ) + hints = " [S]elect [Space]UI [Tab]Params [Arrows/HJKL]Adjust " + if self._presets: + hints += "[P]Preset " + combined = f"{preset_info}{hints}" + if len(combined) > width - 4: + combined = combined[: width - 4] + footer = "└" + "─" * (width - 2) + "┘" + return footer # Just the line, we'll add info above in render + else: + return "└" + "─" * (width - 2) + "┘" + + def execute_command(self, command: dict) -> bool: + """Execute a command from external control (e.g., WebSocket). + + Supported UI commands: + - {"action": "toggle_stage", "stage": "stage_name"} + - {"action": "select_stage", "stage": "stage_name"} + - {"action": "adjust_param", "stage": "stage_name", "param": "param_name", "delta": 0.1} + - {"action": "change_preset", "preset": "preset_name"} + - {"action": "cycle_preset", "direction": 1} + + Pipeline Mutation commands are handled by the WebSocket/runner handler: + - {"action": "add_stage", "stage": "stage_name", "type": "source|display|camera|effect"} + - {"action": "remove_stage", "stage": "stage_name"} + - {"action": "replace_stage", "stage": "old_stage_name", "with": "new_stage_type"} + - {"action": "swap_stages", "stage1": "name1", "stage2": "name2"} + - {"action": "move_stage", "stage": "stage_name", "after": "other_stage"|"before": "other_stage"} + - {"action": "enable_stage", "stage": "stage_name"} + - {"action": "disable_stage", "stage": "stage_name"} + - {"action": "cleanup_stage", "stage": "stage_name"} + - {"action": "can_hot_swap", "stage": "stage_name"} + + Returns: + True if command was handled, False if not + """ + action = command.get("action") + + if action == "toggle_stage": + stage_name = command.get("stage") + if stage_name in self.stages: + self.toggle_stage(stage_name) + self._emit_event( + "stage_toggled", + 
stage_name=stage_name, + enabled=self.stages[stage_name].enabled, + ) + return True + + elif action == "select_stage": + stage_name = command.get("stage") + if stage_name in self.stages: + self.select_stage(stage_name) + self._emit_event("stage_selected", stage_name=stage_name) + return True + + elif action == "adjust_param": + stage_name = command.get("stage") + param_name = command.get("param") + delta = command.get("delta", 0.1) + if stage_name == self.selected_stage and param_name: + self._focused_param = param_name + self.adjust_selected_param(delta) + self._emit_event( + "param_changed", + stage_name=stage_name, + param_name=param_name, + value=self.stages[stage_name].params.get(param_name), + ) + return True + + elif action == "change_preset": + preset_name = command.get("preset") + if preset_name in self._presets: + self._current_preset = preset_name + self._emit_event("preset_changed", preset_name=preset_name) + return True + + elif action == "cycle_preset": + direction = command.get("direction", 1) + self.cycle_preset(direction) + return True + + return False + + def process_key_event(self, key: str | int, modifiers: int = 0) -> bool: + """Process a keyboard event. + + Args: + key: Key symbol (e.g., ' ', 's', pygame.K_UP, etc.) 
+ modifiers: Modifier bits (Shift, Ctrl, Alt) + + Returns: + True if event was handled, False if not + """ + # Normalize to string for simplicity + key_str = self._normalize_key(key, modifiers) + + # Space: toggle UI panel visibility (only when preset picker not active) + if key_str == " " and not self._show_preset_picker: + self._show_panel = not getattr(self, "_show_panel", True) + return True + + # Space: toggle UI panel visibility (only when preset picker not active) + if key_str == " " and not self._show_preset_picker: + self._show_panel = not getattr(self, "_show_panel", True) + return True + + # S: select stage (cycle) + if key_str == "s" and modifiers == 0: + stages = list(self.stages.keys()) + if not stages: + return False + if self.selected_stage: + current_idx = stages.index(self.selected_stage) + next_idx = (current_idx + 1) % len(stages) + else: + next_idx = 0 + self.select_stage(stages[next_idx]) + return True + + # P: toggle preset picker (only when panel is visible) + if key_str == "p" and self._show_panel: + self._show_preset_picker = not self._show_preset_picker + if self._show_preset_picker: + self._preset_scroll_offset = 0 + return True + + # HJKL or Arrow Keys: scroll stage list, preset list, or adjust param + # vi-style: K=up, J=down (J is actually next line in vi, but we use for down) + # We'll use J for down, K for up, H for left, L for right + elif key_str in ("up", "down", "kp8", "kp2", "j", "k"): + # If preset picker is open, scroll preset list + if self._show_preset_picker: + delta = -1 if key_str in ("up", "kp8", "k") else 1 + self._preset_scroll_offset = max(0, self._preset_scroll_offset + delta) + # Ensure scroll doesn't go past end + max_offset = max(0, len(self._presets) - 1) + self._preset_scroll_offset = min(max_offset, self._preset_scroll_offset) + return True + # If param is focused, adjust param value + elif self.selected_stage and self._focused_param: + delta = -1.0 if key_str in ("up", "kp8", "k") else 1.0 + 
self.adjust_selected_param(delta) + return True + # Otherwise scroll stages + else: + delta = -1 if key_str in ("up", "kp8", "k") else 1 + self.scroll_stages(delta) + return True + + # Left/Right or H/L: adjust param (if param selected) + elif key_str in ("left", "right", "kp4", "kp6", "h", "l"): + if self.selected_stage: + delta = -0.1 if key_str in ("left", "kp4", "h") else 0.1 + self.adjust_selected_param(delta) + return True + + # Tab: cycle through parameters + if key_str == "tab" and self.selected_stage: + ctrl = self.stages[self.selected_stage] + param_names = list(ctrl.params.keys()) + if param_names: + if self._focused_param in param_names: + current_idx = param_names.index(self._focused_param) + next_idx = (current_idx + 1) % len(param_names) + else: + next_idx = 0 + self._focused_param = param_names[next_idx] + return True + + # Preset picker navigation + if self._show_preset_picker: + # Enter: select currently highlighted preset + if key_str == "return": + if self._presets: + idx = self._preset_scroll_offset + if idx < len(self._presets): + self._current_preset = self._presets[idx] + self._emit_event( + "preset_changed", preset_name=self._current_preset + ) + self._show_preset_picker = False + return True + # Escape: close picker without changing + elif key_str == "escape": + self._show_preset_picker = False + return True + + # Escape: deselect stage (only when picker not active) + elif key_str == "escape" and self.selected_stage: + self.selected_stage = None + for ctrl in self.stages.values(): + ctrl.selected = False + self._focused_param = None + return True + + return False + + def _normalize_key(self, key: str | int, modifiers: int) -> str: + """Normalize key to a string identifier.""" + # Handle pygame keysyms if imported + try: + import pygame + + if isinstance(key, int): + # Map pygame constants to strings + key_map = { + pygame.K_UP: "up", + pygame.K_DOWN: "down", + pygame.K_LEFT: "left", + pygame.K_RIGHT: "right", + pygame.K_SPACE: " ", + 
pygame.K_ESCAPE: "escape", + pygame.K_s: "s", + pygame.K_w: "w", + # HJKL navigation (vi-style) + pygame.K_h: "h", + pygame.K_j: "j", + pygame.K_k: "k", + pygame.K_l: "l", + } + # Check for keypad keys with KP prefix + if hasattr(pygame, "K_KP8") and key == pygame.K_KP8: + return "kp8" + if hasattr(pygame, "K_KP2") and key == pygame.K_KP2: + return "kp2" + if hasattr(pygame, "K_KP4") and key == pygame.K_KP4: + return "kp4" + if hasattr(pygame, "K_KP6") and key == pygame.K_KP6: + return "kp6" + return key_map.get(key, f"pygame_{key}") + except ImportError: + pass + + # Already a string? + if isinstance(key, str): + return key.lower() + + return str(key) + + def set_event_callback(self, event_type: str, callback: Callable) -> None: + """Register a callback for UI events. + + Args: + event_type: Event type ("stage_toggled", "param_changed", "stage_selected", "preset_changed") + callback: Function to call when event occurs + """ + self._callbacks[event_type] = callback + + def _emit_event(self, event_type: str, **data) -> None: + """Emit an event to registered callbacks.""" + callback = self._callbacks.get(event_type) + if callback: + try: + callback(**data) + except Exception: + pass + + def set_presets(self, presets: list[str], current: str) -> None: + """Set available presets and current selection. + + Args: + presets: List of preset names + current: Currently active preset name + """ + self._presets = presets + self._current_preset = current + + def cycle_preset(self, direction: int = 1) -> str: + """Cycle to next/previous preset. 
+ + Args: + direction: 1 for next, -1 for previous + + Returns: + New preset name + """ + if not self._presets: + return self._current_preset + try: + current_idx = self._presets.index(self._current_preset) + except ValueError: + current_idx = 0 + next_idx = (current_idx + direction) % len(self._presets) + self._current_preset = self._presets[next_idx] + self._emit_event("preset_changed", preset_name=self._current_preset) + return self._current_preset + + def _render_preset_picker(self, panel_width: int) -> list[str]: + """Render a full-screen preset picker overlay.""" + lines = [] + picker_height = min(len(self._presets) + 2, self.config.stage_list_height) + # Create a centered box + title = " Select Preset " + box_width = min(40, panel_width - 2) + lines.append("┌" + "─" * (box_width - 2) + "┐") + lines.append("│" + title.center(box_width - 2) + "│") + lines.append("├" + "─" * (box_width - 2) + "┤") + # List presets with selection + visible_start = self._preset_scroll_offset + visible_end = visible_start + picker_height - 2 + for i in range(visible_start, min(visible_end, len(self._presets))): + preset_name = self._presets[i] + is_current = preset_name == self._current_preset + prefix = "▶ " if is_current else " " + line = f"│ {prefix}{preset_name}" + if len(line) < box_width - 1: + line = line.ljust(box_width - 1) + lines.append(line[: box_width - 1] + "│") + # Footer with help + help_text = "[P] close [↑↓] navigate [Enter] select" + footer = "├" + "─" * (box_width - 2) + "┤" + lines.append(footer) + lines.append("│" + help_text.center(box_width - 2) + "│") + lines.append("└" + "─" * (box_width - 2) + "┘") + return lines diff --git a/engine/pipeline/validation.py b/engine/pipeline/validation.py new file mode 100644 index 0000000..8cc0781 --- /dev/null +++ b/engine/pipeline/validation.py @@ -0,0 +1,221 @@ +""" +Pipeline validation and MVP (Minimum Viable Pipeline) injection. 
+ +Provides validation functions to ensure pipelines meet minimum requirements +and can auto-inject sensible defaults when fields are missing or invalid. +""" + +from dataclasses import dataclass +from typing import Any + +from engine.display import BorderMode, DisplayRegistry +from engine.effects import get_registry +from engine.pipeline.params import PipelineParams + +# Known valid values +VALID_SOURCES = ["headlines", "poetry", "fixture", "empty", "pipeline-inspect"] +VALID_CAMERAS = [ + "feed", + "scroll", + "vertical", + "horizontal", + "omni", + "floating", + "bounce", + "radial", + "static", + "none", + "", +] +VALID_DISPLAYS = None # Will be populated at runtime from DisplayRegistry + + +@dataclass +class ValidationResult: + """Result of validation with changes and warnings.""" + + valid: bool + warnings: list[str] + changes: list[str] + config: Any # PipelineConfig (forward ref) + params: PipelineParams + + +# MVP defaults +MVP_DEFAULTS = { + "source": "fixture", + "display": "terminal", + "camera": "static", # Static camera provides camera_y=0 for viewport filtering + "effects": [], + "border": False, +} + + +def validate_pipeline_config( + config: Any, params: PipelineParams, allow_unsafe: bool = False +) -> ValidationResult: + """Validate pipeline configuration against MVP requirements. 
+ + Args: + config: PipelineConfig object (has source, display, camera, effects fields) + params: PipelineParams object (has border field) + allow_unsafe: If True, don't inject defaults or enforce MVP + + Returns: + ValidationResult with validity, warnings, changes, and validated config/params + """ + warnings = [] + changes = [] + + if allow_unsafe: + # Still do basic validation but don't inject defaults + # Always return valid=True when allow_unsafe is set + warnings.extend(_validate_source(config.source)) + warnings.extend(_validate_display(config.display)) + warnings.extend(_validate_camera(config.camera)) + warnings.extend(_validate_effects(config.effects)) + warnings.extend(_validate_border(params.border)) + return ValidationResult( + valid=True, # Always valid with allow_unsafe + warnings=warnings, + changes=[], + config=config, + params=params, + ) + + # MVP injection mode + # Source + source_issues = _validate_source(config.source) + if source_issues: + warnings.extend(source_issues) + config.source = MVP_DEFAULTS["source"] + changes.append(f"source → {MVP_DEFAULTS['source']}") + + # Display + display_issues = _validate_display(config.display) + if display_issues: + warnings.extend(display_issues) + config.display = MVP_DEFAULTS["display"] + changes.append(f"display → {MVP_DEFAULTS['display']}") + + # Camera + camera_issues = _validate_camera(config.camera) + if camera_issues: + warnings.extend(camera_issues) + config.camera = MVP_DEFAULTS["camera"] + changes.append("camera → static (no camera stage)") + + # Effects + effect_issues = _validate_effects(config.effects) + if effect_issues: + warnings.extend(effect_issues) + # Only change if all effects are invalid + if len(config.effects) == 0 or all( + e not in _get_valid_effects() for e in config.effects + ): + config.effects = MVP_DEFAULTS["effects"] + changes.append("effects → [] (none)") + else: + # Remove invalid effects, keep valid ones + valid_effects = [e for e in config.effects if e in 
_get_valid_effects()] + if valid_effects != config.effects: + config.effects = valid_effects + changes.append(f"effects → {valid_effects}") + + # Border (in params) + border_issues = _validate_border(params.border) + if border_issues: + warnings.extend(border_issues) + params.border = MVP_DEFAULTS["border"] + changes.append(f"border → {MVP_DEFAULTS['border']}") + + valid = len(warnings) == 0 + if changes: + # If we made changes, pipeline should be valid now + valid = True + + return ValidationResult( + valid=valid, + warnings=warnings, + changes=changes, + config=config, + params=params, + ) + + +def _validate_source(source: str) -> list[str]: + """Validate source field.""" + if not source: + return ["source is empty"] + if source not in VALID_SOURCES: + return [f"unknown source '{source}', valid sources: {VALID_SOURCES}"] + return [] + + +def _validate_display(display: str) -> list[str]: + """Validate display field.""" + if not display: + return ["display is empty"] + # Check if display is available (lazy load registry) + try: + available = DisplayRegistry.list_backends() + if display not in available: + return [f"display '{display}' not available, available: {available}"] + except Exception as e: + return [f"error checking display availability: {e}"] + return [] + + +def _validate_camera(camera: str | None) -> list[str]: + """Validate camera field.""" + if camera is None: + return ["camera is None"] + # Empty string is valid (static, no camera stage) + if camera == "": + return [] + if camera not in VALID_CAMERAS: + return [f"unknown camera '{camera}', valid cameras: {VALID_CAMERAS}"] + return [] + + +def _get_valid_effects() -> set[str]: + """Get set of valid effect names.""" + registry = get_registry() + return set(registry.list_all().keys()) + + +def _validate_effects(effects: list[str]) -> list[str]: + """Validate effects list.""" + if effects is None: + return ["effects is None"] + valid_effects = _get_valid_effects() + issues = [] + for effect in effects: + 
if effect not in valid_effects: + issues.append( + f"unknown effect '{effect}', valid effects: {sorted(valid_effects)}" + ) + return issues + + +def _validate_border(border: bool | BorderMode) -> list[str]: + """Validate border field.""" + if isinstance(border, bool): + return [] + if isinstance(border, BorderMode): + return [] + return [f"invalid border value, must be bool or BorderMode, got {type(border)}"] + + +def get_mvp_summary(config: Any, params: PipelineParams) -> str: + """Get a human-readable summary of the MVP pipeline configuration.""" + camera_text = "none" if not config.camera else config.camera + effects_text = "none" if not config.effects else ", ".join(config.effects) + return ( + f"MVP Pipeline Configuration:\n" + f" Source: {config.source}\n" + f" Display: {config.display}\n" + f" Camera: {camera_text} (static if empty)\n" + f" Effects: {effects_text}\n" + f" Border: {params.border}" + ) diff --git a/engine/render/__init__.py b/engine/render/__init__.py new file mode 100644 index 0000000..8db2d73 --- /dev/null +++ b/engine/render/__init__.py @@ -0,0 +1,37 @@ +"""Modern block rendering system - OTF font to terminal half-block conversion. + +This module provides the core rendering capabilities for big block letters +and styled text output using PIL fonts and ANSI terminal rendering. 
+ +Exports: + - make_block: Render a headline into a content block with color + - big_wrap: Word-wrap text and render with OTF font + - render_line: Render a line of text as terminal rows using half-blocks + - font_for_lang: Get appropriate font for a language + - clear_font_cache: Reset cached font objects + - lr_gradient: Color block characters with left-to-right gradient + - lr_gradient_opposite: Complementary gradient coloring +""" + +from engine.render.blocks import ( + big_wrap, + clear_font_cache, + font_for_lang, + list_font_faces, + load_font_face, + make_block, + render_line, +) +from engine.render.gradient import lr_gradient, lr_gradient_opposite + +__all__ = [ + "big_wrap", + "clear_font_cache", + "font_for_lang", + "list_font_faces", + "load_font_face", + "lr_gradient", + "lr_gradient_opposite", + "make_block", + "render_line", +] diff --git a/engine/render.py b/engine/render/blocks.py similarity index 61% rename from engine/render.py rename to engine/render/blocks.py index c0ecb7d..3492317 100644 --- a/engine/render.py +++ b/engine/render/blocks.py @@ -1,7 +1,6 @@ -""" -OTF → terminal half-block rendering pipeline. -Font loading, text rasterization, word-wrap, gradient coloring, headline block assembly. -Depends on: config, terminal, sources, translate. +"""Block rendering core - Font loading, text rasterization, word-wrap, and headline assembly. + +Provides PIL font-based rendering to terminal half-block characters. """ import random @@ -12,74 +11,50 @@ from PIL import Image, ImageDraw, ImageFont from engine import config from engine.sources import NO_UPPER, SCRIPT_FONTS, SOURCE_LANGS -from engine.terminal import RST from engine.translate import detect_location_language, translate_headline -# ─── GRADIENT ───────────────────────────────────────────── -def _color_codes_to_ansi(color_codes): - """Convert a list of 256-color codes to ANSI escape code strings. 
+def estimate_block_height(title: str, width: int, fnt=None) -> int: + """Estimate rendered block height without full PIL rendering. - Pattern: first 2 are bold, middle 8 are normal, last 2 are dim. + Uses font bbox measurement to count wrapped lines, then computes: + height = num_lines * RENDER_H + (num_lines - 1) + 2 Args: - color_codes: List of 12 integers (256-color palette codes) + title: Headline text to measure + width: Terminal width in characters + fnt: Optional PIL font (uses default if None) Returns: - List of ANSI escape code strings + Estimated height in terminal rows """ - if not color_codes or len(color_codes) != 12: - # Fallback to default green if invalid - return _default_green_gradient() - - result = [] - for i, code in enumerate(color_codes): - if i < 2: - # Bold for first 2 (bright leading edge) - result.append(f"\033[1;38;5;{code}m") - elif i < 10: - # Normal for middle 8 - result.append(f"\033[38;5;{code}m") + if fnt is None: + fnt = font() + text = re.sub(r"\s+", " ", title.upper()) + words = text.split() + lines = 0 + cur = "" + for word in words: + test = f"{cur} {word}".strip() if cur else word + bbox = fnt.getbbox(test) + if bbox: + img_h = bbox[3] - bbox[1] + 8 + pix_h = config.RENDER_H * 2 + scale = pix_h / max(img_h, 1) + term_w = int((bbox[2] - bbox[0] + 8) * scale) else: - # Dim for last 2 (dark trailing edge) - result.append(f"\033[2;38;5;{code}m") - return result - - -def _default_green_gradient(): - """Return the default 12-color green gradient for fallback when no theme is active.""" - return [ - "\033[1;38;5;231m", # white - "\033[1;38;5;195m", # pale cyan-white - "\033[38;5;123m", # bright cyan - "\033[38;5;118m", # bright lime - "\033[38;5;82m", # lime - "\033[38;5;46m", # bright green - "\033[38;5;40m", # green - "\033[38;5;34m", # medium green - "\033[38;5;28m", # dark green - "\033[38;5;22m", # deep green - "\033[2;38;5;22m", # dim deep green - "\033[2;38;5;235m", # near black - ] - - -def _default_magenta_gradient(): - 
"""Return the default 12-color magenta gradient for fallback when no theme is active.""" - return [ - "\033[1;38;5;231m", # white - "\033[1;38;5;225m", # pale pink-white - "\033[38;5;219m", # bright pink - "\033[38;5;213m", # hot pink - "\033[38;5;207m", # magenta - "\033[38;5;201m", # bright magenta - "\033[38;5;165m", # orchid-red - "\033[38;5;161m", # ruby-magenta - "\033[38;5;125m", # dark magenta - "\033[38;5;89m", # deep maroon-magenta - "\033[2;38;5;89m", # dim deep maroon-magenta - "\033[2;38;5;235m", # near black - ] + term_w = 0 + max_term_w = width - 4 - 4 + if term_w > max_term_w and cur: + lines += 1 + cur = word + else: + cur = test + if cur: + lines += 1 + if lines == 0: + lines = 1 + return lines * config.RENDER_H + max(0, lines - 1) + 2 # ─── FONT LOADING ───────────────────────────────────────── @@ -223,65 +198,22 @@ def big_wrap(text, max_w, fnt=None): return out -def lr_gradient(rows, offset=0.0, cols=None): - """Color each non-space block character with a shifting left-to-right gradient.""" - if cols is None: - from engine import config - - if config.ACTIVE_THEME: - cols = _color_codes_to_ansi(config.ACTIVE_THEME.main_gradient) - else: - cols = _default_green_gradient() - n = len(cols) - max_x = max((len(r.rstrip()) for r in rows if r.strip()), default=1) - out = [] - for row in rows: - if not row.strip(): - out.append(row) - continue - buf = [] - for x, ch in enumerate(row): - if ch == " ": - buf.append(" ") - else: - shifted = (x / max(max_x - 1, 1) + offset) % 1.0 - idx = min(round(shifted * (n - 1)), n - 1) - buf.append(f"{cols[idx]}{ch}{RST}") - out.append("".join(buf)) - return out - - -def lr_gradient_opposite(rows, offset=0.0): - """Complementary (opposite wheel) gradient used for queue message panels.""" - return lr_gradient(rows, offset, _default_magenta_gradient()) - - -def msg_gradient(rows, offset): - """Apply message (ntfy) gradient using theme complementary colors. 
- - Returns colored rows using ACTIVE_THEME.message_gradient if available, - falling back to default magenta if no theme is set. - - Args: - rows: List of text strings to colorize - offset: Gradient offset (0.0-1.0) for animation - - Returns: - List of rows with ANSI color codes applied - """ - from engine import config - - cols = ( - _color_codes_to_ansi(config.ACTIVE_THEME.message_gradient) - if config.ACTIVE_THEME - else _default_magenta_gradient() - ) - return lr_gradient(rows, offset, cols) - - # ─── HEADLINE BLOCK ASSEMBLY ───────────────────────────── def make_block(title, src, ts, w): - """Render a headline into a content block with color.""" + """Render a headline into a content block with color. + + Args: + title: Headline text to render + src: Source identifier (for metadata) + ts: Timestamp string (for metadata) + w: Width constraint in terminal characters + + Returns: + tuple: (content_lines, color_code, meta_row_index) + - content_lines: List of rendered text lines + - color_code: ANSI color code for display + - meta_row_index: Row index of metadata line + """ target_lang = ( (SOURCE_LANGS.get(src) or detect_location_language(title)) if config.MODE == "news" diff --git a/engine/render/gradient.py b/engine/render/gradient.py new file mode 100644 index 0000000..14a6c5a --- /dev/null +++ b/engine/render/gradient.py @@ -0,0 +1,82 @@ +"""Gradient coloring for rendered block characters. + +Provides left-to-right and complementary gradient effects for terminal display. 
+""" + +from engine.terminal import RST + +# Left → right: white-hot leading edge fades to near-black +GRAD_COLS = [ + "\033[1;38;5;231m", # white + "\033[1;38;5;195m", # pale cyan-white + "\033[38;5;123m", # bright cyan + "\033[38;5;118m", # bright lime + "\033[38;5;82m", # lime + "\033[38;5;46m", # bright green + "\033[38;5;40m", # green + "\033[38;5;34m", # medium green + "\033[38;5;28m", # dark green + "\033[38;5;22m", # deep green + "\033[2;38;5;22m", # dim deep green + "\033[2;38;5;235m", # near black +] + +# Complementary sweep for queue messages (opposite hue family from ticker greens) +MSG_GRAD_COLS = [ + "\033[1;38;5;231m", # white + "\033[1;38;5;225m", # pale pink-white + "\033[38;5;219m", # bright pink + "\033[38;5;213m", # hot pink + "\033[38;5;207m", # magenta + "\033[38;5;201m", # bright magenta + "\033[38;5;165m", # orchid-red + "\033[38;5;161m", # ruby-magenta + "\033[38;5;125m", # dark magenta + "\033[38;5;89m", # deep maroon-magenta + "\033[2;38;5;89m", # dim deep maroon-magenta + "\033[2;38;5;235m", # near black +] + + +def lr_gradient(rows, offset=0.0, grad_cols=None): + """Color each non-space block character with a shifting left-to-right gradient. 
+ + Args: + rows: List of text lines with block characters + offset: Gradient offset (0.0-1.0) for animation + grad_cols: List of ANSI color codes (default: GRAD_COLS) + + Returns: + List of lines with gradient coloring applied + """ + cols = grad_cols or GRAD_COLS + n = len(cols) + max_x = max((len(r.rstrip()) for r in rows if r.strip()), default=1) + out = [] + for row in rows: + if not row.strip(): + out.append(row) + continue + buf = [] + for x, ch in enumerate(row): + if ch == " ": + buf.append(" ") + else: + shifted = (x / max(max_x - 1, 1) + offset) % 1.0 + idx = min(round(shifted * (n - 1)), n - 1) + buf.append(f"{cols[idx]}{ch}{RST}") + out.append("".join(buf)) + return out + + +def lr_gradient_opposite(rows, offset=0.0): + """Complementary (opposite wheel) gradient used for queue message panels. + + Args: + rows: List of text lines with block characters + offset: Gradient offset (0.0-1.0) for animation + + Returns: + List of lines with complementary gradient coloring applied + """ + return lr_gradient(rows, offset, MSG_GRAD_COLS) diff --git a/engine/scroll.py b/engine/scroll.py deleted file mode 100644 index 1bb90a2..0000000 --- a/engine/scroll.py +++ /dev/null @@ -1,161 +0,0 @@ -""" -Render engine — ticker content, scroll motion, message panel, and firehose overlay. -Orchestrates viewport, frame timing, and layers. 
-""" - -import random -import time - -from engine import config -from engine.display import ( - Display, - TerminalDisplay, -) -from engine.display import ( - get_monitor as _get_display_monitor, -) -from engine.frame import calculate_scroll_step -from engine.layers import ( - apply_glitch, - process_effects, - render_figment_overlay, - render_firehose, - render_message_overlay, - render_ticker_zone, -) -from engine.viewport import th, tw - -USE_EFFECT_CHAIN = True - - -def stream(items, ntfy_poller, mic_monitor, display: Display | None = None): - """Main render loop with four layers: message, ticker, scroll motion, firehose.""" - if display is None: - display = TerminalDisplay() - random.shuffle(items) - pool = list(items) - seen = set() - queued = 0 - - time.sleep(0.5) - w, h = tw(), th() - display.init(w, h) - display.clear() - fh = config.FIREHOSE_H if config.FIREHOSE else 0 - ticker_view_h = h - fh - GAP = 3 - scroll_step_interval = calculate_scroll_step(config.SCROLL_DUR, ticker_view_h) - - active = [] - scroll_cam = 0 - ticker_next_y = ticker_view_h - noise_cache = {} - scroll_motion_accum = 0.0 - msg_cache = (None, None) - frame_number = 0 - - # Figment overlay (optional — requires cairosvg) - figment = None - if config.FIGMENT: - try: - from effects_plugins.figment import FigmentEffect - - figment = FigmentEffect() - figment.config.enabled = True - figment.config.params["interval_secs"] = config.FIGMENT_INTERVAL - except (ImportError, OSError): - pass - - while True: - if queued >= config.HEADLINE_LIMIT and not active: - break - - t0 = time.monotonic() - w, h = tw(), th() - fh = config.FIREHOSE_H if config.FIREHOSE else 0 - ticker_view_h = h - fh - scroll_step_interval = calculate_scroll_step(config.SCROLL_DUR, ticker_view_h) - - msg = ntfy_poller.get_active_message() - msg_overlay, msg_cache = render_message_overlay(msg, w, h, msg_cache) - - buf = [] - ticker_h = ticker_view_h - - scroll_motion_accum += config.FRAME_DT - while scroll_motion_accum >= 
scroll_step_interval: - scroll_motion_accum -= scroll_step_interval - scroll_cam += 1 - - while ( - ticker_next_y < scroll_cam + ticker_view_h + 10 - and queued < config.HEADLINE_LIMIT - ): - from engine.effects import next_headline - from engine.render import make_block - - t, src, ts = next_headline(pool, items, seen) - ticker_content, hc, midx = make_block(t, src, ts, w) - active.append((ticker_content, hc, ticker_next_y, midx)) - ticker_next_y += len(ticker_content) + GAP - queued += 1 - - active = [ - (c, hc, by, mi) for c, hc, by, mi in active if by + len(c) > scroll_cam - ] - for k in list(noise_cache): - if k < scroll_cam: - del noise_cache[k] - - grad_offset = (time.monotonic() * config.GRAD_SPEED) % 1.0 - ticker_buf_start = len(buf) - - ticker_buf, noise_cache = render_ticker_zone( - active, scroll_cam, ticker_h, w, noise_cache, grad_offset - ) - buf.extend(ticker_buf) - - mic_excess = mic_monitor.excess - render_start = time.perf_counter() - - if USE_EFFECT_CHAIN: - buf = process_effects( - buf, - w, - h, - scroll_cam, - ticker_h, - mic_excess, - grad_offset, - frame_number, - msg is not None, - items, - ) - else: - buf = apply_glitch(buf, ticker_buf_start, mic_excess, w) - firehose_buf = render_firehose(items, w, fh, h) - buf.extend(firehose_buf) - - # Figment overlay (between effects and ntfy message) - if figment and figment.config.enabled: - figment_state = figment.get_figment_state(frame_number, w, h) - if figment_state is not None: - figment_buf = render_figment_overlay(figment_state, w, h) - buf.extend(figment_buf) - - if msg_overlay: - buf.extend(msg_overlay) - - render_elapsed = (time.perf_counter() - render_start) * 1000 - monitor = _get_display_monitor() - if monitor: - chars = sum(len(line) for line in buf) - monitor.record_effect("render", render_elapsed, chars, chars) - - display.show(buf) - - elapsed = time.monotonic() - t0 - time.sleep(max(0, config.FRAME_DT - elapsed)) - frame_number += 1 - - display.cleanup() diff --git 
a/engine/sensors/__init__.py b/engine/sensors/__init__.py new file mode 100644 index 0000000..24dd5ff --- /dev/null +++ b/engine/sensors/__init__.py @@ -0,0 +1,203 @@ +""" +Sensor framework - PureData-style real-time input system. + +Sensors are data sources that emit values over time, similar to how +PureData objects emit signals. Effects can bind to sensors to modulate +their parameters dynamically. + +Architecture: +- Sensor: Base class for all sensors (mic, camera, ntfy, OSC, etc.) +- SensorRegistry: Global registry for sensor discovery +- SensorStage: Pipeline stage wrapper for sensors +- Effect param_bindings: Declarative sensor-to-param routing + +Example: + class GlitchEffect(EffectPlugin): + param_bindings = { + "intensity": {"sensor": "mic", "transform": "linear"}, + } + +This binds the mic sensor to the glitch intensity parameter. +""" + +from abc import ABC, abstractmethod +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from engine.pipeline.core import PipelineContext + + +@dataclass +class SensorValue: + """A sensor reading with metadata.""" + + sensor_name: str + value: float + timestamp: float + unit: str = "" + + +class Sensor(ABC): + """Abstract base class for sensors. + + Sensors are real-time data sources that emit values. They can be: + - Physical: mic, camera, joystick, MIDI, OSC + - Virtual: ntfy, timer, random, noise + + Each sensor has a name and emits SensorValue objects. + """ + + name: str + unit: str = "" + + @property + def available(self) -> bool: + """Whether the sensor is currently available.""" + return True + + @abstractmethod + def read(self) -> SensorValue | None: + """Read current sensor value. + + Returns: + SensorValue if available, None if sensor is not ready. + """ + ... + + @abstractmethod + def start(self) -> bool: + """Start the sensor. + + Returns: + True if started successfully. + """ + ... 
+ + @abstractmethod + def stop(self) -> None: + """Stop the sensor and release resources.""" + ... + + +class SensorRegistry: + """Global registry for sensors. + + Provides: + - Registration of sensor instances + - Lookup by name + - Global start/stop + """ + + _sensors: dict[str, Sensor] = {} + _started: bool = False + + @classmethod + def register(cls, sensor: Sensor) -> None: + """Register a sensor instance.""" + cls._sensors[sensor.name] = sensor + + @classmethod + def get(cls, name: str) -> Sensor | None: + """Get a sensor by name.""" + return cls._sensors.get(name) + + @classmethod + def list_sensors(cls) -> list[str]: + """List all registered sensor names.""" + return list(cls._sensors.keys()) + + @classmethod + def start_all(cls) -> bool: + """Start all sensors. + + Returns: + True if all sensors started successfully. + """ + if cls._started: + return True + + all_started = True + for sensor in cls._sensors.values(): + if sensor.available and not sensor.start(): + all_started = False + + cls._started = all_started + return all_started + + @classmethod + def stop_all(cls) -> None: + """Stop all sensors.""" + for sensor in cls._sensors.values(): + sensor.stop() + cls._started = False + + @classmethod + def read_all(cls) -> dict[str, float]: + """Read all sensor values. + + Returns: + Dict mapping sensor name to current value. + """ + result = {} + for name, sensor in cls._sensors.items(): + value = sensor.read() + if value: + result[name] = value.value + return result + + +class SensorStage: + """Pipeline stage wrapper for sensors. + + Provides sensor data to the pipeline context. + Sensors don't transform data - they inject sensor values into context. 
+ """ + + def __init__(self, sensor: Sensor, name: str | None = None): + self._sensor = sensor + self.name = name or sensor.name + self.category = "sensor" + self.optional = True + + @property + def stage_type(self) -> str: + return "sensor" + + @property + def inlet_types(self) -> set: + from engine.pipeline.core import DataType + + return {DataType.ANY} + + @property + def outlet_types(self) -> set: + from engine.pipeline.core import DataType + + return {DataType.ANY} + + @property + def capabilities(self) -> set[str]: + return {f"sensor.{self.name}"} + + @property + def dependencies(self) -> set[str]: + return set() + + def init(self, ctx: "PipelineContext") -> bool: + return self._sensor.start() + + def process(self, data: Any, ctx: "PipelineContext") -> Any: + value = self._sensor.read() + if value: + ctx.set_state(f"sensor.{self.name}", value.value) + ctx.set_state(f"sensor.{self.name}.full", value) + return data + + def cleanup(self) -> None: + self._sensor.stop() + + +def create_sensor_stage(sensor: Sensor, name: str | None = None) -> SensorStage: + """Create a pipeline stage from a sensor.""" + return SensorStage(sensor, name) diff --git a/engine/sensors/mic.py b/engine/sensors/mic.py new file mode 100644 index 0000000..3d7ee72 --- /dev/null +++ b/engine/sensors/mic.py @@ -0,0 +1,145 @@ +""" +Mic sensor - audio input as a pipeline sensor. + +Self-contained implementation that handles audio input directly, +with graceful degradation if sounddevice is unavailable. 
+""" + +import atexit +import time +from collections.abc import Callable +from dataclasses import dataclass +from datetime import datetime +from typing import Any + +try: + import numpy as np + import sounddevice as sd + + _HAS_AUDIO = True +except Exception: + np = None # type: ignore + sd = None # type: ignore + _HAS_AUDIO = False + + +from engine.events import MicLevelEvent +from engine.sensors import Sensor, SensorRegistry, SensorValue + + +@dataclass +class AudioConfig: + """Configuration for audio input.""" + + threshold_db: float = 50.0 + sample_rate: float = 44100.0 + block_size: int = 1024 + + +class MicSensor(Sensor): + """Microphone sensor for pipeline integration. + + Self-contained implementation with graceful degradation. + No external dependencies required - works with or without sounddevice. + """ + + def __init__(self, threshold_db: float = 50.0, name: str = "mic"): + self.name = name + self.unit = "dB" + self._config = AudioConfig(threshold_db=threshold_db) + self._db: float = -99.0 + self._stream: Any = None + self._subscribers: list[Callable[[MicLevelEvent], None]] = [] + + @property + def available(self) -> bool: + """Check if audio input is available.""" + return _HAS_AUDIO and self._stream is not None + + def start(self) -> bool: + """Start the microphone stream.""" + if not _HAS_AUDIO or sd is None: + return False + + try: + self._stream = sd.InputStream( + samplerate=self._config.sample_rate, + blocksize=self._config.block_size, + channels=1, + callback=self._audio_callback, + ) + self._stream.start() + atexit.register(self.stop) + return True + except Exception: + return False + + def stop(self) -> None: + """Stop the microphone stream.""" + if self._stream: + try: + self._stream.stop() + self._stream.close() + except Exception: + pass + self._stream = None + + def _audio_callback(self, indata, frames, time_info, status) -> None: + """Process audio data from sounddevice.""" + if not _HAS_AUDIO or np is None: + return + + rms = 
np.sqrt(np.mean(indata**2)) + if rms > 0: + db = 20 * np.log10(rms) + else: + db = -99.0 + + self._db = db + + excess = max(0.0, db - self._config.threshold_db) + event = MicLevelEvent( + db_level=db, excess_above_threshold=excess, timestamp=datetime.now() + ) + self._emit(event) + + def _emit(self, event: MicLevelEvent) -> None: + """Emit event to all subscribers.""" + for callback in self._subscribers: + try: + callback(event) + except Exception: + pass + + def subscribe(self, callback: Callable[[MicLevelEvent], None]) -> None: + """Subscribe to mic level events.""" + if callback not in self._subscribers: + self._subscribers.append(callback) + + def unsubscribe(self, callback: Callable[[MicLevelEvent], None]) -> None: + """Unsubscribe from mic level events.""" + if callback in self._subscribers: + self._subscribers.remove(callback) + + def read(self) -> SensorValue | None: + """Read current mic level as sensor value.""" + if not self.available: + return None + + excess = max(0.0, self._db - self._config.threshold_db) + return SensorValue( + sensor_name=self.name, + value=excess, + timestamp=time.time(), + unit=self.unit, + ) + + +def register_mic_sensor() -> None: + """Register the mic sensor with the global registry.""" + sensor = MicSensor() + SensorRegistry.register(sensor) + + +# Auto-register when imported +register_mic_sensor() diff --git a/engine/sensors/oscillator.py b/engine/sensors/oscillator.py new file mode 100644 index 0000000..d814723 --- /dev/null +++ b/engine/sensors/oscillator.py @@ -0,0 +1,161 @@ +""" +Oscillator sensor - Modular synth-style oscillator as a pipeline sensor. + +Provides various waveforms that can be: +1. Self-driving (phase accumulates over time) +2. 
Sensor-driven (phase modulated by external sensor) + +Built-in waveforms: +- sine: Pure sine wave +- square: Square wave (0 to 1) +- sawtooth: Rising sawtooth (0 to 1, wraps) +- triangle: Triangle wave (0 to 1 to 0) +- noise: Random values (0 to 1) + +Example usage: + osc = OscillatorSensor(waveform="sine", frequency=0.5) + # Or driven by mic sensor: + osc = OscillatorSensor(waveform="sine", frequency=1.0, input_sensor="mic") +""" + +import math +import random +import time +from enum import Enum + +from engine.sensors import Sensor, SensorRegistry, SensorValue + + +class Waveform(Enum): + """Built-in oscillator waveforms.""" + + SINE = "sine" + SQUARE = "square" + SAWTOOTH = "sawtooth" + TRIANGLE = "triangle" + NOISE = "noise" + + +class OscillatorSensor(Sensor): + """Oscillator sensor that generates periodic or random values. + + Can run in two modes: + - Self-driving: phase accumulates based on frequency + - Sensor-driven: phase modulated by external sensor value + """ + + WAVEFORMS = { + "sine": lambda p: (math.sin(2 * math.pi * p) + 1) / 2, + "square": lambda p: 1.0 if (p % 1.0) < 0.5 else 0.0, + "sawtooth": lambda p: p % 1.0, + "triangle": lambda p: 2 * abs(2 * (p % 1.0) - 1) - 1, + "noise": lambda _: random.random(), + } + + def __init__( + self, + name: str = "osc", + waveform: str = "sine", + frequency: float = 1.0, + input_sensor: str | None = None, + input_scale: float = 1.0, + ): + """Initialize oscillator sensor. 
+ + Args: + name: Sensor name + waveform: Waveform type (sine, square, sawtooth, triangle, noise) + frequency: Frequency in Hz (self-driving mode) + input_sensor: Optional sensor name to drive phase + input_scale: Scale factor for input sensor + """ + self.name = name + self.unit = "" + self._waveform = waveform + self._frequency = frequency + self._input_sensor = input_sensor + self._input_scale = input_scale + self._phase = 0.0 + self._start_time = time.time() + + @property + def available(self) -> bool: + return True + + @property + def waveform(self) -> str: + return self._waveform + + @waveform.setter + def waveform(self, value: str) -> None: + if value not in self.WAVEFORMS: + raise ValueError(f"Unknown waveform: {value}") + self._waveform = value + + @property + def frequency(self) -> float: + return self._frequency + + @frequency.setter + def frequency(self, value: float) -> None: + self._frequency = max(0.0, value) + + def start(self) -> bool: + self._phase = 0.0 + self._start_time = time.time() + return True + + def stop(self) -> None: + pass + + def _get_input_value(self) -> float: + """Get value from input sensor if configured.""" + if self._input_sensor: + from engine.sensors import SensorRegistry + + sensor = SensorRegistry.get(self._input_sensor) + if sensor: + reading = sensor.read() + if reading: + return reading.value * self._input_scale + return 0.0 + + def read(self) -> SensorValue | None: + current_time = time.time() + elapsed = current_time - self._start_time + + if self._input_sensor: + input_val = self._get_input_value() + phase_increment = (self._frequency * elapsed) + input_val + else: + phase_increment = self._frequency * elapsed + + self._phase += phase_increment + + waveform_fn = self.WAVEFORMS.get(self._waveform) + if waveform_fn is None: + return None + + value = waveform_fn(self._phase) + value = max(0.0, min(1.0, value)) + + return SensorValue( + sensor_name=self.name, + value=value, + timestamp=current_time, + unit=self.unit, + ) + 
+ def set_waveform(self, waveform: str) -> None: + """Change waveform at runtime.""" + self.waveform = waveform + + def set_frequency(self, frequency: float) -> None: + """Change frequency at runtime.""" + self.frequency = frequency + + +def register_oscillator_sensor(name: str = "osc", **kwargs) -> None: + """Register an oscillator sensor with the global registry.""" + sensor = OscillatorSensor(name=name, **kwargs) + SensorRegistry.register(sensor) diff --git a/engine/sensors/pipeline_metrics.py b/engine/sensors/pipeline_metrics.py new file mode 100644 index 0000000..98f2793 --- /dev/null +++ b/engine/sensors/pipeline_metrics.py @@ -0,0 +1,114 @@ +""" +Pipeline metrics sensor - Exposes pipeline performance data as sensor values. + +This sensor reads metrics from a Pipeline instance and provides them +as sensor values that can drive effect parameters. + +Example: + sensor = PipelineMetricsSensor(pipeline) + sensor.read() # Returns SensorValue with total_ms, fps, etc. +""" + +from typing import TYPE_CHECKING + +from engine.sensors import Sensor, SensorValue + +if TYPE_CHECKING: + from engine.pipeline.controller import Pipeline + + +class PipelineMetricsSensor(Sensor): + """Sensor that reads metrics from a Pipeline instance. + + Provides real-time performance data: + - total_ms: Total frame time in milliseconds + - fps: Calculated frames per second + - stage_timings: Dict of stage name -> duration_ms + + Can be bound to effect parameters for reactive visuals. 
+ """ + + def __init__(self, pipeline: "Pipeline | None" = None, name: str = "pipeline"): + self._pipeline = pipeline + self.name = name + self.unit = "ms" + self._last_values: dict[str, float] = { + "total_ms": 0.0, + "fps": 0.0, + "avg_ms": 0.0, + "min_ms": 0.0, + "max_ms": 0.0, + } + + @property + def available(self) -> bool: + return self._pipeline is not None + + def set_pipeline(self, pipeline: "Pipeline") -> None: + """Set or update the pipeline to read metrics from.""" + self._pipeline = pipeline + + def read(self) -> SensorValue | None: + """Read current metrics from the pipeline.""" + if not self._pipeline: + return None + + try: + metrics = self._pipeline.get_metrics_summary() + except Exception: + return None + + if not metrics or "error" in metrics: + return None + + self._last_values["total_ms"] = metrics.get("total_ms", 0.0) + self._last_values["fps"] = metrics.get("fps", 0.0) + self._last_values["avg_ms"] = metrics.get("avg_ms", 0.0) + self._last_values["min_ms"] = metrics.get("min_ms", 0.0) + self._last_values["max_ms"] = metrics.get("max_ms", 0.0) + + # Provide total_ms as primary value (for LFO-style effects) + return SensorValue( + sensor_name=self.name, + value=self._last_values["total_ms"], + timestamp=0.0, + unit=self.unit, + ) + + def get_stage_timing(self, stage_name: str) -> float: + """Get timing for a specific stage.""" + if not self._pipeline: + return 0.0 + try: + metrics = self._pipeline.get_metrics_summary() + stages = metrics.get("stages", {}) + return stages.get(stage_name, {}).get("avg_ms", 0.0) + except Exception: + return 0.0 + + def get_all_timings(self) -> dict[str, float]: + """Get all stage timings as a dict.""" + if not self._pipeline: + return {} + try: + metrics = self._pipeline.get_metrics_summary() + return metrics.get("stages", {}) + except Exception: + return {} + + def get_frame_history(self) -> list[float]: + """Get historical frame times for sparklines.""" + if not self._pipeline: + return [] + try: + return 
self._pipeline.get_frame_times() + except Exception: + return [] + + def start(self) -> bool: + """Start the sensor (no-op for read-only metrics).""" + return True + + def stop(self) -> None: + """Stop the sensor (no-op for read-only metrics).""" + pass diff --git a/engine/themes.py b/engine/themes.py deleted file mode 100644 index a6d3432..0000000 --- a/engine/themes.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -Theme definitions with color gradients for terminal rendering. - -This module is data-only and does not import config or render -to prevent circular dependencies. -""" - - -class Theme: - """Represents a color theme with two gradients.""" - - def __init__(self, name, main_gradient, message_gradient): - """Initialize a theme with name and color gradients. - - Args: - name: Theme identifier string - main_gradient: List of 12 ANSI 256-color codes for main gradient - message_gradient: List of 12 ANSI 256-color codes for message gradient - """ - self.name = name - self.main_gradient = main_gradient - self.message_gradient = message_gradient - - -# ─── GRADIENT DEFINITIONS ───────────────────────────────────────────────── -# Each gradient is 12 ANSI 256-color codes in sequence -# Format: [light...] → [medium...] → [dark...] 
→ [black] - -_GREEN_MAIN = [231, 195, 123, 118, 82, 46, 40, 34, 28, 22, 22, 235] -_GREEN_MSG = [231, 225, 219, 213, 207, 201, 165, 161, 125, 89, 89, 235] - -_ORANGE_MAIN = [231, 215, 209, 208, 202, 166, 130, 94, 58, 94, 94, 235] -_ORANGE_MSG = [231, 195, 33, 27, 21, 21, 21, 18, 18, 18, 18, 235] - -_PURPLE_MAIN = [231, 225, 177, 171, 165, 135, 129, 93, 57, 57, 57, 235] -_PURPLE_MSG = [231, 226, 226, 220, 220, 184, 184, 178, 178, 172, 172, 235] - - -# ─── THEME REGISTRY ─────────────────────────────────────────────────────── - -THEME_REGISTRY = { - "green": Theme("green", _GREEN_MAIN, _GREEN_MSG), - "orange": Theme("orange", _ORANGE_MAIN, _ORANGE_MSG), - "purple": Theme("purple", _PURPLE_MAIN, _PURPLE_MSG), -} - - -def get_theme(theme_id): - """Retrieve a theme by ID. - - Args: - theme_id: Theme identifier string - - Returns: - Theme object matching the ID - - Raises: - KeyError: If theme_id is not in registry - """ - return THEME_REGISTRY[theme_id] diff --git a/figments/animal-head-symbol-of-mexico-antique-cultures-svgrepo-com.svg b/figments/animal-head-symbol-of-mexico-antique-cultures-svgrepo-com.svg deleted file mode 100644 index 264eb08..0000000 --- a/figments/animal-head-symbol-of-mexico-antique-cultures-svgrepo-com.svg +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/figments/mayan-mask-of-mexico-svgrepo-com.svg b/figments/mayan-mask-of-mexico-svgrepo-com.svg deleted file mode 100644 index 75fca60..0000000 --- a/figments/mayan-mask-of-mexico-svgrepo-com.svg +++ /dev/null @@ -1,60 +0,0 @@ - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/figments/mayan-symbol-of-mexico-svgrepo-com.svg b/figments/mayan-symbol-of-mexico-svgrepo-com.svg deleted file mode 100644 index a396536..0000000 --- a/figments/mayan-symbol-of-mexico-svgrepo-com.svg +++ /dev/null @@ -1,110 +0,0 @@ - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/fonts/Kapiler.otf b/fonts/Kapiler.otf deleted file mode 
100644 index 695140a2180db69683d986a8438f253a790afab8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 76120 zcmeFa34B%8eeb)E0LhV^!8V>qkX17o2_Fp@As}rN2_ZC~5eXsEG#JY^2?j@CH?GEr zK|?ehAL$%!HI_+3HAXt;a24C=(zNfs+m<%zklYT5L(|*5T-x-tNg%<#djH=S;Q`~M zeQDoIdj+5U*VaC3@4eRVyVlzOwf;+I=kDB@>)h$oI*Xk6)NR|M-}r~SFFMW-`yD6o ze0pAPVfy!GUUwY-Nyqus@Z7?<#KhmJ{zJ!cMLUl3seRAxeQtGfv&V6kyzDq(v(J?6 zeKP8w_FG!Ves_|du>*Jf(vm3K|GsT2pLzDk=lsr!4?4~rm5#Ic(a)5XANLn z-1TM0Y5MB3dq4M_D>BcLx*jwWesJ%zWov)o(RKEFt)<`c+@Zrq=FU5bmi~-=9__f4 z)Nwu*IDF_=KlsrfIExm2$BsCE)f!)atee&6MMz*RB7A7=ZF^ZSdOkGcHw`-`0@*VpFvmpBi)i~`pQccQ}H zKffR0l!nF5@897p4Ld%+|32IQPxJecPHos|>Y?XeEZ_h1Ge@Ey-M2P6F+MRN`iU2# zi=RJm`8al-skooC@U{0d;0kUd&}ojryklI zJ^w-U{3n~a;w=Y@%gPV$KXfoU!5tqz|IuUf>2FCHz5j6Z-smIcd!Hlqtqqo`NR2?}~9-Xk>9UuKv z`Jrc{w_D*K%Rcn<{(bI!hn{s(okPxZ&WldDv)_5zdB!*dIcx1_q7!dhg6%zF zDWaWX=XvLVrFhZKD6`*nuG@Zo+>RcwpKo2`Fu#>qbt$u>FPMGOQf4`O?acj_R!0lW zKFt;P+Hdc?wywO{uJO(@qHj4zDQ@QRC#Njtj)(2uhn$0UR)Xaw-u}PytjBJ-{yRUt z_3qJjHI;L(ZAYvGd##LR&a?Kj-1&?hJLEiN_MPRrsoCw0-tO2=BXZ*|Pmy!c`8B(P z?z_+KQ)c@|%sy$qKW|6(Tj`#*{cZE9Q{PN;$o5q3^^`o?@>XtXp0#7!?JSXh%*nPR zPup+%EX_VXwYI7r$2Qj^^DX%!YEAs^xZc(=@u?lZ^`9$j(c&fH5qG>V^3J>7|A7yF z=>mT2cv@vsa`W;F3U?M2?<(0{y5|$W^2uLyYHI6_HGBNsT|Irh z{in{HK6`F(U}$(`Y;=5bA}}2c{pquNpLptP&To9?8RxT4JD>MD2VeWvlQ%NC|9R&( zr+)oY{PO7+zV521Zh!feKe+VOKmNl%`l2&4`xoE%?|<$%e^&mLKXIy#SDrZ9(AZSp z+UB%$`Z~U3U6`GASf?&_^_tDG{%wi#0jJvOcYezmb*7y;*CJPh>u%SFT?wwuuC1=^ zt|HeS*Irkd>$9%vMT-~Ru_$uUU5nO4FO6On9TojZ^y=u%(OaX>M2|dj&m;Fe^3Wq? zb8~jxXuIBT+V#eqkSoj;?z+qMpj~g1E7g_m%5#;tKIMASbMb4s#`Rm;q{b2M* zqNDA4o9ue0-@M+Fb8~awn%g^f!M0cCtOPgy&FP6V|8MS-H+J6m?Kl3*8?$d*c;oDA zGp~hS3%(Y3?TfGVzvh2!(`$*}_=9i!?l*RSqjJ7B+&neKo*3Kgf#x_nt;&};rB)>! 
zt5l!12LI3d_do4f_jk_g&fhz4IRCd*qH*!DlgP$4x8hzF57Tm)grbn>YC={Y@Qy z!nup@{>kU6YodS9Q_lX)1+1;#{-1S8O_v5v-ffQ!MRhm{}B7j%-&=FQ*FE7+^c&z_g*{qo3?*1_CCA9z0L#dKWNvvmvirR zbna5pFSYa`+trZ)fc` z`_SCqm|bQ^9w7Y#mcHBe@5L_VxY9ph=}XP(E)S6YK}&zhdC*e6Z1%pnFPnW}?lrRy z%}tqIHutB_Lzd>U*?Uachb-l+*`;&eHGALOZ=1b;?(fV#ICsSCLvw#=cG=w5oMq%> znVogn_V1nhL$k`sGRnKm@*8IR56pef?1Of%Wp?ggnUz+LI4kGAVs@3~%n@l9oSB+59`OC@+rj4Prys^IU%hJ8O1@^N8JRg$)jtnw6?6oORg8ux@M|c7t7Eg|iWx zjNLppV@FcBLI!E1=L&N>Wp`k6u({YG&f0}7!IpA`3a(Izt-@AgYp}IgslCFH+In`# zO6?U+Gxj9c=^|w}wg=mb?ZftCrRoYts;+RP>Iz4yu5d=M=ef=dcGm1l)8~rWl~~oZ zl};l2Td*nEtytBcl}-Wsd$1MQN^BLj8e4;{#nxd}XI46@GbK)sU5rYRF1QHDslu8nV*4Xen1&=`NWS{Z-Ik z<-}SGT}2&OWdjHu7yVVv&^RTEY@stK!{EUu>du!{Py z%27>NotEd&Ls1>WI6|1NftDKXh z>B4qnd$7IOK5Rc$wPF>uVimPw6}4g&wPKZXo-{MqSIPMW>?}tvT5eaP;c7Hoje4t5 zZ?z4)mfEM%a5c)TMw!*Lk=1Cj+Qu6CRLZP&RHD_CXf-8TO^H@hqScP_xte^gCZDUx z=W6n~#%NABYskqOa#@1kKu`0_N%Cd&Ctf4GxD9akBgHJoLKCB~^duuZ8kj_^yTTTKKMo?^+x4DNR1M09%L^_q9$P z_9*rk_Bi$gR>aprd@aP+LVPX6*E%hH+KO$%wqre5FIKf;t&K8dRV&sysugRUQ`pnk zGuX4(b6C}fwbX~T)Q7dyhqcs)wKm$;r(@W0>;!faJB3x9S?j3ItaVgp);g*)G4P6k zSB$kh9TBe>s}Zu|6=VNpRik3y6$7src*Vdg23|4dizjigCm%#u2X=N4#R_ z$70|W1Fsl(#lR~DUNP{Bal|Xe5w93WykZ>jim~|^S@DX2R}8#j@n>RzduI`Uy+9r-Y^j(nI{W1;kFBe7^6i{`Ot9*gF&rn%CzV%xCoSP#~VRU3&#^H?;G zMe|tm5Of#$Ke3MdpIAr!Ppl*VC)Sbw6Kh;_eYKZZ^p8dVSoDuY|5(#sX~wYQ*a_?; zb_y%~W6?hr{bSL8oq0-^Olx`R>m2z@>&#!0-C~}=I`hD_FW-BeBY$a~`Af2U__U6F z`9kaPh1THXB~ddI`eZb8}*ggN3rqP zMC^vSPur(S>~EZFu>EB0=DBLy-^%gr*gR|jwurNe+22L_5^O1F)p5K9+lp<&wqre5 zFSY~QiS=Rq*ppo66#JvtG3+>Y0y~MF!cJoY*dR89JBrpkV{TfLo7Uu} zHMwa`Zd#L@*5sxibJLHx>A~EzEH^!vn;y(f59X!^bJK&lX-#fglbhD$rZu@~O>TNH zH$9k}9?VS-=B5X8(}TI`!Q8YaH?7G{YjV?rx#_{&^k8m!FgHDzn-=7z1-WTKZd#C= z7UZS}bJK&l>A~FeU~YOaH$9k}9?VS(a?^s`v>=;*F;(MW6bGX?7{$RT&KO16r}AOq zjFGH-m^kAgD<38fMsYBTC;rMzZI(|P8_7-AQfk%lt%tfoRN~1{}X4V zWaagI659;*6Khl2;aIykzB-#ThSIc?favii1}i zyyD;$2d_BerIZ0|5F0WZ&loD+W+yJ&zMeSow4iuKQt`B-c-m1s?I_-^p)?8Bi^dxl zS&gjYX-Dyluj3hE#WTW+cQnF^XKWSkq;VHLo8oCr7VzSTJk@yGR6K1eo;DS4jCB1% 
ztVZAQw5fPUBd>TyUh#~);vJ2=;_WFWt1(x+J;h`-=8Ctcn5@QJ@wB*jT3kFWE}j+_ zPm7DEHO14K;%QCsw5E8*V)6Eb(iJooi?=6~tj1#T_JoqvSS+5F6;I2Gr)9;{vf^o3 z@wA$FT1`ByCZ1Lk&loM<(HJe>(YP$ZxMZ6Zkpze&7?GE3Up-p_L=-gCzKA5Ce}ZX# z#r8L2^~_2z<#ioto`B{Frn#;o%@fc(0nHQ8JOPChOjTW{6sxCOf@!PkRAQ^J)z}(r zEmpok0@@~^Z35aRplt%$CZKHs+9sfFf@!OJb#aAmY!9{<+lTGP>bakQwh3sPfVK%} zn}D_nXq$kx322*O+Uoi<*jcQes)^M6M63CiY+pJjqH`iOKM|c1(K!*F6VX}W=SWMb znxAMj|LbOx*w@@bA~ipenx9C`PsDRbr2Z#b{Z|^XNQ6Zq^*<37iLgk7MItN`ssD-8 z|3s&Vdlhpp)&E55e2tsoIniI7T!R3fAj zjg(5)iS=RqSj|Wz(h3r31&OqRL|Q>2tss$BkVq>?gj6D=5+RicsYD~C>j$twYzV8F zibPsLBCQ}1|7Sh(SL@9yyJEVF)Otv*HxErmG^4fNSX?$M3hSY;9{tyw7o<mAK*tw*c%Xtf@#)}z&Wr-e^jv2ECPtOx7GO0o6k z@98?Kz3b6rJ({dXll9IhpN?V2u@l%y>=ahYtVfykDD$}0-pkJ8R#&f>RZD-|>X>X6 zN7QB?x0)bZiLJs`V{5Rr*e+~0wg=mb?ZftC)m}E(9k-a3FS@~4$jTSpVEW5yhHZo8 zL22Z9ZZOSd<#}!}on+IQqt#g16jEwrw~>+EMxKuw8Q*PW ze7Di+tv;TaB&3)?zgV+-N<6tmeZvG6vkp7;qzFz>SOnH=0Ur`?MR|gYCuk zVf(QfCvLRapFeCi5son>eCBmlPP;LWlyH;$&@{rvL{paWXhgQ z*^?R7CR6rg%AQQwlPP;LWlyH;$yTr2c72sS+3J<7%ARcXN>*i0rtHa-J(;p6Q}$%a zo=n-3DSI+yPqrGOb5-_at0A%~dop9>WXhgQ*^?=IGG$Mu?8%fpnX)HS_Dz&}6Q$ln zc{fqsO;+B^&L&E_iIQ%jjGHLqCM%;pRsJ`T|4rn76Zzjn{x^~TP2_(QdD}$ZHj%eY z0ZYH0bjfm2yHf|=Ln<289+-@ef zn~jCiRAQ^J)z}(rEmmdOOj$Nlmd%u9GiBLKSvFIa&Bj7k>&EtAd$E1keyr-@X3D#n z@@}TQnWfgncV)ac{v8OFo-((eg+G4zvUeAavRz_JpBeqx>W%Z2MVr7(7>}iXYQC6|1 zEmq%T6?@uZ^-WgKh%Ht|Sv@1RSQ%yYjM!pjl+`n03(trxJR?%9q+fATi0!7pG=--_ zip?!8wd0yWN`Y*O`6pI#V!J8krMPWhvE3B&QkI!jY&XUH!`IAeekp}#L5eL1)O9r1 zlmh(}^Hp>m#dcH7W6>29+f6Z#Mb}YmH-)}Eg}yz7zCDG$J%##^LVZY~KBQ0|Qm79p z)Q1#DvE3A>6RX&6isQ#Bwwq!q>H6|yQs}o+=(khow^OJgDb$b@YDfw-B!wE1LJdiA z6x&U40$9a%Q=AZ1vE3B&aAXzRO|`o86|;KgrlM*pPuo@3B0Q+c+gqGu{k)l{CUsXSFvd8($Gs+TOKJit^uz*N&ySJ1p@s?|eT&5Nd5 zJ(ShFXsXpiS&e#9Q9Bj2Q&BsWCuypUZgd^ZZKhfcl-1m3s?|VQ&26Sy4V0Ctsi>NY zs;Q`&%F{5_MlrgMV!NrP&y0BwYHM4q2Fj|YZMB-F{ViCvvaMFHWL1B*TK&=f9<18e zR@&EA+SgXv*H+rsR@&EAt4m6wXW3Tj%vS2mR_e@F>daQ^%vRdhR@&EA+SgXv*H+rs zR@&EAt2w%wo@HC9AzP^-Td5&isUcgbAzO*0EMjP3NGTSK8HcGUO5^bYI+bGdC^0|$CZX=)D$mcfl 
znQk;EY@SXzNhc@iM)Q*Gi)K3cNhd$)ZaQU2r!47|C7rUQQUL8m%0>d37u}8~+f5Vg zYhHA_X(Fq+&F!XxtQ6R83TR(*o7+tRSz&C@L+YDxIGnl!}VCFUh?itM7 zW-xP`!OU$2Gq)Mc+-5*L1L7GF&wzLa#50(=&0yv>gPGe5W^OZ>xy@kaHiKG`L9NK3 zR%B2sGMH)4VCFW1ncECzZZnv<&7eMHP#-d=4;j>l4C+G$Gq)Mc+-5Lyo59R&1~a!A z%-m)$ua!Za$)L_;P-kpKqiK~1uS|Gl!YdPAnefVlS0=nN;gt!mOn7C&D-&Ls@XCZ& zCcHA?l?ks*cxA#X6JD9{%7j-YyfWdH39n3eWx^{HUYYR9gjXiKGU1g8uS|Gl!YdPA znefVlS0=nN;gt!mOn7C&D-&Ls@XCZ&CcHA?l?ks*cxA#X6JD9{%7j-YyfWdH39n3e zWx^{HUYYR9gjXiKGU1g8uS|Gl!YdPAnefVlS0=o&@Mp4^PsoB!7IdavQRz?<+D&e3+1y=K8sm`EOgGohsnZ+$-;-p!iUMihsnZ+ z$zo0*3(d38JPXaU&^!yxvzQaeVoo57Ie{$Z1hSYD$YM?)3(d38JPXaU&^!zOCky{4 z3;!nz|0fIoCky{4i#dTT^v^>7EcDMp|19*+Voo57Ie{$Z1hSYD$YM?)i#dTT^v^>7 zEcDMp|7=EH*?8&M_)FP%=Gn}iWi#f=#`n&~U&_Yc&Sp+Eo3UXwzEC#4P&U3$Hoj0c zzEC#4P`3FwmyC-1oNWA@Z2X*T{G4q3oNWA@Z00t1Fjm^ZSZN1ir5%iwc9>85Y2&Kc z?hf;98q6xTyTiPiYO@+W?O^n@gVECtMo&A;kJVXw9gLWE*eF0L72Dn6D7L%9Mgg*l?e4HLYF}fe9gLNBFjm^ZSZN1i zr5%iwc9^%XG>Yx+a1`6!Vcx#1V!J!cZ;@4O*Ve0BtRp*PE+H_M^D7KbAvl%Aqyo(3)~AN6JYK zHW!wVD36g;9_=WPc9cgu%A+0S8J$QwSM$|* zjI8r$M|q5}@)%*|F~Z7Ygq6qGDv#K19<3>l)|AHxD~~pnN1Mu{P32kcmfDr`xxVJE z^B8^S@wCcgngG7R#e$<sT}QFq0&0E%HNSwGUqH<-fIi(FQEPxQ2z@cRX{5!pcNF*3JPci1+;e!V4|J3oXJ6Ey4>e!V4|J3oWwz zE4^a7MV5bA#deD<|FVkh7U7E);fogGix%OF7U7E);fogGixx3IR>b^RG5td^W3giM zULvjSsqPh1_liyVOSZ4MsbYGGVtR>UdWmAjWySOr#l&`t=`o7wrHkneiW#XDo8O`< zt7aBcGmDL&uB_RcV)}$)`h;TogkpOVD!qD!Vrwz7>K}^jztU^gv6!}BOxrJ}?HAMb zi)s7CjN^)_)y356Vrq3UwYr!dpqO!7G2^&m#&N}r~FQz{) zrhh1=e<-GZD5ifXrhh1=e<)@gS4@j2rbQIfB8q7d#q<)zjN^(K#}zY~`^d z+{O5A7vsBKR&SM3vE5y!t*qjkyG&bI#W#1Ewz74k)O`3Z#(=vR1MXrBxQj91F2;bn zOeLjPYlTX@y%U~6L&FA+{HL?7vscTj1zY;PTa*fahKKGOO{@--4e=P zLfK0wdkJMPq3k7;y@axtQ1%kaUP9SRD0>NIFQM!ul)Z$qmr(W+%3ea*ODKB@WiO%Z zC5(SdD0>NIFQM!ul)Z$qmr(W+%3ea*ODKB@WiO%ZC6v8{vX@Zy63Sje*-I#U31u&# z>?M?aH>KW9sdrP}-IRB?mG`ofU8pU=? 
z$!94 z?6EQ`jh+#EtcUl~E}*U$Mu^D681+9xJ1)V!L~+jIyti z{({*GE9qCv>hE?dU|PY`p@P_M1!OBATVei*MXZVKR+yLKwtdBRE6ht-W>&G?3Szqz zaIfH5P(f_B0{RuuuP|Rl*HLV@!aNpTL9yKm^H_8p#da&`+bihXE9l!R=-Vr(4;9ph z3hF}z^`V0LP(gjDAhugUY`4N@b#xa$R^)*ls1-R-$bs+E$`%CE8Y^Z6(@PqHQJG zR-$bs+E$`%CE8Y^Z6(@PqHQJGR-$bs+E$`%CE8Y^Z6(@PqHQJGR-$bs+E$`%CE8Y^ zZ6(@PqHQJGR-$bs+E$`%CE8Y^Z6(@PnwO~ZO4~}btwh^Ow5>$jO0=y++e);pMB7TV ztwh^Ow5>$jO0=y++e-5ub$w}DiMCZ}TZOh&Xj_H0RcKp)l zo2=I1)R{io--DGeREIBAhc8rzFI0ywREIBAXId$Z#vOHNQimpWXi|qJb!bwDFH~p# zp3=(~s>2tm!xyT<7pgO5bX@VNIuxiwfjShZLxDOJsI!@YD>hfIaq&@V%TY$eM;Q?x zWkh_G5%Ez*xJRi8M;Y55Wo&ztvF%aDwnt%o6xK&!eH7M58IvA`)KN$sh15|<9c8q5 zj6578568&EG4gPXJRBnr$H>Dm@^Fki93v0M$ip%6aEv@0BM-;O!!h!3j6578568&E zG4gPXJRBnr$H~KS@^G9y948OQ$-{B-aGX3GClANT!*TL(oID&S568*Faq@7SJRBzv z$H~KS@^G9y948OQ$-{B-aDqIXAP*WKl>69cLz22@WBsGc0wlf!y)SWgb? z$zeS?tS1IkPYkG@7*IVipnB#{>WKl>69cLz22@WBsGb;5J*BIsboG?3p3>D*x_U}i zZ?h;%t%Qo!)!QtJtYUWc%%ap=t=Ilhtp1|Bo)}O)F`#<;O-CBAjo2n^GgdL6dSXEJ z#DMCF0o4-&swW0i&&)|ZGbi=ToYXUOQcn!1o)}O)F`#;4K=s6c>e0L&&Fj&;9?k3L zHCGI%o)}O)F`#;4K=s6c>WKl>Gh0&6Y)L({CH2gfG{CC?UJdYSfL8;&8sOCcuLgMO zZ-|##`Sq7&4e)A!R|C8n;MD-H26#2Vs{vjO@M?fp1H2mG)c~&scqtaGD=WI%0Ivpk zHNdL@UJdYSfL8;&8sODH%&q}m4e)A!SA$(iR}-%WYH|ZLxq+Bn1H2mG)c~&scs0PQ z0bUL8YJgV*yc*!u0IvpkHNdL@UJdYSfL8;&8sOCcuLfdv4e)A!R|C8n;MD-H26#2V zs{vjO@Y466Ewz%Y#IDA!!9I#rZ`=s4MtC*Cs}Wv}@M?rtBfRXJ07 z)da65cs0SR30_UKuO@gk!K(>gP4H@hR};LN;MD}LCU`Z$s|j9B@M?lr6TF(>)da65 zcs0SR30_U`YJyi2yqe(E1g|D|HNmS1UQO_7f>#r~n&8z0uO@gk!K(>gP4H@hR};LN z;MD}LCU`Z$s|j9B@M?lr6TF(>)da65cs0SR30_U`YJyi2yqe(E1g|D|HN&eJUd`}o zhF3GZnvGYKl}&3HnvIvN)-W_1FIlZ&Xogoayqe+F46kP6rBAhnp&4Gy@M?xvGrXFO zm(pkrL$mRc)f$Fo<0Y##49)OrhF3GZn&H)Ky!5G_U(N7phF3GZn&H)Kyp%?37@Ccj ztky6z+kaU-znbCI46kN*HN&eJUd_f!DYb^7*?7rn4MQ`$n&H(9uV#2P!>ieNDUH@J zG#f8jtzl?3Ub1?AHN&eJUd`}ohF3GZnvIuIY7IlP@siaVh8FX#FPYWfr?=3WT4+rz zHovMPiqW*trdnuIEwrf?qZ4VzwT7X^Ml`yDRus3`h(`O#SbYOni;Zh!Q?UB`^cLDw z3vH^!W@vRp@tPJ|Rtqhwg_hM~jC6g)Xj*K%qw6SA(_-Tt?U!Kn&0sAy>XB6hr^UQ| 
zS*;syF>ha1>&9Em+n1FG(n5=Cp~bb(;#y1(9oOI7wa}VcXiY7&rWPCZ=v=KQZ!!O0 zR_n=I%>S3wdh! zhf6zL+Tqd;mv*?c!=)WA?Qm&_OFLZJ;nEJ5cDS^|r5!HqaA}82J6zh~(hiq)xU|Ei z9WL!~X@^TYT-xE%4wrVgw8NzxF70q>hf6zL+Tqd;mv*?c!=)WA?Qm&_OFLZJ;nEJ5 zcDS^|r5!HqaA}82J6zh~(hiq)xU|E?0~ZfmJaF;A#RC@)Ts&~`z{LX>4_rKO@xa9c z7Y|%KaPh#!0~ZfmJaF;A#RC@)Ts&~`z{LX>4_rKO@xa9c7Y|%KaPh#!0~ZfmJaF;A z#RC@)Ts&~`z{LX>4_rKO@xa9c7Y|%KaPh#!0~ZfmJaF;A#RC@)Ts&~`z{LX>4_rKO z@xa9c7Y|%KaPh#!0~ZfmJaF;A#S0fNT)c4c!o>?0FI>EE@xsLm7cX4AaPh*$3l}e3 zym0Zt#S0fNT)c4c!o>?0FI>EE@xsLm7cX4AaPh*$3l}e3ym0Zt#S0fNT)c4c!o>?0 zFI>EE@xsLm7cX4AaPh*$3l}e3ym0Zt#S0fNT)c4c!o>?0FI>EE@xsLm7cX4AaPh*$ z3l}e3ym0Zt#S0fNT)c4c!leT)9p*nrTAs9WrUN=1(CIKbm+XkVvJU8UK&Jyb9p*VJ zy?nC{^PFYnp>@Eh0~Q_TIqQg4&U8Se10o$pL|2w4)&Y?Yh;%@t!+d6?mp|5FKC`U6 zvJSX(pnnJYcc6a<`gfpz2l{uIU#;}=zB-`L0fi1IbU>lQd>^IJx7&4?|18^%^VQ-Sq&i^K0izBWb-<_tMjbHfFdtjjl!w*wsnlG&`W#3A;|%b;7O_cAc>6gk2}>I$_rdyH413!mbl`ov`bKT_@~1 zVb=+}PS|zAt`l~hux5k=>^foB3A;|%b;7O_cAc>6 zgk2}>I$_rdyH413!mbl`ov`bKU8jwGB8`^Vb(&^N&Fb4bJ7L!eyH413!mbl`ov`bK zT_@~1Vb=+}PS|zAt`l~hu>^foRgPjj{KG^wS z=YyROc0SnoVCRFK4|YD-`C#XRoey?C*!f`RgPjj{KG^wS=YyROc0SnoVCRFK4|YD- z`C#XRoey?C*!f`RgPjj{KG^wS=YyROc0SnoVCRFK4|YD-`C#XRoey?C*!f`RgPjj{ zKG^wS=YyROc0SnoVCRFK4|YD-`C#XRoey?C*!f`RgPjj{KG^wS=YyROc0SnoVCRFK z4|aao`C;dWoga37*!f}Shn*jGe%Se8=ZBpic7E9TVdsaPA9jA&`C;dWoga37*!f}S zhn*jGe%Se8=ZBpic7E9TVdsaPA9jA&`C;dWoga37*!f}Shn*jGe%Se8=ZBpic7E9T zVdsaPA9jA&`C;dWoga37*!f}Shn*jGe%Se8=ZBpic7E9TVdsaPA9jA&`C;dWoga37 z*!f}Shn*jGe%Se8chcA`b52@x?~+-4FX&0*^^#dd`c4uvJ!uq_Mk~8c5*0mZ$}5fH zeJ6>Bo-{p`Mk~8cnx3*+*>%$Nl-0_vlNRYydPVw966rfhr0*n=zLP}yP7>)mY04!c|otCd|REz+m!DAIS5NZ&~!eJ6?Zog~tCl1N_{(cv!hwO(_&C}S5j zvy13(7bWeYq+RAy*|%vB9quxp%5D2v>C|Q3)H1VL>C|Q3l&+(1vFM^6b`c%!LX9re z=rXTK*U?I+F7ujn1+8@IGJi?e(MqQ->Qxu@s*8HnMZM}mpDy(2LZ2@5=|Z0_^ywlx z+(mS_i|B9{(cv!hmvnut#_6KYbWvxzs54#EnJyITLa{Cs>q4e-+V-PuKic-AZ9m%fqisLh_M>e-+V-PuKic-AZ9m%fqisLh_M>e-+V-Pu zKic-AZ9m%fqisLh_M>e-+V-PuKic-AZ9m%fqisLh_M>e-+V-PuKic-AZ9m%fqisLh 
z_M>e-+V-PuKic-AZ9m%fqwOj4g|3*trIk*nQ1X;1seP?6H0DvRd_h%KRT$t@=J?{*SCyeV;O|Waa;yLX%TyatcjOp~)#UIc5HjQfj5sDf541 zwd(tn`9HE+^?k~ek(K{*3I$G~z$p|sg#xEg;1nyJPUDlGrna2M8$XRVej0E5G~W1W zyzA4{gwy!dr}3*#<5!=?uRaay)381b>(j74jX!)EQl}wx8d9erbsA6g40$+19?p=5 zGvwh6c{oEJ&X9*Qv2yVbeXWG|FrvW0x14N1kh!hW?%mB&^pv(Zu44}+_joNfp zE4B^Wj`d)@Sk0CUFlrlM)HcAVZNOTaj-SS!!Rp-^14N1k?5Fkzu|wEl>>>UW4!& zgx4Uv2H`abuR(YX!fOy-gYX)J*C4zG;WY@aL3j>>UV}EuUTXS^*C3JoK_dHujIsyeH3+Xkcn!j95MF~s z_6LdV55jBEl-6~`YmhcFNE;cXjSSkzTF1p}5MF~evev$M4HDTOWMn-EuR(YX!fOa# zL+~1c*ATph;57uVA$SeJYY1LL@EU^G5WI%qH3Y9Acn!g82wp?*8iLmlyoTU41g{}@ z4Z&*&UPJI2g4YndhTt^>uOWC1!D|R!L+~1c*ATph;57uVA$SeJYY1LL@EU^G5WI%q zH3Y9Acn!g82wp?*8iLmlyoTU41g{}@4Z&*&UPJI2GVPZdEw!&9cn!g82wp?*8iLml zyoTU41g{}@4Z&*&UPJI2hSxB>hT$~~uVHu%!)q8`!|)o0*D$<>;WZ4eVR#M0YZzX` z@EV5KFuaE0H4Lv|cn!mA7+%Bh8iv;}yoTX546k8$4Z~{~Uc>MjhSxB>hT$~~uVHu% z!)q8`!|)o0*D$<>;WZ4eVR#M0YZzX`@EV5KFuaE0H4Lv|cn!mA7+%Bh8iv;}yoTX5 z46k8$4Z~{~Uc>MjhSxB>hT$~~uVHu%!)q8`!|)n`*9g2u;57oT5qOOluPD2RRyvIs zFIla08Zln7TIn%;57oT5qOQjYs7ddjaE917%y3^bQ&>UvU+}vz-t6v zBk&r5*9g2ujF(bsrPGMFi1AVyt#leOUb0&0G-Cf{)gO+) zYXn{+@EU>F2)ss&mr`n_(}?ks)k>#PW{F2_LN+(xygf zQ=>*F(vGjkYNgXCt!dOoG}_lnr%@uFqc*P5zE(Po+PFs7l#ek=n;NwlOC3>sZIqTZ zO3NChWsMpm-K7w#l}@9ysZks6XkROxMu~Wi+NekSTIn>(?C~hG$D_<1k1~5a%Ixtd zEpC(+H%f~erNxcX;znsrqqL?`TGJ@4Y1BqNy0TU}jWRPl%FOU6GsC0I439E1JW9(N zrDcuMvPNlHqo%jYC@*4^Rx?Vg8Ku>X+NeiY)=H;Q8}(c=mRjjF2A46ojKO6LE@N;R zgUc9P#^5prmod1E*+^4Y&x^-mod1E!DSpS z<8T>=%Q#%d;W7@Fakz}bWgIT!a2bcoI9$fzG7gt=%Q#%d;W7@Fakz}bWgIT!a2bcoI9$fz zG7gtfXf72Cg3swmkGE`z-0n16L6V;%LH5| z;4%T13AjwaWdbe}aG8M11Y9QIG69zfxJ zfXf72Cg3swmkGE`z-0n16L6V;%LH5|;4%T13AjwaWdbe}aG8M11Y9QIG69zfxJ`Y%OqST;W7!A zNw`eHWfCrvaG8Y5BwQxpG6|PSxJ<%j5-yW)nS{$ETqfZ%371K@Ou}UnE|YMXgv%sc zCgCy(mr1xx!etUJlW>`Y%OqST;W7!ANw`eHWfCrvaG8Y5BwQxpG6|PSxJ<%j5-yW) znS{$ETqfZ%371K@Ou}UfE>m!sg3Am!sg3Am!sg3A)M zHvd`Hi`D$kG^C~>H4Uk07)`@y8b;GFnugIdjHb=U)^#+uFb%0`NKHd(8dB5de<_Vt zI!&7|EvuDI)8z%Bs00PF&=3&1V_y8!G0unWL00J{L}0;kY0z%Bs00PF&=3&1V_y8!G0unWL00J{L}0;kY0z%Bs00PF&= 
z3&1V_y8!G0unWL00J{L}0;kY0(8>a^3&1V_y8!G0unWL00J{L}0;kY0z%Bs00PF&=3&1V_y8!G0unQQsrAAKgzzx7I0J|XUg0Kt1E(p6I?1Hch z!Y&BAAnby$3&Ji4yCCd>unWR22)iKcg0Kt1E(p6I?1Hch!Y&BAAnby$3&Ji4yCCd> zunWR22)iKcg0Kt1E(p6I?1Hch!Y&BAAnby$3&Ji4yCCd>unWR22)iKcg0Kt1E(p6I z?1Hch!Y&BAAnby$3&Ji4yCCd>unWR22)iKcg0Kt1E(p6I?1Hch!Y&BAAnby$3&Ab~ zyAbR`unWO11iKLILa+_V^$!7c>55bQ#*3&Ab~yAbR`unWO11iKLILa+_V^$!7c>55bQ#*3&Ab~yAbR`unWO11iKLILa+_V^$!7c>55bQ#* z3&Ab~yAbR`unWO11iKLILa+_V^$!7c>55bQ#*3&Ab~yAbR`unWO11iKLI zLa+(BPt5eZQBWGKbUIH|^t>srG+OC&o_Oea z(^F}*(&@bEDXW!E=S@#pt#mqXkv^qYr0+bDzVk%-&J*c7Po(cWk-qb$jM8hR(|J=y zRx6#(n=-Om>2%&AeY%b!edmevohQ^uLm6i%;|yh-p^P(>afUL^P{tX`I71m-;#{;cUN)=mTf1nb zlhyaFUF7@LE?OCtM&Gw~(aI>R?_0ZQrIXe7tzEP-$|~}H(aI>R$ooYrqpTwD7p;s+ zukTyCXl0bu_pM#DGRo@v)-GBZWfgh9Xl0aD_%+2wM>oaBLI(8$S z%RYM_`*+xW4n@P;lAjmVNcZ-(P-z)Pqsg564Gm{=$Jrs#hFX*|ciws+U$jzUGsU-tp)c z*M4MeFs39nW!<{R?zh*De>d(=;y)e#<%Fn&&n0{{@%xY8zacrPbYo?5>!$wAU)=KL zkN(*&e&?6(`PfH4{!~hPYRuN9TmM(uceZ^s{pIcFGCrE&%lL=P%*-=cznlG?oXFgD zc{%x?Dkv{Jw$oQMSUj`q+q*wdn!M+UJ+FVl_la-+O5(5lzLoy&xo^HL*mal$R)oNC) zW!0N$^fop<=?ZMq3RbOO)#_EPUDe7}ty|T&s=>BK+w|QbddBHZkZrcL+vc&&Yg>nH zowoUG^V_DCsaltMBi^dG-BOMo92`?$6sQb-iW66 zq3LaCdKa4Bgr@hP=`Cotmb%%_)tk@s-ZQ=ROz%9?8_)E4jGdcEOH?>E!i z%~sL-X{FH(KK6z_+gER=^+h)r*f%@czIr$HZR*+7uc=p4pH^qvdugveOg)(TFZEvP zyI3=1Ki^C5i}sn`D|+coT@{zPsjYN8igiSG+}UA4{>HH}zGN<)+5cS3UGi zef5{(ik|2lV~U^X@5J?2;`$qL{e`&xKK%AN+gfD%On>*SzxvkSeCsd1_4nSl*V#N9 z`|MS__XXP&<52yY$vB zy|e3fIleJAT{`a{FGZUu8^!49q}CK=ttrY{lir&ZWi2OaKDSZUa`gU%D9cfl^#f7X z4@6l%pzkw?vO7hY=N@HkJIdO&B7b`O)(X3#RvBrH(Hh$xwQa3!w`;rcc09p0ttQf1 zBI%|$tX2_e4N7&bHfWtXS!oZR56S)ri)NXob(M+Ak>d(-is`{k>)? 
z-@D%EoylrrUttXW-t|M@H?DSe`+iOTYk2@Z28!F_mbYcr1viAtxJ06l00hp)AFX}OUsj%A1yCh?^x0smh^rlyiSqy~q1*{$Dr#k?-PvLj~1=+k3(9t^;4UIuQA;I&edsf4*0&_uS|$H+sj7 z-f;6TuLqIV8X~PVL|SWzBtCev9!MRxJqxbbv*5~`^+9i}(ceSpZ8bMr!DcJ9Ml5D@E(rLtUD`&ljMsK0fJ81L<8ohr;Z=X?VPT9{}pP72!jNUe* zcg^TcGkVXA-ZJy{T70i@z07Rg4K|mVr@Nu=WvhLct@d5E+IQJ%-({M~dRU)t9|{6cTTrt3Ol3 zR(;wHk4bN=jwI%Ali=+}CwZ1e{g`61>dS6;QMa$JKf4!o`_bOd>P5wvQmWUNm?6>Z z|4nb|aXY5BsoeCaH0yuUtJ-Ow>HR5sdy3wjqBp1Ly(xNYN}YZ7Uf!18lA?E{=nW}) zKZ@RtqIaX5vUA?c3wzZ*yI|YDpPVjxQ_r`we79$+x0L)Ot$wm5mw5}xYNPko-QG@5 zQJdS5Q=~?58bxUoqfvxL@tL2L6UAc`jZrK{kr>5c6ovUoIaD8UjXpv%I`8zC zZ}buGHLLTJ`iS?M)%odt#KPR*!rb7EdD(@zL3uehpPdVHgZewBw`Su*g%xTeEcwbA#`Ut1rwAzSpezzva5Y|9xgb z7fSjcZ`JDp)Bku(e?Hd%|J$vUUAXd)jjM-k(|pas%Fmy+@^e9<1%(#YSKqo4?)EDo z-yO+Ym=AvE8vTW}qW^kpMHgnsZ=E4uP-H=o+pV8?dlY4%jx5xXpH@c}*7NF{k@USt zKkcug7L;00>ejyq{#nDm#!43E>lfzh7v}5#|C+B~_)eXm^_!Yu zdar(HVTJp`w_m*ddvzA(W^es#zXe4W6!~d?5xP)E7V5~qq>e1iss3~4R2P(5P--Ef z@zcL$@!#hAfqs4?8b7}|``7)pt$*FC{rsx-^Z$)OVa^AfyPakB&GV1g_X0*bDb61I z7T*e|*7+s-X2&M`&ah_t7StW~EvP5$`@p)KerLCRL*uaXtG2Rv(s{@f zyPRvqVb^vox>mAy|Me$7viMs+zGKNp?ppJ~kG%g^?)H6T^;hq4e#tK4++{=k<@UwG zk2}9;U+1^e`IO!GvreV+1mJub zu7_Q#T(PbM*GAV?`!fAp*C$+0xSnwxbRBX1y6f|-tZwKG%rri>{FC%dY?G z`fp)jVJTtxVI^UoeCEaHo+&%{?EZt#AKw4up(6*%4j+EHeD4co`wl((#NK_+A7N+T z{_=g#Kl{{yvd`)8k^KjrRlfJ&zQg;UK6qe%`Ck1jd%paTvT@{@y$27Kmwop6 zy$6mQdb+%9?~$_dcN{Aue>=H!Df@@9M>z89?3ccovtNI6uYgZJ{zQ4%3uW2tr{1uf zmv-psLkG(~lg2Mw=WY3+y+?9Ll*5tSXP!U!^xpF4pFObm`6C4!DpFC4-hRBC6ZUc+ zE3ve^VM$+h<34uGlzDRA>OKcabdV#5-f=w+J+J#b|MufKMcLV@1*wOgd$D}~)6X1< z{=}pEKCw1BF+MRN`iU2#i=RJm#h2I^1U--k}Yr@xuZw&uf_}1|4;XA^M!b`)S z53dS85&nhn)8V7xQ{n$P{CC4Ig?}~tJK?WKEROh4#IlH}h&2)GA`&B#BDO?iL=;7o zMm!yHDB{;5Dk5qkz7Ww7(Gzhx;u<&xk*c_=|{dM*L00-$(o);`+70+WY>; zYvaH7dk=r@Yp4J6AD3OtyZ+eKb=PyFKD|7C!!ub~4}a{J%QvrFc0K>xA|LLJczXGk$gB5Xf8hFthd&o_eZzHjqwcJI`P%os6difz7he8_$UE0AkGsFq z+uH7V_@(w%&r8c*YVaI$FaN-zMg?U7NlCOU;cfORgrL zx_aN$=&LEp$T~9edQbSZ18>}!vOMD2i!a~z`u1yozU10R!>>Mm^#Qx-hU@XyKXiTR 
z^|)gX-}(CbHwxob+&rE+aiy~Ex&V1ZRD3+W7qz1(X}7n|MKhQ5!YINbmx-S zFYdYaiSXB(-?(eZovv%qFE6^5fB)4#i@5fotJjxYzgBv+I{f-?UGKVn%I*eLxns6_tMg-0v708DUm|jTy-q^{b@}B^ zfn&!I=G^JQ2SW1*CM7?BQzDZd@>9f8NAxoKb-?fXc?<1a$Tg<{*e(l@eFqI(GpfD- zRDIta02aGLX(vu?21Fi{SE1KcDhM*F>03CZJ4q}ZE*gSIhTu23wA~@JiyV1HB)sT+ zXFoTIKf-@2;omI=@Ld*uit!AeCLY1yGT74q_yxRo72rLQK#P&mS$zQ4>X?#CzdU(Z zJ8&9L9N%e+W0g zF|qCm$j0D#tpie2PY&@Av6ptvBNW-%f;@uc zxBn;H0pCgJ;MM#dn8}l3!ZBq?$hjl!UgP1I0>2p=58n@e9iJj0oP8Lq<=e%43gP@O zc8MF0L|IX#zSf9WRhQM&Qmtv` z2qR-S7FQOn2o2Y!MY6TA;+hjDDmoPH&H348wlUkF$)NIdxp}z^!KWNYl`G;jXbhU~ znBqUaLPF^Ba#(F{ehx98=2%x(bG%K4*7Eh^%Pr|*f{bk1Ja5;}5z6m)q#7ZBCB~&$gjvLXUYgRU7eGvqLdY>wdqW0KEW@<2z|Yx zy1F3GfXYF2xnd0}Bz`A79*sex84wIyh)N2>_y{c^VrYygL&d1*@-kvV?V5&TcwiDa6)5kjM95n6~cP0>rSgp<@bHX|MUSOgytnxrPSDak-o zpb_Uz^GDa%tM;zT*Qx7Km$%uOGh4lyi4%=E>D8=WTb5OlftO?ysnrxwRvrOzwZ#*6 zFJDaO#?v_j3D2yIJk8fi2s^qJbvn%&)y;Gfm3kFTz}JL*R>F$*r-KYYP|f;AQypV6 zHR_v@hUnnyVjEWMYJp(-J(@rr^Oa!!6@Ec5TtL8O=ux+c2*nbw#DiN|LIB7$1bv+O33RFhB@r=|()Yp4vZ{(#q_1`jtK zdb>AOEEQU8qAwh?$soeH`nXkCgX)zKJJt`4dty>MdI&VN#R7ue5aAaP?1vM|ZcCO; zA-!dKy0f&2|*>kA?TXdw>d znFyK$0+RqO%z;)Q5u^(Q2?Dg>2l|CQ_V1eXPZdI9?V zZNaAk!4;w4hCpylfNV6hHU#oE1fL58wFu0U{KAoxlscpyN2h@eq` zyc5Cq0>QVF1V0Fn(jUb2@h!+WWf+Q9ZUPK^~N@No{ z;y6)Elo5KuK-3UM!bCI>W}=yBBTf)qL@&`tScw7RG%-Y+BhC{a6PJil;_8&RDe4zq zec`ni4!m&wg{RVE(o51WUp)EZsj2g(22PEgCY?5Qn)|fZr!Aj$WLns?h-p#N)YI-w zdpzym?k~A-a{p5%kYTbZGO28;%v~mzdB|Rt&63TL&6T|+dsntZwoJA{wojl$$SK`MyQO z?eTEWeolWfrz_uFLmA8K3?`=9RA)A~;kI+N9X-^?N4j>z z#%IUJFiDbnb6sVN!rGJ=ax5b(?f@HqFe5C1N-aB99mQ1ffGjR4E-Pc(JH;iX#U-Uw zNnweul!@&Sm*ne;bqZa6VZM%yjTPtT7wYmUU6HQ1faweq>q<%rN)=M)t5WB)`cdIU zo9Kya1aa2dFeNX2pO$)qG#G1za|&q3B9R5glJG@r6(8N7c@L(DVVmo7-}BGQto)nU zFq|WXq?MP{(0nWUza|*IhwbI(h% zv+=DNUHw#lO;=kR)7sWm)2Dzk%;vmLA|rs6^mSz(M#j|8 zTz|sUUe{UIZM^PvXVteW@m)K&#;sE%!#~IIaT)R@%Bw6Ij1o94vH!lKN|*p`T{mJ^K~rcP65 zbz7ym(p+jNDhV;XRG6>Ps+eqL<}r0_TC6HAIez)=b)Rqj%-%HEVr|EVEG;KbQ~m1B z=w_xlI=K3fVsAitOgI}BlNz+2ifHvex0MM_30K9aqLs&zQxoyjMASQ}-7Nw3C?;~~ 
zO4=R8S9iLu-D10A+K-u4W>rmANdYcBO7q7PtqzX@+9u*+jHK5_W5;%Od&hojs2vZr zr(77NM(Z#1*_l53knKtb-f`t-?Hz?xvUp``_$GE!_@2WdyYP@*svX;?O^x0scQdwK z7XofZ;t@CQq&-kb`65^0Rw1{+DzYS!-`_R$-e7O^o$(@_&F<00tF+)Lp{&=5%y`K-|lj0ZZ&xHE#58fVw$86uI_Cn`L zofJ2K8W`F|@GO@?ZW$Fnwav#q9gXi;`2NC&@iCwJi021A#kTmg#eVnocPqqFJ_kOr z31KdhHYtThsVp&KfiXFxqS6*=Z}gzeA7;naRndzSA(Map zuIKt4_Or3R`+uYU7J6^p7<1k0y`K4sdGAMT_F~tq4_p2=^|oc%XX}~GcOFLk1L=E( z<3+0wzKy!;9>-#2HwXVQKE;Yj^GmGUKdr)FtX!2<vjvFWM+HtrQzVOr2 z`_x$UMekEg->xn7UWyg#lt;bUO$Q>jucEBj_4eV57N+mw)%wpAH!h_`4YBs{)#%^JnK zt5PF3v-RQP&e2=7Un<6~Xc8~5qY0f6fs{(CN=sq(`Yny(C+8Fgztn=s9=nd8XD=jn zMwK7M^Ce28R!U}{-?C_avNpExrFYiU_w8bnqQtVNN3P#X{z)NS4nIfz?4eEMz#NOn zApWK^3WKZQNMr*a$d8M;Uj8#1CJnCv{oF_WA~>E$exsq6l+lSPDrLG7hc@n|e&-73 zrhdt#_MwkYGAEnpFRv?)ew+(-Z&t2SY}{_L>}Hdr#6eeXs=ic6BWi$^JB?atyqnyA zsIoqg4K#(fCUoHmJ(<0I6e{DwL>YHc85eSa-)7}X)E&On&>xi1NhvB-x~gcn{%r4H z@8B8BXdB)(dcEdzMZbg>&xzW)m^~UD6_y-?CmzWRI7IQ2B!`TFT|vx=Fl*dUGM;>P zIO_t^tsM@jRd|jI5pgw+cS&bRF!vU6S)KpYpDC8kIBtVatuPv$V6|J}uyx2Lf?wIl z%{xuK2ib$Yi33B_Q1ifRP;5;TD z2{SZDvT^40)(+}KZEJfI)6{J-3@8RHnxqc4GpRNqmWoMFh(`W;Oqk|~!ntDd?N1vA zF0mH}+lMbxm*dXu8f0S4QO3xss8kuNCTN6F>QFh8H_Ebu5LTeP>8d(z_lJ~qxd5ljdq4=L& z*M4Aaon4mZlX%lf!{A30oFqA|J{4_aVk{vi4w`YZ|K6(IW%M@xs2!WB&0W4%{h5Q; z?j(Mr_~uUYnQLrweM3X7xo}U)f$+m&0SCi;;_-N&%^7QvGCzZdMxo;Qm;w*IPeZ@S z^U$dCgqwl(JrX_SX1q^#^7CqG=jwZ<0G@(kKXiB<%&>lfysvT*9DuDPUmgt0G5GT2 z=O3ic&;1An#UPJwXZnf?7tzyFSg^@b&W!WspJ2 z*E`aNw~l;TGo~2(G$Ubz9f>~^ZVAA9155|@P(@f_MuA$Fipw76C18v zoU+`I!Dbk;>hjFF=DgPY4!l&7Kakb0?!m3e!3}EC#Ow>1-gi?V+V}=}_QJ`-1m?bUx~E+4)xPSJa6_P}xiJZ*srF zH?91x@nvuqCQYYHNWR{&JTx7mI0lNUO`MFTC?C|@l#cgFeiw3`oxi#~Xmq->fGaGe zVI0>$z6W1_oqw%6`TEZn7oeH*ulFpQ*S7cUZ;c3Q3Z9B47(ln(NIc#=Ketj!tEHV|Dn3ChgMJ0^vQAtyg z*-SMQG~}BY+0#6ewl1?qtJjvJFl|-2p`->csVz2|sQSW&0y9%A(U}Vy8!2OP7C0KM zNmjC&3~ibosEUJOwsPK9q9al6s+>P|OJDPhkQPlGpyi1~s z)Ow&q7Zsta3Mkh_spujlx~LwsH|pz*Y-MG6bxAF{dy%n@YWVB!NJev^xdDlg1^yDL 
zI={Y|YA9+fF{Aqxn~TuXH6mf^nf!Wfou&piE29lZk)@PHD+`qc$xPw#c@*o%@l@!`6By?Ayog+n3do-dODl%oi7nWozw+#L^7tZM9nAL%R_EV?e~op*IN^TlvM-iLDufU%65gm;9kW z#%G}vMT#p$alp=B#NZW_qKHQ(Aek&FEr1=Dkh)3HJ0iluyCJqB0YZ zAC-_99Y%#!N3|v}RoNA}<+@y5PHt8Y)|9GO zCLRZT9d`d)u{Xv`d3P}<#4MfFt*z|y$LP=ON^E7aD|0IHN^t2)8b)VY|7C^uZBT^# z6u8RDIgt&lfXhgJCbr$Lt~-DY=uWbIM19m`9qeYh2QSoJQNWotbac&jWzb%B@6m+) zyC~n*1H(Z~(C`&xKfsw7|L&{Xz|EcugSI%Zp^*;O(*u4NkBmj*QDa}IUB1h7c)LS5 z%+-syG33MY$GJE7cIO*nxEuQgE)s`e%TPd?cf(*L{2YeyzamF^WgV#gYZixFd(l+x zdUDurh4ZbyqlwT4RaR)TMY`T)|JK0A@F@xB!%)dUZ+Ku}c=!=^z7HuI*YxFCGJ2A6 zdsItE0A(w|bR#imeEU-jDV52nr7(197n;zU*`_8#dl%KEX-{fqniAs-Q3}4;`HB@T z!p;<03%c|0F1~IXxP?Tt#(|BEbda7t^_PgZCPYl7KLC8rFIE_cd?F33mvU(7Y&g;Z$2*0?-JH5x#Jvbd$WJbrt*6*?XX=Ko zQc(Q*PyAHoRm|HrDL9bbcPPz&8^w#CE`(E=e|3`S`T07;&NTL=Vm7YE{LD3Mtfh(cS!~R%e5aW zk!l zMrNGL?t;E(z^j7~$OE~dnF&lryjqDY{B%`ja#oxsF-@t$6B9L&L6l$1L3;=jGIUur zrnq&vq2JC9^|ze4Km}ug%FrXxOw^HmYG1_;e`D7X_DH9)?-bQt*CSzkwtD&i? zsm5GquB&f0cH*_26{kL?R1@Q}EANnl@(vTDbA>B;`M;0Ob7=m84?tVvMznRVSx0|B zzXRT;*U;~SR{+QOxVCFHqAl!LN54z&ps)IX9rV+UXtRDDy&Nn7%ju=`Qm}+xNY4j6 zga_f*$n}0G;)GC3@-M?_kcNu5cAn*3Q6G7HEM#~Y$}cI}VJa8+h99Ai@v)N z%%b01M=wMc2o!AuAEKn(O%FTD@?~O!ed_2gwlmro8bAf8L!(ixM1`sY6#kM+Lyad- zvL|hgLnGAbOk0A5kvS83BU+9caa&L0z;GTN6t5Eo5TFm(mt4e;K`jPNnzJE0G#M-wER! 
zlLB3;Hy<8B4g}KN8)$;$yMkdCnk774JGt{B?gzBfc|MpskD&n2QF?s(ZXGSU>xa@% z-oP6;ukX~+Hmm6FE?*j@^RzBo55+$uTqjvVJCUiF4`-v&eu6qH3`Yn3P;AFF4Zp(| zSz!^JYvt#{a5&pqLi6GLJIExw3*UYqwEQ7@1m7l&)unm^Tct0nG*E`Tsw|WS*JPHe z73$1I%s?(kEgv@OzJW^u$K$^crzFOn-OzORHTU zKZ4tlKa>O@0a+_#!+;R7Ln7!6SrVBHD1XRK}Na0Qr)>!%4PbSO?jyKj~E9i8I?6;6$t9Nj)D|h5;qQ6ImNV#iTkFpCtL${d% zFOn$6$eW6=nxg8WYR2_NkJZ>)2W>Wd71+N&YG=kaJRrCqE6gm+%%BRxg8L=<4|_I@ z?8G&&FQTeu_&|KphRQBl&KsLTL2ymPzx6H)`ydSu?F8Ea@(qwdG)KqG8j{ zzZ?YQ9L%NX(*NfRq4hPIdzJGmqEQk5hpWbQu0XZ=IQAzicc4g->^ZoD3DzMP?T{c2 zC73QEtW7Kg!Ue+J!Vm=Q}2-f*@(l;W1QJ_dK`g-!J$r~p7O!h}m&aKH% zJV}g;-Nm$cg?JMJa+KmUagO-7_($=t*g9-0wii2q1!5<#ZtN}wB}*h5Bs(R6k{HP` zNxCFgG9>wjxZewm=xIJ+j2O=N=Q^9n=fLY)*@GkHI>%lg#3;2V>AP9tm zB#;GifDV)c6KDc$pch!dS#Swl0XM!72yT1 z0Rm;WUD%E*D~htax!PA~f56>9f%{>Pe?EBd>$BD0&Sd$^*c$KAJ9j=Az0Gnob_6&} zu{leZzW(}yrT=8%W$ezVkJpEO+n2M>H};^sc&W$G_zY;?PrnKH45t7iuE0N!ftz;j z){w}t#p5Q%KZSpa;k#HM*hQlO8>V7VWheO{jDHg@`f4dZ6VAcQX+%wI-Meoaiwb-d zK(E(m0uc#`G%6#yh(!Gyu}FwOSj@f)cmxokm(LNJAS;nL6>Rx`Q%JcBz*`>A_x(>Y zX#sL^=`V(7fJjF;SyiE>b6%lUKwF$W0S)9V0?$zhJ^`h|Q3SA-9}^Q?wZJ+u&+IEa5{SKg;;s{ z@xpR8Wt=7de!u7D82kLmaO0_pv%?8s!CD|?a}c_Kn5vRoWFq8E@IIS|XVZM6BW&seh@5Z*oYhbATVR{mITwTPK@SA6dHy%VCfq)6(LP4%njjaTZpF{HLW!$u z!8>S@5*CpKD?w9UgT}n7vyhNemcenz*ZOfc7H6^7sZo&Pi$C9t%%#`w~O)HwJ~aW9?qYE*GN~mWHSa2 z^Q~Sq7cFs7bq{MkvtGJ3{o>UVU;K~&-c%r>4n=MX92dQi^l)(;ji8)ycSq^)8t~a1 zKt{zGniAP$b5`pKs=cb!jN-D*?S?KzEl7x?Vl@d#F001{P(3XnJ35S(lax(uiRob4 zqa$jA6~W=@anWo{OnO)lb+k6DBZkrE7;h!6^4q63Y_2< zZ=(rGRb`$=&sJzk)76wZKRrjoXtde6St!C_uCHt`vc~!fluRR{(J;ASS;VGIEHN9v zA9e!7zI?z>=|W^uCjG64W5s?KE`vJs!lesxl0X5t$knL6?(^^c)d%21L*q5J6YT>Xoz>5p>yE`5H80KK23# zAMB{{Y^9 zpka?M<*V8oa)1drurFj=EFQacefmm8@Z?8dn|f}s#9UC!6(AJsdKqw;Wt`5JcI2(4 zchiWb{QbB+k#_Wm9Jf4pC-Nwr@X3Gkj_tgGZ-WLgOn1>(hEiL_L)?f2p5;dn z5V}~*O~v?y2y;3EN5sw%310?ZCHZ;S-LJ$_3<4+>Z@~Bu_?3vHsWX*V*RzS8;v51& ziCGB1d=4cN7NmyFc4)E-$A$< zZ7t^fPv;FWw@(7S`7OPc2`bdFb$mI=_ruQcFj&OriKR6zqVg9cLIUcq{`@Hk|1q!S zFThMOJcDR2JG?J}AM+Y`0cynvsgvdzatyhKctAY5edp1SYwj#!iFX!?Z()#vGsJ|U 
zvbbE&>dQ+j^prloLR-$1yMkt#Y+bGvF=IO9yJ<^u^tnu~zOn#;r+kzqKsOWs_-;|w2#T5k}JA#?W{GDAfkaBM#vO{0lh%KO0iK?sL> z#YONrJ`-^X2;lSh3*ak!4)9IDdX088Ye|uk3=xQP(>Ab;T7#xI122^1PM~foWt1jR zrzw0XJi4+jo=uGx6Kus|Iwzhc{)TG*svRcT-6t%BcDw91!g-4;gtL%mC=tQA#K_34 z6AzFo^J=&!AqMW?-{H5-Mp-~22b5M27jWx^hD)Q=&E(M|7ns2Fy9U;G;+^YP8LqWS`a0PSmY(xbts9edy7Xbax6cs3nTtA0tnxc zm|G7h!r6k-7bpkXXLWyI)u5n_{5#nr7Yco!FqYIXB{gML6?(k3+Kli(s{|2g+h_Ag zy4DzoB;_+9f)!zsSckB#3WV=nINH8{XC9)_cE;`Z3u2BE<+&x!dZGGW9KT8id%f;ZhO2i#H-k>|>pq0E`^9MRR5K@*#S t5H*gM6?$RPyy`q6CnqakjfQhXRHG0Xvds`wmw+(2F)^P(xU4MVe*hPMrr!Vn diff --git a/fonts/Kapiler.ttf b/fonts/Kapiler.ttf deleted file mode 100644 index 930f77ec1e9666d12d08cc1a642de3e020d5cbcc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 118964 zcmeFa37jNFl|LSlSyfs0ecwmd(OuQuRb5kk_e^)wGt+b57sJfJ+{1kdGctlG3^IVX zA}s5*h~kO5;vBewxa{J3;Dw@y;0?Os>VP74)&Kh_v!48$jFSW z$cXn|ym;~6i%7;9V_rl$HoS0b{sJE1|IPR%m*Z|>;mQ?j7Cw9IZ@9jLv2BKhYu1gj z5VJD=$eqY{E?v83eAPu8`x!H=WQ^Natm*9P`uD59i}ZgY|LpZ+%QjvxbUD(>*^KF* z+;PD<7cV)n>ORJ@XW{zh9amhQWZT&q#!8RidHe2*_grAubVmVAT;$D?@0f5AEXFP0P1 zi}?M=@cZP2=UlL>HB~4vzR!r?4_tiFWtU%Z@ZOz_UoeL2&WkVIb@2}_{b339Lwnez zj9`iInZ2v;+2$C0j)i2jgR!5ywS0qizPIs2qpW*MCit3|M0auEH}d6;GBfIR$p5MC zDN&M|r#@h&JKW69XL{DlWG&x}S~F=KBF<##jr{G%Wzv1pVf^T3ao)+dvM%;ldd_gN zJo#jjvGJwJBpYTdy-TeF>PFbW`|zu~rMq!TGKrsH6}rQBszLe%%g6zik-mWNDVAXu zu?(L;_#487xTnLt_aOZt?R*)Y??PRkXI;`C5I%_T5rpp`d>5ey;YRhEe#bt5-!4Qr zLp@71{QfY)W`yMkd4vxm#1RtOHOKF+z`K*Uj^OOTxf|#A;(QZ=teq=3qwe&Kxc_FB z(WAcdR)n+g{GE8G0ReTUGi%^X?Kq13f6)8cFY&$$5OC%jaRyAJ*Wvw3aZT|3wVE%D ztIx>az`cuDmH!UUAT8?qecaQrINy)!d3X->XE!3eQw`Ev)#s&AJa+)$9S8(xYU9fU z-GAUd!JS|}C9o?RzX#lXI6r~#Tf)mE%n9yO0>?Y1UQeb0cYzh1nMj2ApqC%V)Yr^w^3-)E!9J$(I+U~3~~kc0V_fKgsZ9e zqW+@cW)eU2d=m~c{8s0h`M07???KzZrN%gpfu)Ejh|^L(U%Hf65$JM&kMT6lOZfoB zV>p)L)|BIw@~IRa;`lGBKSWte)zYVw!g*R0KE~-46rYlbi_`Kcduk~~?fGi8s-^y( zQ&sR#a8mIjjGKQkHkgqa8+Gu0ip|W_m|zxWMr>sk#5QI{Y-cva4rXusjX9YEv5Pqo 
zyG86_E~LH8-FOjw&V$&`yodwLhd9Xmh(j#U_$v#GIKqNRM_CAQjD;I7usDk#POvEA zB#R+Vu{h#1OEmrh98%oEl8Cb`g*eC3h(Sq>KeGaBLEOr+h}&2WagpT_mk>YC+F1c{ z2Wv&#$=VQ?SrKuCl^V~1`?n*mvJS*^L|kK?jT5Y!l@a%_3gTYYg}9GZ5%;q>jX$vg zRzp0~AU=Z)A|7KyjsIcuMZAE`MS3Bdhj^R~ zBVNQt8h>Dm*(l;A>>73n;y1EO5nszLL;NPhzhG}>mm|K8y$>Y^jMEn!>5%x~Rcd>UN{wRAl z;*YWSAikTuxA8dpIJ*_`C)oQCf0EsX_){YOG`qd=WA+*LPl)efcOd>O`)9TbrN7{L0}^>|==k zO~hYjcO!j2`#9nU*e4o4U|(UMMEoH86ymS4Pb28{sy}j@i*BQ5q}Hu_aF-%LHuoY6!CZ1F~r|x#}R*z-Piao zM!PQ|ew6(i;vcXtBmSX?A7l47zQcaR9zgtK_7%jBvj-9Xgnbq96YSp`-)28$4f5E?y8|giCOq*}y77OwX21$}+9Y@S% z1ByYVbvjAG3AsA7PBAEW3Xz`PM87a1)-FxjrOeGX1JmiyGzGBG5iE2B3;F?IA)!3Q zY%&?msF?|^(j#dSH%vB@Ns;j;oiu&Eq{2d{a7D5h^cJ10*9%w>q>TpAW)pylgJ5Ac zn(5GqSj|K{eLlCL5HFiDl9BAr~p4OnguM3Ry5v9sN#y* zU@=+6h1KFPn~g*ldTEAyf`!qnGjXGAQw+9ASP-O5iom}a1ySKR&|~S)i`Yysr(@cD zGj~v)k?6Mx3!*OoMUPqm7AS#WVK$nmGnvf>DvvrA5Q;o;Y1J-e$>vg+UXRzBI6ANb zy3hj_0(d437B-93Y@;I3Dg{Z44dvLZE{nw^>j@S!^k#yEkzk=11YHP}C<4D0 zCsh#dk|dkiMu%R+7K`;2@-31Jl>o_}goXYjEG#-55fESj5-?jq5EhGoz7=V!#fs9! 
zrCqy}C5OidSm3p03GC7+86<;E2P(i1%vM>_>-8qP&1Q92tv0KKu4}W{t#;teZu8h| zW}QK&Gsx5D%Mw6kvgs|7Np_eNhe2mFfMG~7Rl{NwZML}u84xTS7CRjV5!Wte+hCBbdc6T981x`BvlX4mY_%EbRn)Q22)lOa)Gl?B(`RA^ z0~!jfgK#B9GT3zh8DL?t60PaY4!hmvpn9QIM!Ur!E*y5B-EJXRDDn*X61tt)Zm@!4 zoo1s`(HRvL7DmvSNwnGS6>U~w;h@7HV!KVverdka%$L0Qt=(b)EDQ>PKr#@0A)zR; z4X{AX6g|kyVsn5*Y<2;C2hwPSL%VcompaMiCs^2kY77SkldQ;q1$qULZn0rN)f>!C zhr{lqdZAS&ht(-AoDM(ARADhgK42k(J=*{am&NE(bS4!RRwJNo7Hx6>s8l!(r`1V^ zB4USK&7L7&^84*}G-V1bER(RXV-y7%6>6Od3%f%=-{}M_P`GyK)-H9jCunAhVt3e4 zU9)1AAqzP5=oLV^)ecw~3>LT3>2OoMB=Ae8&FydlI&N3c>9pwqb)#t6OY;`Jj z$)fXEO&+7(Y&3x1$~F_AZ4qsD21J_)7H*rH4x@;j4mEp*d?^@oIGi>cID-N{j`3Qt zs<1E`Wd{Tbm(vO90VLpXf%u$G0e!dIW^*}R$P<@d?NTTEA{M{`twD7$q{$}P=+fiL z;jr2rB&H}=h}}*#`=$A&X1)}OIGt|0onQezPOz}4uz1 zFyy*ic9#dF=W<)Gnw3Io(vXYg=lE<8az&b&F*l?I*dei zzt8LOhd!?tL#_wt z^MlB|J{!HtkF?+G_iL9S?NYCcWNgfA_M)v|fo8kTqBHvypWE&6I9whQQ%sIvAm9)A z{XU#&K!Vq~0%ou5G=yEYu*G0Elk}r=*_)t)(%+s0$#5SUg*$Sb(VnQ_jtWtr`xO30~XG3C>V&KUVa%} zH{c2T!?*~Cv!RgJ2$Hwzr_Wa)!Ry>Xi(hsrQMWy6H8?B+79J3+#~Bg_fQo})5%GlS zu!=b3557WvNX}*hfsoe=SeUIk%sO;tw+aggY(WSVpZb{s%`kmFU}4dFBi5koRuW!k!lt;an4{_SK0w;%j))^Knhs*o#e7jZY$A>Z z)$GW$`B7bKD{A8RW3FYfVH`E;EnXA@h7Y+S41ppVi9q}ah5Vry2qGMD(W~NdpDz}U z#k9+mc4^S3+FZjxs_zR~WE}g<_F#I2LaM zEKKPBcEj}fMxD{3_eHHCoyVB;Ig@t40&_II-tYAI{2sN<1r!$!6D;Ds7#%hdM?;Zm z^6grFRM*xP4r{Qm>uKU?@oBKI8zK;rV$mqXk1$9%4p>B@E}$)(NcjEnNIb4xrnO6h zK3n!Mn=KMUOFbUD$KWv76Q(Gj9rXpHdIQ8XUn-f1r{nQ>ROgL(!6F?@(cutrGMadW z{G_h4GZssRLYQ{h9Ds#MZwsIp41NxW5{HDDOe7#N$AG>x`dd8dr8lLKP9xSX^V+3i zXzTVfha-`S#q>U(!*6gKoEdX68cigEk%U3PNEFPqWYYO`I-S(}Q~q=$m&~P8sa&qR zr6p>CoZ>c4pKsPf=nQ8Z34PGq8uqoijeaNQB?d#(7m7xLE#j!6>~tE$qR&Tibht#^ zlFUq#@7D5L^tDhv5sO} zp%c~086t&nYpOlhUdR{P+lPzAG)>Tars?wm3zw2;b7%E2OC=Gi_$*ywM~=n)AEak;o*F~m`E4` z2Jxd%NEg~c5UoW4{m#x*s=c+nUAvs4U7D5d`BCQa6o6_&B}2j?@Cy zsMiI}h_NM-Ot&OD#4%c;HWMtmGG#jaBJOBypC&(`<#!n7&u?q%$YwF`^aK>}2csvg z!onXgm)ve|XGc54k76-ftbia&9nlir)z#I~QYlp`+GVeHX*TvRjxnFF*j^|Y;xS*` z6g2re?WKIatu2*nGm*5LoYPh5>_)XpN}?mtnVVCZ)7joJXU^iTt~^c9f|lv?Z3dgy 
z)Kc*k4Jli1ORP6&jr%>&otSd5bT*gj634<$YBLy~(w&<_M?l0~rOGtwg46w{DMJqCnU^jr4sabIy&+lwR}F`*_G(TySlq`xmstfrdr*Tk?5d$M;IA2T)Ew$@cD1)xD!F3CWWh+38|v@vo?Gkgt{PivneNhHb+Efu z8ysBU-{0;Ixm{7~^!X0O5iqy)g}RiyW27xL618W-K4?bFrBuFD%Jqw5HHxd%AnhCH zmImpFh`7JnJ57F6%kMX?TUV>~x3^oZu0Yh}u)EBGHh>h+Ac8*0Ny#uwXF_(|zEeT6>uDh?R+S604^jNK- zV5m4YJa1?LUf*YG9V`r0#`?#G`Ul6(ID2HI>Wz9m3H$W6{noH5OfqI`{7nlg!tNUv@ksx?Y4Q_V{)lPYw*LN+Y87*>aKh|z zdMx2innBs^@q}$C7>bV0o0kZM1_rAGV=xI14Ht&+t_2Iay2gga#jm zAjprZ{xq<{3f~=gs2JdxLC1uaBQ8Y5;jE(`&GX5;Q&XyaS>{lA!hcH-7as-oWfSbm z7TA+*up>KQKX$=x?18=52Rm^9_Tdoh!V%OY20L&9_TLojz8Tniv#|5#Vc%_qUAG8( zZaeI_ov`0lV7INpUR#5mwg>jvKGSQb76xWhV69}HrFxOS{J~^Iu6_FV%Sud z!j`(6t$_V>73`*KU@u(oMPx*7J+EwF=bLxZk?4e?E|7v2mj->tCI{S*6V z*yi33%iM=yh5HxS;O>Ie?W3@{eH>P{Ps7Ic8Q9bAf$i*HVLAIEY*4Uz!gjhF7AOXr z-+8c+GFUwKAifsXLI!*2ldv%DMC#ZiTXZ}M&#!O89>!oDy&iVbL$H9p1s2Z3uz0=| zR!jz~=6hhHd^aq4beu0@20N^d-;4BWz)%ky!L1FLxEl7vPoZTEE^~#u;Ez|~{qS)Z z=j-@peh%NoFX2~17jBfzvQ^F}y~%Jgo=him$^PV6^3LQZ)9$n{9ZK(NG{k#sz;Fib z{1hPfMJ{mzxAGv$?Bi$fg?uI7$j|0G`GtJH%%EUX%k(6J$tVQST(S>k{>x;UJ5eTw z?VdHBYJ3^-ml|q|@S+C!oM}g6tD2a2aN>c9`zO9U@sTHwJ^95a?|t$MPk!#n4?cP8 zlYLKiJ@Jhv9(rQa69+ZwUoL*3#*0Ba%Rwg$G_n=A*)AY@C z2>uQBzzOz0=q!Ka99F?PY=RBg3twb^;u1T-W%eA`2~0l&yWjI%&wd8G12 z{=yaD`xmf2{svaZ2Vi5J#oGUcSUUnI%_T>eY30Vxa{ks$M|k7qYwzd5T z51ZR$rkIw}W0!Q+QFp@eg_5^xr`Iw$auzO*EHkDOb% zan;6q-Z&PyXLxK=B$Z0ezvupy8}GS)ERx!^2~Qa&Yl<4bVP8q)^XIV|cfzv}Dc3`KW1XQ#)K4FpeBU4~LU#kskw76^Sn+aS8hNa*QGwbY)}l z%NV`h3ch>t4*N2GcKSmS28b&#Y&3s{vh{dODdI&cBv6_c<5dnJ4=*1&bVxJ)pmZPJ zs$h)kJ^+;ntN`rZ&;nt8=-)TU?kB~#? 
zMp%Hb9$`1a6$r*l(A4XZcn^Yf3FzN>gbh57@yZ}%5dsfTCxCJs5`+YqLWgNh)s=F| zDOW0=tIg@@uFc6|mZ;R`@@f}!&Iai13_h90xa4_&bC3_w()F zcI69+m^El3b0t-dRa=ITMh)l(YE*g-}I2iEn#x*k~91M7NVT@TXHgLL#D9X&`#57N#(^&5K$mf#%Q(&fTpf8_JJdzUSpQ|syZ#Ot}eR$H>9RvR6?d2{_QXdEs5S~X61ZP0Pk3fIj2n!I_BkV@F0zrFi4zJBMiM|A) zFExq21fnm2=u05_5{SM8qA!8yOCb6Zh`t1(FM;SwAo>!Bz67E#f#^#h`cjkVXD&q4 z;V|HSJA&5NXm!pgUOS4{j^eeWcP5ir0?fwWE0LC|)~?*N!${ zJ98n_*A|0(mr?JK&^s77X#w1GP!Iry56nD?C>9`97AYN4L8Qu+K%NKm8lg8yzXziP zk+(|VRYCjB&M?z6zszt`Oa0$FNMGL3nNB3q?>76rl}^9US}b<-_H>l)n<39)mp*3y zYbsG}#m|#%#Y9Sm76INz#i7AsYins>u)cPtT$LZ6kPb`#f-xk|&gLIGjt;NGB$*nY zL25{(<|7qBO7sW|sLBG8v4E;9fRF`LWdT)LKvfoyrUj&F0cl!5nii0z1*B;KX<9&< zmL_S=T!==R7LcZ;NtzMRNTf+45zt5kG!g-gL_i}E&`1O{5&?}wKqC>*NCY$z0gXgJ zBN5O@1T+!>jYOI>GIJrQ0|_n}v^Im*X3*LUTAM*@Gia??UxC(U(Ao@In?Y+cXl(|q z&7idzv^LXh?aYN}tre?d(7T*-Fzbnv_;odo>! z=b6iU@Gls!Mssn_6+A{yHkofvcaLDgDvd(AiSs}=Zg$rKcCO%G+`$ji=dPJ|L8YTu z3`P6_m`E~~aM|xm+d}C~b*Rv4S7Q9#AqhtMLU7mXE*xJJ4tBQpE}7G{{EUwF@!rMd zy@RQM-)_yqOHOgy!kG|M)vnCyzKeVOp>Q;wZ4F25PJ_a?+k!S**d9){`U8n%pE>R* z)-Q@la4c!@Z!LGME@o1RSY&H;QL&iG^qeuLkgA>A-CAgiM`gHhIi0ycXCcr6?H2KS zj&&w~VoRk%;P*Zf_jI_WD*}*Q^m52H`D!31=OzB9cfC_Q-*}q;Nctk^>3zp(2qdXm zrE?E3?*ZmLz`O^T_W<)AVBP~d_W<)AVBQ1Fdw_WlFz*58J;1yNnD;a>KXV}(<~^Wu z4=@i=HH12=GLRK1o4~bxlCku3SA7_4)!{bauks(ge*3Qe!QP&oJFdOx%+1|UYM!;_ z{6M9=dex4z)~uRS%@vD_7jM|OXuO2=e;5m&=8uWCe(*TNa~-A|6?J zp`ddq=v)ejQ9WhGGLAQCyH0_8WOop@JeS!)P`yTJ|_^b%2hf%V!OhY=(4!9Yr~^IfK#w}mqYm__13l_Mk2=t!4)myN(&Nm95IrUV zZxX(}okwZ(KQTh=-wGyR1rxA>30T1dtY89GFaax=fE7%@3MOC$6R?5_SiuCWU;)v+ZzI_+I?QIvHd+vqVjT;Xh+^})uhJ#ma+L+zG=Pif!Zr{H5 z&<%UHgGRbQ7Y_m-dKRJAPV!EYj+(Q}as^^LsPVzvdGC-RjuXw`chZ-nyP>1KA9V&6 zO)3^mz#`TMt5`GvizZ;v1T30>MH8@S0v1icq6t_u0gEPJ(F81-fJGCqXaW{Zz@n*% z#hD8sET(`(H?Zgi7BRmE5KRDQ7^!0zI^AL#h1m@OHmP+2n*KETPBx{EqAfIy5u<1Z zN&%JZ^%xyf(w8P)*S;cCICBn{7B4zy-|E%59cNx~!J0Gczw6u4#(yLG3Lal~>JvNG zRy(`r?6`R0VE4Q|2fmOU@B^X*tHy+Mf%Fj!==Z4-mJbLOH17knd`)Qi04*P&Q zfR+!?@&Q^tK+6Yc`2Z~+pydO!e1Mh@(DDIVK0wRYgx1W35VSIYR+WTcVjP`7a{`bE 
z31TC~0+Ai=1AtJW0DyViaS5h~F{#I2B^EuzA@vq{UgOq)S)Oz!8oQok+i4y@yxqV0 z`shcOgiI@!?F&kGEe~#5zVEUP?#jiZ=We(#U0&Xny(qozn9(lJO>}eXidbAXkV)sv zt0J+vxpXNV4CSh8O1Wyp1&QQO(kzC1G)y7>suLVc<@8{~ z)ORwjlqUd1wz*Pj5vLc5x8pLQj1dl);P#LYNN~2zo z6tHVUY8sX+m~K8d#K9mY1vBLoP+qzc$EBC9Seebun>W6C{-V~7OmgszA3ty7hB5~SUgaww8bB&+>n7oIV9pnYpLpBo)n8jUuAtnv`>TOHu62c)U%PUX(o zyoW~gNcIQyUHm(;RXPtW8ho9`+V#@MXsl%;(z@dqYjiM|5`AE-(KK+F_mNzPdvlSR z)E@uiN8^##X6zMw*b6?4&ZCZ6UT{M%_^=mz*b6@F1t0c;4|~Cfz2L)M@L@0buoryT z3qI@xANGO|d%=gjO+GwxA=*SUicu&ENh1nLBMM0)3P~dhNh1nLBMM0)3P~dhNh1nL zBMM0)3P~dhNh1nLBifWSW-dgNG@_6+qG;eTS_aMLf_WT6>(tZ{HW!`ajAnOGJ3$4V zASlQJ`YF*J=%+>ok74msHT6=diUvNsL&Sjv2*6*IA=S>+lNXxh6c;6 zt(`*`tXaLfwPLgxdwcs9FY4>-vpItuzSXPO}z)ziR5O$nrrWK)@g4P}*Ys=RS-8IpSKEtX#Ef#krf-cY|TIY`f~<)-7AM z9$Y;>R%%aW`rq^mu&(Y(XCyK(xNcql;M2sf&R7};zgo0?`{MbDL~UU7!m+`@_N}N) z25WUM<^Lgg(4EAAFegI&UNecGAW0HGjR#Tf43V3L40YPmbK0~gf7clmeuSZ&9NBs&cNC!#U?SM<2O$&?tiAt?^f^@{w|CO z2~C`npz$X?KiS}p0nlm+#DG$j0Js14H=!>bTXF4!u@CGPb$W&;`K3UK0-56E4DsX~ znItF7LJ@d!0>?ACK^j`{jto2f-8#Hm2qNO$*w2UQkiy&{kH0{l`{hh8+(6{e*XR=te1vpgkGS911%hV5CxnNbYcZ}rfCvJ2on;tky9%|AHpJpO$hrC z4j|l!a2vwi2uBefMtBV2X@nOLG_%aig^-kNg`9wngt-J!3K?b+s9p@|KBATa24K?L zkhDxd67s;Qs<@Mr2vx;Kio4TjiFf4vzGNcpN!UU|bJwpQnwwj+XxHw=<7r368>4jq zaHOP3H?3N<+`PI}3I&`-pT%FE*U{eIId@f0Z*T9)!L&Vy#WLP}?t!UvLUUsbaUgyih#v>y$AS29 zAbuQ(9|z*cn~0ye5N%jqi|VgMP1d6NYf=5RsQy}1e=VxN7BE-~7_0>h)&d4=0fV)G z!CJszEnu*=34@sn(O|F^FjzYYg9~T~J`+gWEHJbespTR?I$(8J?nVmQdd$#90JIW- zRuVl$I-zoWkYEjSGn&OhW9aApK-CxLWR%m46&WAV;S|-d7y^741)y6L`nR48rood% zVBA24FHH(%F@CFq2ES-)-j0R2SmM;xo=#zjmgFm$WX_GIXl>7mq+2JM{k~MHGSrd` z_=~LrE0Y0(!KC=Sg$)ItQ77^0ob?;_?AfrPYtEc1QkQQjFNyZms%LM#>UCSTKDsFE z#X6{tP$=NbW?DA$=-`N?vs(g%Tzc+6GQ&|}nD}5NuB!sSPL{}!?Mp6Pp`v$Nw zXy8&NUhS#MILWO3<@)i@9_W2f{c-*QeuXqT@$94Z7o-nQY{l=WtfF)`C?>55{6yafn#}J-IcmYALUmxN{2%8Z0Asj%s z5#ct3yAh5eJdE%d!qW&ZAZQhsxe#KpK*Xqzk5=5$DzRKPQtbe-|b`ioK|HBbsABeAX)Z?boB zTPo0swR?%yKz;oWP9*!qSh8J%skS9ONq#Fm7if*UpQ_(Z 
zk0pECg7yDGU5SQhr8-%gU_lq;2zra!GMGxA&1b7umE8_!fu2s5H#-L1^1v?3qTk`l~fSI zcn>jBj7sSaz+|peP04WNNZSFgI4*qr%7s^LsnxbzwUFQP)z(|<4-cNVqN8KQd4qgA zr@IRdZmHFeh5pvD;@m;XDy!oNldr*C(gRroS}c;|NM|IdHw5CrV5m}W2+0u0AOz|S zfp|h7o)Cy91mX#SctRka5QrxP;t7FxLLivCW%+%&J}wG2G3p5ffs`hNG}3DHdZCY zzRClD7(&NM+9o17j#(53bD)YEQ7KN9B8v8Es5S~ZS z=~MOZ)xTS*@DgvY@Ws#mu6|UyyZ+(%F^9<7>A%|K7r?5tX1vpWH(fWVGr@_liiLe zTveCBTt&RsMF1qGocUpKS#PTT0`F~Iu(ei6^(`&(D=PI(wXF+U1y5--q`%-D3MhGi z2I0wC5x|Jh6a*vMPcsy~Xg?V+)fAS}ixlQiT*tF`TSc(1T|OnEEUBmn}A=#%J+nF@a&pz;>dhh#9;d(iK! zn9bgJggH?;s;Cn*NS9Hh$x{I{sJC$|`4NuOu{rL}lVB>5hq?N*xntNvR333;}R$JSXp!K8rbM zCtG-gl~HgUp^P^*IZhn!cZr%1cWDzTk{>EWi=AkT%44y^FR(<|(~H5c8}je~*F*(7 zhf#U11m$f9ym~PnLhpeAk?&u4@T~4)U-{ttJHYoTb=CZ4cfX@KHd?^=7cPH8U%5&n z043&BXw3999}o#tL!}exNx;>K8RQdcTl~}})K`V97&ZMEHT}&|(~sfKk5SW)QPYo6 z(~nWpk5SW)QPYo6(~nWpk5SW)QPYo6(~nWp&z?qj0YMYFW-f$AO+SDKu@nSIcp^$t zQR@`f*>=lkvHpvlp~xJ+;xLttvgOJaqjnLWr733{aUJIrOT`&OMn*YAHB05hW3C@c;=VrIA0= zaWqQpwntbVEjlT$k$Wi(Y?Cq?HZz?f;K%g=e&(+#lcGJ5*9ZAoXB;>w*k|&7XQ(aD zcN1;BJf&p|L1(Cx2feAX+X=wA%CCe!E$T^(cMj^Lx&{dNz7}?EviNlf5|=;&9fH5g zld0%rDyF4SVf-RPBta9AnLW@BA^*JycOl%1@DReI2){yj9zhemW-de%y_#-pP*4ME zwa3NU3G`$jnR*&2q>qCfo6Lg-6U?|VtdQm>@;F8alF27|$K<>P{=z5cElnA^cPOLhX&y92u=Tqt6FBsnymLWQwB8{jT0^%4nDAKS*@)zVB$m`@uBYnUAMQMMz zJn2_%)yLG4n* z*h%A*5Ur5Jo(7yhwC>iy!FiSbU5Az}J+!O8GH-D3*13a&?_0|a^}jAVw6ni|=b>ek zUi-em!MW77MngUuG;YHDnh0E#;HI`nogyh{8BIe>7%Wqergqhz<6C*x^L$bLYU%FE z#4n{tWnw)lNc^eM&^?7RotPEYP%8P`IZ-lcQ%K3Lx;_!ZCapWIMv!r~Yu7Hk2}=d> zF7A<2vPZm7P6ecQe7-)P@2o%1x4!RN{Jwwv4DWglZ-2w8`XAoG2kKvyK8{*oE#Pqd zEA>B$+DHd}+QgqcfckjA)u&3Rv!q@V3m3o~^mHH>@pCs4_T9W%v z7c1*gZL7?}ipO;!SLycw_F{#$P60ebaySOK}c0?Asb z)NiZ*oBBTKT0y&{$*0vtszgMCH@QQik8v^*qh0(j^e`7y~ zPq0FBN?K(^KT=78W`Xh}D3v_t)KbZpj4%u@F)(T$!q4x&|MuH;(noU>AJNKtIe#!c z@8P^u|8omBS>VS!vGKHbSB{ArvVasZ2~xll()9HHrU%>7M}!(($PtAv6e)tctn|xzv*MTk>t*G{Lfpr`QAxuZV48T^Q-aH z>3(y10-*4lYr$La9NzI>={h_&)#uHFGOt{lT*IH=@87SM2Ua+%Z=BUP&gvUy^^LRo z##w!1N}ej$&gvUAd1+SPIIC}*)i-LkxLJK8R0gy9#z|fEtiEwp-w5SRAHpJpO$hrC 
z4j|l!a2oh-;v-(Dua%c68v--yW zWBSIg!2fq4kCRUZsnAH@NDA4e&6m=o5GBPprD=;2tTH6iMjVRsINYP+F!#mb9uPh&N3eGZ&(nHsbKHio>p^ns3#$ zMQXaavWU#OBdFR4s!1N@r0^$mH7t!#UKVhN40mMAE+S24OiH&S-H!AK(gR2jXz3xO zhdQCMB%h@kIf#?jKCMiwHQg_p%fbBtpIAVl2!(9(*R9=;pDnQJ2R2T5$&o(=2=QN>42VTVx7`WtW&!D2m+q(z?s%{kjMFmFbBPq z9#lG+hbQL|ZO}$g-9#JM*g^Hmt${>TG*7OZpCkqN`--$iEe3c&2#O+_g5UaHlgkFI zbKo6VZySue@ckEjTE`L~G69v~yBD@VzDYj(42#j}#(w)r&J%GJ&AFr(JFmwR&bYnB zWg$8-`z`T=l#GNuep?W9lncjnQaqLjl1o-bvh$j>6aF3!G^XhwC80qa4)Vg&n3j}w zS2d^PfPqV1coZ4kMu!))SZcN8GqIq<7Kli8>@$ongebv8k}E!sIqoJJBHPp%SaMg&>0znJzS20KLltyMaervpTs`Mn3O#&;oa-oBZzi_{59SHAZ|&gg(TC@BZLx z%7UJ~(rW2;tY9~&{&5U8J-h+ko$NbJo|QyeB?{V7Ni)jMya~$xiJer`N;yC~4v-BQ zQ)q1|Rg}!zboS#+4l0z6BONCiqs~e8dM8>&o2IFzD%DAvJa5QhpG;M>c^H|@Xu||; zQz-bDW@!HcZM6eFm0HMg-R*7X|6n~WB3;kTKh5dSU4r9Wy>$DMM)>oCbJwgN9I6Zs z;?}+;O9YI$+zR_yh^-*+R&~{oYDgsbhO`>^IyFllSu&5qg^r6Q87D17KnEVB7Lt7} zh|2cB;+TiUC{OF3RM%Lz$_H@wD(TG=%jH~-@7!^V<{Eoo-FnXT^%pDUKjj8)*|~%K zV%M$TKLGm=XpF1-OgONS)I5NU9KwhL*fM^;iR9=WdPV5)fbD@VOgRjTeOykwrC4{(Pgt*u`@b-cxZP;vIK(k zg|KqCQztE7fh|+#gljIJ&s7W8bSqaF%J}ROJ~V`H@6b1kY{Tu6!N5z=qGHP9vs~p^ z#bT+%D(1XH>>GjxeiiVKvYSp{V@=~Tkr9o@bSA?tt=*YQQbAgWP4-Rl$f7MxPOeua z$9VdvHx2hT{M<=D$C;>-{EmpQ$O}J!GdY{lN;tB)N603s?ur3u)X0(<5jJBVU_pVu zvGpA|>R;y_|M|=Reu#J0@B0vr*lj8f#5f{`kp$G)?fi5oaP0;w#lC(Pno;x4wFKYfI)04!d^HP9;8HMtC57|=H7(PLjFA(3Th^ShLm53e$Hply!T zRk07A_9J*Fs=wiH&jteykNeq7C}a<&c#F=gWU|>>{avpgT*(z=hJu-I%`1emPJBWN z8{K#Fox27H8+bev%6w~XKAg31hOgj6d{G&XhP@ftCz+L8Pj@zZ^%`X0$vMeVcNSkN za%8(}+3dc#cn)_f*^cfkZTd~LEcqaNjbmg^z;8$ORi}Yd?cUT^jc7md(bHEhHrai1 zCF6%|^l3tT&j_Er+1E^)%@)aL#1{>)Q#-!ghQV7+Q;flteE&|!AIIL8KA>?vzn8XX z)6qVCKo!>7!JJR2VWenf%?Ql}X5MJ;5c1!Pa2LY82oE7VitsCh=MgkFr(!(iZaHt+(xuDJyXBTG!y_ZZTW((w@fbWHLw69pcW7EnNU6uCsFx6nEUmI8<&3n&RradTboJEyGf zO{;5sb(8(ci1l9mY_()RPCNZ!qCptL@F3O-9Ys+`DMv zhz(ly0T-s<<#L7|V7icNy-`#EViBc)?-OGc0ncNE8Hz{N>GF#z!I$m7 zyY|eR*Wb5$$6M6?+qZiUKMOmBm)RBg`bR-(n_vSNKoZ6ek zTut)ut0khfQ};j<&Tzt)OY#VLPztlpVUH#U%d&GwP$%&KB=#dDoTBCSNfk&j^-a1WPc2B^bdH 
zj9>{yummIciV-k1g0C3CSB&5*M(`CQ_=*vH1>2CQ8!gXV2=Nt~GODv%H7!<%#n51N z^rCT$^h`j`>6>a!S;=ppeX-M>4rhyZS0tRvC*sLcuB$UsxU5tEDSm@1U^M#dqb*qI z;Pn>!=3mtCzD%W%{v$72PTG#)c1;;n5(d%3$KQ*rq4kyvBJky~cthvMN7*T>98 z?zLJIx`bKr*lpfu(CN?n+yQ)@T^G!HJ$Zwu^K?Frqlq<1{=9!bZE{9!8(_Qn1)!h^ zMl~AIXgL(=##AjI>3Lo%Gmbg%y=kMvNiiS5XDr31-YFJkHukcMz-3+PK3~*@2~5Ph zR3~1tcIi&wedOOs=o$7>4;har`elRZn?I+vtliDWMli!-q}+~bE^ zEG@(26|W`ssXC5Vlvhw&F_oj;lGT*(6BhSKB&2(Q9~RQ|baIOGPY6Lp6<7GXgruU$ zDGE>Lxyv=q_{n5n8)@)02@s{4eHZQv;FeuME1qZ>1e7dy3-YB``a zz~tiXYc52gwS`2NW=MC~gF@^0w97MAsrj&04SEJhR#?#ZF511a{+*>I{z&4-XGKOn zk`P;2)_={j$@+`+{crfN^Rk1W1*-RYt=_HdM@Lv2il==M+oq|v;DagfL1>3m`6>lA zkOCh}fe)s@2UFmKDe%D*_+ScrFaJU<%;E zR0;qz6If71C#qN^(5N6$g;6~~L;K0m;thfx=`d-Ri77zB2fE zuI*X;toL!+!=JW03?jc*P%JgnGf4;8s@m2Lv30VxQ>4(wsoXa}Lw0}|u>T4BQ%~5I(FP|&sg0zajib~U%NE0Ph`K|2~lJaDfj7h>|Ozy28uAjqO-qU;F zvwS`useb`00cfibt~@HaCjL!&Tr3Ls-&6Ko7=ceITMWii%cjpyP#>SU-eAf~{-rfv zm%_@3Ddo%OzSFzyY_X?l^BsP`CN17U-zAv=He9AjK8d&B6=c~VRT=i12^da)@9%`A z$7OPO!zr@#_*z_=sb>ll@1J7z(b>(y>;oPl@D)a#XzvKJ3^Z$W9Qz+}u|--FZ^Y76 z^NbtkH0hSH=tsq)ro3p*g__EY=Jn|{u0LmoqRZ$cS;o9iO;c={0z2u~Qx#p43NY+T zN4!nc`y5g4c6Q@2Z0+3AoPD%l_R-RueYBuqEofQ`Mu--S5G@!XS};PiV1#JF2+@KO zq6H&F3r2_*j1Vo%wuMv*EvzI)Mg3&TsHFfST7g8|k@ok4As&=X+V@lTz>zdnW>c3F z&~^|~wKghTPTwY{e#mWwrNQOIw|pI=I;$lRFyp)QK0m(3?4PoSj`Tm6#=snb*lD$y z@zHF9L9b3?f{_?xOF?V8J0Uj*AY1OEC55Dhfh~_Fx$5essb(rK?$IWws>nbKWl5!P zz?q~{QtO+=ZZg=d3P4exvLV-p9J24z(_zp<7Hj*ed36^c?!oFQb$-Xt#fLzu8v2 zmZVx8xHbknbqVLp8)+TFgYQMS3*labhY%h`_!YwQ2%2tl=0Y?*LvuGSA(mtymO#s( ziX|BkT?Ry#0nueZbQus`21J(u(Pcn%84z6tM3({4Wk7Tph$R__B^ih%+A>lT5+Q_E zgg%5t2%8Z0Asj#;u_S|`S>3WZh&!|e7&uoPT*U;4(ZuXHu&SozjUZvSibP2z-V1q(L80bvtLMx}7%E+#u>p8Xh;=LZ2Lf z(1aF{&;*?hZ6{1$ZD{g-;`l8<5q6}(J|g6eCpMo^U9zyMA5JqJv?a}4N$gXDEotl$ zwrdMW-uYLR`-lIJtclOXqRrzBGUcLE+PvtrHmmJhOzoRDrG3X>AEapq znJZL-(ceY0!gw)dg4gVduac1GcwHu!5&teXJp77n!o$yQ4@6!aG^B4TIz|5yM?kWF zQa{Gdk(dw8!&kjG!T;kF=&XEM=&T}^d$Jezq8F<>2Q~Y%>cu2T|6Mf?iksICsPOZ@;_Vu!1-d3T$NMDDby>+?(a~heF=$8S( 
zJ!$DlQKZDy<}@c&w@#%^g2??)RhrX0&4vQV3at8HqdxvFuul3Y35=EaU~3u7!&^KE#sZ64h zMzoH0ds9a~b%SGd&nnd^QH*&s!F%)QiMOT}9s=n7k_lYN;a z`I9R-RC^6DLY~Jn?RImFk!Ono2t|8cP_@uDJTNVb$qoM!KUlxxnflZ9KmHb}>QiC{)PC)BNEa(WN4pV0f zG=m_fs;aykc$j7i2`IyoC`SFpQ`4yt`zT}2##)}fK-ZioV1GYP-Y3oLNa5(fE`IgL zcsgC?UmK}(c9uu$-JkzFuccC*$bYK7kKg+HmhIa+Djg^KP zD-AVP8fvUG)L3cg!qU)%rJ)N;Ll>5YE-VdQSQ@&pG<0FAM2+>b?qpv64&znb;+pw$K9}mvjZX=8UfIAFOn=4-dDu6H=vQQlK04 z)yBYCd&XUZ6%^2uI#)?Pw3u~~ZwRMNBd5^aa`S$oN2m~B0c@B)J7&Ngc=5o6q} zy@Uj68sn$27tPvBu#iMuJ2Go8QRfM>_L5n9$*jGEEGM(}l4)&0Q}iXIAD^|COtBHo z+Dm5bB`^K7hcuJDM8+pcZsgbVLy*?Ur{OMgNTV;nXr2SF#d;TYov!NLouq~Av@s32 zLz6cL>D*vCfrrcK-=sYKd#C(*TG#Vw`T`|+=wVk3*Oc^$+&hd$8*PtavchZjD^oA< zl35$jtPKcXCYZGW&DwxwZ9ua&AYlW-n1F$yPmaK57=^3*mY260s;=7V*Bdc-k@b-L ze`yJzY6UszAxNvm$s>R^q@l%uF`UVpJ4@EnQ>=-vWH3F=(ztRON5a#rkH4ALlY;yx z*x&KpJ=h5v&*{Dgp9w#_ao`nA+rrUewE;9oUG`23mB^=nRugs#V`33zz)oQ;>O#Z2 z;5^r>-2vQaKcXyPN5;Q^fL$w6Z6ZbAU9BLc8UlMY7r`F-fN*^zk}Q<*6;s-KO6ECLw8JiRkY=$ize!!@K!y( zDs#Ke73c|tB&qD}U3ul4eM?t1T`bmfeF?kMHQg*sWp{Avb>+UUk)D8?K7ct3BeJI6R^uySs%aY2!DW*ZEeZB2t_8w{FlFle^HpxFk4W*ZEeZ7^uI!Jr9| z7U4F8yAh5eJdE%d!qW&ZAZW_!nF}#q(%jUWcFt)MF@14vnhU@}#n zOjRaRb0$+gT1s7QI6xLzaxCmcXR83lw2qs;bVch<)Nj6$&nJ2xS{%e0!>Z`49LpzP z?kiW-m-)(7swbn+Y5;hGw$T@^!sXFI{aceCx4LoT*rB?9^3zup%J5{$@%njJjNQr4 z5#B5GwX6JCHP-yr)#jU{U87n0+7;c2b>)1o|Ah45scX7sX$BLwD-cfG@@JN2UY2H> zV|mB_Gh_Mm%L9ZBU%D1N_5W+{O`zkt&O6Unltf9iBwHf1F*Zy~BuD~OVG}`+RJ>pz zhyqwB6o>){I-XDxwKz2CrYOfY*5-yKSY1FB6Rg7xOJhv|RUE_;acK8Urjuc8PwaSd zk~qWTWIP_m$#k4PnY3ff_x}|jkrZhsoy|!@I`#44)vfpL{r~R0@4L&p_kQz2K5R9B z-ksJvq3^!wqoqx?M3t3u_S&*{3gcz?l^;Lj`v3m+8?N8{@dK_+l+N^3ziivZy{G*B z(_LR{G~8`1huik6x@)-G*L?Ux+hgmLrC4v7F3;ZAe#Lre;@$WD@88r&l2@Htu52tW z|Kw|c>K=cWwO^#mKla)cmRJ5o*HP2`|1GX(BUU%(IlV0s-`h=j&N{W?v!5t)@t4M2 ze|S@#`{!6s#JT3$;r!5IbL)CT>S}ZC`0@8$Ph7LNWGo%dwN>7`gj?nP+i$K)SqkU+ zOQ(31`W5XBFnbHje##q$+H29V+1P70Wj>j7&!$zbzYKnf>;8SiFa9T|*!hLKUjNCZ z>u2fm^1bc)ze;@WckHbgE%KuFi8>qO>v3pL+U3LtMVYIu5=GTzp9EU;E-)l^@leU1IA_b49|+stiCaf&a{sx^V19aP-9 
zLZPHV*Sc?7?2^8j^w)oFQKU@tJMGR7tbwcj6oo)JeEV&U_We(M!2WyU@kedp+x~|h z`oN>sC;iZ))-!F_P=E7}?|H}?Gg`kg)!qob`wEtE6*Xs8nRl~T-FBys{FJ{vk7|+V z4}3riM!R0P+aIeQy#L{cAAH~eTYOimNZrC|x1Rpqj%QvO{Loj>fALOxc4{4z-CE6r z#Yy6voO|}zJCbXi3Uw@KN~*F$yhz7oegE?Mzj}My7U!{ib8`+_-U(Q}HF_mbIY={~ zm4lRXGUMy~zVp((miPUQwAWty{*V7=)&KFg9e?PuP2|4vm0wBvjrg1OzoHipmHzme z(I0V6$5#o6&qP-vOxaX|s=#PT0Bv5cW9516FQ72$Hu})5>q%<^q6A{E7hcul6fcDu z*oj|2GqII_K4|}HrD?r*sTtbqEfw3XdqgWYQwnZgZTdr6OhEs$;`FLbNl6=Tc^}<2 z?)|T}>hzD&)80;cA?cBKUqpYz{Y?tjo1AC1KA^D`@XryZGb3v302*FzY_JJCOGBzBiLxYaV+r z=~H{+3u^ByOnUIKHLfJm^+Q*kOG`Z5;rYpWeLpk>R@t|X>ov;CgZ6$S9+;@ ze3&)2kln;LIql`Sq&nYPL_VdL>_3N#&J;PTe+2XX0 zJu|-QN`2?N?IFEtwXNE?9M1OTwQ#%Q;hevE=Y0J4`)>dJJ+E5XN^;6M|269;z4bZ= z-g6yYf3>c^6tDQ-7H;Wp+e6;&pSg6O7naX!Pg?ukGs}OwZV>LdrR)EPrSmSWRbjv1 zu=M-wx13+C^Go;nuf*eBeQx{v9(%9a?zeF{Jbx$YZ`1FvUYK3F&Kum%^pvjlJh=J% zYOB@xTkrL)nz8FypWsdDT&;A*|JEMd_I=jf{$3T=rEDo(x9gI#{JSMx_AbTec3tw8 zet)|zcK#M!cHiyqdwu}#e6I7gd(JI~ThhzU-=a&&Qaofc$dbc}sL*rUN7PF4Zn-Nya;_|70&IY|3dEHA(FDchsrre|&I zs{8)--{1ULwPe_TKZ^hFdU;jfy<6|=zHfZ>GpqYnfBXK2?q7fZ^Y;ht|MUYH4|pH= z?t`rlM%Vb?xB7kS9~yu7#fSg&kq zkd*MzgwH07C;aB)X^$U&{12X}dD3}u_NmKH{lU{oPy3!e`}7~JdtzPIx=*b;zHVUM zm)Cu5-FJU(%?DS1uTd?@cj#}gA0Kb82o#F51Be%Sf&Yabq8pRj)RBiSFh@yuU5 zoAB(Xo^5*eYa5=};N5U!ncLx=ml({L#&mn}0WXXYvr306?TOo8*nW8XeD)`^-`=rx$FJ>7*m-W(M|b_=u5Z1N^up&}_}!oP|NO6g z^wS@!{Mea~&3^1pKc4pS)=zx!6Q^=s%Nfb}UT$*k+1zOE@8;c`=g<3Lejxv=yEAr= z7wjzf)}9yle7kUS;pYp#<=N-yD>__sz36X>TlXeCqU+p*h^x~hGU1w*WB>a;WzR!-=ViP#8@K0L!Dzm!FlZ1cD!e6uZ z($+41+3e$sUp4#W;vbuRdNE>l-QqW`f9KJ~>t-KYoHM(2alvfD;`hxyzW5chPb~hK z*{2qdn|*rm56rGx{5|UtSIpPhudbV2yZAe16_a(Ocb&y|l^s91_~&MyvU{zw-~ORl zY4wb=Ve#u`H(E@daW-2FpRoal1hXmFG|psTw=M3n^I05kUv%4XHg@OYIy=teI{92@ z4`Do9p$O~aj2~Ohbpjk$aD_^271uezbsG6q6Sf)Kf^Fkh?br@%C$=df;UDt4P);aO)pHXFNhao*14aD_s`NY7`j7eRI}){8B{mh-C$Y$djeD^zoZ z8f-1L4qK0Hz)J0B9jUFArevk|vra4aEZ6BJWFNL4JAfU;4q>J0vyN2NN>#E_^;u^e zdztIZV;9VBFnw;A-GEh2+u&qyybGIy&BH4HY;gP>AHY^)Yp}K0I&3|*0eb?gJhQ=3 
zp4s3i&unm%XEr#>GaDS`nGH@aA^WiX*a7Syb_jb8s~obyQ4ZPQD2HrtltVT+${`yZ z<&X`|RSUV%Qg_X)=x>DnMkmEe=tlCuM)JT$^Ro2q^Mn_DTNM+#1G^Ku%gXXb%L)2c zIboxtoUqX;;%dqd8_5qFZQTVO@8fF92^$^dgpJOAY!&tZSF7f@23w1*!`5RPu*wx1 z$rT&P6&uMF8_5+LowJ1L#r9$Qu>;sa>=0JDVk5a?Be`NDxnd)^Vxx1JF!R{AiTM@m z0%xvTY?IJ12@R7_FA4RMY?Fip`&Jqzp-d9WBvD3^&?L!G8A(E!Bu6PqB1K80D2Ws$ zk)k9=@k}C~NyIaWcqS3gO-6ItUKkb4O~hoA(Y$8ITREc=y@~j2B0ihUAJ&;1ekE#~ zh}$NkreBHLCStgW7;Ykln{0i1ov-FsHP~8g9kw3ZfK^&Hk(N!QWfN)HL|QgE-F(}F z4PnFBUVht$?Z*ya2eCs~>97eMHlf2Nbl9|{!z|y1wTOhs#;#(lT1>#$rHok4gc3?ZPUDzO2xnhg$bRer- zvBgoY*y5bSp2uFmUc_F)DnD!?KWrgCY#~2vAwO(!Cir#|JB6Lb&R`>0<(Vyx^2`=n zK~YwDCK+DI@JfbPvgJFS7cWhhkWI!aM?|@+l~vg7PUSpMvr!D4$}=E2MNz zvHI}qX63`AIPzgq9QiOQj(nIDM?Or7u~2wvo`U8nXr6-RDU^{EDwhoHO2|4DJ=|D-tbe^MOzKPisr4L>^iM(m6!cG_yrfup zQJ6{W6m}XrgNGri zY&*6C+llSM2C?1P9&893#-8Om=Qy6gPGYC9)7Tko1UrkJ!$z?&>}9Sqk6ke9rZ#g^ zo4KjY+|*`nYBM+G#Z7r}o305K{yD50n~F`tsy1^|kGUy3Zt5{N^_ZJ_%uPM!rXF)s zn%tBoH>JrwDV+>|Ca^_ZJ_%uNmErewLP!Q9kfZfY<$HJFQ<~hACO4(Y zO=)uT#Bo!Dxv9b2)L?FEFgG=rn;OhbX>wDV+>|CaHJFb5n!4DM4;Zked>e3ZqmQrNSr`MyW7LHAab6 zbIFHEHAb@XVN#8TtbCYMo;ayIaZ-8WXitg+JDfuT*%Y8ZU*B@04o1WaT@h8ZTKr zaZ=%x3a?anrNS!}Ua7`QA+;BCs_~KyW91>F!YdVCsqjjLSE}(+NO@(c#!FUSS*r1p z)e|QbUa9a(g;y%PQsI?qycBW{8^y-VrqPBd+FWV&6q8k(D~%GD zMu|(K#HCT<(kO9hl%_OFQyQfyjnb4xTP)3&;rD~(c@?~v3h2uoASDjG*3tKbkkhdDdx;xtQT8?l|PVfs_HsbSUug+O<9c|OmHXUu#O_MqLF^D#&;4|? zO-I{wv`t6bbhJ%J+jO)|N85DMR@a}$E@1Uk%^>GzSkAv@$I>|ioioV!8R(pW&Kc;O zfzBCrr8O2(IX}bMw)kzcSsbfxA%mQsLC()0=V#zKWRU+eEdMKvSY*H=gZ!TXiwyFA z2KhgO{GUPo&mjM2IOW_cz`d0JGsyoL_^& z*e+}kE5$O+-_v!Jdo$4_6HPMFB-5GT+ez#cb{ac_jbNorCdy=@%=4CeuiJ=iihcV$ zdF*-1V>(mB8I{@REhosUS8-n^QC3wg>Exn zT2@}@HuI%rdkNo%?Z*ya2eCugbA*&Hy3PD;S^1*d%-@!kFS^bAZCUk?ZFAH=mPP%L zMO!S(yq7h0MtLubyq9IlU$bM?##z)7S=16))Dl@%iz~e9iY&{~vZ^t%?0 zwJh^nbY3|#i=3Hd1of@z=Pc@kEb4?T>Vzy>ZCTU|S?0ayw|YKi@np=R>}OH-vncyn zl>IEqeimgvi*{TVxjKtnokgzBB3EZo17y*T%c32ZWuA<#uXbFPc`~v=tlDu|)aO~$ z=ULR}S=0|%)DKzI4_VX?S=0|%)DKy-J5ooi^Zh+JM_>18z5! 
z-1coBwjVoy9mEb{)lS@Q^@FVX!?)8;+)g`jJMF~nv=g^mzSXx^%x07JY|@@h+OtV} zHfhf$?b)O~o3v-ss?8?t*`z(2v}cp{Y|@@h+OsWRx$XK&d$#2(S*1PO@|CR8o=w`b zNqaVF&nE5Jq&=IoXOs49(w=QOM88$qvn_|nD(%^{k+VsAHfhf$?b)O~o3v+>_H5Fg zP1<*m>K&wd2kG5GdUsfQuiIQYQQkpHcaX*%q;ZF(QQs>5JBa@d;=hCV?;!p=i2n}a zzk|5#AZ|N|+YaKkgShP=ZaawE4vU+ve8uce?AEaNy|>svXivzBrQ9Qh3?pg?Z*ya z2eCs~<-?t%cPHuHNqTpZ-kqd(C+W4(ORG6|SsJgK)iYw3rA}7Qh+RA*c3Bz~M$d>{ zmPT1UBX(KpWc7^LW%*53V^6y*zsYLsX_w_US&cpIGF}R=XT&Z`qpY40yDW{edPeNB zG|Fo1X_uu@R%1`QEWgQW>}i+fH(5O+c3B!_^^DkMX_VD7Vwa^+R?moCJR^4TjL5N+ ze%;AoY&Qp{IXoS5tZym7&Z`G02eLWlpIFKn+s!dA#cjtL+s!dAWt~}#?dF(&_{V0| zzm&tXAjh_y*LBp_lmq=7^Hp>mjqT=`$D%7}Y&XX|7F|bUyE)YDIn?br)a^Oc?K$L! z9P&dB`5}k=kVAgRAwT3e8r#irdaxSX&2hq5jqT=`O1i#0nH=iv9O~^H>g^nINDet9 zha8ea4#^>hLG0nyT}5tg+o(o~^m)nafi(m#1njPt{zWs=21Bek%_!7Y{Jk z^wbs9FPdxlP*(k-xt0%Q)i0WB`A}A^o?O(QmIGzgx0!1> zP*#1Lxt0TErD`s!=AvpYs^;=E%(YgGuA{NtT+?UXya$!FJj;Qy%4vC)({#KGt5TL{ z`ASy#C(rVajt^i}zVawvd6cg_%2yucE06M(XL(6s^eoFG&*YJ3^2jrJAm7x})zr_BZ|cdapCRAWlT|-MzNsgxT#-+%$R}6ilPmJc75Vfh zFT#-+|K|bw*eCnlq>ZN?z2>GVu zTh^PY60{o)ccb2J)Z0x7+Kq<0DI>d4W;dGbMw8u?k=-b>n-uLPMY~DSZc?KEN(n#ihebC2mDD+TtL0y9L!v874|0$wRpa}ho(7y=%i_pIa{fp=mD56iGh(3WL`UHyT6DXojpa}ho z(7y=%i_pKAmRB)edNKY|F`juby=TR=xr*_# zRE#fFj4xDde$F+cB0r}XKc^T!rx-t{7(b^NKc|?!&Aqgh_R?0`OIv9#ZKb{D(|+2x zYHW9}`8G{vHMYCgyqY?*YCY|x^|Y7P(_UInd(Ds4uQay1*L)a-*Vyh}^Ir5TwV3wO zV%kfKX)i6Ny|kG2(qh_6i)pX50u)kXyL%mt?e4W!fUL%L_gWftthUl#+DdzAEA6GN zw3oKhUfN1~&D&QPjqUDrG`73fynR`X?d~%u8wVQkuM!Ca=XP!Q!Ja z5ih05OFibL9`jOzc_~?5YA`P~n3o#NOAY3w2J=#yyp$#{rO8Wa@=}_-)L>p}FfTQj zmm17V4d$f=^HQ3;lqN5w$xCVSQiFM^!MxO9UTQEeHJFzY>F!G&BjF+r@rxN2O+s1F(u^m{=CMtnf3A{>- zmqKc6x5RkKYHYUzUM27)@G5~< ziSbfMjqR2gFIkQ4`e;M>7~Azxf_#)9A1x^#<;X`l@==a_lp~+fS!2Idf3=U6wU2V- zqlM+8h2^7#<)ek=qiyA5Y}ZF=@==<6w6J`XDIaCZN15_j>=NwCrCeWq*FIX`KFXAj zmY0u~myed0kCvB@HkXe!myb4=k2aT&HkXe!myZ(Xqr~|raXw0%j}qsjH2Ek^K1!31 z(&VEp=A$j_=xmIp0st z_gl{Ywp~YKyMA)MpPcU}=ljX|ekl0K|9-}H{jl)E!cYG9!@>^>KP>#P@LNoEW#xar 
z<$v8vW4nIxzn}c?C;$7&|9(jMDFuE?fuB;~rxf@p1%66_A5wlu`613{A?=qzp~U%tum4jqR41k0h(H-7@o$WHq*1hB9R+Q-(78Ecae_ z_E}!NVOAx5pXD)GjqUCuC+xGFpfeiV-Df#LR%5&SEGNinY8jtp9AC5?U$h)ww45Hca*Kn)$nz}6^DM{nEXVUKr{ApHVx=$| z+by?P$!cu3++roGvE6dK&~m)ca=g%TywGyI&~m)ca=g%Ti@(BaY`5IvFRQWLa*MyL z#&*l`Ma%I;%kf3a@kPt=Ma%I;%kf3a=^rbne=I=#5TGpHxVq zKn)O}9T%V-7oZ&%pdA;W9T%V-7oa{5P@e~=&jZxw0qTbU^+SOAAwc~QpneEYKLlvU z1t<{#N<@GX5uii_s3iil;{vqf0<_}-wBrJ_;{vqf0@V2d>iht8etyg_+|y|#0uJp z6|@s8XeU(1 zq;x-N+)o26 zy0XT0tBlJ!vl`p2B8F9PsUn_L#IwqXD2(D+MLeq@Qbla5h;5azP#BHvRv8OfjqO$$ z3t5frR*{w}(o#iQsz^%}X{jPDRivfLSSY;4cB_nqtj2b$jD@VmcB@El73r-ay;Y>Q ziu6{I-YU|2z|wfb))mq-;((=5R%5#dEOoM)|8v07D64100ZXH-#&!=_>SQ&xd%)5t ztFhe!mPT2P?H;f+%4%%)fTdC4HMV=e(kQF3-2;|JS&i);ur$i{5?*7w2P}=U8rwZ! zX_VF2?g2}qLaM*wfTdAZW4i||jj|fsJz!~+eVg!C%vM`Uziw9V?N-CInx{iGW4qOm zt%hv1`6o7F&Dd_Wc`0r?*4S>fc`576YHYWfvE6F8SMw~WW^A__`qj{{HeW^8(b#Ua zc`Uku#&)aCW6^aqwp&fzUQOLzP2FBi-Cj+8s3t#DlOL+d57p#{YVt!hW4qOi?N(c_ zj_wl1YHYXKRMPe3hgVZ?S5t3SQ*T$3L#oLk)#Q+Ba!55fq?#O3&Dd@=W4qPX+oS8p zuo~N~HV;QuW4kqITZ6VWXj_A}HE3Idwl!#5gSItjTZ6VWXj_A}HE3Idwl!#5gSItj zTZ6VWXj_A}HE3Idwl!#5gSItjTZ6VWXj_A}HE3Idwl!#5gSItjTZ6VWXj_A}HE3Id zwl!#5V_u@tD{X7gwgzo$(6$C`YtXg^ZEMiB25oE5wgzo$(6$C`YtXg^ZEMVX)b*up z4cgYCZ7tf?qHQhO)}n1K+Sa0NE!x(iZ7tf?qHQhO)}n1K+Sa0NE!x(iZ7tf?qHQhO z)}n1K+Sa0NE!x(iZ7tf?qHQhO)}n1K+Sa0NE!x(iZ7tf?qHQhO)}n1K+Sa0NE!x(i zZ7tf?qHQhO)}n1K+Sa0NE!x(iZ7tf?qHQhO)}n1K+Sa0NE!x(iZ7tf?p=}-7)}d`3 z+SZ|M9op8RZ5`Uyp=}-7)}d`3+SZ|M9op8RZ5`Uyp=}-7)}d`3+SZ|M9op8RZ5`Uy zp=}-7)}d`3+SZ|M9op8RZ5`Uyp=}-7)}d`3+SZ|M9op8RZ5`Uyp=}-7)}d`3+SZ|M z9op8RZ5`Uyp=}-7)}d`3+SZ|M9op8RZ5`Uyp=}-7)}w7b+Sa3OJ=)fzZ9Ur7qisFf z)}w7b+Sa3OJ=)fzZ9Ur7qisFf)}w7b+Sa3OJ=)fzZ9Ur7qisFf)}w7b+Sa3OJ=)fz zZ9Ur7qisFf)}w7b+Sa3OJ=)fzZ9Ur7qisFf)}w7b+Sa3OJ=)fzZ9Ur7qisFf)}w7b z+Sa3OJ=)fzZ9Ur7qisFf)}w7b+BTqV1KKvAZ3Egiplt)%HlS?-+BTqV1KKvAZ3Egi zplt)%HlS?-+BTqV1KKvAZ3Egiplt)%HlS?-+BTqV1KKvAZ3Egiplt)%HlS?-+BTqV z1KKvAZ3Egiplt)%HlS?-+BTqV1KKvAZ3Egiplt)%HlS?-+BTqV1KKvAZ3Egiplt)% 
zHlS?-+BTqV1KOUzFFIig->}w~=HZ+$C3Ub!$q9U+6Xx$JynLY(_(CV}g-+lLoiJr| zUgJ|IP~Zd#oIrsSC~yJ=PFT;t4eKjcyZ9uzcQleCCW(jq=di})lh+>_*lleBG5 z(zZQG+x8@F+mo<93G0)vJ_+lSv`J4w>LjF2Lh2->PSRRDMI25Mhf~Di6md9398M93 zQ^esEaX3XBP7#Mw#NiZiI7J*z5rWDKa0F`!1qfEtNmBQb0whKWDKa0 zF`!2JPZ}8mYGe$kkujh~#()|b18O97jij!T)HRa2MpD;E>Kd&VCBafyiq#wCjn<1I zt6r2w%k{D+v3jGtkujh~#()~_FP&+^He>Zhc_Yd+G6vMhm|Y`dc8!eLH8KX&$QV!~ zJtvLyoHWvN(n!xqBV#~~i~%(=2GqzHP$OeNjcDG8=8b6Hh~|w;nrjTGkukeQ#_Spy zvuk7wsF5+CMtVyc=`Crbx1^EYk|uaH!K(>gP4H@hR};LN;MD{#y+fQ}>33sOv6|J> z1g|D|HNmS1UQO_7f>#r~n&8z0uO@gk!K(>g8jIG|G`iXZuO@gk!K=x7y>wo@n&8z0 zuO@gkF=p2UuO@gk!K=x_D!h0#k&~O?)da65cs0SR30_U`YJyi2yqe(E1g|D|HNmS1 zUQO_7f>#r~n&8z0uO@gk!K(>gO^n$!!K(>gP4H@hR};LN;MD}LCU`Z$s~KL+@M?xv zGrXGN)eNs@cs0YT8D7osYNmWOQ@)zv)eNs@cs0YT8D7osYKB)cyqe+F46kN*HN&eJ zUd`}ohF3GZn&H(9uV#2P!>buy&G2f5S2Mht;nfVUW_UHjs~KL+@M?xvGrXGN)eNs@ zcs0YT8D7osYKB)cyqe+F46kN*HN&eJUd`}ohF3GZn&H(9uV#2P!>buy&G2f5S2Mht z;nfVUW_UHjs|8*y@M?iq3%pw3)dH^;c(uT*1zs)iYJpb^yjtMZ0THw_JuNHWTHw_JuNHWi<}kDxFImlDXoXiR zyjtPa3a?hGp2&0%OYUb1?AwZf|vUajzI zg;y)QT8)=NY7RrI@siaXhBouAubI{R>1~vzHcC^Q^{?uT#%S6oQ*D%~Hp*0+(OF~X zHHV?iS~R+XW)!zsi$=%USiPU#X6+i;9IW0?Z=+1LQKs6ghgN4aUeiX&YNKSeQL@^M zk*=>Xnl@|i=sNN-+N`~!<4UYn25YlckE}*;+RWRR)x7aG^Y&#mZ@kUCeOY-RZIrk+ zN?aQyuFdq&dA)PjMrmrJG__Hh+N{;1-)cU2oB98;nor(l{=cl|led}wFRT8&HcD0- zC992+)keu`qtvufYT76@ZIqfeYxU^2dOy9*T0PgS%!^AqT-xE%4wrVgw8NzxF70q> zhf6zL+Tqd;mv*?c!=)WA?ToOu!=)WA?bhDBZuinST030Y;nEJ5cDS^|r5!HqaA}82 zJ6zh~(hiq)xU@62-VT>`xU|Ei9WL!~X@^TYT-xE%4wrVgw8NzxF70q>hf6zL+Tqd; zmv*?c!=)WA?Qm&_OFLZJ;nEJ5cDS^|r5!HqaA}82J6zh~(hiq)xU|Ei11=qK>3~ZI zTsq*=0hbQAbikzpE*)^`fJ+BlI^fa)mkzjez@-B&9dPM@O9xy!;L-t?4!Cr{r2{S< zaOr?c2V6Sf(gBwaxOBj!11=qK>3~ZITsq*=0hbQAbikzpE*)^`fJ+BlI^fa)mkzje zz@-B&9dPM@O9xy!;L-t?4!Cr{r2{S3~ZITsq;> z371Z|bi$<*E}d}cgi9w}I^og@mrl5J!le@~op9-dOD9}9;nE40PPlZ!r4uflaOs3g zCtNz=(g~MNxOBp$6E2-_>4ZxsTsq;>371Z|bi$<*E}d}cgi9w}I^og@mrl5J!le@~ zop9-dOD9}9;nE40PPlZ!r4uflaOs3gCtNz=(g~MNxOBp$6E2-_>4ZxsTsq;>371Z| zbi$U2y4wOBY4HlaT)N=W1(zU2y4w 
zOBY4HlaT)N=W1(zp0lhxv~C!6 z!=l?fXPwc^nQn-5L!{e?=*sfMx*^gHk#2}|o6oH9^2fT(XO@*$)(w|#^zTOhZuIX) z|8Df}M*nW}s})|}S2q;8q0kM5ZYXq{@1rnU-LBjGXW33{7dD8Mht>_LZb)@QsvAb# zFzSX;H;lSr)D5F<^RabJd1&2`>V{M|q`D#1ZN9X^Xmz`8^QC2{u`}2RR&SYgL$e#2 z-O%iTT@UPfVAlh?9@zE3t_OBKuw#Sl?0R6=1G^sB z^}wzNc0I7`fn5*mdSKTByB^r}z^(^&J+SM6T@UPfVAlh?9@zE3t_OBKu_V^$!7c>55bQ#*3&Ab~yAbR`unWO11iKLILa+_V^$!7c>55bQ#* z3&Ab~yAbR`unWO11iKLILa+_V^$!7c>55bQ#*3&Ab~yAbR`unWO11iKLI zLa+_V^$!7c>55bQ#*3&Ab~yAbR`unWO11iKLILa+%~BpEX`zGOLlkvy7RZH3|x&nO$cY6+LUpD~!hb&N3c)*7Q^u&FngB zddg~M*ICn3Rx`WK+DM)|63r&FngB%E)SF*I83W zRx`WK+DMDMUQ*g? zK9#Me!RTe^QLqitzywjKI~<5xED2gQKQ$qCS6A}oqEk{ z(iJq*sn`4^T}LyWddXM4wCzLNKD6ya+dj1IL)$*I?L*r>wCzLNKD6ya+dj1IL)$*I z?L*r>wCzLNKD6ya+dj1IL)$*I?L*r>wCzLNKD6ya+dj1IL)$*I?L*r>wCzLNKD6ya z+dj1IL)$*I?L*r>wCzLNKD6ya+dj1IL)$*I?L*r>M&A3-whwLl(6$e4`_Q%zZTr!- zA8q^5wjXW#(Y7CL`_Z-^ZTr!-A8q^5wjXW#(Y7CL`_Z-^ZTr!-A8q^5wjXW#(Y7CL z`_Z-^ZTr!-A8q^5wjXW#(Y7CLZC^mkIsItckGB12+mE*WXxopr{b<{dw*6?^kGB12 z+mE*WXxopr{b<{dw*6?^kGB12+mE*WXxopr{b<{dw*6?^kGB12JAk$WXgh$m186&d zwgYH8fVKl@JAk$WXgh$m186&dwgYH8fVKl@JAk$WXgh$m186&dwgYH8fVKl@JAk$W zXgh$m186&dwgYH8fVKl@JAk$WXgh$m186&dwgYH8fVKl@JAk$WXgh$m186&dwgYH8 zfVKl@JAk$WXgh$m186&dwgYH8fVKl@JAk$WXgi3ugJ?U5wu5Lph_-`hJBYS}Xgi3u zgJ?U5wu5Lph_-`hJBYS}Xgi3ugJ?U5wu5Lph_-`hJBYS}Xgi3ugJ?U5wu5Lph_-`h zJBYS}Xgi3ugJ?U5wu5Lph_-`hJBYS}Xgi3ugJ?U5wu5Lph_-`hJBYS}Xgi3ugJ?U5 zwu5Lph_-`hJBYS}Xgi3ugJ?U1wnJz;gtkLyJA}4FXgh?qLufmMwnJz;gtkLyJA}4F zXgh?qLufmMwnJz;gtkLyJA}4FXgh?qLufmMwnJz;gtkLyJA}4FXgh?qLufmMwnJz; zgtkLyJA}4FXgh?qLufmMwnJz;gtkLyJA}4FXgh?qLufmMwnJz;gtkLyJA}4FXgh?q zLufmMwnJ!p&U~R8=5J}H(>atpXG-c=Go8+v7bL5hPUq0)oav)u&2&0v{*SC?eV;S` zM^>}G&zb)tt6AUYOe2%KgA6d=%K4<=qtY&?m zGi7Au|C~dCb0}~Q1ht*3=kcr0!}>g|&%^pWtk2^QpNG_WNS%k&c}ShdQ@ubOE)a(c#Nh&QxIi2(5QhuI z;R12EKpZX*hYQ5v0&%!N94-)t3&i09akxMnE)a(c#Nh&QxIi2(5QmGz;UaOkNE|K_ zhl|AFB5}A#94-=vi^Sm~akxkvE)s`}#Ni@wxJVo>5{HY#;UaOkNE|K_hl|AFB5}Ax z94--uOT^(4akxYrE)j=I#NiTgxI`Q-5r<2};SzDUL>w*=hfBoa5^=ah94--uOT^(4 
zakxYrE)j=eMv8~6)s|r4lCYbwo3WY~H%ttNiQzCY943at#BkW!ZVIFIaE7hzCad*u zhH1MEGg3TkZ8x3KO6J3i6c1ZFPRAPU8YXqaq;8nh4U@WIQa5ZZIEB$nr(tWs$!g}z zFfF)YYn|y>Go6MRDIR8|c$ksmVU!t0nPHR}Mwwxh8MaoNex;G(VQaO?c4E7*L9BX9 zhH148(`p;0)i!LUP3O;JFJQHI#xNtr!}hO^N3f$F z2)st%H3F{@c#Xho1YRTX8iCgcyhh+P!pQyzBl{z?vPa-G0qwpGq*C@P3;WY}cQFx8Q zYZP9i@EV2JD7;4DH43j$c#Xnq6ken78im&=yhhqwpGq*C@P3;WY}cQFx8QYZP9i@EV2JD7;4DH43j$c#Xnq6ken78im&= zyhhdH3qLSc#Xkp3|?dK8iUsuyvE=)2Cp%A zjlpXSUSseYgVz|m#^5yuuQ7Oy!D|d&WAGY-*BHFU;57!XF?fx^YYbjv@EU{H7`(>d zH3qLSc#Xkp3|?dK8iUsuyvE=)2Cp%Ajl*jkUgPi@hu1i~#*J5^-9s~-#*LS(W;%@< zFImlW8i&_7yvE@*4zF?JrEfLUX&heT@EV8LIK0Mx<0Y$^PUG+z zhu1i~#^E(?y!5S}U*qr^hu1i~#^E(?yc9+=oyLuqtY$im8!uTszsBJ;4zF=|jl*jk zUgO3~AvM!!+<3`qrqejQ#^E&%uW@*d!)x4lDU4=1jTK|7BGlj>BslUgPi@ zhu1i~#*LRkYNpe;@sia{rwMwAC#>ai-H!FNnxHgISbw9=s1BTOu}UnE|YMXgv%scCgCy( zmr1xx!e!E0n!18!piRPM5-yW)nS{$ETqfZ%371K@Ou}UnE|YMXgv%scCgCy(mq}}f z>MojrHVKzWxJ<%j5-yW)nS{$ETqfZ%371K@Ou}UnE|YMXgv%scCgCy(mr1xx!etUJ zlW>`Y%OqST;W7!ANw`eHWfCrvaG8Y5BwQxpG6|PSxJ<%j5-yW)nS{#}T&CbM1(zwf zOu=OeE>m!sg3Am!sg3AE(W_8>|(Hs z!7c{780=!Oi@`1iyBO?Zu#3Si2D=#SVz7(BE(W_8>|(Hs!7c{780=!Oi@`1iyBO?Z zu#3Si2D=#SVz7(BE(W_8>|(Hs!7c{780=!Oi@`1iyBO?Zu#3Si2D=#SVz7(BE(W_8 z>|(Hs!7c{780=!Oi@`1iyBO?Zu#3Si2D=#SVz7(BE(W_8>|(Hs!7c{780=!Oi@`1i zyUWIIopaen_pX`MOsC7n>q}-e)9Er}rk9O^!f2+`Wky9WoAL^ynNF7(54~)9DvV}2 zT{b;sHPh*`=_#w3PM2+@PvJGvcbSpC%Z&70W~A>jBYl?{>AP&oD7Pa5Y*<2-46+tPU5dE3%;!>m@1eA`katJNdlX7$Lo zE&e*A)g#}w_{(bb$hR&2vRXazZHtwxR*!t!VkN89Bj2`I$!hh;w^=>%3U|E19j|c5 zE8Ou4cf7(KuW-jJ-0=!`yuux?aK|g$@d|gm!X2-0$1B|N3U|E19j|c51voCiaRH7C za9n`n0vs3MxB$lmI4;0(0gelBT!7;O92elY0LKM5F2HdCjtg*Hfa3xj7vQ)6#|1bp zz;OYN3vgV3;{qHP;J5(C1voCiaRH7Ca9n`n0vxYe8gDpPEsfXBYQ43qmO5Fjw|15F z)~;F_6-Mi=U9~jIYQ43qmO5Fjw|3RiD65h8tCmJtjl5sAG|Fn^{i>x=;kDk{RZF9+ z)?2%3X_VD^Yga9evKo26YH5_!$oo}GqpU{WuUZ;qwcgrQOQWpTTf1s$l+}7`S1paQ zT5s(t>#bd7O-{%Dd&2dIn=+RoQ69p7)yoc5*T2h4l?jiuuU?f5?~9j|dd?);8(kK?-cLA(E9*ZY@_ zUCtw}>ZRjVcHF#le2?>SS9t08UMJD@drQanIZwI%YUy~jlep^PrQ`dZs#PgV#}7CO 
zt4=Q+KWN9lx^%q8X;?Lpd-StkIQIIhuN_~%`K2xEGtx5B*T49M^?^4JAOFH<53SEV z>R$he!-v-s;*Isk4!v>c*yj$t>@GU^+1C#rI_5w0>YIlT9$N~Xdvxder7zYmeY2A* z-gYu@=-3;tA3d@@-JO=U^wo1q;cpAM{`EK3A6$R@*uj?%edge?Us`|km78PsZY1A5 z+V`6MDZxz#}B={{>?XDKl1AO-F7-xXO13QpPuPXTmQo@x< zS3GEc-Fj_Zd8b|D)?ci@?KcXs9LJv=GQb_*uzMeMj@YlX#dDhdf9tQFyY2e7etY}f z*W1;U&V%+jZYeluX*}e7#{NC#{F0qJ>bzq1)^sgvcBivlwwr&%dB(=mHaHt?ByE#rye;O@rkE%DoJB!9 z)~DNjGi|TkZB|olx7{muShm>ZykK?JN1cyZo$?9W)h^d+t9(mxfh9#d9clNLz2>KC z*Md?*T4t*;mm7!*TjzDZt;~7A`K0p;&ZnIJ!TCj#@4StHop#wT_=To1b*alPO50q57vFID^DtF+|*qxsrb&C~u5=3QKI zzU+F`^_Xj|^XIMv*W<1yTu-{5ay{)@=lVI<2VEa>CAvQBTJQX=v*`MW>lxRxt_`k@ zt|Zqc*JjriSF$U`wbk{U^JACea=TJpX|8lvhAY$cyla~)%eCFP&$;(rZ+UU?`(FFP zXJ0#Xx_v7<*G`Rt)%uOEH+r9(%KA3ApEwR z{M;d{jb424;7e~F=jf4_UO)EIo1b~*@S)H1#qrkq)`Srtx59;{+uN*scX8Qy zzkckX4(>hl=CPxXDpJQ^J9y;iu|xmv&4Y)JA5D0bn)TQ%Cs&sd+tvHH#p^kP^c12xZbN!c9>+e}}&u`!R;(Z^#@7Gp;a&`3nZ$I!i5B_M)i|oQquFAKDGIQ&A-0ogIl7>l_@z}w?6lT+i`zC^>@=go%YrA z#PrXne#Hw(>*v4svBy6C!Y5wI>C8>eOUV0i{`Yo& zqu|SXE)~8|7%Kb=kH>SN=zlK$o_9^jR-d=@7yZY|PVEboj|Ao`zPtaCs_X+V9{ABG zL!bQaFJ%0}Z<`8_E`G;(Y;nw4Yx@u;EdG}Bxc&Xq;yLGO`*)pvR#~l^Yc=lXcfx+h z!hXlXz2!Vf*x!05?DExOOIK4UUF{q1gmBrF65hGeTQ`M!%fjuqaDQTVee0bNeHNn6 z;`J8ydup+2=}K?eGvZNe{jFX6vVHSai^(6`Hxc{oH;wP3=C?j(&zZH03-;UZTZpeX zPb~hK-Ql=}`U4B~dzNG3DZg%^erM?p-?7-OvTOahg^XCpKSTlT2mE#G`~PEFG07Hx z?HsEXcZPlRwoR7(t2b@*o{iqJ(K|MJ!$$Adcr2`Th*j%bdjspO8og7aH)`}gjozlw zyEN+SRZp+_dDY9ScDLR`=&( zYSVVC7MI#vYHg{lrIwc3StsmsH|^D~QmaaBDz&K8o?;%e{d+gPuiAIo>-^d~x@xY- zvbNGyt8y>vs`YL(2Shuf=Gec>`fBFKvc}R^t;S`2wR(+abZFmG&FRqEHCnkw>(*%1 z8m(D#XPxc+DEm%p(P$+atwW<#XtV~+oprX}7W?jPyZ06QXy%1>FMZ;jlr5)fImax= zemUmLx#Lc;xzin$ALHZG%8we&x&02Bub~}Cm+z!;ncMHC*%sPwRJ)C8uTkwZs(nVa z%jlhAqA?S_UmUVe*go1(RQri)H&N{+s+~l&kLaCZ9PckrSn9J&K$}GKL-ll0X-c%x zlxU?%dwC{W$w^#_ZK9PN?Fp4=F-o+0Akpf9M5_n1#!{l)Dbacp6Rm6~TG`fYB<%?L ztX)wvpEZwmlYKVZXN!IARCYDXS#z84O9 zW-yo8N1AHx*Gc=Fvd?MzoUzZH^wd1BcKdYLM>CW)KRIZh+jZ8QfueGD=Xze_ znRm_`Yv_&F_@zcKHFl|y%iHtD-JS;;qr7t-Swj!I#w0Z&sqx78XrxB6-YuU`(?6-6 
z^!W2aHScwMR$SxR9e-Y^X1z`if641{jb_yJ%s8XRPUhr2DNjf_Kno8k=ip- zJ4UL9Q2m4I9aP_-_KDOkk=i3tJ49-KNc91#2T=Wg>itvSpL+h(@2CABwHu`Ng49ls z+6Pj-eCp#<51;z?)Vrs?J@xEq-$(8Gs68LGR_)NYU3>v8vgQ|%d%h*w^DWVyZ;93eai@Cs zy-%FG*Sm>UrV{O$muNi%iB?a@+tD1^j&Y2Q8Vy7%|b2WzY}tg+It#!AB)<}5Ae1F7S- zXTc477TkCzKWG;{y`!le^p;D(PD`~~EZR43IUi`pJiV={-SU=mdX@cqH zb<2I%E%#lw+;`n_-*wA<*DY6Fw_J7Ia@BRqRo5+7UAJ6iV@mefY**X$p68<4LfXUb z_Gh8mJKDESyVhyXI_+4e{pz$^-3hze-JXfsolbkxX=gg^OQ&7wv?tv;`_0{+i_7gB z5BXT$k!F^?TbI4J6_RG(t3J~#GSz9S&s2ZOW76EQWrBBVo%rl~)nl3`rn)TdMcp~S z{=>bfJGb`!pG-nQ?q*ymqQOxL}m=et_I*E7}5 zX+KG;pRCDsc1BAwdhg!tW_p@Qawl?{J)*fInmM9*Bbqg$IU_$QCYmFn86uh=qS+yu z8={#ZKPiT)BW~QKj!=)zt^RVnj<{Q|&QGc%?$)dG)9Z+pzQL8g!Fa#yO5dQooaJZd zO5dQ~wtlxae%y|4_nXyky4-49=^I?>8~oYoJzeP=TW)~+72kNRs?W`6#( znV%~PtthlIzxwu>aCe>w`QDMdmHyyc=jgA@75&GXE4tD{etQr3iXtnD+-d&A&7&wQ zd1NJz{Ioo>GM`tgHEONKpY~SNic%{|-Tp@KKXi`xN}l?==Bc0E`PeHxrT_MNN>`Y! zF#XrNV#$9$xp!qmV|jMi^4oO9*3-Y&KFe={-u@0`t9^I-dyZ-&{A(O5S?RA|>91eu zum6|ouU}bHCu;SkdYJB353S5_Us?U)=5=*e`etu`+iyjY6-9p98=)(CWF?RMee%dk zpX%SWPjyA96{S{2G=BOji~lt11O4odX#DKP>>qcvt$*CB{p_mtv%g}H?M!1kS*)`) z=%2B50oT}`69;S+-)h^B;iI;)V~edB)@rMu?zL4=&)Rxmy|$ynep{h&%yyX24&|@e mDyElhp9fp<+IDw%+gY$(9 +# +# Built-in presets can be overridden by user presets in: +# - ~/.config/mainline/presets.toml +# - ./presets.toml (local override) + +# ============================================ +# TEST PRESETS (for CI and development) +# ============================================ + +[presets.test-basic] +description = "Test: Basic pipeline with no effects" +source = "empty" +display = "null" +camera = "feed" +effects = [] +viewport_width = 100 # Custom size for testing +viewport_height = 30 + +[presets.test-border] +description = "Test: Single item with border effect" +source = "empty" +display = "null" +camera = "feed" +effects = ["border"] +viewport_width = 80 +viewport_height = 24 + +[presets.test-scroll-camera] +description = "Test: Scrolling camera movement" +source = "empty" +display = "null" +camera = 
"scroll" +effects = [] +camera_speed = 0.5 +viewport_width = 80 +viewport_height = 24 + +# ============================================ +# DEMO PRESETS (for demonstration and exploration) +# ============================================ + +[presets.demo-base] +description = "Demo: Base preset for effect hot-swapping" +source = "headlines" +display = "terminal" +camera = "feed" +effects = [] # Demo script will add/remove effects dynamically +camera_speed = 0.1 +viewport_width = 80 +viewport_height = 24 + +[presets.demo-pygame] +description = "Demo: Pygame display version" +source = "headlines" +display = "pygame" +camera = "feed" +effects = [] # Demo script will add/remove effects dynamically +camera_speed = 0.1 +viewport_width = 80 +viewport_height = 24 + +[presets.demo-camera-showcase] +description = "Demo: Camera mode showcase" +source = "headlines" +display = "terminal" +camera = "feed" +effects = [] # Demo script will cycle through camera modes +camera_speed = 0.5 +viewport_width = 80 +viewport_height = 24 + +# ============================================ +# SENSOR CONFIGURATION +# ============================================ + +[sensors.mic] +enabled = false +threshold_db = 50.0 + +[sensors.oscillator] +enabled = true # Enable for demo script gentle oscillation +waveform = "sine" +frequency = 0.05 # ~20 second cycle (gentle) +amplitude = 0.5 # 50% modulation + +# ============================================ +# EFFECT CONFIGURATIONS +# ============================================ + +[effect_configs.noise] +enabled = true +intensity = 1.0 + +[effect_configs.fade] +enabled = true +intensity = 1.0 + +[effect_configs.glitch] +enabled = true +intensity = 0.5 + +[effect_configs.firehose] +enabled = true +intensity = 1.0 + +[effect_configs.hud] +enabled = true +intensity = 1.0 + +[effect_configs.tint] +enabled = true +intensity = 1.0 + +[effect_configs.border] +enabled = true +intensity = 1.0 + +[effect_configs.crop] +enabled = true +intensity = 1.0 diff --git 
a/pyproject.toml b/pyproject.toml index 84f1b52..c238079 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,6 +23,7 @@ dependencies = [ "feedparser>=6.0.0", "Pillow>=10.0.0", "pyright>=1.1.408", + "numpy>=1.24.0", ] [project.optional-dependencies] @@ -30,11 +31,18 @@ mic = [ "sounddevice>=0.4.0", "numpy>=1.24.0", ] -figment = [ - "cairosvg>=2.7.0", +websocket = [ + "websockets>=12.0", +] +pygame = [ + "pygame>=2.0.0", +] +browser = [ + "playwright>=1.40.0", ] dev = [ "pytest>=8.0.0", + "pytest-benchmark>=4.0.0", "pytest-cov>=4.1.0", "pytest-mock>=3.12.0", "ruff>=0.1.0", @@ -50,6 +58,7 @@ build-backend = "hatchling.build" [dependency-groups] dev = [ "pytest>=8.0.0", + "pytest-benchmark>=4.0.0", "pytest-cov>=4.1.0", "pytest-mock>=3.12.0", "ruff>=0.1.0", @@ -64,6 +73,12 @@ addopts = [ "--tb=short", "-v", ] +markers = [ + "benchmark: marks tests as performance benchmarks (may be slow)", + "e2e: marks tests as end-to-end tests (require network/display)", + "integration: marks tests as integration tests (require external services)", + "ntfy: marks tests that require ntfy service", +] filterwarnings = [ "ignore::DeprecationWarning", ] diff --git a/requirements-dev.txt b/requirements-dev.txt deleted file mode 100644 index 489170d..0000000 --- a/requirements-dev.txt +++ /dev/null @@ -1,4 +0,0 @@ -pytest>=8.0.0 -pytest-cov>=4.1.0 -pytest-mock>=3.12.0 -ruff>=0.1.0 diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index c108486..0000000 --- a/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -feedparser>=6.0.0 -Pillow>=10.0.0 -sounddevice>=0.4.0 -numpy>=1.24.0 diff --git a/scripts/demo_hot_rebuild.py b/scripts/demo_hot_rebuild.py new file mode 100644 index 0000000..57074c5 --- /dev/null +++ b/scripts/demo_hot_rebuild.py @@ -0,0 +1,222 @@ +#!/usr/bin/env python3 +""" +Demo script for testing pipeline hot-rebuild and state preservation. 
import sys
import time
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent))

from engine.display import DisplayRegistry
from engine.effects import get_registry
from engine.fetch import load_cache
from engine.pipeline import Pipeline, PipelineConfig, PipelineContext
from engine.pipeline.adapters import (
    EffectPluginStage,
    FontStage,
    SourceItemsToBufferStage,
    ViewportFilterStage,
    create_stage_from_display,
    create_stage_from_effect,
)
from engine.pipeline.params import PipelineParams


def run_demo(viewport_width: int = 40, viewport_height: int = 15):
    """Run the pipeline hot-rebuild demo.

    Builds a pipeline against a recording NullDisplay, runs 10 frames,
    toggles the 'fade' effect stage in place (the "hot rebuild"), runs 10
    more frames, then reports state-preservation / frame-continuity checks
    and exercises the recording save/load round-trip.

    Args:
        viewport_width: Width of the demo viewport in cells.
        viewport_height: Height of the demo viewport in cells.
    """
    print(f"\n{'=' * 60}")
    print("Pipeline Hot-Rebuild Demo")
    print(f"Viewport: {viewport_width}x{viewport_height}")
    print(f"{'=' * 60}\n")

    # Effect plugins register themselves on discovery; must run before
    # get_registry() lookups below can succeed.
    import engine.effects.plugins as effects_plugins

    effects_plugins.discover_plugins()

    print("[1/6] Loading source items...")
    items = load_cache()
    if not items:
        print("  ERROR: No fixture cache available")
        sys.exit(1)
    print(f"  Loaded {len(items)} items")

    print("[2/6] Creating NullDisplay with recording...")
    display = DisplayRegistry.create("null")
    display.init(viewport_width, viewport_height)
    display.start_recording()
    print("  Recording started")

    print("[3/6] Building pipeline...")
    params = PipelineParams()
    params.viewport_width = viewport_width
    params.viewport_height = viewport_height

    config = PipelineConfig(
        source="fixture",
        display="null",
        camera="scroll",
        effects=["noise", "fade"],
    )

    pipeline = Pipeline(config=config, context=PipelineContext())

    from engine.data_sources.sources import ListDataSource
    from engine.pipeline.adapters import DataSourceStage

    list_source = ListDataSource(items, name="fixture")
    pipeline.add_stage("source", DataSourceStage(list_source, name="fixture"))
    pipeline.add_stage("viewport_filter", ViewportFilterStage(name="viewport-filter"))
    pipeline.add_stage("font", FontStage(name="font"))

    effect_registry = get_registry()
    for effect_name in config.effects:
        effect = effect_registry.get(effect_name)
        if effect:
            pipeline.add_stage(
                f"effect_{effect_name}",
                create_stage_from_effect(effect, effect_name),
            )

    pipeline.add_stage("display", create_stage_from_display(display, "null"))
    pipeline.build()

    if not pipeline.initialize():
        print("  ERROR: Failed to initialize pipeline")
        sys.exit(1)

    print("  Pipeline built and initialized")

    # Seed the shared context the stages read during execute().
    ctx = pipeline.context
    ctx.params = params
    ctx.set("display", display)
    ctx.set("items", items)
    ctx.set("pipeline", pipeline)
    ctx.set("pipeline_order", pipeline.execution_order)
    ctx.set("camera_y", 0)

    print("[4/6] Running pipeline for 10 frames (before rebuild)...")
    frames_before = []
    for frame in range(10):
        params.frame_number = frame
        ctx.params = params
        result = pipeline.execute(items)
        if result.success:
            frames_before.append(display._last_buffer)
    print(f"  Captured {len(frames_before)} frames")

    print("[5/6] Triggering hot-rebuild (toggling 'fade' effect)...")
    fade_stage = pipeline.get_stage("effect_fade")
    if fade_stage and isinstance(fade_stage, EffectPluginStage):
        new_enabled = not fade_stage.is_enabled()
        fade_stage.set_enabled(new_enabled)
        # Keep the plugin's own config flag in sync with the stage flag.
        fade_stage._effect.config.enabled = new_enabled
        print(f"  Fade effect enabled: {new_enabled}")
    else:
        print("  WARNING: Could not find fade effect stage")

    print("[6/6] Running pipeline for 10 more frames (after rebuild)...")
    frames_after = []
    for frame in range(10, 20):
        params.frame_number = frame
        ctx.params = params
        result = pipeline.execute(items)
        if result.success:
            frames_after.append(display._last_buffer)
    print(f"  Captured {len(frames_after)} frames")

    display.stop_recording()

    print("\n" + "=" * 60)
    print("RESULTS")
    print("=" * 60)

    print("\n[State Preservation Check]")
    if frames_before and frames_after:
        last_before = frames_before[-1]
        first_after = frames_after[0]

        if last_before == first_after:
            print("  PASS: Buffer state preserved across rebuild")
        else:
            print("  INFO: Buffer changed after rebuild (expected - effect toggled)")

    print("\n[Frame Continuity Check]")
    recorded_frames = display.get_frames()
    print(f"  Total recorded frames: {len(recorded_frames)}")
    print(f"  Frames before rebuild: {len(frames_before)}")
    print(f"  Frames after rebuild: {len(frames_after)}")

    if len(recorded_frames) == 20:
        print("  PASS: All frames recorded")
    else:
        print("  WARNING: Frame count mismatch")

    # BUGFIX: the original printed frames_before[0] (frame 0) under a
    # "frame 9" label; the comparison across the rebuild boundary needs the
    # LAST pre-rebuild frame.  Also guard the indexing like the state check.
    if frames_before and frames_after:
        print("\n[Visual Comparison - last frame before vs first frame after rebuild]")
        print("\n--- Before rebuild (frame 9) ---")
        for i, line in enumerate(frames_before[-1][:viewport_height]):
            print(f"{i:2}: {line}")

        print("\n--- After rebuild (frame 10) ---")
        for i, line in enumerate(frames_after[0][:viewport_height]):
            print(f"{i:2}: {line}")

    print("\n[Recording Save/Load Test]")
    test_file = Path("/tmp/test_recording.json")
    display.save_recording(test_file)
    print(f"  Saved recording to: {test_file}")

    display2 = DisplayRegistry.create("null")
    display2.init(viewport_width, viewport_height)
    display2.load_recording(test_file)
    loaded_frames = display2.get_frames()
    print(f"  Loaded {len(loaded_frames)} frames from file")

    if len(loaded_frames) == len(recorded_frames):
        print("  PASS: Recording save/load works correctly")
    else:
        print("  WARNING: Frame count mismatch after load")

    test_file.unlink(missing_ok=True)

    pipeline.cleanup()
    display.cleanup()

    print("\n" + "=" * 60)
    print("Demo complete!")
    print("=" * 60 + "\n")


def main():
    """Parse the optional ``--viewport WxH`` flag and run the demo."""
    viewport_width = 40
    viewport_height = 15

    if "--viewport" in sys.argv:
        idx = sys.argv.index("--viewport")
        if idx + 1 < len(sys.argv):
            vp = sys.argv[idx + 1]
            try:
                viewport_width, viewport_height = map(int, vp.split("x"))
            except ValueError:
                print("Error: Invalid viewport format. Use WxH (e.g., 40x15)")
                sys.exit(1)

    run_demo(viewport_width, viewport_height)


if __name__ == "__main__":
    main()
import argparse
import sys
import time
from pathlib import Path

# Add mainline to path
sys.path.insert(0, str(Path(__file__).parent.parent))

from engine.data_sources.sources import DataSource, ImageItem
from engine.sensors.oscillator import OscillatorSensor, register_oscillator_sensor


class ModulatedOscillator:
    """Oscillator whose frequency is modulated by another oscillator (FM chain)."""

    def __init__(
        self,
        name: str,
        waveform: str = "sine",
        base_frequency: float = 1.0,
        modulator: "OscillatorSensor | None" = None,
        modulation_depth: float = 0.5,
    ):
        self.name = name
        self.waveform = waveform
        self.base_frequency = base_frequency
        self.modulator = modulator
        self.modulation_depth = modulation_depth

        # Register and start the underlying carrier oscillator.
        register_oscillator_sensor(
            name=name, waveform=waveform, frequency=base_frequency
        )
        self.osc = OscillatorSensor(
            name=name, waveform=waveform, frequency=base_frequency
        )
        self.osc.start()

    def read(self):
        """Read the carrier, retuning it from the modulator first if present."""
        if self.modulator:
            mod_reading = self.modulator.read()
            if mod_reading:
                # Map modulator value (0..1) to a symmetric frequency offset,
                # clamped to the 0.1-20 Hz LFO range.
                mod_offset = (mod_reading.value - 0.5) * 2 * self.modulation_depth
                effective_freq = self.base_frequency + mod_offset
                effective_freq = max(0.1, min(effective_freq, 20.0))
                self.osc._frequency = effective_freq
        return self.osc.read()

    def get_phase(self):
        """Return the carrier oscillator's current phase."""
        return self.osc._phase

    def get_effective_frequency(self):
        """Return the instantaneous (modulated) frequency.

        BUGFIX: the original called ``self.modulator.read()`` twice - once in
        the condition and once for the value - which reads the sensor twice
        per call and raised AttributeError if the second read returned None.
        """
        if self.modulator:
            mod_reading = self.modulator.read()
            if mod_reading:
                mod_offset = (mod_reading.value - 0.5) * 2 * self.modulation_depth
                return max(0.1, min(self.base_frequency + mod_offset, 20.0))
        return self.base_frequency

    def stop(self):
        """Stop the carrier oscillator."""
        self.osc.stop()


class OscilloscopeDataSource(DataSource):
    """Dynamic data source that generates oscilloscope images from oscillators."""

    def __init__(
        self,
        modulator: OscillatorSensor,
        modulated: ModulatedOscillator,
        width: int = 200,
        height: int = 100,
    ):
        self.modulator = modulator
        self.modulated = modulated
        self.width = width
        self.height = height
        self.frame = 0

        # Probe for optional rendering deps without importing them yet.
        import importlib.util

        self.pygame_available = importlib.util.find_spec("pygame") is not None
        self.pil_available = importlib.util.find_spec("PIL") is not None

    @property
    def name(self) -> str:
        return "oscilloscope_image"

    @property
    def is_dynamic(self) -> bool:
        return True

    def fetch(self) -> list[ImageItem]:
        """Generate one oscilloscope frame as an ImageItem (empty list if
        pygame/PIL are unavailable)."""
        if not self.pygame_available or not self.pil_available:
            # Fallback: caller renders a text message instead.
            return []

        import pygame
        from PIL import Image

        surface = pygame.Surface((self.width, self.height))
        surface.fill((10, 10, 20))  # dark background

        # Current readings drive metadata; the traces are sampled below.
        mod_reading = self.modulator.read()
        mod_val = mod_reading.value if mod_reading else 0.5
        modulated_reading = self.modulated.read()
        modulated_val = modulated_reading.value if modulated_reading else 0.5

        # Modulator waveform in the top half.
        top_height = self.height // 2
        waveform_fn = self.modulator.WAVEFORMS[self.modulator.waveform]
        mod_time_offset = self.modulator._phase * self.modulator.frequency * 0.3

        prev_x, prev_y = 0, 0
        for x in range(self.width):
            col_fraction = x / self.width
            time_pos = mod_time_offset + col_fraction
            sample = waveform_fn(time_pos * self.modulator.frequency * 2)
            y = int(top_height - (sample * (top_height - 10)) - 5)
            if x > 0:
                pygame.draw.line(surface, (100, 200, 255), (prev_x, prev_y), (x, y), 1)
            prev_x, prev_y = x, y

        # Separator between the two traces.
        pygame.draw.line(
            surface, (80, 80, 100), (0, top_height), (self.width, top_height), 1
        )

        # Modulated waveform in the bottom half.
        bottom_start = top_height + 1
        bottom_height = self.height - bottom_start - 1
        waveform_fn = self.modulated.osc.WAVEFORMS[self.modulated.waveform]
        modulated_time_offset = (
            self.modulated.get_phase() * self.modulated.get_effective_frequency() * 0.3
        )

        prev_x, prev_y = 0, 0
        for x in range(self.width):
            col_fraction = x / self.width
            time_pos = modulated_time_offset + col_fraction
            sample = waveform_fn(
                time_pos * self.modulated.get_effective_frequency() * 2
            )
            y = int(
                bottom_start + (bottom_height - (sample * (bottom_height - 10))) - 5
            )
            if x > 0:
                pygame.draw.line(surface, (255, 150, 100), (prev_x, prev_y), (x, y), 1)
            prev_x, prev_y = x, y

        # Pygame surface -> PIL: RGB bytes, then 8-bit grayscale + alpha.
        img_str = pygame.image.tostring(surface, "RGB")
        pil_rgb = Image.frombytes("RGB", (self.width, self.height), img_str)
        pil_gray = pil_rgb.convert("L")
        alpha = Image.new("L", (self.width, self.height), 255)  # fully opaque
        pil_rgba = Image.merge("RGBA", (pil_gray, pil_gray, pil_gray, alpha))

        item = ImageItem(
            image=pil_rgba,
            source="oscilloscope_image",
            timestamp=str(time.time()),
            path=None,
            metadata={
                "frame": self.frame,
                "mod_value": mod_val,
                "modulated_value": modulated_val,
            },
        )

        self.frame += 1
        return [item]


def render_pil_to_ansi(
    pil_image, terminal_width: int = 80, terminal_height: int = 30
) -> str:
    """Convert a PIL image (grayscale + optional alpha) to an ANSI-art string.

    The image is oversampled 2x and then sampled every 2nd pixel in both
    axes to roughly correct the terminal cell aspect ratio.
    """
    resized = pil_image.resize((terminal_width * 2, terminal_height * 2))

    gray = resized.convert("L")
    # Split once (the original split twice for the length check).
    bands = resized.split()
    alpha = bands[3] if len(bands) > 3 else None

    # Character ramp, dark to light.
    chars = " .:-=+*#%@"

    lines = []
    for y in range(0, resized.height, 2):
        line = ""
        for x in range(0, resized.width, 2):
            pixel = gray.getpixel((x, y))

            if alpha:
                a = alpha.getpixel((x, y))
                if a < 128:  # Transparent
                    line += " "
                    continue

            char_index = int((pixel / 255) * (len(chars) - 1))
            line += chars[char_index]
        lines.append(line)

    return "\n".join(lines)


def demo_image_oscilloscope(
    waveform: str = "sine",
    base_freq: float = 0.5,
    modulate: bool = False,
    mod_waveform: str = "sine",
    mod_freq: float = 0.5,
    mod_depth: float = 0.5,
    frames: int = 0,
):
    """Run the oscilloscope demo via the image data source at ~15 FPS.

    Args:
        waveform: Carrier waveform name.
        base_freq: Carrier base frequency in Hz.
        modulate: Whether to chain the LFO modulator onto the carrier.
        mod_waveform: Modulator waveform name.
        mod_freq: Modulator frequency in Hz.
        mod_depth: Modulation depth (0..1).
        frames: Frame count to render; 0 runs until Ctrl+C.
    """
    frame_interval = 1.0 / 15.0  # 15 FPS

    print("Oscilloscope with Image Data Source Integration")
    print("Frame rate: 15 FPS")
    print()

    modulator = OscillatorSensor(
        name="modulator", waveform=mod_waveform, frequency=mod_freq
    )
    modulator.start()

    modulated = ModulatedOscillator(
        name="modulated",
        waveform=waveform,
        base_frequency=base_freq,
        modulator=modulator if modulate else None,
        modulation_depth=mod_depth,
    )

    image_source = OscilloscopeDataSource(
        modulator=modulator,
        modulated=modulated,
        width=200,
        height=100,
    )

    try:
        frame = 0
        last_time = time.time()

        while frames == 0 or frame < frames:
            images = image_source.fetch()

            if images:
                visualization = render_pil_to_ansi(
                    images[0].image, terminal_width=80, terminal_height=30
                )
            else:
                visualization = (
                    "Pygame or PIL not available\n\n[Image rendering disabled]"
                )

            header = f"IMAGE SOURCE MODE | Frame: {frame}"
            header_line = "─" * 80
            visualization = f"{header}\n{header_line}\n" + visualization

            # Home the cursor (no full clear) to reduce flicker.
            print("\033[H" + visualization)

            # Keep a steady frame rate regardless of render time.
            elapsed = time.time() - last_time
            sleep_time = max(0, frame_interval - elapsed)
            time.sleep(sleep_time)
            last_time = time.time()

            frame += 1

    except KeyboardInterrupt:
        print("\n\nDemo stopped by user")

    finally:
        modulator.stop()
        modulated.stop()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Oscilloscope with image data source integration"
    )
    parser.add_argument(
        "--waveform",
        choices=["sine", "square", "sawtooth", "triangle", "noise"],
        default="sine",
        help="Main waveform type",
    )
    parser.add_argument(
        "--frequency",
        type=float,
        default=0.5,
        help="Main oscillator frequency",
    )
    parser.add_argument(
        "--lfo",
        action="store_true",
        help="Use slow LFO frequency (0.5Hz)",
    )
    parser.add_argument(
        "--modulate",
        action="store_true",
        help="Enable LFO modulation chain",
    )
    parser.add_argument(
        "--mod-waveform",
        choices=["sine", "square", "sawtooth", "triangle", "noise"],
        default="sine",
        help="Modulator waveform type",
    )
    parser.add_argument(
        "--mod-freq",
        type=float,
        default=0.5,
        help="Modulator frequency in Hz",
    )
    parser.add_argument(
        "--mod-depth",
        type=float,
        default=0.5,
        help="Modulation depth",
    )
    parser.add_argument(
        "--frames",
        type=int,
        default=0,
        help="Number of frames to render",
    )

    args = parser.parse_args()

    base_freq = args.frequency
    if args.lfo:
        base_freq = 0.5

    demo_image_oscilloscope(
        waveform=args.waveform,
        base_freq=base_freq,
        modulate=args.modulate,
        mod_waveform=args.mod_waveform,
        mod_freq=args.mod_freq,
        mod_depth=args.mod_depth,
        frames=args.frames,
    )
import argparse
import sys
import time
from pathlib import Path

# Add mainline to path
sys.path.insert(0, str(Path(__file__).parent.parent))


def render_waveform(width: int, height: int, osc: "OscillatorSensor", frame: int) -> str:
    """Render one full cycle of *osc*'s waveform as ASCII art.

    Output layout (``height`` lines total): header, separator,
    ``height - 3`` plot rows, footer.  The column nearest the oscillator's
    live phase is marked with "◎"; other plotted samples use "█".

    Args:
        width: Plot width in characters.
        height: Total output height in lines.
        osc: Object exposing the oscillator-sensor interface
            (``read``, ``WAVEFORMS``, ``_waveform``, ``_phase``, ...).
        frame: Frame counter shown in the footer.
    """
    current_reading = osc.read()
    current_value = current_reading.value if current_reading else 0.0

    # Sample the waveform function directly across one complete cycle
    # (0..1): this shows the waveform's shape, not a live rolling trace.
    waveform_fn = osc.WAVEFORMS[osc._waveform]
    samples = [waveform_fn(i / width) for i in range(width)]

    lines = []
    lines.append(
        f"Oscillator: {osc.name} | Waveform: {osc.waveform} | Freq: {osc.frequency}Hz"
    )
    lines.append("─" * width)

    num_rows = height - 3  # header, separator, footer take 3 lines
    for row in range(num_rows):
        line_chars = []
        for x, sample in enumerate(samples):
            # Map sample (0.0-1.0) to a plot row.  Row 0 is printed first
            # (top of the screen), so invert: 1.0 -> top row, 0.0 -> bottom.
            # BUGFIX: the original omitted this inversion and drew the
            # waveform upside-down, contradicting its own comments and the
            # companion demo_oscilloscope.py renderer.
            sample_row = num_rows - 1 - int(sample * (num_rows - 1))
            if sample_row == row:
                if abs(x / width - (osc._phase % 1.0)) < 0.02:
                    line_chars.append("◎")  # current position marker
                else:
                    line_chars.append("█")
            else:
                line_chars.append(" ")
        lines.append("".join(line_chars))

    footer = f"Value: {current_value:.3f} | Frame: {frame} | Phase: {osc._phase:.2f}"
    lines.append(footer)

    return "\n".join(lines)


def demo_oscillator(waveform: str = "sine", frequency: float = 1.0, frames: int = 0):
    """Run the live oscillator demo loop at ~20 FPS.

    Args:
        waveform: Waveform name to register.
        frequency: Oscillator frequency in Hz.
        frames: Frame count to render; 0 runs until Ctrl+C.
    """
    # Imported here so the pure renderer above stays importable without
    # the engine package (e.g. for unit tests).
    from engine.sensors.oscillator import OscillatorSensor, register_oscillator_sensor

    print(f"Starting oscillator demo: {waveform} wave at {frequency}Hz")
    if frames > 0:
        print(f"Running for {frames} frames")
    else:
        print("Press Ctrl+C to stop")
    print()

    register_oscillator_sensor(name="demo_osc", waveform=waveform, frequency=frequency)
    osc = OscillatorSensor(name="demo_osc", waveform=waveform, frequency=frequency)
    osc.start()

    try:
        frame = 0
        while frames == 0 or frame < frames:
            visualization = render_waveform(80, 20, osc, frame)

            # Clear screen and home the cursor before each frame.
            print("\033[H\033[J" + visualization)

            time.sleep(0.05)  # 20 FPS
            frame += 1

    except KeyboardInterrupt:
        print("\n\nDemo stopped by user")

    finally:
        osc.stop()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Oscillator sensor demo")
    parser.add_argument(
        "--waveform",
        choices=["sine", "square", "sawtooth", "triangle", "noise"],
        default="sine",
        help="Waveform type",
    )
    parser.add_argument(
        "--frequency", type=float, default=1.0, help="Oscillator frequency in Hz"
    )
    parser.add_argument(
        "--frames",
        type=int,
        default=0,
        help="Number of frames to render (0 = infinite until Ctrl+C)",
    )

    args = parser.parse_args()
    demo_oscillator(args.waveform, args.frequency, args.frames)
#!/usr/bin/env python3
"""
Oscilloscope Demo - Real-time waveform visualization

This demonstrates a real oscilloscope-style display where:
1. A complete waveform is drawn on the canvas
2. The camera scrolls horizontally (time axis)
3. The "pen" traces the waveform vertically at the center

Think of it as:
- Canvas: Contains the waveform pattern (like a stamp)
- Camera: Moves left-to-right, revealing different parts of the waveform
- Pen: Always at center X, moves vertically with the signal value

Usage:
    uv run python scripts/demo_oscilloscope.py --frequency 1.0 --speed 10
"""

import argparse
import math
import time
import sys
from pathlib import Path

# Add mainline to path
sys.path.insert(0, str(Path(__file__).parent.parent))

from engine.sensors.oscillator import OscillatorSensor, register_oscillator_sensor


def render_oscilloscope(
    width: int,
    height: int,
    osc: OscillatorSensor,
    frame: int,
) -> str:
    """Render one frame of the oscilloscope-style display.

    Args:
        width: Terminal columns available for the trace.
        height: Total rows, including 3 rows reserved for header/footer chrome.
        osc: Running oscillator sensor to sample.
        frame: Frame counter (display only, shown in the footer).

    Returns:
        The complete frame as newline-joined rows.
    """
    # Get current reading (0.0 to 1.0)
    reading = osc.read()
    current_value = reading.value if reading else 0.5
    # NOTE(review): reads the sensor's private phase; assumes OscillatorSensor
    # keeps `_phase` normalized to [0, 1) — confirm in engine.sensors.oscillator.
    phase = osc._phase
    frequency = osc.frequency

    # Build visualization
    lines = []

    # Header with sensor info
    header = (
        f"Oscilloscope: {osc.name} | Wave: {osc.waveform} | "
        f"Freq: {osc.frequency}Hz | Phase: {phase:.2f}"
    )
    lines.append(header)
    lines.append("─" * width)

    # Fix: use the public `waveform` attribute (as the header above and the
    # sibling demo scripts do) instead of the private `_waveform` field.
    waveform_fn = osc.WAVEFORMS[osc.waveform]

    # Calculate time offset for scrolling: the trace advances with the
    # oscillator phase so a full sweep tracks one frequency cycle.
    time_offset = phase * frequency * 2.0

    # Pre-calculate one sample per column (each column is a time point on X).
    samples = []
    for col in range(width):
        # Time position for this column (0.0 to 1.0 across width)
        col_fraction = col / width
        # Combine with time offset for scrolling effect
        time_pos = time_offset + col_fraction
        # Multiply by frequency to get correct number of cycles shown
        sample_value = waveform_fn(time_pos * frequency * 2)
        samples.append(sample_value)

    # Draw the trace: a cell is lit when its column's sample falls within one
    # row's worth of vertical tolerance.
    for row in range(height - 3):  # Reserve 3 lines for header/footer
        # Vertical position for this row (1.0 at top row, 0.0 at bottom)
        row_pos = 1.0 - (row / (height - 4))

        line_chars = []
        for col in range(width):
            sample = samples[col]

            # Check if this sample falls in this row
            tolerance = 1.0 / (height - 4)
            if abs(sample - row_pos) < tolerance:
                line_chars.append("█")
            else:
                line_chars.append(" ")
        lines.append("".join(line_chars))

    # Indicator row: marks the current output value along the X axis.
    center_line = list(" " * width)
    indicator_x = int(current_value * (width - 1))
    if 0 <= indicator_x < width:
        center_line[indicator_x] = "◎"
    lines.append("".join(center_line))

    # Footer with current value
    footer = f"Value: {current_value:.3f} | Frame: {frame} | Phase: {phase:.2f}"
    lines.append(footer)

    return "\n".join(lines)


def demo_oscilloscope(
    waveform: str = "sine",
    frequency: float = 1.0,
    frames: int = 0,
):
    """Run the oscilloscope demo loop at ~60 FPS until Ctrl+C or `frames` frames."""
    # Determine if this is LFO range
    is_lfo = frequency <= 20.0 and frequency >= 0.1
    freq_type = "LFO" if is_lfo else "Audio"

    print(f"Oscilloscope demo: {waveform} wave")
    print(f"Frequency: {frequency}Hz ({freq_type} range)")
    if frames > 0:
        print(f"Running for {frames} frames")
    else:
        print("Press Ctrl+C to stop")
    print()

    # Create oscillator sensor
    register_oscillator_sensor(
        name="oscilloscope_osc", waveform=waveform, frequency=frequency
    )
    osc = OscillatorSensor(
        name="oscilloscope_osc", waveform=waveform, frequency=frequency
    )
    osc.start()

    # Run demo loop
    try:
        frame = 0
        while frames == 0 or frame < frames:
            # Render oscilloscope display
            visualization = render_oscilloscope(80, 22, osc, frame)

            # ANSI: home cursor + clear screen before each frame
            print("\033[H\033[J" + visualization)

            time.sleep(1.0 / 60.0)  # 60 FPS
            frame += 1

    except KeyboardInterrupt:
        print("\n\nDemo stopped by user")

    finally:
        osc.stop()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Oscilloscope demo")
    parser.add_argument(
        "--waveform",
        choices=["sine", "square", "sawtooth", "triangle", "noise"],
        default="sine",
        help="Waveform type",
    )
    parser.add_argument(
        "--frequency",
        type=float,
        default=1.0,
        help="Oscillator frequency in Hz (LFO: 0.1-20Hz, Audio: >20Hz)",
    )
    parser.add_argument(
        "--lfo",
        action="store_true",
        help="Use LFO frequency (0.5Hz - slow modulation)",
    )
    parser.add_argument(
        "--fast-lfo",
        action="store_true",
        help="Use fast LFO frequency (5Hz - rhythmic modulation)",
    )
    parser.add_argument(
        "--frames",
        type=int,
        default=0,
        help="Number of frames to render (0 = infinite until Ctrl+C)",
    )

    args = parser.parse_args()

    # Determine frequency based on mode (flags override --frequency)
    frequency = args.frequency
    if args.lfo:
        frequency = 0.5  # Slow LFO for modulation
    elif args.fast_lfo:
        frequency = 5.0  # Fast LFO for rhythmic modulation

    demo_oscilloscope(
        waveform=args.waveform,
        frequency=frequency,
        frames=args.frames,
    )
#!/usr/bin/env python3
"""
Enhanced Oscilloscope with LFO Modulation Chain

This demo features:
1. Slower frame rate (15 FPS) for human appreciation
2. Reduced flicker using cursor positioning
3. LFO modulation chain: LFO1 modulates LFO2 frequency
4. Multiple visualization modes

Usage:
    # Simple LFO
    uv run python scripts/demo_oscilloscope_mod.py --lfo

    # LFO modulation chain: LFO1 modulates LFO2 frequency
    uv run python scripts/demo_oscilloscope_mod.py --modulate --lfo

    # Custom modulation depth and rate
    uv run python scripts/demo_oscilloscope_mod.py --modulate --lfo --mod-depth 0.5 --mod-freq 0.25
"""

import argparse
import sys
import time
from pathlib import Path

# Add mainline to path
sys.path.insert(0, str(Path(__file__).parent.parent))

from engine.sensors.oscillator import OscillatorSensor, register_oscillator_sensor


class ModulatedOscillator:
    """
    Oscillator with frequency modulation from another oscillator.

    Frequency = base_frequency + (modulator_value * modulation_depth)
    """

    def __init__(
        self,
        name: str,
        waveform: str = "sine",
        base_frequency: float = 1.0,
        modulator: "OscillatorSensor | None" = None,
        modulation_depth: float = 0.5,
    ):
        self.name = name
        self.waveform = waveform
        self.base_frequency = base_frequency
        self.modulator = modulator
        self.modulation_depth = modulation_depth

        # Create the oscillator sensor
        register_oscillator_sensor(
            name=name, waveform=waveform, frequency=base_frequency
        )
        self.osc = OscillatorSensor(
            name=name, waveform=waveform, frequency=base_frequency
        )
        self.osc.start()

    def read(self):
        """Read current value, applying modulation if present."""
        # Update frequency based on modulator
        if self.modulator:
            mod_reading = self.modulator.read()
            if mod_reading:
                # Modulator value (0-1) affects frequency:
                # map 0-1 to -modulation_depth..+modulation_depth
                mod_offset = (mod_reading.value - 0.5) * 2 * self.modulation_depth
                effective_freq = self.base_frequency + mod_offset
                # Clamp to reasonable range
                effective_freq = max(0.1, min(effective_freq, 20.0))
                # NOTE(review): writes the sensor's private `_frequency`;
                # assumes OscillatorSensor honors it on the next read — confirm.
                self.osc._frequency = effective_freq

        return self.osc.read()

    def get_phase(self):
        """Get current phase."""
        return self.osc._phase

    def get_effective_frequency(self):
        """Get current effective frequency (after modulation).

        Fix: the original called ``self.modulator.read()`` twice per call
        (once in the guard, once for the value), consuming two sensor
        readings. Read once, matching the pipeline demo's implementation.
        """
        if self.modulator:
            mod_reading = self.modulator.read()
            if mod_reading:
                mod_offset = (mod_reading.value - 0.5) * 2 * self.modulation_depth
                return max(0.1, min(self.base_frequency + mod_offset, 20.0))
        return self.base_frequency

    def stop(self):
        """Stop the oscillator."""
        self.osc.stop()


def render_dual_waveform(
    width: int,
    height: int,
    modulator: OscillatorSensor,
    modulated: ModulatedOscillator,
    frame: int,
) -> str:
    """Render both modulator and modulated waveforms stacked vertically.

    Layout: 2 header rows, rule, modulator trace (top half), modulation info
    row, modulated trace (bottom half), footer.
    """
    # Get readings
    mod_reading = modulator.read()
    mod_val = mod_reading.value if mod_reading else 0.5

    modulated_reading = modulated.read()
    modulated_val = modulated_reading.value if modulated_reading else 0.5

    # Build visualization
    lines = []

    # Header with sensor info
    header1 = f"MODULATOR: {modulator.name} | Wave: {modulator.waveform} | Freq: {modulator.frequency:.2f}Hz"
    header2 = f"MODULATED: {modulated.name} | Wave: {modulated.waveform} | Base: {modulated.base_frequency:.2f}Hz | Eff: {modulated.get_effective_frequency():.2f}Hz"
    lines.append(header1)
    lines.append(header2)
    lines.append("─" * width)

    # Render modulator waveform (top half); 5 rows reserved for chrome
    top_height = (height - 5) // 2
    waveform_fn = modulator.WAVEFORMS[modulator.waveform]

    # Calculate time offset for scrolling
    mod_time_offset = modulator._phase * modulator.frequency * 0.3

    for row in range(top_height):
        row_pos = 1.0 - (row / (top_height - 1))
        line_chars = []
        for col in range(width):
            col_fraction = col / width
            time_pos = mod_time_offset + col_fraction
            sample = waveform_fn(time_pos * modulator.frequency * 2)
            tolerance = 1.0 / (top_height - 1)
            if abs(sample - row_pos) < tolerance:
                line_chars.append("█")
            else:
                line_chars.append(" ")
        lines.append("".join(line_chars))

    # Separator line with modulation info
    lines.append(
        f"─ MODULATION: depth={modulated.modulation_depth:.2f} | mod_value={mod_val:.2f} ─"
    )

    # Render modulated waveform (bottom half)
    bottom_height = height - top_height - 5
    waveform_fn = modulated.osc.WAVEFORMS[modulated.waveform]

    # Calculate time offset for scrolling
    modulated_time_offset = (
        modulated.get_phase() * modulated.get_effective_frequency() * 0.3
    )

    for row in range(bottom_height):
        row_pos = 1.0 - (row / (bottom_height - 1))
        line_chars = []
        for col in range(width):
            col_fraction = col / width
            time_pos = modulated_time_offset + col_fraction
            sample = waveform_fn(time_pos * modulated.get_effective_frequency() * 2)
            tolerance = 1.0 / (bottom_height - 1)
            if abs(sample - row_pos) < tolerance:
                line_chars.append("█")
            else:
                line_chars.append(" ")
        lines.append("".join(line_chars))

    # Footer with current values
    footer = f"Mod Value: {mod_val:.3f} | Modulated Value: {modulated_val:.3f} | Frame: {frame}"
    lines.append(footer)

    return "\n".join(lines)


def render_single_waveform(
    width: int,
    height: int,
    osc: OscillatorSensor,
    frame: int,
) -> str:
    """Render a single waveform (for non-modulated mode)."""
    reading = osc.read()
    current_value = reading.value if reading else 0.5
    phase = osc._phase
    frequency = osc.frequency

    # Build visualization
    lines = []

    # Header with sensor info
    header = (
        f"Oscilloscope: {osc.name} | Wave: {osc.waveform} | "
        f"Freq: {frequency:.2f}Hz | Phase: {phase:.2f}"
    )
    lines.append(header)
    lines.append("─" * width)

    # Draw oscilloscope trace
    waveform_fn = osc.WAVEFORMS[osc.waveform]
    time_offset = phase * frequency * 0.3

    for row in range(height - 3):
        row_pos = 1.0 - (row / (height - 4))
        line_chars = []
        for col in range(width):
            col_fraction = col / width
            time_pos = time_offset + col_fraction
            sample = waveform_fn(time_pos * frequency * 2)
            tolerance = 1.0 / (height - 4)
            if abs(sample - row_pos) < tolerance:
                line_chars.append("█")
            else:
                line_chars.append(" ")
        lines.append("".join(line_chars))

    # Footer
    footer = f"Value: {current_value:.3f} | Frame: {frame} | Phase: {phase:.2f}"
    lines.append(footer)

    return "\n".join(lines)


def demo_oscilloscope_mod(
    waveform: str = "sine",
    base_freq: float = 1.0,
    modulate: bool = False,
    mod_waveform: str = "sine",
    mod_freq: float = 0.5,
    mod_depth: float = 0.5,
    frames: int = 0,
):
    """Run enhanced oscilloscope demo with modulation support."""
    # Frame timing for smooth 15 FPS
    frame_interval = 1.0 / 15.0  # 66.67ms per frame

    print("Enhanced Oscilloscope Demo")
    print("Frame rate: 15 FPS (66ms per frame)")
    if modulate:
        print(
            f"Modulation: {mod_waveform} @ {mod_freq}Hz -> {waveform} @ {base_freq}Hz"
        )
        print(f"Modulation depth: {mod_depth}")
    else:
        print(f"Waveform: {waveform} @ {base_freq}Hz")
    if frames > 0:
        print(f"Running for {frames} frames")
    else:
        print("Press Ctrl+C to stop")
    print()

    # Create oscillators
    if modulate:
        # Create modulation chain: modulator -> modulated
        modulator = OscillatorSensor(
            name="modulator", waveform=mod_waveform, frequency=mod_freq
        )
        modulator.start()

        modulated = ModulatedOscillator(
            name="modulated",
            waveform=waveform,
            base_frequency=base_freq,
            modulator=modulator,
            modulation_depth=mod_depth,
        )
    else:
        # Single oscillator
        register_oscillator_sensor(
            name="oscilloscope", waveform=waveform, frequency=base_freq
        )
        osc = OscillatorSensor(
            name="oscilloscope", waveform=waveform, frequency=base_freq
        )
        osc.start()

    # Run demo loop with consistent timing
    try:
        frame = 0
        last_time = time.time()

        while frames == 0 or frame < frames:
            # Render based on mode
            if modulate:
                visualization = render_dual_waveform(
                    80, 30, modulator, modulated, frame
                )
            else:
                visualization = render_single_waveform(80, 22, osc, frame)

            # Use cursor positioning instead of full clear to reduce flicker
            print("\033[H" + visualization)

            # Calculate sleep time for consistent 15 FPS
            elapsed = time.time() - last_time
            sleep_time = max(0, frame_interval - elapsed)
            time.sleep(sleep_time)
            last_time = time.time()

            frame += 1

    except KeyboardInterrupt:
        print("\n\nDemo stopped by user")

    finally:
        if modulate:
            modulator.stop()
            modulated.stop()
        else:
            osc.stop()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Enhanced oscilloscope with LFO modulation chain"
    )
    parser.add_argument(
        "--waveform",
        choices=["sine", "square", "sawtooth", "triangle", "noise"],
        default="sine",
        help="Main waveform type",
    )
    parser.add_argument(
        "--frequency",
        type=float,
        default=1.0,
        help="Main oscillator frequency (LFO range: 0.1-20Hz)",
    )
    parser.add_argument(
        "--lfo",
        action="store_true",
        help="Use slow LFO frequency (0.5Hz) for main oscillator",
    )
    parser.add_argument(
        "--modulate",
        action="store_true",
        help="Enable LFO modulation chain (modulator modulates main oscillator)",
    )
    parser.add_argument(
        "--mod-waveform",
        choices=["sine", "square", "sawtooth", "triangle", "noise"],
        default="sine",
        help="Modulator waveform type",
    )
    parser.add_argument(
        "--mod-freq",
        type=float,
        default=0.5,
        help="Modulator frequency in Hz",
    )
    parser.add_argument(
        "--mod-depth",
        type=float,
        default=0.5,
        help="Modulation depth (0.0-1.0, higher = more frequency variation)",
    )
    parser.add_argument(
        "--frames",
        type=int,
        default=0,
        help="Number of frames to render (0 = infinite until Ctrl+C)",
    )

    args = parser.parse_args()

    # Set frequency based on LFO flag
    base_freq = args.frequency
    if args.lfo:
        base_freq = 0.5

    demo_oscilloscope_mod(
        waveform=args.waveform,
        base_freq=base_freq,
        modulate=args.modulate,
        mod_waveform=args.mod_waveform,
        mod_freq=args.mod_freq,
        mod_depth=args.mod_depth,
        frames=args.frames,
    )
#!/usr/bin/env python3
"""
Enhanced Oscilloscope with Pipeline Switching

This demo features:
1. Text-based oscilloscope (first 15 seconds)
2. Pygame renderer with PIL to ANSI conversion (next 15 seconds)
3. Continuous looping between the two modes

Usage:
    uv run python scripts/demo_oscilloscope_pipeline.py --lfo --modulate
"""

import argparse
import sys
import time
from pathlib import Path

# Add mainline to path
sys.path.insert(0, str(Path(__file__).parent.parent))

from engine.sensors.oscillator import OscillatorSensor, register_oscillator_sensor


class ModulatedOscillator:
    """Oscillator with frequency modulation from another oscillator."""

    def __init__(
        self,
        name: str,
        waveform: str = "sine",
        base_frequency: float = 1.0,
        modulator: "OscillatorSensor | None" = None,
        modulation_depth: float = 0.5,
    ):
        self.name = name
        self.waveform = waveform
        self.base_frequency = base_frequency
        self.modulator = modulator
        self.modulation_depth = modulation_depth

        register_oscillator_sensor(
            name=name, waveform=waveform, frequency=base_frequency
        )
        self.osc = OscillatorSensor(
            name=name, waveform=waveform, frequency=base_frequency
        )
        self.osc.start()

    def read(self):
        """Read current value, applying modulation if present."""
        if self.modulator:
            mod_reading = self.modulator.read()
            if mod_reading:
                # Map modulator 0-1 to -depth..+depth frequency offset
                mod_offset = (mod_reading.value - 0.5) * 2 * self.modulation_depth
                effective_freq = self.base_frequency + mod_offset
                effective_freq = max(0.1, min(effective_freq, 20.0))
                self.osc._frequency = effective_freq
        return self.osc.read()

    def get_phase(self):
        """Get current phase (delegates to the wrapped sensor)."""
        return self.osc._phase

    def get_effective_frequency(self):
        """Get current effective frequency (after modulation)."""
        if self.modulator:
            mod_reading = self.modulator.read()
            if mod_reading:
                mod_offset = (mod_reading.value - 0.5) * 2 * self.modulation_depth
                return max(0.1, min(self.base_frequency + mod_offset, 20.0))
        return self.base_frequency

    def stop(self):
        """Stop the wrapped oscillator."""
        self.osc.stop()


def render_text_mode(
    width: int,
    height: int,
    modulator: OscillatorSensor,
    modulated: ModulatedOscillator,
    frame: int,
) -> str:
    """Render dual waveforms in text mode."""
    mod_reading = modulator.read()
    mod_val = mod_reading.value if mod_reading else 0.5
    modulated_reading = modulated.read()
    modulated_val = modulated_reading.value if modulated_reading else 0.5

    lines = []
    header1 = (
        f"TEXT MODE | MODULATOR: {modulator.waveform} @ {modulator.frequency:.2f}Hz"
    )
    header2 = (
        f"MODULATED: {modulated.waveform} @ {modulated.get_effective_frequency():.2f}Hz"
    )
    lines.append(header1)
    lines.append(header2)
    lines.append("─" * width)

    # Modulator waveform (top half); 5 rows reserved for chrome
    top_height = (height - 5) // 2
    waveform_fn = modulator.WAVEFORMS[modulator.waveform]
    mod_time_offset = modulator._phase * modulator.frequency * 0.3

    for row in range(top_height):
        row_pos = 1.0 - (row / (top_height - 1))
        line_chars = []
        for col in range(width):
            col_fraction = col / width
            time_pos = mod_time_offset + col_fraction
            sample = waveform_fn(time_pos * modulator.frequency * 2)
            tolerance = 1.0 / (top_height - 1)
            if abs(sample - row_pos) < tolerance:
                line_chars.append("█")
            else:
                line_chars.append(" ")
        lines.append("".join(line_chars))

    lines.append(
        f"─ MODULATION: depth={modulated.modulation_depth:.2f} | mod_value={mod_val:.2f} ─"
    )

    # Modulated waveform (bottom half)
    bottom_height = height - top_height - 5
    waveform_fn = modulated.osc.WAVEFORMS[modulated.waveform]
    modulated_time_offset = (
        modulated.get_phase() * modulated.get_effective_frequency() * 0.3
    )

    for row in range(bottom_height):
        row_pos = 1.0 - (row / (bottom_height - 1))
        line_chars = []
        for col in range(width):
            col_fraction = col / width
            time_pos = modulated_time_offset + col_fraction
            sample = waveform_fn(time_pos * modulated.get_effective_frequency() * 2)
            tolerance = 1.0 / (bottom_height - 1)
            if abs(sample - row_pos) < tolerance:
                line_chars.append("█")
            else:
                line_chars.append(" ")
        lines.append("".join(line_chars))

    footer = (
        f"Mod Value: {mod_val:.3f} | Modulated: {modulated_val:.3f} | Frame: {frame}"
    )
    lines.append(footer)
    return "\n".join(lines)


def render_pygame_to_ansi(
    width: int,
    height: int,
    modulator: OscillatorSensor,
    modulated: ModulatedOscillator,
    frame: int,
    font_path: str | None,
) -> str:
    """Render waveforms using Pygame, convert to ANSI with PIL.

    Falls back to text mode if pygame/PIL are not installed.
    """
    try:
        import pygame
        from PIL import Image
    except ImportError:
        return "Pygame or PIL not available\n\n" + render_text_mode(
            width, height, modulator, modulated, frame
        )

    # Initialize Pygame surface (oversampled; downscaled again in pil_to_ansi)
    pygame_width = width * 2  # Double for better quality
    pygame_height = height * 4
    surface = pygame.Surface((pygame_width, pygame_height))
    surface.fill((10, 10, 20))  # Dark background

    # Get readings
    mod_reading = modulator.read()
    mod_val = mod_reading.value if mod_reading else 0.5
    modulated_reading = modulated.read()
    modulated_val = modulated_reading.value if modulated_reading else 0.5

    # Draw modulator waveform (top half)
    top_height = pygame_height // 2
    waveform_fn = modulator.WAVEFORMS[modulator.waveform]
    mod_time_offset = modulator._phase * modulator.frequency * 0.3

    prev_x, prev_y = 0, 0
    for x in range(pygame_width):
        col_fraction = x / pygame_width
        time_pos = mod_time_offset + col_fraction
        sample = waveform_fn(time_pos * modulator.frequency * 2)
        y = int(top_height - (sample * (top_height - 20)) - 10)
        if x > 0:
            pygame.draw.line(surface, (100, 200, 255), (prev_x, prev_y), (x, y), 2)
        prev_x, prev_y = x, y

    # Draw separator
    pygame.draw.line(
        surface, (80, 80, 100), (0, top_height), (pygame_width, top_height), 1
    )

    # Draw modulated waveform (bottom half)
    bottom_start = top_height + 10
    bottom_height = pygame_height - bottom_start - 20
    waveform_fn = modulated.osc.WAVEFORMS[modulated.waveform]
    modulated_time_offset = (
        modulated.get_phase() * modulated.get_effective_frequency() * 0.3
    )

    prev_x, prev_y = 0, 0
    for x in range(pygame_width):
        col_fraction = x / pygame_width
        time_pos = modulated_time_offset + col_fraction
        sample = waveform_fn(time_pos * modulated.get_effective_frequency() * 2)
        y = int(bottom_start + (bottom_height - (sample * (bottom_height - 20))) - 10)
        if x > 0:
            pygame.draw.line(surface, (255, 150, 100), (prev_x, prev_y), (x, y), 2)
        prev_x, prev_y = x, y

    # Draw info text on pygame surface (best-effort; font may be missing)
    try:
        if font_path:
            font = pygame.font.Font(font_path, 16)
            info_text = f"PYGAME MODE | Mod: {mod_val:.2f} | Out: {modulated_val:.2f} | Frame: {frame}"
            text_surface = font.render(info_text, True, (200, 200, 200))
            surface.blit(text_surface, (10, 10))
    except Exception:
        pass

    # Convert Pygame surface to PIL Image
    img_str = pygame.image.tostring(surface, "RGB")
    pil_image = Image.frombytes("RGB", (pygame_width, pygame_height), img_str)

    # Convert to ANSI
    return pil_to_ansi(pil_image)


def pil_to_ansi(image) -> str:
    """Convert PIL image to an 80x30 character-art frame."""
    # Resize for terminal display
    terminal_width = 80
    terminal_height = 30
    image = image.resize((terminal_width * 2, terminal_height * 2))

    # Convert to grayscale
    image = image.convert("L")

    # Character ramp (dark to light)
    chars = " .:-=+*#%@"

    lines = []
    for y in range(0, image.height, 2):  # Sample every 2nd row for aspect ratio
        line = ""
        for x in range(0, image.width, 2):
            pixel = image.getpixel((x, y))
            char_index = int((pixel / 255) * (len(chars) - 1))
            line += chars[char_index]
        lines.append(line)

    # Add header info
    header = "PYGAME → ANSI RENDER MODE"
    header_line = "─" * terminal_width
    return f"{header}\n{header_line}\n" + "\n".join(lines)


def demo_with_pipeline_switching(
    waveform: str = "sine",
    base_freq: float = 0.5,
    modulate: bool = False,
    mod_waveform: str = "sine",
    mod_freq: float = 0.5,
    mod_depth: float = 0.5,
    frames: int = 0,
):
    """Run demo with pipeline switching every 15 seconds."""
    frame_interval = 1.0 / 15.0  # 15 FPS
    mode_duration = 15.0  # 15 seconds per mode

    print("Enhanced Oscilloscope with Pipeline Switching")
    print(f"Mode duration: {mode_duration} seconds")
    print("Frame rate: 15 FPS")
    print()

    # Create oscillators (modulator always exists; only wired in when modulating)
    modulator = OscillatorSensor(
        name="modulator", waveform=mod_waveform, frequency=mod_freq
    )
    modulator.start()

    modulated = ModulatedOscillator(
        name="modulated",
        waveform=waveform,
        base_frequency=base_freq,
        modulator=modulator if modulate else None,
        modulation_depth=mod_depth,
    )

    # Find font path (tries both underscore and space variants)
    font_path = Path("fonts/Pixel_Sparta.otf")
    if not font_path.exists():
        font_path = Path("fonts/Pixel Sparta.otf")
    font_path = str(font_path) if font_path.exists() else None

    # Run demo loop
    try:
        frame = 0
        mode_start_time = time.time()
        mode_index = 0  # 0 = text, 1 = pygame

        while frames == 0 or frame < frames:
            elapsed = time.time() - mode_start_time

            # Switch mode every 15 seconds
            if elapsed >= mode_duration:
                mode_index = (mode_index + 1) % 2
                mode_start_time = time.time()
                print(f"\n{'=' * 60}")
                print(
                    f"SWITCHING TO {'PYGAME+ANSI' if mode_index == 1 else 'TEXT'} MODE"
                )
                print(f"{'=' * 60}\n")
                time.sleep(1.0)  # Brief pause to show mode switch

            # Render based on mode
            if mode_index == 0:
                # Text mode
                visualization = render_text_mode(80, 30, modulator, modulated, frame)
            else:
                # Pygame + PIL to ANSI mode
                visualization = render_pygame_to_ansi(
                    80, 30, modulator, modulated, frame, font_path
                )

            # Display with cursor positioning
            print("\033[H" + visualization)

            # Frame timing
            time.sleep(frame_interval)
            frame += 1

    except KeyboardInterrupt:
        print("\n\nDemo stopped by user")

    finally:
        modulator.stop()
        modulated.stop()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Enhanced oscilloscope with pipeline switching"
    )
    parser.add_argument(
        "--waveform",
        choices=["sine", "square", "sawtooth", "triangle", "noise"],
        default="sine",
        help="Main waveform type",
    )
    parser.add_argument(
        "--frequency",
        type=float,
        default=0.5,
        help="Main oscillator frequency (LFO range)",
    )
    parser.add_argument(
        "--lfo",
        action="store_true",
        help="Use slow LFO frequency (0.5Hz)",
    )
    parser.add_argument(
        "--modulate",
        action="store_true",
        help="Enable LFO modulation chain",
    )
    parser.add_argument(
        "--mod-waveform",
        choices=["sine", "square", "sawtooth", "triangle", "noise"],
        default="sine",
        help="Modulator waveform type",
    )
    parser.add_argument(
        "--mod-freq",
        type=float,
        default=0.5,
        help="Modulator frequency in Hz",
    )
    parser.add_argument(
        "--mod-depth",
        type=float,
        default=0.5,
        help="Modulation depth",
    )
    parser.add_argument(
        "--frames",
        type=int,
        default=0,
        help="Number of frames to render (0 = infinite)",
    )

    args = parser.parse_args()

    base_freq = args.frequency
    if args.lfo:
        base_freq = 0.5

    demo_with_pipeline_switching(
        waveform=args.waveform,
        base_freq=base_freq,
        modulate=args.modulate,
        mod_waveform=args.mod_waveform,
        mod_freq=args.mod_freq,
        mod_depth=args.mod_depth,
        # Bug fix: --frames was parsed but never forwarded, so a bounded run
        # (e.g. --frames 100) silently looped forever.
        frames=args.frames,
    )
#!/usr/bin/env python3
"""
Oscillator Data Export

Exports oscillator sensor data in JSON format for external use.

Usage:
    uv run python scripts/oscillator_data_export.py --waveform sine --frequency 1.0 --duration 5.0
"""

import argparse
import json
import time
import sys
from pathlib import Path
from datetime import datetime

# Add mainline to path
sys.path.insert(0, str(Path(__file__).parent.parent))

from engine.sensors.oscillator import OscillatorSensor, register_oscillator_sensor


def export_oscillator_data(
    waveform: str = "sine",
    frequency: float = 1.0,
    duration: float = 5.0,
    sample_rate: float = 60.0,
    output_file: str | None = None,
):
    """Sample an oscillator in real time and export the readings as JSON.

    Args:
        waveform: Waveform type name understood by the oscillator sensor.
        frequency: Oscillator frequency in Hz.
        duration: Total recording time in seconds.
        sample_rate: Samples per second.
        output_file: Path to write JSON to; prints to stdout when None.

    Returns:
        The collected data dict (also written/printed).
    """
    print(f"Exporting oscillator data: {waveform} wave at {frequency}Hz")
    print(f"Duration: {duration}s, Sample rate: {sample_rate}Hz")

    # Create oscillator sensor
    register_oscillator_sensor(
        name="export_osc", waveform=waveform, frequency=frequency
    )
    osc = OscillatorSensor(name="export_osc", waveform=waveform, frequency=frequency)
    osc.start()

    # Collect data
    data = {
        "waveform": waveform,
        "frequency": frequency,
        "duration": duration,
        "sample_rate": sample_rate,
        "timestamp": datetime.now().isoformat(),
        "samples": [],
    }

    sample_interval = 1.0 / sample_rate
    num_samples = int(duration * sample_rate)

    print(f"Collecting {num_samples} samples...")

    for i in range(num_samples):
        reading = osc.read()
        if reading:
            data["samples"].append(
                {
                    "index": i,
                    "timestamp": reading.timestamp,
                    "value": reading.value,
                    # NOTE(review): private phase field — assumes [0, 1); confirm.
                    "phase": osc._phase,
                }
            )
        # Fix: don't sleep after the final sample (the original slept once more
        # than necessary, padding the run by one extra interval).
        if i < num_samples - 1:
            time.sleep(sample_interval)

    osc.stop()

    # Export to JSON
    if output_file:
        # Explicit encoding so output is portable across locales
        with open(output_file, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=2)
        print(f"Data exported to {output_file}")
    else:
        print(json.dumps(data, indent=2))

    return data


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Export oscillator sensor data")
    parser.add_argument(
        "--waveform",
        choices=["sine", "square", "sawtooth", "triangle", "noise"],
        default="sine",
        help="Waveform type",
    )
    parser.add_argument(
        "--frequency", type=float, default=1.0, help="Oscillator frequency in Hz"
    )
    parser.add_argument(
        "--duration", type=float, default=5.0, help="Duration to record in seconds"
    )
    parser.add_argument(
        "--sample-rate", type=float, default=60.0, help="Sample rate in Hz"
    )
    parser.add_argument(
        "--output", "-o", type=str, help="Output JSON file (default: print to stdout)"
    )

    args = parser.parse_args()
    export_oscillator_data(
        waveform=args.waveform,
        frequency=args.frequency,
        duration=args.duration,
        sample_rate=args.sample_rate,
        output_file=args.output,
    )
+""" + +import argparse +import math +import signal +import sys +import time +from typing import Any + +from engine.camera import Camera +from engine.data_sources.checkerboard import CheckerboardDataSource +from engine.data_sources.sources import SourceItem +from engine.display import DisplayRegistry, NullDisplay +from engine.effects.plugins import discover_plugins +from engine.effects import get_registry +from engine.effects.types import EffectConfig +from engine.frame import FrameTimer +from engine.pipeline import Pipeline, PipelineConfig, PipelineContext +from engine.pipeline.adapters import ( + CameraClockStage, + CameraStage, + DataSourceStage, + DisplayStage, + EffectPluginStage, + SourceItemsToBufferStage, +) +from engine.pipeline.stages.framebuffer import FrameBufferStage + + +class GentleOscillator: + """Produces smooth, gentle sinusoidal values.""" + + def __init__( + self, speed: float = 60.0, amplitude: float = 1.0, offset: float = 0.0 + ): + self.speed = speed # Period length in frames + self.amplitude = amplitude # Amplitude + self.offset = offset # Base offset + + def value(self, frame: int) -> float: + """Get oscillated value for given frame.""" + return self.offset + self.amplitude * 0.5 * (1 + math.sin(frame / self.speed)) + + +class PipelineDemoOrchestrator: + """Orchestrates comprehensive pipeline demonstrations.""" + + def __init__( + self, + use_terminal: bool = True, + target_fps: float = 30.0, + effect_duration: float = 8.0, + mode_duration: float = 3.0, + enable_fps_switch: bool = False, + loop: bool = False, + verbose: bool = False, + ): + self.use_terminal = use_terminal + self.target_fps = target_fps + self.effect_duration = effect_duration + self.mode_duration = mode_duration + self.enable_fps_switch = enable_fps_switch + self.loop = loop + self.verbose = verbose + self.frame_count = 0 + self.pipeline = None + self.context = None + self.framebuffer = None + self.camera = None + self.timer = None + + def log(self, message: str, verbose: 
bool = False): + """Print with timestamp if verbose or always-important.""" + if self.verbose or not verbose: + print(f"[{time.strftime('%H:%M:%S')}] {message}") + + def build_base_pipeline( + self, camera_type: str = "scroll", camera_speed: float = 0.5 + ): + """Build a base pipeline with all required components.""" + self.log(f"Building base pipeline: camera={camera_type}, speed={camera_speed}") + + # Camera + camera = Camera.scroll(speed=camera_speed) + camera.set_canvas_size(200, 200) + + # Context + ctx = PipelineContext() + + # Pipeline config + config = PipelineConfig( + source="empty", + display="terminal" if self.use_terminal else "null", + camera=camera_type, + effects=[], + enable_metrics=True, + ) + pipeline = Pipeline(config=config, context=ctx) + + # Use a large checkerboard pattern for visible motion effects + source = CheckerboardDataSource(width=200, height=200, square_size=10) + pipeline.add_stage("source", DataSourceStage(source, name="checkerboard")) + + # Add camera clock (must run every frame) + pipeline.add_stage( + "camera_update", CameraClockStage(camera, name="camera-clock") + ) + + # Add render + pipeline.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer")) + + # Add camera stage + pipeline.add_stage("camera", CameraStage(camera, name="camera")) + + # Add framebuffer (optional for effects that use it) + self.framebuffer = FrameBufferStage(name="default", history_depth=5) + pipeline.add_stage("framebuffer", self.framebuffer) + + # Add display + display_backend = "terminal" if self.use_terminal else "null" + display = DisplayRegistry.create(display_backend) + if display: + pipeline.add_stage("display", DisplayStage(display, name=display_backend)) + + # Build and initialize + pipeline.build(auto_inject=False) + pipeline.initialize() + + self.pipeline = pipeline + self.context = ctx + self.camera = camera + + self.log("Base pipeline built successfully") + return pipeline + + def test_effects_oscillation(self): + """Test each 
effect with gentle intensity oscillation.""" + self.log("\n=== EFFECTS OSCILLATION TEST ===") + self.log( + f"Duration: {self.effect_duration}s per effect at {self.target_fps} FPS" + ) + + discover_plugins() # Ensure all plugins are registered + registry = get_registry() + all_effects = registry.list_all() + effect_names = [ + name + for name in all_effects.keys() + if name not in ("motionblur", "afterimage") + ] + + # Calculate frames based on duration and FPS + frames_per_effect = int(self.effect_duration * self.target_fps) + oscillator = GentleOscillator(speed=90, amplitude=0.7, offset=0.3) + + total_effects = len(effect_names) + 2 # +2 for motionblur and afterimage + estimated_total = total_effects * self.effect_duration + + self.log(f"Testing {len(effect_names)} regular effects + 2 framebuffer effects") + self.log(f"Estimated time: {estimated_total:.0f}s") + + for idx, effect_name in enumerate(sorted(effect_names), 1): + try: + self.log(f"[{idx}/{len(effect_names)}] Testing effect: {effect_name}") + + effect = registry.get(effect_name) + if not effect: + self.log(f" Skipped: plugin not found") + continue + + stage = EffectPluginStage(effect, name=effect_name) + self.pipeline.add_stage(f"effect_{effect_name}", stage) + self.pipeline.build(auto_inject=False) + + self._run_frames( + frames_per_effect, oscillator=oscillator, effect=effect + ) + + self.pipeline.remove_stage(f"effect_{effect_name}") + self.pipeline.build(auto_inject=False) + + self.log(f" ✓ {effect_name} completed successfully") + + except Exception as e: + self.log(f" ✗ {effect_name} failed: {e}") + + # Test motionblur and afterimage separately with framebuffer + for effect_name in ["motionblur", "afterimage"]: + try: + self.log( + f"[{len(effect_names) + 1}/{total_effects}] Testing effect: {effect_name} (with framebuffer)" + ) + + effect = registry.get(effect_name) + if not effect: + self.log(f" Skipped: plugin not found") + continue + + stage = EffectPluginStage( + effect, + name=effect_name, + 
dependencies={"framebuffer.history.default"}, + ) + self.pipeline.add_stage(f"effect_{effect_name}", stage) + self.pipeline.build(auto_inject=False) + + self._run_frames( + frames_per_effect, oscillator=oscillator, effect=effect + ) + + self.pipeline.remove_stage(f"effect_{effect_name}") + self.pipeline.build(auto_inject=False) + self.log(f" ✓ {effect_name} completed successfully") + + except Exception as e: + self.log(f" ✗ {effect_name} failed: {e}") + + def _run_frames(self, num_frames: int, oscillator=None, effect=None): + """Run a specified number of frames with proper timing.""" + for frame in range(num_frames): + self.frame_count += 1 + self.context.set("frame_number", frame) + + if oscillator and effect: + intensity = oscillator.value(frame) + effect.configure(EffectConfig(intensity=intensity)) + + dt = self.timer.sleep_until_next_frame() + self.camera.update(dt) + self.pipeline.execute([]) + + def test_framebuffer(self): + """Test framebuffer functionality.""" + self.log("\n=== FRAMEBUFFER TEST ===") + + try: + # Run frames using FrameTimer for consistent pacing + self._run_frames(10) + + # Check framebuffer history + history = self.context.get("framebuffer.default.history") + assert history is not None, "No framebuffer history found" + assert len(history) > 0, "Framebuffer history is empty" + + self.log(f"History frames: {len(history)}") + self.log(f"Configured depth: {self.framebuffer.config.history_depth}") + + # Check intensity computation + intensity = self.context.get("framebuffer.default.current_intensity") + assert intensity is not None, "No intensity map found" + self.log(f"Intensity map length: {len(intensity)}") + + # Check that frames are being stored correctly + recent_frame = self.framebuffer.get_frame(0, self.context) + assert recent_frame is not None, "Cannot retrieve recent frame" + self.log(f"Recent frame rows: {len(recent_frame)}") + + self.log("✓ Framebuffer test passed") + + except Exception as e: + self.log(f"✗ Framebuffer test failed: 
{e}") + raise + + def test_camera_modes(self): + """Test each camera mode.""" + self.log("\n=== CAMERA MODES TEST ===") + self.log(f"Duration: {self.mode_duration}s per mode at {self.target_fps} FPS") + + camera_modes = [ + ("feed", 0.1), + ("scroll", 0.5), + ("horizontal", 0.3), + ("omni", 0.3), + ("floating", 0.5), + ("bounce", 0.5), + ("radial", 0.3), + ] + + frames_per_mode = int(self.mode_duration * self.target_fps) + self.log(f"Testing {len(camera_modes)} camera modes") + self.log(f"Estimated time: {len(camera_modes) * self.mode_duration:.0f}s") + + for idx, (camera_type, speed) in enumerate(camera_modes, 1): + try: + self.log(f"[{idx}/{len(camera_modes)}] Testing camera: {camera_type}") + + # Rebuild camera + self.camera.reset() + cam_class = getattr(Camera, camera_type, Camera.scroll) + new_camera = cam_class(speed=speed) + new_camera.set_canvas_size(200, 200) + + # Update camera stages + clock_stage = CameraClockStage(new_camera, name="camera-clock") + self.pipeline.replace_stage("camera_update", clock_stage) + + camera_stage = CameraStage(new_camera, name="camera") + self.pipeline.replace_stage("camera", camera_stage) + + self.camera = new_camera + + # Run frames with proper timing + self._run_frames(frames_per_mode) + + # Verify camera moved (check final position) + x, y = self.camera.x, self.camera.y + self.log(f" Final position: ({x:.1f}, {y:.1f})") + + if camera_type == "feed": + assert x == 0 and y == 0, "Feed camera should not move" + elif camera_type in ("scroll", "horizontal"): + assert abs(x) > 0 or abs(y) > 0, "Camera should have moved" + else: + self.log(f" Position check skipped (mode={camera_type})") + + self.log(f" ✓ {camera_type} completed successfully") + + except Exception as e: + self.log(f" ✗ {camera_type} failed: {e}") + + def test_fps_switch_demo(self): + """Demonstrate the effect of different frame rates on animation smoothness.""" + if not self.enable_fps_switch: + return + + self.log("\n=== FPS SWITCH DEMONSTRATION ===") + + 
fps_sequence = [ + (30.0, 5.0), # 30 FPS for 5 seconds + (60.0, 5.0), # 60 FPS for 5 seconds + (30.0, 5.0), # Back to 30 FPS for 5 seconds + (20.0, 3.0), # 20 FPS for 3 seconds + (60.0, 3.0), # 60 FPS for 3 seconds + ] + + original_fps = self.target_fps + + for fps, duration in fps_sequence: + self.log(f"\n--- Switching to {fps} FPS for {duration}s ---") + self.target_fps = fps + self.timer.target_frame_dt = 1.0 / fps + + # Update display FPS if supported + display = ( + self.pipeline.get_stage("display").stage + if self.pipeline.get_stage("display") + else None + ) + if display and hasattr(display, "target_fps"): + display.target_fps = fps + display._frame_period = 1.0 / fps if fps > 0 else 0 + + frames = int(duration * fps) + camera_type = "radial" # Use radial for smooth rotation that's visible at different FPS + speed = 0.3 + + # Rebuild camera if needed + self.camera.reset() + new_camera = Camera.radial(speed=speed) + new_camera.set_canvas_size(200, 200) + clock_stage = CameraClockStage(new_camera, name="camera-clock") + self.pipeline.replace_stage("camera_update", clock_stage) + camera_stage = CameraStage(new_camera, name="camera") + self.pipeline.replace_stage("camera", camera_stage) + self.camera = new_camera + + for frame in range(frames): + self.context.set("frame_number", frame) + dt = self.timer.sleep_until_next_frame() + self.camera.update(dt) + result = self.pipeline.execute([]) + + self.log(f" Completed {frames} frames at {fps} FPS") + + # Restore original FPS + self.target_fps = original_fps + self.timer.target_frame_dt = 1.0 / original_fps + self.log("✓ FPS switch demo completed") + + def run(self): + """Run the complete demo.""" + start_time = time.time() + self.log("Starting Pipeline Demo Orchestrator") + self.log("=" * 50) + + # Initialize frame timer + self.timer = FrameTimer(target_frame_dt=1.0 / self.target_fps) + + # Build pipeline + self.build_base_pipeline() + + try: + # Test framebuffer first (needed for motion blur effects) + 
self.test_framebuffer() + + # Test effects + self.test_effects_oscillation() + + # Test camera modes + self.test_camera_modes() + + # Optional FPS switch demonstration + if self.enable_fps_switch: + self.test_fps_switch_demo() + else: + self.log("\n=== FPS SWITCH DEMO ===") + self.log("Skipped (enable with --switch-fps)") + + elapsed = time.time() - start_time + self.log("\n" + "=" * 50) + self.log("Demo completed successfully!") + self.log(f"Total frames processed: {self.frame_count}") + self.log(f"Total elapsed time: {elapsed:.1f}s") + self.log(f"Average FPS: {self.frame_count / elapsed:.1f}") + + finally: + # Always cleanup properly + self._cleanup() + + def _cleanup(self): + """Clean up pipeline resources.""" + self.log("Cleaning up...", verbose=True) + if self.pipeline: + try: + self.pipeline.cleanup() + if self.verbose: + self.log("Pipeline cleaned up successfully", verbose=True) + except Exception as e: + self.log(f"Error during pipeline cleanup: {e}", verbose=True) + + # If not looping, clear references + if not self.loop: + self.pipeline = None + self.context = None + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Pipeline Demo Orchestrator - comprehensive demo of Mainline pipeline" + ) + parser.add_argument( + "--null", + action="store_true", + help="Use null display (no visual output)", + ) + parser.add_argument( + "--fps", + type=float, + default=30.0, + help="Target frame rate (default: 30)", + ) + parser.add_argument( + "--effect-duration", + type=float, + default=8.0, + help="Duration per effect in seconds (default: 8)", + ) + parser.add_argument( + "--mode-duration", + type=float, + default=3.0, + help="Duration per camera mode in seconds (default: 3)", + ) + parser.add_argument( + "--switch-fps", + action="store_true", + help="Include FPS switching demonstration", + ) + parser.add_argument( + "--loop", + action="store_true", + help="Run demo in an infinite loop", + ) + parser.add_argument( + "--verbose", + 
        action="store_true",
+        help="Enable verbose output",
+    )
+
+    args = parser.parse_args()
+
+    orchestrator = PipelineDemoOrchestrator(
+        use_terminal=not args.null,
+        target_fps=args.fps,
+        effect_duration=args.effect_duration,
+        mode_duration=args.mode_duration,
+        enable_fps_switch=args.switch_fps,
+        loop=args.loop,
+        verbose=args.verbose,
+    )
+
+    try:
+        orchestrator.run()
+    except KeyboardInterrupt:
+        print("\nInterrupted by user")
+        sys.exit(0)
+    except Exception as e:
+        print(f"\nDemo failed: {e}")
+        import traceback
+
+        traceback.print_exc()
+        sys.exit(1)
diff --git a/scripts/render-diagrams.py b/scripts/render-diagrams.py
new file mode 100644
index 0000000..8985bf2
--- /dev/null
+++ b/scripts/render-diagrams.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+"""Render Mermaid diagrams in markdown files to ASCII art."""
+
+import re
+import subprocess
+import sys
+
+
+def extract_mermaid_blocks(content: str) -> list[str]:
+    """Extract mermaid blocks from markdown."""
+    return re.findall(r"```mermaid\n(.*?)\n```", content, re.DOTALL)
+
+
+def render_diagram(block: str) -> str:
+    """Render a single mermaid block to ASCII."""
+    result = subprocess.run(
+        ["mermaid-ascii", "-f", "-"],
+        input=block,
+        capture_output=True,
+        text=True,
+    )
+    if result.returncode != 0:
+        return f"ERROR: {result.stderr}"
+    return result.stdout
+
+
+def main():
+    if len(sys.argv) < 2:
+        print("Usage: render-diagrams.py <file.md>")
+        sys.exit(1)
+
+    filename = sys.argv[1]
+    content = open(filename).read()
+    blocks = extract_mermaid_blocks(content)
+
+    print(f"Found {len(blocks)} mermaid diagram(s) in {filename}")
+    print()
+
+    for i, block in enumerate(blocks):
+        # Skip if empty
+        if not block.strip():
+            continue
+
+        print(f"=== Diagram {i + 1} ===")
+        print(render_diagram(block))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/validate-diagrams.py b/scripts/validate-diagrams.py
new file mode 100644
index 0000000..9ffba0d
--- /dev/null
+++ b/scripts/validate-diagrams.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3 +"""Validate Mermaid diagrams in markdown files.""" + +import glob +import re +import sys + + +# Diagram types that are valid in Mermaid +VALID_TYPES = { + "flowchart", + "graph", + "classDiagram", + "sequenceDiagram", + "stateDiagram", + "stateDiagram-v2", + "erDiagram", + "gantt", + "pie", + "mindmap", + "journey", + "gitGraph", + "requirementDiagram", +} + + +def extract_mermaid_blocks(content: str) -> list[tuple[int, str]]: + """Extract mermaid blocks with their positions.""" + blocks = [] + for match in re.finditer(r"```mermaid\n(.*?)\n```", content, re.DOTALL): + blocks.append((match.start(), match.group(1))) + return blocks + + +def validate_block(block: str) -> bool: + """Check if a mermaid block has a valid diagram type.""" + if not block.strip(): + return True # Empty block is OK + first_line = block.strip().split("\n")[0] + return any(first_line.startswith(t) for t in VALID_TYPES) + + +def main(): + md_files = glob.glob("docs/*.md") + + errors = [] + for filepath in md_files: + content = open(filepath).read() + blocks = extract_mermaid_blocks(content) + + for i, (_, block) in enumerate(blocks): + if not validate_block(block): + errors.append(f"{filepath}: invalid diagram type in block {i + 1}") + + if errors: + for e in errors: + print(f"ERROR: {e}") + sys.exit(1) + + print(f"Validated {len(md_files)} markdown files - all OK") + + +if __name__ == "__main__": + main() diff --git a/test_ui_simple.py b/test_ui_simple.py new file mode 100644 index 0000000..ebd3925 --- /dev/null +++ b/test_ui_simple.py @@ -0,0 +1,56 @@ +""" +Simple test for UIPanel integration. 
+""" + +from engine.pipeline.ui import UIPanel, UIConfig, StageControl + +# Create panel +panel = UIPanel(UIConfig(panel_width=24)) + +# Add some mock stages +panel.register_stage( + type( + "Stage", (), {"name": "noise", "category": "effect", "is_enabled": lambda: True} + ), + enabled=True, +) +panel.register_stage( + type( + "Stage", (), {"name": "fade", "category": "effect", "is_enabled": lambda: True} + ), + enabled=False, +) +panel.register_stage( + type( + "Stage", + (), + {"name": "glitch", "category": "effect", "is_enabled": lambda: True}, + ), + enabled=True, +) +panel.register_stage( + type( + "Stage", + (), + {"name": "font", "category": "transform", "is_enabled": lambda: True}, + ), + enabled=True, +) + +# Select first stage +panel.select_stage("noise") + +# Render at 80x24 +lines = panel.render(80, 24) +print("\n".join(lines)) + +print("\nStage list:") +for name, ctrl in panel.stages.items(): + print(f" {name}: enabled={ctrl.enabled}, selected={ctrl.selected}") + +print("\nToggle 'fade' and re-render:") +panel.toggle_stage("fade") +lines = panel.render(80, 24) +print("\n".join(lines)) + +print("\nEnabled stages:", panel.get_enabled_stages()) diff --git a/tests/acceptance_report.py b/tests/acceptance_report.py new file mode 100644 index 0000000..463503a --- /dev/null +++ b/tests/acceptance_report.py @@ -0,0 +1,473 @@ +""" +HTML Acceptance Test Report Generator + +Generates HTML reports showing frame buffers from acceptance tests. +Uses NullDisplay to capture frames and renders them with monospace font. 
+""" + +import html +from datetime import datetime +from pathlib import Path +from typing import Any + +ANSI_256_TO_RGB = { + 0: (0, 0, 0), + 1: (128, 0, 0), + 2: (0, 128, 0), + 3: (128, 128, 0), + 4: (0, 0, 128), + 5: (128, 0, 128), + 6: (0, 128, 128), + 7: (192, 192, 192), + 8: (128, 128, 128), + 9: (255, 0, 0), + 10: (0, 255, 0), + 11: (255, 255, 0), + 12: (0, 0, 255), + 13: (255, 0, 255), + 14: (0, 255, 255), + 15: (255, 255, 255), +} + + +def ansi_to_rgb(color_code: int) -> tuple[int, int, int]: + """Convert ANSI 256-color code to RGB tuple.""" + if 0 <= color_code <= 15: + return ANSI_256_TO_RGB.get(color_code, (255, 255, 255)) + elif 16 <= color_code <= 231: + color_code -= 16 + r = (color_code // 36) * 51 + g = ((color_code % 36) // 6) * 51 + b = (color_code % 6) * 51 + return (r, g, b) + elif 232 <= color_code <= 255: + gray = (color_code - 232) * 10 + 8 + return (gray, gray, gray) + return (255, 255, 255) + + +def parse_ansi_line(line: str) -> list[dict[str, Any]]: + """Parse a single line with ANSI escape codes into styled segments. + + Returns list of dicts with 'text', 'fg', 'bg', 'bold' keys. 
+ """ + import re + + segments = [] + current_fg = None + current_bg = None + current_bold = False + pos = 0 + + # Find all ANSI escape sequences + escape_pattern = re.compile(r"\x1b\[([0-9;]*)m") + + while pos < len(line): + match = escape_pattern.search(line, pos) + if not match: + # Remaining text with current styling + if pos < len(line): + text = line[pos:] + if text: + segments.append( + { + "text": text, + "fg": current_fg, + "bg": current_bg, + "bold": current_bold, + } + ) + break + + # Add text before escape sequence + if match.start() > pos: + text = line[pos : match.start()] + if text: + segments.append( + { + "text": text, + "fg": current_fg, + "bg": current_bg, + "bold": current_bold, + } + ) + + # Parse escape sequence + codes = match.group(1).split(";") if match.group(1) else ["0"] + for code in codes: + code = code.strip() + if not code or code == "0": + current_fg = None + current_bg = None + current_bold = False + elif code == "1": + current_bold = True + elif code.isdigit(): + code_int = int(code) + if 30 <= code_int <= 37: + current_fg = ansi_to_rgb(code_int - 30 + 8) + elif 90 <= code_int <= 97: + current_fg = ansi_to_rgb(code_int - 90) + elif code_int == 38: + current_fg = (255, 255, 255) + elif code_int == 39: + current_fg = None + + pos = match.end() + + return segments + + +def render_line_to_html(line: str) -> str: + """Render a single terminal line to HTML with styling.""" + import re + + result = "" + pos = 0 + current_fg = None + current_bg = None + current_bold = False + + escape_pattern = re.compile(r"(\x1b\[[0-9;]*m)|(\x1b\[([0-9]+);([0-9]+)H)") + + while pos < len(line): + match = escape_pattern.search(line, pos) + if not match: + # Remaining text + if pos < len(line): + text = html.escape(line[pos:]) + if text: + style = _build_style(current_fg, current_bg, current_bold) + result += f"{text}" + break + + # Handle cursor positioning - just skip it for rendering + if match.group(2): # Cursor positioning \x1b[row;colH + pos = 
match.end() + continue + + # Handle style codes + if match.group(1): + codes = match.group(1)[2:-1].split(";") if match.group(1) else ["0"] + for code in codes: + code = code.strip() + if not code or code == "0": + current_fg = None + current_bg = None + current_bold = False + elif code == "1": + current_bold = True + elif code.isdigit(): + code_int = int(code) + if 30 <= code_int <= 37: + current_fg = ansi_to_rgb(code_int - 30 + 8) + elif 90 <= code_int <= 97: + current_fg = ansi_to_rgb(code_int - 90) + + pos = match.end() + continue + + pos = match.end() + + # Handle remaining text without escape codes + if pos < len(line): + text = html.escape(line[pos:]) + if text: + style = _build_style(current_fg, current_bg, current_bold) + result += f"{text}" + + return result or html.escape(line) + + +def _build_style( + fg: tuple[int, int, int] | None, bg: tuple[int, int, int] | None, bold: bool +) -> str: + """Build CSS style string from color values.""" + styles = [] + if fg: + styles.append(f"color: rgb({fg[0]},{fg[1]},{fg[2]})") + if bg: + styles.append(f"background-color: rgb({bg[0]},{bg[1]},{bg[2]})") + if bold: + styles.append("font-weight: bold") + if not styles: + return "" + return f' style="{"; ".join(styles)}"' + + +def render_frame_to_html(frame: list[str], frame_number: int = 0) -> str: + """Render a complete frame (list of lines) to HTML.""" + html_lines = [] + for i, line in enumerate(frame): + # Strip ANSI cursor positioning but preserve colors + clean_line = ( + line.replace("\x1b[1;1H", "") + .replace("\x1b[2;1H", "") + .replace("\x1b[3;1H", "") + ) + rendered = render_line_to_html(clean_line) + html_lines.append(f'
{rendered}
') + + return f"""
+
Frame {frame_number} ({len(frame)} lines)
+
+ {"".join(html_lines)} +
+
""" + + +def generate_test_report( + test_name: str, + frames: list[list[str]], + status: str = "PASS", + duration_ms: float = 0.0, + metadata: dict[str, Any] | None = None, +) -> str: + """Generate HTML report for a single test.""" + frames_html = "" + for i, frame in enumerate(frames): + frames_html += render_frame_to_html(frame, i) + + metadata_html = "" + if metadata: + metadata_html = '" + + status_class = "pass" if status == "PASS" else "fail" + + return f""" + + + + {test_name} - Acceptance Test Report + + + +
+
+
{test_name}
+
{status}
+
+ {metadata_html} + {frames_html} + +
+ +""" + + +def save_report( + test_name: str, + frames: list[list[str]], + output_dir: str = "test-reports", + status: str = "PASS", + duration_ms: float = 0.0, + metadata: dict[str, Any] | None = None, +) -> str: + """Save HTML report to disk and return the file path.""" + output_path = Path(output_dir) + output_path.mkdir(parents=True, exist_ok=True) + + # Sanitize test name for filename + safe_name = "".join(c if c.isalnum() or c in "-_" else "_" for c in test_name) + filename = f"{safe_name}.html" + filepath = output_path / filename + + html_content = generate_test_report( + test_name, frames, status, duration_ms, metadata + ) + filepath.write_text(html_content) + + return str(filepath) + + +def save_index_report( + reports: list[dict[str, Any]], + output_dir: str = "test-reports", +) -> str: + """Generate an index HTML page linking to all test reports.""" + output_path = Path(output_dir) + output_path.mkdir(parents=True, exist_ok=True) + + rows = "" + for report in reports: + safe_name = "".join( + c if c.isalnum() or c in "-_" else "_" for c in report["test_name"] + ) + filename = f"{safe_name}.html" + status_class = "pass" if report["status"] == "PASS" else "fail" + rows += f""" + +
{report["test_name"]} + {report["status"]} + {report.get("duration_ms", 0):.1f}ms + {report.get("frame_count", 0)} + + """ + + html = f""" + + + + Acceptance Test Reports + + + +

Acceptance Test Reports

+ + + + + + + + + + + {rows} + +
TestStatusDurationFrames
+ +""" + + index_path = output_path / "index.html" + index_path.write_text(html) + return str(index_path) diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..b664a7e --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,36 @@ +""" +Pytest configuration for mainline. +""" + +import pytest + + +def pytest_configure(config): + """Configure pytest to skip integration tests by default.""" + config.addinivalue_line( + "markers", + "integration: marks tests as integration tests (require external services)", + ) + config.addinivalue_line("markers", "ntfy: marks tests that require ntfy service") + + +def pytest_collection_modifyitems(config, items): + """Skip integration/e2e tests unless explicitly requested with -m.""" + # Get the current marker expression + marker_expr = config.getoption("-m", default="") + + # If explicitly running integration or e2e, don't skip them + if marker_expr in ("integration", "e2e", "integration or e2e"): + return + + # Skip integration tests + skip_integration = pytest.mark.skip(reason="need -m integration to run") + for item in items: + if "integration" in item.keywords: + item.add_marker(skip_integration) + + # Skip e2e tests by default (they require browser/display) + skip_e2e = pytest.mark.skip(reason="need -m e2e to run") + for item in items: + if "e2e" in item.keywords and "integration" not in item.keywords: + item.add_marker(skip_e2e) diff --git a/tests/e2e/test_web_client.py b/tests/e2e/test_web_client.py new file mode 100644 index 0000000..daf4efb --- /dev/null +++ b/tests/e2e/test_web_client.py @@ -0,0 +1,133 @@ +""" +End-to-end tests for web client with headless browser. 
+""" + +import os +import socketserver +import threading +from http.server import HTTPServer, SimpleHTTPRequestHandler +from pathlib import Path + +import pytest + +CLIENT_DIR = Path(__file__).parent.parent.parent / "client" + + +class ThreadedHTTPServer(socketserver.ThreadingMixIn, HTTPServer): + """Threaded HTTP server for handling concurrent requests.""" + + daemon_threads = True + + +@pytest.fixture(scope="module") +def http_server(): + """Start a local HTTP server for the client.""" + os.chdir(CLIENT_DIR) + + handler = SimpleHTTPRequestHandler + server = ThreadedHTTPServer(("127.0.0.1", 0), handler) + port = server.server_address[1] + + thread = threading.Thread(target=server.serve_forever, daemon=True) + thread.start() + + yield f"http://127.0.0.1:{port}" + + server.shutdown() + + +class TestWebClient: + """Tests for the web client using Playwright.""" + + @pytest.fixture(autouse=True) + def setup_browser(self): + """Set up browser for tests.""" + pytest.importorskip("playwright") + from playwright.sync_api import sync_playwright + + self.playwright = sync_playwright().start() + self.browser = self.playwright.chromium.launch(headless=True) + self.context = self.browser.new_context() + self.page = self.context.new_page() + + yield + + self.page.close() + self.context.close() + self.browser.close() + self.playwright.stop() + + def test_client_loads(self, http_server): + """Web client loads without errors.""" + response = self.page.goto(http_server) + assert response.status == 200, f"Page load failed with status {response.status}" + + self.page.wait_for_load_state("domcontentloaded") + + content = self.page.content() + assert " 0, "Canvas not found" + + def test_status_shows_connecting(self, http_server): + """Status shows connecting initially.""" + self.page.goto(http_server) + self.page.wait_for_load_state("domcontentloaded") + + status = self.page.locator("#status") + assert status.count() > 0, "Status element not found" + + def 
test_canvas_has_dimensions(self, http_server): + """Canvas has correct dimensions after load.""" + self.page.goto(http_server) + self.page.wait_for_load_state("domcontentloaded") + + canvas = self.page.locator("#terminal") + assert canvas.count() > 0, "Canvas not found" + + def test_no_console_errors_on_load(self, http_server): + """No JavaScript errors on page load (websocket errors are expected without server).""" + js_errors = [] + + def handle_console(msg): + if msg.type == "error": + text = msg.text + if "WebSocket" not in text: + js_errors.append(text) + + self.page.on("console", handle_console) + self.page.goto(http_server) + self.page.wait_for_load_state("domcontentloaded") + + assert len(js_errors) == 0, f"JavaScript errors: {js_errors}" + + +class TestWebClientProtocol: + """Tests for WebSocket protocol handling in client.""" + + @pytest.fixture(autouse=True) + def setup_browser(self): + """Set up browser for tests.""" + pytest.importorskip("playwright") + from playwright.sync_api import sync_playwright + + self.playwright = sync_playwright().start() + self.browser = self.playwright.chromium.launch(headless=True) + self.context = self.browser.new_context() + self.page = self.context.new_page() + + yield + + self.page.close() + self.context.close() + self.browser.close() + self.playwright.stop() + + def test_websocket_reconnection(self, http_server): + """Client attempts reconnection on disconnect.""" + self.page.goto(http_server) + self.page.wait_for_load_state("domcontentloaded") + + status = self.page.locator("#status") + assert status.count() > 0, "Status element not found" diff --git a/tests/fixtures/test.svg b/tests/fixtures/test.svg deleted file mode 100644 index f35f4b3..0000000 --- a/tests/fixtures/test.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/tests/kitty_test.py b/tests/kitty_test.py new file mode 100644 index 0000000..eed1a95 --- /dev/null +++ b/tests/kitty_test.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +"""Test script for Kitty 
graphics display.""" + +import sys + + +def test_kitty_simple(): + """Test simple Kitty graphics output with embedded PNG.""" + import base64 + + # Minimal 1x1 red pixel PNG (pre-encoded) + # This is a tiny valid PNG with a red pixel + png_red_1x1 = ( + b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00" + b"\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00\x90wS\xde" + b"\x00\x00\x00\x0cIDATx\x9cc\xf8\xcf\xc0\x00\x00\x00" + b"\x03\x00\x01\x00\x05\xfe\xd4\x00\x00\x00\x00IEND\xaeB`\x82" + ) + + encoded = base64.b64encode(png_red_1x1).decode("ascii") + + graphic = f"\x1b_Gf=100,t=d,s=1,v=1,c=1,r=1;{encoded}\x1b\\" + sys.stdout.buffer.write(graphic.encode("utf-8")) + sys.stdout.flush() + + print("\n[If you see a red dot above, Kitty graphics is working!]") + print("[If you see nothing or garbage, it's not working]") + + +if __name__ == "__main__": + test_kitty_simple() diff --git a/tests/test_acceptance.py b/tests/test_acceptance.py new file mode 100644 index 0000000..a94c9ca --- /dev/null +++ b/tests/test_acceptance.py @@ -0,0 +1,290 @@ +""" +Acceptance tests for HUD visibility and positioning. + +These tests verify that HUD appears in the final output frame. +Frames are captured and saved as HTML reports for visual verification. 
+""" + +import queue + +from engine.data_sources.sources import ListDataSource, SourceItem +from engine.effects.plugins.hud import HudEffect +from engine.pipeline import Pipeline, PipelineConfig +from engine.pipeline.adapters import ( + DataSourceStage, + DisplayStage, + EffectPluginStage, + SourceItemsToBufferStage, +) +from engine.pipeline.core import PipelineContext +from engine.pipeline.params import PipelineParams +from tests.acceptance_report import save_report + + +class FrameCaptureDisplay: + """Display that captures frames for HTML report generation.""" + + def __init__(self): + self.frames: queue.Queue[list[str]] = queue.Queue() + self.width = 80 + self.height = 24 + self._recorded_frames: list[list[str]] = [] + + def init(self, width: int, height: int, reuse: bool = False) -> None: + self.width = width + self.height = height + + def show(self, buffer: list[str], border: bool = False) -> None: + self._recorded_frames.append(list(buffer)) + self.frames.put(list(buffer)) + + def clear(self) -> None: + pass + + def cleanup(self) -> None: + pass + + def get_dimensions(self) -> tuple[int, int]: + return (self.width, self.height) + + def get_recorded_frames(self) -> list[list[str]]: + return self._recorded_frames + + +def _build_pipeline_with_hud( + items: list[SourceItem], +) -> tuple[Pipeline, FrameCaptureDisplay, PipelineContext]: + """Build a pipeline with HUD effect.""" + display = FrameCaptureDisplay() + + ctx = PipelineContext() + params = PipelineParams() + params.viewport_width = display.width + params.viewport_height = display.height + params.frame_number = 0 + params.effect_order = ["noise", "hud"] + params.effect_enabled = {"noise": False} + ctx.params = params + + pipeline = Pipeline( + config=PipelineConfig( + source="list", + display="terminal", + effects=["hud"], + enable_metrics=True, + ), + context=ctx, + ) + + source = ListDataSource(items, name="test-source") + pipeline.add_stage("source", DataSourceStage(source, name="test-source")) + 
pipeline.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer")) + + hud_effect = HudEffect() + pipeline.add_stage("hud", EffectPluginStage(hud_effect, name="hud")) + + pipeline.add_stage("display", DisplayStage(display, name="terminal")) + + pipeline.build() + pipeline.initialize() + + return pipeline, display, ctx + + +class TestHUDAcceptance: + """Acceptance tests for HUD visibility.""" + + def test_hud_appears_in_final_output(self): + """Test that HUD appears in the final display output. + + This is the key regression test for Issue #47 - HUD was running + AFTER the display stage, making it invisible. Now it should appear + in the frame captured by the display. + """ + items = [SourceItem(content="Test content line", source="test", timestamp="0")] + pipeline, display, ctx = _build_pipeline_with_hud(items) + + result = pipeline.execute(items) + assert result.success, f"Pipeline execution failed: {result.error}" + + frame = display.frames.get(timeout=1) + frame_text = "\n".join(frame) + + assert "MAINLINE" in frame_text, "HUD header not found in final output" + assert "EFFECT:" in frame_text, "EFFECT line not found in final output" + assert "PIPELINE:" in frame_text, "PIPELINE line not found in final output" + + save_report( + test_name="test_hud_appears_in_final_output", + frames=display.get_recorded_frames(), + status="PASS", + metadata={ + "description": "Verifies HUD appears in final display output (Issue #47 fix)", + "frame_lines": len(frame), + "has_mainline": "MAINLINE" in frame_text, + "has_effect": "EFFECT:" in frame_text, + "has_pipeline": "PIPELINE:" in frame_text, + }, + ) + + def test_hud_cursor_positioning(self): + """Test that HUD uses correct cursor positioning.""" + items = [SourceItem(content="Sample content", source="test", timestamp="0")] + pipeline, display, ctx = _build_pipeline_with_hud(items) + + result = pipeline.execute(items) + assert result.success + + frame = display.frames.get(timeout=1) + has_cursor_pos = any("\x1b[" 
in line and "H" in line for line in frame) + + save_report( + test_name="test_hud_cursor_positioning", + frames=display.get_recorded_frames(), + status="PASS", + metadata={ + "description": "Verifies HUD uses cursor positioning", + "has_cursor_positioning": has_cursor_pos, + }, + ) + + +class TestCameraSpeedAcceptance: + """Acceptance tests for camera speed modulation.""" + + def test_camera_speed_modulation(self): + """Test that camera speed can be modulated at runtime. + + This verifies the camera speed modulation feature added in Phase 1. + """ + from engine.camera import Camera + from engine.pipeline.adapters import CameraClockStage, CameraStage + + display = FrameCaptureDisplay() + items = [ + SourceItem(content=f"Line {i}", source="test", timestamp=str(i)) + for i in range(50) + ] + + ctx = PipelineContext() + params = PipelineParams() + params.viewport_width = display.width + params.viewport_height = display.height + params.frame_number = 0 + params.camera_speed = 1.0 + ctx.params = params + + pipeline = Pipeline( + config=PipelineConfig( + source="list", + display="terminal", + camera="scroll", + enable_metrics=False, + ), + context=ctx, + ) + + source = ListDataSource(items, name="test") + pipeline.add_stage("source", DataSourceStage(source, name="test")) + pipeline.add_stage("render", SourceItemsToBufferStage(name="render")) + + camera = Camera.scroll(speed=0.5) + pipeline.add_stage( + "camera_update", CameraClockStage(camera, name="camera-clock") + ) + pipeline.add_stage("camera", CameraStage(camera, name="camera")) + pipeline.add_stage("display", DisplayStage(display, name="terminal")) + + pipeline.build() + pipeline.initialize() + + initial_camera_speed = camera.speed + + for _ in range(3): + pipeline.execute(items) + + speed_after_first_run = camera.speed + + params.camera_speed = 5.0 + ctx.params = params + + for _ in range(3): + pipeline.execute(items) + + speed_after_increase = camera.speed + + assert speed_after_increase == 5.0, ( + f"Camera speed 
should be modulated to 5.0, got {speed_after_increase}" + ) + + params.camera_speed = 0.0 + ctx.params = params + + for _ in range(3): + pipeline.execute(items) + + speed_after_stop = camera.speed + assert speed_after_stop == 0.0, ( + f"Camera speed should be 0.0, got {speed_after_stop}" + ) + + save_report( + test_name="test_camera_speed_modulation", + frames=display.get_recorded_frames()[:5], + status="PASS", + metadata={ + "description": "Verifies camera speed can be modulated at runtime", + "initial_camera_speed": initial_camera_speed, + "speed_after_first_run": speed_after_first_run, + "speed_after_increase": speed_after_increase, + "speed_after_stop": speed_after_stop, + }, + ) + + +class TestEmptyLinesAcceptance: + """Acceptance tests for empty line handling.""" + + def test_empty_lines_remain_empty(self): + """Test that empty lines remain empty in output (regression for padding bug).""" + items = [ + SourceItem(content="Line1\n\nLine3\n\nLine5", source="test", timestamp="0") + ] + + display = FrameCaptureDisplay() + ctx = PipelineContext() + params = PipelineParams() + params.viewport_width = display.width + params.viewport_height = display.height + ctx.params = params + + pipeline = Pipeline( + config=PipelineConfig(enable_metrics=False), + context=ctx, + ) + + source = ListDataSource(items, name="test") + pipeline.add_stage("source", DataSourceStage(source, name="test")) + pipeline.add_stage("render", SourceItemsToBufferStage(name="render")) + pipeline.add_stage("display", DisplayStage(display, name="terminal")) + + pipeline.build() + pipeline.initialize() + + result = pipeline.execute(items) + assert result.success + + frame = display.frames.get(timeout=1) + has_truly_empty = any(not line for line in frame) + + save_report( + test_name="test_empty_lines_remain_empty", + frames=display.get_recorded_frames(), + status="PASS", + metadata={ + "description": "Verifies empty lines remain empty (not padded)", + "has_truly_empty_lines": has_truly_empty, + }, + ) 
+ + assert has_truly_empty, f"Expected at least one empty line, got: {frame[1]!r}" diff --git a/tests/test_adapters.py b/tests/test_adapters.py new file mode 100644 index 0000000..3bd7024 --- /dev/null +++ b/tests/test_adapters.py @@ -0,0 +1,345 @@ +""" +Tests for engine/pipeline/adapters.py - Stage adapters for the pipeline. + +Tests Stage adapters that bridge existing components to the Stage interface: +- DataSourceStage: Wraps DataSource objects +- DisplayStage: Wraps Display backends +- PassthroughStage: Simple pass-through stage for pre-rendered data +- SourceItemsToBufferStage: Converts SourceItem objects to text buffers +- EffectPluginStage: Wraps effect plugins +""" + +from unittest.mock import MagicMock + +from engine.data_sources.sources import SourceItem +from engine.pipeline.adapters import ( + DataSourceStage, + DisplayStage, + EffectPluginStage, + PassthroughStage, + SourceItemsToBufferStage, +) +from engine.pipeline.core import PipelineContext + + +class TestDataSourceStage: + """Test DataSourceStage adapter.""" + + def test_datasource_stage_name(self): + """DataSourceStage stores name correctly.""" + mock_source = MagicMock() + stage = DataSourceStage(mock_source, name="headlines") + assert stage.name == "headlines" + + def test_datasource_stage_category(self): + """DataSourceStage has 'source' category.""" + mock_source = MagicMock() + stage = DataSourceStage(mock_source, name="headlines") + assert stage.category == "source" + + def test_datasource_stage_capabilities(self): + """DataSourceStage advertises source capability.""" + mock_source = MagicMock() + stage = DataSourceStage(mock_source, name="headlines") + assert "source.headlines" in stage.capabilities + + def test_datasource_stage_dependencies(self): + """DataSourceStage has no dependencies.""" + mock_source = MagicMock() + stage = DataSourceStage(mock_source, name="headlines") + assert stage.dependencies == set() + + def test_datasource_stage_process_calls_get_items(self): + 
"""DataSourceStage.process() calls source.get_items().""" + mock_items = [ + SourceItem(content="Item 1", source="headlines", timestamp="12:00"), + ] + mock_source = MagicMock() + mock_source.get_items.return_value = mock_items + + stage = DataSourceStage(mock_source, name="headlines") + ctx = PipelineContext() + result = stage.process(None, ctx) + + assert result == mock_items + mock_source.get_items.assert_called_once() + + def test_datasource_stage_process_fallback_returns_data(self): + """DataSourceStage.process() returns data if no get_items method.""" + mock_source = MagicMock(spec=[]) # No get_items method + stage = DataSourceStage(mock_source, name="headlines") + ctx = PipelineContext() + test_data = [{"content": "test"}] + + result = stage.process(test_data, ctx) + assert result == test_data + + +class TestDisplayStage: + """Test DisplayStage adapter.""" + + def test_display_stage_name(self): + """DisplayStage stores name correctly.""" + mock_display = MagicMock() + stage = DisplayStage(mock_display, name="terminal") + assert stage.name == "terminal" + + def test_display_stage_category(self): + """DisplayStage has 'display' category.""" + mock_display = MagicMock() + stage = DisplayStage(mock_display, name="terminal") + assert stage.category == "display" + + def test_display_stage_capabilities(self): + """DisplayStage advertises display capability.""" + mock_display = MagicMock() + stage = DisplayStage(mock_display, name="terminal") + assert "display.output" in stage.capabilities + + def test_display_stage_dependencies(self): + """DisplayStage depends on render.output.""" + mock_display = MagicMock() + stage = DisplayStage(mock_display, name="terminal") + assert "render.output" in stage.dependencies + + def test_display_stage_init(self): + """DisplayStage.init() calls display.init() with dimensions.""" + mock_display = MagicMock() + mock_display.init.return_value = True + stage = DisplayStage(mock_display, name="terminal") + + ctx = PipelineContext() + 
ctx.params = MagicMock() + ctx.params.viewport_width = 100 + ctx.params.viewport_height = 30 + + result = stage.init(ctx) + + assert result is True + mock_display.init.assert_called_once_with(100, 30, reuse=False) + + def test_display_stage_init_uses_defaults(self): + """DisplayStage.init() uses defaults when params missing.""" + mock_display = MagicMock() + mock_display.init.return_value = True + stage = DisplayStage(mock_display, name="terminal") + + ctx = PipelineContext() + ctx.params = None + + result = stage.init(ctx) + + assert result is True + mock_display.init.assert_called_once_with(80, 24, reuse=False) + + def test_display_stage_process_calls_show(self): + """DisplayStage.process() calls display.show() with data.""" + mock_display = MagicMock() + stage = DisplayStage(mock_display, name="terminal") + + test_buffer = [[["A", "red"] for _ in range(80)] for _ in range(24)] + ctx = PipelineContext() + result = stage.process(test_buffer, ctx) + + assert result == test_buffer + mock_display.show.assert_called_once_with(test_buffer) + + def test_display_stage_process_skips_none_data(self): + """DisplayStage.process() skips show() if data is None.""" + mock_display = MagicMock() + stage = DisplayStage(mock_display, name="terminal") + + ctx = PipelineContext() + result = stage.process(None, ctx) + + assert result is None + mock_display.show.assert_not_called() + + def test_display_stage_cleanup(self): + """DisplayStage.cleanup() calls display.cleanup().""" + mock_display = MagicMock() + stage = DisplayStage(mock_display, name="terminal") + + stage.cleanup() + + mock_display.cleanup.assert_called_once() + + +class TestPassthroughStage: + """Test PassthroughStage adapter.""" + + def test_passthrough_stage_name(self): + """PassthroughStage stores name correctly.""" + stage = PassthroughStage(name="test") + assert stage.name == "test" + + def test_passthrough_stage_category(self): + """PassthroughStage has 'render' category.""" + stage = PassthroughStage() + assert 
stage.category == "render" + + def test_passthrough_stage_is_optional(self): + """PassthroughStage is optional.""" + stage = PassthroughStage() + assert stage.optional is True + + def test_passthrough_stage_capabilities(self): + """PassthroughStage advertises render output capability.""" + stage = PassthroughStage() + assert "render.output" in stage.capabilities + + def test_passthrough_stage_dependencies(self): + """PassthroughStage depends on source.""" + stage = PassthroughStage() + assert "source" in stage.dependencies + + def test_passthrough_stage_process_returns_data_unchanged(self): + """PassthroughStage.process() returns data unchanged.""" + stage = PassthroughStage() + ctx = PipelineContext() + + test_data = [ + SourceItem(content="Line 1", source="test", timestamp="12:00"), + ] + result = stage.process(test_data, ctx) + + assert result == test_data + assert result is test_data + + +class TestSourceItemsToBufferStage: + """Test SourceItemsToBufferStage adapter.""" + + def test_source_items_to_buffer_stage_name(self): + """SourceItemsToBufferStage stores name correctly.""" + stage = SourceItemsToBufferStage(name="custom-name") + assert stage.name == "custom-name" + + def test_source_items_to_buffer_stage_category(self): + """SourceItemsToBufferStage has 'render' category.""" + stage = SourceItemsToBufferStage() + assert stage.category == "render" + + def test_source_items_to_buffer_stage_is_optional(self): + """SourceItemsToBufferStage is optional.""" + stage = SourceItemsToBufferStage() + assert stage.optional is True + + def test_source_items_to_buffer_stage_capabilities(self): + """SourceItemsToBufferStage advertises render output capability.""" + stage = SourceItemsToBufferStage() + assert "render.output" in stage.capabilities + + def test_source_items_to_buffer_stage_dependencies(self): + """SourceItemsToBufferStage depends on source.""" + stage = SourceItemsToBufferStage() + assert "source" in stage.dependencies + + def 
test_source_items_to_buffer_stage_process_single_line_item(self): + """SourceItemsToBufferStage converts single-line SourceItem.""" + stage = SourceItemsToBufferStage() + ctx = PipelineContext() + + items = [ + SourceItem(content="Single line content", source="test", timestamp="12:00"), + ] + result = stage.process(items, ctx) + + assert isinstance(result, list) + assert len(result) >= 1 + # Result should be lines of text + assert all(isinstance(line, str) for line in result) + + def test_source_items_to_buffer_stage_process_multiline_item(self): + """SourceItemsToBufferStage splits multiline SourceItem content.""" + stage = SourceItemsToBufferStage() + ctx = PipelineContext() + + content = "Line 1\nLine 2\nLine 3" + items = [ + SourceItem(content=content, source="test", timestamp="12:00"), + ] + result = stage.process(items, ctx) + + # Should have at least 3 lines + assert len(result) >= 3 + assert all(isinstance(line, str) for line in result) + + def test_source_items_to_buffer_stage_process_multiple_items(self): + """SourceItemsToBufferStage handles multiple SourceItems.""" + stage = SourceItemsToBufferStage() + ctx = PipelineContext() + + items = [ + SourceItem(content="Item 1", source="test", timestamp="12:00"), + SourceItem(content="Item 2", source="test", timestamp="12:01"), + SourceItem(content="Item 3", source="test", timestamp="12:02"), + ] + result = stage.process(items, ctx) + + # Should have at least 3 lines (one per item, possibly more) + assert len(result) >= 3 + assert all(isinstance(line, str) for line in result) + + +class TestEffectPluginStage: + """Test EffectPluginStage adapter.""" + + def test_effect_plugin_stage_name(self): + """EffectPluginStage stores name correctly.""" + mock_effect = MagicMock() + stage = EffectPluginStage(mock_effect, name="blur") + assert stage.name == "blur" + + def test_effect_plugin_stage_category(self): + """EffectPluginStage has 'effect' category.""" + mock_effect = MagicMock() + stage = 
EffectPluginStage(mock_effect, name="blur") + assert stage.category == "effect" + + def test_effect_plugin_stage_is_not_optional(self): + """EffectPluginStage is required when configured.""" + mock_effect = MagicMock() + stage = EffectPluginStage(mock_effect, name="blur") + assert stage.optional is False + + def test_effect_plugin_stage_capabilities(self): + """EffectPluginStage advertises effect capability with name.""" + mock_effect = MagicMock() + stage = EffectPluginStage(mock_effect, name="blur") + assert "effect.blur" in stage.capabilities + + def test_effect_plugin_stage_dependencies(self): + """EffectPluginStage has no static dependencies.""" + mock_effect = MagicMock() + stage = EffectPluginStage(mock_effect, name="blur") + # EffectPluginStage has empty dependencies - they are resolved dynamically + assert stage.dependencies == set() + + def test_effect_plugin_stage_stage_type(self): + """EffectPluginStage.stage_type returns effect for non-HUD.""" + mock_effect = MagicMock() + stage = EffectPluginStage(mock_effect, name="blur") + assert stage.stage_type == "effect" + + def test_effect_plugin_stage_hud_special_handling(self): + """EffectPluginStage has special handling for HUD effect.""" + mock_effect = MagicMock() + stage = EffectPluginStage(mock_effect, name="hud") + assert stage.stage_type == "overlay" + assert stage.is_overlay is True + assert stage.render_order == 100 + + def test_effect_plugin_stage_process(self): + """EffectPluginStage.process() calls effect.process().""" + mock_effect = MagicMock() + mock_effect.process.return_value = "processed_data" + + stage = EffectPluginStage(mock_effect, name="blur") + ctx = PipelineContext() + test_buffer = "test_buffer" + + result = stage.process(test_buffer, ctx) + + assert result == "processed_data" + mock_effect.process.assert_called_once() diff --git a/tests/test_app.py b/tests/test_app.py new file mode 100644 index 0000000..acc94c6 --- /dev/null +++ b/tests/test_app.py @@ -0,0 +1,215 @@ +""" 
Integration tests for the engine.app package - pipeline orchestration.
run_pipeline_mode("invalid-preset-xyz") + assert exc_info.value.code == 1 + + def test_run_pipeline_mode_exits_when_no_content_available(self): + """run_pipeline_mode() exits if no content can be fetched.""" + with ( + patch("engine.app.pipeline_runner.load_cache", return_value=None), + patch("engine.app.pipeline_runner.fetch_all_fast", return_value=[]), + patch( + "engine.app.pipeline_runner.fetch_all", return_value=([], None, None) + ), # Mock background thread + patch("engine.app.pipeline_runner.save_cache"), # Prevent disk I/O + patch("engine.effects.plugins.discover_plugins"), + pytest.raises(SystemExit) as exc_info, + ): + run_pipeline_mode("demo") + assert exc_info.value.code == 1 + + def test_run_pipeline_mode_uses_cache_over_fetch(self): + """run_pipeline_mode() uses cached content if available.""" + cached = ["cached_item"] + with ( + patch( + "engine.app.pipeline_runner.load_cache", return_value=cached + ) as mock_load, + patch("engine.app.pipeline_runner.fetch_all") as mock_fetch, + patch("engine.app.pipeline_runner.fetch_all_fast"), + patch("engine.app.pipeline_runner.DisplayRegistry.create") as mock_create, + ): + mock_display = Mock() + mock_display.init = Mock() + mock_display.get_dimensions = Mock(return_value=(80, 24)) + mock_display.is_quit_requested = Mock(return_value=True) + mock_display.clear_quit_request = Mock() + mock_display.show = Mock() + mock_display.cleanup = Mock() + mock_create.return_value = mock_display + + try: + run_pipeline_mode("demo") + except (KeyboardInterrupt, SystemExit): + pass + + # Verify fetch_all was NOT called (cache was used) + mock_fetch.assert_not_called() + mock_load.assert_called_once() + + def test_run_pipeline_mode_creates_display(self): + """run_pipeline_mode() creates a display backend.""" + with ( + patch("engine.app.pipeline_runner.load_cache", return_value=["item"]), + patch("engine.app.pipeline_runner.fetch_all_fast", return_value=[]), + patch("engine.app.DisplayRegistry.create") as mock_create, + ): + 
mock_display = Mock() + mock_display.init = Mock() + mock_display.get_dimensions = Mock(return_value=(80, 24)) + mock_display.is_quit_requested = Mock(return_value=True) + mock_display.clear_quit_request = Mock() + mock_display.show = Mock() + mock_display.cleanup = Mock() + mock_create.return_value = mock_display + + try: + run_pipeline_mode("demo-base") + except (KeyboardInterrupt, SystemExit): + pass + + # Verify display was created with 'terminal' (preset display) + mock_create.assert_called_once_with("terminal") + + def test_run_pipeline_mode_respects_display_cli_flag(self): + """run_pipeline_mode() uses --display CLI flag if provided.""" + sys.argv = ["mainline.py", "--display", "websocket"] + + with ( + patch("engine.app.pipeline_runner.load_cache", return_value=["item"]), + patch("engine.app.pipeline_runner.fetch_all_fast", return_value=[]), + patch("engine.app.DisplayRegistry.create") as mock_create, + ): + mock_display = Mock() + mock_display.init = Mock() + mock_display.get_dimensions = Mock(return_value=(80, 24)) + mock_display.is_quit_requested = Mock(return_value=True) + mock_display.clear_quit_request = Mock() + mock_display.show = Mock() + mock_display.cleanup = Mock() + mock_create.return_value = mock_display + + try: + run_pipeline_mode("demo") + except (KeyboardInterrupt, SystemExit): + pass + + # Verify display was created with CLI override + mock_create.assert_called_once_with("websocket") + + def test_run_pipeline_mode_fetches_poetry_for_poetry_source(self): + """run_pipeline_mode() fetches poetry for poetry preset.""" + with ( + patch("engine.app.pipeline_runner.load_cache", return_value=None), + patch( + "engine.app.pipeline_runner.fetch_poetry", + return_value=(["poem"], None, None), + ) as mock_fetch_poetry, + patch("engine.app.pipeline_runner.fetch_all") as mock_fetch_all, + patch("engine.app.pipeline_runner.fetch_all_fast", return_value=[]), + patch("engine.app.pipeline_runner.DisplayRegistry.create") as mock_create, + ): + mock_display 
= Mock() + mock_display.init = Mock() + mock_display.get_dimensions = Mock(return_value=(80, 24)) + mock_display.is_quit_requested = Mock(return_value=True) + mock_display.clear_quit_request = Mock() + mock_display.show = Mock() + mock_display.cleanup = Mock() + mock_create.return_value = mock_display + + try: + run_pipeline_mode("poetry") + except (KeyboardInterrupt, SystemExit): + pass + + # Verify fetch_poetry was called, not fetch_all + mock_fetch_poetry.assert_called_once() + mock_fetch_all.assert_not_called() + + def test_run_pipeline_mode_discovers_effect_plugins(self): + """run_pipeline_mode() discovers available effect plugins.""" + with ( + patch("engine.app.pipeline_runner.load_cache", return_value=["item"]), + patch("engine.app.pipeline_runner.fetch_all_fast", return_value=[]), + patch("engine.effects.plugins.discover_plugins") as mock_discover, + patch("engine.app.pipeline_runner.DisplayRegistry.create") as mock_create, + ): + mock_display = Mock() + mock_display.init = Mock() + mock_display.get_dimensions = Mock(return_value=(80, 24)) + mock_display.is_quit_requested = Mock(return_value=True) + mock_display.clear_quit_request = Mock() + mock_display.show = Mock() + mock_display.cleanup = Mock() + mock_create.return_value = mock_display + + try: + run_pipeline_mode("demo") + except (KeyboardInterrupt, SystemExit): + pass + + # Verify effects_plugins.discover_plugins was called + mock_discover.assert_called_once() diff --git a/tests/test_benchmark.py b/tests/test_benchmark.py new file mode 100644 index 0000000..ba37f12 --- /dev/null +++ b/tests/test_benchmark.py @@ -0,0 +1,380 @@ +""" +Tests for engine.benchmark module - performance regression tests. 
+""" + +import os +from unittest.mock import patch + +import pytest + +from engine.display import MultiDisplay, NullDisplay, TerminalDisplay +from engine.effects import EffectContext, get_registry +from engine.effects.plugins import discover_plugins + + +def _is_coverage_active(): + """Check if coverage is active.""" + # Check if coverage module is loaded + import sys + + return "coverage" in sys.modules or "cov" in sys.modules + + +def _get_min_fps_threshold(base_threshold: int) -> int: + """ + Get minimum FPS threshold adjusted for coverage mode. + + Coverage instrumentation typically slows execution by 2-5x. + We adjust thresholds accordingly to avoid false positives. + """ + if _is_coverage_active(): + # Coverage typically slows execution by 2-5x + # Use a more conservative threshold (25% of original to account for higher overhead) + return max(500, int(base_threshold * 0.25)) + return base_threshold + + +def _get_iterations() -> int: + """Get number of iterations for benchmarks.""" + # Check for environment variable override + env_iterations = os.environ.get("BENCHMARK_ITERATIONS") + if env_iterations: + try: + return int(env_iterations) + except ValueError: + pass + + # Default based on coverage mode + if _is_coverage_active(): + return 100 # Fewer iterations when coverage is active + return 500 # Default iterations + + +class TestBenchmarkNullDisplay: + """Performance tests for NullDisplay - regression tests.""" + + @pytest.mark.benchmark + def test_null_display_minimum_fps(self): + """NullDisplay should meet minimum performance threshold.""" + import time + + display = NullDisplay() + display.init(80, 24) + buffer = ["x" * 80 for _ in range(24)] + + iterations = _get_iterations() + start = time.perf_counter() + for _ in range(iterations): + display.show(buffer) + elapsed = time.perf_counter() - start + + fps = iterations / elapsed + min_fps = _get_min_fps_threshold(20000) + + assert fps >= min_fps, f"NullDisplay FPS {fps:.0f} below minimum {min_fps}" + + 
@pytest.mark.benchmark + def test_effects_minimum_throughput(self): + """Effects should meet minimum processing throughput.""" + import time + + from engine.effects import EffectContext, get_registry + from engine.effects.plugins import discover_plugins + + discover_plugins() + registry = get_registry() + effect = registry.get("noise") + assert effect is not None, "Noise effect should be registered" + + buffer = ["x" * 80 for _ in range(24)] + ctx = EffectContext( + terminal_width=80, + terminal_height=24, + scroll_cam=0, + ticker_height=20, + mic_excess=0.0, + grad_offset=0.0, + frame_number=0, + has_message=False, + ) + + iterations = _get_iterations() + start = time.perf_counter() + for _ in range(iterations): + effect.process(buffer, ctx) + elapsed = time.perf_counter() - start + + fps = iterations / elapsed + min_fps = _get_min_fps_threshold(10000) + + assert fps >= min_fps, ( + f"Effect processing FPS {fps:.0f} below minimum {min_fps}" + ) + + +class TestBenchmarkWebSocketDisplay: + """Performance tests for WebSocketDisplay.""" + + @pytest.mark.benchmark + def test_websocket_display_minimum_fps(self): + """WebSocketDisplay should meet minimum performance threshold.""" + import time + + with patch("engine.display.backends.websocket.websockets", None): + from engine.display import WebSocketDisplay + + display = WebSocketDisplay() + display.init(80, 24) + buffer = ["x" * 80 for _ in range(24)] + + iterations = _get_iterations() + start = time.perf_counter() + for _ in range(iterations): + display.show(buffer) + elapsed = time.perf_counter() - start + + fps = iterations / elapsed + min_fps = _get_min_fps_threshold(10000) + + assert fps >= min_fps, ( + f"WebSocketDisplay FPS {fps:.0f} below minimum {min_fps}" + ) + + +class TestBenchmarkTerminalDisplay: + """Performance tests for TerminalDisplay.""" + + @pytest.mark.benchmark + def test_terminal_display_minimum_fps(self): + """TerminalDisplay should meet minimum performance threshold.""" + import time + + display 
= TerminalDisplay() + display.init(80, 24) + buffer = ["x" * 80 for _ in range(24)] + + iterations = _get_iterations() + start = time.perf_counter() + for _ in range(iterations): + display.show(buffer) + elapsed = time.perf_counter() - start + + fps = iterations / elapsed + min_fps = _get_min_fps_threshold(10000) + + assert fps >= min_fps, f"TerminalDisplay FPS {fps:.0f} below minimum {min_fps}" + + +class TestBenchmarkMultiDisplay: + """Performance tests for MultiDisplay.""" + + @pytest.mark.benchmark + def test_multi_display_minimum_fps(self): + """MultiDisplay should meet minimum performance threshold.""" + import time + + with patch("engine.display.backends.websocket.websockets", None): + from engine.display import WebSocketDisplay + + null_display = NullDisplay() + null_display.init(80, 24) + ws_display = WebSocketDisplay() + ws_display.init(80, 24) + + display = MultiDisplay([null_display, ws_display]) + display.init(80, 24) + buffer = ["x" * 80 for _ in range(24)] + + iterations = _get_iterations() + start = time.perf_counter() + for _ in range(iterations): + display.show(buffer) + elapsed = time.perf_counter() - start + + fps = iterations / elapsed + min_fps = _get_min_fps_threshold(5000) + + assert fps >= min_fps, f"MultiDisplay FPS {fps:.0f} below minimum {min_fps}" + + +class TestBenchmarkEffects: + """Performance tests for various effects.""" + + @pytest.mark.benchmark + def test_fade_effect_minimum_fps(self): + """Fade effect should meet minimum performance threshold.""" + import time + + discover_plugins() + registry = get_registry() + effect = registry.get("fade") + assert effect is not None, "Fade effect should be registered" + + buffer = ["x" * 80 for _ in range(24)] + ctx = EffectContext( + terminal_width=80, + terminal_height=24, + scroll_cam=0, + ticker_height=20, + mic_excess=0.0, + grad_offset=0.0, + frame_number=0, + has_message=False, + ) + + iterations = _get_iterations() + start = time.perf_counter() + for _ in range(iterations): + 
effect.process(buffer, ctx) + elapsed = time.perf_counter() - start + + fps = iterations / elapsed + min_fps = _get_min_fps_threshold(7000) + + assert fps >= min_fps, f"Fade effect FPS {fps:.0f} below minimum {min_fps}" + + @pytest.mark.benchmark + def test_glitch_effect_minimum_fps(self): + """Glitch effect should meet minimum performance threshold.""" + import time + + discover_plugins() + registry = get_registry() + effect = registry.get("glitch") + assert effect is not None, "Glitch effect should be registered" + + buffer = ["x" * 80 for _ in range(24)] + ctx = EffectContext( + terminal_width=80, + terminal_height=24, + scroll_cam=0, + ticker_height=20, + mic_excess=0.0, + grad_offset=0.0, + frame_number=0, + has_message=False, + ) + + iterations = _get_iterations() + start = time.perf_counter() + for _ in range(iterations): + effect.process(buffer, ctx) + elapsed = time.perf_counter() - start + + fps = iterations / elapsed + min_fps = _get_min_fps_threshold(5000) + + assert fps >= min_fps, f"Glitch effect FPS {fps:.0f} below minimum {min_fps}" + + @pytest.mark.benchmark + def test_border_effect_minimum_fps(self): + """Border effect should meet minimum performance threshold.""" + import time + + discover_plugins() + registry = get_registry() + effect = registry.get("border") + assert effect is not None, "Border effect should be registered" + + buffer = ["x" * 80 for _ in range(24)] + ctx = EffectContext( + terminal_width=80, + terminal_height=24, + scroll_cam=0, + ticker_height=20, + mic_excess=0.0, + grad_offset=0.0, + frame_number=0, + has_message=False, + ) + + iterations = _get_iterations() + start = time.perf_counter() + for _ in range(iterations): + effect.process(buffer, ctx) + elapsed = time.perf_counter() - start + + fps = iterations / elapsed + min_fps = _get_min_fps_threshold(5000) + + assert fps >= min_fps, f"Border effect FPS {fps:.0f} below minimum {min_fps}" + + @pytest.mark.benchmark + def test_tint_effect_minimum_fps(self): + """Tint effect 
should meet minimum performance threshold.""" + import time + + discover_plugins() + registry = get_registry() + effect = registry.get("tint") + assert effect is not None, "Tint effect should be registered" + + buffer = ["x" * 80 for _ in range(24)] + ctx = EffectContext( + terminal_width=80, + terminal_height=24, + scroll_cam=0, + ticker_height=20, + mic_excess=0.0, + grad_offset=0.0, + frame_number=0, + has_message=False, + ) + + iterations = _get_iterations() + start = time.perf_counter() + for _ in range(iterations): + effect.process(buffer, ctx) + elapsed = time.perf_counter() - start + + fps = iterations / elapsed + min_fps = _get_min_fps_threshold(8000) + + assert fps >= min_fps, f"Tint effect FPS {fps:.0f} below minimum {min_fps}" + + +class TestBenchmarkPipeline: + """Performance tests for pipeline execution.""" + + @pytest.mark.benchmark + def test_pipeline_execution_minimum_fps(self): + """Pipeline execution should meet minimum performance threshold.""" + import time + + from engine.data_sources.sources import EmptyDataSource + from engine.pipeline import Pipeline, StageRegistry, discover_stages + from engine.pipeline.adapters import DataSourceStage, SourceItemsToBufferStage + + discover_stages() + + # Create a minimal pipeline with empty source to avoid network calls + pipeline = Pipeline() + + # Create empty source directly (not registered in stage registry) + empty_source = EmptyDataSource(width=80, height=24) + source_stage = DataSourceStage(empty_source, name="empty") + + # Add render stage to convert items to text buffer + render_stage = SourceItemsToBufferStage(name="items-to-buffer") + + # Get null display from registry + null_display = StageRegistry.create("display", "null") + assert null_display is not None, "null display should be registered" + + pipeline.add_stage("source", source_stage) + pipeline.add_stage("render", render_stage) + pipeline.add_stage("display", null_display) + pipeline.build() + + iterations = _get_iterations() + start = 
time.perf_counter() + for _ in range(iterations): + pipeline.execute() + elapsed = time.perf_counter() - start + + fps = iterations / elapsed + min_fps = _get_min_fps_threshold(1000) + + assert fps >= min_fps, ( + f"Pipeline execution FPS {fps:.0f} below minimum {min_fps}" + ) diff --git a/tests/test_border_effect.py b/tests/test_border_effect.py new file mode 100644 index 0000000..c3fb8c7 --- /dev/null +++ b/tests/test_border_effect.py @@ -0,0 +1,111 @@ +""" +Tests for BorderEffect. +""" + +from engine.effects.plugins.border import BorderEffect +from engine.effects.types import EffectContext + + +def make_ctx(terminal_width: int = 80, terminal_height: int = 24) -> EffectContext: + """Create a mock EffectContext.""" + return EffectContext( + terminal_width=terminal_width, + terminal_height=terminal_height, + scroll_cam=0, + ticker_height=terminal_height, + ) + + +class TestBorderEffect: + """Tests for BorderEffect.""" + + def test_basic_init(self): + """BorderEffect initializes with defaults.""" + effect = BorderEffect() + assert effect.name == "border" + assert effect.config.enabled is True + + def test_adds_border(self): + """BorderEffect adds border around content.""" + effect = BorderEffect() + buf = [ + "Hello World", + "Test Content", + "Third Line", + ] + ctx = make_ctx(terminal_width=20, terminal_height=10) + + result = effect.process(buf, ctx) + + # Should have top and bottom borders + assert len(result) >= 3 + # First line should start with border character + assert result[0][0] in "┌┎┍" + # Last line should end with border character + assert result[-1][-1] in "┘┖┚" + + def test_border_with_small_buffer(self): + """BorderEffect handles small buffer (too small for border).""" + effect = BorderEffect() + buf = ["ab"] # Too small for proper border + ctx = make_ctx(terminal_width=10, terminal_height=5) + + result = effect.process(buf, ctx) + + # Should still try to add border but result may differ + # At minimum should have output + assert len(result) >= 1 + 
+ def test_metrics_in_border(self): + """BorderEffect includes FPS and frame time in border.""" + effect = BorderEffect() + buf = ["x" * 10] * 5 + ctx = make_ctx(terminal_width=20, terminal_height=10) + + # Add metrics to context + ctx.set_state( + "metrics", + { + "avg_ms": 16.5, + "frame_count": 100, + "fps": 60.0, + }, + ) + + result = effect.process(buf, ctx) + + # Check for FPS in top border + top_line = result[0] + assert "FPS" in top_line or "60" in top_line + + # Check for frame time in bottom border + bottom_line = result[-1] + assert "ms" in bottom_line or "16" in bottom_line + + def test_no_metrics(self): + """BorderEffect works without metrics.""" + effect = BorderEffect() + buf = ["content"] * 5 + ctx = make_ctx(terminal_width=20, terminal_height=10) + # No metrics set + + result = effect.process(buf, ctx) + + # Should still have border characters + assert len(result) >= 3 + assert result[0][0] in "┌┎┍" + + def test_crops_before_bordering(self): + """BorderEffect crops input before adding border.""" + effect = BorderEffect() + buf = ["x" * 100] * 50 # Very large buffer + ctx = make_ctx(terminal_width=20, terminal_height=10) + + result = effect.process(buf, ctx) + + # Should be cropped to fit, then bordered + # Result should be <= terminal_height with border + assert len(result) <= ctx.terminal_height + # Each line should be <= terminal_width + for line in result: + assert len(line) <= ctx.terminal_width diff --git a/tests/test_camera.py b/tests/test_camera.py new file mode 100644 index 0000000..60c5bb4 --- /dev/null +++ b/tests/test_camera.py @@ -0,0 +1,68 @@ +from engine.camera import Camera, CameraMode + + +def test_camera_vertical_default(): + """Test default vertical camera.""" + cam = Camera() + assert cam.mode == CameraMode.FEED + assert cam.x == 0 + assert cam.y == 0 + + +def test_camera_vertical_factory(): + """Test vertical factory method.""" + cam = Camera.feed(speed=2.0) + assert cam.mode == CameraMode.FEED + assert cam.speed == 2.0 + + +def 
test_camera_horizontal(): + """Test horizontal camera.""" + cam = Camera.horizontal(speed=1.5) + assert cam.mode == CameraMode.HORIZONTAL + cam.update(1.0) + assert cam.x > 0 + + +def test_camera_omni(): + """Test omnidirectional camera.""" + cam = Camera.omni(speed=1.0) + assert cam.mode == CameraMode.OMNI + cam.update(1.0) + assert cam.x > 0 + assert cam.y > 0 + + +def test_camera_floating(): + """Test floating camera with sinusoidal motion.""" + cam = Camera.floating(speed=1.0) + assert cam.mode == CameraMode.FLOATING + y_before = cam.y + cam.update(0.5) + y_after = cam.y + assert y_before != y_after + + +def test_camera_reset(): + """Test camera reset.""" + cam = Camera.vertical() + cam.update(1.0) + assert cam.y > 0 + cam.reset() + assert cam.x == 0 + assert cam.y == 0 + + +def test_camera_custom_update(): + """Test custom update function.""" + call_count = 0 + + def custom_update(camera, dt): + nonlocal call_count + call_count += 1 + camera.x += int(10 * dt) + + cam = Camera.custom(custom_update) + cam.update(1.0) + assert call_count == 1 + assert cam.x == 10 diff --git a/tests/test_camera_acceptance.py b/tests/test_camera_acceptance.py new file mode 100644 index 0000000..1faa519 --- /dev/null +++ b/tests/test_camera_acceptance.py @@ -0,0 +1,826 @@ +""" +Camera acceptance tests using NullDisplay frame recording and ReplayDisplay. + +Tests all camera modes by: +1. Creating deterministic source data (numbered lines) +2. Running pipeline with small viewport (40x15) +3. Recording frames with NullDisplay +4. Asserting expected viewport content for each mode + +Usage: + pytest tests/test_camera_acceptance.py -v + pytest tests/test_camera_acceptance.py --show-frames -v + +The --show-frames flag displays recorded frames for visual verification. 
+""" + +import math +import sys +from pathlib import Path + +import pytest + +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from engine.camera import Camera, CameraMode +from engine.display import DisplayRegistry +from engine.effects import get_registry +from engine.pipeline import Pipeline, PipelineConfig, PipelineContext +from engine.pipeline.adapters import ( + CameraClockStage, + CameraStage, + FontStage, + ViewportFilterStage, + create_stage_from_display, + create_stage_from_effect, +) +from engine.pipeline.params import PipelineParams + + +def get_camera_position(pipeline, camera): + """Helper to get camera position directly from the camera object. + + The pipeline context's camera_y/camera_x values may be transformed by + ViewportFilterStage (filtered relative position). This helper gets the + true camera position from the camera object itself. + + Args: + pipeline: The pipeline instance + camera: The camera object + + Returns: + tuple (x, y) of the camera's absolute position + """ + return (camera.x, camera.y) + + +# Register custom CLI option for showing frames +def pytest_addoption(parser): + parser.addoption( + "--show-frames", + action="store_true", + default=False, + help="Display recorded frames for visual verification", + ) + + +@pytest.fixture +def show_frames(request): + """Get the --show-frames flag value.""" + try: + return request.config.getoption("--show-frames") + except ValueError: + # Option not registered, default to False + return False + + +@pytest.fixture +def viewport_dims(): + """Small viewport dimensions for testing.""" + return (40, 15) + + +@pytest.fixture +def items(): + """Create deterministic test data - numbered lines for easy verification.""" + # Create 100 numbered lines: LINE 000, LINE 001, etc. 
+ return [{"text": f"LINE {i:03d} - This is line number {i}"} for i in range(100)] + + +@pytest.fixture +def null_display(viewport_dims): + """Create a NullDisplay for testing.""" + display = DisplayRegistry.create("null") + display.init(viewport_dims[0], viewport_dims[1]) + return display + + +def create_pipeline_with_camera( + camera, items, null_display, viewport_dims, effects=None +): + """Helper to create a pipeline with a specific camera.""" + effects = effects or [] + width, height = viewport_dims + + params = PipelineParams() + params.viewport_width = width + params.viewport_height = height + + config = PipelineConfig( + source="fixture", + display="null", + camera="scroll", + effects=effects, + ) + + pipeline = Pipeline(config=config, context=PipelineContext()) + + from engine.data_sources.sources import ListDataSource + from engine.pipeline.adapters import DataSourceStage + + list_source = ListDataSource(items, name="fixture") + pipeline.add_stage("source", DataSourceStage(list_source, name="fixture")) + + # Add camera update stage to ensure camera_y is available for viewport filter + pipeline.add_stage("camera_update", CameraClockStage(camera, name="camera-clock")) + + # Note: camera should come after font/viewport_filter, before effects + pipeline.add_stage("viewport_filter", ViewportFilterStage(name="viewport-filter")) + pipeline.add_stage("font", FontStage(name="font")) + pipeline.add_stage( + "camera", + CameraStage( + camera, name="radial" if camera.mode == CameraMode.RADIAL else "vertical" + ), + ) + + if effects: + effect_registry = get_registry() + for effect_name in effects: + effect = effect_registry.get(effect_name) + if effect: + pipeline.add_stage( + f"effect_{effect_name}", + create_stage_from_effect(effect, effect_name), + ) + + pipeline.add_stage("display", create_stage_from_display(null_display, "null")) + pipeline.build() + + if not pipeline.initialize(): + return None + + ctx = pipeline.context + ctx.params = params + 
ctx.set("display", null_display) + ctx.set("items", items) + ctx.set("pipeline", pipeline) + ctx.set("pipeline_order", pipeline.execution_order) + + return pipeline + + +class DisplayHelper: + """Helper to display frames for visual verification.""" + + @staticmethod + def show_frame(buffer, title, viewport_dims, marker_line=None): + """Display a single frame with visual markers.""" + width, height = viewport_dims + print(f"\n{'=' * (width + 20)}") + print(f" {title}") + print(f"{'=' * (width + 20)}") + + for i, line in enumerate(buffer[:height]): + # Add marker if this line should be highlighted + marker = ">>>" if marker_line == i else " " + print(f"{marker} [{i:2}] {line[:width]}") + + print(f"{'=' * (width + 20)}\n") + + +class TestFeedCamera: + """Test FEED mode: rapid single-item scrolling (1 row/frame at speed=1.0).""" + + def test_feed_camera_scrolls_down( + self, items, null_display, viewport_dims, show_frames + ): + """FEED camera should move content down (y increases) at 1 row/frame.""" + camera = Camera.feed(speed=1.0) + camera.set_canvas_size(200, 100) + + pipeline = create_pipeline_with_camera( + camera, items, null_display, viewport_dims + ) + assert pipeline is not None, "Pipeline creation failed" + + null_display.start_recording() + + # Run for 10 frames with small delay between frames + # to ensure camera has time to move (dt calculation relies on time.perf_counter()) + import time + + for frame in range(10): + pipeline.context.set("frame_number", frame) + result = pipeline.execute(items) + assert result.success, f"Frame {frame} execution failed" + if frame < 9: # No need to sleep after last frame + time.sleep(0.02) # Wait 20ms so dt~0.02, camera moves ~1.2 rows + + null_display.stop_recording() + frames = null_display.get_frames() + + if show_frames: + DisplayHelper.show_frame(frames[0], "FEED Camera - Frame 0", viewport_dims) + DisplayHelper.show_frame(frames[5], "FEED Camera - Frame 5", viewport_dims) + DisplayHelper.show_frame(frames[9], "FEED 
Camera - Frame 9", viewport_dims) + + # FEED mode: each frame y increases by speed*dt*60 + # At dt=1.0, speed=1.0: y increases by 60 per frame + # But clamp to canvas bounds (200) + # Frame 0: y=0, should show LINE 000 + # Frame 1: y=60, should show LINE 060 + + # Verify frame 0 contains ASCII art content (rendered from LINE 000) + # The text is converted to block characters, so check for non-empty frames + assert len(frames[0]) > 0, "Frame 0 should not be empty" + assert frames[0][0].strip() != "", "Frame 0 should have visible content" + + # Verify camera position changed between frames + # Feed mode moves 1 row per frame at speed=1.0 with dt~0.02 + # After 5 frames, camera should have moved down + assert camera.y > 0, f"Camera should have moved down, y={camera.y}" + + # Verify different frames show different content (camera is scrolling) + # Check that frame 0 and frame 5 are different + frame_0_str = "\n".join(frames[0]) + frame_5_str = "\n".join(frames[5]) + assert frame_0_str != frame_5_str, ( + "Frame 0 and Frame 5 should show different content" + ) + + +class TestScrollCamera: + """Test SCROLL mode: smooth vertical scrolling with float accumulation.""" + + def test_scroll_camera_smooth_movement( + self, items, null_display, viewport_dims, show_frames + ): + """SCROLL camera should move content smoothly with sub-integer precision.""" + camera = Camera.scroll(speed=0.5) + camera.set_canvas_size(0, 200) # Match viewport width for text wrapping + + pipeline = create_pipeline_with_camera( + camera, items, null_display, viewport_dims + ) + assert pipeline is not None, "Pipeline creation failed" + + null_display.start_recording() + + # Run for 20 frames + for frame in range(20): + pipeline.context.set("frame_number", frame) + result = pipeline.execute(items) + assert result.success, f"Frame {frame} execution failed" + + null_display.stop_recording() + frames = null_display.get_frames() + + if show_frames: + DisplayHelper.show_frame( + frames[0], "SCROLL Camera - 
Frame 0", viewport_dims + ) + DisplayHelper.show_frame( + frames[10], "SCROLL Camera - Frame 10", viewport_dims + ) + + # SCROLL mode uses float accumulation for smooth scrolling + # At speed=0.5, dt=1.0: y increases by 0.5 * 60 = 30 pixels per frame + # Verify camera_y is increasing (which causes the scroll) + camera_y_values = [] + for frame in range(5): + # Get camera.y directly (not filtered context value) + pipeline.context.set("frame_number", frame) + pipeline.execute(items) + camera_y_values.append(camera.y) + + print(f"\nSCROLL test - camera_y positions: {camera_y_values}") + + # Verify camera_y is non-zero (camera is moving) + assert camera_y_values[-1] > 0, ( + "Camera should have scrolled down (camera_y > 0)" + ) + + # Verify camera_y is increasing + for i in range(len(camera_y_values) - 1): + assert camera_y_values[i + 1] >= camera_y_values[i], ( + f"Camera_y should be non-decreasing: {camera_y_values}" + ) + + +class TestHorizontalCamera: + """Test HORIZONTAL mode: left/right scrolling.""" + + def test_horizontal_camera_scrolls_right( + self, items, null_display, viewport_dims, show_frames + ): + """HORIZONTAL camera should move content right (x increases).""" + camera = Camera.horizontal(speed=1.0) + camera.set_canvas_size(200, 200) + + pipeline = create_pipeline_with_camera( + camera, items, null_display, viewport_dims + ) + assert pipeline is not None, "Pipeline creation failed" + + null_display.start_recording() + + for frame in range(10): + pipeline.context.set("frame_number", frame) + result = pipeline.execute(items) + assert result.success, f"Frame {frame} execution failed" + + null_display.stop_recording() + frames = null_display.get_frames() + + if show_frames: + DisplayHelper.show_frame( + frames[0], "HORIZONTAL Camera - Frame 0", viewport_dims + ) + DisplayHelper.show_frame( + frames[5], "HORIZONTAL Camera - Frame 5", viewport_dims + ) + + # HORIZONTAL mode: x increases by speed*dt*60 + # At dt=1.0, speed=1.0: x increases by 60 per frame + # 
Frame 0: x=0 + # Frame 5: x=300 (clamped to canvas_width-viewport_width) + + # Verify frame 0 contains content (ASCII art of LINE 000) + assert len(frames[0]) > 0, "Frame 0 should not be empty" + assert frames[0][0].strip() != "", "Frame 0 should have visible content" + + # Verify camera x is increasing + print("\nHORIZONTAL test - camera positions:") + for i in range(10): + print(f" Frame {i}: x={camera.x}, y={camera.y}") + camera.update(1.0) + + # Verify camera moved + assert camera.x > 0, f"Camera should have moved right, x={camera.x}" + + +class TestOmniCamera: + """Test OMNI mode: diagonal scrolling (x and y increase together).""" + + def test_omni_camera_diagonal_movement( + self, items, null_display, viewport_dims, show_frames + ): + """OMNI camera should move content diagonally (both x and y increase).""" + camera = Camera.omni(speed=1.0) + camera.set_canvas_size(200, 200) + + pipeline = create_pipeline_with_camera( + camera, items, null_display, viewport_dims + ) + assert pipeline is not None, "Pipeline creation failed" + + null_display.start_recording() + + for frame in range(10): + pipeline.context.set("frame_number", frame) + result = pipeline.execute(items) + assert result.success, f"Frame {frame} execution failed" + + null_display.stop_recording() + frames = null_display.get_frames() + + if show_frames: + DisplayHelper.show_frame(frames[0], "OMNI Camera - Frame 0", viewport_dims) + DisplayHelper.show_frame(frames[5], "OMNI Camera - Frame 5", viewport_dims) + + # OMNI mode: y increases by speed*dt*60, x increases by speed*dt*60*0.5 + # At dt=1.0, speed=1.0: y += 60, x += 30 + + # Verify frame 0 contains content (ASCII art) + assert len(frames[0]) > 0, "Frame 0 should not be empty" + assert frames[0][0].strip() != "", "Frame 0 should have visible content" + + print("\nOMNI test - camera positions:") + camera.reset() + for frame in range(5): + print(f" Frame {frame}: x={camera.x}, y={camera.y}") + camera.update(1.0) + + # Verify camera moved + assert 
camera.y > 0, f"Camera should have moved down, y={camera.y}" + + +class TestFloatingCamera: + """Test FLOATING mode: sinusoidal bobbing motion.""" + + def test_floating_camera_bobbing( + self, items, null_display, viewport_dims, show_frames + ): + """FLOATING camera should move content in a sinusoidal pattern.""" + camera = Camera.floating(speed=1.0) + camera.set_canvas_size(200, 200) + + pipeline = create_pipeline_with_camera( + camera, items, null_display, viewport_dims + ) + assert pipeline is not None, "Pipeline creation failed" + + null_display.start_recording() + + for frame in range(32): + pipeline.context.set("frame_number", frame) + result = pipeline.execute(items) + assert result.success, f"Frame {frame} execution failed" + + null_display.stop_recording() + frames = null_display.get_frames() + + if show_frames: + DisplayHelper.show_frame( + frames[0], "FLOATING Camera - Frame 0", viewport_dims + ) + DisplayHelper.show_frame( + frames[8], "FLOATING Camera - Frame 8 (quarter cycle)", viewport_dims + ) + DisplayHelper.show_frame( + frames[16], "FLOATING Camera - Frame 16 (half cycle)", viewport_dims + ) + + # FLOATING mode: y = sin(time*2) * speed * 30 + # Period: 2π / 2 = π ≈ 3.14 seconds (or ~3.14 frames at dt=1.0) + # Full cycle ~32 frames + + print("\nFLOATING test - sinusoidal motion:") + camera.reset() + for frame in range(16): + print(f" Frame {frame}: y={camera.y}, x={camera.x}") + camera.update(1.0) + + # Verify y oscillates around 0 + camera.reset() + camera.update(1.0) # Frame 1 + y1 = camera.y + camera.update(1.0) # Frame 2 + y2 = camera.y + camera.update(1.0) # Frame 3 + y3 = camera.y + + # After a few frames, y should oscillate (not monotonic) + assert y1 != y2 or y2 != y3, "FLOATING camera should oscillate" + + +class TestBounceCamera: + """Test BOUNCE mode: bouncing DVD-style motion.""" + + def test_bounce_camera_reverses_at_edges( + self, items, null_display, viewport_dims, show_frames + ): + """BOUNCE camera should reverse direction when 
hitting canvas edges.""" + camera = Camera.bounce(speed=5.0) # Faster for quicker test + # Set zoom > 1.0 so viewport is smaller than canvas, allowing movement + camera.set_zoom(2.0) # Zoom out 2x, viewport is half the canvas size + camera.set_canvas_size(400, 400) + + pipeline = create_pipeline_with_camera( + camera, items, null_display, viewport_dims + ) + assert pipeline is not None, "Pipeline creation failed" + + null_display.start_recording() + + for frame in range(50): + pipeline.context.set("frame_number", frame) + result = pipeline.execute(items) + assert result.success, f"Frame {frame} execution failed" + + null_display.stop_recording() + frames = null_display.get_frames() + + if show_frames: + DisplayHelper.show_frame( + frames[0], "BOUNCE Camera - Frame 0", viewport_dims + ) + DisplayHelper.show_frame( + frames[25], "BOUNCE Camera - Frame 25", viewport_dims + ) + + # BOUNCE mode: moves until it hits edge, then reverses + # Verify the camera moves and changes direction + + print("\nBOUNCE test - bouncing motion:") + camera.reset() + camera.set_zoom(2.0) # Reset also resets zoom, so set it again + for frame in range(20): + print(f" Frame {frame}: x={camera.x}, y={camera.y}") + camera.update(1.0) + + # Check that camera hits bounds and reverses + camera.reset() + camera.set_zoom(2.0) # Reset also resets zoom, so set it again + for _ in range(51): # Odd number ensures ending at opposite corner + camera.update(1.0) + + # Camera should have hit an edge and reversed direction + # With 400x400 canvas, viewport 200x200 (zoom=2), max_x = 200, max_y = 200 + # Starting at (0,0), after 51 updates it should be at (200, 200) + max_x = max(0, camera.canvas_width - camera.viewport_width) + print(f"BOUNCE camera final position: x={camera.x}, y={camera.y}") + assert camera.x == max_x, ( + f"Camera should be at max_x ({max_x}), got x={camera.x}" + ) + + # Check bounds are respected + vw = camera.viewport_width + vh = camera.viewport_height + assert camera.x >= 0 and 
camera.x <= camera.canvas_width - vw + assert camera.y >= 0 and camera.y <= camera.canvas_height - vh + + +class TestRadialCamera: + """Test RADIAL mode: polar coordinate scanning (rotation around center).""" + + def test_radial_camera_rotates_around_center( + self, items, null_display, viewport_dims, show_frames + ): + """RADIAL camera should rotate around the center of the canvas.""" + camera = Camera.radial(speed=0.5) + camera.set_canvas_size(200, 200) + + pipeline = create_pipeline_with_camera( + camera, items, null_display, viewport_dims + ) + assert pipeline is not None, "Pipeline creation failed" + + null_display.start_recording() + + for frame in range(32): # 32 frames = 2π at ~0.2 rad/frame + pipeline.context.set("frame_number", frame) + result = pipeline.execute(items) + assert result.success, f"Frame {frame} execution failed" + + null_display.stop_recording() + frames = null_display.get_frames() + + if show_frames: + DisplayHelper.show_frame( + frames[0], "RADIAL Camera - Frame 0", viewport_dims + ) + DisplayHelper.show_frame( + frames[8], "RADIAL Camera - Frame 8 (quarter turn)", viewport_dims + ) + DisplayHelper.show_frame( + frames[16], "RADIAL Camera - Frame 16 (half turn)", viewport_dims + ) + DisplayHelper.show_frame( + frames[24], "RADIAL Camera - Frame 24 (3/4 turn)", viewport_dims + ) + + # RADIAL mode: rotates around center with smooth angular motion + # At speed=0.5: theta increases by ~0.2 rad/frame (0.5 * dt * 1.0) + + print("\nRADIAL test - rotational motion:") + camera.reset() + for frame in range(32): + theta_deg = (camera._theta_float * 180 / math.pi) % 360 + print( + f" Frame {frame}: theta={theta_deg:.1f}°, x={camera.x}, y={camera.y}" + ) + camera.update(1.0) + + # Verify rotation occurs (angle should change) + camera.reset() + theta_start = camera._theta_float + camera.update(1.0) # Frame 1 + theta_mid = camera._theta_float + camera.update(1.0) # Frame 2 + theta_end = camera._theta_float + + assert theta_mid > theta_start, "Theta 
should increase (rotation)" + assert theta_end > theta_mid, "Theta should continue increasing" + + def test_radial_camera_with_sensor_integration( + self, items, null_display, viewport_dims, show_frames + ): + """RADIAL camera can be driven by external sensor (OSC integration test).""" + from engine.sensors.oscillator import ( + OscillatorSensor, + register_oscillator_sensor, + ) + + # Create an oscillator sensor for testing + register_oscillator_sensor(name="test_osc", waveform="sine", frequency=0.5) + osc = OscillatorSensor(name="test_osc", waveform="sine", frequency=0.5) + + camera = Camera.radial(speed=0.3) + camera.set_canvas_size(200, 200) + + pipeline = create_pipeline_with_camera( + camera, items, null_display, viewport_dims + ) + assert pipeline is not None, "Pipeline creation failed" + + null_display.start_recording() + + # Run frames while modulating camera with oscillator + for frame in range(32): + # Read oscillator value and set as radial input + osc_value = osc.read() + if osc_value: + camera.set_radial_input(osc_value.value) + + pipeline.context.set("frame_number", frame) + result = pipeline.execute(items) + assert result.success, f"Frame {frame} execution failed" + + null_display.stop_recording() + frames = null_display.get_frames() + + if show_frames: + DisplayHelper.show_frame( + frames[0], "RADIAL+OSC Camera - Frame 0", viewport_dims + ) + DisplayHelper.show_frame( + frames[8], "RADIAL+OSC Camera - Frame 8", viewport_dims + ) + DisplayHelper.show_frame( + frames[16], "RADIAL+OSC Camera - Frame 16", viewport_dims + ) + + print("\nRADIAL+OSC test - sensor-driven rotation:") + osc.start() + camera.reset() + for frame in range(16): + osc_value = osc.read() + if osc_value: + camera.set_radial_input(osc_value.value) + camera.update(1.0) + theta_deg = (camera._theta_float * 180 / math.pi) % 360 + print( + f" Frame {frame}: osc={osc_value.value if osc_value else 0:.3f}, theta={theta_deg:.1f}°" + ) + + # Verify camera position changes when driven by 
sensor + camera.reset() + x_start = camera.x + camera.update(1.0) + x_mid = camera.x + assert x_start != x_mid, "Camera should move when driven by oscillator" + + osc.stop() + + def test_radial_camera_with_direct_angle_setting( + self, items, null_display, viewport_dims, show_frames + ): + """RADIAL camera can have angle set directly for OSC integration.""" + camera = Camera.radial(speed=0.0) # No auto-rotation + camera.set_canvas_size(200, 200) + camera._r_float = 80.0 # Set initial radius to see movement + + pipeline = create_pipeline_with_camera( + camera, items, null_display, viewport_dims + ) + assert pipeline is not None, "Pipeline creation failed" + + null_display.start_recording() + + # Set angle directly to sweep through full rotation + for frame in range(32): + angle = (frame / 32) * 2 * math.pi # 0 to 2π over 32 frames + camera.set_radial_angle(angle) + camera.update(1.0) # Must update to convert polar to Cartesian + + pipeline.context.set("frame_number", frame) + result = pipeline.execute(items) + assert result.success, f"Frame {frame} execution failed" + + null_display.stop_recording() + frames = null_display.get_frames() + + if show_frames: + DisplayHelper.show_frame( + frames[0], "RADIAL Direct Angle - Frame 0", viewport_dims + ) + DisplayHelper.show_frame( + frames[8], "RADIAL Direct Angle - Frame 8", viewport_dims + ) + DisplayHelper.show_frame( + frames[16], "RADIAL Direct Angle - Frame 16", viewport_dims + ) + + print("\nRADIAL Direct Angle test - sweeping rotation:") + for frame in range(32): + angle = (frame / 32) * 2 * math.pi + camera.set_radial_angle(angle) + camera.update(1.0) # Update converts angle to x,y position + theta_deg = angle * 180 / math.pi + print( + f" Frame {frame}: set_angle={theta_deg:.1f}°, actual_x={camera.x}, actual_y={camera.y}" + ) + + # Verify camera position changes as angle sweeps + camera.reset() + camera._r_float = 80.0 # Set radius for testing + camera.set_radial_angle(0) + camera.update(1.0) + x0 = camera.x + 
camera.set_radial_angle(math.pi / 2) + camera.update(1.0) + x90 = camera.x + assert x0 != x90, ( + f"Camera position should change with angle (x0={x0}, x90={x90})" + ) + + +class TestCameraModeEnum: + """Test CameraMode enum integrity.""" + + def test_all_modes_exist(self): + """Verify all camera modes are defined.""" + modes = [m.name for m in CameraMode] + expected = [ + "FEED", + "SCROLL", + "HORIZONTAL", + "OMNI", + "FLOATING", + "BOUNCE", + "RADIAL", + ] + + for mode in expected: + assert mode in modes, f"CameraMode.{mode} should exist" + + def test_radial_mode_exists(self): + """Verify RADIAL mode is properly defined.""" + assert CameraMode.RADIAL is not None + assert isinstance(CameraMode.RADIAL, CameraMode) + assert CameraMode.RADIAL.name == "RADIAL" + + +class TestCameraFactoryMethods: + """Test camera factory methods create proper camera instances.""" + + def test_radial_factory(self): + """RADIAL factory should create a camera with correct mode.""" + camera = Camera.radial(speed=2.0) + assert camera.mode == CameraMode.RADIAL + assert camera.speed == 2.0 + assert hasattr(camera, "_r_float") + assert hasattr(camera, "_theta_float") + + def test_radial_factory_initializes_state(self): + """RADIAL factory should initialize radial state.""" + camera = Camera.radial() + assert camera._r_float == 0.0 + assert camera._theta_float == 0.0 + + +class TestCameraStateSaveRestore: + """Test camera state can be saved and restored (for hot-rebuild).""" + + def test_radial_camera_state_save(self): + """RADIAL camera should save polar coordinate state.""" + camera = Camera.radial() + camera._theta_float = math.pi / 4 + camera._r_float = 50.0 + + # Save state via CameraStage adapter + from engine.pipeline.adapters.camera import CameraStage + + stage = CameraStage(camera) + + state = stage.save_state() + assert "_theta_float" in state + assert "_r_float" in state + assert state["_theta_float"] == math.pi / 4 + assert state["_r_float"] == 50.0 + + def 
test_radial_camera_state_restore(self): + """RADIAL camera should restore polar coordinate state.""" + camera1 = Camera.radial() + camera1._theta_float = math.pi / 3 + camera1._r_float = 75.0 + + from engine.pipeline.adapters.camera import CameraStage + + stage1 = CameraStage(camera1) + state = stage1.save_state() + + # Create new camera and restore + camera2 = Camera.radial() + stage2 = CameraStage(camera2) + stage2.restore_state(state) + + assert abs(camera2._theta_float - math.pi / 3) < 0.001 + assert abs(camera2._r_float - 75.0) < 0.001 + + +class TestCameraViewportApplication: + """Test camera.apply() properly slices buffers.""" + + def test_radial_camera_viewport_slicing(self): + """RADIAL camera should properly slice buffer based on position.""" + camera = Camera.radial(speed=0.5) + camera.set_canvas_size(200, 200) + + # Update to move camera + camera.update(1.0) + + # Create test buffer with 200 lines + buffer = [f"LINE {i:03d}" for i in range(200)] + + # Apply camera viewport (15 lines high) + result = camera.apply(buffer, viewport_width=40, viewport_height=15) + + # Result should be exactly 15 lines + assert len(result) == 15 + + # Each line should be 40 characters (padded or truncated) + for line in result: + assert len(line) <= 40 diff --git a/tests/test_controller.py b/tests/test_controller.py deleted file mode 100644 index 0f08b9b..0000000 --- a/tests/test_controller.py +++ /dev/null @@ -1,117 +0,0 @@ -""" -Tests for engine.controller module. 
-""" - -from unittest.mock import MagicMock, patch - -from engine import config -from engine.controller import StreamController - - -class TestStreamController: - """Tests for StreamController class.""" - - def test_init_default_config(self): - """StreamController initializes with default config.""" - controller = StreamController() - assert controller.config is not None - assert isinstance(controller.config, config.Config) - - def test_init_custom_config(self): - """StreamController accepts custom config.""" - custom_config = config.Config(headline_limit=500) - controller = StreamController(config=custom_config) - assert controller.config.headline_limit == 500 - - def test_init_sources_none_by_default(self): - """Sources are None until initialized.""" - controller = StreamController() - assert controller.mic is None - assert controller.ntfy is None - - @patch("engine.controller.MicMonitor") - @patch("engine.controller.NtfyPoller") - def test_initialize_sources(self, mock_ntfy, mock_mic): - """initialize_sources creates mic and ntfy instances.""" - mock_mic_instance = MagicMock() - mock_mic_instance.available = True - mock_mic_instance.start.return_value = True - mock_mic.return_value = mock_mic_instance - - mock_ntfy_instance = MagicMock() - mock_ntfy_instance.start.return_value = True - mock_ntfy.return_value = mock_ntfy_instance - - controller = StreamController() - mic_ok, ntfy_ok = controller.initialize_sources() - - assert mic_ok is True - assert ntfy_ok is True - assert controller.mic is not None - assert controller.ntfy is not None - - @patch("engine.controller.MicMonitor") - @patch("engine.controller.NtfyPoller") - def test_initialize_sources_mic_unavailable(self, mock_ntfy, mock_mic): - """initialize_sources handles unavailable mic.""" - mock_mic_instance = MagicMock() - mock_mic_instance.available = False - mock_mic.return_value = mock_mic_instance - - mock_ntfy_instance = MagicMock() - mock_ntfy_instance.start.return_value = True - 
mock_ntfy.return_value = mock_ntfy_instance - - controller = StreamController() - mic_ok, ntfy_ok = controller.initialize_sources() - - assert mic_ok is False - assert ntfy_ok is True - - -class TestStreamControllerCleanup: - """Tests for StreamController cleanup.""" - - @patch("engine.controller.MicMonitor") - def test_cleanup_stops_mic(self, mock_mic): - """cleanup stops the microphone if running.""" - mock_mic_instance = MagicMock() - mock_mic.return_value = mock_mic_instance - - controller = StreamController() - controller.mic = mock_mic_instance - controller.cleanup() - - mock_mic_instance.stop.assert_called_once() - - -class TestStreamControllerWarmup: - """Tests for StreamController topic warmup.""" - - def test_warmup_topics_idempotent(self): - """warmup_topics can be called multiple times.""" - StreamController._topics_warmed = False - - with patch("urllib.request.urlopen") as mock_urlopen: - StreamController.warmup_topics() - StreamController.warmup_topics() - - assert mock_urlopen.call_count >= 3 - - def test_warmup_topics_sets_flag(self): - """warmup_topics sets the warmed flag.""" - StreamController._topics_warmed = False - - with patch("urllib.request.urlopen"): - StreamController.warmup_topics() - - assert StreamController._topics_warmed is True - - def test_warmup_topics_skips_after_first(self): - """warmup_topics skips after first call.""" - StreamController._topics_warmed = True - - with patch("urllib.request.urlopen") as mock_urlopen: - StreamController.warmup_topics() - - mock_urlopen.assert_not_called() diff --git a/tests/test_crop_effect.py b/tests/test_crop_effect.py new file mode 100644 index 0000000..238d2ff --- /dev/null +++ b/tests/test_crop_effect.py @@ -0,0 +1,99 @@ +""" +Tests for CropEffect. 
+""" + +from engine.effects.plugins.crop import CropEffect +from engine.effects.types import EffectContext + + +def make_ctx(terminal_width: int = 80, terminal_height: int = 24) -> EffectContext: + """Create a mock EffectContext.""" + return EffectContext( + terminal_width=terminal_width, + terminal_height=terminal_height, + scroll_cam=0, + ticker_height=terminal_height, + ) + + +class TestCropEffect: + """Tests for CropEffect.""" + + def test_basic_init(self): + """CropEffect initializes with defaults.""" + effect = CropEffect() + assert effect.name == "crop" + assert effect.config.enabled is True + + def test_crop_wider_buffer(self): + """CropEffect crops wide buffer to terminal width.""" + effect = CropEffect() + buf = [ + "This is a very long line that exceeds the terminal width of eighty characters!", + "Another long line that should also be cropped to fit within the terminal bounds!", + "Short", + ] + ctx = make_ctx(terminal_width=40, terminal_height=10) + + result = effect.process(buf, ctx) + + # Lines should be cropped to 40 chars + assert len(result[0]) == 40 + assert len(result[1]) == 40 + assert result[2] == "Short" + " " * 35 # padded to width + + def test_crop_taller_buffer(self): + """CropEffect crops tall buffer to terminal height.""" + effect = CropEffect() + buf = ["line"] * 30 # 30 lines + ctx = make_ctx(terminal_width=80, terminal_height=10) + + result = effect.process(buf, ctx) + + # Should be cropped to 10 lines + assert len(result) == 10 + + def test_pad_shorter_lines(self): + """CropEffect pads lines shorter than width.""" + effect = CropEffect() + buf = ["short", "medium length", ""] + ctx = make_ctx(terminal_width=20, terminal_height=5) + + result = effect.process(buf, ctx) + + assert len(result[0]) == 20 # padded + assert len(result[1]) == 20 # padded + assert len(result[2]) == 20 # padded (was empty) + + def test_pad_to_height(self): + """CropEffect pads with empty lines if buffer is too short.""" + effect = CropEffect() + buf = ["line1", 
"line2"] + ctx = make_ctx(terminal_width=20, terminal_height=10) + + result = effect.process(buf, ctx) + + # Should have 10 lines + assert len(result) == 10 + # Last 8 should be empty padding + for i in range(2, 10): + assert result[i] == " " * 20 + + def test_empty_buffer(self): + """CropEffect handles empty buffer.""" + effect = CropEffect() + ctx = make_ctx() + + result = effect.process([], ctx) + + assert result == [] + + def test_uses_context_dimensions(self): + """CropEffect uses context terminal_width/terminal_height.""" + effect = CropEffect() + buf = ["x" * 100] + ctx = make_ctx(terminal_width=50, terminal_height=1) + + result = effect.process(buf, ctx) + + assert len(result[0]) == 50 diff --git a/tests/test_data_sources.py b/tests/test_data_sources.py new file mode 100644 index 0000000..8d94404 --- /dev/null +++ b/tests/test_data_sources.py @@ -0,0 +1,220 @@ +""" +Tests for engine/data_sources/sources.py - data source implementations. + +Tests HeadlinesDataSource, PoetryDataSource, EmptyDataSource, and the +base DataSource class functionality. 
+""" + +from unittest.mock import patch + +import pytest + +from engine.data_sources.sources import ( + EmptyDataSource, + HeadlinesDataSource, + PoetryDataSource, + SourceItem, +) + + +class TestSourceItem: + """Test SourceItem dataclass.""" + + def test_source_item_creation(self): + """SourceItem can be created with required fields.""" + item = SourceItem( + content="Test headline", + source="test_source", + timestamp="2024-01-01", + ) + assert item.content == "Test headline" + assert item.source == "test_source" + assert item.timestamp == "2024-01-01" + assert item.metadata is None + + def test_source_item_with_metadata(self): + """SourceItem can include optional metadata.""" + metadata = {"author": "John", "category": "tech"} + item = SourceItem( + content="Test", + source="test", + timestamp="2024-01-01", + metadata=metadata, + ) + assert item.metadata == metadata + + +class TestEmptyDataSource: + """Test EmptyDataSource.""" + + def test_empty_source_name(self): + """EmptyDataSource has correct name.""" + source = EmptyDataSource() + assert source.name == "empty" + + def test_empty_source_is_not_dynamic(self): + """EmptyDataSource is static, not dynamic.""" + source = EmptyDataSource() + assert source.is_dynamic is False + + def test_empty_source_fetch_returns_blank_content(self): + """EmptyDataSource.fetch() returns blank lines.""" + source = EmptyDataSource(width=80, height=24) + items = source.fetch() + + assert len(items) == 1 + assert isinstance(items[0], SourceItem) + assert items[0].source == "empty" + # Content should be 24 lines of 80 spaces + lines = items[0].content.split("\n") + assert len(lines) == 24 + assert all(len(line) == 80 for line in lines) + + def test_empty_source_get_items_caches_result(self): + """EmptyDataSource.get_items() caches the result.""" + source = EmptyDataSource() + items1 = source.get_items() + items2 = source.get_items() + # Should return same cached items (same object reference) + assert items1 is items2 + + +class 
TestHeadlinesDataSource: + """Test HeadlinesDataSource.""" + + def test_headlines_source_name(self): + """HeadlinesDataSource has correct name.""" + source = HeadlinesDataSource() + assert source.name == "headlines" + + def test_headlines_source_is_static(self): + """HeadlinesDataSource is static.""" + source = HeadlinesDataSource() + assert source.is_dynamic is False + + def test_headlines_fetch_returns_source_items(self): + """HeadlinesDataSource.fetch() returns SourceItem list.""" + mock_items = [ + ("Test Article 1", "source1", "10:30"), + ("Test Article 2", "source2", "11:45"), + ] + with patch("engine.fetch.fetch_all") as mock_fetch_all: + mock_fetch_all.return_value = (mock_items, 2, 0) + + source = HeadlinesDataSource() + items = source.fetch() + + assert len(items) == 2 + assert all(isinstance(item, SourceItem) for item in items) + assert items[0].content == "Test Article 1" + assert items[0].source == "source1" + assert items[0].timestamp == "10:30" + + def test_headlines_fetch_with_empty_feed(self): + """HeadlinesDataSource handles empty feeds gracefully.""" + with patch("engine.fetch.fetch_all") as mock_fetch_all: + mock_fetch_all.return_value = ([], 0, 1) + + source = HeadlinesDataSource() + items = source.fetch() + + # Should return empty list + assert isinstance(items, list) + assert len(items) == 0 + + def test_headlines_get_items_caches_result(self): + """HeadlinesDataSource.get_items() caches the result.""" + mock_items = [("Test Article", "source", "12:00")] + with patch("engine.fetch.fetch_all") as mock_fetch_all: + mock_fetch_all.return_value = (mock_items, 1, 0) + + source = HeadlinesDataSource() + items1 = source.get_items() + items2 = source.get_items() + + # Should only call fetch once (cached) + assert mock_fetch_all.call_count == 1 + assert items1 is items2 + + def test_headlines_refresh_clears_cache(self): + """HeadlinesDataSource.refresh() clears cache and refetches.""" + mock_items = [("Test Article", "source", "12:00")] + with 
patch("engine.fetch.fetch_all") as mock_fetch_all: + mock_fetch_all.return_value = (mock_items, 1, 0) + + source = HeadlinesDataSource() + source.get_items() + source.refresh() + source.get_items() + + # Should call fetch twice (once for initial, once for refresh) + assert mock_fetch_all.call_count == 2 + + +class TestPoetryDataSource: + """Test PoetryDataSource.""" + + def test_poetry_source_name(self): + """PoetryDataSource has correct name.""" + source = PoetryDataSource() + assert source.name == "poetry" + + def test_poetry_source_is_static(self): + """PoetryDataSource is static.""" + source = PoetryDataSource() + assert source.is_dynamic is False + + def test_poetry_fetch_returns_source_items(self): + """PoetryDataSource.fetch() returns SourceItem list.""" + mock_items = [ + ("Poetry line 1", "Poetry Source 1", ""), + ("Poetry line 2", "Poetry Source 2", ""), + ] + with patch("engine.fetch.fetch_poetry") as mock_fetch_poetry: + mock_fetch_poetry.return_value = (mock_items, 2, 0) + + source = PoetryDataSource() + items = source.fetch() + + assert len(items) == 2 + assert all(isinstance(item, SourceItem) for item in items) + assert items[0].content == "Poetry line 1" + assert items[0].source == "Poetry Source 1" + + def test_poetry_get_items_caches_result(self): + """PoetryDataSource.get_items() caches result.""" + mock_items = [("Poetry line", "Poetry Source", "")] + with patch("engine.fetch.fetch_poetry") as mock_fetch_poetry: + mock_fetch_poetry.return_value = (mock_items, 1, 0) + + source = PoetryDataSource() + items1 = source.get_items() + items2 = source.get_items() + + # Should only fetch once (cached) + assert mock_fetch_poetry.call_count == 1 + assert items1 is items2 + + +class TestDataSourceInterface: + """Test DataSource base class interface.""" + + def test_data_source_stream_not_implemented(self): + """DataSource.stream() raises NotImplementedError.""" + source = EmptyDataSource() + with pytest.raises(NotImplementedError): + source.stream() + + def 
test_data_source_is_dynamic_defaults_false(self): + """DataSource.is_dynamic defaults to False.""" + source = EmptyDataSource() + assert source.is_dynamic is False + + def test_data_source_refresh_updates_cache(self): + """DataSource.refresh() updates internal cache.""" + source = EmptyDataSource() + source.get_items() + items_refreshed = source.refresh() + + # refresh() should return new items + assert isinstance(items_refreshed, list) diff --git a/tests/test_display.py b/tests/test_display.py index e2c08b4..20215e4 100644 --- a/tests/test_display.py +++ b/tests/test_display.py @@ -2,7 +2,13 @@ Tests for engine.display module. """ -from engine.display import NullDisplay, TerminalDisplay +import sys +from unittest.mock import MagicMock, patch + +import pytest + +from engine.display import DisplayRegistry, NullDisplay, TerminalDisplay, render_border +from engine.display.backends.multi import MultiDisplay class TestDisplayProtocol: @@ -25,6 +31,66 @@ class TestDisplayProtocol: assert hasattr(display, "cleanup") +class TestDisplayRegistry: + """Tests for DisplayRegistry class.""" + + def setup_method(self): + """Reset registry before each test.""" + DisplayRegistry._backends = {} + DisplayRegistry._initialized = False + + def test_register_adds_backend(self): + """register adds a backend to the registry.""" + DisplayRegistry.register("test", TerminalDisplay) + assert DisplayRegistry.get("test") == TerminalDisplay + + def test_register_case_insensitive(self): + """register is case insensitive.""" + DisplayRegistry.register("TEST", TerminalDisplay) + assert DisplayRegistry.get("test") == TerminalDisplay + + def test_get_returns_none_for_unknown(self): + """get returns None for unknown backend.""" + assert DisplayRegistry.get("unknown") is None + + def test_list_backends_returns_all(self): + """list_backends returns all registered backends.""" + DisplayRegistry.register("a", TerminalDisplay) + DisplayRegistry.register("b", NullDisplay) + backends = 
DisplayRegistry.list_backends() + assert "a" in backends + assert "b" in backends + + def test_create_returns_instance(self): + """create returns a display instance.""" + DisplayRegistry.register("test", NullDisplay) + display = DisplayRegistry.create("test") + assert isinstance(display, NullDisplay) + + def test_create_returns_none_for_unknown(self): + """create returns None for unknown backend.""" + display = DisplayRegistry.create("unknown") + assert display is None + + def test_initialize_registers_defaults(self): + """initialize registers default backends.""" + DisplayRegistry.initialize() + assert DisplayRegistry.get("terminal") == TerminalDisplay + assert DisplayRegistry.get("null") == NullDisplay + from engine.display.backends.pygame import PygameDisplay + from engine.display.backends.websocket import WebSocketDisplay + + assert DisplayRegistry.get("websocket") == WebSocketDisplay + assert DisplayRegistry.get("pygame") == PygameDisplay + + def test_initialize_idempotent(self): + """initialize can be called multiple times safely.""" + DisplayRegistry.initialize() + DisplayRegistry._backends["custom"] = TerminalDisplay + DisplayRegistry.initialize() + assert "custom" in DisplayRegistry.list_backends() + + class TestTerminalDisplay: """Tests for TerminalDisplay class.""" @@ -52,6 +118,119 @@ class TestTerminalDisplay: display = TerminalDisplay() display.cleanup() + def test_get_dimensions_returns_cached_value(self): + """get_dimensions returns cached dimensions for stability.""" + import os + from unittest.mock import patch + + # Mock terminal size to ensure deterministic dimensions + term_size = os.terminal_size((80, 24)) + with patch("os.get_terminal_size", return_value=term_size): + display = TerminalDisplay() + display.init(80, 24) + d1 = display.get_dimensions() + assert d1 == (80, 24) + + def test_show_clears_screen_before_each_frame(self): + """show clears previous frame to prevent visual wobble. 
+ + Regression test: Previously show() didn't clear the screen, + causing old content to remain and creating visual wobble. + The fix adds \\033[H\\033[J (cursor home + erase down) before each frame. + """ + from io import BytesIO + + display = TerminalDisplay() + display.init(80, 24) + + buffer = ["line1", "line2", "line3"] + + fake_buffer = BytesIO() + fake_stdout = MagicMock() + fake_stdout.buffer = fake_buffer + with patch.object(sys, "stdout", fake_stdout): + display.show(buffer) + + output = fake_buffer.getvalue().decode("utf-8") + assert output.startswith("\033[H\033[J"), ( + f"Output should start with clear sequence, got: {repr(output[:20])}" + ) + + def test_show_clears_screen_on_subsequent_frames(self): + """show clears screen on every frame, not just the first. + + Regression test: Ensures each show() call includes the clear sequence. + """ + from io import BytesIO + + # Use target_fps=0 to disable frame skipping in test + display = TerminalDisplay(target_fps=0) + display.init(80, 24) + + buffer = ["line1", "line2"] + + for i in range(3): + fake_buffer = BytesIO() + fake_stdout = MagicMock() + fake_stdout.buffer = fake_buffer + with patch.object(sys, "stdout", fake_stdout): + display.show(buffer) + + output = fake_buffer.getvalue().decode("utf-8") + assert output.startswith("\033[H\033[J"), ( + f"Frame {i} should start with clear sequence" + ) + + def test_get_dimensions_stable_across_rapid_calls(self): + """get_dimensions should not fluctuate when called rapidly. + + This test catches the bug where os.get_terminal_size() returns + inconsistent values, causing visual wobble. 
+ """ + display = TerminalDisplay() + display.init(80, 24) + + # Get dimensions 10 times rapidly (simulating frame loop) + dims = [display.get_dimensions() for _ in range(10)] + + # All should be the same - this would fail if os.get_terminal_size() + # returns different values each call + assert len(set(dims)) == 1, f"Dimensions should be stable, got: {set(dims)}" + + def test_show_with_border_uses_render_border(self): + """show with border=True calls render_border with FPS.""" + from unittest.mock import MagicMock + + display = TerminalDisplay() + display.init(80, 24) + + buffer = ["line1", "line2"] + + # Mock get_monitor to provide FPS + mock_monitor = MagicMock() + mock_monitor.get_stats.return_value = { + "pipeline": {"avg_ms": 16.5}, + "frame_count": 100, + } + + # Mock render_border to verify it's called + with ( + patch("engine.display.get_monitor", return_value=mock_monitor), + patch("engine.display.render_border", wraps=render_border) as mock_render, + ): + display.show(buffer, border=True) + + # Verify render_border was called with correct arguments + assert mock_render.called + args, kwargs = mock_render.call_args + # Arguments: buffer, width, height, fps, frame_time (positional) + assert args[0] == buffer + assert args[1] == 80 + assert args[2] == 24 + assert args[3] == pytest.approx(60.6, rel=0.1) # fps = 1000/16.5 + assert args[4] == pytest.approx(16.5, rel=0.1) + assert kwargs == {} # no keyword arguments + class TestNullDisplay: """Tests for NullDisplay class.""" @@ -77,3 +256,178 @@ class TestNullDisplay: """cleanup does nothing.""" display = NullDisplay() display.cleanup() + + def test_show_stores_last_buffer(self): + """show stores last buffer for testing inspection.""" + display = NullDisplay() + display.init(80, 24) + + buffer = ["line1", "line2", "line3"] + display.show(buffer) + + assert display._last_buffer == buffer + + def test_show_tracks_last_buffer_across_calls(self): + """show updates last_buffer on each call.""" + display = 
NullDisplay() + display.init(80, 24) + + display.show(["first"]) + assert display._last_buffer == ["first"] + + display.show(["second"]) + assert display._last_buffer == ["second"] + + +class TestRenderBorder: + """Tests for render_border function.""" + + def test_render_border_adds_corners(self): + """render_border adds corner characters.""" + from engine.display import render_border + + buffer = ["hello", "world"] + result = render_border(buffer, width=10, height=5) + + assert result[0][0] in "┌┎┍" # top-left + assert result[0][-1] in "┐┒┓" # top-right + assert result[-1][0] in "└┚┖" # bottom-left + assert result[-1][-1] in "┘┛┙" # bottom-right + + def test_render_border_dimensions(self): + """render_border output matches requested dimensions.""" + from engine.display import render_border + + buffer = ["line1", "line2", "line3"] + result = render_border(buffer, width=20, height=10) + + # Output should be exactly height lines + assert len(result) == 10 + # Each line should be exactly width characters + for line in result: + assert len(line) == 20 + + def test_render_border_with_fps(self): + """render_border includes FPS in top border when provided.""" + from engine.display import render_border + + buffer = ["test"] + result = render_border(buffer, width=20, height=5, fps=60.0) + + top_line = result[0] + assert "FPS:60" in top_line or "FPS: 60" in top_line + + def test_render_border_with_frame_time(self): + """render_border includes frame time in bottom border when provided.""" + from engine.display import render_border + + buffer = ["test"] + result = render_border(buffer, width=20, height=5, frame_time=16.5) + + bottom_line = result[-1] + assert "16.5ms" in bottom_line + + def test_render_border_crops_content_to_fit(self): + """render_border crops content to fit within borders.""" + from engine.display import render_border + + # Buffer larger than viewport + buffer = ["x" * 100] * 50 + result = render_border(buffer, width=20, height=10) + + # Result shrinks to 
fit viewport + assert len(result) == 10 + for line in result[1:-1]: # Skip border lines + assert len(line) == 20 + + def test_render_border_preserves_content(self): + """render_border preserves content within borders.""" + from engine.display import render_border + + buffer = ["hello world", "test line"] + result = render_border(buffer, width=20, height=5) + + # Content should appear in the middle rows + content_lines = result[1:-1] + assert any("hello world" in line for line in content_lines) + + def test_render_border_with_small_buffer(self): + """render_border handles buffers smaller than viewport.""" + from engine.display import render_border + + buffer = ["hi"] + result = render_border(buffer, width=20, height=10) + + # Should still produce full viewport with padding + assert len(result) == 10 + # All lines should be full width + for line in result: + assert len(line) == 20 + + +class TestMultiDisplay: + """Tests for MultiDisplay class.""" + + def test_init_stores_dimensions(self): + """init stores dimensions and forwards to displays.""" + mock_display1 = MagicMock() + mock_display2 = MagicMock() + multi = MultiDisplay([mock_display1, mock_display2]) + + multi.init(120, 40) + + assert multi.width == 120 + assert multi.height == 40 + mock_display1.init.assert_called_once_with(120, 40, reuse=False) + mock_display2.init.assert_called_once_with(120, 40, reuse=False) + + def test_show_forwards_to_all_displays(self): + """show forwards buffer to all displays.""" + mock_display1 = MagicMock() + mock_display2 = MagicMock() + multi = MultiDisplay([mock_display1, mock_display2]) + + buffer = ["line1", "line2"] + multi.show(buffer, border=False) + + mock_display1.show.assert_called_once_with(buffer, border=False) + mock_display2.show.assert_called_once_with(buffer, border=False) + + def test_clear_forwards_to_all_displays(self): + """clear forwards to all displays.""" + mock_display1 = MagicMock() + mock_display2 = MagicMock() + multi = MultiDisplay([mock_display1, 
mock_display2]) + + multi.clear() + + mock_display1.clear.assert_called_once() + mock_display2.clear.assert_called_once() + + def test_cleanup_forwards_to_all_displays(self): + """cleanup forwards to all displays.""" + mock_display1 = MagicMock() + mock_display2 = MagicMock() + multi = MultiDisplay([mock_display1, mock_display2]) + + multi.cleanup() + + mock_display1.cleanup.assert_called_once() + mock_display2.cleanup.assert_called_once() + + def test_empty_displays_list(self): + """handles empty displays list gracefully.""" + multi = MultiDisplay([]) + multi.init(80, 24) + multi.show(["test"]) + multi.clear() + multi.cleanup() + + def test_init_with_reuse(self): + """init passes reuse flag to child displays.""" + mock_display = MagicMock() + multi = MultiDisplay([mock_display]) + + multi.init(80, 24, reuse=True) + + mock_display.init.assert_called_once_with(80, 24, reuse=True) diff --git a/tests/test_effects_controller.py b/tests/test_effects_controller.py index fd17fe8..0a26a05 100644 --- a/tests/test_effects_controller.py +++ b/tests/test_effects_controller.py @@ -5,8 +5,10 @@ Tests for engine.effects.controller module. 
from unittest.mock import MagicMock, patch from engine.effects.controller import ( + _format_stats, handle_effects_command, set_effect_chain_ref, + show_effects_menu, ) @@ -92,6 +94,29 @@ class TestHandleEffectsCommand: assert "Reordered pipeline" in result mock_chain_instance.reorder.assert_called_once_with(["noise", "fade"]) + def test_reorder_failure(self): + """reorder returns error on failure.""" + with patch("engine.effects.controller.get_registry") as mock_registry: + mock_registry.return_value.list_all.return_value = {} + + with patch("engine.effects.controller._get_effect_chain") as mock_chain: + mock_chain_instance = MagicMock() + mock_chain_instance.reorder.return_value = False + mock_chain.return_value = mock_chain_instance + + result = handle_effects_command("/effects reorder bad") + + assert "Failed to reorder" in result + + def test_unknown_effect(self): + """unknown effect returns error.""" + with patch("engine.effects.controller.get_registry") as mock_registry: + mock_registry.return_value.list_all.return_value = {} + + result = handle_effects_command("/effects unknown on") + + assert "Unknown effect" in result + def test_unknown_command(self): """unknown command returns error.""" result = handle_effects_command("/unknown") @@ -102,6 +127,105 @@ class TestHandleEffectsCommand: result = handle_effects_command("not a command") assert "Unknown command" in result + def test_invalid_intensity_value(self): + """invalid intensity value returns error.""" + with patch("engine.effects.controller.get_registry") as mock_registry: + mock_plugin = MagicMock() + mock_registry.return_value.get.return_value = mock_plugin + mock_registry.return_value.list_all.return_value = {"noise": mock_plugin} + + result = handle_effects_command("/effects noise intensity bad") + + assert "Invalid intensity" in result + + def test_missing_action(self): + """missing action returns usage.""" + with patch("engine.effects.controller.get_registry") as mock_registry: + mock_plugin = 
MagicMock() + mock_registry.return_value.get.return_value = mock_plugin + mock_registry.return_value.list_all.return_value = {"noise": mock_plugin} + + result = handle_effects_command("/effects noise") + + assert "Usage" in result + + def test_stats_command(self): + """stats command returns formatted stats.""" + with patch("engine.effects.controller.get_monitor") as mock_monitor: + mock_monitor.return_value.get_stats.return_value = { + "frame_count": 100, + "pipeline": {"avg_ms": 1.5, "min_ms": 1.0, "max_ms": 2.0}, + "effects": {}, + } + + result = handle_effects_command("/effects stats") + + assert "Performance Stats" in result + + def test_list_only_effects(self): + """list command works with just /effects.""" + with patch("engine.effects.controller.get_registry") as mock_registry: + mock_plugin = MagicMock() + mock_plugin.config.enabled = False + mock_plugin.config.intensity = 0.5 + mock_registry.return_value.list_all.return_value = {"noise": mock_plugin} + + with patch("engine.effects.controller._get_effect_chain") as mock_chain: + mock_chain.return_value = None + + result = handle_effects_command("/effects") + + assert "noise: OFF" in result + + +class TestShowEffectsMenu: + """Tests for show_effects_menu function.""" + + def test_returns_formatted_menu(self): + """returns formatted effects menu.""" + with patch("engine.effects.controller.get_registry") as mock_registry: + mock_plugin = MagicMock() + mock_plugin.config.enabled = True + mock_plugin.config.intensity = 0.75 + mock_registry.return_value.list_all.return_value = {"noise": mock_plugin} + + with patch("engine.effects.controller._get_effect_chain") as mock_chain: + mock_chain_instance = MagicMock() + mock_chain_instance.get_order.return_value = ["noise"] + mock_chain.return_value = mock_chain_instance + + result = show_effects_menu() + + assert "EFFECTS MENU" in result + assert "noise" in result + + +class TestFormatStats: + """Tests for _format_stats function.""" + + def 
test_returns_error_when_no_monitor(self): + """returns error when monitor unavailable.""" + with patch("engine.effects.controller.get_monitor") as mock_monitor: + mock_monitor.return_value.get_stats.return_value = {"error": "No data"} + + result = _format_stats() + + assert "No data" in result + + def test_formats_pipeline_stats(self): + """formats pipeline stats correctly.""" + with patch("engine.effects.controller.get_monitor") as mock_monitor: + mock_monitor.return_value.get_stats.return_value = { + "frame_count": 50, + "pipeline": {"avg_ms": 2.5, "min_ms": 2.0, "max_ms": 3.0}, + "effects": {"noise": {"avg_ms": 0.5, "min_ms": 0.4, "max_ms": 0.6}}, + } + + result = _format_stats() + + assert "Pipeline" in result + assert "noise" in result + class TestSetEffectChainRef: """Tests for set_effect_chain_ref function.""" diff --git a/tests/test_emitters.py b/tests/test_emitters.py deleted file mode 100644 index b28cddb..0000000 --- a/tests/test_emitters.py +++ /dev/null @@ -1,69 +0,0 @@ -""" -Tests for engine.emitters module. 
-""" - -from engine.emitters import EventEmitter, Startable, Stoppable - - -class TestEventEmitterProtocol: - """Tests for EventEmitter protocol.""" - - def test_protocol_exists(self): - """EventEmitter protocol is defined.""" - assert EventEmitter is not None - - def test_protocol_has_subscribe_method(self): - """EventEmitter has subscribe method in protocol.""" - assert hasattr(EventEmitter, "subscribe") - - def test_protocol_has_unsubscribe_method(self): - """EventEmitter has unsubscribe method in protocol.""" - assert hasattr(EventEmitter, "unsubscribe") - - -class TestStartableProtocol: - """Tests for Startable protocol.""" - - def test_protocol_exists(self): - """Startable protocol is defined.""" - assert Startable is not None - - def test_protocol_has_start_method(self): - """Startable has start method in protocol.""" - assert hasattr(Startable, "start") - - -class TestStoppableProtocol: - """Tests for Stoppable protocol.""" - - def test_protocol_exists(self): - """Stoppable protocol is defined.""" - assert Stoppable is not None - - def test_protocol_has_stop_method(self): - """Stoppable has stop method in protocol.""" - assert hasattr(Stoppable, "stop") - - -class TestProtocolCompliance: - """Tests that existing classes comply with protocols.""" - - def test_ntfy_poller_complies_with_protocol(self): - """NtfyPoller implements EventEmitter protocol.""" - from engine.ntfy import NtfyPoller - - poller = NtfyPoller("http://example.com/topic") - assert hasattr(poller, "subscribe") - assert hasattr(poller, "unsubscribe") - assert callable(poller.subscribe) - assert callable(poller.unsubscribe) - - def test_mic_monitor_complies_with_protocol(self): - """MicMonitor implements EventEmitter and Startable protocols.""" - from engine.mic import MicMonitor - - monitor = MicMonitor() - assert hasattr(monitor, "subscribe") - assert hasattr(monitor, "unsubscribe") - assert hasattr(monitor, "start") - assert hasattr(monitor, "stop") diff --git a/tests/test_fetch.py 
b/tests/test_fetch.py new file mode 100644 index 0000000..05c1328 --- /dev/null +++ b/tests/test_fetch.py @@ -0,0 +1,234 @@ +""" +Tests for engine.fetch module. +""" + +import json +from unittest.mock import MagicMock, patch + +from engine.fetch import ( + _fetch_gutenberg, + fetch_all, + fetch_feed, + fetch_poetry, + load_cache, + save_cache, +) + + +class TestFetchFeed: + """Tests for fetch_feed function.""" + + @patch("engine.fetch.urllib.request.urlopen") + def test_fetch_success(self, mock_urlopen): + """Successful feed fetch returns parsed feed.""" + mock_response = MagicMock() + mock_response.read.return_value = b"test" + mock_urlopen.return_value = mock_response + + result = fetch_feed("http://example.com/feed") + + assert result is not None + + @patch("engine.fetch.urllib.request.urlopen") + def test_fetch_network_error(self, mock_urlopen): + """Network error returns tuple with None feed.""" + mock_urlopen.side_effect = Exception("Network error") + + url, feed = fetch_feed("http://example.com/feed") + + assert feed is None + + +class TestFetchAll: + """Tests for fetch_all function.""" + + @patch("engine.fetch.fetch_feed") + @patch("engine.fetch.strip_tags") + @patch("engine.fetch.skip") + @patch("engine.fetch.boot_ln") + def test_fetch_all_success(self, mock_boot, mock_skip, mock_strip, mock_fetch_feed): + """Successful fetch returns items.""" + mock_feed = MagicMock() + mock_feed.bozo = False + mock_feed.entries = [ + {"title": "Headline 1", "published_parsed": (2024, 1, 1, 12, 0, 0)}, + {"title": "Headline 2", "updated_parsed": (2024, 1, 2, 12, 0, 0)}, + ] + mock_fetch_feed.return_value = ("http://example.com", mock_feed) + mock_skip.return_value = False + mock_strip.side_effect = lambda x: x + + items, linked, failed = fetch_all() + + assert linked > 0 + assert failed == 0 + + @patch("engine.fetch.fetch_feed") + @patch("engine.fetch.boot_ln") + def test_fetch_all_feed_error(self, mock_boot, mock_fetch_feed): + """Feed error increments failed count.""" + 
mock_fetch_feed.return_value = ("http://example.com", None) + + items, linked, failed = fetch_all() + + assert failed > 0 + + @patch("engine.fetch.fetch_feed") + @patch("engine.fetch.strip_tags") + @patch("engine.fetch.skip") + @patch("engine.fetch.boot_ln") + def test_fetch_all_skips_filtered( + self, mock_boot, mock_skip, mock_strip, mock_fetch_feed + ): + """Filtered headlines are skipped.""" + mock_feed = MagicMock() + mock_feed.bozo = False + mock_feed.entries = [ + {"title": "Sports scores"}, + {"title": "Valid headline"}, + ] + mock_fetch_feed.return_value = ("http://example.com", mock_feed) + mock_skip.side_effect = lambda x: x == "Sports scores" + mock_strip.side_effect = lambda x: x + + items, linked, failed = fetch_all() + + assert any("Valid headline" in item[0] for item in items) + + +class TestFetchGutenberg: + """Tests for _fetch_gutenberg function.""" + + @patch("engine.fetch.urllib.request.urlopen") + def test_gutenberg_success(self, mock_urlopen): + """Successful gutenberg fetch returns items.""" + text = """Project Gutenberg + +*** START OF THE PROJECT GUTENBERG *** +This is a test poem with multiple lines +that should be parsed as a block. + +Another stanza with more content here. 
+ +*** END OF THE PROJECT GUTENBERG *** +""" + mock_response = MagicMock() + mock_response.read.return_value = text.encode("utf-8") + mock_urlopen.return_value = mock_response + + result = _fetch_gutenberg("http://example.com/test", "Test") + + assert len(result) > 0 + + @patch("engine.fetch.urllib.request.urlopen") + def test_gutenberg_network_error(self, mock_urlopen): + """Network error returns empty list.""" + mock_urlopen.side_effect = Exception("Network error") + + result = _fetch_gutenberg("http://example.com/test", "Test") + + assert result == [] + + @patch("engine.fetch.urllib.request.urlopen") + def test_gutenberg_skips_short_blocks(self, mock_urlopen): + """Blocks shorter than 20 chars are skipped.""" + text = """*** START OF THE *** +Short +*** END OF THE *** +""" + mock_response = MagicMock() + mock_response.read.return_value = text.encode("utf-8") + mock_urlopen.return_value = mock_response + + result = _fetch_gutenberg("http://example.com/test", "Test") + + assert result == [] + + @patch("engine.fetch.urllib.request.urlopen") + def test_gutenberg_skips_all_caps_headers(self, mock_urlopen): + """All-caps lines are skipped as headers.""" + text = """*** START OF THE *** +THIS IS ALL CAPS HEADER +more content here +*** END OF THE *** +""" + mock_response = MagicMock() + mock_response.read.return_value = text.encode("utf-8") + mock_urlopen.return_value = mock_response + + result = _fetch_gutenberg("http://example.com/test", "Test") + + assert len(result) > 0 + + +class TestFetchPoetry: + """Tests for fetch_poetry function.""" + + @patch("engine.fetch._fetch_gutenberg") + @patch("engine.fetch.boot_ln") + def test_fetch_poetry_success(self, mock_boot, mock_fetch): + """Successful poetry fetch returns items.""" + mock_fetch.return_value = [ + ("Stanza 1 content here", "Test", ""), + ("Stanza 2 content here", "Test", ""), + ] + + items, linked, failed = fetch_poetry() + + assert linked > 0 + assert failed == 0 + + @patch("engine.fetch._fetch_gutenberg") + 
@patch("engine.fetch.boot_ln") + def test_fetch_poetry_failure(self, mock_boot, mock_fetch): + """Failed fetch increments failed count.""" + mock_fetch.return_value = [] + + items, linked, failed = fetch_poetry() + + assert failed > 0 + + +class TestCache: + """Tests for cache functions.""" + + @patch("engine.fetch._cache_path") + def test_load_cache_success(self, mock_path): + """Successful cache load returns items.""" + mock_path.return_value.__str__ = MagicMock(return_value="/tmp/cache") + mock_path.return_value.exists.return_value = True + mock_path.return_value.read_text.return_value = json.dumps( + {"items": [("title", "source", "time")]} + ) + + result = load_cache() + + assert result is not None + + @patch("engine.fetch._cache_path") + def test_load_cache_missing_file(self, mock_path): + """Missing cache file returns None.""" + mock_path.return_value.exists.return_value = False + + result = load_cache() + + assert result is None + + @patch("engine.fetch._cache_path") + def test_load_cache_invalid_json(self, mock_path): + """Invalid JSON returns None.""" + mock_path.return_value.exists.return_value = True + mock_path.return_value.read_text.side_effect = json.JSONDecodeError("", "", 0) + + result = load_cache() + + assert result is None + + @patch("engine.fetch._cache_path") + def test_save_cache_success(self, mock_path): + """Save cache writes to file.""" + mock_path.return_value.__truediv__ = MagicMock( + return_value=mock_path.return_value + ) + + save_cache([("title", "source", "time")]) diff --git a/tests/test_fetch_code.py b/tests/test_fetch_code.py deleted file mode 100644 index 5578b1d..0000000 --- a/tests/test_fetch_code.py +++ /dev/null @@ -1,35 +0,0 @@ -import re - -from engine.fetch_code import fetch_code - - -def test_return_shape(): - items, line_count, ignored = fetch_code() - assert isinstance(items, list) - assert line_count == len(items) - assert ignored == 0 - - -def test_items_are_tuples(): - items, _, _ = fetch_code() - assert items, 
"expected at least one code line" - for item in items: - assert isinstance(item, tuple) and len(item) == 3 - text, src, ts = item - assert isinstance(text, str) - assert isinstance(src, str) - assert isinstance(ts, str) - - -def test_blank_and_comment_lines_excluded(): - items, _, _ = fetch_code() - for text, _, _ in items: - assert text.strip(), "blank line should have been filtered" - assert not text.strip().startswith("#"), "comment line should have been filtered" - - -def test_module_path_format(): - items, _, _ = fetch_code() - pattern = re.compile(r"^engine\.\w+$") - for _, _, ts in items: - assert pattern.match(ts), f"unexpected module path: {ts!r}" diff --git a/tests/test_figment.py b/tests/test_figment.py deleted file mode 100644 index 6774b1a..0000000 --- a/tests/test_figment.py +++ /dev/null @@ -1,151 +0,0 @@ -"""Tests for the FigmentEffect plugin.""" - -import os -from enum import Enum - -import pytest - -pytest.importorskip("cairosvg", reason="cairosvg requires system Cairo library") - -from effects_plugins.figment import FigmentEffect, FigmentPhase, FigmentState -from engine.effects.types import EffectConfig, EffectContext - -FIXTURE_SVG = os.path.join(os.path.dirname(__file__), "fixtures", "test.svg") -FIGMENTS_DIR = os.path.join(os.path.dirname(__file__), "fixtures") - - -class TestFigmentPhase: - def test_is_enum(self): - assert issubclass(FigmentPhase, Enum) - - def test_has_all_phases(self): - assert hasattr(FigmentPhase, "REVEAL") - assert hasattr(FigmentPhase, "HOLD") - assert hasattr(FigmentPhase, "DISSOLVE") - - -class TestFigmentState: - def test_creation(self): - state = FigmentState( - phase=FigmentPhase.REVEAL, - progress=0.5, - rows=["█▀▄", " █ "], - gradient=[46, 40, 34, 28, 22, 22, 34, 40, 46, 82, 118, 231], - center_row=5, - center_col=10, - ) - assert state.phase == FigmentPhase.REVEAL - assert state.progress == 0.5 - assert len(state.rows) == 2 - - -class TestFigmentEffectInit: - def test_name(self): - effect = 
FigmentEffect(figment_dir=FIGMENTS_DIR) - assert effect.name == "figment" - - def test_default_config(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - assert effect.config.enabled is False - assert effect.config.intensity == 1.0 - assert effect.config.params["interval_secs"] == 60 - assert effect.config.params["display_secs"] == 4.5 - - def test_process_is_noop(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - buf = ["line1", "line2"] - ctx = EffectContext( - terminal_width=80, - terminal_height=24, - scroll_cam=0, - ticker_height=20, - ) - result = effect.process(buf, ctx) - assert result == buf - assert result is buf - - def test_configure(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - new_cfg = EffectConfig(enabled=True, intensity=0.5) - effect.configure(new_cfg) - assert effect.config.enabled is True - assert effect.config.intensity == 0.5 - - -class TestFigmentStateMachine: - def test_idle_initially(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - effect.config.enabled = True - state = effect.get_figment_state(0, 80, 24) - # Timer hasn't fired yet, should be None (idle) - assert state is None - - def test_trigger_starts_reveal(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - effect.config.enabled = True - effect.trigger(80, 24) - state = effect.get_figment_state(1, 80, 24) - assert state is not None - assert state.phase == FigmentPhase.REVEAL - - def test_full_cycle(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - effect.config.enabled = True - effect.config.params["display_secs"] = 0.15 # 3 phases x 0.05s - - effect.trigger(40, 20) - - # Advance through reveal (30 frames at 0.05s = 1.5s, but we shrunk it) - # With display_secs=0.15, each phase is 0.05s = 1 frame - state = effect.get_figment_state(1, 40, 20) - assert state is not None - assert state.phase == FigmentPhase.REVEAL - - # Advance enough frames to get through all phases - for frame in range(2, 100): - state = 
effect.get_figment_state(frame, 40, 20) - if state is None: - break - - # Should have completed the full cycle back to idle - assert state is None - - def test_timer_fires_at_interval(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - effect.config.enabled = True - effect.config.params["interval_secs"] = 0.1 # 2 frames at 20fps - - # Frame 0: idle - state = effect.get_figment_state(0, 40, 20) - assert state is None - - # Advance past interval (0.1s = 2 frames) - state = effect.get_figment_state(1, 40, 20) - state = effect.get_figment_state(2, 40, 20) - state = effect.get_figment_state(3, 40, 20) - # Timer should have fired by now - assert state is not None - - -class TestFigmentEdgeCases: - def test_empty_figment_dir(self, tmp_path): - effect = FigmentEffect(figment_dir=str(tmp_path)) - effect.config.enabled = True - effect.trigger(40, 20) - state = effect.get_figment_state(1, 40, 20) - # No SVGs available — should stay idle - assert state is None - - def test_missing_figment_dir(self): - effect = FigmentEffect(figment_dir="/nonexistent/path") - effect.config.enabled = True - effect.trigger(40, 20) - state = effect.get_figment_state(1, 40, 20) - assert state is None - - def test_disabled_ignores_trigger(self): - effect = FigmentEffect(figment_dir=FIGMENTS_DIR) - effect.config.enabled = False - effect.trigger(80, 24) - state = effect.get_figment_state(1, 80, 24) - assert state is None diff --git a/tests/test_figment_overlay.py b/tests/test_figment_overlay.py deleted file mode 100644 index 99152be..0000000 --- a/tests/test_figment_overlay.py +++ /dev/null @@ -1,64 +0,0 @@ -"""Tests for render_figment_overlay in engine.layers.""" - -import pytest - -pytest.importorskip("cairosvg", reason="cairosvg requires system Cairo library") - -from effects_plugins.figment import FigmentPhase, FigmentState -from engine.layers import render_figment_overlay - - -def _make_state(phase=FigmentPhase.HOLD, progress=0.5): - return FigmentState( - phase=phase, - 
progress=progress, - rows=["█▀▄ █", " ▄█▀ ", "█ █"], - gradient=[46, 40, 34, 28, 22, 22, 34, 40, 46, 82, 118, 231], - center_row=10, - center_col=37, - ) - - -class TestRenderFigmentOverlay: - def test_returns_list_of_strings(self): - state = _make_state() - result = render_figment_overlay(state, 80, 24) - assert isinstance(result, list) - assert all(isinstance(s, str) for s in result) - - def test_contains_ansi_positioning(self): - state = _make_state() - result = render_figment_overlay(state, 80, 24) - # Should contain cursor positioning escape codes - assert any("\033[" in s for s in result) - - def test_reveal_phase_partial(self): - state = _make_state(phase=FigmentPhase.REVEAL, progress=0.0) - result = render_figment_overlay(state, 80, 24) - # At progress 0.0, very few cells should be visible - # Result should still be a valid list - assert isinstance(result, list) - - def test_hold_phase_full(self): - state = _make_state(phase=FigmentPhase.HOLD, progress=0.5) - result = render_figment_overlay(state, 80, 24) - # During hold, content should be present - assert len(result) > 0 - - def test_dissolve_phase(self): - state = _make_state(phase=FigmentPhase.DISSOLVE, progress=0.9) - result = render_figment_overlay(state, 80, 24) - # At high dissolve progress, most cells are gone - assert isinstance(result, list) - - def test_empty_rows(self): - state = FigmentState( - phase=FigmentPhase.HOLD, - progress=0.5, - rows=[], - gradient=[46] * 12, - center_row=0, - center_col=0, - ) - result = render_figment_overlay(state, 80, 24) - assert result == [] diff --git a/tests/test_figment_render.py b/tests/test_figment_render.py deleted file mode 100644 index fffb62f..0000000 --- a/tests/test_figment_render.py +++ /dev/null @@ -1,52 +0,0 @@ -"""Tests for engine.figment_render module.""" - -import os - -import pytest - -pytest.importorskip("cairosvg", reason="cairosvg requires system Cairo library") - -from engine.figment_render import rasterize_svg - -FIXTURE_SVG = 
os.path.join(os.path.dirname(__file__), "fixtures", "test.svg") - - -class TestRasterizeSvg: - def test_returns_list_of_strings(self): - rows = rasterize_svg(FIXTURE_SVG, 40, 20) - assert isinstance(rows, list) - assert all(isinstance(r, str) for r in rows) - - def test_output_height_matches_terminal_height(self): - rows = rasterize_svg(FIXTURE_SVG, 40, 20) - assert len(rows) == 20 - - def test_output_contains_block_characters(self): - rows = rasterize_svg(FIXTURE_SVG, 40, 20) - all_chars = "".join(rows) - block_chars = {"█", "▀", "▄"} - assert any(ch in all_chars for ch in block_chars) - - def test_different_sizes_produce_different_output(self): - rows_small = rasterize_svg(FIXTURE_SVG, 20, 10) - rows_large = rasterize_svg(FIXTURE_SVG, 80, 40) - assert len(rows_small) == 10 - assert len(rows_large) == 40 - - def test_nonexistent_file_raises(self): - import pytest - - with pytest.raises((FileNotFoundError, OSError)): - rasterize_svg("/nonexistent/file.svg", 40, 20) - - -class TestRasterizeCache: - def test_cache_returns_same_result(self): - rows1 = rasterize_svg(FIXTURE_SVG, 40, 20) - rows2 = rasterize_svg(FIXTURE_SVG, 40, 20) - assert rows1 == rows2 - - def test_cache_invalidated_by_size_change(self): - rows1 = rasterize_svg(FIXTURE_SVG, 40, 20) - rows2 = rasterize_svg(FIXTURE_SVG, 60, 30) - assert len(rows1) != len(rows2) diff --git a/tests/test_figment_trigger.py b/tests/test_figment_trigger.py deleted file mode 100644 index 989a0bb..0000000 --- a/tests/test_figment_trigger.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Tests for engine.figment_trigger module.""" - -from enum import Enum - -from engine.figment_trigger import FigmentAction, FigmentCommand - - -class TestFigmentAction: - def test_is_enum(self): - assert issubclass(FigmentAction, Enum) - - def test_has_trigger(self): - assert FigmentAction.TRIGGER.value == "trigger" - - def test_has_set_intensity(self): - assert FigmentAction.SET_INTENSITY.value == "set_intensity" - - def test_has_set_interval(self): - 
assert FigmentAction.SET_INTERVAL.value == "set_interval" - - def test_has_set_color(self): - assert FigmentAction.SET_COLOR.value == "set_color" - - def test_has_stop(self): - assert FigmentAction.STOP.value == "stop" - - -class TestFigmentCommand: - def test_trigger_command(self): - cmd = FigmentCommand(action=FigmentAction.TRIGGER) - assert cmd.action == FigmentAction.TRIGGER - assert cmd.value is None - - def test_set_intensity_command(self): - cmd = FigmentCommand(action=FigmentAction.SET_INTENSITY, value=0.8) - assert cmd.value == 0.8 - - def test_set_color_command(self): - cmd = FigmentCommand(action=FigmentAction.SET_COLOR, value="orange") - assert cmd.value == "orange" diff --git a/tests/test_framebuffer_acceptance.py b/tests/test_framebuffer_acceptance.py new file mode 100644 index 0000000..8f42b6a --- /dev/null +++ b/tests/test_framebuffer_acceptance.py @@ -0,0 +1,195 @@ +"""Integration test: FrameBufferStage in the pipeline.""" + +import queue + +from engine.data_sources.sources import ListDataSource, SourceItem +from engine.effects.types import EffectConfig +from engine.pipeline import Pipeline, PipelineConfig +from engine.pipeline.adapters import ( + DataSourceStage, + DisplayStage, + SourceItemsToBufferStage, +) +from engine.pipeline.core import PipelineContext +from engine.pipeline.stages.framebuffer import FrameBufferStage + + +class QueueDisplay: + """Stub display that captures every frame into a queue.""" + + def __init__(self): + self.frames: queue.Queue[list[str]] = queue.Queue() + self.width = 80 + self.height = 24 + self._init_called = False + + def init(self, width: int, height: int, reuse: bool = False) -> None: + self.width = width + self.height = height + self._init_called = True + + def show(self, buffer: list[str], border: bool = False) -> None: + self.frames.put(list(buffer)) + + def clear(self) -> None: + pass + + def cleanup(self) -> None: + pass + + def get_dimensions(self) -> tuple[int, int]: + return (self.width, self.height) + + 
+def _build_pipeline( + items: list[SourceItem], + history_depth: int = 5, + width: int = 80, + height: int = 24, +) -> tuple[Pipeline, QueueDisplay, PipelineContext]: + """Build pipeline: source -> render -> framebuffer -> display.""" + display = QueueDisplay() + + ctx = PipelineContext() + ctx.set("items", items) + + pipeline = Pipeline( + config=PipelineConfig(enable_metrics=True), + context=ctx, + ) + + # Source + source = ListDataSource(items, name="test-source") + pipeline.add_stage("source", DataSourceStage(source, name="test-source")) + + # Render + pipeline.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer")) + + # Framebuffer + framebuffer = FrameBufferStage(name="default", history_depth=history_depth) + pipeline.add_stage("framebuffer", framebuffer) + + # Display + pipeline.add_stage("display", DisplayStage(display, name="queue")) + + pipeline.build() + pipeline.initialize() + + return pipeline, display, ctx + + +class TestFrameBufferAcceptance: + """Test FrameBufferStage in a full pipeline.""" + + def test_framebuffer_populates_history(self): + """After several frames, framebuffer should have history stored.""" + items = [ + SourceItem(content="Frame\nBuffer\nTest", source="test", timestamp="0") + ] + pipeline, display, ctx = _build_pipeline(items, history_depth=5) + + # Run 3 frames + for i in range(3): + result = pipeline.execute([]) + assert result.success, f"Pipeline failed at frame {i}: {result.error}" + + # Check framebuffer history in context + history = ctx.get("framebuffer.default.history") + assert history is not None, "Framebuffer history not found in context" + assert len(history) == 3, f"Expected 3 history frames, got {len(history)}" + + def test_framebuffer_respects_depth(self): + """Framebuffer should not exceed configured history depth.""" + items = [SourceItem(content="Depth\nTest", source="test", timestamp="0")] + pipeline, display, ctx = _build_pipeline(items, history_depth=3) + + # Run 5 frames + for i in range(5): + 
result = pipeline.execute([]) + assert result.success + + history = ctx.get("framebuffer.default.history") + assert history is not None + assert len(history) == 3, f"Expected depth 3, got {len(history)}" + + def test_framebuffer_current_intensity(self): + """Framebuffer should compute current intensity map.""" + items = [SourceItem(content="Intensity\nMap", source="test", timestamp="0")] + pipeline, display, ctx = _build_pipeline(items, history_depth=5) + + # Run at least one frame + result = pipeline.execute([]) + assert result.success + + intensity = ctx.get("framebuffer.default.current_intensity") + assert intensity is not None, "No intensity map in context" + # Intensity should be a list of one value per line? Actually it's a 2D array or list? + # Let's just check it's non-empty + assert len(intensity) > 0, "Intensity map is empty" + + def test_framebuffer_get_frame(self): + """Should be able to retrieve specific frames from history.""" + items = [SourceItem(content="Retrieve\nFrame", source="test", timestamp="0")] + pipeline, display, ctx = _build_pipeline(items, history_depth=5) + + # Run 2 frames + for i in range(2): + result = pipeline.execute([]) + assert result.success + + # Retrieve frame 0 (most recent) + recent = pipeline.get_stage("framebuffer").get_frame(0, ctx) + assert recent is not None, "Cannot retrieve recent frame" + assert len(recent) > 0, "Recent frame is empty" + + # Retrieve frame 1 (previous) + previous = pipeline.get_stage("framebuffer").get_frame(1, ctx) + assert previous is not None, "Cannot retrieve previous frame" + + def test_framebuffer_with_motionblur_effect(self): + """MotionBlurEffect should work when depending on framebuffer.""" + from engine.effects.plugins.motionblur import MotionBlurEffect + from engine.pipeline.adapters import EffectPluginStage + + items = [SourceItem(content="Motion\nBlur", source="test", timestamp="0")] + display = QueueDisplay() + ctx = PipelineContext() + ctx.set("items", items) + + pipeline = Pipeline( 
+ config=PipelineConfig(enable_metrics=True), + context=ctx, + ) + + source = ListDataSource(items, name="test") + pipeline.add_stage("source", DataSourceStage(source, name="test")) + pipeline.add_stage("render", SourceItemsToBufferStage(name="render")) + + framebuffer = FrameBufferStage(name="default", history_depth=3) + pipeline.add_stage("framebuffer", framebuffer) + + motionblur = MotionBlurEffect() + motionblur.configure(EffectConfig(enabled=True, intensity=0.5)) + pipeline.add_stage( + "motionblur", + EffectPluginStage( + motionblur, + name="motionblur", + dependencies={"framebuffer.history.default"}, + ), + ) + + pipeline.add_stage("display", DisplayStage(display, name="queue")) + + pipeline.build() + pipeline.initialize() + + # Run a few frames + for i in range(5): + result = pipeline.execute([]) + assert result.success, f"Motion blur pipeline failed at frame {i}" + + # Check that history exists + history = ctx.get("framebuffer.default.history") + assert history is not None + assert len(history) > 0 diff --git a/tests/test_framebuffer_stage.py b/tests/test_framebuffer_stage.py new file mode 100644 index 0000000..be3c81d --- /dev/null +++ b/tests/test_framebuffer_stage.py @@ -0,0 +1,237 @@ +""" +Tests for FrameBufferStage. 
+""" + +import pytest + +from engine.pipeline.core import DataType, PipelineContext +from engine.pipeline.params import PipelineParams +from engine.pipeline.stages.framebuffer import FrameBufferConfig, FrameBufferStage + + +def make_ctx(width: int = 80, height: int = 24) -> PipelineContext: + """Create a PipelineContext for testing.""" + ctx = PipelineContext() + params = PipelineParams() + params.viewport_width = width + params.viewport_height = height + ctx.params = params + return ctx + + +class TestFrameBufferStage: + """Tests for FrameBufferStage.""" + + def test_init(self): + """FrameBufferStage initializes with default config.""" + stage = FrameBufferStage() + assert stage.name == "framebuffer" + assert stage.category == "effect" + assert stage.config.history_depth == 2 + + def test_capabilities(self): + """Stage provides framebuffer.history.{name} capability.""" + stage = FrameBufferStage() + assert "framebuffer.history.default" in stage.capabilities + + def test_dependencies(self): + """Stage depends on render.output.""" + stage = FrameBufferStage() + assert "render.output" in stage.dependencies + + def test_inlet_outlet_types(self): + """Stage accepts and produces TEXT_BUFFER.""" + stage = FrameBufferStage() + assert DataType.TEXT_BUFFER in stage.inlet_types + assert DataType.TEXT_BUFFER in stage.outlet_types + + def test_init_context(self): + """init initializes context state with prefixed keys.""" + stage = FrameBufferStage() + ctx = make_ctx() + + result = stage.init(ctx) + + assert result is True + assert ctx.get("framebuffer.default.history") == [] + assert ctx.get("framebuffer.default.intensity_history") == [] + + def test_process_stores_buffer_in_history(self): + """process stores buffer in history.""" + stage = FrameBufferStage() + ctx = make_ctx() + stage.init(ctx) + + buffer = ["line1", "line2", "line3"] + result = stage.process(buffer, ctx) + + assert result == buffer # Pass-through + history = ctx.get("framebuffer.default.history") + assert 
len(history) == 1 + assert history[0] == buffer + + def test_process_computes_intensity(self): + """process computes intensity map.""" + stage = FrameBufferStage() + ctx = make_ctx() + stage.init(ctx) + + buffer = ["hello world", "test line", ""] + stage.process(buffer, ctx) + + intensity = ctx.get("framebuffer.default.current_intensity") + assert intensity is not None + assert len(intensity) == 3 # Three rows + # Non-empty lines should have intensity > 0 + assert intensity[0] > 0 + assert intensity[1] > 0 + # Empty line should have intensity 0 + assert intensity[2] == 0.0 + + def test_process_keeps_multiple_frames(self): + """process keeps configured depth of frames.""" + config = FrameBufferConfig(history_depth=3, name="test") + stage = FrameBufferStage(config) + ctx = make_ctx() + stage.init(ctx) + + # Process several frames + for i in range(5): + buffer = [f"frame {i}"] + stage.process(buffer, ctx) + + history = ctx.get("framebuffer.test.history") + assert len(history) == 3 # Only last 3 kept + # Should be in reverse chronological order (most recent first) + assert history[0] == ["frame 4"] + assert history[1] == ["frame 3"] + assert history[2] == ["frame 2"] + + def test_process_keeps_intensity_sync(self): + """process keeps intensity history in sync with frame history.""" + config = FrameBufferConfig(history_depth=3, name="sync") + stage = FrameBufferStage(config) + ctx = make_ctx() + stage.init(ctx) + + buffers = [ + ["a"], + ["bb"], + ["ccc"], + ] + for buf in buffers: + stage.process(buf, ctx) + + prefix = "framebuffer.sync" + frame_hist = ctx.get(f"{prefix}.history") + intensity_hist = ctx.get(f"{prefix}.intensity_history") + assert len(frame_hist) == len(intensity_hist) == 3 + + # Each frame's intensity should match + for i, frame in enumerate(frame_hist): + computed_intensity = stage._compute_buffer_intensity(frame, len(frame)) + assert intensity_hist[i] == pytest.approx(computed_intensity) + + def test_get_frame(self): + """get_frame retrieves frames 
from history by index.""" + config = FrameBufferConfig(history_depth=3) + stage = FrameBufferStage(config) + ctx = make_ctx() + stage.init(ctx) + + buffers = [["f1"], ["f2"], ["f3"]] + for buf in buffers: + stage.process(buf, ctx) + + assert stage.get_frame(0, ctx) == ["f3"] # Most recent + assert stage.get_frame(1, ctx) == ["f2"] + assert stage.get_frame(2, ctx) == ["f1"] + assert stage.get_frame(3, ctx) is None # Out of range + + def test_get_intensity(self): + """get_intensity retrieves intensity maps by index.""" + stage = FrameBufferStage() + ctx = make_ctx() + stage.init(ctx) + + buffers = [["line"], ["longer line"]] + for buf in buffers: + stage.process(buf, ctx) + + intensity0 = stage.get_intensity(0, ctx) + intensity1 = stage.get_intensity(1, ctx) + assert intensity0 is not None + assert intensity1 is not None + # Longer line should have higher intensity (more non-space chars) + assert sum(intensity1) > sum(intensity0) + + def test_compute_buffer_intensity_simple(self): + """_compute_buffer_intensity computes simple density.""" + stage = FrameBufferStage() + + buf = ["abc", " ", "de"] + intensities = stage._compute_buffer_intensity(buf, max_rows=3) + + assert len(intensities) == 3 + # "abc" -> 3/3 = 1.0 + assert pytest.approx(intensities[0]) == 1.0 + # " " -> 0/2 = 0.0 + assert pytest.approx(intensities[1]) == 0.0 + # "de" -> 2/2 = 1.0 + assert pytest.approx(intensities[2]) == 1.0 + + def test_compute_buffer_intensity_with_ansi(self): + """_compute_buffer_intensity strips ANSI codes.""" + stage = FrameBufferStage() + + # Line with ANSI color codes + buf = ["\033[31mred\033[0m", "normal"] + intensities = stage._compute_buffer_intensity(buf, max_rows=2) + + assert len(intensities) == 2 + # Should treat "red" as 3 non-space chars + assert pytest.approx(intensities[0]) == 1.0 # "red" = 3/3 + assert pytest.approx(intensities[1]) == 1.0 # "normal" = 6/6 + + def test_compute_buffer_intensity_padding(self): + """_compute_buffer_intensity pads to max_rows.""" + 
stage = FrameBufferStage() + + buf = ["short"] + intensities = stage._compute_buffer_intensity(buf, max_rows=5) + + assert len(intensities) == 5 + assert intensities[0] > 0 + assert all(i == 0.0 for i in intensities[1:]) + + def test_thread_safety(self): + """process is thread-safe.""" + from threading import Thread + + stage = FrameBufferStage(name="threadtest") + ctx = make_ctx() + stage.init(ctx) + + results = [] + + def worker(idx): + buffer = [f"thread {idx}"] + stage.process(buffer, ctx) + results.append(len(ctx.get("framebuffer.threadtest.history", []))) + + threads = [Thread(target=worker, args=(i,)) for i in range(10)] + for t in threads: + t.start() + for t in threads: + t.join() + + # All threads should see consistent state + assert len(ctx.get("framebuffer.threadtest.history")) <= 2 # Depth limit + # All worker threads should have completed without errors + assert len(results) == 10 + + def test_cleanup(self): + """cleanup does nothing but can be called.""" + stage = FrameBufferStage() + # Should not raise + stage.cleanup() diff --git a/tests/test_glitch_effect.py b/tests/test_glitch_effect.py new file mode 100644 index 0000000..7b7c9a5 --- /dev/null +++ b/tests/test_glitch_effect.py @@ -0,0 +1,240 @@ +""" +Tests for Glitch effect - regression tests for stability issues. 
+""" + +import re + +import pytest + +from engine.display import NullDisplay +from engine.effects.types import EffectConfig, EffectContext + + +def strip_ansi(s: str) -> str: + """Remove ANSI escape sequences from string.""" + return re.sub(r"\x1b\[[0-9;]*[a-zA-Z]", "", s) + + +class TestGlitchEffectStability: + """Regression tests for Glitch effect stability.""" + + @pytest.fixture + def effect_context(self): + """Create a consistent effect context for testing.""" + return EffectContext( + terminal_width=80, + terminal_height=24, + scroll_cam=0, + ticker_height=20, + frame_number=0, + ) + + @pytest.fixture + def stable_buffer(self): + """Create a stable buffer for testing.""" + return ["line" + str(i).zfill(2) + " " * 60 for i in range(24)] + + def test_glitch_preserves_line_count(self, effect_context, stable_buffer): + """Glitch should not change the number of lines in buffer.""" + from engine.effects.plugins.glitch import GlitchEffect + + effect = GlitchEffect() + result = effect.process(stable_buffer, effect_context) + + assert len(result) == len(stable_buffer), ( + f"Line count changed from {len(stable_buffer)} to {len(result)}" + ) + + def test_glitch_preserves_line_lengths(self, effect_context, stable_buffer): + """Glitch should not change individual line lengths - prevents viewport jumping. + + Note: Effects may add ANSI color codes, so we check VISIBLE length (stripped). + """ + from engine.effects.plugins.glitch import GlitchEffect + + effect = GlitchEffect() + + # Run multiple times to catch randomness + for _ in range(10): + result = effect.process(stable_buffer, effect_context) + for i, (orig, new) in enumerate(zip(stable_buffer, result, strict=False)): + visible_new = strip_ansi(new) + assert len(visible_new) == len(orig), ( + f"Line {i} visible length changed from {len(orig)} to {len(visible_new)}" + ) + + def test_glitch_no_cursor_positioning(self, effect_context, stable_buffer): + """Glitch should not use cursor positioning escape sequences. 
+ + Regression test: Previously glitch used \\033[{row};1H which caused + conflicts with HUD and border rendering. + """ + from engine.effects.plugins.glitch import GlitchEffect + + effect = GlitchEffect() + result = effect.process(stable_buffer, effect_context) + + # Check no cursor positioning in output + cursor_pos_pattern = re.compile(r"\033\[[0-9]+;[0-9]+H") + for i, line in enumerate(result): + match = cursor_pos_pattern.search(line) + assert match is None, ( + f"Line {i} contains cursor positioning: {repr(line[:50])}" + ) + + def test_glitch_output_deterministic_given_seed( + self, effect_context, stable_buffer + ): + """Glitch output should be deterministic given the same random seed.""" + from engine.effects.plugins.glitch import GlitchEffect + + effect = GlitchEffect() + effect.config = EffectConfig(enabled=True, intensity=1.0) + + # With fixed random state, should get same result + import random + + random.seed(42) + result1 = effect.process(stable_buffer, effect_context) + + random.seed(42) + result2 = effect.process(stable_buffer, effect_context) + + assert result1 == result2, ( + "Glitch should be deterministic with fixed random seed" + ) + + +class TestEffectViewportStability: + """Tests to catch effects that cause viewport instability.""" + + def test_null_display_stable_without_effects(self): + """NullDisplay should produce identical output without effects.""" + display = NullDisplay() + display.init(80, 24) + + buffer = ["test line " + "x" * 60 for _ in range(24)] + + display.show(buffer) + output1 = display._last_buffer + + display.show(buffer) + output2 = display._last_buffer + + assert output1 == output2, ( + "NullDisplay output should be identical for identical inputs" + ) + + def test_effect_chain_preserves_dimensions(self): + """Effect chain should preserve buffer dimensions.""" + from engine.effects.plugins.fade import FadeEffect + from engine.effects.plugins.glitch import GlitchEffect + from engine.effects.plugins.noise import NoiseEffect 
+ + ctx = EffectContext( + terminal_width=80, + terminal_height=24, + scroll_cam=0, + ticker_height=20, + ) + + buffer = ["x" * 80 for _ in range(24)] + original_len = len(buffer) + original_widths = [len(line) for line in buffer] + + effects = [NoiseEffect(), FadeEffect(), GlitchEffect()] + + for effect in effects: + buffer = effect.process(buffer, ctx) + + # Check dimensions preserved (check VISIBLE length, not raw) + # Effects may add ANSI codes which increase raw length but not visible width + assert len(buffer) == original_len, ( + f"{effect.name} changed line count from {original_len} to {len(buffer)}" + ) + for i, (orig_w, new_line) in enumerate( + zip(original_widths, buffer, strict=False) + ): + visible_len = len(strip_ansi(new_line)) + assert visible_len == orig_w, ( + f"{effect.name} changed line {i} visible width from {orig_w} to {visible_len}" + ) + + +class TestEffectTestMatrix: + """Effect test matrix - test each effect for stability.""" + + @pytest.fixture + def effect_names(self): + """List of all effect names to test.""" + return ["noise", "fade", "glitch", "firehose", "border"] + + @pytest.fixture + def stable_input_buffer(self): + """A predictable buffer for testing.""" + return [f"row{i:02d}" + " " * 70 for i in range(24)] + + @pytest.mark.parametrize("effect_name", ["noise", "fade", "glitch"]) + def test_effect_preserves_buffer_dimensions(self, effect_name, stable_input_buffer): + """Each effect should preserve input buffer dimensions.""" + try: + if effect_name == "border": + # Border is handled differently + pytest.skip("Border handled by display") + else: + effect_module = __import__( + f"engine.effects.plugins.{effect_name}", + fromlist=[f"{effect_name.title()}Effect"], + ) + effect_class = getattr(effect_module, f"{effect_name.title()}Effect") + effect = effect_class() + except ImportError: + pytest.skip(f"Effect {effect_name} not available") + + ctx = EffectContext( + terminal_width=80, + terminal_height=24, + scroll_cam=0, + 
ticker_height=20, + ) + + result = effect.process(stable_input_buffer, ctx) + + # Check dimensions preserved (check VISIBLE length) + assert len(result) == len(stable_input_buffer), ( + f"{effect_name} changed line count" + ) + for i, (orig, new) in enumerate(zip(stable_input_buffer, result, strict=False)): + visible_new = strip_ansi(new) + assert len(visible_new) == len(orig), ( + f"{effect_name} changed line {i} visible length from {len(orig)} to {len(visible_new)}" + ) + + @pytest.mark.parametrize("effect_name", ["noise", "fade", "glitch"]) + def test_effect_no_cursor_positioning(self, effect_name, stable_input_buffer): + """Effects should not use cursor positioning (causes display conflicts).""" + try: + effect_module = __import__( + f"engine.effects.plugins.{effect_name}", + fromlist=[f"{effect_name.title()}Effect"], + ) + effect_class = getattr(effect_module, f"{effect_name.title()}Effect") + effect = effect_class() + except ImportError: + pytest.skip(f"Effect {effect_name} not available") + + ctx = EffectContext( + terminal_width=80, + terminal_height=24, + scroll_cam=0, + ticker_height=20, + ) + + result = effect.process(stable_input_buffer, ctx) + + cursor_pos_pattern = re.compile(r"\033\[[0-9]+;[0-9]+H") + for i, line in enumerate(result): + match = cursor_pos_pattern.search(line) + assert match is None, ( + f"{effect_name} uses cursor positioning on line {i}: {repr(line[:50])}" + ) diff --git a/tests/test_hud.py b/tests/test_hud.py new file mode 100644 index 0000000..22cfcf9 --- /dev/null +++ b/tests/test_hud.py @@ -0,0 +1,106 @@ +from engine.effects.performance import PerformanceMonitor, set_monitor +from engine.effects.types import EffectContext + + +def test_hud_effect_adds_hud_lines(): + """Test that HUD effect adds HUD lines to the buffer.""" + from engine.effects.plugins.hud import HudEffect + + set_monitor(PerformanceMonitor()) + + hud = HudEffect() + hud.config.params["display_effect"] = "noise" + hud.config.params["display_intensity"] = 0.5 + + 
ctx = EffectContext( + terminal_width=80, + terminal_height=24, + scroll_cam=0, + ticker_height=24, + mic_excess=0.0, + grad_offset=0.0, + frame_number=0, + has_message=False, + items=[], + ) + + buf = [ + "A" * 80, + "B" * 80, + "C" * 80, + ] + + result = hud.process(buf, ctx) + + assert len(result) >= 3, f"Expected at least 3 lines, got {len(result)}" + + first_line = result[0] + assert "MAINLINE DEMO" in first_line, ( + f"HUD not found in first line: {first_line[:50]}" + ) + + second_line = result[1] + assert "EFFECT:" in second_line, f"Effect line not found: {second_line[:50]}" + + print("First line:", result[0]) + print("Second line:", result[1]) + if len(result) > 2: + print("Third line:", result[2]) + + +def test_hud_effect_shows_current_effect(): + """Test that HUD displays the correct effect name.""" + from engine.effects.plugins.hud import HudEffect + + set_monitor(PerformanceMonitor()) + + hud = HudEffect() + hud.config.params["display_effect"] = "fade" + hud.config.params["display_intensity"] = 0.75 + + ctx = EffectContext( + terminal_width=80, + terminal_height=24, + scroll_cam=0, + ticker_height=24, + mic_excess=0.0, + grad_offset=0.0, + frame_number=0, + has_message=False, + items=[], + ) + + buf = ["X" * 80] + result = hud.process(buf, ctx) + + second_line = result[1] + assert "fade" in second_line, f"Effect name 'fade' not found in: {second_line}" + + +def test_hud_effect_shows_intensity(): + """Test that HUD displays intensity percentage.""" + from engine.effects.plugins.hud import HudEffect + + set_monitor(PerformanceMonitor()) + + hud = HudEffect() + hud.config.params["display_effect"] = "glitch" + hud.config.params["display_intensity"] = 0.8 + + ctx = EffectContext( + terminal_width=80, + terminal_height=24, + scroll_cam=0, + ticker_height=24, + mic_excess=0.0, + grad_offset=0.0, + frame_number=0, + has_message=False, + items=[], + ) + + buf = ["Y" * 80] + result = hud.process(buf, ctx) + + second_line = result[1] + assert "80%" in second_line, 
f"Intensity 80% not found in: {second_line}" diff --git a/tests/test_layers.py b/tests/test_layers.py deleted file mode 100644 index afe9c07..0000000 --- a/tests/test_layers.py +++ /dev/null @@ -1,96 +0,0 @@ -""" -Tests for engine.layers module. -""" - -import time - -from engine import layers - - -class TestRenderMessageOverlay: - """Tests for render_message_overlay function.""" - - def test_no_message_returns_empty(self): - """Returns empty list when msg is None.""" - result, cache = layers.render_message_overlay(None, 80, 24, (None, None)) - assert result == [] - assert cache[0] is None - - def test_message_returns_overlay_lines(self): - """Returns non-empty list when message is present.""" - msg = ("Test Title", "Test Body", time.monotonic()) - result, cache = layers.render_message_overlay(msg, 80, 24, (None, None)) - assert len(result) > 0 - assert cache[0] is not None - - def test_cache_key_changes_with_text(self): - """Cache key changes when message text changes.""" - msg1 = ("Title1", "Body1", time.monotonic()) - msg2 = ("Title2", "Body2", time.monotonic()) - - _, cache1 = layers.render_message_overlay(msg1, 80, 24, (None, None)) - _, cache2 = layers.render_message_overlay(msg2, 80, 24, cache1) - - assert cache1[0] != cache2[0] - - def test_cache_reuse_avoids_recomputation(self): - """Cache is returned when same message is passed (interface test).""" - msg = ("Same Title", "Same Body", time.monotonic()) - - result1, cache1 = layers.render_message_overlay(msg, 80, 24, (None, None)) - result2, cache2 = layers.render_message_overlay(msg, 80, 24, cache1) - - assert len(result1) > 0 - assert len(result2) > 0 - assert cache1[0] == cache2[0] - - -class TestRenderFirehose: - """Tests for render_firehose function.""" - - def test_no_firehose_returns_empty(self): - """Returns empty list when firehose height is 0.""" - items = [("Headline", "Source", "12:00")] - result = layers.render_firehose(items, 80, 0, 24) - assert result == [] - - def 
test_firehose_returns_lines(self): - """Returns lines when firehose height > 0.""" - items = [("Headline", "Source", "12:00")] - result = layers.render_firehose(items, 80, 4, 24) - assert len(result) == 4 - - def test_firehose_includes_ansi_escapes(self): - """Returns lines containing ANSI escape sequences.""" - items = [("Headline", "Source", "12:00")] - result = layers.render_firehose(items, 80, 1, 24) - assert "\033[" in result[0] - - -class TestApplyGlitch: - """Tests for apply_glitch function.""" - - def test_empty_buffer_unchanged(self): - """Empty buffer is returned unchanged.""" - result = layers.apply_glitch([], 0, 0.0, 80) - assert result == [] - - def test_buffer_length_preserved(self): - """Buffer length is preserved after glitch application.""" - buf = [f"\033[{i + 1};1Htest\033[K" for i in range(10)] - result = layers.apply_glitch(buf, 0, 0.5, 80) - assert len(result) == len(buf) - - -class TestRenderTickerZone: - """Tests for render_ticker_zone function - focusing on interface.""" - - def test_returns_list(self): - """Returns a list of strings.""" - result, cache = layers.render_ticker_zone([], 0, 10, 80, {}, 0.0) - assert isinstance(result, list) - - def test_returns_dict_for_cache(self): - """Returns a dict for the noise cache.""" - result, cache = layers.render_ticker_zone([], 0, 10, 80, {}, 0.0) - assert isinstance(cache, dict) diff --git a/tests/test_mic.py b/tests/test_mic.py deleted file mode 100644 index a347e5f..0000000 --- a/tests/test_mic.py +++ /dev/null @@ -1,149 +0,0 @@ -""" -Tests for engine.mic module. 
-""" - -from datetime import datetime -from unittest.mock import patch - -from engine.events import MicLevelEvent - - -class TestMicMonitorImport: - """Tests for module import behavior.""" - - def test_mic_monitor_imports_without_error(self): - """MicMonitor can be imported even without sounddevice.""" - from engine.mic import MicMonitor - - assert MicMonitor is not None - - -class TestMicMonitorInit: - """Tests for MicMonitor initialization.""" - - def test_init_sets_threshold(self): - """Threshold is set correctly.""" - from engine.mic import MicMonitor - - monitor = MicMonitor(threshold_db=60) - assert monitor.threshold_db == 60 - - def test_init_defaults(self): - """Default values are set correctly.""" - from engine.mic import MicMonitor - - monitor = MicMonitor() - assert monitor.threshold_db == 50 - - def test_init_db_starts_at_negative(self): - """_db starts at negative value.""" - from engine.mic import MicMonitor - - monitor = MicMonitor() - assert monitor.db == -99.0 - - -class TestMicMonitorProperties: - """Tests for MicMonitor properties.""" - - def test_excess_returns_positive_when_above_threshold(self): - """excess returns positive value when above threshold.""" - from engine.mic import MicMonitor - - monitor = MicMonitor(threshold_db=50) - with patch.object(monitor, "_db", 60.0): - assert monitor.excess == 10.0 - - def test_excess_returns_zero_when_below_threshold(self): - """excess returns zero when below threshold.""" - from engine.mic import MicMonitor - - monitor = MicMonitor(threshold_db=50) - with patch.object(monitor, "_db", 40.0): - assert monitor.excess == 0.0 - - -class TestMicMonitorAvailable: - """Tests for MicMonitor.available property.""" - - def test_available_is_bool(self): - """available returns a boolean.""" - from engine.mic import MicMonitor - - monitor = MicMonitor() - assert isinstance(monitor.available, bool) - - -class TestMicMonitorStop: - """Tests for MicMonitor.stop method.""" - - def 
test_stop_does_nothing_when_no_stream(self): - """stop() does nothing if no stream exists.""" - from engine.mic import MicMonitor - - monitor = MicMonitor() - monitor.stop() - assert monitor._stream is None - - -class TestMicMonitorEventEmission: - """Tests for MicMonitor event emission.""" - - def test_subscribe_adds_callback(self): - """subscribe() adds a callback.""" - from engine.mic import MicMonitor - - monitor = MicMonitor() - def callback(e): - return None - - monitor.subscribe(callback) - - assert callback in monitor._subscribers - - def test_unsubscribe_removes_callback(self): - """unsubscribe() removes a callback.""" - from engine.mic import MicMonitor - - monitor = MicMonitor() - def callback(e): - return None - monitor.subscribe(callback) - - monitor.unsubscribe(callback) - - assert callback not in monitor._subscribers - - def test_emit_calls_subscribers(self): - """_emit() calls all subscribers.""" - from engine.mic import MicMonitor - - monitor = MicMonitor() - received = [] - - def callback(event): - received.append(event) - - monitor.subscribe(callback) - event = MicLevelEvent( - db_level=60.0, excess_above_threshold=10.0, timestamp=datetime.now() - ) - monitor._emit(event) - - assert len(received) == 1 - assert received[0].db_level == 60.0 - - def test_emit_handles_subscriber_exception(self): - """_emit() handles exceptions in subscribers gracefully.""" - from engine.mic import MicMonitor - - monitor = MicMonitor() - - def bad_callback(event): - raise RuntimeError("test") - - monitor.subscribe(bad_callback) - event = MicLevelEvent( - db_level=60.0, excess_above_threshold=10.0, timestamp=datetime.now() - ) - monitor._emit(event) diff --git a/tests/test_ntfy_integration.py b/tests/test_ntfy_integration.py new file mode 100644 index 0000000..a6aaa5d --- /dev/null +++ b/tests/test_ntfy_integration.py @@ -0,0 +1,131 @@ +""" +Integration tests for ntfy topics. 
+""" + +import json +import time +import urllib.request + +import pytest + + +@pytest.mark.integration +@pytest.mark.ntfy +class TestNtfyTopics: + def test_cc_cmd_topic_exists_and_writable(self): + """Verify C&C CMD topic exists and accepts messages.""" + from engine.config import NTFY_CC_CMD_TOPIC + + topic_url = NTFY_CC_CMD_TOPIC.replace("/json", "") + test_message = f"test_{int(time.time())}" + + req = urllib.request.Request( + topic_url, + data=test_message.encode("utf-8"), + headers={ + "User-Agent": "mainline-test/0.1", + "Content-Type": "text/plain", + }, + method="POST", + ) + + try: + with urllib.request.urlopen(req, timeout=10) as resp: + assert resp.status == 200 + except Exception as e: + raise AssertionError(f"Failed to write to C&C CMD topic: {e}") from e + + def test_cc_resp_topic_exists_and_writable(self): + """Verify C&C RESP topic exists and accepts messages.""" + from engine.config import NTFY_CC_RESP_TOPIC + + topic_url = NTFY_CC_RESP_TOPIC.replace("/json", "") + test_message = f"test_{int(time.time())}" + + req = urllib.request.Request( + topic_url, + data=test_message.encode("utf-8"), + headers={ + "User-Agent": "mainline-test/0.1", + "Content-Type": "text/plain", + }, + method="POST", + ) + + try: + with urllib.request.urlopen(req, timeout=10) as resp: + assert resp.status == 200 + except Exception as e: + raise AssertionError(f"Failed to write to C&C RESP topic: {e}") from e + + def test_message_topic_exists_and_writable(self): + """Verify message topic exists and accepts messages.""" + from engine.config import NTFY_TOPIC + + topic_url = NTFY_TOPIC.replace("/json", "") + test_message = f"test_{int(time.time())}" + + req = urllib.request.Request( + topic_url, + data=test_message.encode("utf-8"), + headers={ + "User-Agent": "mainline-test/0.1", + "Content-Type": "text/plain", + }, + method="POST", + ) + + try: + with urllib.request.urlopen(req, timeout=10) as resp: + assert resp.status == 200 + except Exception as e: + raise 
AssertionError(f"Failed to write to message topic: {e}") from e + + def test_cc_cmd_topic_readable(self): + """Verify we can read messages from C&C CMD topic.""" + from engine.config import NTFY_CC_CMD_TOPIC + + test_message = f"integration_test_{int(time.time())}" + topic_url = NTFY_CC_CMD_TOPIC.replace("/json", "") + + req = urllib.request.Request( + topic_url, + data=test_message.encode("utf-8"), + headers={ + "User-Agent": "mainline-test/0.1", + "Content-Type": "text/plain", + }, + method="POST", + ) + + try: + urllib.request.urlopen(req, timeout=10) + except Exception as e: + raise AssertionError(f"Failed to write to C&C CMD topic: {e}") from e + + time.sleep(1) + + poll_url = f"{NTFY_CC_CMD_TOPIC}?poll=1&limit=1" + req = urllib.request.Request( + poll_url, + headers={"User-Agent": "mainline-test/0.1"}, + ) + + try: + with urllib.request.urlopen(req, timeout=10) as resp: + body = resp.read().decode("utf-8") + if body.strip(): + data = json.loads(body.split("\n")[0]) + assert isinstance(data, dict) + except Exception as e: + raise AssertionError(f"Failed to read from C&C CMD topic: {e}") from e + + def test_topics_are_different(self): + """Verify C&C CMD/RESP and message topics are different.""" + from engine.config import NTFY_CC_CMD_TOPIC, NTFY_CC_RESP_TOPIC, NTFY_TOPIC + + assert NTFY_CC_CMD_TOPIC != NTFY_TOPIC + assert NTFY_CC_RESP_TOPIC != NTFY_TOPIC + assert NTFY_CC_CMD_TOPIC != NTFY_CC_RESP_TOPIC + assert "_cc_cmd" in NTFY_CC_CMD_TOPIC + assert "_cc_resp" in NTFY_CC_RESP_TOPIC diff --git a/tests/test_performance_regression.py b/tests/test_performance_regression.py new file mode 100644 index 0000000..662c8bb --- /dev/null +++ b/tests/test_performance_regression.py @@ -0,0 +1,185 @@ +"""Performance regression tests for pipeline stages with realistic data volumes. + +These tests verify that the pipeline maintains performance with large datasets +by ensuring ViewportFilterStage prevents FontStage from rendering excessive items. 
+ +Uses pytest-benchmark for statistical benchmarking with automatic regression detection. +""" + +import pytest + +from engine.data_sources.sources import SourceItem +from engine.pipeline.adapters import FontStage, ViewportFilterStage +from engine.pipeline.core import PipelineContext +from engine.pipeline.params import PipelineParams + + +class TestViewportFilterPerformance: + """Test ViewportFilterStage performance with realistic data volumes.""" + + @pytest.mark.benchmark + def test_filter_2000_items_to_viewport(self, benchmark): + """Benchmark: Filter 2000 items to viewport size. + + Performance threshold: Must complete in < 1ms per iteration + This tests the filtering overhead is negligible. + """ + # Create 2000 test items (more than real headline sources) + test_items = [ + SourceItem(f"Headline {i}", f"source-{i % 10}", str(i)) for i in range(2000) + ] + + stage = ViewportFilterStage() + ctx = PipelineContext() + ctx.params = PipelineParams(viewport_height=24) + + result = benchmark(stage.process, test_items, ctx) + + # Verify result is correct - viewport filter takes first N items + assert len(result) <= 24 # viewport height + assert len(result) > 0 + + @pytest.mark.benchmark + def test_font_stage_with_filtered_items(self, benchmark): + """Benchmark: FontStage rendering filtered (5) items. + + Performance threshold: Must complete in < 50ms per iteration + This tests that filtering saves significant time by reducing FontStage work. 
+ """ + # Create filtered items (what ViewportFilterStage outputs) + filtered_items = [ + SourceItem(f"Headline {i}", "source", str(i)) + for i in range(5) # Filtered count + ] + + font_stage = FontStage() + ctx = PipelineContext() + ctx.params = PipelineParams() + + result = benchmark(font_stage.process, filtered_items, ctx) + + # Should render successfully + assert result is not None + assert isinstance(result, list) + assert len(result) > 0 + + def test_filter_reduces_work_by_288x(self): + """Verify ViewportFilterStage achieves expected performance improvement. + + With 1438 items and 24-line viewport: + - Without filter: FontStage renders all 1438 items + - With filter: FontStage renders ~4 items (height-based) + - Expected improvement: 1438 / 4 ≈ 360x + """ + test_items = [ + SourceItem(f"Headline {i}", "source", str(i)) for i in range(1438) + ] + + stage = ViewportFilterStage() + ctx = PipelineContext() + ctx.params = PipelineParams(viewport_height=24) + + filtered = stage.process(test_items, ctx) + improvement_factor = len(test_items) / len(filtered) + + # Verify we get significant improvement (height-based filtering) + assert 300 < improvement_factor < 500 + # Verify filtered count is ~4 (24 viewport / 6 rows per item) + assert len(filtered) == 4 + + +class TestPipelinePerformanceWithRealData: + """Integration tests for full pipeline performance with large datasets.""" + + def test_pipeline_handles_large_item_count(self): + """Test that pipeline doesn't hang with 2000+ items due to filtering.""" + # Create large dataset + large_items = [ + SourceItem(f"Headline {i}", f"source-{i % 5}", str(i)) for i in range(2000) + ] + + filter_stage = ViewportFilterStage() + font_stage = FontStage() + + ctx = PipelineContext() + ctx.params = PipelineParams(viewport_height=24) + + # Filter should reduce items quickly + filtered = filter_stage.process(large_items, ctx) + assert len(filtered) < len(large_items) + + # FontStage should process filtered items quickly + rendered 
= font_stage.process(filtered, ctx) + assert rendered is not None + + def test_multiple_viewports_filter_correctly(self): + """Test that filter respects different viewport configurations.""" + large_items = [ + SourceItem(f"Headline {i}", "source", str(i)) for i in range(1000) + ] + + stage = ViewportFilterStage() + + # Test different viewport heights + test_cases = [ + (12, 12), # 12px height -> 12 items + (24, 24), # 24px height -> 24 items + (48, 48), # 48px height -> 48 items + ] + + for viewport_height, expected_max_items in test_cases: + ctx = PipelineContext() + ctx.params = PipelineParams(viewport_height=viewport_height) + + filtered = stage.process(large_items, ctx) + + # Verify filtering is proportional to viewport + assert len(filtered) <= expected_max_items + 1 + assert len(filtered) > 0 + + +class TestPerformanceRegressions: + """Tests that catch common performance regressions.""" + + def test_filter_doesnt_render_all_items(self): + """Regression test: Ensure filter doesn't accidentally render all items. + + This would indicate that ViewportFilterStage is broken or bypassed. + """ + large_items = [ + SourceItem(f"Headline {i}", "source", str(i)) for i in range(1438) + ] + + stage = ViewportFilterStage() + ctx = PipelineContext() + ctx.params = PipelineParams() + + filtered = stage.process(large_items, ctx) + + # Should NOT have all items (regression detection) + assert len(filtered) != len(large_items) + # With height-based filtering, ~4 items fit in 24-row viewport (6 rows/item) + assert len(filtered) == 4 + + def test_font_stage_doesnt_hang_with_filter(self): + """Regression test: FontStage shouldn't hang when receiving filtered data. + + Previously, FontStage would render all items, causing 10+ second hangs. + Now it should receive only ~5 items and complete quickly. 
+ """ + # Simulate what happens after ViewportFilterStage + filtered_items = [ + SourceItem(f"Headline {i}", "source", str(i)) + for i in range(5) # What filter outputs + ] + + font_stage = FontStage() + ctx = PipelineContext() + ctx.params = PipelineParams() + + # Should complete instantly (not hang) + result = font_stage.process(filtered_items, ctx) + + # Verify it actually worked + assert result is not None + assert isinstance(result, list) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py new file mode 100644 index 0000000..c8b86c1 --- /dev/null +++ b/tests/test_pipeline.py @@ -0,0 +1,1844 @@ +""" +Tests for the new unified pipeline architecture. +""" + +from unittest.mock import MagicMock + +import pytest + +from engine.pipeline import ( + Pipeline, + PipelineConfig, + PipelineContext, + Stage, + StageRegistry, + create_default_pipeline, + discover_stages, +) +from engine.pipeline.core import DataType, StageError + + +class TestStageRegistry: + """Tests for StageRegistry.""" + + def setup_method(self): + """Reset registry before each test.""" + StageRegistry._discovered = False + StageRegistry._categories.clear() + StageRegistry._instances.clear() + + def test_discover_stages_registers_sources(self): + """discover_stages registers source stages.""" + discover_stages() + + sources = StageRegistry.list("source") + assert "HeadlinesDataSource" in sources + assert "PoetryDataSource" in sources + assert "PipelineIntrospectionSource" in sources + + def test_discover_stages_registers_displays(self): + """discover_stages registers display stages.""" + discover_stages() + + displays = StageRegistry.list("display") + assert "terminal" in displays + assert "pygame" in displays + assert "websocket" in displays + assert "null" in displays + + def test_create_source_stage(self): + """StageRegistry.create creates source stages.""" + discover_stages() + + source = StageRegistry.create("source", "HeadlinesDataSource") + assert source is not None + assert source.name 
== "headlines" + + def test_create_display_stage(self): + """StageRegistry.create creates display stages.""" + discover_stages() + + display = StageRegistry.create("display", "terminal") + assert display is not None + assert hasattr(display, "_display") + + def test_create_display_stage_pygame(self): + """StageRegistry.create creates pygame display stage.""" + discover_stages() + + display = StageRegistry.create("display", "pygame") + assert display is not None + + +class TestPipeline: + """Tests for Pipeline class.""" + + def setup_method(self): + """Reset registry before each test.""" + StageRegistry._discovered = False + StageRegistry._categories.clear() + StageRegistry._instances.clear() + discover_stages() + + def test_create_pipeline(self): + """Pipeline can be created with config.""" + config = PipelineConfig(source="headlines", display="terminal") + pipeline = Pipeline(config=config) + + assert pipeline.config is not None + assert pipeline.config.source == "headlines" + assert pipeline.config.display == "terminal" + + def test_add_stage(self): + """Pipeline.add_stage adds a stage.""" + pipeline = Pipeline() + mock_stage = MagicMock(spec=Stage) + mock_stage.name = "test_stage" + mock_stage.category = "test" + + pipeline.add_stage("test", mock_stage) + + assert "test" in pipeline.stages + + def test_build_resolves_dependencies(self): + """Pipeline.build resolves execution order.""" + from engine.pipeline.core import DataType + + pipeline = Pipeline() + mock_source = MagicMock(spec=Stage) + mock_source.name = "source" + mock_source.category = "source" + mock_source.stage_type = "source" + mock_source.render_order = 0 + mock_source.is_overlay = False + mock_source.inlet_types = {DataType.NONE} + mock_source.outlet_types = {DataType.SOURCE_ITEMS} + mock_source.dependencies = set() + mock_source.capabilities = {"source"} + + mock_display = MagicMock(spec=Stage) + mock_display.name = "display" + mock_display.category = "display" + mock_display.stage_type = 
"display" + mock_display.render_order = 0 + mock_display.is_overlay = False + mock_display.inlet_types = {DataType.ANY} # Accept any type + mock_display.outlet_types = {DataType.NONE} + mock_display.dependencies = {"source"} + mock_display.capabilities = {"display"} + + pipeline.add_stage("source", mock_source) + pipeline.add_stage("display", mock_display) + pipeline.build(auto_inject=False) + + assert pipeline._initialized is True + assert "source" in pipeline.execution_order + assert "display" in pipeline.execution_order + + def test_execute_runs_stages(self): + """Pipeline.execute runs all stages in order.""" + from engine.pipeline.core import DataType + + pipeline = Pipeline() + + call_order = [] + + mock_source = MagicMock(spec=Stage) + mock_source.name = "source" + mock_source.category = "source" + mock_source.stage_type = "source" + mock_source.render_order = 0 + mock_source.is_overlay = False + mock_source.inlet_types = {DataType.NONE} + mock_source.outlet_types = {DataType.SOURCE_ITEMS} + mock_source.dependencies = set() + mock_source.capabilities = {"source"} + mock_source.process = lambda data, ctx: call_order.append("source") or "data" + + mock_effect = MagicMock(spec=Stage) + mock_effect.name = "effect" + mock_effect.category = "effect" + mock_effect.stage_type = "effect" + mock_effect.render_order = 0 + mock_effect.is_overlay = False + mock_effect.inlet_types = {DataType.SOURCE_ITEMS} + mock_effect.outlet_types = {DataType.TEXT_BUFFER} + mock_effect.dependencies = {"source"} + mock_effect.capabilities = {"effect"} + mock_effect.process = lambda data, ctx: call_order.append("effect") or data + + mock_display = MagicMock(spec=Stage) + mock_display.name = "display" + mock_display.category = "display" + mock_display.stage_type = "display" + mock_display.render_order = 0 + mock_display.is_overlay = False + mock_display.inlet_types = {DataType.TEXT_BUFFER} + mock_display.outlet_types = {DataType.NONE} + mock_display.dependencies = {"effect"} + 
mock_display.capabilities = {"display"} + mock_display.process = lambda data, ctx: call_order.append("display") or data + + pipeline.add_stage("source", mock_source) + pipeline.add_stage("effect", mock_effect) + pipeline.add_stage("display", mock_display) + pipeline.build(auto_inject=False) + + result = pipeline.execute(None) + + assert result.success is True + assert call_order == ["source", "effect", "display"] + + def test_execute_handles_stage_failure(self): + """Pipeline.execute handles stage failures.""" + pipeline = Pipeline() + + mock_source = MagicMock(spec=Stage) + mock_source.name = "source" + mock_source.category = "source" + mock_source.stage_type = "source" + mock_source.render_order = 0 + mock_source.is_overlay = False + mock_source.dependencies = set() + mock_source.capabilities = {"source"} + mock_source.process = lambda data, ctx: "data" + + mock_failing = MagicMock(spec=Stage) + mock_failing.name = "failing" + mock_failing.category = "effect" + mock_failing.stage_type = "effect" + mock_failing.render_order = 0 + mock_failing.is_overlay = False + mock_failing.dependencies = {"source"} + mock_failing.capabilities = {"effect"} + mock_failing.optional = False + mock_failing.process = lambda data, ctx: (_ for _ in ()).throw( + Exception("fail") + ) + + pipeline.add_stage("source", mock_source) + pipeline.add_stage("failing", mock_failing) + pipeline.build(auto_inject=False) + + result = pipeline.execute(None) + + assert result.success is False + assert result.error is not None + + def test_optional_stage_failure_continues(self): + """Pipeline.execute continues on optional stage failure.""" + pipeline = Pipeline() + + mock_source = MagicMock(spec=Stage) + mock_source.name = "source" + mock_source.category = "source" + mock_source.stage_type = "source" + mock_source.render_order = 0 + mock_source.is_overlay = False + mock_source.dependencies = set() + mock_source.capabilities = {"source"} + mock_source.process = lambda data, ctx: "data" + + 
mock_optional = MagicMock(spec=Stage) + mock_optional.name = "optional" + mock_optional.category = "effect" + mock_optional.stage_type = "effect" + mock_optional.render_order = 0 + mock_optional.is_overlay = False + mock_optional.dependencies = {"source"} + mock_optional.capabilities = {"effect"} + mock_optional.optional = True + mock_optional.process = lambda data, ctx: (_ for _ in ()).throw( + Exception("fail") + ) + + pipeline.add_stage("source", mock_source) + pipeline.add_stage("optional", mock_optional) + pipeline.build(auto_inject=False) + + result = pipeline.execute(None) + + assert result.success is True + + +class TestCapabilityBasedDependencies: + """Tests for capability-based dependency resolution.""" + + def test_capability_wildcard_resolution(self): + """Pipeline resolves dependencies using wildcard capabilities.""" + from engine.pipeline.controller import Pipeline + from engine.pipeline.core import Stage + + class SourceStage(Stage): + name = "headlines" + category = "source" + + @property + def capabilities(self): + return {"source.headlines"} + + @property + def dependencies(self): + return set() + + def process(self, data, ctx): + return data + + class RenderStage(Stage): + name = "render" + category = "render" + + @property + def capabilities(self): + return {"render.output"} + + @property + def dependencies(self): + return {"source.*"} + + def process(self, data, ctx): + return data + + pipeline = Pipeline() + pipeline.add_stage("headlines", SourceStage()) + pipeline.add_stage("render", RenderStage()) + pipeline.build(auto_inject=False) + + assert "headlines" in pipeline.execution_order + assert "render" in pipeline.execution_order + assert pipeline.execution_order.index( + "headlines" + ) < pipeline.execution_order.index("render") + + def test_missing_capability_raises_error(self): + """Pipeline raises error when capability is missing.""" + from engine.pipeline.controller import Pipeline + from engine.pipeline.core import Stage, StageError + + 
class RenderStage(Stage): + name = "render" + category = "render" + + @property + def capabilities(self): + return {"render.output"} + + @property + def dependencies(self): + return {"source.headlines"} + + def process(self, data, ctx): + return data + + pipeline = Pipeline() + pipeline.add_stage("render", RenderStage()) + + try: + pipeline.build(auto_inject=False) + raise AssertionError("Should have raised StageError") + except StageError as e: + assert "Missing capabilities" in e.message + assert "source.headlines" in e.message + + def test_multiple_stages_same_capability(self): + """Pipeline uses first registered stage for capability.""" + from engine.pipeline.controller import Pipeline + from engine.pipeline.core import Stage + + class SourceA(Stage): + name = "headlines" + category = "source" + + @property + def capabilities(self): + return {"source"} + + @property + def dependencies(self): + return set() + + def process(self, data, ctx): + return "A" + + class SourceB(Stage): + name = "poetry" + category = "source" + + @property + def capabilities(self): + return {"source"} + + @property + def dependencies(self): + return set() + + def process(self, data, ctx): + return "B" + + class DisplayStage(Stage): + name = "display" + category = "display" + + @property + def capabilities(self): + return {"display"} + + @property + def dependencies(self): + return {"source"} + + def process(self, data, ctx): + return data + + pipeline = Pipeline() + pipeline.add_stage("headlines", SourceA()) + pipeline.add_stage("poetry", SourceB()) + pipeline.add_stage("display", DisplayStage()) + pipeline.build(auto_inject=False) + + assert pipeline.execution_order[0] == "headlines" + + +class TestPipelineContext: + """Tests for PipelineContext.""" + + def test_init_empty(self): + """PipelineContext initializes with empty services and state.""" + ctx = PipelineContext() + + assert ctx.services == {} + assert ctx.state == {} + + def test_init_with_services(self): + """PipelineContext 
accepts initial services.""" + ctx = PipelineContext(services={"display": MagicMock()}) + + assert "display" in ctx.services + + def test_init_with_state(self): + """PipelineContext accepts initial state.""" + ctx = PipelineContext(initial_state={"count": 42}) + + assert ctx.get_state("count") == 42 + + def test_get_set_services(self): + """PipelineContext can get/set services.""" + ctx = PipelineContext() + mock_service = MagicMock() + + ctx.set("test_service", mock_service) + + assert ctx.get("test_service") == mock_service + + def test_get_set_state(self): + """PipelineContext can get/set state.""" + ctx = PipelineContext() + + ctx.set_state("counter", 100) + + assert ctx.get_state("counter") == 100 + + def test_lazy_resolver(self): + """PipelineContext resolves lazy services.""" + ctx = PipelineContext() + + config = ctx.get("config") + assert config is not None + + def test_has_capability(self): + """PipelineContext.has_capability checks for services.""" + ctx = PipelineContext(services={"display.output": MagicMock()}) + + assert ctx.has_capability("display.output") is True + assert ctx.has_capability("missing") is False + + +class TestCreateDefaultPipeline: + """Tests for create_default_pipeline function.""" + + def setup_method(self): + """Reset registry before each test.""" + StageRegistry._discovered = False + StageRegistry._categories.clear() + StageRegistry._instances.clear() + discover_stages() + + def test_create_default_pipeline(self): + """create_default_pipeline creates a working pipeline.""" + pipeline = create_default_pipeline() + + assert pipeline is not None + assert "display" in pipeline.stages + + +class TestPipelineParams: + """Tests for PipelineParams.""" + + def test_default_values(self): + """PipelineParams has correct defaults.""" + from engine.pipeline.params import PipelineParams + + params = PipelineParams() + assert params.source == "headlines" + assert params.display == "terminal" + assert params.camera_mode == "vertical" + assert 
params.effect_order == ["noise", "fade", "glitch", "firehose"] + + def test_effect_config(self): + """PipelineParams effect config methods work.""" + from engine.pipeline.params import PipelineParams + + params = PipelineParams() + enabled, intensity = params.get_effect_config("noise") + assert enabled is True + assert intensity == 1.0 + + params.set_effect_config("noise", False, 0.5) + enabled, intensity = params.get_effect_config("noise") + assert enabled is False + assert intensity == 0.5 + + def test_is_effect_enabled(self): + """PipelineParams is_effect_enabled works.""" + from engine.pipeline.params import PipelineParams + + params = PipelineParams() + assert params.is_effect_enabled("noise") is True + + params.effect_enabled["noise"] = False + assert params.is_effect_enabled("noise") is False + + def test_to_dict_from_dict(self): + """PipelineParams serialization roundtrip works.""" + from engine.pipeline.params import PipelineParams + + params = PipelineParams() + params.viewport_width = 100 + params.viewport_height = 50 + + data = params.to_dict() + restored = PipelineParams.from_dict(data) + + assert restored.viewport_width == 100 + assert restored.viewport_height == 50 + + def test_copy(self): + """PipelineParams copy works.""" + from engine.pipeline.params import PipelineParams + + params = PipelineParams() + params.viewport_width = 100 + params.effect_enabled["noise"] = False + + copy = params.copy() + assert copy.viewport_width == 100 + assert copy.effect_enabled["noise"] is False + + +class TestPipelinePresets: + """Tests for pipeline presets.""" + + def test_presets_defined(self): + """All expected presets are defined.""" + from engine.pipeline.presets import ( + DEMO_PRESET, + FIREHOSE_PRESET, + PIPELINE_VIZ_PRESET, + POETRY_PRESET, + UI_PRESET, + WEBSOCKET_PRESET, + ) + + assert DEMO_PRESET.name == "demo" + assert POETRY_PRESET.name == "poetry" + assert FIREHOSE_PRESET.name == "firehose" + assert PIPELINE_VIZ_PRESET.name == "pipeline" + assert 
WEBSOCKET_PRESET.name == "websocket" + assert UI_PRESET.name == "ui" + + def test_preset_to_params(self): + """Presets convert to PipelineParams correctly.""" + from engine.pipeline.presets import DEMO_PRESET + + params = DEMO_PRESET.to_params() + assert params.source == "headlines" + assert params.display == "pygame" + assert "noise" in params.effect_order + + def test_list_presets(self): + """list_presets returns all presets.""" + from engine.pipeline.presets import list_presets + + presets = list_presets() + assert "demo" in presets + assert "poetry" in presets + assert "firehose" in presets + + def test_get_preset(self): + """get_preset returns correct preset.""" + from engine.pipeline.presets import get_preset + + preset = get_preset("demo") + assert preset is not None + assert preset.name == "demo" + + assert get_preset("nonexistent") is None + + +class TestStageAdapters: + """Tests for pipeline stage adapters.""" + + def test_display_stage_init(self): + """DisplayStage.init initializes display.""" + from engine.display.backends.null import NullDisplay + from engine.pipeline.adapters import DisplayStage + from engine.pipeline.core import PipelineContext + from engine.pipeline.params import PipelineParams + + display = NullDisplay() + stage = DisplayStage(display, name="null") + ctx = PipelineContext() + ctx.params = PipelineParams() + + result = stage.init(ctx) + assert result is True + + def test_display_stage_process(self): + """DisplayStage.process forwards to display.""" + from engine.display.backends.null import NullDisplay + from engine.pipeline.adapters import DisplayStage + from engine.pipeline.core import PipelineContext + from engine.pipeline.params import PipelineParams + + display = NullDisplay() + stage = DisplayStage(display, name="null") + ctx = PipelineContext() + ctx.params = PipelineParams() + + stage.init(ctx) + buffer = ["line1", "line2"] + result = stage.process(buffer, ctx) + assert result == buffer + + def test_camera_stage(self): + 
"""CameraStage applies camera transform.""" + from engine.camera import Camera, CameraMode + from engine.pipeline.adapters import CameraStage + from engine.pipeline.core import PipelineContext + + camera = Camera(mode=CameraMode.FEED) + stage = CameraStage(camera, name="vertical") + PipelineContext() + + assert "camera" in stage.capabilities + assert "render.output" in stage.dependencies # Depends on rendered content + + def test_camera_stage_does_not_error_on_process(self): + """CameraStage.process should not error when setting viewport. + + Regression test: Previously CameraStage tried to set viewport_width + and viewport_height as writable properties, but they are computed + from canvas_size / zoom. This caused an AttributeError each frame. + """ + from engine.camera import Camera, CameraMode + from engine.pipeline.adapters import CameraStage + from engine.pipeline.core import PipelineContext + from engine.pipeline.params import PipelineParams + + camera = Camera(mode=CameraMode.FEED) + stage = CameraStage(camera, name="vertical") + + ctx = PipelineContext() + ctx.params = PipelineParams(viewport_width=80, viewport_height=24) + + buffer = ["line" + str(i) for i in range(24)] + + # This should not raise AttributeError + result = stage.process(buffer, ctx) + + # Should return the buffer (unchanged for FEED mode) + assert result is not None + assert len(result) == 24 + + +class TestDataSourceStage: + """Tests for DataSourceStage adapter.""" + + def test_datasource_stage_capabilities(self): + """DataSourceStage declares correct capabilities.""" + from engine.data_sources.sources import HeadlinesDataSource + from engine.pipeline.adapters import DataSourceStage + + source = HeadlinesDataSource() + stage = DataSourceStage(source, name="headlines") + + assert "source.headlines" in stage.capabilities + + def test_datasource_stage_process(self): + """DataSourceStage fetches from DataSource.""" + from unittest.mock import patch + + from engine.data_sources.sources import 
HeadlinesDataSource + from engine.pipeline.adapters import DataSourceStage + from engine.pipeline.core import PipelineContext + + mock_items = [ + ("Test Headline 1", "TestSource", "12:00"), + ("Test Headline 2", "TestSource", "12:01"), + ] + + with patch("engine.fetch.fetch_all", return_value=(mock_items, 1, 0)): + source = HeadlinesDataSource() + stage = DataSourceStage(source, name="headlines") + + result = stage.process(None, PipelineContext()) + + assert result is not None + assert isinstance(result, list) + + +class TestEffectPluginStage: + """Tests for EffectPluginStage adapter.""" + + def test_effect_stage_capabilities(self): + """EffectPluginStage declares correct capabilities.""" + from engine.effects.types import EffectPlugin + from engine.pipeline.adapters import EffectPluginStage + + class TestEffect(EffectPlugin): + name = "test" + + def process(self, buf, ctx): + return buf + + def configure(self, config): + pass + + effect = TestEffect() + stage = EffectPluginStage(effect, name="test") + + assert "effect.test" in stage.capabilities + + def test_effect_stage_with_sensor_bindings(self): + """EffectPluginStage applies sensor param bindings.""" + from engine.effects.types import EffectConfig, EffectPlugin + from engine.pipeline.adapters import EffectPluginStage + from engine.pipeline.core import PipelineContext + from engine.pipeline.params import PipelineParams + + class SensorDrivenEffect(EffectPlugin): + name = "sensor_effect" + config = EffectConfig(intensity=1.0) + param_bindings = { + "intensity": {"sensor": "mic", "transform": "linear"}, + } + + def process(self, buf, ctx): + return buf + + def configure(self, config): + pass + + effect = SensorDrivenEffect() + stage = EffectPluginStage(effect, name="sensor_effect") + + ctx = PipelineContext() + ctx.params = PipelineParams() + ctx.set_state("sensor.mic", 0.5) + + result = stage.process(["test"], ctx) + assert result == ["test"] + + +class TestFullPipeline: + """End-to-end tests for the full 
pipeline.""" + + def test_pipeline_circular_dependency_detection(self): + """Pipeline detects circular dependencies.""" + from engine.pipeline.controller import Pipeline + from engine.pipeline.core import Stage + + class StageA(Stage): + name = "a" + + @property + def capabilities(self): + return {"a"} + + @property + def dependencies(self): + return {"b"} + + def process(self, data, ctx): + return data + + class StageB(Stage): + name = "b" + + @property + def capabilities(self): + return {"b"} + + @property + def dependencies(self): + return {"a"} + + def process(self, data, ctx): + return data + + pipeline = Pipeline() + pipeline.add_stage("a", StageA()) + pipeline.add_stage("b", StageB()) + + try: + pipeline.build(auto_inject=False) + raise AssertionError("Should detect circular dependency") + except Exception: + pass + + +class TestPipelineMetrics: + """Tests for pipeline metrics collection.""" + + def test_metrics_collected(self): + """Pipeline collects metrics when enabled.""" + from engine.pipeline.controller import Pipeline, PipelineConfig + from engine.pipeline.core import Stage + + class DummyStage(Stage): + name = "dummy" + category = "test" + + def process(self, data, ctx): + return data + + config = PipelineConfig(enable_metrics=True) + pipeline = Pipeline(config=config) + pipeline.add_stage("dummy", DummyStage()) + pipeline.build(auto_inject=False) + + pipeline.execute("test_data") + + summary = pipeline.get_metrics_summary() + assert "pipeline" in summary + assert summary["frame_count"] == 1 + + def test_metrics_disabled(self): + """Pipeline skips metrics when disabled.""" + from engine.pipeline.controller import Pipeline, PipelineConfig + from engine.pipeline.core import Stage + + class DummyStage(Stage): + name = "dummy" + category = "test" + + def process(self, data, ctx): + return data + + config = PipelineConfig(enable_metrics=False) + pipeline = Pipeline(config=config) + pipeline.add_stage("dummy", DummyStage()) + 
pipeline.build(auto_inject=False) + + pipeline.execute("test_data") + + summary = pipeline.get_metrics_summary() + assert "error" in summary + + def test_reset_metrics(self): + """Pipeline.reset_metrics clears collected metrics.""" + from engine.pipeline.controller import Pipeline, PipelineConfig + from engine.pipeline.core import Stage + + class DummyStage(Stage): + name = "dummy" + category = "test" + + def process(self, data, ctx): + return data + + config = PipelineConfig(enable_metrics=True) + pipeline = Pipeline(config=config) + pipeline.add_stage("dummy", DummyStage()) + pipeline.build(auto_inject=False) + + pipeline.execute("test1") + pipeline.execute("test2") + + assert pipeline.get_metrics_summary()["frame_count"] == 2 + + pipeline.reset_metrics() + # After reset, metrics collection starts fresh + pipeline.execute("test3") + assert pipeline.get_metrics_summary()["frame_count"] == 1 + + +class TestOverlayStages: + """Tests for overlay stage support.""" + + def test_stage_is_overlay_property(self): + """Stage has is_overlay property defaulting to False.""" + from engine.pipeline.core import Stage + + class TestStage(Stage): + name = "test" + category = "effect" + + def process(self, data, ctx): + return data + + stage = TestStage() + assert stage.is_overlay is False + + def test_stage_render_order_property(self): + """Stage has render_order property defaulting to 0.""" + from engine.pipeline.core import Stage + + class TestStage(Stage): + name = "test" + category = "effect" + + def process(self, data, ctx): + return data + + stage = TestStage() + assert stage.render_order == 0 + + def test_stage_stage_type_property(self): + """Stage has stage_type property defaulting to category.""" + from engine.pipeline.core import Stage + + class TestStage(Stage): + name = "test" + category = "effect" + + def process(self, data, ctx): + return data + + stage = TestStage() + assert stage.stage_type == "effect" + + def test_pipeline_get_overlay_stages(self): + 
"""Pipeline.get_overlay_stages returns overlay stages sorted by render_order.""" + from engine.pipeline.controller import Pipeline + from engine.pipeline.core import Stage + + class OverlayStageA(Stage): + name = "overlay_a" + category = "overlay" + + @property + def is_overlay(self): + return True + + @property + def render_order(self): + return 10 + + def process(self, data, ctx): + return data + + class OverlayStageB(Stage): + name = "overlay_b" + category = "overlay" + + @property + def is_overlay(self): + return True + + @property + def render_order(self): + return 5 + + def process(self, data, ctx): + return data + + class RegularStage(Stage): + name = "regular" + category = "effect" + + def process(self, data, ctx): + return data + + pipeline = Pipeline() + pipeline.add_stage("overlay_a", OverlayStageA()) + pipeline.add_stage("overlay_b", OverlayStageB()) + pipeline.add_stage("regular", RegularStage()) + pipeline.build(auto_inject=False) + + overlays = pipeline.get_overlay_stages() + assert len(overlays) == 2 + # Should be sorted by render_order + assert overlays[0].name == "overlay_b" # render_order=5 + assert overlays[1].name == "overlay_a" # render_order=10 + + def test_pipeline_executes_overlays_after_regular(self): + """Pipeline executes overlays after regular stages.""" + from engine.pipeline.controller import Pipeline + from engine.pipeline.core import Stage + + call_order = [] + + class RegularStage(Stage): + name = "regular" + category = "effect" + + def process(self, data, ctx): + call_order.append("regular") + return data + + class OverlayStage(Stage): + name = "overlay" + category = "overlay" + + @property + def is_overlay(self): + return True + + @property + def render_order(self): + return 100 + + def process(self, data, ctx): + call_order.append("overlay") + return data + + pipeline = Pipeline() + pipeline.add_stage("regular", RegularStage()) + pipeline.add_stage("overlay", OverlayStage()) + pipeline.build(auto_inject=False) + + 
pipeline.execute("data") + + assert call_order == ["regular", "overlay"] + + def test_effect_plugin_stage_hud_is_overlay(self): + """EffectPluginStage marks HUD as overlay.""" + from engine.effects.types import EffectConfig, EffectPlugin + from engine.pipeline.adapters import EffectPluginStage + + class HudEffect(EffectPlugin): + name = "hud" + config = EffectConfig(enabled=True) + + def process(self, buf, ctx): + return buf + + def configure(self, config): + pass + + stage = EffectPluginStage(HudEffect(), name="hud") + assert stage.is_overlay is True + assert stage.stage_type == "overlay" + assert stage.render_order == 100 + + def test_effect_plugin_stage_non_hud_not_overlay(self): + """EffectPluginStage marks non-HUD effects as not overlay.""" + from engine.effects.types import EffectConfig, EffectPlugin + from engine.pipeline.adapters import EffectPluginStage + + class FadeEffect(EffectPlugin): + name = "fade" + config = EffectConfig(enabled=True) + + def process(self, buf, ctx): + return buf + + def configure(self, config): + pass + + stage = EffectPluginStage(FadeEffect(), name="fade") + assert stage.is_overlay is False + assert stage.stage_type == "effect" + assert stage.render_order == 0 + + def test_pipeline_get_stage_type(self): + """Pipeline.get_stage_type returns stage_type for a stage.""" + from engine.pipeline.controller import Pipeline + from engine.pipeline.core import Stage + + class TestStage(Stage): + name = "test" + category = "effect" + + @property + def stage_type(self): + return "overlay" + + def process(self, data, ctx): + return data + + pipeline = Pipeline() + pipeline.add_stage("test", TestStage()) + pipeline.build(auto_inject=False) + + assert pipeline.get_stage_type("test") == "overlay" + + def test_pipeline_get_render_order(self): + """Pipeline.get_render_order returns render_order for a stage.""" + from engine.pipeline.controller import Pipeline + from engine.pipeline.core import Stage + + class TestStage(Stage): + name = "test" + 
category = "effect" + + @property + def render_order(self): + return 42 + + def process(self, data, ctx): + return data + + pipeline = Pipeline() + pipeline.add_stage("test", TestStage()) + pipeline.build(auto_inject=False) + + assert pipeline.get_render_order("test") == 42 + + +class TestInletOutletTypeValidation: + """Test type validation between connected stages.""" + + def test_type_mismatch_raises_error(self): + """Type mismatch between stages raises StageError.""" + + class ProducerStage(Stage): + name = "producer" + category = "test" + + @property + def inlet_types(self): + return {DataType.NONE} + + @property + def outlet_types(self): + return {DataType.SOURCE_ITEMS} + + def process(self, data, ctx): + return data + + class ConsumerStage(Stage): + name = "consumer" + category = "test" + + @property + def dependencies(self): + return {"test.producer"} + + @property + def inlet_types(self): + return {DataType.TEXT_BUFFER} # Incompatible! + + @property + def outlet_types(self): + return {DataType.TEXT_BUFFER} + + def process(self, data, ctx): + return data + + pipeline = Pipeline() + pipeline.add_stage("producer", ProducerStage()) + pipeline.add_stage("consumer", ConsumerStage()) + + with pytest.raises(StageError) as exc_info: + pipeline.build(auto_inject=False) + + assert "Type mismatch" in str(exc_info.value) + assert "TEXT_BUFFER" in str(exc_info.value) + assert "SOURCE_ITEMS" in str(exc_info.value) + + def test_compatible_types_pass_validation(self): + """Compatible types pass validation.""" + + class ProducerStage(Stage): + name = "producer" + category = "test" + + @property + def inlet_types(self): + return {DataType.NONE} + + @property + def outlet_types(self): + return {DataType.SOURCE_ITEMS} + + def process(self, data, ctx): + return data + + class ConsumerStage(Stage): + name = "consumer" + category = "test" + + @property + def dependencies(self): + return {"test.producer"} + + @property + def inlet_types(self): + return {DataType.SOURCE_ITEMS} # 
Compatible! + + @property + def outlet_types(self): + return {DataType.TEXT_BUFFER} + + def process(self, data, ctx): + return data + + pipeline = Pipeline() + pipeline.add_stage("producer", ProducerStage()) + pipeline.add_stage("consumer", ConsumerStage()) + + # Should not raise + pipeline.build(auto_inject=False) + + def test_any_type_accepts_everything(self): + """DataType.ANY accepts any upstream type.""" + + class ProducerStage(Stage): + name = "producer" + category = "test" + + @property + def inlet_types(self): + return {DataType.NONE} + + @property + def outlet_types(self): + return {DataType.SOURCE_ITEMS} + + def process(self, data, ctx): + return data + + class ConsumerStage(Stage): + name = "consumer" + category = "test" + + @property + def dependencies(self): + return {"test.producer"} + + @property + def inlet_types(self): + return {DataType.ANY} # Accepts anything + + @property + def outlet_types(self): + return {DataType.TEXT_BUFFER} + + def process(self, data, ctx): + return data + + pipeline = Pipeline() + pipeline.add_stage("producer", ProducerStage()) + pipeline.add_stage("consumer", ConsumerStage()) + + # Should not raise because consumer accepts ANY + pipeline.build(auto_inject=False) + + def test_multiple_compatible_types(self): + """Stage can declare multiple inlet types.""" + + class ProducerStage(Stage): + name = "producer" + category = "test" + + @property + def inlet_types(self): + return {DataType.NONE} + + @property + def outlet_types(self): + return {DataType.SOURCE_ITEMS} + + def process(self, data, ctx): + return data + + class ConsumerStage(Stage): + name = "consumer" + category = "test" + + @property + def dependencies(self): + return {"test.producer"} + + @property + def inlet_types(self): + return {DataType.SOURCE_ITEMS, DataType.TEXT_BUFFER} + + @property + def outlet_types(self): + return {DataType.TEXT_BUFFER} + + def process(self, data, ctx): + return data + + pipeline = Pipeline() + pipeline.add_stage("producer", 
ProducerStage()) + pipeline.add_stage("consumer", ConsumerStage()) + + # Should not raise because consumer accepts SOURCE_ITEMS + pipeline.build(auto_inject=False) + + def test_display_must_accept_text_buffer(self): + """Display stages must accept TEXT_BUFFER type.""" + + class BadDisplayStage(Stage): + name = "display" + category = "display" + + @property + def inlet_types(self): + return {DataType.SOURCE_ITEMS} # Wrong type for display! + + @property + def outlet_types(self): + return {DataType.NONE} + + def process(self, data, ctx): + return data + + pipeline = Pipeline() + pipeline.add_stage("display", BadDisplayStage()) + + with pytest.raises(StageError) as exc_info: + pipeline.build(auto_inject=False) + + assert "display" in str(exc_info.value).lower() + + +class TestPipelineMutation: + """Tests for Pipeline Mutation API - dynamic stage modification.""" + + def setup_method(self): + """Set up test fixtures.""" + StageRegistry._discovered = False + StageRegistry._categories.clear() + StageRegistry._instances.clear() + discover_stages() + + def _create_mock_stage( + self, + name: str = "test", + category: str = "test", + capabilities: set | None = None, + dependencies: set | None = None, + ): + """Helper to create a mock stage.""" + from engine.pipeline.core import DataType + + mock = MagicMock(spec=Stage) + mock.name = name + mock.category = category + mock.stage_type = category + mock.render_order = 0 + mock.is_overlay = False + mock.inlet_types = {DataType.ANY} + mock.outlet_types = {DataType.TEXT_BUFFER} + mock.capabilities = capabilities or {f"{category}.{name}"} + mock.dependencies = dependencies or set() + mock.process = lambda data, ctx: data + mock.init = MagicMock(return_value=True) + mock.cleanup = MagicMock() + mock.is_enabled = MagicMock(return_value=True) + mock.set_enabled = MagicMock() + mock._enabled = True + return mock + + def test_add_stage_initializes_when_pipeline_initialized(self): + """add_stage() initializes stage when pipeline already 
initialized.""" + pipeline = Pipeline() + mock_stage = self._create_mock_stage("test") + pipeline.build(auto_inject=False) + pipeline._initialized = True + + pipeline.add_stage("test", mock_stage, initialize=True) + + mock_stage.init.assert_called_once() + + def test_add_stage_skips_initialize_when_pipeline_not_initialized(self): + """add_stage() skips initialization when pipeline not built.""" + pipeline = Pipeline() + mock_stage = self._create_mock_stage("test") + + pipeline.add_stage("test", mock_stage, initialize=False) + + mock_stage.init.assert_not_called() + + def test_remove_stage_returns_removed_stage(self): + """remove_stage() returns the removed stage.""" + pipeline = Pipeline() + mock_stage = self._create_mock_stage("test") + pipeline.add_stage("test", mock_stage, initialize=False) + + removed = pipeline.remove_stage("test", cleanup=False) + + assert removed is mock_stage + assert "test" not in pipeline.stages + + def test_remove_stage_calls_cleanup_when_requested(self): + """remove_stage() calls cleanup when cleanup=True.""" + pipeline = Pipeline() + mock_stage = self._create_mock_stage("test") + pipeline.add_stage("test", mock_stage, initialize=False) + + pipeline.remove_stage("test", cleanup=True) + + mock_stage.cleanup.assert_called_once() + + def test_remove_stage_skips_cleanup_when_requested(self): + """remove_stage() skips cleanup when cleanup=False.""" + pipeline = Pipeline() + mock_stage = self._create_mock_stage("test") + pipeline.add_stage("test", mock_stage, initialize=False) + + pipeline.remove_stage("test", cleanup=False) + + mock_stage.cleanup.assert_not_called() + + def test_remove_nonexistent_stage_returns_none(self): + """remove_stage() returns None for nonexistent stage.""" + pipeline = Pipeline() + + result = pipeline.remove_stage("nonexistent", cleanup=False) + + assert result is None + + def test_replace_stage_preserves_state(self): + """replace_stage() copies _enabled from old to new stage.""" + pipeline = Pipeline() + old_stage = 
self._create_mock_stage("test") + old_stage._enabled = False + + new_stage = self._create_mock_stage("test") + + pipeline.add_stage("test", old_stage, initialize=False) + pipeline.replace_stage("test", new_stage, preserve_state=True) + + assert new_stage._enabled is False + old_stage.cleanup.assert_called_once() + new_stage.init.assert_called_once() + + def test_replace_stage_without_preserving_state(self): + """replace_stage() without preserve_state doesn't copy state.""" + pipeline = Pipeline() + old_stage = self._create_mock_stage("test") + old_stage._enabled = False + + new_stage = self._create_mock_stage("test") + new_stage._enabled = True + + pipeline.add_stage("test", old_stage, initialize=False) + pipeline.replace_stage("test", new_stage, preserve_state=False) + + assert new_stage._enabled is True + + def test_replace_nonexistent_stage_returns_none(self): + """replace_stage() returns None for nonexistent stage.""" + pipeline = Pipeline() + mock_stage = self._create_mock_stage("test") + + result = pipeline.replace_stage("nonexistent", mock_stage) + + assert result is None + + def test_swap_stages_swaps_stages(self): + """swap_stages() swaps two stages.""" + pipeline = Pipeline() + stage_a = self._create_mock_stage("stage_a", "a") + stage_b = self._create_mock_stage("stage_b", "b") + + pipeline.add_stage("a", stage_a, initialize=False) + pipeline.add_stage("b", stage_b, initialize=False) + + result = pipeline.swap_stages("a", "b") + + assert result is True + assert pipeline.stages["a"].name == "stage_b" + assert pipeline.stages["b"].name == "stage_a" + + def test_swap_stages_fails_for_nonexistent(self): + """swap_stages() fails if either stage doesn't exist.""" + pipeline = Pipeline() + stage = self._create_mock_stage("test") + + pipeline.add_stage("test", stage, initialize=False) + + result = pipeline.swap_stages("test", "nonexistent") + + assert result is False + + def test_move_stage_after(self): + """move_stage() moves stage after another.""" + pipeline = 
Pipeline() + stage_a = self._create_mock_stage("a") + stage_b = self._create_mock_stage("b") + stage_c = self._create_mock_stage("c") + + pipeline.add_stage("a", stage_a, initialize=False) + pipeline.add_stage("b", stage_b, initialize=False) + pipeline.add_stage("c", stage_c, initialize=False) + pipeline.build(auto_inject=False) + + result = pipeline.move_stage("a", after="c") + + assert result is True + idx_a = pipeline.execution_order.index("a") + idx_c = pipeline.execution_order.index("c") + assert idx_a > idx_c + + def test_move_stage_before(self): + """move_stage() moves stage before another.""" + pipeline = Pipeline() + stage_a = self._create_mock_stage("a") + stage_b = self._create_mock_stage("b") + stage_c = self._create_mock_stage("c") + + pipeline.add_stage("a", stage_a, initialize=False) + pipeline.add_stage("b", stage_b, initialize=False) + pipeline.add_stage("c", stage_c, initialize=False) + pipeline.build(auto_inject=False) + + result = pipeline.move_stage("c", before="a") + + assert result is True + idx_a = pipeline.execution_order.index("a") + idx_c = pipeline.execution_order.index("c") + assert idx_c < idx_a + + def test_move_stage_fails_for_nonexistent(self): + """move_stage() fails if stage doesn't exist.""" + pipeline = Pipeline() + stage = self._create_mock_stage("test") + + pipeline.add_stage("test", stage, initialize=False) + pipeline.build(auto_inject=False) + + result = pipeline.move_stage("nonexistent", after="test") + + assert result is False + + def test_move_stage_fails_when_not_initialized(self): + """move_stage() fails if pipeline not built.""" + pipeline = Pipeline() + stage = self._create_mock_stage("test") + + pipeline.add_stage("test", stage, initialize=False) + + result = pipeline.move_stage("test", after="other") + + assert result is False + + def test_enable_stage(self): + """enable_stage() enables a stage.""" + pipeline = Pipeline() + stage = self._create_mock_stage("test") + + pipeline.add_stage("test", stage, 
initialize=False) + + result = pipeline.enable_stage("test") + + assert result is True + stage.set_enabled.assert_called_with(True) + + def test_enable_nonexistent_stage_returns_false(self): + """enable_stage() returns False for nonexistent stage.""" + pipeline = Pipeline() + + result = pipeline.enable_stage("nonexistent") + + assert result is False + + def test_disable_stage(self): + """disable_stage() disables a stage.""" + pipeline = Pipeline() + stage = self._create_mock_stage("test") + + pipeline.add_stage("test", stage, initialize=False) + + result = pipeline.disable_stage("test") + + assert result is True + stage.set_enabled.assert_called_with(False) + + def test_disable_nonexistent_stage_returns_false(self): + """disable_stage() returns False for nonexistent stage.""" + pipeline = Pipeline() + + result = pipeline.disable_stage("nonexistent") + + assert result is False + + def test_get_stage_info_returns_correct_info(self): + """get_stage_info() returns correct stage information.""" + pipeline = Pipeline() + stage = self._create_mock_stage( + "test_stage", + "effect", + capabilities={"effect.test"}, + dependencies={"source"}, + ) + stage.render_order = 5 + stage.is_overlay = False + stage.optional = True + + pipeline.add_stage("test", stage, initialize=False) + + info = pipeline.get_stage_info("test") + + assert info is not None + assert info["name"] == "test" # Dict key, not stage.name + assert info["category"] == "effect" + assert info["stage_type"] == "effect" + assert info["enabled"] is True + assert info["optional"] is True + assert info["capabilities"] == ["effect.test"] + assert info["dependencies"] == ["source"] + assert info["render_order"] == 5 + assert info["is_overlay"] is False + + def test_get_stage_info_returns_none_for_nonexistent(self): + """get_stage_info() returns None for nonexistent stage.""" + pipeline = Pipeline() + + info = pipeline.get_stage_info("nonexistent") + + assert info is None + + def 
test_get_pipeline_info_returns_complete_info(self): + """get_pipeline_info() returns complete pipeline state.""" + pipeline = Pipeline() + stage1 = self._create_mock_stage("stage1") + stage2 = self._create_mock_stage("stage2") + + pipeline.add_stage("s1", stage1, initialize=False) + pipeline.add_stage("s2", stage2, initialize=False) + pipeline.build(auto_inject=False) + + info = pipeline.get_pipeline_info() + + assert "stages" in info + assert "execution_order" in info + assert info["initialized"] is True + assert info["stage_count"] == 2 + assert "s1" in info["stages"] + assert "s2" in info["stages"] + + def test_rebuild_after_mutation(self): + """_rebuild() updates execution order after mutation.""" + pipeline = Pipeline() + source = self._create_mock_stage( + "source", "source", capabilities={"source"}, dependencies=set() + ) + effect = self._create_mock_stage( + "effect", "effect", capabilities={"effect"}, dependencies={"source"} + ) + display = self._create_mock_stage( + "display", "display", capabilities={"display"}, dependencies={"effect"} + ) + + pipeline.add_stage("source", source, initialize=False) + pipeline.add_stage("effect", effect, initialize=False) + pipeline.add_stage("display", display, initialize=False) + pipeline.build(auto_inject=False) + + assert pipeline.execution_order == ["source", "effect", "display"] + + pipeline.remove_stage("effect", cleanup=False) + + pipeline._rebuild() + + assert "effect" not in pipeline.execution_order + assert "source" in pipeline.execution_order + assert "display" in pipeline.execution_order + + def test_add_stage_after_build(self): + """add_stage() can add stage after build with initialization.""" + pipeline = Pipeline() + source = self._create_mock_stage( + "source", "source", capabilities={"source"}, dependencies=set() + ) + display = self._create_mock_stage( + "display", "display", capabilities={"display"}, dependencies={"source"} + ) + + pipeline.add_stage("source", source, initialize=False) + 
pipeline.add_stage("display", display, initialize=False) + pipeline.build(auto_inject=False) + + new_stage = self._create_mock_stage( + "effect", "effect", capabilities={"effect"}, dependencies={"source"} + ) + + pipeline.add_stage("effect", new_stage, initialize=True) + + assert "effect" in pipeline.stages + new_stage.init.assert_called_once() + + def test_mutation_preserves_execution_for_remaining_stages(self): + """Removing a stage doesn't break execution of remaining stages.""" + from engine.pipeline.core import DataType + + call_log = [] + + class TestSource(Stage): + name = "source" + category = "source" + + @property + def inlet_types(self): + return {DataType.NONE} + + @property + def outlet_types(self): + return {DataType.SOURCE_ITEMS} + + @property + def capabilities(self): + return {"source"} + + @property + def dependencies(self): + return set() + + def process(self, data, ctx): + call_log.append("source") + return ["item"] + + class TestEffect(Stage): + name = "effect" + category = "effect" + + @property + def inlet_types(self): + return {DataType.SOURCE_ITEMS} + + @property + def outlet_types(self): + return {DataType.TEXT_BUFFER} + + @property + def capabilities(self): + return {"effect"} + + @property + def dependencies(self): + return {"source"} + + def process(self, data, ctx): + call_log.append("effect") + return data + + class TestDisplay(Stage): + name = "display" + category = "display" + + @property + def inlet_types(self): + return {DataType.TEXT_BUFFER} + + @property + def outlet_types(self): + return {DataType.NONE} + + @property + def capabilities(self): + return {"display"} + + @property + def dependencies(self): + return {"effect"} + + def process(self, data, ctx): + call_log.append("display") + return data + + pipeline = Pipeline() + pipeline.add_stage("source", TestSource(), initialize=False) + pipeline.add_stage("effect", TestEffect(), initialize=False) + pipeline.add_stage("display", TestDisplay(), initialize=False) + 
class TestAutoInjection:
    """Auto-injection of the pipeline's minimum capability set."""

    def setup_method(self):
        """Reset the stage registry so each test starts from a clean slate."""
        StageRegistry._discovered = False
        StageRegistry._categories.clear()
        StageRegistry._instances.clear()
        discover_stages()

    def test_auto_injection_provides_minimum_capabilities(self):
        """An empty pipeline gains the minimum stages via auto-injection."""
        pl = Pipeline()
        pl.build(auto_inject=True)  # deliberately no stages added beforehand

        assert len(pl.stages) > 0
        assert "source" in pl.stages
        assert "display" in pl.stages

    def test_auto_injection_rebuilds_execution_order(self):
        """Injected stages land in a valid execution order."""
        pl = Pipeline()
        pl.build(auto_inject=True)

        order = pl.execution_order
        assert len(order) > 0
        # Source must be scheduled ahead of display.
        assert order.index("source") < order.index("display")

    def test_validation_error_after_auto_injection(self):
        """build() raises StageError when injection cannot satisfy capabilities."""
        from unittest.mock import patch

        pl = Pipeline()

        # Force the failure path: injection "runs" but validation still fails.
        with (
            patch.object(pl, "ensure_minimum_capabilities", return_value=[]),
            patch.object(
                pl,
                "validate_minimum_capabilities",
                return_value=(False, ["source"]),
            ),
        ):
            with pytest.raises(StageError) as exc_info:
                pl.build(auto_inject=True)

        assert "Auto-injection failed" in str(exc_info.value)
provide the capability + # build() should raise StageError + with pytest.raises(StageError) as exc_info: + pipeline.build(auto_inject=True) + + assert "Auto-injection failed" in str(exc_info.value) + + def test_minimum_capability_removal_recovery(self): + """Pipeline re-injects minimum capability if removed.""" + pipeline = Pipeline() + pipeline.build(auto_inject=True) + + # Remove the display stage + pipeline.remove_stage("display", cleanup=True) + + # Rebuild with auto-injection + pipeline.build(auto_inject=True) + + # Display should be back + assert "display" in pipeline.stages diff --git a/tests/test_pipeline_e2e.py b/tests/test_pipeline_e2e.py new file mode 100644 index 0000000..3f3ef2f --- /dev/null +++ b/tests/test_pipeline_e2e.py @@ -0,0 +1,552 @@ +""" +End-to-end pipeline integration tests. + +Verifies that data actually flows through every pipeline stage +(source -> render -> effects -> display) using a queue-backed +stub display to capture output frames. + +These tests catch dead-code paths and wiring bugs that unit tests miss. +""" + +import queue +from unittest.mock import patch + +from engine.data_sources.sources import ListDataSource, SourceItem +from engine.effects import EffectContext +from engine.effects.types import EffectPlugin +from engine.pipeline import Pipeline, PipelineConfig +from engine.pipeline.adapters import ( + DataSourceStage, + DisplayStage, + EffectPluginStage, + FontStage, + SourceItemsToBufferStage, + ViewportFilterStage, +) +from engine.pipeline.core import PipelineContext +from engine.pipeline.params import PipelineParams + +# ─── FIXTURES ──────────────────────────────────────────── + + +class QueueDisplay: + """Stub display that captures every frame into a queue. + + Acts as a FIFO sink so tests can inspect exactly what + the pipeline produced without any terminal or network I/O. 
+ """ + + def __init__(self): + self.frames: queue.Queue[list[str]] = queue.Queue() + self.width = 80 + self.height = 24 + self._init_called = False + + def init(self, width: int, height: int, reuse: bool = False) -> None: + self.width = width + self.height = height + self._init_called = True + + def show(self, buffer: list[str], border: bool = False) -> None: + # Deep copy to prevent later mutations + self.frames.put(list(buffer)) + + def clear(self) -> None: + pass + + def cleanup(self) -> None: + pass + + def get_dimensions(self) -> tuple[int, int]: + return (self.width, self.height) + + +class MarkerEffect(EffectPlugin): + """Effect that prepends a marker line to prove it ran. + + Each MarkerEffect adds a unique tag so tests can verify + which effects executed and in what order. + """ + + def __init__(self, tag: str = "MARKER"): + self._tag = tag + self.call_count = 0 + super().__init__() + + @property + def name(self) -> str: + return f"marker-{self._tag}" + + def configure(self, config: dict) -> None: + pass + + def process(self, buffer: list[str], ctx: EffectContext) -> list[str]: + self.call_count += 1 + if buffer is None: + return [f"[{self._tag}:EMPTY]"] + return [f"[{self._tag}]"] + list(buffer) + + +# ─── HELPERS ───────────────────────────────────────────── + + +def _build_pipeline( + items: list, + effects: list[tuple[str, EffectPlugin]] | None = None, + use_font_stage: bool = False, + width: int = 80, + height: int = 24, +) -> tuple[Pipeline, QueueDisplay, PipelineContext]: + """Build a fully-wired pipeline with a QueueDisplay sink. + + Args: + items: Content items to feed into the source. + effects: Optional list of (name, EffectPlugin) to add. + use_font_stage: Use FontStage instead of SourceItemsToBufferStage. + width: Viewport width. + height: Viewport height. + + Returns: + (pipeline, queue_display, context) tuple. 
+ """ + display = QueueDisplay() + + ctx = PipelineContext() + params = PipelineParams() + params.viewport_width = width + params.viewport_height = height + params.frame_number = 0 + ctx.params = params + ctx.set("items", items) + + pipeline = Pipeline( + config=PipelineConfig(enable_metrics=True), + context=ctx, + ) + + # Source stage + source = ListDataSource(items, name="test-source") + pipeline.add_stage("source", DataSourceStage(source, name="test-source")) + + # Render stage + if use_font_stage: + # FontStage requires viewport_filter stage which requires camera state + from engine.camera import Camera + from engine.pipeline.adapters import CameraClockStage, CameraStage + + camera = Camera.scroll(speed=0.0) + camera.set_canvas_size(200, 200) + + # CameraClockStage updates camera state, must come before viewport_filter + pipeline.add_stage( + "camera_update", CameraClockStage(camera, name="camera-clock") + ) + + # ViewportFilterStage requires camera.state + pipeline.add_stage( + "viewport_filter", ViewportFilterStage(name="viewport-filter") + ) + + # FontStage converts items to buffer + pipeline.add_stage("render", FontStage(name="font")) + + # CameraStage applies viewport transformation to rendered buffer + pipeline.add_stage("camera", CameraStage(camera, name="static")) + else: + pipeline.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer")) + + # Effect stages + if effects: + for effect_name, effect_plugin in effects: + pipeline.add_stage( + f"effect_{effect_name}", + EffectPluginStage(effect_plugin, name=effect_name), + ) + + # Display stage + pipeline.add_stage("display", DisplayStage(display, name="queue")) + + pipeline.build() + pipeline.initialize() + + return pipeline, display, ctx + + +# ─── TESTS: HAPPY PATH ────────────────────────────────── + + +class TestPipelineE2EHappyPath: + """End-to-end: data flows source -> render -> display.""" + + def test_items_reach_display(self): + """Content items fed to source must appear in the 
display output.""" + items = [ + SourceItem(content="Hello World", source="test", timestamp="now"), + SourceItem(content="Second Item", source="test", timestamp="now"), + ] + pipeline, display, ctx = _build_pipeline(items) + + result = pipeline.execute(items) + + assert result.success, f"Pipeline failed: {result.error}" + frame = display.frames.get(timeout=1) + text = "\n".join(frame) + assert "Hello World" in text + assert "Second Item" in text + + def test_pipeline_output_is_list_of_strings(self): + """Display must receive list[str], not raw SourceItems.""" + items = [SourceItem(content="Line one", source="s", timestamp="t")] + pipeline, display, ctx = _build_pipeline(items) + + result = pipeline.execute(items) + + assert result.success + frame = display.frames.get(timeout=1) + assert isinstance(frame, list) + for line in frame: + assert isinstance(line, str), f"Expected str, got {type(line)}: {line!r}" + + def test_multiline_items_are_split(self): + """Items with newlines should be split into individual buffer lines.""" + items = [ + SourceItem(content="Line A\nLine B\nLine C", source="s", timestamp="t") + ] + pipeline, display, ctx = _build_pipeline(items) + + result = pipeline.execute(items) + + assert result.success + frame = display.frames.get(timeout=1) + # Camera stage pads lines to viewport width, so check for substring match + assert any("Line A" in line for line in frame) + assert any("Line B" in line for line in frame) + assert any("Line C" in line for line in frame) + + def test_empty_source_produces_empty_buffer(self): + """An empty source should produce an empty (or blank) frame.""" + items = [] + pipeline, display, ctx = _build_pipeline(items) + + result = pipeline.execute(items) + + assert result.success + frame = display.frames.get(timeout=1) + assert isinstance(frame, list) + + def test_multiple_frames_are_independent(self): + """Each execute() call should produce a distinct frame.""" + items = [SourceItem(content="frame-content", source="s", 
timestamp="t")] + pipeline, display, ctx = _build_pipeline(items) + + pipeline.execute(items) + pipeline.execute(items) + + f1 = display.frames.get(timeout=1) + f2 = display.frames.get(timeout=1) + assert f1 == f2 # Same input => same output + assert display.frames.empty() # Exactly 2 frames + + +# ─── TESTS: EFFECTS IN THE PIPELINE ───────────────────── + + +class TestPipelineE2EEffects: + """End-to-end: effects process the buffer between render and display.""" + + def test_single_effect_modifies_output(self): + """A single effect should visibly modify the output frame.""" + items = [SourceItem(content="Original", source="s", timestamp="t")] + marker = MarkerEffect("FX1") + pipeline, display, ctx = _build_pipeline(items, effects=[("marker", marker)]) + + result = pipeline.execute(items) + + assert result.success + frame = display.frames.get(timeout=1) + # Camera stage pads lines to viewport width, so check for substring match + assert any("[FX1]" in line for line in frame), ( + f"Marker not found in frame: {frame}" + ) + assert "Original" in "\n".join(frame) + + def test_effect_chain_ordering(self): + """Multiple effects execute in the order they were added.""" + items = [SourceItem(content="data", source="s", timestamp="t")] + fx_a = MarkerEffect("A") + fx_b = MarkerEffect("B") + pipeline, display, ctx = _build_pipeline( + items, effects=[("alpha", fx_a), ("beta", fx_b)] + ) + + result = pipeline.execute(items) + + assert result.success + frame = display.frames.get(timeout=1) + text = "\n".join(frame) + # B runs after A, so B's marker is prepended last => appears first + idx_a = text.index("[A]") + idx_b = text.index("[B]") + assert idx_b < idx_a, f"Expected [B] before [A], got: {frame}" + + def test_effect_receives_list_of_strings(self): + """Effects must receive list[str] from the render stage.""" + items = [SourceItem(content="check-type", source="s", timestamp="t")] + received_types = [] + + class TypeCheckEffect(EffectPlugin): + @property + def name(self): + 
return "typecheck" + + def configure(self, config): + pass + + def process(self, buffer, ctx): + received_types.append(type(buffer).__name__) + if isinstance(buffer, list): + for item in buffer: + received_types.append(type(item).__name__) + return buffer + + pipeline, display, ctx = _build_pipeline( + items, effects=[("typecheck", TypeCheckEffect())] + ) + + pipeline.execute(items) + + assert received_types[0] == "list", f"Buffer type: {received_types[0]}" + # All elements should be strings + for t in received_types[1:]: + assert t == "str", f"Buffer element type: {t}" + + def test_disabled_effect_is_skipped(self): + """A disabled effect should not process data.""" + items = [SourceItem(content="data", source="s", timestamp="t")] + marker = MarkerEffect("DISABLED") + pipeline, display, ctx = _build_pipeline( + items, effects=[("disabled-fx", marker)] + ) + + # Disable the effect stage + stage = pipeline.get_stage("effect_disabled-fx") + stage.set_enabled(False) + + result = pipeline.execute(items) + + assert result.success + frame = display.frames.get(timeout=1) + assert "[DISABLED]" not in frame, "Disabled effect should not run" + assert marker.call_count == 0 + + +# ─── TESTS: STAGE EXECUTION ORDER & METRICS ───────────── + + +class TestPipelineE2EStageOrder: + """Verify all stages execute and metrics are collected.""" + + def test_all_stages_appear_in_execution_order(self): + """Pipeline build must include source, render, and display.""" + items = [SourceItem(content="x", source="s", timestamp="t")] + pipeline, display, ctx = _build_pipeline(items) + + order = pipeline.execution_order + assert "source" in order + assert "render" in order + assert "display" in order + + def test_execution_order_is_source_render_display(self): + """Source must come before render, render before display.""" + items = [SourceItem(content="x", source="s", timestamp="t")] + pipeline, display, ctx = _build_pipeline(items) + + order = pipeline.execution_order + assert 
order.index("source") < order.index("render") + assert order.index("render") < order.index("display") + + def test_effects_between_render_and_display(self): + """Effects must execute after render and before display.""" + items = [SourceItem(content="x", source="s", timestamp="t")] + marker = MarkerEffect("MID") + pipeline, display, ctx = _build_pipeline(items, effects=[("mid", marker)]) + + order = pipeline.execution_order + render_idx = order.index("render") + display_idx = order.index("display") + effect_idx = order.index("effect_mid") + assert render_idx < effect_idx < display_idx + + def test_metrics_collected_for_all_stages(self): + """After execution, metrics should exist for every active stage.""" + items = [SourceItem(content="x", source="s", timestamp="t")] + marker = MarkerEffect("M") + pipeline, display, ctx = _build_pipeline(items, effects=[("m", marker)]) + + pipeline.execute(items) + + summary = pipeline.get_metrics_summary() + assert "stages" in summary + stage_names = set(summary["stages"].keys()) + # All regular (non-overlay) stages should have metrics + assert "source" in stage_names + assert "render" in stage_names + assert "queue" in stage_names # Display stage is named "queue" in the test + assert "effect_m" in stage_names + + +# ─── TESTS: FONT STAGE DATAFLOW ───────────────────────── + + +class TestFontStageDataflow: + """Verify FontStage correctly renders content through make_block. + + These tests expose the tuple-unpacking bug in FontStage.process() + where make_block returns (lines, color, meta_idx) but the code + does result.extend(block) instead of result.extend(block[0]). 
+ """ + + def test_font_stage_unpacks_make_block_correctly(self): + """FontStage must produce list[str] output, not mixed types.""" + items = [ + SourceItem(content="Test Headline", source="test-src", timestamp="12345") + ] + + # Mock make_block to return its documented signature + mock_lines = [" RENDERED LINE 1", " RENDERED LINE 2", "", " meta info"] + mock_return = (mock_lines, "\033[38;5;46m", 3) + + with patch("engine.render.make_block", return_value=mock_return): + pipeline, display, ctx = _build_pipeline(items, use_font_stage=True) + + result = pipeline.execute(items) + + assert result.success, f"Pipeline failed: {result.error}" + frame = display.frames.get(timeout=1) + + # Every element in the frame must be a string + for i, line in enumerate(frame): + assert isinstance(line, str), ( + f"Frame line {i} is {type(line).__name__}: {line!r} " + f"(FontStage likely extended with raw tuple)" + ) + + def test_font_stage_output_contains_rendered_content(self): + """FontStage output should contain the rendered lines, not color codes.""" + items = [SourceItem(content="My Headline", source="src", timestamp="0")] + + mock_lines = [" BIG BLOCK TEXT", " MORE TEXT", "", " ░ src · 0"] + mock_return = (mock_lines, "\033[38;5;46m", 3) + + with patch("engine.render.make_block", return_value=mock_return): + pipeline, display, ctx = _build_pipeline(items, use_font_stage=True) + + result = pipeline.execute(items) + + assert result.success + frame = display.frames.get(timeout=1) + text = "\n".join(frame) + assert "BIG BLOCK TEXT" in text + assert "MORE TEXT" in text + + def test_font_stage_does_not_leak_color_codes_as_lines(self): + """The ANSI color code from make_block must NOT appear as a frame line.""" + items = [SourceItem(content="Headline", source="s", timestamp="0")] + + color_code = "\033[38;5;46m" + mock_return = ([" rendered"], color_code, 0) + + with patch("engine.render.make_block", return_value=mock_return): + pipeline, display, ctx = _build_pipeline(items, 
use_font_stage=True) + + result = pipeline.execute(items) + + assert result.success + frame = display.frames.get(timeout=1) + # The color code itself should not be a standalone line + assert color_code not in frame, ( + f"Color code leaked as a frame line: {frame}" + ) + # The meta_row_index (int) should not be a line either + for line in frame: + assert not isinstance(line, int), f"Integer leaked into frame: {line}" + + def test_font_stage_handles_multiple_items(self): + """FontStage should render each item through make_block.""" + items = [ + SourceItem(content="First", source="a", timestamp="1"), + SourceItem(content="Second", source="b", timestamp="2"), + ] + + call_count = 0 + + def mock_make_block(title, src, ts, w): + nonlocal call_count + call_count += 1 + return ([f" [{title}]"], "\033[0m", 0) + + with patch("engine.render.make_block", side_effect=mock_make_block): + pipeline, display, ctx = _build_pipeline(items, use_font_stage=True) + + result = pipeline.execute(items) + + assert result.success + assert call_count == 2, f"make_block called {call_count} times, expected 2" + frame = display.frames.get(timeout=1) + text = "\n".join(frame) + assert "[First]" in text + assert "[Second]" in text + + +# ─── TESTS: MIRROR OF app.py ASSEMBLY ─────────────────── + + +class TestAppPipelineAssembly: + """Verify the pipeline as assembled by app.py works end-to-end. + + This mirrors how run_pipeline_mode() builds the pipeline but + without any network or terminal dependencies. 
+ """ + + def test_demo_preset_pipeline_produces_output(self): + """Simulates the 'demo' preset pipeline with stub data.""" + # Simulate what app.py does for the demo preset + items = [ + ("Breaking: Test passes", "UnitTest", "1234567890"), + ("Update: Coverage improves", "CI", "1234567891"), + ] + + display = QueueDisplay() + ctx = PipelineContext() + params = PipelineParams() + params.viewport_width = 80 + params.viewport_height = 24 + params.frame_number = 0 + ctx.params = params + ctx.set("items", items) + + pipeline = Pipeline( + config=PipelineConfig(enable_metrics=True), + context=ctx, + ) + + # Mirror app.py: ListDataSource -> SourceItemsToBufferStage -> display + source = ListDataSource(items, name="headlines") + pipeline.add_stage("source", DataSourceStage(source, name="headlines")) + pipeline.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer")) + pipeline.add_stage("display", DisplayStage(display, name="queue")) + + pipeline.build() + pipeline.initialize() + + result = pipeline.execute(items) + + assert result.success, f"Pipeline failed: {result.error}" + assert not display.frames.empty(), "Display received no frames" + + frame = display.frames.get(timeout=1) + assert isinstance(frame, list) + assert len(frame) > 0 + # All lines must be strings + for line in frame: + assert isinstance(line, str) diff --git a/tests/test_pipeline_introspection.py b/tests/test_pipeline_introspection.py new file mode 100644 index 0000000..23c6888 --- /dev/null +++ b/tests/test_pipeline_introspection.py @@ -0,0 +1,171 @@ +""" +Tests for PipelineIntrospectionSource. 
from engine.data_sources.pipeline_introspection import PipelineIntrospectionSource


class TestPipelineIntrospectionSource:
    """Tests for PipelineIntrospectionSource."""

    def test_basic_init(self):
        """Source initializes with its documented defaults."""
        src = PipelineIntrospectionSource()
        assert src.name == "pipeline-inspect"
        assert src.is_dynamic is True
        assert src.frame == 0
        assert src.ready is False

    def test_init_with_params(self):
        """Source honors custom viewport parameters."""
        src = PipelineIntrospectionSource(viewport_width=100, viewport_height=40)
        assert src.viewport_width == 100
        assert src.viewport_height == 40

    def test_inlet_outlet_types(self):
        """Source declares a NONE inlet and a SOURCE_ITEMS outlet."""
        from engine.pipeline.core import DataType

        src = PipelineIntrospectionSource()
        assert DataType.NONE in src.inlet_types
        assert DataType.SOURCE_ITEMS in src.outlet_types

    def test_fetch_returns_items(self):
        """fetch() returns a single SourceItem tagged with the source name."""
        src = PipelineIntrospectionSource()
        items = src.fetch()
        assert len(items) == 1
        assert items[0].source == "pipeline-inspect"

    def test_fetch_increments_frame(self):
        """fetch() advances the frame counter once the source is ready."""
        src = PipelineIntrospectionSource()
        assert src.frame == 0

        class FakePipeline:
            # Minimal pipeline surface the source introspects.
            stages = {}
            execution_order = []

            def get_metrics_summary(self):
                return {"avg_ms": 10.0, "fps": 60, "stages": {}}

            def get_frame_times(self):
                return [10.0, 12.0, 11.0]

        # Attaching a pipeline is what flips the source to ready.
        src.set_pipeline(FakePipeline())
        assert src.ready is True

        src.fetch()
        assert src.frame == 1
        src.fetch()
        assert src.frame == 2

    def test_get_items(self):
        """get_items() returns a non-empty list of SourceItems."""
        src = PipelineIntrospectionSource()
        items = src.get_items()
        assert isinstance(items, list)
        assert len(items) > 0
        assert items[0].source == "pipeline-inspect"

    def test_set_pipeline(self):
        """set_pipeline() flips the source into the ready state."""
        src = PipelineIntrospectionSource()
        assert src.ready is False

        class FakePipeline:
            stages = {}
            execution_order = []

            def get_metrics_summary(self):
                return {"avg_ms": 10.0, "fps": 60, "stages": {}}

            def get_frame_times(self):
                return [10.0, 12.0, 11.0]

        src.set_pipeline(FakePipeline())
        assert src.ready is True
test_set_pipeline(self): + """set_pipeline() marks source as ready.""" + source = PipelineIntrospectionSource() + assert source.ready is False + + class MockPipeline: + stages = {} + execution_order = [] + + def get_metrics_summary(self): + return {"avg_ms": 10.0, "fps": 60, "stages": {}} + + def get_frame_times(self): + return [10.0, 12.0, 11.0] + + source.set_pipeline(MockPipeline()) + assert source.ready is True + + +class TestPipelineIntrospectionRender: + """Tests for rendering methods.""" + + def test_render_header_no_pipeline(self): + """_render_header returns default when no pipeline.""" + source = PipelineIntrospectionSource() + lines = source._render_header() + assert len(lines) == 1 + assert "PIPELINE INTROSPECTION" in lines[0] + + def test_render_bar(self): + """_render_bar creates correct bar.""" + source = PipelineIntrospectionSource() + bar = source._render_bar(50, 10) + assert len(bar) == 10 + assert bar.count("█") == 5 + assert bar.count("░") == 5 + + def test_render_bar_zero(self): + """_render_bar handles zero percentage.""" + source = PipelineIntrospectionSource() + bar = source._render_bar(0, 10) + assert bar == "░" * 10 + + def test_render_bar_full(self): + """_render_bar handles 100%.""" + source = PipelineIntrospectionSource() + bar = source._render_bar(100, 10) + assert bar == "█" * 10 + + def test_render_sparkline(self): + """_render_sparkline creates sparkline.""" + source = PipelineIntrospectionSource() + values = [1.0, 2.0, 3.0, 4.0, 5.0] + sparkline = source._render_sparkline(values, 10) + assert len(sparkline) == 10 + + def test_render_sparkline_empty(self): + """_render_sparkline handles empty values.""" + source = PipelineIntrospectionSource() + sparkline = source._render_sparkline([], 10) + assert sparkline == " " * 10 + + def test_render_footer_no_pipeline(self): + """_render_footer shows collecting data when no pipeline.""" + source = PipelineIntrospectionSource() + lines = source._render_footer() + assert len(lines) >= 2 + + 
+class TestPipelineIntrospectionFull: + """Integration tests.""" + + def test_render_empty(self): + """_render works when not ready.""" + source = PipelineIntrospectionSource() + lines = source._render() + assert len(lines) > 0 + assert "PIPELINE INTROSPECTION" in lines[0] + + def test_render_with_mock_pipeline(self): + """_render works with mock pipeline.""" + source = PipelineIntrospectionSource() + + class MockStage: + category = "source" + name = "test" + + class MockPipeline: + stages = {"test": MockStage()} + execution_order = ["test"] + + def get_metrics_summary(self): + return {"stages": {"test": {"avg_ms": 1.5}}, "avg_ms": 2.0, "fps": 60} + + def get_frame_times(self): + return [1.0, 2.0, 3.0] + + source.set_pipeline(MockPipeline()) + lines = source._render() + assert len(lines) > 0 diff --git a/tests/test_pipeline_introspection_demo.py b/tests/test_pipeline_introspection_demo.py new file mode 100644 index 0000000..735f114 --- /dev/null +++ b/tests/test_pipeline_introspection_demo.py @@ -0,0 +1,167 @@ +""" +Tests for PipelineIntrospectionDemo. 
+""" + +from engine.pipeline.pipeline_introspection_demo import ( + DemoConfig, + DemoPhase, + PhaseState, + PipelineIntrospectionDemo, +) + + +class MockPipeline: + """Mock pipeline for testing.""" + + pass + + +class MockEffectConfig: + """Mock effect config.""" + + def __init__(self): + self.enabled = False + self.intensity = 0.5 + + +class MockEffect: + """Mock effect for testing.""" + + def __init__(self, name): + self.name = name + self.config = MockEffectConfig() + + +class MockRegistry: + """Mock effect registry.""" + + def __init__(self, effects): + self._effects = {e.name: e for e in effects} + + def get(self, name): + return self._effects.get(name) + + +class TestDemoPhase: + """Tests for DemoPhase enum.""" + + def test_phases_exist(self): + """All three phases exist.""" + assert DemoPhase.PHASE_1_TOGGLE is not None + assert DemoPhase.PHASE_2_LFO is not None + assert DemoPhase.PHASE_3_SHARED_LFO is not None + + +class TestDemoConfig: + """Tests for DemoConfig.""" + + def test_defaults(self): + """Default config has sensible values.""" + config = DemoConfig() + assert config.effect_cycle_duration == 3.0 + assert config.gap_duration == 1.0 + assert config.lfo_duration == 4.0 + assert config.phase_2_effect_duration == 4.0 + assert config.phase_3_lfo_duration == 6.0 + + +class TestPhaseState: + """Tests for PhaseState.""" + + def test_defaults(self): + """PhaseState initializes correctly.""" + state = PhaseState(phase=DemoPhase.PHASE_1_TOGGLE, start_time=0.0) + assert state.phase == DemoPhase.PHASE_1_TOGGLE + assert state.start_time == 0.0 + assert state.current_effect_index == 0 + + +class TestPipelineIntrospectionDemo: + """Tests for PipelineIntrospectionDemo.""" + + def test_basic_init(self): + """Demo initializes with defaults.""" + demo = PipelineIntrospectionDemo(pipeline=None) + assert demo.phase == DemoPhase.PHASE_1_TOGGLE + assert demo.effect_names == ["noise", "fade", "glitch", "firehose"] + + def test_init_with_custom_effects(self): + """Demo 
initializes with custom effects.""" + demo = PipelineIntrospectionDemo(pipeline=None, effect_names=["noise", "fade"]) + assert demo.effect_names == ["noise", "fade"] + + def test_phase_display(self): + """phase_display returns correct string.""" + demo = PipelineIntrospectionDemo(pipeline=None) + assert "Phase 1" in demo.phase_display + + def test_shared_oscillator_created(self): + """Shared oscillator is created.""" + demo = PipelineIntrospectionDemo(pipeline=None) + assert demo.shared_oscillator is not None + assert demo.shared_oscillator.name == "demo-lfo" + + +class TestPipelineIntrospectionDemoUpdate: + """Tests for update method.""" + + def test_update_returns_dict(self): + """update() returns a dict with expected keys.""" + demo = PipelineIntrospectionDemo(pipeline=None) + result = demo.update() + assert "phase" in result + assert "phase_display" in result + assert "effect_states" in result + + def test_update_phase_1_structure(self): + """Phase 1 has correct structure.""" + demo = PipelineIntrospectionDemo(pipeline=None) + result = demo.update() + assert result["phase"] == "PHASE_1_TOGGLE" + assert "current_effect" in result + + def test_effect_states_structure(self): + """effect_states has correct structure.""" + demo = PipelineIntrospectionDemo(pipeline=None) + result = demo.update() + states = result["effect_states"] + for name in demo.effect_names: + assert name in states + assert "enabled" in states[name] + assert "intensity" in states[name] + + +class TestPipelineIntrospectionDemoPhases: + """Tests for phase transitions.""" + + def test_phase_1_initial(self): + """Starts in phase 1.""" + demo = PipelineIntrospectionDemo(pipeline=None) + assert demo.phase == DemoPhase.PHASE_1_TOGGLE + + def test_shared_oscillator_not_started_initially(self): + """Shared oscillator not started in phase 1.""" + demo = PipelineIntrospectionDemo(pipeline=None) + assert demo.shared_oscillator is not None + # The oscillator.start() is called when transitioning to phase 3 + + 
+class TestPipelineIntrospectionDemoCleanup: + """Tests for cleanup method.""" + + def test_cleanup_no_error(self): + """cleanup() runs without error.""" + demo = PipelineIntrospectionDemo(pipeline=None) + demo.cleanup() # Should not raise + + def test_cleanup_resets_effects(self): + """cleanup() resets effects.""" + demo = PipelineIntrospectionDemo(pipeline=None) + demo._apply_effect_states( + { + "noise": {"enabled": True, "intensity": 1.0}, + "fade": {"enabled": True, "intensity": 1.0}, + } + ) + demo.cleanup() + # If we had a mock registry, we could verify effects were reset diff --git a/tests/test_pipeline_metrics_sensor.py b/tests/test_pipeline_metrics_sensor.py new file mode 100644 index 0000000..8af380b --- /dev/null +++ b/tests/test_pipeline_metrics_sensor.py @@ -0,0 +1,113 @@ +""" +Tests for PipelineMetricsSensor. +""" + +from engine.sensors.pipeline_metrics import PipelineMetricsSensor + + +class MockPipeline: + """Mock pipeline for testing.""" + + def __init__(self, metrics=None): + self._metrics = metrics or {} + + def get_metrics_summary(self): + return self._metrics + + +class TestPipelineMetricsSensor: + """Tests for PipelineMetricsSensor.""" + + def test_basic_init(self): + """Sensor initializes with defaults.""" + sensor = PipelineMetricsSensor() + assert sensor.name == "pipeline" + assert sensor.available is False + + def test_init_with_pipeline(self): + """Sensor initializes with pipeline.""" + mock = MockPipeline() + sensor = PipelineMetricsSensor(mock) + assert sensor.available is True + + def test_set_pipeline(self): + """set_pipeline() updates pipeline.""" + sensor = PipelineMetricsSensor() + assert sensor.available is False + sensor.set_pipeline(MockPipeline()) + assert sensor.available is True + + def test_read_no_pipeline(self): + """read() returns None when no pipeline.""" + sensor = PipelineMetricsSensor() + assert sensor.read() is None + + def test_read_with_metrics(self): + """read() returns sensor value with metrics.""" + mock = 
MockPipeline( + { + "total_ms": 18.5, + "fps": 54.0, + "avg_ms": 18.5, + "min_ms": 15.0, + "max_ms": 22.0, + "stages": {"render": {"avg_ms": 12.0}, "noise": {"avg_ms": 3.0}}, + } + ) + sensor = PipelineMetricsSensor(mock) + val = sensor.read() + assert val is not None + assert val.sensor_name == "pipeline" + assert val.value == 18.5 + + def test_read_with_error(self): + """read() returns None when metrics have error.""" + mock = MockPipeline({"error": "No metrics collected"}) + sensor = PipelineMetricsSensor(mock) + assert sensor.read() is None + + def test_get_stage_timing(self): + """get_stage_timing() returns stage timing.""" + mock = MockPipeline( + { + "stages": {"render": {"avg_ms": 12.0}, "noise": {"avg_ms": 3.0}}, + } + ) + sensor = PipelineMetricsSensor(mock) + assert sensor.get_stage_timing("render") == 12.0 + assert sensor.get_stage_timing("noise") == 3.0 + assert sensor.get_stage_timing("nonexistent") == 0.0 + + def test_get_stage_timing_no_pipeline(self): + """get_stage_timing() returns 0 when no pipeline.""" + sensor = PipelineMetricsSensor() + assert sensor.get_stage_timing("test") == 0.0 + + def test_get_all_timings(self): + """get_all_timings() returns all stage timings.""" + mock = MockPipeline( + { + "stages": {"render": {"avg_ms": 12.0}, "noise": {"avg_ms": 3.0}}, + } + ) + sensor = PipelineMetricsSensor(mock) + timings = sensor.get_all_timings() + assert timings == {"render": {"avg_ms": 12.0}, "noise": {"avg_ms": 3.0}} + + def test_get_frame_history(self): + """get_frame_history() returns frame times.""" + MockPipeline() + + class MockPipelineWithFrames: + def get_frame_times(self): + return [1.0, 2.0, 3.0] + + sensor = PipelineMetricsSensor(MockPipelineWithFrames()) + history = sensor.get_frame_history() + assert history == [1.0, 2.0, 3.0] + + def test_start_stop(self): + """start() and stop() work.""" + sensor = PipelineMetricsSensor() + assert sensor.start() is True + sensor.stop() # Should not raise diff --git 
a/tests/test_pipeline_mutation_commands.py b/tests/test_pipeline_mutation_commands.py new file mode 100644 index 0000000..11e3e0d --- /dev/null +++ b/tests/test_pipeline_mutation_commands.py @@ -0,0 +1,259 @@ +""" +Integration tests for pipeline mutation commands via WebSocket/UI panel. + +Tests the mutation API through the command interface. +""" + +from unittest.mock import Mock + +from engine.app.pipeline_runner import _handle_pipeline_mutation +from engine.pipeline import Pipeline +from engine.pipeline.ui import UIConfig, UIPanel + + +class TestPipelineMutationCommands: + """Test pipeline mutation commands through the mutation API.""" + + def test_can_hot_swap_existing_stage(self): + """Test can_hot_swap returns True for existing, non-critical stage.""" + pipeline = Pipeline() + + # Add a test stage + mock_stage = Mock() + mock_stage.capabilities = {"test_capability"} + pipeline.add_stage("test_stage", mock_stage) + pipeline._capability_map = {"test_capability": ["test_stage"]} + + # Test that we can check hot-swap capability + result = pipeline.can_hot_swap("test_stage") + assert result is True + + def test_can_hot_swap_nonexistent_stage(self): + """Test can_hot_swap returns False for non-existent stage.""" + pipeline = Pipeline() + result = pipeline.can_hot_swap("nonexistent_stage") + assert result is False + + def test_can_hot_swap_minimum_capability(self): + """Test can_hot_swap with minimum capability stage.""" + pipeline = Pipeline() + + # Add a source stage (minimum capability) + mock_stage = Mock() + mock_stage.capabilities = {"source"} + pipeline.add_stage("source", mock_stage) + pipeline._capability_map = {"source": ["source"]} + + # Initialize pipeline to trigger capability validation + pipeline._initialized = True + + # Source is the only provider of minimum capability + result = pipeline.can_hot_swap("source") + # Should be False because it's the sole provider of a minimum capability + assert result is False + + def test_cleanup_stage(self): + 
"""Test cleanup_stage calls cleanup on specific stage.""" + pipeline = Pipeline() + + # Add a stage with a mock cleanup method + mock_stage = Mock() + pipeline.add_stage("test_stage", mock_stage) + + # Cleanup the specific stage + pipeline.cleanup_stage("test_stage") + + # Verify cleanup was called + mock_stage.cleanup.assert_called_once() + + def test_cleanup_stage_nonexistent(self): + """Test cleanup_stage on non-existent stage doesn't crash.""" + pipeline = Pipeline() + pipeline.cleanup_stage("nonexistent_stage") + # Should not raise an exception + + def test_remove_stage_rebuilds_execution_order(self): + """Test that remove_stage rebuilds execution order.""" + pipeline = Pipeline() + + # Add two independent stages + stage1 = Mock() + stage1.capabilities = {"source"} + stage1.dependencies = set() + stage1.stage_dependencies = [] # Add empty list for stage dependencies + + stage2 = Mock() + stage2.capabilities = {"render.output"} + stage2.dependencies = set() # No dependencies + stage2.stage_dependencies = [] # No stage dependencies + + pipeline.add_stage("stage1", stage1) + pipeline.add_stage("stage2", stage2) + + # Build pipeline to establish execution order + pipeline._initialized = True + pipeline._capability_map = {"source": ["stage1"], "render.output": ["stage2"]} + pipeline._execution_order = ["stage1", "stage2"] + + # Remove stage1 + pipeline.remove_stage("stage1") + + # Verify execution order was rebuilt + assert "stage1" not in pipeline._execution_order + assert "stage2" in pipeline._execution_order + + def test_handle_pipeline_mutation_remove_stage(self): + """Test _handle_pipeline_mutation with remove_stage command.""" + pipeline = Pipeline() + + # Add a mock stage + mock_stage = Mock() + pipeline.add_stage("test_stage", mock_stage) + + # Create remove command + command = {"action": "remove_stage", "stage": "test_stage"} + + # Handle the mutation + result = _handle_pipeline_mutation(pipeline, command) + + # Verify it was handled and stage was removed 
+ assert result is True + assert "test_stage" not in pipeline._stages + + def test_handle_pipeline_mutation_swap_stages(self): + """Test _handle_pipeline_mutation with swap_stages command.""" + pipeline = Pipeline() + + # Add two mock stages + stage1 = Mock() + stage2 = Mock() + pipeline.add_stage("stage1", stage1) + pipeline.add_stage("stage2", stage2) + + # Create swap command + command = {"action": "swap_stages", "stage1": "stage1", "stage2": "stage2"} + + # Handle the mutation + result = _handle_pipeline_mutation(pipeline, command) + + # Verify it was handled + assert result is True + + def test_handle_pipeline_mutation_enable_stage(self): + """Test _handle_pipeline_mutation with enable_stage command.""" + pipeline = Pipeline() + + # Add a mock stage with set_enabled method + mock_stage = Mock() + mock_stage.set_enabled = Mock() + pipeline.add_stage("test_stage", mock_stage) + + # Create enable command + command = {"action": "enable_stage", "stage": "test_stage"} + + # Handle the mutation + result = _handle_pipeline_mutation(pipeline, command) + + # Verify it was handled + assert result is True + mock_stage.set_enabled.assert_called_once_with(True) + + def test_handle_pipeline_mutation_disable_stage(self): + """Test _handle_pipeline_mutation with disable_stage command.""" + pipeline = Pipeline() + + # Add a mock stage with set_enabled method + mock_stage = Mock() + mock_stage.set_enabled = Mock() + pipeline.add_stage("test_stage", mock_stage) + + # Create disable command + command = {"action": "disable_stage", "stage": "test_stage"} + + # Handle the mutation + result = _handle_pipeline_mutation(pipeline, command) + + # Verify it was handled + assert result is True + mock_stage.set_enabled.assert_called_once_with(False) + + def test_handle_pipeline_mutation_cleanup_stage(self): + """Test _handle_pipeline_mutation with cleanup_stage command.""" + pipeline = Pipeline() + + # Add a mock stage + mock_stage = Mock() + pipeline.add_stage("test_stage", mock_stage) + + 
# Create cleanup command + command = {"action": "cleanup_stage", "stage": "test_stage"} + + # Handle the mutation + result = _handle_pipeline_mutation(pipeline, command) + + # Verify it was handled and cleanup was called + assert result is True + mock_stage.cleanup.assert_called_once() + + def test_handle_pipeline_mutation_can_hot_swap(self): + """Test _handle_pipeline_mutation with can_hot_swap command.""" + pipeline = Pipeline() + + # Add a mock stage + mock_stage = Mock() + mock_stage.capabilities = {"test"} + pipeline.add_stage("test_stage", mock_stage) + pipeline._capability_map = {"test": ["test_stage"]} + + # Create can_hot_swap command + command = {"action": "can_hot_swap", "stage": "test_stage"} + + # Handle the mutation + result = _handle_pipeline_mutation(pipeline, command) + + # Verify it was handled + assert result is True + + def test_handle_pipeline_mutation_move_stage(self): + """Test _handle_pipeline_mutation with move_stage command.""" + pipeline = Pipeline() + + # Add two mock stages + stage1 = Mock() + stage2 = Mock() + pipeline.add_stage("stage1", stage1) + pipeline.add_stage("stage2", stage2) + + # Initialize execution order + pipeline._execution_order = ["stage1", "stage2"] + + # Create move command to move stage1 after stage2 + command = {"action": "move_stage", "stage": "stage1", "after": "stage2"} + + # Handle the mutation + result = _handle_pipeline_mutation(pipeline, command) + + # Verify it was handled (result might be True or False depending on validation) + # The key is that the command was processed + assert result in (True, False) + + def test_ui_panel_execute_command_mutation_actions(self): + """Test UI panel execute_command with mutation actions.""" + ui_panel = UIPanel(UIConfig()) + + # Test that mutation actions return False (not handled by UI panel) + # These should be handled by the WebSocket command handler instead + mutation_actions = [ + {"action": "remove_stage", "stage": "test"}, + {"action": "swap_stages", "stage1": "a", 
"stage2": "b"}, + {"action": "enable_stage", "stage": "test"}, + {"action": "disable_stage", "stage": "test"}, + {"action": "cleanup_stage", "stage": "test"}, + {"action": "can_hot_swap", "stage": "test"}, + ] + + for command in mutation_actions: + result = ui_panel.execute_command(command) + assert result is False, ( + f"Mutation action {command['action']} should not be handled by UI panel" + ) diff --git a/tests/test_pipeline_rebuild.py b/tests/test_pipeline_rebuild.py new file mode 100644 index 0000000..dd62590 --- /dev/null +++ b/tests/test_pipeline_rebuild.py @@ -0,0 +1,405 @@ +""" +Integration tests for pipeline hot-rebuild and state preservation. + +Tests: +1. Viewport size control via --viewport flag +2. NullDisplay recording and save/load functionality +3. Pipeline state preservation during hot-rebuild +""" + +import json +import sys +import tempfile +from pathlib import Path + +import pytest + +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from engine.display import DisplayRegistry +from engine.display.backends.null import NullDisplay +from engine.display.backends.replay import ReplayDisplay +from engine.effects import get_registry +from engine.fetch import load_cache +from engine.pipeline import Pipeline, PipelineConfig, PipelineContext +from engine.pipeline.adapters import ( + EffectPluginStage, + FontStage, + ViewportFilterStage, + create_stage_from_display, + create_stage_from_effect, +) +from engine.pipeline.params import PipelineParams + + +@pytest.fixture +def viewport_dims(): + """Small viewport dimensions for testing.""" + return (40, 15) + + +@pytest.fixture +def items(): + """Load cached source items.""" + items = load_cache() + if not items: + pytest.skip("No fixture cache available") + return items + + +@pytest.fixture +def null_display(viewport_dims): + """Create a NullDisplay for testing.""" + display = DisplayRegistry.create("null") + display.init(viewport_dims[0], viewport_dims[1]) + return display + + +@pytest.fixture +def 
pipeline_with_null_display(items, null_display): + """Create a pipeline with NullDisplay for testing.""" + import engine.effects.plugins as effects_plugins + + effects_plugins.discover_plugins() + + width, height = null_display.width, null_display.height + params = PipelineParams() + params.viewport_width = width + params.viewport_height = height + + config = PipelineConfig( + source="fixture", + display="null", + camera="scroll", + effects=["noise", "fade"], + ) + + pipeline = Pipeline(config=config, context=PipelineContext()) + + from engine.camera import Camera + from engine.data_sources.sources import ListDataSource + from engine.pipeline.adapters import CameraClockStage, CameraStage, DataSourceStage + + list_source = ListDataSource(items, name="fixture") + pipeline.add_stage("source", DataSourceStage(list_source, name="fixture")) + + # Add camera stages (required by ViewportFilterStage) + camera = Camera.scroll(speed=0.3) + camera.set_canvas_size(200, 200) + pipeline.add_stage("camera_update", CameraClockStage(camera, name="camera-clock")) + pipeline.add_stage("camera", CameraStage(camera, name="scroll")) + + pipeline.add_stage("viewport_filter", ViewportFilterStage(name="viewport-filter")) + pipeline.add_stage("font", FontStage(name="font")) + + effect_registry = get_registry() + for effect_name in config.effects: + effect = effect_registry.get(effect_name) + if effect: + pipeline.add_stage( + f"effect_{effect_name}", + create_stage_from_effect(effect, effect_name), + ) + + pipeline.add_stage("display", create_stage_from_display(null_display, "null")) + pipeline.build() + + if not pipeline.initialize(): + pytest.fail("Failed to initialize pipeline") + + ctx = pipeline.context + ctx.params = params + ctx.set("display", null_display) + ctx.set("items", items) + ctx.set("pipeline", pipeline) + ctx.set("pipeline_order", pipeline.execution_order) + ctx.set("camera_y", 0) + + yield pipeline, params, null_display + + pipeline.cleanup() + null_display.cleanup() + + 
class TestNullDisplayRecording:
    """Tests for NullDisplay recording functionality."""

    def test_null_display_initialization(self, viewport_dims):
        """NullDisplay initializes with correct dimensions."""
        display = NullDisplay()
        display.init(viewport_dims[0], viewport_dims[1])
        assert display.width == viewport_dims[0]
        assert display.height == viewport_dims[1]

    def test_start_stop_recording(self, null_display):
        """NullDisplay can start and stop recording."""
        assert not null_display._is_recording

        null_display.start_recording()
        assert null_display._is_recording is True

        null_display.stop_recording()
        assert null_display._is_recording is False

    def test_record_frames(self, null_display, pipeline_with_null_display):
        """NullDisplay records frames when recording is enabled."""
        pipeline, params, display = pipeline_with_null_display

        display.start_recording()
        assert len(display._recorded_frames) == 0

        for frame in range(5):
            params.frame_number = frame
            pipeline.context.params = params
            pipeline.execute([])

        assert len(display._recorded_frames) == 5

    def test_get_frames(self, null_display, pipeline_with_null_display):
        """NullDisplay.get_frames() returns recorded buffers."""
        pipeline, params, display = pipeline_with_null_display

        display.start_recording()

        for frame in range(3):
            params.frame_number = frame
            pipeline.context.params = params
            pipeline.execute([])

        frames = display.get_frames()
        assert len(frames) == 3
        assert all(isinstance(f, list) for f in frames)

    def test_clear_recording(self, null_display, pipeline_with_null_display):
        """NullDisplay.clear_recording() clears recorded frames."""
        pipeline, params, display = pipeline_with_null_display

        display.start_recording()
        for frame in range(3):
            params.frame_number = frame
            pipeline.context.params = params
            pipeline.execute([])

        assert len(display._recorded_frames) == 3

        display.clear_recording()
        assert len(display._recorded_frames) == 0

    def test_save_load_recording(self, null_display, pipeline_with_null_display):
        """NullDisplay can save and load recordings."""
        pipeline, params, display = pipeline_with_null_display

        display.start_recording()
        for frame in range(3):
            params.frame_number = frame
            pipeline.context.params = params
            pipeline.execute([])

        # Only reserve a temp file name; save_recording writes the content.
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
            temp_path = f.name

        try:
            display.save_recording(temp_path)

            with open(temp_path) as f:
                data = json.load(f)

            assert data["version"] == 1
            assert data["display"] == "null"
            assert data["frame_count"] == 3
            assert len(data["frames"]) == 3

            display2 = NullDisplay()
            display2.load_recording(temp_path)
            assert len(display2._recorded_frames) == 3

        finally:
            Path(temp_path).unlink(missing_ok=True)


class TestReplayDisplay:
    """Tests for ReplayDisplay functionality."""

    def test_replay_display_initialization(self, viewport_dims):
        """ReplayDisplay initializes correctly."""
        display = ReplayDisplay()
        display.init(viewport_dims[0], viewport_dims[1])
        assert display.width == viewport_dims[0]
        assert display.height == viewport_dims[1]

    def test_set_and_get_frames(self):
        """ReplayDisplay can set and retrieve frames; None when exhausted."""
        display = ReplayDisplay()
        frames = [
            {"buffer": ["line1", "line2"], "width": 40, "height": 15},
            {"buffer": ["line3", "line4"], "width": 40, "height": 15},
        ]
        display.set_frames(frames)

        assert display.get_next_frame() == ["line1", "line2"]
        assert display.get_next_frame() == ["line3", "line4"]
        assert display.get_next_frame() is None

    def test_replay_loop_mode(self):
        """ReplayDisplay can loop playback."""
        display = ReplayDisplay()
        display.set_loop(True)
        frames = [
            {"buffer": ["frame1"], "width": 40, "height": 15},
            {"buffer": ["frame2"], "width": 40, "height": 15},
        ]
        display.set_frames(frames)

        assert display.get_next_frame() == ["frame1"]
        assert display.get_next_frame() == ["frame2"]
        assert display.get_next_frame() == ["frame1"]
        assert display.get_next_frame() == ["frame2"]

    def test_replay_seek_and_reset(self):
        """ReplayDisplay supports seek and reset."""
        display = ReplayDisplay()
        frames = [
            {"buffer": [f"frame{i}"], "width": 40, "height": 15} for i in range(5)
        ]
        display.set_frames(frames)

        display.seek(3)
        assert display.get_next_frame() == ["frame3"]

        display.reset()
        assert display.get_next_frame() == ["frame0"]


class TestPipelineHotRebuild:
    """Tests for pipeline hot-rebuild and state preservation."""

    def test_pipeline_runs_with_null_display(self, pipeline_with_null_display):
        """Pipeline executes successfully with NullDisplay."""
        pipeline, params, display = pipeline_with_null_display

        for frame in range(5):
            params.frame_number = frame
            pipeline.context.params = params
            result = pipeline.execute([])

        assert result.success
        assert display._last_buffer is not None

    def test_effect_toggle_during_execution(self, pipeline_with_null_display):
        """Effects can be toggled during pipeline execution."""
        pipeline, params, display = pipeline_with_null_display

        params.frame_number = 0
        pipeline.context.params = params
        pipeline.execute([])
        buffer1 = display._last_buffer

        fade_stage = pipeline.get_stage("effect_fade")
        assert fade_stage is not None
        assert isinstance(fade_stage, EffectPluginStage)

        # Disable both the stage and the underlying effect plugin.
        fade_stage._enabled = False
        fade_stage._effect.config.enabled = False

        params.frame_number = 1
        pipeline.context.params = params
        pipeline.execute([])
        buffer2 = display._last_buffer

        assert buffer1 != buffer2

    def test_state_preservation_across_rebuild(self, pipeline_with_null_display):
        """Pipeline state is preserved across hot-rebuild events."""
        pipeline, params, display = pipeline_with_null_display

        for frame in range(5):
            params.frame_number = frame
            pipeline.context.params = params
            pipeline.execute([])

        camera_y_before = pipeline.context.get("camera_y")

        # Simulate a rebuild-style mutation: flip the fade effect.
        fade_stage = pipeline.get_stage("effect_fade")
        if fade_stage and isinstance(fade_stage, EffectPluginStage):
            fade_stage.set_enabled(not fade_stage.is_enabled())
            fade_stage._effect.config.enabled = fade_stage.is_enabled()

        params.frame_number = 5
        pipeline.context.params = params
        pipeline.execute([])

        # BUG FIX: the post-mutation camera position used to be fetched and
        # discarded; capture it so the assertion actually checks preservation.
        camera_y_after = pipeline.context.get("camera_y")

        assert camera_y_before is not None
        assert camera_y_after is not None


class TestViewportControl:
    """Tests for viewport size control."""

    def test_viewport_dimensions_applied(self, items):
        """Viewport dimensions are correctly applied to pipeline."""
        width, height = 40, 15

        display = DisplayRegistry.create("null")
        display.init(width, height)

        params = PipelineParams()
        params.viewport_width = width
        params.viewport_height = height

        config = PipelineConfig(
            source="fixture",
            display="null",
            camera="scroll",
            effects=[],
        )

        pipeline = Pipeline(config=config, context=PipelineContext())

        from engine.camera import Camera
        from engine.data_sources.sources import ListDataSource
        from engine.pipeline.adapters import (
            CameraClockStage,
            CameraStage,
            DataSourceStage,
        )

        list_source = ListDataSource(items, name="fixture")
        pipeline.add_stage("source", DataSourceStage(list_source, name="fixture"))

        # Add camera stages (required by ViewportFilterStage)
        camera = Camera.scroll(speed=0.3)
        camera.set_canvas_size(200, 200)
        pipeline.add_stage(
            "camera_update", CameraClockStage(camera, name="camera-clock")
        )
        pipeline.add_stage("camera", CameraStage(camera, name="scroll"))

        pipeline.add_stage(
            "viewport_filter", ViewportFilterStage(name="viewport-filter")
        )
        pipeline.add_stage("font", FontStage(name="font"))
        pipeline.add_stage("display", create_stage_from_display(display, "null"))
        pipeline.build()

        assert pipeline.initialize()

        ctx = pipeline.context
        ctx.params = params
ctx.set("display", display) + ctx.set("items", items) + ctx.set("pipeline", pipeline) + ctx.set("camera_y", 0) + + result = pipeline.execute(items) + + assert result.success + assert display._last_buffer is not None + + pipeline.cleanup() + display.cleanup() diff --git a/tests/test_render.py b/tests/test_render.py deleted file mode 100644 index 20eb63e..0000000 --- a/tests/test_render.py +++ /dev/null @@ -1,301 +0,0 @@ -""" -Tests for engine.render module. -""" - -import pytest - -from engine import config, render - - -class TestDefaultGradients: - """Tests for default gradient fallback functions.""" - - def test_default_green_gradient_length(self): - """_default_green_gradient returns 12 colors.""" - gradient = render._default_green_gradient() - assert len(gradient) == 12 - - def test_default_green_gradient_is_list(self): - """_default_green_gradient returns a list.""" - gradient = render._default_green_gradient() - assert isinstance(gradient, list) - - def test_default_green_gradient_all_strings(self): - """_default_green_gradient returns list of ANSI code strings.""" - gradient = render._default_green_gradient() - assert all(isinstance(code, str) for code in gradient) - - def test_default_magenta_gradient_length(self): - """_default_magenta_gradient returns 12 colors.""" - gradient = render._default_magenta_gradient() - assert len(gradient) == 12 - - def test_default_magenta_gradient_is_list(self): - """_default_magenta_gradient returns a list.""" - gradient = render._default_magenta_gradient() - assert isinstance(gradient, list) - - def test_default_magenta_gradient_all_strings(self): - """_default_magenta_gradient returns list of ANSI code strings.""" - gradient = render._default_magenta_gradient() - assert all(isinstance(code, str) for code in gradient) - - -class TestLrGradientUsesActiveTheme: - """Tests for lr_gradient using active theme.""" - - def test_lr_gradient_uses_active_theme_when_cols_none(self): - """lr_gradient uses ACTIVE_THEME.main_gradient 
when cols=None.""" - # Save original state - original_theme = config.ACTIVE_THEME - - try: - # Set a theme - config.set_active_theme("green") - - # Create simple test data - rows = ["text"] - - # Call without cols parameter (cols=None) - result = render.lr_gradient(rows, offset=0.0) - - # Should not raise and should return colored output - assert isinstance(result, list) - assert len(result) == 1 - # Should have ANSI codes (no plain "text") - assert result[0] != "text" - finally: - # Restore original state - config.ACTIVE_THEME = original_theme - - def test_lr_gradient_fallback_when_no_theme(self): - """lr_gradient uses fallback green when ACTIVE_THEME is None.""" - # Save original state - original_theme = config.ACTIVE_THEME - - try: - # Clear the theme - config.ACTIVE_THEME = None - - # Create simple test data - rows = ["text"] - - # Call without cols parameter (should use fallback) - result = render.lr_gradient(rows, offset=0.0) - - # Should not raise and should return colored output - assert isinstance(result, list) - assert len(result) == 1 - # Should have ANSI codes (no plain "text") - assert result[0] != "text" - finally: - # Restore original state - config.ACTIVE_THEME = original_theme - - def test_lr_gradient_explicit_cols_parameter_still_works(self): - """lr_gradient with explicit cols parameter overrides theme.""" - # Custom gradient - custom_cols = ["\033[38;5;1m", "\033[38;5;2m"] * 6 - - rows = ["xy"] - result = render.lr_gradient(rows, offset=0.0, cols=custom_cols) - - # Should use the provided cols - assert isinstance(result, list) - assert len(result) == 1 - - def test_lr_gradient_respects_cols_parameter_name(self): - """lr_gradient accepts cols as keyword argument.""" - custom_cols = ["\033[38;5;1m", "\033[38;5;2m"] * 6 - - rows = ["xy"] - # Call with cols as keyword - result = render.lr_gradient(rows, offset=0.0, cols=custom_cols) - - assert isinstance(result, list) - - -class TestLrGradientBasicFunctionality: - """Tests to ensure lr_gradient 
basic functionality still works.""" - - def test_lr_gradient_colors_non_space_chars(self): - """lr_gradient colors non-space characters.""" - rows = ["hello"] - - # Set a theme for the test - original_theme = config.ACTIVE_THEME - try: - config.set_active_theme("green") - result = render.lr_gradient(rows, offset=0.0) - - # Result should have ANSI codes - assert any("\033[" in r for r in result), "Expected ANSI codes in result" - finally: - config.ACTIVE_THEME = original_theme - - def test_lr_gradient_preserves_spaces(self): - """lr_gradient preserves spaces in output.""" - rows = ["a b c"] - - original_theme = config.ACTIVE_THEME - try: - config.set_active_theme("green") - result = render.lr_gradient(rows, offset=0.0) - - # Spaces should be preserved (not colored) - assert " " in result[0] - finally: - config.ACTIVE_THEME = original_theme - - def test_lr_gradient_empty_rows(self): - """lr_gradient handles empty rows correctly.""" - rows = [""] - - original_theme = config.ACTIVE_THEME - try: - config.set_active_theme("green") - result = render.lr_gradient(rows, offset=0.0) - - assert result == [""] - finally: - config.ACTIVE_THEME = original_theme - - def test_lr_gradient_multiple_rows(self): - """lr_gradient handles multiple rows.""" - rows = ["row1", "row2", "row3"] - - original_theme = config.ACTIVE_THEME - try: - config.set_active_theme("green") - result = render.lr_gradient(rows, offset=0.0) - - assert len(result) == 3 - finally: - config.ACTIVE_THEME = original_theme - - -class TestMsgGradient: - """Tests for msg_gradient function (message/ntfy overlay coloring).""" - - def test_msg_gradient_uses_active_theme(self): - """msg_gradient uses ACTIVE_THEME.message_gradient when theme is set.""" - # Save original state - original_theme = config.ACTIVE_THEME - - try: - # Set a theme - config.set_active_theme("green") - - # Create simple test data - rows = ["MESSAGE"] - - # Call msg_gradient - result = render.msg_gradient(rows, offset=0.0) - - # Should return colored 
output using theme's message_gradient - assert isinstance(result, list) - assert len(result) == 1 - # Should have ANSI codes from the message gradient - assert result[0] != "MESSAGE" - assert "\033[" in result[0] - finally: - # Restore original state - config.ACTIVE_THEME = original_theme - - def test_msg_gradient_fallback_when_no_theme(self): - """msg_gradient uses fallback magenta when ACTIVE_THEME is None.""" - # Save original state - original_theme = config.ACTIVE_THEME - - try: - # Clear the theme - config.ACTIVE_THEME = None - - # Create simple test data - rows = ["MESSAGE"] - - # Call msg_gradient - result = render.msg_gradient(rows, offset=0.0) - - # Should return colored output using default magenta - assert isinstance(result, list) - assert len(result) == 1 - # Should have ANSI codes - assert result[0] != "MESSAGE" - assert "\033[" in result[0] - finally: - # Restore original state - config.ACTIVE_THEME = original_theme - - def test_msg_gradient_returns_colored_rows(self): - """msg_gradient returns properly colored rows with animation offset.""" - # Save original state - original_theme = config.ACTIVE_THEME - - try: - # Set a theme - config.set_active_theme("orange") - - rows = ["NTFY", "ALERT"] - - # Call with offset - result = render.msg_gradient(rows, offset=0.5) - - # Should return same number of rows - assert len(result) == 2 - # Both should be colored - assert all("\033[" in r for r in result) - # Should not be the original text - assert result != rows - finally: - config.ACTIVE_THEME = original_theme - - def test_msg_gradient_different_themes_produce_different_results(self): - """msg_gradient produces different colors for different themes.""" - original_theme = config.ACTIVE_THEME - - try: - rows = ["TEST"] - - # Get result with green theme - config.set_active_theme("green") - result_green = render.msg_gradient(rows, offset=0.0) - - # Get result with orange theme - config.set_active_theme("orange") - result_orange = render.msg_gradient(rows, 
offset=0.0) - - # Results should be different (different message gradients) - assert result_green != result_orange - finally: - config.ACTIVE_THEME = original_theme - - def test_msg_gradient_preserves_spacing(self): - """msg_gradient preserves spaces in rows.""" - original_theme = config.ACTIVE_THEME - - try: - config.set_active_theme("purple") - rows = ["M E S S A G E"] - - result = render.msg_gradient(rows, offset=0.0) - - # Spaces should be preserved - assert " " in result[0] - finally: - config.ACTIVE_THEME = original_theme - - def test_msg_gradient_empty_rows(self): - """msg_gradient handles empty rows correctly.""" - original_theme = config.ACTIVE_THEME - - try: - config.set_active_theme("green") - rows = [""] - - result = render.msg_gradient(rows, offset=0.0) - - # Empty row should stay empty - assert result == [""] - finally: - config.ACTIVE_THEME = original_theme diff --git a/tests/test_sensors.py b/tests/test_sensors.py new file mode 100644 index 0000000..04e43fd --- /dev/null +++ b/tests/test_sensors.py @@ -0,0 +1,473 @@ +""" +Tests for the sensor framework. 
+""" + +import time + +from engine.sensors import Sensor, SensorRegistry, SensorStage, SensorValue + + +class TestSensorValue: + """Tests for SensorValue dataclass.""" + + def test_create_sensor_value(self): + """SensorValue stores sensor data correctly.""" + value = SensorValue( + sensor_name="mic", + value=42.5, + timestamp=1234567890.0, + unit="dB", + ) + + assert value.sensor_name == "mic" + assert value.value == 42.5 + assert value.timestamp == 1234567890.0 + assert value.unit == "dB" + + +class DummySensor(Sensor): + """Dummy sensor for testing.""" + + def __init__(self, name: str = "dummy", value: float = 1.0): + self.name = name + self.unit = "units" + self._value = value + + def start(self) -> bool: + return True + + def stop(self) -> None: + pass + + def read(self) -> SensorValue | None: + return SensorValue( + sensor_name=self.name, + value=self._value, + timestamp=time.time(), + unit=self.unit, + ) + + +class TestSensorRegistry: + """Tests for SensorRegistry.""" + + def setup_method(self): + """Clear registry before each test.""" + SensorRegistry._sensors.clear() + SensorRegistry._started = False + + def test_register_sensor(self): + """SensorRegistry registers sensors.""" + sensor = DummySensor() + SensorRegistry.register(sensor) + + assert SensorRegistry.get("dummy") is sensor + + def test_list_sensors(self): + """SensorRegistry lists registered sensors.""" + SensorRegistry.register(DummySensor("a")) + SensorRegistry.register(DummySensor("b")) + + sensors = SensorRegistry.list_sensors() + assert "a" in sensors + assert "b" in sensors + + def test_read_all(self): + """SensorRegistry reads all sensor values.""" + SensorRegistry.register(DummySensor("a", 1.0)) + SensorRegistry.register(DummySensor("b", 2.0)) + + values = SensorRegistry.read_all() + assert values["a"] == 1.0 + assert values["b"] == 2.0 + + +class TestSensorStage: + """Tests for SensorStage pipeline adapter.""" + + def setup_method(self): + SensorRegistry._sensors.clear() + 
SensorRegistry._started = False + + def test_sensor_stage_capabilities(self): + """SensorStage declares correct capabilities.""" + sensor = DummySensor("mic") + stage = SensorStage(sensor) + + assert "sensor.mic" in stage.capabilities + + def test_sensor_stage_process(self): + """SensorStage reads sensor and stores in context.""" + from engine.pipeline.core import PipelineContext + + sensor = DummySensor("test", 42.0) + stage = SensorStage(sensor, "test") + + ctx = PipelineContext() + result = stage.process(None, ctx) + + assert ctx.get_state("sensor.test") == 42.0 + assert result is None + + +class TestApplyParamBindings: + """Tests for sensor param bindings.""" + + def test_no_bindings_returns_original(self): + """Effect without bindings returns original config.""" + from engine.effects.types import ( + EffectConfig, + EffectPlugin, + apply_param_bindings, + ) + + class TestEffect(EffectPlugin): + name = "test" + config = EffectConfig() + + def process(self, buf, ctx): + return buf + + def configure(self, config): + pass + + effect = TestEffect() + ctx = object() + + result = apply_param_bindings(effect, ctx) + assert result is effect.config + + def test_bindings_read_sensor_values(self): + """Param bindings read sensor values from context.""" + from engine.effects.types import ( + EffectConfig, + EffectPlugin, + apply_param_bindings, + ) + + class TestEffect(EffectPlugin): + name = "test" + config = EffectConfig(intensity=1.0) + param_bindings = { + "intensity": {"sensor": "mic", "transform": "linear"}, + } + + def process(self, buf, ctx): + return buf + + def configure(self, config): + pass + + from engine.effects.types import EffectContext + + effect = TestEffect() + ctx = EffectContext( + terminal_width=80, + terminal_height=24, + scroll_cam=0, + ticker_height=20, + ) + ctx.set_state("sensor.mic", 0.8) + + result = apply_param_bindings(effect, ctx) + assert "intensity_sensor" in result.params + + +class TestSensorLifecycle: + """Tests for sensor start/stop 
lifecycle.""" + + def setup_method(self): + SensorRegistry._sensors.clear() + SensorRegistry._started = False + + def test_start_all(self): + """SensorRegistry starts all sensors.""" + started = [] + + class StatefulSensor(Sensor): + name = "stateful" + + def start(self) -> bool: + started.append("start") + return True + + def stop(self) -> None: + started.append("stop") + + def read(self) -> SensorValue | None: + return SensorValue("stateful", 1.0, 0.0) + + SensorRegistry.register(StatefulSensor()) + SensorRegistry.start_all() + + assert "start" in started + assert SensorRegistry._started is True + + def test_stop_all(self): + """SensorRegistry stops all sensors.""" + stopped = [] + + class StatefulSensor(Sensor): + name = "stateful" + + def start(self) -> bool: + return True + + def stop(self) -> None: + stopped.append("stop") + + def read(self) -> SensorValue | None: + return SensorValue("stateful", 1.0, 0.0) + + SensorRegistry.register(StatefulSensor()) + SensorRegistry.start_all() + SensorRegistry.stop_all() + + assert "stop" in stopped + assert SensorRegistry._started is False + + def test_unavailable_sensor(self): + """Unavailable sensor returns None from read.""" + + class UnavailableSensor(Sensor): + name = "unavailable" + + @property + def available(self) -> bool: + return False + + def start(self) -> bool: + return False + + def stop(self) -> None: + pass + + def read(self) -> SensorValue | None: + return None + + sensor = UnavailableSensor() + assert sensor.available is False + assert sensor.read() is None + + +class TestTransforms: + """Tests for sensor value transforms.""" + + def test_exponential_transform(self): + """Exponential transform squares the value.""" + from engine.effects.types import ( + EffectConfig, + EffectPlugin, + apply_param_bindings, + ) + + class TestEffect(EffectPlugin): + name = "test" + config = EffectConfig(intensity=1.0) + param_bindings = { + "intensity": {"sensor": "mic", "transform": "exponential"}, + } + + def 
process(self, buf, ctx): + return buf + + def configure(self, config): + pass + + from engine.effects.types import EffectContext + + effect = TestEffect() + ctx = EffectContext(80, 24, 0, 20) + ctx.set_state("sensor.mic", 0.5) + + result = apply_param_bindings(effect, ctx) + # 0.5^2 = 0.25, then scaled: 0.5 + 0.25*0.5 = 0.625 + assert result.intensity != effect.config.intensity + + def test_inverse_transform(self): + """Inverse transform inverts the value.""" + from engine.effects.types import ( + EffectConfig, + EffectPlugin, + apply_param_bindings, + ) + + class TestEffect(EffectPlugin): + name = "test" + config = EffectConfig(intensity=1.0) + param_bindings = { + "intensity": {"sensor": "mic", "transform": "inverse"}, + } + + def process(self, buf, ctx): + return buf + + def configure(self, config): + pass + + from engine.effects.types import EffectContext + + effect = TestEffect() + ctx = EffectContext(80, 24, 0, 20) + ctx.set_state("sensor.mic", 0.8) + + result = apply_param_bindings(effect, ctx) + # 1.0 - 0.8 = 0.2 + assert abs(result.params["intensity_sensor"] - 0.2) < 0.001 + + def test_threshold_transform(self): + """Threshold transform applies binary threshold.""" + from engine.effects.types import ( + EffectConfig, + EffectPlugin, + apply_param_bindings, + ) + + class TestEffect(EffectPlugin): + name = "test" + config = EffectConfig(intensity=1.0) + param_bindings = { + "intensity": { + "sensor": "mic", + "transform": "threshold", + "threshold": 0.5, + }, + } + + def process(self, buf, ctx): + return buf + + def configure(self, config): + pass + + from engine.effects.types import EffectContext + + effect = TestEffect() + ctx = EffectContext(80, 24, 0, 20) + + # Above threshold + ctx.set_state("sensor.mic", 0.8) + result = apply_param_bindings(effect, ctx) + assert result.params["intensity_sensor"] == 1.0 + + # Below threshold + ctx.set_state("sensor.mic", 0.3) + result = apply_param_bindings(effect, ctx) + assert result.params["intensity_sensor"] == 0.0 
+ + +class TestOscillatorSensor: + """Tests for OscillatorSensor.""" + + def setup_method(self): + SensorRegistry._sensors.clear() + SensorRegistry._started = False + + def test_sine_waveform(self): + """Oscillator generates sine wave.""" + from engine.sensors.oscillator import OscillatorSensor + + osc = OscillatorSensor(name="test", waveform="sine", frequency=1.0) + osc.start() + + values = [osc.read().value for _ in range(10)] + assert all(0 <= v <= 1 for v in values) + + def test_square_waveform(self): + """Oscillator generates square wave.""" + from engine.sensors.oscillator import OscillatorSensor + + osc = OscillatorSensor(name="test", waveform="square", frequency=10.0) + osc.start() + + values = [osc.read().value for _ in range(10)] + assert all(v in (0.0, 1.0) for v in values) + + def test_waveform_types(self): + """All waveform types work.""" + from engine.sensors.oscillator import OscillatorSensor + + for wf in ["sine", "square", "sawtooth", "triangle", "noise"]: + osc = OscillatorSensor(name=wf, waveform=wf, frequency=1.0) + osc.start() + val = osc.read() + assert val is not None + assert 0 <= val.value <= 1 + + def test_invalid_waveform_raises(self): + """Invalid waveform returns None.""" + from engine.sensors.oscillator import OscillatorSensor + + osc = OscillatorSensor(waveform="invalid") + osc.start() + val = osc.read() + assert val is None + + def test_sensor_driven_oscillator(self): + """Oscillator can be driven by another sensor.""" + from engine.sensors.oscillator import OscillatorSensor + + class ModSensor(Sensor): + name = "mod" + + def start(self) -> bool: + return True + + def stop(self) -> None: + pass + + def read(self) -> SensorValue | None: + return SensorValue("mod", 0.5, 0.0) + + SensorRegistry.register(ModSensor()) + + osc = OscillatorSensor( + name="lfo", waveform="sine", frequency=0.1, input_sensor="mod" + ) + osc.start() + + val = osc.read() + assert val is not None + assert 0 <= val.value <= 1 + + +class TestMicSensor: + """Tests 
for MicSensor.""" + + def setup_method(self): + SensorRegistry._sensors.clear() + SensorRegistry._started = False + + def test_mic_sensor_creation(self): + """MicSensor can be created.""" + from engine.sensors.mic import MicSensor + + sensor = MicSensor() + assert sensor.name == "mic" + assert sensor.unit == "dB" + + def test_mic_sensor_custom_name(self): + """MicSensor can have custom name.""" + from engine.sensors.mic import MicSensor + + sensor = MicSensor(name="my_mic") + assert sensor.name == "my_mic" + + def test_mic_sensor_start(self): + """MicSensor.start returns bool.""" + from engine.sensors.mic import MicSensor + + sensor = MicSensor() + result = sensor.start() + assert isinstance(result, bool) + + def test_mic_sensor_read_returns_value_or_none(self): + """MicSensor.read returns SensorValue or None.""" + from engine.sensors.mic import MicSensor + + sensor = MicSensor() + sensor.start() + # May be None if no mic available + result = sensor.read() + # Just check it doesn't raise - result depends on system + assert result is None or isinstance(result, SensorValue) diff --git a/tests/test_streaming.py b/tests/test_streaming.py new file mode 100644 index 0000000..929240d --- /dev/null +++ b/tests/test_streaming.py @@ -0,0 +1,223 @@ +""" +Tests for streaming protocol utilities. 
+""" + +from engine.display.streaming import ( + FrameDiff, + MessageType, + apply_diff, + compress_frame, + compute_diff, + decode_binary_message, + decode_diff_message, + decode_rle, + decompress_frame, + encode_binary_message, + encode_diff_message, + encode_rle, + should_use_diff, +) + + +class TestFrameDiff: + """Tests for FrameDiff computation.""" + + def test_compute_diff_all_changed(self): + """compute_diff detects all changed lines.""" + old = ["a", "b", "c"] + new = ["x", "y", "z"] + + diff = compute_diff(old, new) + + assert len(diff.changed_lines) == 3 + assert diff.width == 1 + assert diff.height == 3 + + def test_compute_diff_no_changes(self): + """compute_diff returns empty for identical buffers.""" + old = ["a", "b", "c"] + new = ["a", "b", "c"] + + diff = compute_diff(old, new) + + assert len(diff.changed_lines) == 0 + + def test_compute_diff_partial_changes(self): + """compute_diff detects partial changes.""" + old = ["a", "b", "c"] + new = ["a", "x", "c"] + + diff = compute_diff(old, new) + + assert len(diff.changed_lines) == 1 + assert diff.changed_lines[0] == (1, "x") + + def test_compute_diff_new_lines(self): + """compute_diff detects new lines added.""" + old = ["a", "b"] + new = ["a", "b", "c"] + + diff = compute_diff(old, new) + + assert len(diff.changed_lines) == 1 + assert diff.changed_lines[0] == (2, "c") + + def test_compute_diff_empty_old(self): + """compute_diff handles empty old buffer.""" + old = [] + new = ["a", "b", "c"] + + diff = compute_diff(old, new) + + assert len(diff.changed_lines) == 3 + + +class TestRLE: + """Tests for run-length encoding.""" + + def test_encode_rle_no_repeats(self): + """encode_rle handles no repeated lines.""" + lines = [(0, "a"), (1, "b"), (2, "c")] + + encoded = encode_rle(lines) + + assert len(encoded) == 3 + assert encoded[0] == (0, "a", 1) + assert encoded[1] == (1, "b", 1) + assert encoded[2] == (2, "c", 1) + + def test_encode_rle_with_repeats(self): + """encode_rle compresses repeated lines.""" + 
lines = [(0, "a"), (1, "a"), (2, "a"), (3, "b")] + + encoded = encode_rle(lines) + + assert len(encoded) == 2 + assert encoded[0] == (0, "a", 3) + assert encoded[1] == (3, "b", 1) + + def test_decode_rle(self): + """decode_rle reconstructs original lines.""" + encoded = [(0, "a", 3), (3, "b", 1)] + + decoded = decode_rle(encoded) + + assert decoded == [(0, "a"), (1, "a"), (2, "a"), (3, "b")] + + def test_encode_decode_roundtrip(self): + """encode/decode is lossless.""" + original = [(i, f"line{i % 3}") for i in range(10)] + encoded = encode_rle(original) + decoded = decode_rle(encoded) + + assert decoded == original + + +class TestCompression: + """Tests for frame compression.""" + + def test_compress_decompress(self): + """compress_frame is lossless.""" + buffer = [f"Line {i:02d}" for i in range(24)] + + compressed = compress_frame(buffer) + decompressed = decompress_frame(compressed, 24) + + assert decompressed == buffer + + def test_compress_empty(self): + """compress_frame handles empty buffer.""" + compressed = compress_frame([]) + decompressed = decompress_frame(compressed, 0) + + assert decompressed == [] + + +class TestBinaryProtocol: + """Tests for binary message encoding.""" + + def test_encode_decode_message(self): + """encode_binary_message is lossless.""" + payload = b"test payload" + + encoded = encode_binary_message(MessageType.FULL_FRAME, 80, 24, payload) + msg_type, width, height, decoded_payload = decode_binary_message(encoded) + + assert msg_type == MessageType.FULL_FRAME + assert width == 80 + assert height == 24 + assert decoded_payload == payload + + def test_encode_decode_all_types(self): + """All message types encode correctly.""" + for msg_type in MessageType: + payload = b"test" + encoded = encode_binary_message(msg_type, 80, 24, payload) + decoded_type, _, _, _ = decode_binary_message(encoded) + assert decoded_type == msg_type + + +class TestDiffProtocol: + """Tests for diff message encoding.""" + + def test_encode_decode_diff(self): + 
"""encode_diff_message is lossless.""" + diff = FrameDiff(width=80, height=24, changed_lines=[(0, "a"), (5, "b")]) + + payload = encode_diff_message(diff) + decoded = decode_diff_message(payload) + + assert decoded == diff.changed_lines + + +class TestApplyDiff: + """Tests for applying diffs.""" + + def test_apply_diff(self): + """apply_diff reconstructs new buffer.""" + old_buffer = ["a", "b", "c", "d"] + diff = FrameDiff(width=1, height=4, changed_lines=[(1, "x"), (2, "y")]) + + new_buffer = apply_diff(old_buffer, diff) + + assert new_buffer == ["a", "x", "y", "d"] + + def test_apply_diff_new_lines(self): + """apply_diff handles new lines.""" + old_buffer = ["a", "b"] + diff = FrameDiff(width=1, height=4, changed_lines=[(2, "c"), (3, "d")]) + + new_buffer = apply_diff(old_buffer, diff) + + assert new_buffer == ["a", "b", "c", "d"] + + +class TestShouldUseDiff: + """Tests for diff threshold decision.""" + + def test_uses_diff_when_small_changes(self): + """should_use_diff returns True when few changes.""" + old = ["a"] * 100 + new = ["a"] * 95 + ["b"] * 5 + + assert should_use_diff(old, new, threshold=0.3) is True + + def test_uses_full_when_many_changes(self): + """should_use_diff returns False when many changes.""" + old = ["a"] * 100 + new = ["b"] * 100 + + assert should_use_diff(old, new, threshold=0.3) is False + + def test_uses_diff_at_threshold(self): + """should_use_diff handles threshold boundary.""" + old = ["a"] * 100 + new = ["a"] * 70 + ["b"] * 30 + + result = should_use_diff(old, new, threshold=0.3) + assert result is True or result is False # At boundary + + def test_returns_false_for_empty(self): + """should_use_diff returns False for empty buffers.""" + assert should_use_diff([], ["a", "b"]) is False + assert should_use_diff(["a", "b"], []) is False diff --git a/tests/test_themes.py b/tests/test_themes.py deleted file mode 100644 index f6bbdf3..0000000 --- a/tests/test_themes.py +++ /dev/null @@ -1,169 +0,0 @@ -""" -Tests for engine.themes module. 
-""" - -import pytest - -from engine import themes - - -class TestThemeConstruction: - """Tests for Theme class initialization.""" - - def test_theme_construction(self): - """Theme stores name and gradients correctly.""" - main_grad = ["color1", "color2", "color3"] - msg_grad = ["msg1", "msg2", "msg3"] - theme = themes.Theme("test_theme", main_grad, msg_grad) - - assert theme.name == "test_theme" - assert theme.main_gradient == main_grad - assert theme.message_gradient == msg_grad - - -class TestGradientLength: - """Tests for gradient length validation.""" - - def test_gradient_length_green(self): - """Green theme has exactly 12 colors in each gradient.""" - green = themes.THEME_REGISTRY["green"] - assert len(green.main_gradient) == 12 - assert len(green.message_gradient) == 12 - - def test_gradient_length_orange(self): - """Orange theme has exactly 12 colors in each gradient.""" - orange = themes.THEME_REGISTRY["orange"] - assert len(orange.main_gradient) == 12 - assert len(orange.message_gradient) == 12 - - def test_gradient_length_purple(self): - """Purple theme has exactly 12 colors in each gradient.""" - purple = themes.THEME_REGISTRY["purple"] - assert len(purple.main_gradient) == 12 - assert len(purple.message_gradient) == 12 - - -class TestThemeRegistry: - """Tests for THEME_REGISTRY dictionary.""" - - def test_theme_registry_has_three_themes(self): - """Registry contains exactly three themes: green, orange, purple.""" - assert len(themes.THEME_REGISTRY) == 3 - assert set(themes.THEME_REGISTRY.keys()) == {"green", "orange", "purple"} - - def test_registry_values_are_themes(self): - """All registry values are Theme instances.""" - for theme_id, theme in themes.THEME_REGISTRY.items(): - assert isinstance(theme, themes.Theme) - assert theme.name == theme_id - - -class TestGetTheme: - """Tests for get_theme function.""" - - def test_get_theme_valid_green(self): - """get_theme('green') returns correct green Theme.""" - green = themes.get_theme("green") - assert 
isinstance(green, themes.Theme) - assert green.name == "green" - - def test_get_theme_valid_orange(self): - """get_theme('orange') returns correct orange Theme.""" - orange = themes.get_theme("orange") - assert isinstance(orange, themes.Theme) - assert orange.name == "orange" - - def test_get_theme_valid_purple(self): - """get_theme('purple') returns correct purple Theme.""" - purple = themes.get_theme("purple") - assert isinstance(purple, themes.Theme) - assert purple.name == "purple" - - def test_get_theme_invalid(self): - """get_theme with invalid ID raises KeyError.""" - with pytest.raises(KeyError): - themes.get_theme("invalid_theme") - - def test_get_theme_invalid_none(self): - """get_theme with None raises KeyError.""" - with pytest.raises(KeyError): - themes.get_theme(None) - - -class TestGreenTheme: - """Tests for green theme specific values.""" - - def test_green_theme_unchanged(self): - """Green theme maintains original color sequence.""" - green = themes.get_theme("green") - - # Expected main gradient: 231→195→123→118→82→46→40→34→28→22→22(dim)→235 - expected_main = [231, 195, 123, 118, 82, 46, 40, 34, 28, 22, 22, 235] - # Expected msg gradient: 231→225→219→213→207→201→165→161→125→89→89(dim)→235 - expected_msg = [231, 225, 219, 213, 207, 201, 165, 161, 125, 89, 89, 235] - - assert green.main_gradient == expected_main - assert green.message_gradient == expected_msg - - def test_green_theme_name(self): - """Green theme has correct name.""" - green = themes.get_theme("green") - assert green.name == "green" - - -class TestOrangeTheme: - """Tests for orange theme specific values.""" - - def test_orange_theme_unchanged(self): - """Orange theme maintains original color sequence.""" - orange = themes.get_theme("orange") - - # Expected main gradient: 231→215→209→208→202→166→130→94→58→94→94(dim)→235 - expected_main = [231, 215, 209, 208, 202, 166, 130, 94, 58, 94, 94, 235] - # Expected msg gradient: 231→195→33→27→21→21→21→18→18→18→18(dim)→235 - expected_msg = 
[231, 195, 33, 27, 21, 21, 21, 18, 18, 18, 18, 235] - - assert orange.main_gradient == expected_main - assert orange.message_gradient == expected_msg - - def test_orange_theme_name(self): - """Orange theme has correct name.""" - orange = themes.get_theme("orange") - assert orange.name == "orange" - - -class TestPurpleTheme: - """Tests for purple theme specific values.""" - - def test_purple_theme_unchanged(self): - """Purple theme maintains original color sequence.""" - purple = themes.get_theme("purple") - - # Expected main gradient: 231→225→177→171→165→135→129→93→57→57→57(dim)→235 - expected_main = [231, 225, 177, 171, 165, 135, 129, 93, 57, 57, 57, 235] - # Expected msg gradient: 231→226→226→220→220→184→184→178→178→172→172(dim)→235 - expected_msg = [231, 226, 226, 220, 220, 184, 184, 178, 178, 172, 172, 235] - - assert purple.main_gradient == expected_main - assert purple.message_gradient == expected_msg - - def test_purple_theme_name(self): - """Purple theme has correct name.""" - purple = themes.get_theme("purple") - assert purple.name == "purple" - - -class TestThemeDataOnly: - """Tests to ensure themes module has no problematic imports.""" - - def test_themes_module_imports(self): - """themes module should be data-only without config/render imports.""" - import inspect - source = inspect.getsource(themes) - # Verify no imports of config or render (look for actual import statements) - lines = source.split('\n') - import_lines = [line for line in lines if line.strip().startswith('import ') or line.strip().startswith('from ')] - # Filter out empty and comment lines - import_lines = [line for line in import_lines if line.strip() and not line.strip().startswith('#')] - # Should have no import lines - assert len(import_lines) == 0, f"Found unexpected imports: {import_lines}" diff --git a/tests/test_tint_acceptance.py b/tests/test_tint_acceptance.py new file mode 100644 index 0000000..7dd70c8 --- /dev/null +++ b/tests/test_tint_acceptance.py @@ -0,0 +1,206 @@ 
"""Integration test: TintEffect in the pipeline."""

import queue

from engine.data_sources.sources import ListDataSource, SourceItem
from engine.effects.plugins.tint import TintEffect
from engine.effects.types import EffectConfig
from engine.pipeline import Pipeline, PipelineConfig
from engine.pipeline.adapters import (
    DataSourceStage,
    DisplayStage,
    EffectPluginStage,
    SourceItemsToBufferStage,
)
from engine.pipeline.core import PipelineContext
from engine.pipeline.params import PipelineParams


class QueueDisplay:
    """Stub display that captures every frame into a queue."""

    def __init__(self):
        # Frames are queued so tests can block (with a timeout) until a
        # frame actually arrives from the pipeline.
        self.frames: queue.Queue[list[str]] = queue.Queue()
        self.width = 80
        self.height = 24
        self._init_called = False

    def init(self, width: int, height: int, reuse: bool = False) -> None:
        # Record the requested dimensions; `reuse` is accepted for interface
        # compatibility but intentionally ignored by this stub.
        self.width = width
        self.height = height
        self._init_called = True

    def show(self, buffer: list[str], border: bool = False) -> None:
        # Copy the buffer defensively: the pipeline may reuse/mutate it
        # after the call returns.
        self.frames.put(list(buffer))

    def clear(self) -> None:
        pass

    def cleanup(self) -> None:
        pass

    def get_dimensions(self) -> tuple[int, int]:
        return (self.width, self.height)


def _build_pipeline(
    items: list[SourceItem],
    tint_config: EffectConfig | None = None,
    width: int = 80,
    height: int = 24,
) -> tuple[Pipeline, QueueDisplay, PipelineContext]:
    """Build pipeline: source -> render -> tint effect -> display.

    Returns the built-and-initialized pipeline, the capturing display stub,
    and the shared pipeline context so tests can inspect all three.
    """
    display = QueueDisplay()

    ctx = PipelineContext()
    params = PipelineParams()
    params.viewport_width = width
    params.viewport_height = height
    params.frame_number = 0
    ctx.params = params
    ctx.set("items", items)

    pipeline = Pipeline(
        config=PipelineConfig(enable_metrics=True),
        context=ctx,
    )

    # Source
    source = ListDataSource(items, name="test-source")
    pipeline.add_stage("source", DataSourceStage(source, name="test-source"))

    # Render (simple)
    pipeline.add_stage("render", SourceItemsToBufferStage(name="items-to-buffer"))

    # Tint effect — configured only when the test supplies an EffectConfig,
    # otherwise the plugin's defaults are exercised.
    tint_effect = TintEffect()
    if tint_config is not None:
        tint_effect.configure(tint_config)
    pipeline.add_stage("tint", EffectPluginStage(tint_effect, name="tint"))

    # Display
    pipeline.add_stage("display", DisplayStage(display, name="queue"))

    pipeline.build()
    pipeline.initialize()

    return pipeline, display, ctx


class TestTintAcceptance:
    """Test TintEffect in a full pipeline."""

    def test_tint_applies_default_color(self):
        """Default tint should apply ANSI color codes to output."""
        items = [SourceItem(content="Hello World", source="test", timestamp="0")]
        pipeline, display, ctx = _build_pipeline(items)

        result = pipeline.execute(items)

        assert result.success, f"Pipeline failed: {result.error}"
        frame = display.frames.get(timeout=1)

        text = "\n".join(frame)
        assert "\033[" in text, f"Expected ANSI codes in frame: {frame}"
        assert "Hello World" in text

    def test_tint_applies_red_color(self):
        """Configured red tint should produce red ANSI code (196-197)."""
        items = [SourceItem(content="Red Text", source="test", timestamp="0")]
        config = EffectConfig(
            enabled=True,
            intensity=1.0,
            params={"r": 255, "g": 0, "b": 0, "a": 0.8},
        )
        pipeline, display, ctx = _build_pipeline(items, tint_config=config)

        result = pipeline.execute(items)

        assert result.success
        frame = display.frames.get(timeout=1)
        line = frame[0]

        # Should contain red ANSI code (196 or 197 in 256 color)
        assert "\033[38;5;196m" in line or "\033[38;5;197m" in line, (
            f"Missing red tint: {line}"
        )
        assert "Red Text" in line

    def test_tint_disabled_does_nothing(self):
        """Disabled tint stage should pass through buffer unchanged."""
        items = [SourceItem(content="Plain Text", source="test", timestamp="0")]
        pipeline, display, ctx = _build_pipeline(items)

        # Disable the tint stage
        stage = pipeline.get_stage("tint")
        stage.set_enabled(False)

        result = pipeline.execute(items)

        assert result.success
        frame = display.frames.get(timeout=1)
+ text = "\n".join(frame) + + # Should contain Plain Text with NO ANSI color codes + assert "Plain Text" in text + assert "\033[" not in text, f"Unexpected ANSI codes in frame: {frame}" + + def test_tint_zero_transparency(self): + """Alpha=0 should pass through buffer unchanged (no tint).""" + items = [SourceItem(content="Transparent", source="test", timestamp="0")] + config = EffectConfig( + enabled=True, + intensity=1.0, + params={"r": 255, "g": 128, "b": 64, "a": 0.0}, + ) + pipeline, display, ctx = _build_pipeline(items, tint_config=config) + + result = pipeline.execute(items) + + assert result.success + frame = display.frames.get(timeout=1) + text = "\n".join(frame) + + assert "Transparent" in text + assert "\033[" not in text, f"Expected no ANSI codes with alpha=0: {frame}" + + def test_tint_with_multiples_lines(self): + """Tint should apply to all non-empty lines.""" + items = [ + SourceItem(content="Line1\nLine2\n\nLine4", source="test", timestamp="0") + ] + config = EffectConfig( + enabled=True, + intensity=1.0, + params={"r": 0, "g": 255, "b": 0, "a": 0.7}, + ) + pipeline, display, ctx = _build_pipeline(items, tint_config=config) + + result = pipeline.execute(items) + + assert result.success + frame = display.frames.get(timeout=1) + + # All non-empty lines should have green ANSI codes + green_codes = ["\033[38;5;", "m"] + for line in frame: + if line.strip(): + assert green_codes[0] in line and green_codes[1] in line, ( + f"Missing green tint: {line}" + ) + else: + assert line == "", f"Empty lines should be exactly empty: {line}" + + def test_tint_preserves_empty_lines(self): + """Empty lines should remain empty (no ANSI codes).""" + items = [SourceItem(content="A\n\nB", source="test", timestamp="0")] + pipeline, display, ctx = _build_pipeline(items) + + result = pipeline.execute(items) + + assert result.success + frame = display.frames.get(timeout=1) + + assert frame[0].strip() != "" + assert frame[1] == "" # Empty line unchanged + assert 
frame[2].strip() != "" diff --git a/tests/test_tint_effect.py b/tests/test_tint_effect.py new file mode 100644 index 0000000..c7df3c3 --- /dev/null +++ b/tests/test_tint_effect.py @@ -0,0 +1,125 @@ +import pytest + +from engine.effects.plugins.tint import TintEffect +from engine.effects.types import EffectConfig + + +@pytest.fixture +def effect(): + return TintEffect() + + +@pytest.fixture +def effect_with_params(r=255, g=128, b=64, a=0.5): + e = TintEffect() + config = EffectConfig( + enabled=True, + intensity=1.0, + params={"r": r, "g": g, "b": b, "a": a}, + ) + e.configure(config) + return e + + +@pytest.fixture +def mock_context(): + class MockContext: + terminal_width = 80 + terminal_height = 24 + + def get_state(self, key): + return None + + return MockContext() + + +class TestTintEffect: + def test_name(self, effect): + assert effect.name == "tint" + + def test_enabled_by_default(self, effect): + assert effect.config.enabled is True + + def test_returns_input_when_empty(self, effect, mock_context): + result = effect.process([], mock_context) + assert result == [] + + def test_returns_input_when_transparency_zero( + self, effect_with_params, mock_context + ): + effect_with_params.config.params["a"] = 0.0 + buf = ["hello world"] + result = effect_with_params.process(buf, mock_context) + assert result == buf + + def test_applies_tint_to_plain_text(self, effect_with_params, mock_context): + buf = ["hello world"] + result = effect_with_params.process(buf, mock_context) + assert len(result) == 1 + assert "\033[" in result[0] # Has ANSI codes + assert "hello world" in result[0] + + def test_tint_preserves_content(self, effect_with_params, mock_context): + buf = ["hello world", "test line"] + result = effect_with_params.process(buf, mock_context) + assert "hello world" in result[0] + assert "test line" in result[1] + + def test_rgb_to_ansi256_black(self, effect): + assert effect._rgb_to_ansi256(0, 0, 0) == 16 + + def test_rgb_to_ansi256_white(self, effect): + assert 
effect._rgb_to_ansi256(255, 255, 255) == 231 + + def test_rgb_to_ansi256_red(self, effect): + color = effect._rgb_to_ansi256(255, 0, 0) + assert 196 <= color <= 197 # Red in 256 color + + def test_rgb_to_ansi256_green(self, effect): + color = effect._rgb_to_ansi256(0, 255, 0) + assert 34 <= color <= 46 + + def test_rgb_to_ansi256_blue(self, effect): + color = effect._rgb_to_ansi256(0, 0, 255) + assert 20 <= color <= 33 + + def test_configure_updates_params(self, effect): + config = EffectConfig( + enabled=True, + intensity=1.0, + params={"r": 100, "g": 150, "b": 200, "a": 0.8}, + ) + effect.configure(config) + assert effect.config.params["r"] == 100 + assert effect.config.params["g"] == 150 + assert effect.config.params["b"] == 200 + assert effect.config.params["a"] == 0.8 + + def test_clamp_rgb_values(self, effect_with_params, mock_context): + effect_with_params.config.params["r"] = 300 + effect_with_params.config.params["g"] = -10 + effect_with_params.config.params["b"] = 1.5 + buf = ["test"] + result = effect_with_params.process(buf, mock_context) + assert "\033[" in result[0] + + def test_clamp_alpha_above_one(self, effect_with_params, mock_context): + effect_with_params.config.params["a"] = 1.5 + buf = ["test"] + result = effect_with_params.process(buf, mock_context) + assert "\033[" in result[0] + + def test_preserves_empty_lines(self, effect_with_params, mock_context): + buf = ["hello", "", "world"] + result = effect_with_params.process(buf, mock_context) + assert result[1] == "" + + def test_inlet_types_includes_text_buffer(self, effect): + from engine.pipeline.core import DataType + + assert DataType.TEXT_BUFFER in effect.inlet_types + + def test_outlet_types_includes_text_buffer(self, effect): + from engine.pipeline.core import DataType + + assert DataType.TEXT_BUFFER in effect.outlet_types diff --git a/tests/test_translate.py b/tests/test_translate.py new file mode 100644 index 0000000..658afc0 --- /dev/null +++ b/tests/test_translate.py @@ -0,0 +1,115 
@@ +""" +Tests for engine.translate module. +""" + +import json +from unittest.mock import MagicMock, patch + +from engine.translate import ( + _translate_cached, + detect_location_language, + translate_headline, +) + + +def clear_translate_cache(): + """Clear the LRU cache between tests.""" + _translate_cached.cache_clear() + + +class TestDetectLocationLanguage: + """Tests for detect_location_language function.""" + + def test_returns_none_for_unknown_location(self): + """Returns None when no location pattern matches.""" + result = detect_location_language("Breaking news about technology") + assert result is None + + def test_detects_berlin(self): + """Detects Berlin location.""" + result = detect_location_language("Berlin police arrest protesters") + assert result == "de" + + def test_detects_paris(self): + """Detects Paris location.""" + result = detect_location_language("Paris fashion week begins") + assert result == "fr" + + def test_detects_tokyo(self): + """Detects Tokyo location.""" + result = detect_location_language("Tokyo stocks rise") + assert result == "ja" + + def test_detects_berlin_again(self): + """Detects Berlin location again.""" + result = detect_location_language("Berlin marathon set to begin") + assert result == "de" + + def test_case_insensitive(self): + """Detection is case insensitive.""" + result = detect_location_language("BERLIN SUMMER FESTIVAL") + assert result == "de" + + def test_returns_first_match(self): + """Returns first matching pattern.""" + result = detect_location_language("Berlin in Paris for the event") + assert result == "de" + + +class TestTranslateHeadline: + """Tests for translate_headline function.""" + + def test_returns_translated_text(self): + """Returns translated text from cache.""" + clear_translate_cache() + with patch("engine.translate.translate_headline") as mock_fn: + mock_fn.return_value = "Translated title" + from engine.translate import translate_headline as th + + result = th("Original title", "de") + 
assert result == "Translated title" + + def test_uses_cached_result(self): + """Translation uses LRU cache.""" + clear_translate_cache() + result1 = translate_headline("Test unique", "es") + result2 = translate_headline("Test unique", "es") + assert result1 == result2 + + +class TestTranslateCached: + """Tests for _translate_cached function.""" + + def test_translation_network_error(self): + """Network error returns original text.""" + clear_translate_cache() + with patch("engine.translate.urllib.request.urlopen") as mock_urlopen: + mock_urlopen.side_effect = Exception("Network error") + + result = _translate_cached("Hello world", "de") + + assert result == "Hello world" + + def test_translation_invalid_json(self): + """Invalid JSON returns original text.""" + clear_translate_cache() + with patch("engine.translate.urllib.request.urlopen") as mock_urlopen: + mock_response = MagicMock() + mock_response.read.return_value = b"invalid json" + mock_urlopen.return_value = mock_response + + result = _translate_cached("Hello", "de") + + assert result == "Hello" + + def test_translation_empty_response(self): + """Empty translation response returns original text.""" + clear_translate_cache() + with patch("engine.translate.urllib.request.urlopen") as mock_urlopen: + mock_response = MagicMock() + mock_response.read.return_value = json.dumps([[[""], None, "de"], None]) + mock_urlopen.return_value = mock_response + + result = _translate_cached("Hello", "de") + + assert result == "Hello" diff --git a/tests/test_ui_panel.py b/tests/test_ui_panel.py new file mode 100644 index 0000000..17a9980 --- /dev/null +++ b/tests/test_ui_panel.py @@ -0,0 +1,184 @@ +""" +Tests for UIPanel. 
+""" + +from engine.pipeline.ui import StageControl, UIConfig, UIPanel + + +class MockStage: + """Mock stage for testing.""" + + def __init__(self, name, category="effect"): + self.name = name + self.category = category + self._enabled = True + + def is_enabled(self): + return self._enabled + + +class TestUIPanel: + """Tests for UIPanel.""" + + def test_init(self): + """UIPanel initializes with default config.""" + panel = UIPanel() + assert panel.config.panel_width == 24 + assert panel.config.stage_list_height == 12 + assert panel.scroll_offset == 0 + assert panel.selected_stage is None + + def test_register_stage(self): + """register_stage adds a stage control.""" + panel = UIPanel() + stage = MockStage("noise") + panel.register_stage(stage, enabled=True) + assert "noise" in panel.stages + ctrl = panel.stages["noise"] + assert ctrl.name == "noise" + assert ctrl.enabled is True + assert ctrl.selected is False + + def test_select_stage(self): + """select_stage sets selection.""" + panel = UIPanel() + stage1 = MockStage("noise") + stage2 = MockStage("fade") + panel.register_stage(stage1) + panel.register_stage(stage2) + panel.select_stage("fade") + assert panel.selected_stage == "fade" + assert panel.stages["fade"].selected is True + assert panel.stages["noise"].selected is False + + def test_toggle_stage(self): + """toggle_stage flips enabled state.""" + panel = UIPanel() + stage = MockStage("glitch") + panel.register_stage(stage, enabled=True) + result = panel.toggle_stage("glitch") + assert result is False + assert panel.stages["glitch"].enabled is False + result = panel.toggle_stage("glitch") + assert result is True + + def test_get_enabled_stages(self): + """get_enabled_stages returns only enabled stage names.""" + panel = UIPanel() + panel.register_stage(MockStage("noise"), enabled=True) + panel.register_stage(MockStage("fade"), enabled=False) + panel.register_stage(MockStage("glitch"), enabled=True) + enabled = panel.get_enabled_stages() + assert set(enabled) 
== {"noise", "glitch"} + + def test_scroll_stages(self): + """scroll_stages moves the view.""" + panel = UIPanel(UIConfig(stage_list_height=3)) + for i in range(10): + panel.register_stage(MockStage(f"stage{i}")) + assert panel.scroll_offset == 0 + panel.scroll_stages(1) + assert panel.scroll_offset == 1 + panel.scroll_stages(-1) + assert panel.scroll_offset == 0 + # Clamp at max + panel.scroll_stages(100) + assert panel.scroll_offset == 7 # 10 - 3 = 7 + + def test_render_produces_lines(self): + """render produces list of strings of correct width.""" + panel = UIPanel(UIConfig(panel_width=20)) + panel.register_stage(MockStage("noise"), enabled=True) + panel.register_stage(MockStage("fade"), enabled=False) + panel.select_stage("noise") + lines = panel.render(80, 24) + # All lines should be exactly panel_width chars (20) + for line in lines: + assert len(line) == 20 + # Should have header, stage rows, separator, params area, footer + assert len(lines) >= 5 + + def test_process_key_event_space_toggles_stage(self): + """process_key_event with space toggles UI panel visibility.""" + panel = UIPanel() + stage = MockStage("glitch") + panel.register_stage(stage, enabled=True) + panel.select_stage("glitch") + # Space should now toggle UI panel visibility, not stage + assert panel._show_panel is True + handled = panel.process_key_event(" ") + assert handled is True + assert panel._show_panel is False + # Pressing space again should show panel + handled = panel.process_key_event(" ") + assert panel._show_panel is True + + def test_process_key_event_space_does_not_toggle_in_picker(self): + """Space should not toggle UI panel when preset picker is active.""" + panel = UIPanel() + panel._show_panel = True + panel._show_preset_picker = True + handled = panel.process_key_event(" ") + assert handled is False # Not handled when picker active + assert panel._show_panel is True # Unchanged + + def test_process_key_event_s_selects_next(self): + """process_key_event with s cycles 
selection.""" + panel = UIPanel() + panel.register_stage(MockStage("noise")) + panel.register_stage(MockStage("fade")) + panel.register_stage(MockStage("glitch")) + panel.select_stage("noise") + handled = panel.process_key_event("s") + assert handled is True + assert panel.selected_stage == "fade" + + def test_process_key_event_hjkl_navigation(self): + """process_key_event with HJKL keys.""" + panel = UIPanel() + stage = MockStage("noise") + panel.register_stage(stage) + panel.select_stage("noise") + + # J or Down should scroll or adjust param + assert panel.scroll_stages(1) is None # Just test it doesn't error + # H or Left should adjust param (when param selected) + panel.selected_stage = "noise" + panel._focused_param = "intensity" + panel.stages["noise"].params["intensity"] = 0.5 + + # Left/H should decrease + handled = panel.process_key_event("h") + assert handled is True + # L or Right should increase + handled = panel.process_key_event("l") + assert handled is True + + # K should scroll up + panel.selected_stage = None + handled = panel.process_key_event("k") + assert handled is True + + def test_set_event_callback(self): + """set_event_callback registers callback.""" + panel = UIPanel() + called = [] + + def callback(stage_name, enabled): + called.append((stage_name, enabled)) + + panel.set_event_callback("stage_toggled", callback) + panel.toggle_stage("test") # No stage, won't trigger + # Simulate toggle through event + panel._emit_event("stage_toggled", stage_name="noise", enabled=False) + assert called == [("noise", False)] + + def test_register_stage_returns_control(self): + """register_stage should return the StageControl instance.""" + panel = UIPanel() + stage = MockStage("noise_effect") + control = panel.register_stage(stage, enabled=True) + assert control is not None + assert isinstance(control, StageControl) + assert control.name == "noise_effect" + assert control.enabled is True diff --git a/tests/test_viewport_filter_performance.py 
b/tests/test_viewport_filter_performance.py new file mode 100644 index 0000000..4d1fc06 --- /dev/null +++ b/tests/test_viewport_filter_performance.py @@ -0,0 +1,252 @@ +"""Integration tests for ViewportFilterStage with realistic data volumes. + +These tests verify that the ViewportFilterStage effectively reduces the number +of items processed by FontStage, preventing the 10+ second hangs observed with +large headline sources. +""" + +from engine.data_sources.sources import SourceItem +from engine.pipeline.adapters import ViewportFilterStage +from engine.pipeline.core import PipelineContext + + +class MockParams: + """Mock parameters object for testing.""" + + def __init__(self, viewport_width: int = 80, viewport_height: int = 24): + self.viewport_width = viewport_width + self.viewport_height = viewport_height + + +class TestViewportFilterStage: + """Test ViewportFilterStage filtering behavior.""" + + def test_filter_stage_exists(self): + """Verify ViewportFilterStage can be instantiated.""" + stage = ViewportFilterStage() + assert stage is not None + assert stage.name == "viewport-filter" + + def test_filter_stage_properties(self): + """Verify ViewportFilterStage has correct type properties.""" + stage = ViewportFilterStage() + from engine.pipeline.core import DataType + + assert DataType.SOURCE_ITEMS in stage.inlet_types + assert DataType.SOURCE_ITEMS in stage.outlet_types + + def test_filter_large_item_count_to_viewport(self): + """Test filtering 1438 items (like real headlines) to viewport size.""" + # Create 1438 test items (matching real headline source) + test_items = [ + SourceItem(f"Headline {i}", f"source-{i % 5}", str(i)) for i in range(1438) + ] + + stage = ViewportFilterStage() + ctx = PipelineContext() + ctx.params = MockParams(viewport_width=80, viewport_height=24) + + # Filter items + filtered = stage.process(test_items, ctx) + + # Verify filtering reduced item count significantly + assert len(filtered) < len(test_items) + assert len(filtered) <= 5 # 
24 height / 6 lines per item + 1 + assert len(filtered) > 0 # Must return at least 1 item + + def test_filter_respects_viewport_height(self): + """Test that filter respects different viewport heights.""" + test_items = [SourceItem(f"Headline {i}", "source", str(i)) for i in range(100)] + + stage = ViewportFilterStage() + + # Test with different viewport heights + for height in [12, 24, 48]: + ctx = PipelineContext() + ctx.params = MockParams(viewport_height=height) + + filtered = stage.process(test_items, ctx) + expected_max = max(1, height // 6 + 1) + + assert len(filtered) <= expected_max + assert len(filtered) > 0 + + def test_filter_handles_empty_list(self): + """Test filter handles empty input gracefully.""" + stage = ViewportFilterStage() + ctx = PipelineContext() + ctx.params = MockParams() + + result = stage.process([], ctx) + + assert result == [] + + def test_filter_handles_none(self): + """Test filter handles None input gracefully.""" + stage = ViewportFilterStage() + ctx = PipelineContext() + ctx.params = MockParams() + + result = stage.process(None, ctx) + + assert result is None + + def test_filter_performance_improvement(self): + """Verify significant performance improvement (288x reduction).""" + # With 1438 items and 24-line viewport: + # - Without filter: FontStage renders all 1438 items + # - With filter: FontStage renders only ~5 items + # - Improvement: 1438 / 3 = ~479x fewer items to render + # (layout-based filtering is more precise than old estimate) + + test_items = [ + SourceItem(f"Headline {i}", "source", str(i)) for i in range(1438) + ] + + stage = ViewportFilterStage() + ctx = PipelineContext() + ctx.params = MockParams(viewport_height=24) + + filtered = stage.process(test_items, ctx) + improvement_factor = len(test_items) / len(filtered) + + # Verify we get significant improvement (360x with 4 items vs 1438) + assert improvement_factor > 300 + assert 300 < improvement_factor < 500 + + +class TestViewportFilterIntegration: + """Test 
ViewportFilterStage in pipeline context.""" + + def test_filter_output_is_source_items(self): + """Verify filter output can be consumed by FontStage.""" + from engine.pipeline.adapters import FontStage + + test_items = [ + SourceItem("Test Headline", "test-source", "123") for _ in range(10) + ] + + filter_stage = ViewportFilterStage() + font_stage = FontStage() + + ctx = PipelineContext() + ctx.params = MockParams() + + # Filter items + filtered = filter_stage.process(test_items, ctx) + + # Verify filtered output is compatible with FontStage + assert isinstance(filtered, list) + assert all(isinstance(item, SourceItem) for item in filtered) + + # FontStage should accept the filtered items + # (This would throw if types were incompatible) + result = font_stage.process(filtered, ctx) + assert result is not None + + def test_filter_preserves_item_order(self): + """Verify filter preserves order of first N items.""" + test_items = [SourceItem(f"Headline {i}", "source", str(i)) for i in range(20)] + + stage = ViewportFilterStage() + ctx = PipelineContext() + ctx.params = MockParams(viewport_height=24) + + filtered = stage.process(test_items, ctx) + + # Verify we kept the first N items in order + for i, item in enumerate(filtered): + assert item.content == f"Headline {i}" + + +class TestViewportResize: + """Test ViewportFilterStage handles viewport resize correctly.""" + + def test_layout_recomputes_on_width_change(self): + """Test that layout is recomputed when viewport_width changes.""" + stage = ViewportFilterStage() + # Use long headlines that will wrap differently at different widths + items = [ + SourceItem( + f"This is a very long headline number {i} that will definitely wrap at narrow widths", + "test", + str(i), + ) + for i in range(50) + ] + + # Initial render at 80 cols + ctx = PipelineContext() + ctx.params = MockParams(viewport_width=80, viewport_height=24) + ctx.set("camera_y", 0) + + stage.process(items, ctx) + cached_layout_80 = stage._layout.copy() + + # 
Resize to 40 cols - layout should recompute + ctx.params.viewport_width = 40 + stage.process(items, ctx) + cached_layout_40 = stage._layout.copy() + + # With narrower viewport, items wrap to more lines + # So the cumulative heights should be different + assert cached_layout_40 != cached_layout_80, ( + "Layout should recompute when viewport_width changes" + ) + + def test_layout_recomputes_on_height_change(self): + """Test that visible items change when viewport_height changes.""" + stage = ViewportFilterStage() + items = [SourceItem(f"Headline {i}", "test", str(i)) for i in range(100)] + + ctx = PipelineContext() + ctx.set("camera_y", 0) + + # Small viewport - fewer items visible + ctx.params = MockParams(viewport_width=80, viewport_height=12) + result_small = stage.process(items, ctx) + + # Larger viewport - more items visible + ctx.params.viewport_height = 48 + result_large = stage.process(items, ctx) + + # With larger viewport, more items should be visible + assert len(result_large) >= len(result_small) + + def test_camera_y_propagates_to_filter(self): + """Test that camera_y is read from context.""" + stage = ViewportFilterStage() + items = [SourceItem(f"Headline {i}", "test", str(i)) for i in range(100)] + + ctx = PipelineContext() + ctx.params = MockParams(viewport_width=80, viewport_height=24) + + # Camera at y=0 + ctx.set("camera_y", 0) + result_at_0 = stage.process(items, ctx) + + # Camera at y=100 + ctx.set("camera_y", 100) + result_at_100 = stage.process(items, ctx) + + # With different camera positions, different items should be visible + # (unless items are very short) + first_item_at_0 = result_at_0[0].content if result_at_0 else None + first_item_at_100 = result_at_100[0].content if result_at_100 else None + + # The items at different positions should be different + assert first_item_at_0 != first_item_at_100 or first_item_at_0 is None + + def test_resize_handles_edge_case_small_width(self): + """Test that very narrow viewport doesn't crash.""" + 
stage = ViewportFilterStage() + items = [SourceItem("Short", "test", "1")] + + ctx = PipelineContext() + ctx.params = MockParams(viewport_width=10, viewport_height=5) + ctx.set("camera_y", 0) + + # Should not crash with very narrow viewport + result = stage.process(items, ctx) + assert result is not None + assert len(result) > 0 diff --git a/tests/test_vis_offset.py b/tests/test_vis_offset.py new file mode 100644 index 0000000..7777e4c --- /dev/null +++ b/tests/test_vis_offset.py @@ -0,0 +1,31 @@ +from engine.effects.legacy import vis_offset, vis_trunc + + +def test_vis_offset_no_change(): + """vis_offset with offset 0 returns original.""" + result = vis_offset("hello", 0) + assert result == "hello" + + +def test_vis_offset_trims_start(): + """vis_offset skips first N characters.""" + result = vis_offset("hello world", 6) + assert result == "world" + + +def test_vis_offset_handles_ansi(): + """vis_offset handles ANSI codes correctly.""" + result = vis_offset("\033[31mhello\033[0m", 3) + assert result == "lo\x1b[0m" or "lo" in result + + +def test_vis_offset_greater_than_length(): + """vis_offset with offset > length returns empty-ish.""" + result = vis_offset("hi", 10) + assert result == "" + + +def test_vis_trunc_still_works(): + """Ensure vis_trunc still works after changes.""" + result = vis_trunc("hello world", 5) + assert result == "hello" diff --git a/tests/test_websocket.py b/tests/test_websocket.py new file mode 100644 index 0000000..0e6224b --- /dev/null +++ b/tests/test_websocket.py @@ -0,0 +1,395 @@ +""" +Tests for engine.display.backends.websocket module. 
+""" + +from unittest.mock import MagicMock, patch + +import pytest + +from engine.display.backends.websocket import WebSocketDisplay + + +class TestWebSocketDisplayImport: + """Test that websocket module can be imported.""" + + def test_import_does_not_error(self): + """Module imports without error.""" + from engine.display import backends + + assert backends is not None + + +class TestWebSocketDisplayInit: + """Tests for WebSocketDisplay initialization.""" + + def test_default_init(self): + """Default initialization sets correct defaults.""" + with patch("engine.display.backends.websocket.websockets", None): + display = WebSocketDisplay() + assert display.host == "0.0.0.0" + assert display.port == 8765 + assert display.http_port == 8766 + assert display.width == 80 + assert display.height == 24 + + def test_custom_init(self): + """Custom initialization uses provided values.""" + with patch("engine.display.backends.websocket.websockets", None): + display = WebSocketDisplay(host="localhost", port=9000, http_port=9001) + assert display.host == "localhost" + assert display.port == 9000 + assert display.http_port == 9001 + + def test_is_available_when_websockets_present(self): + """is_available returns True when websockets is available.""" + pytest.importorskip("websockets") + display = WebSocketDisplay() + assert display.is_available() is True + + @pytest.mark.skipif( + pytest.importorskip("websockets") is not None, reason="websockets is available" + ) + def test_is_available_when_websockets_missing(self): + """is_available returns False when websockets is not available.""" + display = WebSocketDisplay() + assert display.is_available() is False + + +class TestWebSocketDisplayProtocol: + """Test that WebSocketDisplay satisfies Display protocol.""" + + def test_websocket_display_is_display(self): + """WebSocketDisplay satisfies Display protocol.""" + with patch("engine.display.backends.websocket.websockets", MagicMock()): + display = WebSocketDisplay() + assert 
# NOTE(review): this chunk is the tail of tests/test_websocket.py plus the new
# tests/test_websocket_e2e.py from the patch, reconstructed with proper line
# structure. The first visible fragment belonged to an interface-conformance
# test whose `def` lies above this chunk (it asserts the display exposes
# init/show/clear/cleanup); it is not reproduced here because its definition
# is cut off by the chunk boundary.

import importlib.util

# True when the optional `websockets` package is importable. find_spec probes
# availability without importing the package. The original code used
# `pytest.importorskip("websockets")` inside a skipif() expression, which is
# evaluated at module import: when the package is missing, importorskip raises
# Skipped and the WHOLE module is skipped at collection, so the
# "unavailable" tests below — which are meant to run exactly when websockets
# is absent — could never execute. This flag fixes that.
_WEBSOCKETS_INSTALLED = importlib.util.find_spec("websockets") is not None


class TestWebSocketDisplayMethods:
    """Tests for WebSocketDisplay methods."""

    def test_init_stores_dimensions(self):
        """init stores terminal dimensions."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            display.init(100, 40)
            assert display.width == 100
            assert display.height == 40

    @pytest.mark.skip(reason="port binding conflict in CI environment")
    def test_client_count_initially_zero(self):
        """client_count returns 0 when no clients connected."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            assert display.client_count() == 0

    def test_get_ws_port(self):
        """get_ws_port returns configured port."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay(port=9000)
            assert display.get_ws_port() == 9000

    def test_get_http_port(self):
        """get_http_port returns configured port."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay(http_port=9001)
            assert display.get_http_port() == 9001

    def test_frame_delay_defaults_to_zero(self):
        """get_frame_delay returns 0 by default."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            assert display.get_frame_delay() == 0.0

    def test_set_frame_delay(self):
        """set_frame_delay stores the value."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            display.set_frame_delay(0.05)
            assert display.get_frame_delay() == 0.05


class TestWebSocketDisplayCallbacks:
    """Tests for WebSocketDisplay callback methods."""

    def test_set_client_connected_callback(self):
        """set_client_connected_callback stores callback."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            callback = MagicMock()
            display.set_client_connected_callback(callback)
            assert display._client_connected_callback is callback

    def test_set_client_disconnected_callback(self):
        """set_client_disconnected_callback stores callback."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            callback = MagicMock()
            display.set_client_disconnected_callback(callback)
            assert display._client_disconnected_callback is callback


class TestWebSocketDisplayUnavailable:
    """Tests when WebSocket support is unavailable.

    These tests only make sense when the `websockets` package is NOT
    installed; skip them when it is present.
    """

    @pytest.mark.skipif(_WEBSOCKETS_INSTALLED, reason="websockets is available")
    def test_start_server_noop_when_unavailable(self):
        """start_server does nothing when websockets unavailable."""
        display = WebSocketDisplay()
        display.start_server()
        assert display._server_thread is None

    @pytest.mark.skipif(_WEBSOCKETS_INSTALLED, reason="websockets is available")
    def test_start_http_server_noop_when_unavailable(self):
        """start_http_server does nothing when websockets unavailable."""
        display = WebSocketDisplay()
        display.start_http_server()
        assert display._http_thread is None

    @pytest.mark.skipif(_WEBSOCKETS_INSTALLED, reason="websockets is available")
    def test_show_noops_when_unavailable(self):
        """show does nothing when websockets unavailable."""
        display = WebSocketDisplay()
        display.show(["line1", "line2"])


class TestWebSocketUIPanelIntegration:
    """Tests for WebSocket-UIPanel integration for remote control."""

    def test_set_controller_stores_controller(self):
        """set_controller stores the controller reference."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            mock_controller = MagicMock()
            display.set_controller(mock_controller)
            assert display._controller is mock_controller

    def test_set_command_callback_stores_callback(self):
        """set_command_callback stores the callback."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            callback = MagicMock()
            display.set_command_callback(callback)
            assert display._command_callback is callback

    def test_get_state_snapshot_returns_none_without_controller(self):
        """_get_state_snapshot returns None when no controller is set."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            assert display._get_state_snapshot() is None

    def test_get_state_snapshot_returns_controller_state(self):
        """_get_state_snapshot returns state from controller."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()

            # Create mock controller with expected attributes
            mock_controller = MagicMock()
            mock_controller.stages = {
                "test_stage": MagicMock(
                    enabled=True, params={"intensity": 0.5}, selected=False
                )
            }
            mock_controller._current_preset = "demo"
            mock_controller._presets = ["demo", "test"]
            mock_controller.selected_stage = "test_stage"

            display.set_controller(mock_controller)
            state = display._get_state_snapshot()

            assert state is not None
            assert "stages" in state
            assert "test_stage" in state["stages"]
            assert state["stages"]["test_stage"]["enabled"] is True
            assert state["stages"]["test_stage"]["params"] == {"intensity": 0.5}
            assert state["preset"] == "demo"
            assert state["presets"] == ["demo", "test"]
            assert state["selected_stage"] == "test_stage"

    def test_get_state_snapshot_handles_missing_attributes(self):
        """_get_state_snapshot handles controller without all attributes."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()

            # Create mock controller without stages attribute using spec.
            # This prevents MagicMock from auto-creating the attribute.
            mock_controller = MagicMock(spec=[])  # Empty spec means no attributes

            display.set_controller(mock_controller)
            state = display._get_state_snapshot()

            assert state == {}

    def test_broadcast_state_sends_to_clients(self):
        """broadcast_state sends state update to all connected clients."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()

            # Mock client with send method
            mock_client = MagicMock()
            mock_client.send = MagicMock()
            display._clients.add(mock_client)

            test_state = {"test": "state"}
            display.broadcast_state(test_state)

            # Verify send was called with JSON containing state
            mock_client.send.assert_called_once()
            call_args = mock_client.send.call_args[0][0]
            assert '"type": "state"' in call_args
            assert '"test"' in call_args

    def test_broadcast_state_noop_when_no_clients(self):
        """broadcast_state does nothing when no clients connected."""
        with patch("engine.display.backends.websocket.websockets", MagicMock()):
            display = WebSocketDisplay()
            display._clients.clear()

            # Should not raise error
            display.broadcast_state({"test": "state"})


class TestWebSocketHTTPServerPath:
    """Tests for WebSocket HTTP server client directory path calculation."""

    def test_client_dir_path_calculation(self):
        """Client directory path is correctly calculated from websocket.py location."""
        import os

        # Use the actual websocket.py file location, not the test file
        websocket_module = __import__(
            "engine.display.backends.websocket", fromlist=["WebSocketDisplay"]
        )
        websocket_file = websocket_module.__file__
        parts = websocket_file.split(os.sep)

        if "engine" in parts:
            engine_idx = parts.index("engine")
            project_root = os.sep.join(parts[:engine_idx])
            client_dir = os.path.join(project_root, "client")
        else:
            # Fallback calculation (shouldn't happen in normal test runs)
            client_dir = os.path.join(
                os.path.dirname(
                    os.path.dirname(os.path.dirname(os.path.dirname(websocket_file)))
                ),
                "client",
            )

        # Verify the client directory exists and contains expected files
        assert os.path.exists(client_dir), f"Client directory not found: {client_dir}"
        assert "index.html" in os.listdir(client_dir), (
            "index.html not found in client directory"
        )
        assert "editor.html" in os.listdir(client_dir), (
            "editor.html not found in client directory"
        )

        # Verify the path is correct (should be .../Mainline/client)
        assert client_dir.endswith("client"), (
            f"Client dir should end with 'client': {client_dir}"
        )
        assert "Mainline" in client_dir, (
            f"Client dir should contain 'Mainline': {client_dir}"
        )

    def test_http_server_directory_serves_client_files(self):
        """HTTP server directory correctly serves client files."""
        import os

        # Use the actual websocket.py file location, not the test file
        websocket_module = __import__(
            "engine.display.backends.websocket", fromlist=["WebSocketDisplay"]
        )
        websocket_file = websocket_module.__file__
        parts = websocket_file.split(os.sep)

        if "engine" in parts:
            engine_idx = parts.index("engine")
            project_root = os.sep.join(parts[:engine_idx])
            client_dir = os.path.join(project_root, "client")
        else:
            client_dir = os.path.join(
                os.path.dirname(
                    os.path.dirname(os.path.dirname(os.path.dirname(websocket_file)))
                ),
                "client",
            )

        # Verify the handler would be able to serve files from this directory.
        # We can't actually instantiate the handler without a valid request,
        # but we can verify the directory is accessible.
        assert os.access(client_dir, os.R_OK), (
            f"Client directory not readable: {client_dir}"
        )

        # Verify key files exist
        index_path = os.path.join(client_dir, "index.html")
        editor_path = os.path.join(client_dir, "editor.html")

        assert os.path.exists(index_path), f"index.html not found at: {index_path}"
        assert os.path.exists(editor_path), f"editor.html not found at: {editor_path}"

        # Verify files are readable
        assert os.access(index_path, os.R_OK), "index.html not readable"
        assert os.access(editor_path, os.R_OK), "editor.html not readable"

    def test_old_buggy_path_does_not_find_client_directory(self):
        """The old buggy path (3 dirname calls) should NOT find the client directory.

        This test verifies that the old buggy behavior would have failed.
        The old code used:
            client_dir = os.path.join(
                os.path.dirname(os.path.dirname(os.path.dirname(__file__))), "client"
            )

        This would resolve to: .../engine/client (which doesn't exist)
        Instead of: .../Mainline/client (which does exist)
        """
        import os

        # Use the actual websocket.py file location
        websocket_module = __import__(
            "engine.display.backends.websocket", fromlist=["WebSocketDisplay"]
        )
        websocket_file = websocket_module.__file__

        # OLD BUGGY CODE: 3 dirname calls
        old_buggy_client_dir = os.path.join(
            os.path.dirname(os.path.dirname(os.path.dirname(websocket_file))), "client"
        )

        # This path should NOT exist (it's the buggy path)
        assert not os.path.exists(old_buggy_client_dir), (
            f"Old buggy path should not exist: {old_buggy_client_dir}\n"
            f"If this assertion fails, the bug may have been fixed elsewhere or "
            f"the test needs updating."
        )

        # The buggy path should be .../engine/client, not .../Mainline/client
        assert old_buggy_client_dir.endswith("engine/client"), (
            f"Old buggy path should end with 'engine/client': {old_buggy_client_dir}"
        )

        # Verify that going up one more level (4 dirname calls) finds the correct path
        correct_client_dir = os.path.join(
            os.path.dirname(
                os.path.dirname(os.path.dirname(os.path.dirname(websocket_file)))
            ),
            "client",
        )
        assert os.path.exists(correct_client_dir), (
            f"Correct path should exist: {correct_client_dir}"
        )
        assert "index.html" in os.listdir(correct_client_dir), (
            f"index.html should exist in correct path: {correct_client_dir}"
        )


# --- tests/test_websocket_e2e.py (new file added by this patch) ---

"""
End-to-end tests for WebSocket display using Playwright.
"""

import importlib.util
import time

import pytest

# Probe playwright availability without importing it. The original decorator
# called `pytest.importorskip("playwright", ...)` inside a skipif()
# expression; importorskip raises Skipped at module-import time when the
# package is missing, which would skip EVERY test in this file — including
# test_websocket_server_starts, which does not need playwright at all.
_PLAYWRIGHT_INSTALLED = importlib.util.find_spec("playwright") is not None


class TestWebSocketE2E:
    """End-to-end tests for WebSocket display with browser."""

    @pytest.mark.e2e
    def test_websocket_server_starts(self):
        """Test that WebSocket server starts and serves HTTP."""
        import threading

        from engine.display.backends.websocket import WebSocketDisplay

        display = WebSocketDisplay(host="127.0.0.1", port=18765)

        server_thread = threading.Thread(target=display.start_http_server)
        server_thread.daemon = True
        server_thread.start()

        time.sleep(1)

        try:
            import urllib.request

            response = urllib.request.urlopen("http://127.0.0.1:18765", timeout=5)
            assert response.status == 200
            content = response.read().decode("utf-8")
            assert len(content) > 0
        finally:
            display.cleanup()
            time.sleep(0.5)

    @pytest.mark.e2e
    @pytest.mark.skipif(not _PLAYWRIGHT_INSTALLED, reason="playwright not installed")
    def test_websocket_browser_connection(self):
        """Test WebSocket connection with actual browser."""
        import threading

        from playwright.sync_api import sync_playwright

        from engine.display.backends.websocket import WebSocketDisplay

        display = WebSocketDisplay(host="127.0.0.1", port=18767)

        server_thread = threading.Thread(target=display.start_server)
        server_thread.daemon = True
        server_thread.start()

        http_thread = threading.Thread(target=display.start_http_server)
        http_thread.daemon = True
        http_thread.start()

        time.sleep(1)

        try:
            with sync_playwright() as p:
                browser = p.chromium.launch(headless=True)
                page = browser.new_page()

                page.goto("http://127.0.0.1:18767")
                time.sleep(0.5)

                title = page.title()
                assert len(title) >= 0

                browser.close()
        finally:
            display.cleanup()
            time.sleep(0.5)