#!/usr/bin/env python3
|
|
"""
|
|
FastLED Test Build System - Python Compiler API Integration
|
|
|
|
This module provides a high-performance test compilation system built on the proven
|
|
ci.compiler.clang_compiler API that delivers 8x faster build times compared to CMake.
|
|
|
|
Key Features:
|
|
- Parallel test compilation using ThreadPoolExecutor
|
|
- 90%+ cache hit rates with proven compiler settings
|
|
- Integration with build_unit.toml configuration
|
|
- Support for STUB platform testing without hardware dependencies
|
|
- GDB-compatible debug symbol generation
|
|
- Consistent performance across all platforms
|
|
|
|
"""
|
|
|
|
import hashlib
|
|
import os
|
|
import shutil
|
|
import subprocess
|
|
import sys
|
|
import tempfile
|
|
import textwrap
|
|
import time
|
|
import tomllib
|
|
from concurrent.futures import Future, as_completed
|
|
from dataclasses import dataclass, field
|
|
from pathlib import Path
|
|
from types import TracebackType
|
|
from typing import Any, Callable, Dict, List, Optional
|
|
|
|
from ci.util.paths import PROJECT_ROOT
|
|
|
|
|
|
def optimize_python_command(cmd: list[str]) -> list[str]:
    """
    Rewrite a python/python3 invocation to run under uv.

    Running ``uv run python`` (instead of the bare interpreter) guarantees
    access to uv-managed packages such as ziglang; invoking sys.executable
    directly would bypass the uv environment.

    Args:
        cmd: Command list whose first element may be 'python' or 'python3'.

    Returns:
        list[str]: The command with the interpreter replaced by
        'uv run python', or the original list unchanged.
    """
    if not cmd or cmd[0] not in ("python", "python3"):
        return cmd
    # Route through uv so uv-managed packages are importable.
    return ["uv", "run", "python", *cmd[1:]]
|
|
|
|
|
|
from ci.compiler.clang_compiler import (
|
|
BuildFlags,
|
|
BuildTools,
|
|
Compiler,
|
|
CompilerOptions,
|
|
LinkOptions,
|
|
Result,
|
|
create_compiler_options_from_toml,
|
|
link_program_sync,
|
|
load_build_flags_from_toml,
|
|
)
|
|
|
|
|
|
@dataclass
class TestExecutable:
    """Represents a compiled test executable"""

    # Short test name without the "test_" prefix (e.g. "math" for test_math.cpp).
    name: str
    # Path the test runs from (copied into tests/bin/test_<name>.exe).
    executable_path: Path
    # Original C++ source file the executable was built from.
    test_source_path: Path
|
|
|
|
|
|
@dataclass
class CompileResult:
    """Result of test compilation operation.

    Attributes:
        success: True only when every test both compiled AND linked.
        compiled_count: Number of test executables produced.
        duration: Wall-clock build time in seconds.
        errors: Per-test compile/link failures (empty on success).
    """

    success: bool
    compiled_count: int
    duration: float
    # default_factory=list replaces the redundant `lambda: list()` wrapper;
    # each instance still gets its own fresh list.
    errors: List["CompileError"] = field(default_factory=list)
|
|
|
|
|
|
@dataclass
class CompileError:
    """Compilation error for a specific test"""

    # Stem of the failing test file (e.g. "test_foo"); for link failures
    # this is the parsed test name from the linking-failure string.
    test_name: str
    # Diagnostic text (compiler stderr, falling back to stdout).
    message: str
|
|
|
|
|
|
class FastLEDTestCompiler:
|
|
"""
|
|
Test compiler built on proven Compiler API for 8x faster builds.
|
|
|
|
    This class leverages the proven ci.compiler.clang_compiler.Compiler API that has already
|
|
demonstrated 4x+ performance improvements in example compilation. By using the same
|
|
proven patterns and configurations, we inherit the optimized build flags, caching,
|
|
and parallel compilation capabilities.
|
|
"""
|
|
|
|
# Class variable to store existing instance for get_existing_instance()
|
|
_existing_instance: Optional["FastLEDTestCompiler"] = None
|
|
|
|
    def __init__(
        self,
        compiler: Compiler,
        build_dir: Path,
        project_root: Path,
        quick_build: bool = True,
        strict_mode: bool = False,
        no_unity: bool = False,
    ):
        """Initialize the test compiler.

        Args:
            compiler: Configured Compiler used for all compile jobs.
            build_dir: Directory receiving object files, libraries and executables.
            project_root: Repository root (used to locate src/, tests/, ci/).
            quick_build: Select "quick" build-mode flags from build_unit.toml.
            strict_mode: Enable strict-mode compiler flags.
            no_unity: Compile individual source files instead of a unity build.
        """
        self.compiler = compiler
        self.build_dir = build_dir
        self.project_root = project_root
        self.quick_build = quick_build
        self.strict_mode = strict_mode
        self.no_unity = no_unity
        # Populated by compile_all_tests() / _link_tests().
        self.compiled_tests: List[TestExecutable] = []
        self.linking_failures: List[str] = []
        # Platform-aware flags parsed from ci/build_unit.toml.
        self.build_flags = self._load_build_flags()
        # Remember this instance so get_existing_instance() can reuse it.
        FastLEDTestCompiler._existing_instance = self
|
|
|
|
def _load_build_flags(self) -> BuildFlags:
|
|
"""Load build flags from TOML configuration"""
|
|
toml_path = self.project_root / "ci" / "build_unit.toml"
|
|
return BuildFlags.parse(toml_path, self.quick_build, self.strict_mode)
|
|
|
|
    @classmethod
    def create_for_unit_tests(
        cls,
        project_root: Path,
        clean_build: bool = False,
        enable_static_analysis: bool = False,
        specific_test: str | None = None,
        quick_build: bool = True,
        strict_mode: bool = False,
        no_unity: bool = False,
    ) -> "FastLEDTestCompiler":
        """
        Create compiler configured for FastLED unit tests using TOML build flags.

        This configuration uses the new TOML-based build flag system:
        - ci/build_unit.toml for centralized flag configuration
        - Support for build modes (quick/debug) and strict mode
        - STUB platform for hardware-free testing
        - Precompiled headers for faster compilation
        - Parallel compilation with ThreadPoolExecutor

        Args:
            project_root: Repository root containing src/, tests/ and ci/.
            clean_build: Remove .build/fled/unit before building.
            enable_static_analysis: NOTE(review): accepted but never read in
                this method - confirm whether it should wire up IWYU/clang-tidy.
            specific_test: NOTE(review): accepted but never read here; test
                filtering happens later in compile_all_tests() - confirm intent.
            quick_build: Use the "quick" build mode from build_unit.toml.
            strict_mode: Enable strict-mode compiler flags.
            no_unity: Compile individual source files instead of a unity build.

        Returns:
            FastLEDTestCompiler: Configured instance (also stored as the
            class-level "existing instance").
        """

        # Set up build directory in .build/fled/unit for consistency
        build_dir = project_root / ".build" / "fled" / "unit"
        if clean_build and build_dir.exists():
            print("###########################")
            print("# CLEANING UNIT TEST BUILD DIR #")
            print("###########################")
            # Local imports keep these Windows-cleanup helpers out of the
            # module namespace; os/shutil/time shadow the module-level imports.
            import errno
            import os
            import shutil
            import stat
            import time

            def handle_remove_readonly(
                func: Callable[[str], None],
                path: str,
                exc: tuple[type[BaseException], BaseException, TracebackType | None],
            ) -> None:
                """Error handler for Windows readonly files"""
                # On EACCES, clear the read-only bit and retry the failed
                # operation; anything else is re-raised to the caller.
                if hasattr(exc[1], "errno") and exc[1].errno == errno.EACCES:  # type: ignore
                    os.chmod(path, stat.S_IWRITE)
                    func(path)
                else:
                    raise

            # Windows-compatible directory removal with retry
            max_retries = 3
            for attempt in range(max_retries):
                try:
                    # Use onerror parameter for compatibility across Python versions
                    # NOTE(review): onerror is deprecated in favor of onexc on
                    # Python 3.12+ - confirm minimum supported version.
                    shutil.rmtree(build_dir, onerror=handle_remove_readonly)
                    break
                except (OSError, PermissionError) as e:
                    if attempt < max_retries - 1:
                        print(
                            f"Warning: Failed to remove build directory (attempt {attempt + 1}): {e}"
                        )
                        time.sleep(0.01)  # Brief pause before retry
                        continue
                    else:
                        # Best effort: a stale build dir is tolerated rather
                        # than aborting the whole test run.
                        print(
                            f"Warning: Could not remove build directory after {max_retries} attempts: {e}"
                        )
                        print("Continuing with existing directory...")
                        break
        build_dir.mkdir(parents=True, exist_ok=True)

        # Additional compiler args beyond TOML
        additional_compiler_args = [
            f"-I{project_root}/src/platforms/stub",  # STUB platform headers
        ]

        # Load build flags using BuildFlags.parse() to get platform-specific configuration
        toml_path = project_root / "ci" / "build_unit.toml"
        build_flags = BuildFlags.parse(toml_path, quick_build, strict_mode)

        # Load TOML config for test-specific flags that aren't in BuildFlags
        with open(toml_path, "rb") as f:
            config = tomllib.load(f)

        # Get test-specific defines from TOML (not included in BuildFlags.parse)
        test_defines = config.get("test", {}).get("defines", [])

        # Create compiler args from BuildFlags (includes platform-specific flags)
        compiler_args: List[str] = []
        compiler_args.extend(
            build_flags.compiler_flags
        )  # Platform-aware compiler flags
        compiler_args.extend(build_flags.include_flags)  # Platform-aware include flags

        # Add strict mode flags if enabled (already handled by BuildFlags.parse)

        # Add additional compiler args
        if additional_compiler_args:
            compiler_args.extend(additional_compiler_args)

        # Extract defines without the "-D" prefix for CompilerOptions
        defines: List[str] = []
        for define in test_defines:
            if define.startswith("-D"):
                defines.append(define[2:])  # Remove "-D" prefix
            else:
                defines.append(define)

        # Get tools configuration from BuildFlags (respects build_unit.toml)
        # Use modern command-based approach
        compiler_command = build_flags.tools.cpp_compiler

        print(f"Using compiler from build_unit.toml: {compiler_command}")

        # Use compiler command from TOML + flags as final compiler args
        final_compiler_args = compiler_command + compiler_args

        # Create compiler options with TOML-loaded flags and tools
        settings = CompilerOptions(
            include_path=str(project_root / "src"),
            defines=defines,
            compiler_args=final_compiler_args,  # Use cache-aware compiler args
            # Note: archiver and compiler fields are now handled via command-based approach
        )

        compiler = Compiler(settings, build_flags)

        # Create final instance with proper compiler and flags
        instance = cls(
            compiler, build_dir, project_root, quick_build, strict_mode, no_unity
        )
        return instance
|
|
|
|
def discover_test_files(self, specific_test: str | None = None) -> List[Path]:
|
|
"""
|
|
Discover test_*.cpp files using proven patterns.
|
|
|
|
Uses the same file discovery patterns as the existing CMake system
|
|
to ensure 100% compatibility with current test discovery logic.
|
|
"""
|
|
tests_dir = Path(PROJECT_ROOT) / "tests"
|
|
test_files: List[Path] = []
|
|
|
|
# Find all test files in tests directory and subdirectories
|
|
for test_file in tests_dir.rglob("test_*.cpp"):
|
|
# Skip the doctest_main.cpp file (not a test file)
|
|
if test_file.name == "doctest_main.cpp":
|
|
continue
|
|
|
|
# Handle both "test_name" and "name" formats for compatibility
|
|
test_stem = test_file.stem
|
|
test_name = test_stem.replace("test_", "")
|
|
|
|
# Check if we should do fuzzy matching (if there's a * in the name)
|
|
if specific_test:
|
|
if "*" in specific_test:
|
|
# Convert glob pattern to regex pattern
|
|
import re
|
|
|
|
pattern = specific_test.replace("*", ".*")
|
|
if re.search(pattern, test_stem) or re.search(pattern, test_name):
|
|
test_files.append(test_file)
|
|
else:
|
|
# Exact matching - either the full name or with test_ prefix (case-insensitive)
|
|
if (
|
|
test_stem.lower() == specific_test.lower()
|
|
or test_stem.lower() == f"test_{specific_test}".lower()
|
|
or test_name.lower() == specific_test.lower()
|
|
):
|
|
test_files.append(test_file)
|
|
else:
|
|
test_files.append(test_file)
|
|
|
|
return test_files
|
|
|
|
    def compile_all_tests(self, specific_test: str | None = None) -> CompileResult:
        """
        Compile all tests in parallel using proven API patterns.

        This method uses the same parallel compilation patterns proven in
        example compilation that deliver 4x+ performance improvements.

        Args:
            specific_test: Optional name/glob filter forwarded to
                discover_test_files(); None compiles every test.

        Returns:
            CompileResult: success only when every test both compiled AND
            linked; `errors` carries per-test compile and link failures.
        """
        compile_start = time.time()

        # If specific_test is not provided in the method call, use the one from discover_test_files
        test_files = self.discover_test_files(specific_test)

        # Nothing to build is treated as success, not an error.
        if not test_files:
            return CompileResult(success=True, compiled_count=0, duration=0.0)

        print(f"Compiling {len(test_files)} test files using proven Python API...")
        # Convert absolute path to relative for display
        rel_build_dir = os.path.relpath(self.build_dir)
        print(f"Build directory: {rel_build_dir}")

        # Always show build configuration in unit test mode
        print("\nFastLED Library Build Configuration:")
        if self.no_unity:
            print(" ❌ Unity build disabled - Compiling individual source files")
        else:
            print(" ✅ Unity build enabled - Using glob pattern:")
            print(" src/**/*.cpp")

        print("\nPrecompiled header status:")
        if self.compiler.settings.use_pch:
            print(" ✅ PCH enabled - Using precompiled headers for faster compilation")
            print(" PCH content:")
            print(textwrap.indent(self.compiler.generate_pch_header(), " "))

            # Create PCH for faster compilation; a failure here is non-fatal
            # and falls back to direct compilation.
            print("\n🔧 Creating precompiled header...")
            pch_success = self.compiler.create_pch_file()
            if pch_success:
                print(" ✅ PCH created successfully")
            else:
                print(" ❌ PCH creation failed - continuing with direct compilation")
        else:
            print(" ❌ PCH disabled - Not using precompiled headers")

        print("\nCompiler flags:")
        for flag in self.compiler.get_compiler_args():
            print(f" {flag}")

        print("\nLinker flags:")
        for flag in self._get_platform_linker_args(self.build_dir / "libfastled.lib"):
            print(f" {flag}")

        print("")

        # Print list of test files being compiled
        print("Test files to compile:")
        for i, test_file in enumerate(test_files, 1):
            print(f" {i}. {test_file.name}")
        print("")

        # Submit parallel compilation jobs (proven pattern from example compilation)
        future_to_test: Dict[Future[Result], Path] = {}

        # Check if we're running in parallel or sequential mode
        # NOTE(review): this only changes the printed message; any actual
        # sequential behavior is presumably handled inside the Compiler - confirm.
        if os.environ.get("NO_PARALLEL"):
            print("Starting sequential compilation of test files...")
        else:
            print("Starting parallel compilation of test files...")
        for test_file in test_files:
            # Compile to object file first (since compile_cpp_file uses -c flag)
            obj_path = self.build_dir / f"{test_file.stem}.o"
            # Convert absolute path to relative for display
            rel_obj_path = os.path.relpath(obj_path)
            print(f"Submitting compilation job for: {test_file.name} -> {rel_obj_path}")
            # Show compilation command if enabled
            if os.environ.get("FASTLED_TEST_SHOW_COMPILE", "").lower() in (
                "1",
                "true",
                "yes",
            ):
                # Convert absolute path to relative for display
                rel_obj_path = os.path.relpath(obj_path)
                print(f"[COMPILE] {test_file.name} -> {rel_obj_path}")

            # compile_cpp_file returns a Future; work starts immediately.
            compile_future = self.compiler.compile_cpp_file(
                test_file,
                output_path=obj_path,
                additional_flags=[
                    "-I",
                    str(self.project_root / "src"),
                    "-I",
                    str(self.project_root / "tests"),
                    # NOTE: Compiler flags now come from build_unit.toml
                ],
            )
            future_to_test[compile_future] = test_file

        # Collect compilation results with progress reporting
        compiled_objects: List[Path] = []
        errors: List[CompileError] = []
        completed = 0

        print(f"Waiting for {len(future_to_test)} compilation jobs to complete...")

        try:
            # as_completed yields futures in finish order, enabling live progress.
            for future in as_completed(future_to_test.keys()):
                test_file = future_to_test[future]
                result: Result = future.result()
                completed += 1

                if result.ok:
                    obj_path = self.build_dir / f"{test_file.stem}.o"
                    compiled_objects.append(obj_path)
                    print(f"[{completed}/{len(test_files)}] Compiled {test_file.name}")
                else:
                    errors.append(
                        CompileError(
                            test_name=test_file.stem,
                            message=result.stderr
                            or result.stdout
                            or "Compilation failed",
                        )
                    )
                    print(
                        f"[{completed}/{len(test_files)}] FAILED {test_file.name}: {result.stderr}"
                    )
        except KeyboardInterrupt:
            print("\nKeyboard interrupt detected during compilation")
            # Clean up any in-progress compilations
            for future in future_to_test.keys():
                future.cancel()
            # Propagate the interrupt to the main thread as well.
            import _thread

            _thread.interrupt_main()
            raise

        # Any compile error aborts before linking.
        if errors:
            duration = time.time() - compile_start
            print(f"Compilation failed with {len(errors)} errors in {duration:.2f}s")
            return CompileResult(
                success=False, compiled_count=0, duration=duration, errors=errors
            )

        print(f"All {len(compiled_objects)} object files compiled successfully")

        # Link each test to executable using proven linking API
        self.compiled_tests = self._link_tests(compiled_objects)

        # Check for linking failures and add them to errors
        if hasattr(self, "linking_failures") and self.linking_failures:
            for failure in self.linking_failures:
                # Parse the "name: message" failure string produced by _link_tests.
                if ":" in failure:
                    test_name, error_msg = failure.split(":", 1)
                    errors.append(
                        CompileError(
                            test_name=test_name.strip(),
                            message=f"Linking failed: {error_msg.strip()}",
                        )
                    )

        duration = time.time() - compile_start

        # Success requires both compilation AND linking to succeed
        success = len(errors) == 0 and len(self.compiled_tests) > 0

        if success:
            print(
                f"SUCCESS: Compiled and linked {len(self.compiled_tests)} tests in {duration:.2f}s"
            )
            # List all the successful test executables
            print("Test executables created:")
            for i, test in enumerate(self.compiled_tests, 1):
                # Convert absolute path to relative for display
                rel_exe_path = os.path.relpath(test.executable_path)
                print(f" {i}. {test.name} -> {rel_exe_path}")
        else:
            print(f"FAILED: {len(errors)} total failures (compilation + linking)")

        return CompileResult(
            success=success,
            compiled_count=len(self.compiled_tests),
            duration=duration,
            errors=errors,
        )
|
|
|
|
    def _link_tests(self, compiled_objects: List[Path]) -> List[TestExecutable]:
        """Link each test to executable using proven linking API.

        For every test object file: determine whether the test supplies its
        own main() (standalone) or needs doctest_main.o, compute a content
        cache key (library + object(s) + linker flags), and either reuse a
        cached executable or link a fresh one. Linked executables are copied
        into tests/bin/ for execution. Failures are recorded on
        self.linking_failures rather than raising.
        """
        print(f"Linking {len(compiled_objects)} test executables...")

        # First, build a complete FastLED library similar to CMake approach
        fastled_lib_path = self._build_fastled_library()

        # Compile doctest_main.cpp once for all tests (provides main function and doctest implementation)
        doctest_main_path = self.project_root / "tests" / "doctest_main.cpp"
        doctest_obj_path = self.build_dir / "doctest_main.o"

        if not doctest_obj_path.exists():
            print("Compiling doctest_main.cpp...")
            doctest_future = self.compiler.compile_cpp_file(
                doctest_main_path,
                output_path=doctest_obj_path,
                additional_flags=["-c"],  # Compile only
            )
            doctest_result: Result = doctest_future.result()
            if not doctest_result.ok:
                # Without doctest_main no doctest-based test can link; bail out.
                print(
                    f"ERROR: Failed to compile doctest_main.cpp: {doctest_result.stderr}"
                )
                return []

        print(f"Compiled FastLED library and doctest_main")

        compiled_tests: List[TestExecutable] = []
        linking_failures: List[str] = []
        cache_hits = 0
        cache_misses = 0

        # Link each test with the FastLED library
        for obj_path in compiled_objects:
            # Derive test name from object file
            test_name = obj_path.stem.replace("test_", "")
            exe_path = self.build_dir / f"test_{test_name}.exe"

            # Define clean execution path in tests/bin/ (this is where tests will actually run from)
            tests_bin_dir = self.project_root / "tests" / "bin"
            tests_bin_dir.mkdir(parents=True, exist_ok=True)
            clean_exe_path = tests_bin_dir / f"test_{test_name}.exe"

            # Check if this test has its own main() function
            test_source = self.project_root / "tests" / f"test_{test_name}.cpp"
            has_own_main = self._test_has_own_main(test_source)

            # Get linker args (needed for cache key calculation)
            linker_args = self._get_platform_linker_args(fastled_lib_path)

            # Calculate comprehensive cache key based on all linking inputs
            if has_own_main:
                # For standalone tests, only include test object file
                cache_key = self._calculate_link_cache_key(
                    obj_path, fastled_lib_path, linker_args
                )
            else:
                # For doctest tests, include both test object and doctest_main object
                # Calculate hashes for both object files and combine them
                test_obj_hash = self._calculate_file_hash(obj_path)
                doctest_obj_hash = self._calculate_file_hash(doctest_obj_path)
                combined_obj_hash = hashlib.sha256(
                    f"{test_obj_hash}+{doctest_obj_hash}".encode("utf-8")
                ).hexdigest()

                # Create a temporary cache key with combined object hash
                # (same shape as _calculate_link_cache_key, truncated to 16 chars).
                fastled_hash = self._calculate_file_hash(fastled_lib_path)
                linker_hash = self._calculate_linker_args_hash(linker_args)
                combined = f"fastled:{fastled_hash}|test:{combined_obj_hash}|flags:{linker_hash}"
                cache_key = hashlib.sha256(combined.encode("utf-8")).hexdigest()[:16]

            # Check for cached executable based on comprehensive cache key
            cached_exe = self._get_cached_executable(test_name, cache_key)
            if cached_exe:
                print(f" {test_name}: Using cached executable (link cache hit)")
                try:
                    shutil.copy2(cached_exe, exe_path)
                    # Also copy to clean execution location
                    shutil.copy2(cached_exe, clean_exe_path)
                    cache_hits += 1

                    test_exe = TestExecutable(
                        name=test_name,
                        executable_path=clean_exe_path,  # Point to clean location for execution
                        test_source_path=test_source,
                    )
                    compiled_tests.append(test_exe)
                    continue  # Skip linking, we have cached result
                except Exception as e:
                    # A broken cache entry is not fatal; relink below.
                    print(
                        f" {test_name}: Warning - failed to copy cached executable, will relink: {e}"
                    )
                    # Fall through to linking logic

            if has_own_main:
                # Link standalone test without doctest_main.o
                link_options = LinkOptions(
                    object_files=[obj_path],  # Only the test object file
                    output_executable=exe_path,
                    linker=f'"{sys.executable}" -m ziglang c++',  # Use optimized Python executable for linking
                    linker_args=linker_args,  # Use pre-calculated args for consistency
                )
            else:
                # Link with the FastLED library instead of individual objects
                link_options = LinkOptions(
                    object_files=[
                        obj_path,
                        doctest_obj_path,
                    ],  # Test object + doctest main
                    output_executable=exe_path,
                    linker=f'"{sys.executable}" -m ziglang c++',  # Use optimized Python executable for linking
                    linker_args=linker_args,  # Use pre-calculated args for consistency
                )

            # Show linking command if enabled
            if os.environ.get("FASTLED_TEST_SHOW_LINK", "").lower() in (
                "1",
                "true",
                "yes",
            ):
                # Convert absolute path to relative for display
                rel_exe_path = os.path.relpath(exe_path)
                print(f"[LINK] {test_name} -> {rel_exe_path}")

            print(f"Linking test: {test_name}")
            # STRICT: Provide explicit BuildFlags - NO defaults allowed
            link_result: Result = link_program_sync(
                link_options, self.compiler.build_flags
            )

            if not link_result.ok:
                # Create more detailed error message showing link command context
                error_msg = f" {test_name}: Linking failed - Linker: {link_options.linker}, Output: {link_options.output_executable}, Objects: {len(link_options.object_files)} files"
                print(error_msg)
                print(f" Error details: {link_result.stderr}")
                # "name: message" format is parsed later by compile_all_tests.
                linking_failures.append(f"{test_name}: {link_result.stderr}")
                continue

            print(f" {test_name}: Linking successful")
            cache_misses += 1

            # Cache the newly linked executable for future use
            self._cache_executable(test_name, cache_key, exe_path)

            # Copy to clean execution location
            shutil.copy2(exe_path, clean_exe_path)

            test_exe = TestExecutable(
                name=test_name,
                executable_path=clean_exe_path,
                test_source_path=test_source,  # Point to clean location for execution
            )
            compiled_tests.append(test_exe)

        if linking_failures:
            print(f"ERROR: {len(linking_failures)} linking failures:")
            for failure in linking_failures:
                print(f" {failure}")
            # Store linking failures for later reporting
            self.linking_failures = linking_failures
        else:
            self.linking_failures = []

        # Report link cache statistics
        total_tests = cache_hits + cache_misses
        if total_tests > 0:
            hit_rate = (cache_hits / total_tests) * 100
            print(
                f"Link cache: {cache_hits} hits, {cache_misses} misses ({hit_rate:.1f}% hit rate) - caching by lib+test+flags"
            )

        print(f"Successfully linked {len(compiled_tests)} test executables")
        return compiled_tests
|
|
|
|
def _test_has_own_main(self, test_source_path: Path) -> bool:
|
|
"""Check if a test file defines its own main() function"""
|
|
try:
|
|
if not test_source_path.exists():
|
|
return False
|
|
|
|
content = test_source_path.read_text(encoding="utf-8")
|
|
# Look for main function definition (simple pattern matching)
|
|
import re
|
|
|
|
# Match "int main(" allowing for whitespace and various formats
|
|
main_pattern = r"\bint\s+main\s*\("
|
|
return bool(re.search(main_pattern, content))
|
|
except Exception:
|
|
# If we can't read the file, assume it doesn't have main
|
|
return False
|
|
|
|
def _build_fastled_library(self) -> Path:
|
|
"""Build a complete FastLED static library like CMake does"""
|
|
print("Building FastLED static library...")
|
|
|
|
# Define library path
|
|
fastled_lib_path = self.build_dir / "libfastled.lib"
|
|
|
|
# If library already exists, return it (for faster rebuilds)
|
|
if fastled_lib_path.exists():
|
|
# Convert absolute path to relative for display
|
|
rel_lib_path = os.path.relpath(fastled_lib_path)
|
|
print(f"Using existing FastLED library: {rel_lib_path}")
|
|
return fastled_lib_path
|
|
|
|
# Compile essential FastLED source files for STUB platform
|
|
# User directive: Just glob ALL .cpp files in src/** - no exclusions needed
|
|
import glob
|
|
|
|
fastled_src_dir = self.project_root / "src"
|
|
# Include ALL .cpp files recursively - no platform exclusions
|
|
all_cpp_files: List[str] = glob.glob(
|
|
str(fastled_src_dir / "**" / "*.cpp"), recursive=True
|
|
)
|
|
|
|
fastled_sources: List[Path] = []
|
|
for cpp_file in all_cpp_files:
|
|
# Include ALL files - no filtering needed
|
|
fastled_sources.append(Path(cpp_file))
|
|
|
|
if self.no_unity:
|
|
print(
|
|
f"Individual compilation: Found {len(fastled_sources)} FastLED .cpp files for compilation"
|
|
)
|
|
else:
|
|
print(
|
|
f"Unity build mode: Found {len(fastled_sources)} FastLED .cpp files for compilation"
|
|
)
|
|
|
|
# Compile all FastLED source files to object files
|
|
fastled_objects: List[Path] = []
|
|
for src_file in fastled_sources:
|
|
src_path = self.project_root / src_file
|
|
# Create unique object file name including directory path to avoid collisions
|
|
# Convert path separators to underscores to create valid filename
|
|
relative_path = src_path.relative_to(self.project_root / "src")
|
|
safe_name = (
|
|
str(relative_path.with_suffix("")).replace("/", "_").replace("\\", "_")
|
|
)
|
|
obj_path = self.build_dir / f"{safe_name}_fastled.o"
|
|
|
|
if not obj_path.exists():
|
|
future = self.compiler.compile_cpp_file(
|
|
src_path,
|
|
output_path=obj_path,
|
|
additional_flags=[
|
|
"-c", # Compile only
|
|
"-DFASTLED_STUB_IMPL", # Essential for STUB platform
|
|
# NOTE: All defines and compiler flags now come from build_unit.toml
|
|
# Remove hardcoded defines - they should be in [test] section
|
|
],
|
|
)
|
|
result: Result = future.result()
|
|
if not result.ok:
|
|
print(f"ERROR: Failed to compile {src_file}: {result.stderr}")
|
|
# Continue with other files rather than failing completely
|
|
continue
|
|
|
|
fastled_objects.append(obj_path)
|
|
|
|
print(f"Creating static library from {len(fastled_objects)} object files...")
|
|
|
|
# Create static library using ar (archiver)
|
|
# First try llvm-ar since it's more likely to be available
|
|
ar_cmd: List[str] = [
|
|
"llvm-ar", # LLVM archiver tool (works with Clang)
|
|
"rcs", # Create archive with symbol table
|
|
str(fastled_lib_path),
|
|
] + [str(obj) for obj in fastled_objects]
|
|
|
|
# Create static library using ziglang ar (archiver)
|
|
print("Creating static library using ziglang ar...")
|
|
lib_cmd: List[str] = [
|
|
"python",
|
|
"-m",
|
|
"ziglang",
|
|
"ar",
|
|
"rcs", # Create archive with symbol table
|
|
str(fastled_lib_path),
|
|
] + [str(obj) for obj in fastled_objects]
|
|
|
|
try:
|
|
# Optimize command to use sys.executable instead of shell 'python' resolution
|
|
python_exe = optimize_python_command(lib_cmd)
|
|
|
|
# Use streaming to prevent buffer overflow
|
|
process = subprocess.Popen(
|
|
python_exe,
|
|
stdout=subprocess.PIPE,
|
|
stderr=subprocess.PIPE,
|
|
text=True,
|
|
bufsize=1,
|
|
encoding="utf-8",
|
|
errors="replace",
|
|
cwd=self.build_dir,
|
|
)
|
|
|
|
stdout_lines: list[str] = []
|
|
stderr_lines: list[str] = []
|
|
|
|
while True:
|
|
stdout_line = process.stdout.readline() if process.stdout else ""
|
|
stderr_line = process.stderr.readline() if process.stderr else ""
|
|
|
|
if stdout_line:
|
|
stdout_lines.append(stdout_line.rstrip())
|
|
if stderr_line:
|
|
stderr_lines.append(stderr_line.rstrip())
|
|
|
|
if process.poll() is not None:
|
|
remaining_stdout = process.stdout.read() if process.stdout else ""
|
|
remaining_stderr = process.stderr.read() if process.stderr else ""
|
|
|
|
if remaining_stdout:
|
|
for line in remaining_stdout.splitlines():
|
|
stdout_lines.append(line.rstrip())
|
|
if remaining_stderr:
|
|
for line in remaining_stderr.splitlines():
|
|
stderr_lines.append(line.rstrip())
|
|
break
|
|
|
|
if process.returncode != 0:
|
|
print(f"ERROR: Failed to create static library: {stderr_lines}")
|
|
return fastled_lib_path # Return path even if creation failed
|
|
|
|
# Convert absolute path to relative for display
|
|
rel_lib_path = os.path.relpath(fastled_lib_path)
|
|
print(f"Successfully created FastLED library: {rel_lib_path}")
|
|
return fastled_lib_path
|
|
|
|
except Exception as e:
|
|
print(f"ERROR: Exception during library creation: {e}")
|
|
raise
|
|
|
|
def get_test_executables(
|
|
self, specific_test: str | None = None
|
|
) -> List[TestExecutable]:
|
|
"""Get compiled test executables with name filtering"""
|
|
if specific_test:
|
|
# Check if we should do fuzzy matching (if there's a * in the name)
|
|
if "*" in specific_test:
|
|
# Convert glob pattern to regex pattern
|
|
import re
|
|
|
|
pattern = specific_test.replace("*", ".*")
|
|
return [
|
|
t
|
|
for t in self.compiled_tests
|
|
if re.search(pattern, t.name)
|
|
or re.search(pattern, f"test_{t.name}")
|
|
]
|
|
else:
|
|
# Exact matching - either the full name or with test_ prefix (case-insensitive)
|
|
return [
|
|
t
|
|
for t in self.compiled_tests
|
|
if (
|
|
t.name.lower() == specific_test.lower()
|
|
or t.name.lower() == f"test_{specific_test}".lower()
|
|
or t.name.lower() == specific_test.replace("test_", "").lower()
|
|
)
|
|
]
|
|
return self.compiled_tests
|
|
|
|
@property
|
|
def link_cache_dir(self) -> Path:
|
|
"""Get the link cache directory"""
|
|
cache_dir = self.build_dir / "link_cache"
|
|
cache_dir.mkdir(exist_ok=True)
|
|
return cache_dir
|
|
|
|
def _calculate_file_hash(self, file_path: Path) -> str:
|
|
"""Calculate SHA256 hash of a file"""
|
|
if not file_path.exists():
|
|
return "no_file"
|
|
|
|
hash_sha256 = hashlib.sha256()
|
|
with open(file_path, "rb") as f:
|
|
# Read in chunks to handle large files efficiently
|
|
for chunk in iter(lambda: f.read(4096), b""):
|
|
hash_sha256.update(chunk)
|
|
return hash_sha256.hexdigest()
|
|
|
|
def _calculate_linker_args_hash(self, linker_args: List[str]) -> str:
|
|
"""Calculate SHA256 hash of linker arguments"""
|
|
# Convert linker args to a stable string representation
|
|
args_str = "|".join(sorted(linker_args))
|
|
hash_sha256 = hashlib.sha256()
|
|
hash_sha256.update(args_str.encode("utf-8"))
|
|
return hash_sha256.hexdigest()
|
|
|
|
def _calculate_link_cache_key(
|
|
self, test_obj_path: Path, fastled_lib_path: Path, linker_args: List[str]
|
|
) -> str:
|
|
"""Calculate comprehensive cache key for linking"""
|
|
# Hash all the factors that affect linking output
|
|
fastled_hash = self._calculate_file_hash(fastled_lib_path)
|
|
test_obj_hash = self._calculate_file_hash(test_obj_path)
|
|
linker_hash = self._calculate_linker_args_hash(linker_args)
|
|
|
|
# Combine all hashes into a single cache key
|
|
combined = f"fastled:{fastled_hash}|test:{test_obj_hash}|flags:{linker_hash}"
|
|
final_hash = hashlib.sha256(combined.encode("utf-8")).hexdigest()
|
|
|
|
return final_hash[:16] # Use first 16 chars for readability
|
|
|
|
def _get_cached_executable(self, test_name: str, cache_key: str) -> Optional[Path]:
|
|
"""Check if a cached executable exists for this test and cache key"""
|
|
cached_exe = self.link_cache_dir / f"{test_name}_{cache_key}.exe"
|
|
return cached_exe if cached_exe.exists() else None
|
|
|
|
def _cache_executable(self, test_name: str, cache_key: str, exe_path: Path) -> None:
|
|
"""Cache an executable with the given cache key"""
|
|
if not exe_path.exists():
|
|
return
|
|
|
|
cached_exe = self.link_cache_dir / f"{test_name}_{cache_key}.exe"
|
|
try:
|
|
shutil.copy2(exe_path, cached_exe)
|
|
print(f" {test_name}: Cached executable for future use")
|
|
except Exception as e:
|
|
print(f" {test_name}: Warning - failed to cache executable: {e}")
|
|
|
|
def _get_platform_linker_args(self, fastled_lib_path: Path) -> List[str]:
|
|
"""Get platform-specific linker arguments using BuildFlags"""
|
|
# Use the build_flags that includes platform-specific configuration
|
|
args = self.build_flags.link_flags.copy()
|
|
|
|
# Load TOML config for test-specific and build mode flags
|
|
toml_path = self.project_root / "ci" / "build_unit.toml"
|
|
with open(toml_path, "rb") as f:
|
|
config = tomllib.load(f)
|
|
|
|
# Add test-specific linking flags if available
|
|
test_link_flags = config.get("linking", {}).get("test", {}).get("flags", [])
|
|
args.extend(test_link_flags)
|
|
|
|
# Add build mode specific flags (use test-specific debug mode)
|
|
mode = "quick" if self.quick_build else "test_debug"
|
|
mode_flags = config.get("build_modes", {}).get(mode, {}).get("link_flags", [])
|
|
args.extend(mode_flags)
|
|
|
|
# Add FastLED library
|
|
args.append(str(fastled_lib_path)) # Link against the FastLED static library
|
|
|
|
return args
|
|
|
|
@classmethod
|
|
def get_existing_instance(cls) -> Optional["FastLEDTestCompiler"]:
|
|
"""Get existing compiler instance for reuse"""
|
|
return cls._existing_instance
|
|
|
|
|
|
def check_iwyu_available() -> bool:
    """Check whether an include-what-you-use binary is on PATH."""
    import shutil

    # The tool installs under either name depending on the distribution.
    return any(shutil.which(tool) for tool in ("include-what-you-use", "iwyu"))
|
|
|
|
|
|
# For testing this module directly
|
|
if __name__ == "__main__":
|
|
print("Testing FastLEDTestCompiler...")
|
|
|
|
# Create test compiler
|
|
test_compiler = FastLEDTestCompiler.create_for_unit_tests(
|
|
project_root=Path(PROJECT_ROOT), clean_build=True
|
|
)
|
|
|
|
# Discover test files
|
|
test_files = test_compiler.discover_test_files()
|
|
print(f"Discovered {len(test_files)} test files")
|
|
|
|
# Test compilation of a few files
|
|
if test_files:
|
|
limited_files = test_files[:3] # Test first 3 files
|
|
print(f"Testing compilation of {len(limited_files)} test files...")
|
|
|
|
compile_result = test_compiler.compile_all_tests()
|
|
|
|
if compile_result.success:
|
|
print(
|
|
f"SUCCESS: Compiled {compile_result.compiled_count} tests in {compile_result.duration:.2f}s"
|
|
)
|
|
executables = test_compiler.get_test_executables()
|
|
for exe in executables:
|
|
print(f" Executable: {exe.name} -> {exe.executable_path}")
|
|
else:
|
|
print(f"FAILED: {len(compile_result.errors)} errors")
|
|
for error in compile_result.errors:
|
|
print(f" {error.test_name}: {error.message}")
|