initial commit

This commit is contained in:
2026-02-12 00:45:31 -08:00
commit 5f168f370b
3024 changed files with 804889 additions and 0 deletions

View File

@@ -0,0 +1,5 @@
"""
FastLED Compiler Module
High-performance compilation system built on proven Python compiler API.
"""

View File

@@ -0,0 +1,196 @@
#!/usr/bin/env python3
"""
FastLED Dynamic Library Builder
Builds FastLED as a shared/dynamic library for unit testing
"""
import os
import subprocess
import sys
from pathlib import Path
from typing import List
from ci.compiler.clang_compiler import (
BuildFlags,
Compiler,
CompilerOptions,
Result,
)
from ci.util.paths import PROJECT_ROOT
def build_fastled_dynamic_library(build_dir: Path) -> Path:
    """Build FastLED as a dynamic library.

    Compiles every ``.cpp`` under ``src/`` into position-independent object
    files and links them into a shared library: ``fastled.dll`` on Windows,
    ``libfastled.so`` elsewhere.

    Args:
        build_dir: Directory that receives the object files and the library.

    Returns:
        Path to the linked dynamic library.

    Raises:
        Exception: If the final link step exits with a non-zero status.
    """
    print("Building FastLED dynamic library...")
    # Define library path with appropriate extension
    if sys.platform == "win32":
        fastled_lib_path = build_dir / "fastled.dll"
    else:
        fastled_lib_path = build_dir / "libfastled.so"
    # Platform-specific shared-library flags. Empty entries are filtered out
    # below so we never hand a bare "" argument to the compiler (clang would
    # treat it as a missing input file).
    platform_flags = [
        # NOTE(review): "/DLL" is an MSVC/lld-link *linker* option; passing it
        # to clang++ at compile time is dubious -- confirm it is needed here.
        "-shared" if sys.platform != "win32" else "/DLL",
        "-fPIC" if sys.platform != "win32" else "",
    ]
    # Configure compiler with shared library flags
    settings = CompilerOptions(
        include_path=str(Path(PROJECT_ROOT) / "src"),
        defines=[
            "STUB_PLATFORM",
            "ARDUINO=10808",
            "FASTLED_USE_STUB_ARDUINO",
            "SKETCH_HAS_LOTS_OF_MEMORY=1",
            "FASTLED_STUB_IMPL",
            "FASTLED_FORCE_NAMESPACE=1",
            "FASTLED_TESTING",
            "FASTLED_NO_AUTO_NAMESPACE",
            "FASTLED_NO_PINMAP",
            "HAS_HARDWARE_PIN_SUPPORT",
            # "FASTLED_DEBUG_LEVEL=1",
            # "BUILDING_FASTLED_DLL", # New define for DLL exports
            # "FASTLED_EXPORT=__declspec(dllexport)" if sys.platform == "win32" else "FASTLED_EXPORT=", # DLL exports for Windows
        ],
        std_version="c++17",
        compiler="clang++",
        compiler_args=[
            # NOTE: All compiler flags should come from build_unit.toml
            # Keep only platform-specific include paths and shared library flags
            "-I" + str(Path(PROJECT_ROOT) / "src/platforms/stub"),
            "-I" + str(Path(PROJECT_ROOT) / "tests"),
            # Add shared library flags - platform specific (empties dropped)
            *[flag for flag in platform_flags if flag],
        ],
        use_pch=True,
        parallel=True,
    )
    # Load build flags from TOML
    build_flags_path = Path(PROJECT_ROOT) / "ci" / "build_unit.toml"
    build_flags = BuildFlags.parse(
        build_flags_path, quick_build=False, strict_mode=False
    )
    compiler = Compiler(settings, build_flags)
    # Compile all FastLED source files
    fastled_src_dir = Path(PROJECT_ROOT) / "src"
    all_cpp_files = list(fastled_src_dir.rglob("*.cpp"))
    print(f"Found {len(all_cpp_files)} FastLED source files")
    # Compile to object files
    object_files: List[Path] = []
    for src_file in all_cpp_files:
        # Flatten the relative path into a unique, filesystem-safe object name.
        relative_path = src_file.relative_to(fastled_src_dir)
        safe_name = (
            str(relative_path.with_suffix("")).replace("/", "_").replace("\\", "_")
        )
        obj_path = build_dir / f"{safe_name}_fastled.o"
        future = compiler.compile_cpp_file(
            src_file,
            output_path=obj_path,
            additional_flags=[
                "-c",
                "-DFASTLED_STUB_IMPL",
                "-DFASTLED_FORCE_NAMESPACE=1",
                "-DFASTLED_NO_AUTO_NAMESPACE",
                "-DFASTLED_NO_PINMAP",
                "-DPROGMEM=",
                "-DHAS_HARDWARE_PIN_SUPPORT",
                "-DFASTLED_ENABLE_JSON=1",
                "-fno-exceptions",
                "-fno-rtti",
                "-fPIC",  # Position Independent Code for shared library
            ],
        )
        result: Result = future.result()
        if not result.ok:
            # Best-effort: report the failure but keep linking what compiled.
            print(f"ERROR: Failed to compile {src_file}: {result.stderr}")
            continue
        object_files.append(obj_path)
    print(f"Linking {len(object_files)} object files into dynamic library...")
    # Link as shared library
    if sys.platform == "win32":
        # Use lld-link for Windows DLL creation
        # NOTE(review): SDK/MSVC library paths are hard-coded to specific
        # versions -- confirm these match the CI toolchain.
        link_cmd = [
            "lld-link",
            "/DLL",
            "/NOLOGO",
            "/OUT:" + str(fastled_lib_path),
            "/LIBPATH:C:/Program Files (x86)/Windows Kits/10/Lib/10.0.19041.0/um/x64",
            "/LIBPATH:C:/Program Files (x86)/Windows Kits/10/Lib/10.0.19041.0/ucrt/x64",
            "/LIBPATH:C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.37.32822/lib/x64",
            *[str(obj) for obj in object_files],
            "msvcrt.lib",
            "legacy_stdio_definitions.lib",
            "kernel32.lib",
            "user32.lib",
        ]
    else:
        link_cmd = [
            "clang++",
            "-shared",
            "-o",
            str(fastled_lib_path),
            *[str(obj) for obj in object_files],
        ]
    try:
        # communicate() drains stdout and stderr concurrently. The previous
        # implementation alternated blocking readline() calls on the two
        # pipes, which can deadlock when the linker fills one pipe while we
        # are blocked reading the other.
        process = subprocess.Popen(
            link_cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            encoding="utf-8",
            errors="replace",
        )
        _stdout_text, stderr_text = process.communicate()
        stderr_result = "\n".join(
            line.rstrip() for line in stderr_text.splitlines()
        )
        if process.returncode != 0:
            raise Exception(f"Failed to create dynamic library: {stderr_result}")
        print(f"Successfully created dynamic library: {fastled_lib_path}")
        return fastled_lib_path
    except Exception as e:
        print(f"ERROR: Exception during library creation: {e}")
        raise
if __name__ == "__main__":
    # Stage all build artifacts under the system temp directory so repeated
    # runs can reuse previously compiled objects.
    import tempfile

    output_dir = Path(tempfile.gettempdir()) / "fastled_test_build"
    output_dir.mkdir(parents=True, exist_ok=True)
    build_fastled_dynamic_library(output_dir)

View File

@@ -0,0 +1,273 @@
#!/usr/bin/env python3
"""
Clean Cache-Enhanced Compilation
Simple wrapper that adds fingerprint cache to FastLED compilation.
Dramatically speeds up incremental builds by skipping unchanged files.
"""
import time
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set
from ci.ci.fingerprint_cache import FingerprintCache
from ci.compiler.clang_compiler import Compiler
from ci.compiler.test_example_compilation import CompilationResult
class CacheAwareCompiler:
    """Simple wrapper that adds cache checking to compilation.

    Delegates all real compilation to the wrapped ``Compiler`` and consults a
    ``FingerprintCache`` so that source files whose content has not changed
    since a baseline time are skipped entirely.
    """

    def __init__(self, compiler: Compiler, cache_file: Path, verbose: bool = False):
        # Underlying compiler that performs real compilation work.
        self.compiler = compiler
        # Content-fingerprint cache persisted at cache_file.
        self.cache = FingerprintCache(cache_file)
        self.verbose = verbose
        # Counters surfaced in the "[CACHE] ..." summary log lines.
        self.stats = {
            "files_checked": 0,
            "files_skipped": 0,
            "files_compiled": 0,
            "cache_hits": 0,
            "cache_misses": 0,
        }

    def should_compile(self, file_path: Path, baseline_time: float) -> bool:
        """Check if file needs compilation using cache.

        Returns True when the file changed since ``baseline_time`` or when the
        cache query fails (compiling is the safe fallback). Updates the stats
        counters as a side effect.
        """
        self.stats["files_checked"] += 1
        try:
            needs_compile = self.cache.has_changed(file_path, baseline_time)
            if needs_compile:
                self.stats["cache_misses"] += 1
                self.stats["files_compiled"] += 1
                return True
            else:
                self.stats["cache_hits"] += 1
                self.stats["files_skipped"] += 1
                if self.verbose:
                    print(f"[CACHE] Skipping unchanged: {file_path.name}")
                return False
        except KeyboardInterrupt:
            # Forward Ctrl-C to the main thread (this may run on a worker).
            import _thread

            _thread.interrupt_main()
            raise
        except Exception as e:
            # Any cache failure degrades to "compile it" rather than skipping.
            if self.verbose:
                print(f"[CACHE] Error checking {file_path.name}: {e}")
            self.stats["cache_misses"] += 1
            self.stats["files_compiled"] += 1
            return True

    def _headers_changed(self, baseline_time: float) -> bool:
        """Detect if any relevant header dependency has changed since last run.

        Uses the compiler's PCH dependency discovery to collect a conservative
        set of headers that impact example compilation, then queries the
        fingerprint cache to see if any actually changed content.

        Returns True (forcing a full rebuild) on any change, on a missing
        dependency, or if the scan itself fails.
        """
        try:
            # Reuse the compiler's dependency discovery (covers src/** and platforms/**)
            dependencies: List[Path] = []
            if hasattr(self.compiler, "_get_pch_dependencies"):
                dependencies = self.compiler._get_pch_dependencies()  # type: ignore[attr-defined]
            else:
                # Fallback: hash all headers under the compiler's include path
                include_root = Path(self.compiler.settings.include_path)
                for pattern in ("**/*.h", "**/*.hpp"):
                    dependencies.extend(include_root.glob(pattern))
            changed_any = False
            for dep in dependencies:
                try:
                    if self.cache.has_changed(dep, baseline_time):
                        if self.verbose:
                            print(f"[CACHE] Header changed: {dep}")
                        changed_any = True
                        break
                except FileNotFoundError:
                    # Missing dependency means we must rebuild conservatively
                    if self.verbose:
                        print(f"[CACHE] Header missing (forces rebuild): {dep}")
                    changed_any = True
                    break
            return changed_any
        except KeyboardInterrupt:
            import _thread

            _thread.interrupt_main()
            raise
        except Exception as e:
            if self.verbose:
                print(f"[CACHE] Header scan failed, forcing rebuild: {e}")
            return True

    def compile_with_cache(
        self,
        ino_files: List[Path],
        pch_compatible_files: Set[Path],
        log_fn: Callable[[str], None],
        full_compilation: bool,
        verbose: bool = False,
        baseline_time: Optional[float] = None,
    ) -> Dict[str, Any]:
        """Compile only files that have changed.

        Args:
            ino_files: Example sketches to (possibly) compile.
            pch_compatible_files: Files eligible for precompiled-header use.
            log_fn: Sink for progress/log lines.
            full_compilation: Passed through to the underlying compile step.
            verbose: Passed through to the underlying compile step.
            baseline_time: Change-detection reference; defaults to one hour ago.

        Returns:
            Dict with "compilation_result", "cache_stats" and "total_time".
        """
        if baseline_time is None:
            baseline_time = time.time() - 3600  # Default: 1 hour ago
        start_time = time.time()
        # If any header dependency changed, conservatively rebuild all example files
        force_recompile_due_to_headers = self._headers_changed(baseline_time)
        if force_recompile_due_to_headers and self.verbose:
            print(
                "[CACHE] Header dependency changes detected - forcing recompilation of example sources"
            )
        # Check which files need compilation
        files_to_compile: List[Path] = []
        for ino_file in ino_files:
            if force_recompile_due_to_headers or self.should_compile(
                ino_file, baseline_time
            ):
                files_to_compile.append(ino_file)
        # Check .cpp files too
        cpp_files_to_compile: List[Path] = []
        for ino_file in ino_files:
            cpp_files = self.compiler.find_cpp_files_for_example(ino_file)
            for cpp_file in cpp_files:
                if force_recompile_due_to_headers or self.should_compile(
                    cpp_file, baseline_time
                ):
                    cpp_files_to_compile.append(cpp_file)
        # Log cache results
        total_files = len(ino_files) + sum(
            len(self.compiler.find_cpp_files_for_example(f)) for f in ino_files
        )
        log_fn(
            f"[CACHE] {self.stats['files_skipped']}/{total_files} files unchanged, {self.stats['files_compiled']}/{total_files} need compilation"
        )
        if self.stats["files_skipped"] > 0:
            time_saved = self.stats["files_skipped"] * 0.5  # Estimate
            log_fn(f"[CACHE] Estimated time saved: {time_saved:.1f}s")
        # Compile files that changed
        if files_to_compile or cpp_files_to_compile:
            result = self._run_actual_compilation(
                files_to_compile,
                cpp_files_to_compile,
                pch_compatible_files,
                log_fn,
                full_compilation,
                verbose,
            )
        else:
            log_fn("[CACHE] All files unchanged - no compilation needed!")
            result = self._create_success_result(len(ino_files), ino_files)
        # Log final stats
        if total_files > 0:
            hit_rate = (
                (
                    self.stats["cache_hits"]
                    / (self.stats["cache_hits"] + self.stats["cache_misses"])
                    * 100
                )
                if (self.stats["cache_hits"] + self.stats["cache_misses"]) > 0
                else 0
            )
            skip_rate = self.stats["files_skipped"] / total_files * 100
            log_fn(
                f"[CACHE] {hit_rate:.1f}% cache hit rate, {skip_rate:.1f}% files skipped"
            )
        return {
            "compilation_result": result,
            "cache_stats": self.stats,
            "total_time": time.time() - start_time,
        }

    def _run_actual_compilation(
        self,
        ino_files: List[Path],
        cpp_files_to_compile: List[Path],
        pch_compatible_files: Set[Path],
        log_fn: Callable[[str], None],
        full_compilation: bool,
        verbose: bool,
    ):
        """Run the actual compilation for changed files.

        Temporarily monkey-patches ``find_cpp_files_for_example`` on the
        wrapped compiler so the downstream pipeline only sees the changed .cpp
        files; the original method is always restored afterwards.
        NOTE(review): this patching is not safe for concurrent use of the same
        Compiler instance -- confirm callers are single-threaded here.
        """
        from ci.compiler.test_example_compilation import compile_examples_simple

        # Temporarily filter .cpp files to only compile changed ones
        original_method = self.compiler.find_cpp_files_for_example

        def filtered_cpp_files(ino_file: Path) -> List[Path]:
            all_cpp = original_method(ino_file)
            return [cpp for cpp in all_cpp if cpp in cpp_files_to_compile]

        self.compiler.find_cpp_files_for_example = filtered_cpp_files
        try:
            return compile_examples_simple(
                self.compiler,
                ino_files,
                pch_compatible_files,
                log_fn,
                full_compilation,
                verbose,
            )
        finally:
            self.compiler.find_cpp_files_for_example = original_method

    def _create_success_result(
        self, file_count: int, ino_files: Optional[List[Path]] = None
    ) -> CompilationResult:
        """Create a successful compilation result for cached files.

        Locates previously built object files under ``.build/examples`` so a
        later link step still has inputs even though nothing was recompiled.
        NOTE(review): the ``.build/examples`` layout is assumed here -- confirm
        it matches the compiler's actual output directory.
        """
        # For cached files, we need to populate object_file_map with existing object files
        # so that linking can proceed properly
        object_file_map: Dict[Path, List[Path]] = {}
        if ino_files:
            for ino_file in ino_files:
                # Find existing object files for this example
                example_name = ino_file.parent.name
                build_dir = Path(".build/examples") / example_name
                obj_files: List[Path] = []
                if build_dir.exists():
                    # Look for the main .ino object file
                    ino_obj = build_dir / f"{ino_file.stem}.o"
                    if ino_obj.exists():
                        obj_files.append(ino_obj)
                    # Look for additional .cpp files in the same directory as the .ino
                    cpp_files = self.compiler.find_cpp_files_for_example(ino_file)
                    for cpp_file in cpp_files:
                        cpp_obj = build_dir / f"{cpp_file.stem}.o"
                        if cpp_obj.exists():
                            obj_files.append(cpp_obj)
                if obj_files:
                    object_file_map[ino_file] = obj_files
        return CompilationResult(
            successful_count=file_count,
            failed_count=0,
            compile_time=0.0,
            failed_examples=[],
            object_file_map=object_file_map,
        )
def create_cache_compiler(
    compiler: Compiler, cache_file: Optional[Path] = None, verbose: bool = False
) -> CacheAwareCompiler:
    """Factory for a CacheAwareCompiler with a default cache location."""
    # Fall back to the standard fingerprint-cache path inside .build/.
    target_cache = (
        Path(".build/fingerprint_cache.json") if cache_file is None else cache_file
    )
    return CacheAwareCompiler(compiler, target_cache, verbose)

View File

@@ -0,0 +1,514 @@
#!/usr/bin/env python3
"""
Standalone cache setup script for PlatformIO builds with Python fake compilers.
This script is executed by PlatformIO as a post-build extra_script.
Configuration is passed through environment variables to avoid template string issues.
"""
# ruff: noqa: F405, F821 # Suppress SCons-specific import and undefined name warnings
import json
import os
import shutil
import sys
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
# Import env and try to import projenv (SCons-specific imports)
# These are dynamically available in PlatformIO/SCons environment;
# outside SCons, Import() is undefined and raises NameError.
try:
    Import("env")  # type: ignore[name-defined] # SCons-specific import
    env: Any  # SCons environment object
except NameError:
    env = None  # For type checking when not in SCons context
# Verbose logging toggle read from the conventional VERBOSE env var.
_VERBOSE = os.environ.get("VERBOSE", "0") in ("1", "true", "True", "yes", "y")
# Try to import projenv if it exists (only present for project builds)
has_projenv: bool = False
projenv: Optional[Any] = None
try:
    Import("projenv")  # type: ignore[name-defined] # SCons-specific import
    # projenv is now available in scope from Import
    has_projenv = True
except (NameError, Exception):
    has_projenv = False
    projenv = None
# Import cached compiler functions with type stubs; the script degrades to
# no-cache mode if the helper module cannot be imported.
create_cached_toolchain: Optional[Any] = None
get_platform_packages_paths: Optional[Any] = None
cached_compiler_available: bool = False
try:
    from ci.util.cached_compiler import (
        create_cached_toolchain,
        get_platform_packages_paths,
    )

    cached_compiler_available = True
except ImportError as e:
    print("WARNING: Could not import cached compiler module: " + str(e))
    cached_compiler_available = False
# Debug: Dump the environment state to disk for inspection.
# Every value is stringified because SCons stores non-JSON-serializable
# objects (builders, nodes, callables) in the environment.
env_dump: Dict[str, str] = {}
try:
    if env is not None and hasattr(env, "Dictionary"):  # type: ignore[has-type]
        for key in env.Dictionary():  # type: ignore[union-attr]
            try:
                value = env[key]  # type: ignore[index]
                # Convert to string to avoid JSON serialization issues
                env_dump[key] = str(value)  # type: ignore[arg-type]
            except Exception:
                env_dump[key] = "<error getting value>"
    else:
        env_dump["error"] = "env not available or missing Dictionary method"
except Exception as e:
    env_dump["error"] = f"Failed to access env: {e}"
# Write environment dump to disk (relative to the current working directory)
env_dump_path = "env_dump.json"
with open(env_dump_path, "w") as f:
    json.dump(env_dump, f, indent=2)
print("Environment state dumped to: " + env_dump_path)
# Also dump projenv if available
if has_projenv and projenv is not None:
    projenv_dump: Dict[str, str] = {}
    for key in projenv.Dictionary():  # type: ignore
        try:
            value = projenv[key]  # type: ignore
            projenv_dump[key] = str(value)
        except Exception:
            projenv_dump[key] = "<error getting value>"
    projenv_dump_path = "projenv_dump.json"
    with open(projenv_dump_path, "w") as f:
        json.dump(projenv_dump, f, indent=2)
    print("Projenv state dumped to: " + projenv_dump_path)
# Read cache configuration from environment variables.
# Configuration is passed via FASTLED_* env vars (set by the build driver)
# to avoid template-string escaping issues in platformio.ini.
cache_type = os.environ.get("FASTLED_CACHE_TYPE", "no_cache")
cache_executable = os.environ.get("FASTLED_CACHE_EXECUTABLE", "")
sccache_path = os.environ.get("FASTLED_SCCACHE_PATH", "")
sccache_dir = os.environ.get("FASTLED_SCCACHE_DIR", "")
sccache_cache_size = os.environ.get("FASTLED_SCCACHE_CACHE_SIZE", "2G")
xcache_path = os.environ.get("FASTLED_XCACHE_PATH", "")
debug_enabled = os.environ.get("FASTLED_CACHE_DEBUG", "0") == "1"
print("Cache configuration from environment:")
print(" Cache type: " + cache_type)
print(" Cache executable: " + cache_executable)
print(" SCCACHE path: " + sccache_path)
print(" SCCACHE dir: " + sccache_dir)
print(" Debug enabled: " + str(debug_enabled))
# Set up cache environment variables for subprocess execution.
# Values are mirrored into both the SCons ENV and os.environ so that both
# SCons-spawned and directly-spawned subprocesses see them.
try:
    if env is not None and hasattr(env, "Append"):  # type: ignore[has-type]
        if sccache_dir:
            env.Append(ENV={"SCCACHE_DIR": sccache_dir})  # type: ignore[union-attr]
            os.environ["SCCACHE_DIR"] = sccache_dir
        if sccache_cache_size:
            env.Append(ENV={"SCCACHE_CACHE_SIZE": sccache_cache_size})  # type: ignore[union-attr]
            os.environ["SCCACHE_CACHE_SIZE"] = sccache_cache_size
        # Ensure sccache binary directory is on PATH so xcache can find it even
        # inside ESP-IDF's virtual environments where PATH is heavily modified.
        if sccache_path:
            sccache_dir_path = str(Path(sccache_path).parent)
            env.PrependENVPath("PATH", sccache_dir_path)  # type: ignore[union-attr]
            if has_projenv and projenv is not None and hasattr(projenv, "PrependENVPath"):
                projenv.PrependENVPath("PATH", sccache_dir_path)  # type: ignore[union-attr]
            os.environ["PATH"] = sccache_dir_path + os.pathsep + os.environ.get("PATH", "")
        if debug_enabled:
            env.Append(ENV={"XCACHE_DEBUG": "1", "SCCACHE_DEBUG": "1"})  # type: ignore[union-attr]
            if has_projenv and projenv is not None and hasattr(projenv, "Append"):
                projenv.Append(ENV={"XCACHE_DEBUG": "1", "SCCACHE_DEBUG": "1"})  # type: ignore[union-attr]
except Exception as e:
    print(f"Warning: Failed to set up cache environment: {e}")
# Check if cache is available and cached compiler system can be used.
# NOTE(review): in the xcache branch USE_CACHE is assigned the result of a
# chained `and` expression, so it may hold a truthy string/None rather than a
# bool; all uses below are truthiness tests, so behavior is unaffected.
USE_CACHE = False
if cached_compiler_available and cache_executable:
    if cache_type == "xcache":
        # For xcache, check if the Python script exists and sccache is available
        USE_CACHE = (
            cache_executable
            and xcache_path
            and Path(xcache_path).exists()
            and sccache_path
            and shutil.which(sccache_path)
        )
        if USE_CACHE:
            print("xcache wrapper detected and configured for Python fake compilers")
            print(" xcache path: " + str(xcache_path))
            print(" cache executable: " + str(cache_executable))
    else:
        # For sccache/ccache, check if executable is in PATH
        USE_CACHE = shutil.which(cache_executable) is not None
        if USE_CACHE:
            print(
                str(cache_type) + " detected and configured for Python cached compilers"
            )
            print(" cache executable: " + str(cache_executable))
elif not cached_compiler_available:
    print(
        "WARNING: Python cached compiler system not available, cache will be disabled"
    )
else:
    print(
        "Cache executable not found: "
        + str(cache_executable)
        + ", cache will be disabled"
    )
if USE_CACHE and env is not None and hasattr(env, "get"):  # type: ignore[has-type]
    # Get current compilers from environment
    original_cc = env.get("CC")  # type: ignore[union-attr]
    original_cxx = env.get("CXX")  # type: ignore[union-attr]
    print("DEBUG: Found compilers in env:")
    # Use repr for safer type conversion
    cc_str = repr(original_cc)
    cxx_str = repr(original_cxx)
    cc_type_str = type(original_cc).__name__
    cxx_type_str = type(original_cxx).__name__
    print(" CC: " + cc_str + " (type: " + cc_type_str + ")")
    print(" CXX: " + cxx_str + " (type: " + cxx_type_str + ")")

    # Extract compiler information for fake compiler generation
    def extract_compiler_info(compiler_env_var: Any) -> Optional[str]:
        """Extract compiler name from environment variable value.

        SCons may store CC/CXX as a string ("arm-none-eabi-gcc -flag") or a
        list; in both cases only the first token (the executable) is wanted.
        """
        if not compiler_env_var:
            return None
        if isinstance(compiler_env_var, list):
            return str(compiler_env_var[0]) if compiler_env_var else None  # type: ignore[arg-type]
        else:
            # Handle string values like "arm-none-eabi-gcc" or "gcc"
            return str(compiler_env_var).split()[0]  # type: ignore[arg-type]

    cc_name = extract_compiler_info(original_cc) or "gcc"
    cxx_name = extract_compiler_info(original_cxx) or "g++"
    print("Extracted compiler names:")
    print(" CC name: " + str(cc_name))
    print(" CXX name: " + str(cxx_name))
    # Create toolchain info for fake compiler generation
    # NOTE(review): toolchain_info is built but not referenced in the visible
    # portion of this script -- confirm whether it is still needed.
    toolchain_info: Dict[str, str] = {
        "CC": cc_name,
        "CXX": cxx_name,
    }
    # Create cache config for fake compiler system
    cache_config: Dict[str, str] = {
        "CACHE_TYPE": cache_type,
        "CACHE_EXECUTABLE": cache_executable,
        "SCCACHE_PATH": sccache_path,
        "SCCACHE_DIR": sccache_dir,
        "XCACHE_PATH": xcache_path,
    }
    # Check if toolset exists and is valid
    # Create a cache key based on the configuration to invalidate cache when config changes
    cache_key = f"{cc_name}_{cxx_name}_{cache_type}_{cache_executable}"
    # Cache in the local build directory (e.g. .build/pio/uno/)
    # NOTE(review): current_dir is not defined anywhere in this script --
    # verify it is injected by the SCons/PlatformIO context, otherwise this
    # line raises NameError at runtime.
    cache_file = Path(current_dir) / "compiler_cache.json"
    cached_tools: Optional[Dict[str, str]] = None
    # Try to load from persistent cache file
    if cache_file.exists():
        try:
            import json

            with open(cache_file, "r") as f:
                cache_data = json.load(f)
            cached_real_cc = cache_data.get("real_cc")
            cached_real_cxx = cache_data.get("real_cxx")
            cached_fake_cc = cache_data.get("fake_cc")
            cached_fake_cxx = cache_data.get("fake_cxx")
            if (
                cached_real_cc
                and cached_real_cxx
                and cached_fake_cc
                and cached_fake_cxx
            ):
                print("Found local compiler cache:")
                print(f" Cache file: {cache_file}")
                print(f" Real CC: {cached_real_cc}")
                print(f" Real CXX: {cached_real_cxx}")
                # Check if fake compiler scripts still exist, recreate if needed.
                # Cached entries may be stored as "python <path>"; strip the prefix.
                fake_cc_path = Path(cached_fake_cc.replace("python ", ""))
                fake_cxx_path = Path(cached_fake_cxx.replace("python ", ""))
                if fake_cc_path.exists() and fake_cxx_path.exists():
                    # Fast path: use existing cached compilers
                    # On Windows, ensure we have .cmd shims to avoid 'python script.py @file'
                    # causing Python to treat @file as the script name.
                    import sys as _sys
                    import os as _os

                    is_win = (
                        _os.name == "nt"
                        or _sys.platform.startswith("win")
                        or _sys.platform.startswith("cygwin")
                        or _sys.platform.startswith("msys")
                    )

                    def _ensure_cmd_wrapper(fake_str: str, real_path: str) -> str:
                        """On Windows, return (creating if needed) a .cmd shim
                        for a cached 'python script.py' compiler entry;
                        elsewhere return the entry unchanged."""
                        p = Path(fake_str.replace("python ", ""))
                        if is_win and p.suffix.lower() == ".py":
                            cmd = p.with_suffix(".cmd")
                            if not cmd.exists():
                                # Create missing .cmd wrapper now
                                from ci.util.cached_compiler import create_cached_compiler_script

                                out_dir = p.parent
                                created = create_cached_compiler_script(
                                    compiler_name=p.stem.split("cached_")[-1],
                                    cache_executable=cache_config.get("CACHE_EXECUTABLE", "sccache"),
                                    real_compiler_path=real_path,
                                    output_dir=out_dir,
                                    debug=debug_enabled,
                                )
                                cmd = Path(str(created).replace("python ", ""))
                            if cmd.exists():
                                return str(cmd)
                        return fake_str

                    # Create/choose proper wrapper paths
                    new_cc = _ensure_cmd_wrapper(cached_fake_cc, cached_real_cc)
                    new_cxx = _ensure_cmd_wrapper(cached_fake_cxx, cached_real_cxx)
                    cached_tools = {"CC": new_cc, "CXX": new_cxx}
                    # Update cache file if we switched to .cmd
                    try:
                        if new_cc != cached_fake_cc or new_cxx != cached_fake_cxx:
                            cache_data["fake_cc"] = new_cc
                            cache_data["fake_cxx"] = new_cxx
                            with open(cache_file, "w") as f:
                                json.dump(cache_data, f, indent=2)
                    except KeyboardInterrupt as ke:
                        import _thread

                        _thread.interrupt_main()
                        raise ke
                    except Exception:
                        # Cache-file update is best-effort; a stale entry only
                        # costs a re-check on the next run.
                        pass
                    print(
                        "SUCCESS: Using cached compilers (instant, no platform search needed):"
                    )
                    print(f" CC: {cached_tools['CC']}")
                    print(f" CXX: {cached_tools['CXX']}")
                    print(" Platform search skipped - using cached toolset")
                else:
                    print(
                        "Cached compiler scripts missing, recreating with cached real paths..."
                    )
                    # Recreate cached compilers using cached real paths (fast)
                    cached_compilers_dir = Path(current_dir) / "cached_compilers"
                    cached_compilers_dir.mkdir(parents=True, exist_ok=True)
                    from ci.util.cached_compiler import create_cached_compiler_script

                    cache_executable = cache_config.get("CACHE_EXECUTABLE", "sccache")
                    # Create cached CC script using cached real path
                    cached_cc_script = create_cached_compiler_script(
                        compiler_name="CC",
                        cache_executable=cache_executable,
                        real_compiler_path=cached_real_cc,
                        output_dir=cached_compilers_dir,
                        debug=debug_enabled,
                    )
                    # Create cached CXX script using cached real path
                    cached_cxx_script = create_cached_compiler_script(
                        compiler_name="CXX",
                        cache_executable=cache_executable,
                        real_compiler_path=cached_real_cxx,
                        output_dir=cached_compilers_dir,
                        debug=debug_enabled,
                    )
                    cached_tools = {
                        "CC": str(cached_cc_script),
                        "CXX": str(cached_cxx_script),
                    }
                    # Update cache file with new script paths
                    cache_data["fake_cc"] = cached_tools["CC"]
                    cache_data["fake_cxx"] = cached_tools["CXX"]
                    with open(cache_file, "w") as f:
                        json.dump(cache_data, f, indent=2)
                    print("Recreated cached compilers using cached real paths:")
                    print(f" CC: {cached_tools['CC']}")
                    print(f" CXX: {cached_tools['CXX']}")
        except Exception as e:
            print(f"Warning: Failed to load cache file {cache_file}: {e}")
            # Fall through to full recreation
    # If no valid cache found, create the toolset from scratch
    if cached_tools is None:
        print("No valid cache found, creating compiler toolset from scratch...")
        print(" This is the first compile or configuration changed")
        # Get platform packages paths for toolchain resolution (expensive operation)
        platform_packages: List[str] = []
        if get_platform_packages_paths is not None:
            print("Searching platform packages (this may take ~10 seconds)...")
            platform_packages = get_platform_packages_paths()
            print(f"Found {len(platform_packages)} platform package directories")
        if create_cached_toolchain is not None:
            try:
                # Find the real compiler paths (expensive operation, done only once)
                from ci.util.cached_compiler import find_toolchain_compiler

                print("Resolving real compiler paths...")
                real_cc_path = find_toolchain_compiler(cc_name, platform_packages)
                real_cxx_path = find_toolchain_compiler(cxx_name, platform_packages)
                if not real_cc_path or not real_cxx_path:
                    print(f"ERROR: Could not find real compilers:")
                    print(f" CC '{cc_name}': {real_cc_path}")
                    print(f" CXX '{cxx_name}': {real_cxx_path}")
                    cached_tools = None
                else:
                    print(f"Found real compilers:")
                    print(f" Real CC: {real_cc_path}")
                    print(f" Real CXX: {real_cxx_path}")
                    # Create cached compiler scripts
                    # NOTE(review): current_dir is not defined in this script --
                    # confirm it is provided by the surrounding build context.
                    cached_compilers_dir = Path(current_dir) / "cached_compilers"
                    cached_compilers_dir.mkdir(parents=True, exist_ok=True)
                    from ci.util.cached_compiler import create_cached_compiler_script

                    cache_executable = cache_config.get("CACHE_EXECUTABLE", "sccache")
                    # Create cached CC script
                    cached_cc_script = create_cached_compiler_script(
                        compiler_name="CC",
                        cache_executable=cache_executable,
                        real_compiler_path=real_cc_path,
                        output_dir=cached_compilers_dir,
                        debug=debug_enabled,
                    )
                    # Create cached CXX script
                    cached_cxx_script = create_cached_compiler_script(
                        compiler_name="CXX",
                        cache_executable=cache_executable,
                        real_compiler_path=real_cxx_path,
                        output_dir=cached_compilers_dir,
                        debug=debug_enabled,
                    )
                    cached_tools = {
                        "CC": str(cached_cc_script),
                        "CXX": str(cached_cxx_script),
                    }
                    print("Created new compiler toolset:")
                    print(f" CC: {cached_tools['CC']}")
                    print(f" CXX: {cached_tools['CXX']}")
                    # Save to local build directory cache file
                    cache_data = {
                        "cache_key": cache_key,
                        "real_cc": real_cc_path,
                        "real_cxx": real_cxx_path,
                        "fake_cc": cached_tools["CC"],
                        "fake_cxx": cached_tools["CXX"],
                        "build_dir": str(current_dir),
                        "platform_packages_count": len(platform_packages),
                    }
                    with open(cache_file, "w") as f:
                        json.dump(cache_data, f, indent=2)
                    print(f"Saved compiler toolset to local cache: {cache_file}")
                    print(" This cache will persist across builds for this platform")
            except Exception as e:
                print("ERROR: Toolset creation failed with exception: " + str(e))
                import traceback

                traceback.print_exc()
                cached_tools = None
        else:
            print("ERROR: create_cached_toolchain function is None")
    if cached_tools:
        # Use Python cached compilers instead of batch scripts
        new_cc = cached_tools.get("CC")
        new_cxx = cached_tools.get("CXX")
        if new_cc and new_cxx:
            print("Created Python cached compilers:")
            print(" CC: " + str(new_cc))
            print(" CXX: " + str(new_cxx))
            # Apply to both environments
            env.Replace(CC=new_cc, CXX=new_cxx)  # type: ignore
            if has_projenv and projenv is not None:
                projenv.Replace(CC=new_cc, CXX=new_cxx)  # type: ignore
                print("Applied Python fake compilers to both env and projenv")
            else:
                print("Applied Python fake compilers to env (projenv not available)")
            # Apply to library builders (critical for framework caching)
            try:
                for lib_builder in env.GetLibBuilders():  # type: ignore
                    lib_builder.env.Replace(CC=new_cc, CXX=new_cxx)  # type: ignore
                    if _VERBOSE:
                        print(
                            "Applied Python fake compilers to library builder: "
                            + str(getattr(lib_builder, "name", "unnamed"))
                        )
            except KeyboardInterrupt:
                import _thread

                _thread.interrupt_main()
                raise
            except Exception as e:
                # Best-effort: library builders may not exist on every platform.
                print("WARNING: Could not apply to library builders: " + str(e))
            print("Python fake compiler cache enabled: " + str(cache_type))
            print(" Original CC: " + str(original_cc))
            print(" Original CXX: " + str(original_cxx))
            print(" Fake CC: " + str(new_cc))
            print(" Fake CXX: " + str(new_cxx))
        else:
            print(
                "ERROR: Failed to create Python fake compilers, falling back to no cache"
            )
            USE_CACHE = False
    else:
        print("ERROR: Python fake compiler creation failed, falling back to no cache")
        USE_CACHE = False
if not USE_CACHE:
    if cache_executable:
        print("Warning: " + str(cache_type) + " setup failed; using default compilers")
    else:
        print("No cache executable configured; using default compilers")
print("Python fake compiler cache environment configured successfully")

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,261 @@
# pyright: reportUnknownMemberType=false
"""
Compilation support for individual boards.
"""
import os
import shutil
import subprocess
import time
from pathlib import Path
from threading import Lock
from typing import List
from ci.boards import Board # type: ignore
from ci.util.locked_print import locked_print
# Flipped to True when a build fails (declared global in the compile routine).
ERROR_HAPPENED = False
# True when running under GitHub Actions CI.
IS_GITHUB = "GITHUB_ACTIONS" in os.environ
# NOTE(review): presumably serializes the first build on CI runners; the
# acquiring code is not visible in this chunk -- confirm usage.
FIRST_BUILD_LOCK = Lock()
USE_FIRST_BUILD_LOCK = IS_GITHUB
def errors_happened() -> bool:
    """Return whether any errors happened during the build."""
    # ERROR_HAPPENED is a module-level flag set by the compile routine.
    return True if ERROR_HAPPENED else False
def _fastled_js_is_parent_directory(p: Path) -> bool:
"""Check if fastled_js is a parent directory of the given path."""
# Check if fastled_js is a parent directory of p
return "fastled_js" in str(p.absolute())
def _run_streaming_with_timing(
    cmd_list: list[str], cwd: str | None, shell: bool
) -> tuple[int, str]:
    """Run *cmd_list*, echoing each output line prefixed with elapsed seconds.

    stderr is merged into stdout.  Returns (returncode, captured output
    without the timing prefixes).
    """
    start_time = time.time()
    proc = subprocess.Popen(
        cmd_list,
        cwd=cwd,
        shell=shell,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    )
    captured: List[str] = []
    if proc.stdout:
        for line in iter(proc.stdout.readline, ""):
            if line:
                elapsed = time.time() - start_time
                # Format timing as seconds with 2 decimal places.
                locked_print(f"{elapsed:5.2f} " + line.rstrip())
                captured.append(line.rstrip())
    proc.wait()
    return proc.returncode, "\n".join(captured)


def compile_for_board_and_example(
    board: Board,
    example: Path,
    build_dir: str | None,
    verbose_on_failure: bool,
    libs: list[str] | None,
) -> tuple[bool, str]:
    """Compile the given example for the given board.

    Copies the example sources into the build tree, runs ``pio run`` or
    ``pio ci`` depending on the board configuration, and streams the build
    output with per-line timing.  On failure, if *verbose_on_failure* is set
    and no other error was reported yet, the command is re-run with ``-v``.

    Returns:
        (success, captured build output).
    """
    global ERROR_HAPPENED  # pylint: disable=global-statement
    if board.board_name == "web":
        locked_print(f"Skipping web target for example {example}")
        return True, ""
    board_name = board.board_name
    use_pio_run = board.use_pio_run
    real_board_name = board.get_real_board_name()
    libs = libs or []
    builddir = (
        Path(build_dir) / board_name if build_dir else Path(".build") / board_name
    )
    builddir.mkdir(parents=True, exist_ok=True)
    srcdir = builddir / "src"
    # Remove the previous *.ino file if it exists, everything else is recycled
    # to speed up the next build.
    if srcdir.exists():
        shutil.rmtree(srcdir, ignore_errors=False)
    locked_print(f"*** Building example {example} for board {board_name} ***")
    cwd: str | None = None
    shell: bool = False
    # Copy all files from the example directory to the "src" directory.
    for src_file in example.rglob("*"):
        if src_file.is_file():
            if _fastled_js_is_parent_directory(src_file):
                # Skip the fastled_js folder, it's not needed for the build.
                continue
            src_dir = src_file.parent
            path = src_dir.relative_to(example)
            dst_dir = srcdir / path
            os.makedirs(dst_dir, exist_ok=True)
            locked_print(f"Copying {src_file} to {dst_dir / src_file.name}")
            os.makedirs(srcdir, exist_ok=True)
            shutil.copy(src_file, dst_dir / src_file.name)
    if use_pio_run:
        # "pio run" builds in place, so project library folders must be copied
        # into the build tree first for the build to see them.
        for lib in libs:
            project_libdir = Path(lib)
            assert project_libdir.exists()
            build_lib = builddir / "lib" / lib
            shutil.rmtree(build_lib, ignore_errors=True)
            shutil.copytree(project_libdir, build_lib)
        cwd = str(builddir)
        cmd_list = [
            "pio",
            "run",
        ]
        # In this case the example was manually copied into src/ above because
        # platformio doesn't support building a single file.
    else:
        cmd_list = [
            "pio",
            "ci",
            "--board",
            real_board_name,
            *[f"--lib={lib}" for lib in libs],
            "--keep-build-dir",
            f"--build-dir={builddir.as_posix()}",
        ]
        cmd_list.append(f"{example.as_posix()}/*ino")
    cmd_str = subprocess.list2cmdline(cmd_list)
    msg_lines = [
        "\n\n******************************",
        f"* Running command in cwd: {cwd if cwd else os.getcwd()}",
        f"* {cmd_str}",
        "******************************\n",
    ]
    locked_print("\n".join(msg_lines))
    # Run the build, streaming output with per-line timing.
    returncode, stdout = _run_streaming_with_timing(cmd_list, cwd, shell)
    # replace all instances of "lib/src" => "src" so intellisense can find the files
    # with one click.
    stdout = stdout.replace("lib/src", "src").replace("lib\\src", "src")
    if returncode != 0:
        if not verbose_on_failure:
            ERROR_HAPPENED = True
            return False, stdout
        if ERROR_HAPPENED:
            # Another build already failed and reported; stay quiet.
            return False, ""
        ERROR_HAPPENED = True
        locked_print(
            f"*** Error compiling example {example} for board {board_name} ***"
        )
        # Re-running command with verbose output to see what the defines are.
        cmd_list.append("-v")
        cmd_str = subprocess.list2cmdline(cmd_list)
        msg_lines = [
            "\n\n******************************",
            "* Re-running failed command but with verbose output:",
            f"* {cmd_str}",
            "******************************\n",
        ]
        locked_print("\n".join(msg_lines))
        # BUGFIX: the verbose re-run previously dropped cwd/shell, so the
        # "pio run" path re-ran from the wrong working directory.
        _, stdout = _run_streaming_with_timing(cmd_list, cwd, shell)
        stdout = (
            stdout
            + "\n\nThis is a second attempt, but with verbose output, look above for compiler errors.\n"
        )
        return False, stdout
    locked_print(f"*** Finished building example {example} for board {board_name} ***")
    return True, stdout
# Function to process task queues for each board
def compile_examples(
    board: Board,
    examples: list[Path],
    build_dir: str | None,
    verbose_on_failure: bool,
    libs: list[str] | None,
) -> tuple[bool, str]:
    """Process the task queue for the given board.

    Builds each example in order, stopping early either when another worker
    has already recorded an error or when a build in this queue fails.

    Returns:
        (success, message) — message carries the failing build's output.
    """
    global ERROR_HAPPENED  # pylint: disable=global-statement
    board_name = board.board_name
    is_first = True
    for example in examples:
        example = example.relative_to(Path(".").resolve())
        if ERROR_HAPPENED:
            # Another worker already failed and reported; abort quietly.
            return True, ""
        locked_print(f"\n*** Building {example} for board {board_name} ***")
        if is_first:
            locked_print(
                f"*** Building for first example {example} board {board_name} ***"
            )

        def _build() -> tuple[bool, str]:
            # Single definition of the build call so the locked and unlocked
            # paths below cannot drift apart.
            return compile_for_board_and_example(
                board=board,
                example=example,
                build_dir=build_dir,
                verbose_on_failure=verbose_on_failure,
                libs=libs,
            )

        if is_first and USE_FIRST_BUILD_LOCK:
            # Github runners are memory limited and the first job is the most
            # memory intensive since all the artifacts are being generated in
            # parallel, so serialize the first build across workers.
            with FIRST_BUILD_LOCK:
                success, message = _build()
        else:
            success, message = _build()
        is_first = False
        if not success:
            ERROR_HAPPENED = True
            return (
                False,
                f"Error building {example} for board {board_name}. stdout:\n{message}",
            )
    return True, ""

View File

@@ -0,0 +1,126 @@
#!/usr/bin/env python3
"""
Abstract Base Class for FastLED Compilers
Defines the interface that all FastLED compiler implementations must follow.
"""
from abc import ABC, abstractmethod
from concurrent.futures import Future
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Any
class CacheType(Enum):
    """Compiler cache type options."""

    # Build without any compiler cache wrapper.
    NO_CACHE = "no_cache"
    # Wrap the compiler with sccache for shared compilation caching.
    SCCACHE = "sccache"
@dataclass
class CompilerResult:
    """Base result class for compiler operations."""

    success: bool  # True when the operation completed without errors
    output: str  # captured output text from the operation
    build_dir: Path  # directory where build artifacts were written
@dataclass
class InitResult(CompilerResult):
    """Result from compiler initialization."""

    @property
    def platformio_ini(self) -> Path:
        """Path to the platformio.ini expected inside the build directory."""
        return self.build_dir / "platformio.ini"
@dataclass
class SketchResult(CompilerResult):
    """Result from sketch compilation."""

    example: str  # name of the example/sketch that was compiled
class Compiler(ABC):
    """Abstract base class defining the interface for FastLED compilers."""

    def __init__(self) -> None:
        """Initialize the compiler."""
        pass

    @abstractmethod
    def build(self, examples: list[str]) -> list[Future[SketchResult]]:
        """Compile every entry of *examples* asynchronously.

        Args:
            examples: List of example names or paths to compile

        Returns:
            List of Future objects containing SketchResult for each example
        """
        ...

    @abstractmethod
    def clean(self) -> None:
        """Remove this platform's build artifacts."""
        ...

    @abstractmethod
    def clean_all(self) -> None:
        """Remove every build artifact (local and global) for this platform."""
        ...

    @abstractmethod
    def deploy(
        self, example: str, upload_port: str | None = None, monitor: bool = False
    ) -> SketchResult:
        """Upload a compiled example to the target device.

        Args:
            example: Name of the example to deploy
            upload_port: Optional specific port for upload
            monitor: If True, attach to device monitor after successful upload

        Returns:
            SketchResult indicating success/failure of deployment
        """
        ...

    @abstractmethod
    def cancel_all(self) -> None:
        """Abort every build currently in flight."""
        ...

    @abstractmethod
    def check_usb_permissions(self) -> tuple[bool, str]:
        """Determine whether USB device access is correctly configured.

        Returns:
            Tuple of (has_access, status_message)
        """
        ...

    @abstractmethod
    def install_usb_permissions(self) -> bool:
        """Install platform-specific USB permissions or the local equivalent.

        Returns:
            True if installation succeeded, False otherwise
        """
        ...

    @abstractmethod
    def get_cache_stats(self) -> str:
        """Produce compiler statistics as a formatted string.

        Implementations may report cache statistics, build metrics,
        performance data, or anything else relevant.

        Returns:
            Formatted statistics string, or an empty string if none available
        """
        ...

View File

@@ -0,0 +1,993 @@
#!/usr/bin/env python3
# pyright: reportUnknownMemberType=false, reportReturnType=false, reportMissingParameterType=false
import argparse
import hashlib
import json
import logging
import os
import shutil
import subprocess
import sys
import time
from concurrent.futures import Future
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from ci.util.paths import PROJECT_ROOT
from ci.util.running_process import RunningProcess
from ci.util.test_args import parse_args as parse_global_test_args
from .clang_compiler import (
BuildFlags,
Compiler,
CompilerOptions,
LinkOptions,
test_clang_accessibility,
)
from .test_example_compilation import create_fastled_compiler
# Configure logging
logger = logging.getLogger(__name__)
BUILD_DIR = PROJECT_ROOT / "tests" / ".build"
BUILD_DIR.mkdir(parents=True, exist_ok=True)
CACHE_DIR = PROJECT_ROOT / ".cache"
CACHE_DIR.mkdir(parents=True, exist_ok=True)
TEST_FILES_LIST = CACHE_DIR / "test_files_list.txt"
# ============================================================================
# HASH-BASED LINKING CACHE (same optimization as examples)
# ============================================================================
def calculate_file_hash(file_path: Path) -> str:
    """Return the SHA256 hex digest of *file_path*, or "no_file" if absent.

    (Same behavior as FastLEDTestCompiler.)
    """
    if not file_path.exists():
        return "no_file"
    digest = hashlib.sha256()
    with open(file_path, "rb") as fh:
        while True:
            chunk = fh.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def calculate_linker_args_hash(linker_args: list[str]) -> str:
    """Return the SHA256 hex digest of the sorted, "|"-joined linker args.

    Sorting makes the hash independent of argument order (same behavior as
    FastLEDTestCompiler).
    """
    canonical = "|".join(sorted(linker_args))
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()
def calculate_link_cache_key(
    object_files: list[str | Path], fastled_lib_path: Path, linker_args: list[str]
) -> str:
    """Calculate comprehensive cache key for linking (same as examples).

    The key combines hashes of every object file (in a stable, path-sorted
    order), the FastLED static library, and the linker arguments, so any
    change to the link inputs produces a new key.

    Returns:
        First 16 hex characters of the combined SHA256 for readability.
    """
    # Hash object files in a stable order (sorted by path) so the key does
    # not depend on the caller's ordering.  The previous implementation also
    # hashed every file a second time in caller order and discarded the
    # result; that redundant pass is removed.
    sorted_obj_paths = sorted(str(obj) for obj in object_files)
    sorted_obj_hashes = [calculate_file_hash(Path(p)) for p in sorted_obj_paths]
    combined_obj_hash = hashlib.sha256(
        "|".join(sorted_obj_hashes).encode("utf-8")
    ).hexdigest()
    # Calculate other hashes
    fastled_hash = calculate_file_hash(fastled_lib_path)
    linker_hash = calculate_linker_args_hash(linker_args)
    # Combine all components (same format as FastLEDTestCompiler)
    combined = f"fastled:{fastled_hash}|objects:{combined_obj_hash}|flags:{linker_hash}"
    final_hash = hashlib.sha256(combined.encode("utf-8")).hexdigest()
    return final_hash[:16]  # Use first 16 chars for readability
def get_link_cache_dir() -> Path:
    """Return the link cache directory, creating it if needed (same as examples)."""
    link_cache = Path(".build/link_cache")
    link_cache.mkdir(parents=True, exist_ok=True)
    return link_cache
def get_cached_executable(test_name: str, cache_key: str) -> Optional[Path]:
    """Return the path of a previously cached executable, or None if absent
    (same as examples)."""
    candidate = get_link_cache_dir() / f"{test_name}_{cache_key}.exe"
    if candidate.exists():
        return candidate
    return None
def cache_executable(test_name: str, cache_key: str, exe_path: Path) -> None:
    """Cache an executable for future use (same as examples).

    Best-effort: a failure to copy only emits a warning, since a missing
    cache entry must never fail the build.
    """
    cache_dir = get_link_cache_dir()
    cached_exe = cache_dir / f"{test_name}_{cache_key}.exe"
    try:
        shutil.copy2(exe_path, cached_exe)
    except KeyboardInterrupt:
        # Never swallow user interrupts (consistent with save_test_files_list).
        raise
    except Exception as e:
        print(f"Warning: Failed to cache {test_name}: {e}")
# ============================================================================
# END HASH-BASED LINKING CACHE
# ============================================================================
def get_test_files() -> Set[str]:
    """Collect relative paths of every tests/test_*.cpp file in the project."""
    tests_dir = PROJECT_ROOT / "tests"
    found: Set[str] = set()
    if tests_dir.exists():
        # Store paths relative to the project root for consistency.
        found = {
            str(p.relative_to(PROJECT_ROOT)) for p in tests_dir.rglob("test_*.cpp")
        }
    return found
def check_test_files_changed() -> bool:
    """Return True when the set of test files differs from the saved snapshot.

    Any problem (missing snapshot, unreadable file) conservatively reports a
    change so the build directory gets cleaned.
    """
    try:
        current_files = get_test_files()
        if not TEST_FILES_LIST.exists():
            # No snapshot yet: treat as changed to force a clean start.
            return True
        with open(TEST_FILES_LIST, "r") as f:
            previous_files = {line.strip() for line in f if line.strip()}
        if current_files == previous_files:
            return False
        print("Test files have changed, cleaning build directory...")
        return True
    except Exception as e:
        print(f"Warning: Error checking test file changes: {e}")
        return True  # Default to cleaning on error
def save_test_files_list() -> None:
    """Persist the current test-file snapshot; failures only warn."""
    try:
        snapshot = sorted(get_test_files())
        with open(TEST_FILES_LIST, "w") as f:
            f.writelines(f"{path}\n" for path in snapshot)
    except KeyboardInterrupt:
        raise
    except Exception as e:
        print(f"Warning: Failed to save test file list: {e}")
def clean_build_directory() -> None:
    """Delete and recreate the unit-test build directory."""
    print("Cleaning build directory...")
    shutil.rmtree(BUILD_DIR, ignore_errors=True)
    BUILD_DIR.mkdir(parents=True, exist_ok=True)
    print("Build directory cleaned.")
HERE = Path(__file__).resolve().parent
WASM_BUILD = False
USE_ZIG = False
USE_CLANG = False
# Legacy CMake helper functions removed - using optimized Python API
def get_unit_test_fastled_sources() -> list[Path]:
    """Get essential FastLED .cpp files for unit test library creation (optimized paradigm).

    Returns existing, de-duplicated .cpp paths under src/, with the core
    files listed first, excluding stub_main.cpp (unit tests have their own
    main) and platform sources irrelevant to the stub platform.
    """
    # Always work from project root
    project_root = Path(
        __file__
    ).parent.parent.parent  # Go up from ci/compiler/ to project root
    src_dir = project_root / "src"
    # Core FastLED files that must be included for unit tests
    core_files: list[Path] = [
        src_dir / "FastLED.cpp",
        src_dir / "colorutils.cpp",
        src_dir / "hsv2rgb.cpp",
    ]
    # rglob("*.cpp") already covers every nested directory, so the previous
    # per-directory glob passes ("*.cpp", "lib8tion/*.cpp",
    # "platforms/stub/*.cpp") were redundant traversals and are removed.
    additional_sources: list[Path] = list(src_dir.rglob("*.cpp"))
    # Filter out duplicates and ensure files exist
    all_sources: list[Path] = []
    seen_files: set[Path] = set()
    for cpp_file in core_files + additional_sources:
        # Skip stub_main.cpp since unit tests have their own main
        if cpp_file.name == "stub_main.cpp":
            continue
        # Skip platform-specific files that aren't needed for unit tests.
        # NOTE(review): this is a substring match over the full path, so any
        # path merely containing e.g. "arm" is also skipped — confirm intended.
        rel_path_str = str(cpp_file)
        if any(
            skip in rel_path_str for skip in ["wasm", "esp", "avr", "arm", "teensy"]
        ):
            continue
        if cpp_file.exists() and cpp_file not in seen_files:
            all_sources.append(cpp_file)
            seen_files.add(cpp_file)
    return all_sources
def create_unit_test_fastled_library(
    clean: bool = False, use_pch: bool = True
) -> Path | None:
    """Create libfastled.a static library specifically for unit tests with FASTLED_FORCE_NAMESPACE=1.

    CRITICAL: Unit tests need their own separate library from examples because they use
    different compilation flags. Unit tests require FASTLED_FORCE_NAMESPACE=1 to put
    all symbols in the fl:: namespace that the tests expect.

    Args:
        clean: When True, rebuild the library even if one already exists.
        use_pch: Forwarded to the library compiler to enable precompiled headers.

    Returns:
        Path to the built libfastled.a, or None if compilation/archiving failed.
    """
    # Unit tests get their own separate library directory under .build/fastled/unit/
    fastled_build_dir = BUILD_DIR.parent / ".build" / "fastled" / "unit"
    fastled_build_dir.mkdir(parents=True, exist_ok=True)
    lib_file = fastled_build_dir / "libfastled.a"
    # Reuse the previously built library unless a clean rebuild was requested.
    if lib_file.exists() and not clean:
        print(f"[LIBRARY] Using existing FastLED library: {lib_file}")
        return lib_file
    print("[LIBRARY] Creating FastLED static library with proper compiler flags...")
    # Start from the plain FastLED example compiler; the unit-test-specific
    # defines it needs (FASTLED_FORCE_NAMESPACE=1, FASTLED_TESTING=1) are
    # appended explicitly below.
    print("[LIBRARY] Creating FastLED library compiler (without unit test flags)...")
    # Save current directory and change to project root for create_fastled_compiler
    import os

    project_root = Path(__file__).parent.parent.parent
    original_cwd = os.getcwd()
    os.chdir(str(project_root))
    try:
        library_compiler = create_fastled_compiler(
            use_pch=use_pch,
            parallel=True,
        )
        # CRITICAL: Add required defines for unit test library compilation
        if library_compiler.settings.defines is None:
            library_compiler.settings.defines = []
        # Add FASTLED_FORCE_NAMESPACE=1 to export symbols in fl:: namespace
        library_compiler.settings.defines.append("FASTLED_FORCE_NAMESPACE=1")
        print("[LIBRARY] Added FASTLED_FORCE_NAMESPACE=1 to library compiler")
        # Add FASTLED_TESTING=1 to include MockTimeProvider and test utility functions
        library_compiler.settings.defines.append("FASTLED_TESTING=1")
        print("[LIBRARY] Added FASTLED_TESTING=1 to library compiler")
    finally:
        # Restore original working directory
        os.chdir(original_cwd)
    # Get FastLED sources using optimized selection
    fastled_sources = get_unit_test_fastled_sources()
    fastled_objects: list[Path] = []
    obj_dir = fastled_build_dir / "obj"
    obj_dir.mkdir(exist_ok=True)
    print(f"[LIBRARY] Compiling {len(fastled_sources)} FastLED source files...")
    # Compile each source file with optimized naming (same as examples)
    futures: List[Tuple[Any, ...]] = []
    project_root = Path(__file__).parent.parent.parent
    src_dir = project_root / "src"
    for cpp_file in fastled_sources:
        # Create unique object file name by including relative path to prevent collisions
        # Convert path separators to underscores to create valid filename
        if cpp_file.is_relative_to(src_dir):
            rel_path = cpp_file.relative_to(src_dir)
        else:
            rel_path = cpp_file
        # Replace path separators with underscores for unique object file names
        obj_name = str(rel_path.with_suffix(".o")).replace("/", "_").replace("\\", "_")
        obj_file = obj_dir / obj_name
        future = library_compiler.compile_cpp_file(cpp_file, obj_file)
        futures.append((future, obj_file, cpp_file))
    # Wait for compilation to complete; a failed file only warns so one bad
    # source does not abort the whole library build.
    compiled_count = 0
    for future, obj_file, cpp_file in futures:
        try:
            result = future.result()
            if result.ok:
                fastled_objects.append(obj_file)
                compiled_count += 1
            else:
                print(
                    f"[LIBRARY] WARNING: Failed to compile {cpp_file.relative_to(src_dir)}: {result.stderr[:100]}..."
                )
        except Exception as e:
            print(
                f"[LIBRARY] WARNING: Exception compiling {cpp_file.relative_to(src_dir)}: {e}"
            )
    print(
        f"[LIBRARY] Successfully compiled {compiled_count}/{len(fastled_sources)} FastLED sources"
    )
    if not fastled_objects:
        print("[LIBRARY] ERROR: No FastLED source files compiled successfully")
        return None
    # Create static library using the same approach as examples
    print(f"[LIBRARY] Creating static library: {lib_file}")
    archive_future = library_compiler.create_archive(fastled_objects, lib_file)
    archive_result = archive_future.result()
    if not archive_result.ok:
        print(f"[LIBRARY] ERROR: Library creation failed: {archive_result.stderr}")
        return None
    print(f"[LIBRARY] SUCCESS: FastLED library created: {lib_file}")
    return lib_file
def create_unit_test_compiler(
    use_pch: bool = True, enable_static_analysis: bool = False, debug: bool = False
) -> Compiler:
    """Create compiler optimized for unit test compilation with PCH support.

    Args:
        use_pch: Enable precompiled headers for faster repeat builds.
        enable_static_analysis: Accepted for interface compatibility.
            NOTE(review): not referenced anywhere in this body — confirm
            whether static analysis wiring was intended here.
        debug: When True, compile with -O0/-g3 debug info; otherwise -O0/-g0.

    Returns:
        A configured Compiler instance for building unit tests.
    """
    # Always work from the project root, not from ci/compiler
    project_root = Path(
        __file__
    ).parent.parent.parent  # Go up from ci/compiler/ to project root
    current_dir = project_root
    src_path = current_dir / "src"
    # Load build flags configuration
    build_flags_path = current_dir / "ci" / "build_unit.toml"
    build_flags = BuildFlags.parse(
        build_flags_path, quick_build=True, strict_mode=False
    )
    # Unit test specific defines
    unit_test_defines = [
        "FASTLED_UNIT_TEST=1",
        "FASTLED_FORCE_NAMESPACE=1",
        "FASTLED_USE_PROGMEM=0",
        "STUB_PLATFORM",
        "ARDUINO=10808",
        "FASTLED_USE_STUB_ARDUINO",
        "SKETCH_HAS_LOTS_OF_MEMORY=1",
        "FASTLED_STUB_IMPL",
        "FASTLED_USE_JSON_UI=1",
        "FASTLED_TESTING",
        "FASTLED_NO_AUTO_NAMESPACE",
        "FASTLED_NO_PINMAP",
        "HAS_HARDWARE_PIN_SUPPORT",
        "FASTLED_DEBUG_LEVEL=1",
        "FASTLED_NO_ATEXIT=1",
        "DOCTEST_CONFIG_NO_EXCEPTIONS_BUT_WITH_ALL_ASSERTS",
        "ENABLE_CRASH_HANDLER",
        "RELEASE=1",  # Disable FASTLED_FORCE_DBG to avoid fl::println dependency
    ]
    # Unit test specific compiler args
    unit_test_args = [
        "-std=gnu++17",
        "-fpermissive",
        "-Wall",
        "-Wextra",
        "-Wno-deprecated-register",
        "-Wno-backslash-newline-escape",
        "-fno-exceptions",
        "-fno-rtti",
        # Optimization/debug controls set below based on debug flag
        "-fno-omit-frame-pointer",
        "-fno-inline-functions",
        "-fno-vectorize",
        "-fno-unroll-loops",
        "-fno-strict-aliasing",
        f"-I{current_dir}",
        f"-I{src_path}",
        f"-I{current_dir / 'tests'}",
        f"-I{src_path / 'platforms' / 'stub'}",
    ]
    # Apply quick vs debug modes
    if debug:
        unit_test_args.extend(
            [
                "-O0",
                "-g3",
                "-fstandalone-debug",
            ]
        )
        if os.name == "nt":
            unit_test_args.extend(
                ["-gdwarf-4"]
            )  # GNU debug info for Windows GNU toolchain
    else:
        unit_test_args.extend(
            [
                "-O0",
                "-g0",
            ]
        )
    # Note: DWARF flags are added above conditionally when debug=True
    # PCH configuration with unit test specific headers
    pch_output_path = None
    pch_header_content = None
    if use_pch:
        cache_dir = current_dir / ".build" / "cache"
        cache_dir.mkdir(parents=True, exist_ok=True)
        pch_output_path = str(cache_dir / "fastled_unit_test_pch.hpp.pch")
        # Unit test specific PCH header content
        pch_header_content = """// FastLED Unit Test PCH - Common headers for faster test compilation
#pragma once

// Core test framework
#include "test.h"

// Core FastLED headers that are used in nearly all unit tests
#include "FastLED.h"

// Common C++ standard library headers used in tests
#include <string>
#include <vector>
#include <stdio.h>
#include <cstdint>
#include <cmath>
#include <cassert>
#include <iostream>
#include <memory>

// Platform headers for stub environment
#include "platforms/stub/fastled_stub.h"

// Commonly tested FastLED components
#include "lib8tion.h"
#include "colorutils.h"
#include "hsv2rgb.h"
#include "fl/math.h"
#include "fl/vector.h"

// Using namespace to match test files
using namespace fl;
"""
        print(f"[PCH] Unit tests will use precompiled headers: {pch_output_path}")
        print(
            f"[PCH] PCH includes: test.h, FastLED.h, lib8tion.h, colorutils.h, and more"
        )
    else:
        print("[PCH] Precompiled headers disabled for unit tests")
    # Determine compiler: zig c++ is the default; USE_CLANG wins over USE_ZIG.
    compiler_cmd = "python -m ziglang c++"
    if USE_CLANG:
        compiler_cmd = "clang++"
        print("USING CLANG COMPILER FOR UNIT TESTS")
    elif USE_ZIG:
        print("USING ZIG COMPILER FOR UNIT TESTS")
    else:
        print("USING DEFAULT COMPILER FOR UNIT TESTS")
    settings = CompilerOptions(
        include_path=str(src_path),
        defines=unit_test_defines,
        std_version="c++17",
        compiler=compiler_cmd,
        compiler_args=unit_test_args,
        use_pch=use_pch,
        pch_output_path=pch_output_path,
        pch_header_content=pch_header_content,
        parallel=True,
    )
    return Compiler(settings, build_flags)
def compile_unit_tests_python_api(
    specific_test: str | None = None,
    enable_static_analysis: bool = False,
    use_pch: bool = True,
    clean: bool = False,
    debug: bool = False,
) -> bool:
    """Compile unit tests using the fast Python API instead of CMake.

    Args:
        specific_test: Optional single test name (with or without the
            ``test_`` prefix, matched case-insensitively); None compiles all.
        enable_static_analysis: Forwarded to the unit-test compiler factory.
        use_pch: Enable precompiled headers.
        clean: Wipe the build directory (and cached objects) first.
        debug: Build with debug symbols.

    Returns:
        bool: True if all tests compiled and linked successfully, False otherwise
    """
    print("=" * 60)
    print("COMPILING UNIT TESTS WITH PYTHON API")
    print("=" * 60)
    if clean:
        print("Cleaning build directory...")
        shutil.rmtree(BUILD_DIR, ignore_errors=True)
        BUILD_DIR.mkdir(parents=True, exist_ok=True)
    # Create optimized compiler for unit tests
    compiler = create_unit_test_compiler(
        use_pch=use_pch, enable_static_analysis=enable_static_analysis, debug=debug
    )
    # Find all test files - work from project root
    project_root = Path(__file__).parent.parent.parent
    tests_dir = project_root / "tests"
    test_files: list[Path] = []
    if specific_test:
        # Handle specific test with case-insensitive matching
        test_name = (
            specific_test
            if specific_test.startswith("test_")
            else f"test_{specific_test}"
        )
        test_file = tests_dir / f"{test_name}.cpp"
        # First try exact case match
        if test_file.exists():
            test_files = [test_file]
            print(f"Compiling specific test: {test_file.name}")
        else:
            # Try case-insensitive matching for all test files
            found_match = False
            for existing_file in tests_dir.glob("test_*.cpp"):
                # Check if the file matches case-insensitively
                existing_stem = existing_file.stem
                existing_name = existing_stem.replace("test_", "")
                if (
                    existing_stem.lower() == test_name.lower()
                    or existing_name.lower() == specific_test.lower()
                ):
                    test_files = [existing_file]
                    print(
                        f"Compiling specific test (case-insensitive match): {existing_file.name}"
                    )
                    found_match = True
                    break
            if not found_match:
                raise RuntimeError(f"Test file not found: {test_file}")
    else:
        # Find all test files
        test_files = list(tests_dir.glob("test_*.cpp"))
        print(f"Found {len(test_files)} unit test files")
    if not test_files:
        print("No test files found")
        # BUGFIX: was a bare `return` (None) in a function annotated -> bool.
        return False
    # Ensure output directory exists
    bin_dir = BUILD_DIR / "bin"
    bin_dir.mkdir(parents=True, exist_ok=True)
    # Create clean execution directory as specified in requirements
    clean_bin_dir = PROJECT_ROOT / "tests" / "bin"
    clean_bin_dir.mkdir(parents=True, exist_ok=True)
    # Step 1: Compile doctest main once
    print("Compiling doctest main...")
    doctest_main_path = tests_dir / "doctest_main.cpp"
    doctest_main_obj = bin_dir / "doctest_main.o"
    if not doctest_main_obj.exists() or clean:
        doctest_compile_future = compiler.compile_cpp_file(
            cpp_path=str(doctest_main_path), output_path=str(doctest_main_obj)
        )
        doctest_result = doctest_compile_future.result()
        if doctest_result.return_code != 0:
            raise RuntimeError(
                f"Failed to compile doctest main: {doctest_result.stderr}"
            )
    # Step 2: Build FastLED library using optimized examples paradigm
    print("Building FastLED library...")
    fastled_lib_path = create_unit_test_fastled_library(clean, use_pch=use_pch)
    # Step 3: Compile and link each test (PARALLEL OPTIMIZATION)
    print(f"Compiling {len(test_files)} tests...")
    start_time = time.time()
    # Phase 1: Start all compilations in parallel (NON-BLOCKING)
    print("🚀 Starting parallel compilation...")
    compile_start = time.time()
    compile_futures: Dict[str, Future[Any]] = {}
    test_info: Dict[str, Any] = {}
    for test_file in test_files:
        test_name = test_file.stem
        executable_path = bin_dir / test_name
        object_path = bin_dir / f"{test_name}.o"
        test_info[test_name] = {
            "test_file": test_file,
            "executable_path": executable_path,
            "object_path": object_path,
        }
        print(f"  Compiling {test_name}...")
        # Start compilation (NON-BLOCKING)
        compile_future = compiler.compile_cpp_file(
            cpp_path=str(test_file), output_path=str(object_path)
        )
        compile_futures[test_name] = compile_future
    # Phase 2: Wait for all compilations to complete and check cache before linking
    compile_dispatch_time = time.time() - compile_start
    print(
        f"⏳ Waiting for compilations to complete... (dispatch took {compile_dispatch_time:.2f}s)"
    )
    compile_wait_start = time.time()
    link_futures: Dict[str, Dict[str, Any]] = {}
    cache_hits = 0
    success_count = 0
    for test_name, compile_future in compile_futures.items():
        try:
            compile_result = (
                compile_future.result()
            )  # BLOCKING WAIT for this specific test
            if compile_result.return_code != 0:
                print(f"❌ Compilation failed for {test_name}: {compile_result.stderr}")
                continue
            # Prepare linking info (same logic as before)
            info = test_info[test_name]
            executable_path = info["executable_path"]
            object_path = info["object_path"]
            # Tests that define their own main() must not link doctest_main.
            tests_with_own_main = ["test_example_compilation"]
            if test_name in tests_with_own_main:
                object_files: list[str | Path] = [object_path]
            else:
                object_files: list[str | Path] = [object_path, doctest_main_obj]
            # Platform-specific linker arguments for crash handler support
            if os.name == "nt":  # Windows
                linker_args = ["-ldbghelp", "-lpsapi"]
            else:  # Linux/macOS
                linker_args = ["-pthread"]
            # HASH-BASED CACHE CHECK (same as examples)
            if not fastled_lib_path:
                print(f"⚠️ No FastLED library found, skipping cache for {test_name}")
                cache_key = "no_fastled_lib"
                cached_exe = None
            else:
                cache_key = calculate_link_cache_key(
                    object_files, fastled_lib_path, linker_args
                )
                cached_exe = get_cached_executable(test_name, cache_key)
            if cached_exe:
                # Cache hit! Copy cached executable to target location
                try:
                    shutil.copy2(cached_exe, executable_path)
                    # Also copy to clean execution directory
                    clean_executable_path = clean_bin_dir / f"{test_name}.exe"
                    shutil.copy2(cached_exe, clean_executable_path)
                    cache_hits += 1
                    success_count += 1
                    print(f"{test_name}: Using cached executable (cache hit)")
                    continue  # Skip linking entirely
                except Exception as e:
                    print(f"  ⚠️ Failed to copy cached {test_name}, will relink: {e}")
                    # Fall through to actual linking
            # Cache miss - proceed with actual linking
            static_libraries: List[Union[str, Path]] = []
            if fastled_lib_path and fastled_lib_path.exists():
                static_libraries.append(fastled_lib_path)
            link_options = LinkOptions(
                output_executable=str(executable_path),
                object_files=object_files,
                static_libraries=static_libraries,
                linker_args=linker_args,
            )
            # Start linking (NON-BLOCKING) and store cache info for later
            link_future = compiler.link_program(link_options)
            link_futures[test_name] = {
                "future": link_future,
                "cache_key": cache_key,
                "executable_path": executable_path,
            }
        except Exception as e:
            print(f"❌ ERROR compiling {test_name}: {e}")
            continue
    # Phase 3: Wait for all linking to complete and cache successful results
    compile_wait_time = time.time() - compile_wait_start
    print(
        f"🔗 Waiting for linking to complete... (compilation took {compile_wait_time:.2f}s)"
    )
    link_start = time.time()
    cache_misses = 0
    for test_name, link_info in link_futures.items():
        try:
            link_future = link_info["future"]
            cache_key = link_info["cache_key"]
            executable_path = link_info["executable_path"]
            link_result = link_future.result()  # BLOCKING WAIT for this specific test
            if link_result.return_code != 0:
                print(f"⚠️ Linking failed for {test_name}: {link_result.stderr}")
                cache_misses += 1
                continue
            else:
                success_count += 1
                cache_misses += 1  # This was a fresh link, not from cache
                # Cache the successful executable for future use (same as examples)
                cache_executable(test_name, cache_key, executable_path)
                # Also copy to clean execution directory
                clean_executable_path = clean_bin_dir / f"{test_name}.exe"
                shutil.copy2(executable_path, clean_executable_path)
        except Exception as e:
            print(f"❌ ERROR linking {test_name}: {e}")
            cache_misses += 1
            continue
    link_time = time.time() - link_start
    compilation_time = time.time() - start_time
    print(f"✅ Unit test compilation completed in {compilation_time:.2f}s")
    print(f"   📊 Time breakdown:")
    print(f"      • Dispatch: {compile_dispatch_time:.2f}s")
    print(f"      • Compilation: {compile_wait_time:.2f}s")
    print(f"      • Linking: {link_time:.2f}s")
    print(f"   🎯 Cache statistics (archive + hash linking optimization):")
    print(f"      • Cache hits: {cache_hits} (skipped linking)")
    print(f"      • Cache misses: {cache_misses} (fresh linking)")
    print(
        f"      • Cache hit ratio: {cache_hits / max(1, cache_hits + cache_misses) * 100:.1f}%"
    )
    print(f"   Successfully compiled: {success_count}/{len(test_files)} tests")
    print(f"   Average: {compilation_time / len(test_files):.2f}s per test")
    print(f"   Output directory: {bin_dir}")
    print(f"   Cache directory: {get_link_cache_dir()}")
    # Return success only if ALL tests compiled and linked successfully
    all_tests_successful = success_count == len(test_files)
    if not all_tests_successful:
        failed_count = len(test_files) - success_count
        print(f"{failed_count} test(s) failed to compile or link")
    return all_tests_successful
def parse_arguments():
    """Build and evaluate the CLI for the unit-test build driver.

    Returns:
        argparse.Namespace with compiler selection, clean/test/check and
        verbosity options.
    """
    ap = argparse.ArgumentParser(
        description="Compile FastLED library with different compiler options."
    )
    ap.add_argument("--use-zig", action="store_true", help="Use Zig compiler")
    ap.add_argument("--use-clang", action="store_true", help="Use Clang compiler")
    ap.add_argument("--wasm", action="store_true", help="Build for WebAssembly")
    ap.add_argument(
        "--clean",
        action="store_true",
        help="Clean the build directory before compiling",
    )
    ap.add_argument("--test", help="Specific test to compile (without test_ prefix)")
    ap.add_argument(
        "--check",
        action="store_true",
        help="Enable static analysis (IWYU, clang-tidy)",
    )
    ap.add_argument("--no-unity", action="store_true", help="Disable unity build")
    ap.add_argument(
        "--no-pch", action="store_true", help="Disable precompiled headers (PCH)"
    )
    ap.add_argument("--debug", action="store_true", help="Enable debug symbols")
    ap.add_argument("--verbose", action="store_true", help="Enable verbose output")
    return ap.parse_args()
def get_build_info(args: argparse.Namespace) -> dict[str, str | dict[str, str]]:
    """Snapshot every setting that can affect build output.

    The result is persisted to build_info.json and compared on the next run
    to decide whether a clean rebuild is required.
    """
    env = os.environ
    arg_summary: dict[str, str] = {
        "use_zig": str(args.use_zig),
        "use_clang": str(args.use_clang),
        "wasm": str(args.wasm),
        "specific_test": str(args.test) if args.test else "all",
    }
    info: dict[str, str | dict[str, str]] = {
        "USE_ZIG": str(USE_ZIG),
        "USE_CLANG": str(USE_CLANG),
        "WASM_BUILD": str(WASM_BUILD),
        "CC": env.get("CC", ""),
        "CXX": env.get("CXX", ""),
        "AR": env.get("AR", ""),
        "CFLAGS": env.get("CFLAGS", ""),
        "CXXFLAGS": env.get("CXXFLAGS", ""),
        "ARGS": arg_summary,
    }
    return info
def should_clean_build(build_info: dict[str, str | dict[str, str]]) -> bool:
    """Return True when the build directory must be wiped before compiling.

    Compares *build_info* against the record persisted in build_info.json.
    A missing or corrupted record forces a clean build. A difference that
    consists solely of the selected test target does not.
    """
    record_path = BUILD_DIR / "build_info.json"
    if not record_path.exists():
        return True
    try:
        with open(record_path, "r") as fh:
            previous = json.load(fh)
    except (json.JSONDecodeError, ValueError) as err:
        # Corrupted/empty record: discard it and force a clean rebuild.
        logger.warning(
            f"Corrupted build_info.json detected ({err}), removing and forcing clean build"
        )
        try:
            record_path.unlink()
        except OSError:
            pass  # best effort; a stale file just forces another clean later
        return True
    if previous == build_info:
        return False
    # Something changed; see whether it is only the specific test target.
    prev_args_raw = previous.get("ARGS", {})
    curr_args_raw = build_info.get("ARGS", {})
    if not isinstance(prev_args_raw, dict) or not isinstance(curr_args_raw, dict):
        return True
    prev_args: Dict[str, Any] = prev_args_raw  # type: ignore
    curr_args: Dict[str, Any] = curr_args_raw  # type: ignore
    prev_no_test: Dict[str, Any] = {
        k: v for k, v in prev_args.items() if k != "specific_test"
    }
    curr_no_test: Dict[str, Any] = {
        k: v for k, v in curr_args.items() if k != "specific_test"
    }
    env_keys = (
        "USE_ZIG",
        "USE_CLANG",
        "WASM_BUILD",
        "CC",
        "CXX",
        "AR",
        "CFLAGS",
        "CXXFLAGS",
    )
    env_unchanged = all(previous.get(k) == build_info.get(k) for k in env_keys)
    if prev_no_test == curr_no_test and env_unchanged:
        old_test = prev_args.get("specific_test", "all")
        new_test = curr_args.get("specific_test", "all")
        print(
            f"Build parameters unchanged, only test target changed: {old_test} -> {new_test}"
        )
        return False
    return True
def update_build_info(build_info: dict[str, str | dict[str, str]]):
    """Persist the current build configuration for later change detection."""
    record_path = BUILD_DIR / "build_info.json"
    with open(record_path, "w") as fh:
        json.dump(build_info, fh, indent=2)
def main() -> None:
    """Entry point for the unit-test build driver.

    Selects a compiler (GCC by default, with Zig/Clang fallbacks), decides
    whether a clean rebuild is needed, compiles the unit tests via the
    Python build API, and records the build configuration on success.

    Fixes vs. previous version:
    - Removed the unreachable ``elif args.clean:`` branch: ``args.clean``
      implies ``need_clean`` is True, so the first branch always handled an
      explicit --clean.
    - Fixed typo in the GCC-fallback message ("falling back zig's" ->
      "falling back to zig's").
    """
    global USE_ZIG, USE_CLANG, WASM_BUILD
    args = parse_arguments()
    USE_ZIG = args.use_zig  # use Zig's clang compiler
    USE_CLANG = args.use_clang  # Use pure Clang for WASM builds
    WASM_BUILD = args.wasm
    using_gcc = not USE_ZIG and not USE_CLANG and not WASM_BUILD
    if using_gcc and not shutil.which("g++"):
        print(
            "gcc compiler not found in PATH, falling back to zig's built in clang compiler"
        )
        USE_ZIG = True
        USE_CLANG = False
    if USE_CLANG and not test_clang_accessibility():
        print("Clang compiler not found in PATH, falling back to Zig-clang compiler")
        USE_ZIG = True
        USE_CLANG = False
    os.chdir(str(HERE))
    print(f"Current directory: {Path('.').absolute()}")
    # Auto-detection for --clean based on test file changes; only scan when
    # --clean wasn't explicitly requested.
    need_clean = args.clean
    if not need_clean:
        need_clean = check_test_files_changed()
    build_info = get_build_info(args)
    if need_clean or should_clean_build(build_info):
        clean_build_directory()
        # Save the file list after cleaning so future change detection works.
        save_test_files_list()
    # Unit tests use the optimized Python API by default (same as examples);
    # PCH is enabled unless --no-pch was specified.
    use_pch = not getattr(args, "no_pch", False)
    compilation_successful = compile_unit_tests_python_api(
        specific_test=args.test,
        enable_static_analysis=args.check,
        use_pch=use_pch,
        clean=need_clean,
        debug=args.debug,
    )
    if compilation_successful:
        update_build_info(build_info)
        print("FastLED library compiled successfully.")
    else:
        print("❌ FastLED unit test compilation failed!")
        import sys

        sys.exit(1)
# Script entry point: run the full compile (and optional clean) pipeline.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,906 @@
#!/usr/bin/env python3
# pyright: reportUnknownMemberType=false, reportMissingParameterType=false
import argparse
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import time # Added for timing test execution
from dataclasses import dataclass
from pathlib import Path
from queue import Empty, PriorityQueue
from threading import Event, Lock, Thread
from typing import List
import psutil
from ci.util.paths import PROJECT_ROOT
def optimize_python_command(cmd: list[str]) -> list[str]:
    """Rewrite bare ``python``/``python3`` commands to run under ``uv``.

    Running via ``uv run python`` guarantees access to uv-managed packages
    (e.g. ziglang); invoking the interpreter directly would bypass the uv
    environment.

    Args:
        cmd: Command argv list, possibly starting with "python"/"python3".

    Returns:
        The command with ``uv run python`` prepended when applicable,
        otherwise the input unchanged.
    """
    if not cmd:
        return cmd
    if cmd[0] not in ("python", "python3"):
        return cmd
    return ["uv", "run", "python", *cmd[1:]]
from ci.util.test_exceptions import (
CompilationFailedException,
TestExecutionFailedException,
TestFailureInfo,
TestTimeoutException,
)
class OutputBuffer:
    """Thread-safe output buffer that prints queued messages in order.

    Messages are tagged with ``(test_index, sequence)`` so the PriorityQueue
    drains them in a stable order even when several producers write
    concurrently. A daemon worker thread performs the actual printing.
    """

    def __init__(self) -> None:
        self.output_queue: PriorityQueue[tuple[int, int, str]] = PriorityQueue()
        self.next_sequence: int = 0
        self.sequence_lock: Lock = Lock()
        self.stop_event: Event = Event()
        self.output_thread: Thread = Thread(target=self._output_worker, daemon=True)
        self.output_thread.start()

    def write(self, test_index: int, message: str) -> None:
        """Queue *message* for printing, tagged with *test_index* for ordering."""
        with self.sequence_lock:
            seq = self.next_sequence
            self.next_sequence += 1
            self.output_queue.put((test_index, seq, message))

    def _output_worker(self) -> None:
        """Drain the queue and print entries until stopped AND empty."""
        while not self.stop_event.is_set() or not self.output_queue.empty():
            try:
                entry: tuple[int, int, str] = self.output_queue.get(timeout=0.1)
                print(entry[2], flush=True)
                self.output_queue.task_done()
            except Empty:
                continue
            except Exception as e:
                print(f"Error in output worker: {e}")
                continue

    def stop(self) -> None:
        """Signal the worker to finish draining and wait for it to exit."""
        self.stop_event.set()
        if self.output_thread.is_alive():
            self.output_thread.join()
# Configure console for UTF-8 output on Windows. The status messages below
# use emoji/unicode; on the default cp1252 console they would raise
# UnicodeEncodeError without this.
if os.name == "nt":  # Windows
    # Try to set console to UTF-8 mode
    try:
        # Set stdout and stderr to UTF-8 encoding
        # Note: reconfigure() was added in Python 3.7; the hasattr/callable
        # guard tolerates wrapped/replaced stream objects (e.g. under pytest).
        if hasattr(sys.stdout, "reconfigure") and callable(
            getattr(sys.stdout, "reconfigure", None)
        ):
            sys.stdout.reconfigure(encoding="utf-8", errors="replace")  # type: ignore[attr-defined]
        if hasattr(sys.stderr, "reconfigure") and callable(
            getattr(sys.stderr, "reconfigure", None)
        ):
            sys.stderr.reconfigure(encoding="utf-8", errors="replace")  # type: ignore[attr-defined]
    except (AttributeError, OSError):
        # Fallback for older Python versions or if reconfigure fails
        pass
# Environment flags for backward compatibility
_SHOW_COMPILE = os.environ.get("FASTLED_TEST_SHOW_COMPILE", "").lower() in (
"1",
"true",
"yes",
)
_SHOW_LINK = os.environ.get("FASTLED_TEST_SHOW_LINK", "").lower() in (
"1",
"true",
"yes",
)
@dataclass
class FailedTest:
    """Record of one failed test executable run, collected for the summary."""

    name: str  # test file/executable name (e.g. "test_foo.exe")
    return_code: int  # process exit status of the failed run
    stdout: str  # combined stdout/stderr captured from the run (incl. GDB output)
def check_iwyu_available() -> bool:
    """Return True when include-what-you-use can be invoked on this system.

    Probes ``include-what-you-use --version`` with a 10s timeout; a missing
    binary, failure, or timeout all report unavailability.
    """
    try:
        probe = subprocess.run(
            ["include-what-you-use", "--version"],
            capture_output=True,
            text=True,
            timeout=10,
        )
    except (
        subprocess.CalledProcessError,
        FileNotFoundError,
        subprocess.TimeoutExpired,
    ):
        return False
    return probe.returncode == 0
def run_command(
command: str | list[str],
use_gdb: bool = False,
*,
verbose: bool = False,
show_compile: bool = False,
show_link: bool = False,
) -> tuple[int, str]:
captured_lines: list[str] = []
# Determine command type
is_test_execution = False
is_compile = False
is_link = False
if isinstance(command, str):
cmd_lower = command.replace("\\", "/").lower()
# Check if running test executable
is_test_execution = (
"/test_" in cmd_lower
or ".build/bin/test_" in cmd_lower
or cmd_lower.endswith(".exe")
)
# Check if compiling
is_compile = "-c" in cmd_lower and (".cpp" in cmd_lower or ".c" in cmd_lower)
# Check if linking
is_link = (
not is_compile
and not is_test_execution
and ("-o" in cmd_lower or "lib" in cmd_lower)
)
if use_gdb:
with tempfile.NamedTemporaryFile(mode="w+", delete=False) as gdb_script:
gdb_script.write("set pagination off\n")
gdb_script.write("run\n")
gdb_script.write("bt full\n")
gdb_script.write("info registers\n")
gdb_script.write("x/16i $pc\n")
gdb_script.write("thread apply all bt full\n")
gdb_script.write("quit\n")
gdb_command = (
f"gdb -return-child-result -batch -x {gdb_script.name} --args {command}"
)
process = subprocess.Popen(
gdb_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, # Merge stderr into stdout
shell=True,
text=False,
)
assert process.stdout is not None
# Stream and capture output
while True:
line_bytes = process.stdout.readline()
line = line_bytes.decode("utf-8", errors="ignore")
if not line and process.poll() is not None:
break
if line:
captured_lines.append(line.rstrip())
# Always print GDB output (it's only used for crashes anyway)
try:
print(line, end="", flush=True)
except UnicodeEncodeError:
# Fallback: replace problematic characters
print(
line.encode("utf-8", errors="replace").decode(
"utf-8", errors="replace"
),
end="",
flush=True,
)
os.unlink(gdb_script.name)
output = "\n".join(captured_lines)
return process.returncode, output
else:
# Optimize list commands to avoid shell overhead
if isinstance(command, list):
# Optimize python commands and use shell=False for better performance
python_exe = optimize_python_command(command)
process = subprocess.Popen(
python_exe,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, # Merge stderr into stdout
shell=False, # Use shell=False for better performance with list commands
text=False,
)
else:
# String commands still need shell=True
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, # Merge stderr into stdout
shell=True,
text=False,
)
assert process.stdout is not None
# Stream and capture output
while True:
line_bytes = process.stdout.readline()
line = line_bytes.decode("utf-8", errors="ignore")
if not line and process.poll() is not None:
break
if line:
captured_lines.append(line.rstrip())
# Determine if we should print this line
should_print = (
verbose # Always print in verbose mode
or (is_compile and show_compile) # Print compilation if enabled
or (is_link and show_link) # Print linking if enabled
or (not is_test_execution) # Print non-test output
or (
is_test_execution and process.returncode != 0
) # Print failed test output
or (
is_test_execution
and any(
marker in line
for marker in [
"Running test:",
"Test passed",
"Test FAILED",
"passed with return code",
"Test output:",
]
)
) # Print test status
)
if should_print:
try:
# Add prefix for compile/link commands
if is_compile and show_compile:
print("[COMPILE] ", end="", flush=True)
elif is_link and show_link:
print("[LINK] ", end="", flush=True)
print(line, end="", flush=True)
except UnicodeEncodeError:
# Fallback: replace problematic characters
print(
line.encode("utf-8", errors="replace").decode(
"utf-8", errors="replace"
),
end="",
flush=True,
)
output = "\n".join(captured_lines)
return process.returncode, output
def compile_tests(
    clean: bool = False,
    unknown_args: list[str] | None = None,
    specific_test: str | None = None,
    quick_build: bool = True,
    *,
    verbose: bool = False,
    show_compile: bool = False,
    show_link: bool = False,
) -> None:
    """
    Compile C++ tests using the Python build system.

    Args:
        clean: Force a clean rebuild.
        unknown_args: Extra caller flags forwarded to the compile step
            (e.g. --check, --no-pch). Defaults to no extra flags; the
            previous mutable default list (``[]``) is avoided.
        specific_test: Restrict compilation to one test (without test_ prefix).
        quick_build: Quick build (no debug symbols) unless disabled.
        verbose: Forward verbose output to the compile step.
        show_compile: Show compilation commands/output.
        show_link: Show linking commands/output.
    """
    unknown_args = [] if unknown_args is None else unknown_args
    os.chdir(str(PROJECT_ROOT))
    print("🔧 Compiling tests using Python build system")
    _compile_tests_python(
        clean,
        unknown_args,
        specific_test,
        quick_build=quick_build,
        verbose=verbose,
        show_compile=show_compile,
        show_link=show_link,
    )
def _compile_tests_python(
    clean: bool = False,
    unknown_args: list[str] | None = None,
    specific_test: str | None = None,
    quick_build: bool = True,
    *,
    verbose: bool = False,
    show_compile: bool = False,
    show_link: bool = False,
) -> None:
    """Run the PCH-optimized Python build system as a subprocess.

    Translates the given options into the ``ci.compiler.cpp_test_compile``
    command line and executes it under ``uv``.

    Args:
        clean: Pass --clean to force a clean rebuild.
        unknown_args: Extra caller flags to translate (--check, --no-pch).
            Defaults to no extra flags; the previous mutable default list
            is avoided. Uses the module-level ``subprocess`` import (the
            old redundant function-local import was removed).
        specific_test: Restrict the build to one test (without test_ prefix).
        quick_build: When False, forward --debug for full debug symbols.
        verbose: Forward --verbose to the compile step.
        show_compile: Accepted for interface symmetry; not used here.
        show_link: Accepted for interface symmetry; not used here.

    Raises:
        RuntimeError: If the compile subprocess exits non-zero.
    """
    unknown_args = [] if unknown_args is None else unknown_args
    cmd = ["uv", "run", "python", "-m", "ci.compiler.cpp_test_compile"]
    if specific_test:
        cmd.extend(["--test", specific_test])
    if clean:
        cmd.append("--clean")
    if verbose:
        cmd.append("--verbose")
    if "--check" in unknown_args:
        cmd.append("--check")
    if "--no-pch" in unknown_args:
        cmd.append("--no-pch")
    # Forward debug mode to the compiler when quick_build is disabled
    if not quick_build:
        cmd.append("--debug")
    print("🚀 Using Python build system with PCH optimization")
    result = subprocess.run(cmd)
    if result.returncode != 0:
        raise RuntimeError(
            f"Unit test compilation failed with return code {result.returncode}"
        )
def run_tests(
    specific_test: str | None = None,
    *,
    verbose: bool = False,
    show_compile: bool = False,
    show_link: bool = False,
) -> None:
    """
    Run compiled tests with GDB crash analysis support.

    Fix: the verbosity keywords were previously accepted but silently
    dropped; they are now forwarded to the Python test runner.

    Args:
        specific_test: Run only this test, or all tests when None.
        verbose: Echo full test output.
        show_compile: Show compilation commands/output.
        show_link: Show linking commands/output.
    """
    _run_tests_python(
        specific_test,
        verbose=verbose,
        show_compile=show_compile,
        show_link=show_link,
    )
def _run_tests_python(
    specific_test: str | None = None,
    *,
    verbose: bool = False,
    show_compile: bool = False,
    show_link: bool = False,
) -> None:
    """Run tests produced by the Python build system.

    Locates the compiled test executables via FastLEDTestCompiler, then
    delegates to _execute_test_files for parallel execution and to
    _handle_test_results for the failure summary.

    Fix: the verbosity keywords were previously accepted but never forwarded
    to _execute_test_files / _handle_test_results; they now pass through.

    Exits the process with status 1 when no compiled tests are found.
    """
    # Import the test compiler system lazily to avoid import cost when unused.
    from ci.compiler.test_compiler import FastLEDTestCompiler

    # Get test executables from the Python build system
    test_compiler = FastLEDTestCompiler.get_existing_instance()
    if not test_compiler:
        print("No compiled tests found. Run compilation first.")
        sys.exit(1)
    test_executables = test_compiler.get_test_executables(specific_test)
    if not test_executables:
        test_name = specific_test or "any tests"
        print(f"No test executables found for: {test_name}")
        sys.exit(1)
    print(f"Running {len(test_executables)} tests from Python build...")
    # Print list of tests that will be executed
    print("Tests to execute:")
    for i, test_exec in enumerate(test_executables, 1):
        # Convert absolute path to relative for display
        rel_path = os.path.relpath(test_exec.executable_path)
        print(f"  {i}. {test_exec.name} ({rel_path})")
    print("")
    failed_tests: list[FailedTest] = []
    # Convert to file-list format for compatibility with existing logic
    files: list[str] = []
    test_paths: dict[str, str] = {}
    for test_exec in test_executables:
        file_name = test_exec.name
        if os.name == "nt" and not file_name.endswith(".exe"):
            file_name += ".exe"
        files.append(file_name)
        test_paths[file_name] = str(test_exec.executable_path)
    print(f"Starting test execution for {len(files)} test files...")
    _execute_test_files(
        files,
        "",
        failed_tests,
        specific_test,
        test_paths,
        verbose=verbose,
        show_compile=show_compile,
        show_link=show_link,
    )
    _handle_test_results(failed_tests, verbose=verbose)
def _execute_test_files(
    files: list[str],
    test_dir: str,
    failed_tests: list[FailedTest],
    specific_test: str | None,
    test_paths: dict[str, str] | None = None,
    *,
    verbose: bool = False,
    show_compile: bool = False,
    show_link: bool = False,
) -> None:
    """
    Execute test files in parallel with full GDB crash analysis.
    Args:
        files: List of test file names
        test_dir: Directory containing tests (for CMake) or empty string (for Python API)
        failed_tests: List to collect failed tests
        specific_test: Specific test name if filtering
        test_paths: Dict mapping file names to full paths (for Python API)

    Failed tests are appended to *failed_tests* in place; the function itself
    returns nothing and never raises for individual test failures.
    NOTE(review): *specific_test* is accepted but not referenced in this body;
    filtering appears to happen in the caller — confirm before removing.
    """
    total_tests = len(files)
    successful_tests = 0
    completed_tests = 0
    # Initialize output buffer for ordered output
    output_buffer = OutputBuffer()
    output_buffer.write(0, f"Executing {total_tests} test files in parallel...")
    # Determine number of workers based on configuration
    # Get configuration from args
    # NOTE(review): parse_args() re-reads sys.argv here rather than receiving
    # the parsed namespace from the caller — confirm this is intentional.
    args = parse_args()
    # Force sequential execution if NO_PARALLEL is set
    if os.environ.get("NO_PARALLEL"):
        max_workers = 1
        output_buffer.write(
            0, "NO_PARALLEL environment variable set - forcing sequential execution"
        )
    elif args.sequential:
        max_workers = 1
    elif args.parallel:
        max_workers = args.parallel
    else:
        max_workers = max(1, multiprocessing.cpu_count() - 1)  # Leave one core free
    # Check memory limit
    if args.max_memory:
        memory_limit = args.max_memory * 1024 * 1024  # Convert MB to bytes
        available_memory = psutil.virtual_memory().available
        if memory_limit > available_memory:
            output_buffer.write(
                0,
                f"Warning: Requested memory limit {args.max_memory}MB exceeds available memory {available_memory / (1024 * 1024):.0f}MB",
            )
            output_buffer.write(
                0, "Reducing number of parallel workers to stay within memory limits"
            )
            # Estimate memory per test based on previous runs or default to 100MB
            memory_per_test = 100 * 1024 * 1024  # 100MB per test
            max_parallel_by_memory = max(1, memory_limit // memory_per_test)
            max_workers = min(max_workers, max_parallel_by_memory)
    output_buffer.write(0, f"Using {max_workers} parallel workers")
    # Thread-safe counter guarding completed_tests / successful_tests updates
    counter_lock = Lock()

    def run_single_test(
        test_file: str, test_index: int
    ) -> tuple[bool, float, str, int]:
        """Run a single test and return its results"""
        # Returns (success, elapsed_seconds, captured_output, return_code).
        nonlocal completed_tests
        if test_paths:
            test_path = test_paths[test_file]
        else:
            test_path = os.path.join(test_dir, test_file)
        # For .cpp files, compile them first
        if test_path.endswith(".cpp"):
            # Create a temporary directory for compilation
            with tempfile.TemporaryDirectory() as temp_dir:
                # Compile the test file
                output_buffer.write(
                    test_index,
                    f"[{test_index}/{total_tests}] Compiling test: {test_file}",
                )
                compile_cmd = [
                    "python",
                    "-m",
                    "ziglang",
                    "c++",
                    "-o",
                    os.path.join(temp_dir, "test.exe"),
                    test_path,
                    "-I",
                    os.path.join(PROJECT_ROOT, "src"),
                    "-I",
                    os.path.join(PROJECT_ROOT, "tests"),
                    # NOTE: Compiler flags now come from build configuration TOML
                ]
                return_code, stdout = run_command(compile_cmd)
                if return_code != 0:
                    output_buffer.write(
                        test_index,
                        f"[{test_index}/{total_tests}] ERROR: Failed to compile test: {test_file}",
                    )
                    return False, 0.0, f"Failed to compile test: {stdout}", return_code
                # Update test_path to point to the compiled executable
                # NOTE(review): execution below happens after this context
                # exits, so TemporaryDirectory will have deleted test.exe by
                # then — confirm the .cpp path is actually exercised.
                test_path = os.path.join(temp_dir, "test.exe")
        if not (os.path.isfile(test_path) and os.access(test_path, os.X_OK)):
            output_buffer.write(
                test_index,
                f"[{test_index}/{total_tests}] ERROR: Test file not found or not executable: {test_path}",
            )
            return False, 0.0, f"Test file not found or not executable: {test_path}", 1
        output_buffer.write(
            test_index, f"[{test_index}/{total_tests}] Running test: {test_file}"
        )
        if verbose:
            output_buffer.write(test_index, f"  Command: {test_path}")
        start_time = time.time()
        # Pass --minimal flag to doctest when not in verbose mode to suppress output unless tests fail
        cmd = [test_path]
        if not verbose:
            cmd.append("--minimal")
        return_code, stdout = run_command(cmd)
        elapsed_time = time.time() - start_time
        output = stdout
        # Heuristic crash detection: look for the runner's own failure banner
        # in the captured output to decide whether a GDB re-run is warranted.
        failure_pattern = re.compile(r"Test .+ failed with return code (\d+)")
        failure_match = failure_pattern.search(output)
        is_crash = failure_match is not None
        # Handle crashes with GDB (must be done synchronously)
        if is_crash:
            output_buffer.write(
                test_index, f"Test crashed. Re-running with GDB to get stack trace..."
            )
            _, gdb_stdout = run_command(test_path, use_gdb=True)
            stdout += "\n--- GDB Output ---\n" + gdb_stdout
            # Extract crash information
            crash_info = extract_crash_info(gdb_stdout)
            output_buffer.write(
                test_index, f"Crash occurred at: {crash_info.file}:{crash_info.line}"
            )
            output_buffer.write(test_index, f"Cause: {crash_info.cause}")
            output_buffer.write(test_index, f"Stack: {crash_info.stack}")
        # Print output based on verbosity and status
        if verbose or return_code != 0:
            output_buffer.write(test_index, "Test output:")
            output_buffer.write(test_index, stdout)
        if return_code == 0:
            output_buffer.write(
                test_index, f"  Test {test_file} passed in {elapsed_time:.2f}s"
            )
        else:
            output_buffer.write(
                test_index,
                f"  Test {test_file} FAILED with return code {return_code} in {elapsed_time:.2f}s",
            )
        with counter_lock:
            completed_tests += 1
        return return_code == 0, elapsed_time, stdout, return_code

    try:
        # Run tests in parallel using ThreadPoolExecutor
        from concurrent.futures import ThreadPoolExecutor, as_completed

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Submit all tests; indices are 1-based for display purposes.
            future_to_test = {
                executor.submit(run_single_test, test_file, i + 1): test_file
                for i, test_file in enumerate(files)
            }
            # Process results as they complete
            for future in as_completed(future_to_test):
                test_file = future_to_test[future]
                try:
                    success, _, stdout, return_code = future.result()
                    if success:
                        with counter_lock:
                            successful_tests += 1
                    else:
                        failed_tests.append(
                            FailedTest(
                                name=test_file, return_code=return_code, stdout=stdout
                            )
                        )
                except Exception as e:
                    # A worker raised (not a test failure): record it as failed.
                    output_buffer.write(
                        0, f"ERROR: Test {test_file} failed with exception: {e}"
                    )
                    failed_tests.append(
                        FailedTest(name=test_file, return_code=1, stdout=str(e))
                    )
        # Print final summary
        output_buffer.write(
            0,
            f"Test execution complete: {successful_tests} passed, {len(failed_tests)} failed",
        )
        if successful_tests == total_tests:
            output_buffer.write(0, "All tests passed successfully!")
        else:
            output_buffer.write(
                0, f"Some tests failed ({len(failed_tests)} of {total_tests})"
            )
    finally:
        # Ensure the output buffer's worker thread is stopped even on error.
        output_buffer.stop()
def _handle_test_results(
    failed_tests: list[FailedTest], *, verbose: bool = False
) -> None:
    """Summarize failures and raise when any test failed.

    Prints each failed test's return code and indented output, then raises
    TestExecutionFailedException carrying structured failure records. With
    no failures, optionally prints a success note.
    """
    if not failed_tests:
        if verbose:
            print("All tests passed.")
        return
    print("Failed tests summary:")
    failures: List[TestFailureInfo] = []
    for entry in failed_tests:
        print(f"Test {entry.name} failed with return code {entry.return_code}")
        # Always show output on failure, indented for readability.
        print("Output:")
        for line in entry.stdout.splitlines():
            print(f"  {line}")
        print()  # spacing between failed tests
        failures.append(
            TestFailureInfo(
                test_name=entry.name,
                command=f"test_{entry.name}",
                return_code=entry.return_code,
                output=entry.stdout,
                error_type="test_execution_failure",
            )
        )
    tests_failed = len(failed_tests)
    names = ", ".join(entry.name for entry in failed_tests)
    print(f"{tests_failed} test{'s' if tests_failed != 1 else ''} failed: {names}")
    raise TestExecutionFailedException(f"{tests_failed} test(s) failed", failures)
@dataclass
class CrashInfo:
    """Parsed summary of a GDB backtrace for a crashed test run."""

    cause: str = "Unknown"  # signal/reason text from "Program received signal ..."
    stack: str = "Unknown"  # top stack frame line ("#0 ...")
    file: str = "Unknown"  # source file of the crash site, if GDB reported one
    line: str = "Unknown"  # line number (kept as text) of the crash site
def extract_crash_info(gdb_output: str) -> CrashInfo:
    """Parse batch-GDB output into a CrashInfo summary.

    Pulls the signal line ("Program received signal ...") as the cause and
    the top frame ("#0 ...") as the stack, then scans forward for an "at
    file:line" location. Any parse failure leaves the "Unknown" defaults.
    """
    info = CrashInfo()
    lines = gdb_output.split("\n")
    try:
        for idx, text in enumerate(lines):
            if text.startswith("Program received signal"):
                try:
                    info.cause = text.split(":", 1)[1].strip()
                except IndexError:
                    info.cause = text.strip()
            elif text.startswith("#0"):
                info.stack = text
                # Find the first following line carrying a source location.
                # NOTE(review): substring match on "at" can hit unrelated
                # words — confirm against real GDB output.
                for probe in lines[idx:]:
                    if "at" in probe:
                        try:
                            _, location = probe.split("at", 1)
                            location = location.strip()
                            if ":" in location:
                                info.file, info.line = location.rsplit(":", 1)
                            else:
                                info.file = location
                        except ValueError:
                            pass  # keep the "Unknown" defaults on bad splits
                        break
                break
    except Exception as e:
        print(f"Error parsing GDB output: {e}")
    return info
def parse_args() -> argparse.Namespace:
    """Parse CLI options for the test compile/run driver.

    Unrecognized options are preserved on ``args.unknown`` so they can be
    forwarded to the underlying compile step.
    """
    p = argparse.ArgumentParser(description="Compile and run C++ tests")
    p.add_argument(
        "--compile-only",
        action="store_true",
        help="Only compile the tests without running them",
    )
    p.add_argument(
        "--run-only",
        action="store_true",
        help="Only run the tests without compiling them",
    )
    p.add_argument(
        "--only-run-failed-test",
        action="store_true",
        help="Only run the tests that failed in the previous run",
    )
    p.add_argument("--clean", action="store_true", help="Clean build before compiling")
    p.add_argument("--test", help="Specific test to run (without extension)")
    p.add_argument("--verbose", action="store_true", help="Enable verbose output")
    p.add_argument(
        "--show-compile",
        action="store_true",
        help="Show compilation commands and output",
    )
    p.add_argument(
        "--show-link",
        action="store_true",
        help="Show linking commands and output",
    )
    p.add_argument(
        "--parallel",
        type=int,
        help="Number of parallel test processes to run (default: CPU count - 1)",
    )
    p.add_argument(
        "--sequential",
        action="store_true",
        help="Run tests sequentially (disables parallel execution)",
    )
    p.add_argument(
        "--max-memory",
        type=int,
        help="Maximum memory usage in MB for parallel test execution",
    )
    # Exactly one compiler may be selected explicitly.
    compiler = p.add_mutually_exclusive_group()
    compiler.add_argument("--clang", help="Use Clang compiler", action="store_true")
    compiler.add_argument(
        "--gcc", help="Use GCC compiler (default on non-Windows)", action="store_true"
    )
    p.add_argument(
        "--check",
        action="store_true",
        help="Enable static analysis (IWYU, clang-tidy)",
    )
    p.add_argument(
        "--no-unity",
        action="store_true",
        help="Disable unity builds for cpp tests",
    )
    p.add_argument(
        "--no-pch",
        action="store_true",
        help="Disable precompiled headers (PCH) for unit tests",
    )
    p.add_argument(
        "--debug",
        action="store_true",
        help="Use debug build mode with full debug symbols (default is quick mode with -g0)",
    )
    args, unknown = p.parse_known_args()
    args.unknown = unknown
    return args
def main() -> None:
    """Drive compilation and/or execution of the C++ unit tests from CLI flags.

    Known test-framework exceptions are rendered as a detailed failure report
    and converted into a meaningful process exit code.
    """
    try:
        args = parse_args()
        run_only = args.run_only
        compile_only = args.compile_only
        specific_test = args.test
        # only_run_failed_test feature to be implemented in future
        _ = args.only_run_failed_test
        quick_build = not args.debug  # quick mode unless --debug was given
        # use_gcc = args.gcc
        if not run_only:
            passthrough = args.unknown
            # Note: --gcc is handled by not passing --use-clang (GCC is the default in compiler/cpp_test_compile.py)
            for enabled, flag in (
                (args.clang, "--use-clang"),
                (args.check, "--check"),
                (args.no_unity, "--no-unity"),
                (args.no_pch, "--no-pch"),
            ):
                if enabled:
                    passthrough.append(flag)
            compile_tests(
                clean=args.clean,
                unknown_args=passthrough,
                specific_test=specific_test,
                quick_build=quick_build,
                verbose=args.verbose,
                show_compile=args.show_compile,
                show_link=args.show_link,
            )
        if not compile_only:
            # Use our own test runner instead of CTest since CTest integration
            # is broken; specific_test is already None when unset.
            run_tests(
                specific_test,
                verbose=args.verbose,
                show_compile=args.show_compile,
                show_link=args.show_link,
            )
    except (
        CompilationFailedException,
        TestExecutionFailedException,
        TestTimeoutException,
    ) as exc:
        # Print detailed failure information
        print("\n" + "=" * 60)
        print("FASTLED TEST FAILURE DETAILS")
        print("=" * 60)
        print(exc.get_detailed_failure_info())
        print("=" * 60)
        # Exit with the first failure's return code, falling back to 1.
        first_rc = exc.failures[0].return_code if exc.failures else 0
        sys.exit(first_rc if first_rc != 0 else 1)
# Script entry point: compile and/or run the C++ tests per the CLI flags.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,784 @@
"""
Enhanced Arduino Package Index Implementation with Pydantic
This module provides a robust, production-ready Arduino package management system
that fully complies with the Arduino CLI Package Index JSON Specification.
Key Features:
- Pydantic models with comprehensive validation
- Multi-source package index support
- Caching and persistence
- Package installation and dependency resolution
- Search and filtering capabilities
- Checksum validation and error handling
"""
import asyncio
import hashlib
import json
import shutil
import sys
import tarfile
import zipfile
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Union
from urllib.parse import urlparse
from urllib.request import urlopen

import httpx
from pydantic import (
    BaseModel,
    ConfigDict,
    EmailStr,
    Field,
    HttpUrl,
    ValidationError,
    field_validator,
    model_validator,
)
# Custom Exceptions
class PackageParsingError(Exception):
    """Raised when package index data cannot be parsed."""
class PackageInstallationError(Exception):
    """Raised when a package fails to download, verify, or install."""
class PackageValidationError(Exception):
    """Raised when package data fails validation."""
# Core Pydantic Models with Enhanced Validation
class Help(BaseModel):
    """Help information with online resources."""

    # URL to online documentation/support; validated by pydantic's HttpUrl.
    online: HttpUrl = Field(description="URL to online help resources")

    # Pydantic v2: the inner `class Config` with `schema_extra` is the v1
    # spelling and is ignored (with a warning) by v2, which this file uses
    # (field_validator/model_validator). Use model_config instead.
    model_config = ConfigDict(
        json_schema_extra={
            "example": {"online": "https://github.com/espressif/arduino-esp32"}
        }
    )
class Board(BaseModel):
    """Board information with complete properties."""

    # Human-readable board name as shown in IDE board menus.
    name: str = Field(min_length=1, max_length=200, description="Board display name")
    # Free-form board configuration key/value pairs (upload.*, build.*, ...).
    properties: Dict[str, Any] = Field(
        default_factory=dict, description="Board configuration properties"
    )

    @field_validator("name")
    @classmethod
    def validate_board_name(cls, v: str) -> str:
        """Reject empty/whitespace-only names and strip surrounding whitespace."""
        if not v.strip():
            raise ValueError("Board name cannot be empty or whitespace")
        return v.strip()

    # Pydantic v2 config: replaces the v1-style inner `class Config` with
    # `schema_extra`, which pydantic v2 does not honor.
    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "name": "ESP32 Dev Module",
                "properties": {
                    "upload.tool": "esptool_py",
                    "upload.maximum_size": "1310720",
                    "build.mcu": "esp32",
                },
            }
        }
    )
class ToolDependency(BaseModel):
    """Tool dependency specification with validation."""

    # Name of the package that provides this tool (e.g. "esp32").
    packager: str = Field(
        min_length=1, max_length=100, description="Tool packager name"
    )
    # Tool identifier; the restricted charset keeps it safe in paths/URLs.
    name: str = Field(pattern=r"^[a-zA-Z0-9_.-]+$", description="Tool name")
    version: str = Field(min_length=1, description="Required tool version")

    @field_validator("version")
    @classmethod
    def validate_version_format(cls, v: str) -> str:
        """Validate version format - supports semantic versioning and Arduino versioning"""
        if not v.strip():
            raise ValueError("Version cannot be empty")
        # Allow flexible versioning (semantic, date-based, etc.)
        return v.strip()

    # Pydantic v2 config: replaces the v1-style inner `class Config` with
    # `schema_extra`, which pydantic v2 does not honor.
    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "packager": "esp32",
                "name": "xtensa-esp32-elf-gcc",
                "version": "esp-2021r2-patch5-8.4.0",
            }
        }
    )
class Platform(BaseModel):
    """Platform specification with comprehensive validation."""

    name: str = Field(min_length=1, max_length=200, description="Platform display name")
    architecture: str = Field(
        pattern=r"^[a-zA-Z0-9_-]+$", description="Target architecture"
    )
    version: str = Field(description="Platform version")
    category: str = Field(min_length=1, description="Platform category")
    url: HttpUrl = Field(description="Download URL for platform archive")
    archive_filename: str = Field(
        alias="archiveFileName", description="Archive file name"
    )
    checksum: str = Field(
        pattern=r"^SHA-256:[a-fA-F0-9]{64}$", description="SHA-256 checksum"
    )
    size_mb: float = Field(gt=0, alias="size", description="Archive size in megabytes")
    boards: List[Board] = Field(
        default_factory=lambda: [], description="Supported boards"
    )
    tool_dependencies: List[ToolDependency] = Field(
        default_factory=lambda: [],
        alias="toolsDependencies",
        description="Required tool dependencies",
    )
    help: Help = Field(description="Help and documentation links")

    @field_validator("version")
    @classmethod
    def validate_version_format(cls, v: str) -> str:
        """Reject empty/whitespace-only versions; strip surrounding whitespace."""
        if not v.strip():
            raise ValueError("Version cannot be empty")
        return v.strip()

    @field_validator("size_mb", mode="before")
    @classmethod
    def convert_size_from_bytes(cls, v: Union[str, int, float]) -> float:
        """Convert size from bytes to megabytes if needed.

        Heuristic: numeric values larger than 1024*1024 are assumed to be a
        byte count (package indexes publish sizes in bytes); smaller numbers
        are assumed to already be megabytes.
        """
        if isinstance(v, str):
            try:
                size_bytes = int(v)
                return size_bytes / (1024 * 1024)
            except ValueError:
                raise ValueError(f"Invalid size format: {v}")
        elif isinstance(v, (int, float)):
            if v > 1024 * 1024:  # Assume bytes if > 1MB
                return v / (1024 * 1024)
            return v  # Already in MB
        return v

    @field_validator("archive_filename")
    @classmethod
    def validate_archive_filename(cls, v: str) -> str:
        """Validate archive filename has a known archive extension."""
        valid_extensions = [".zip", ".tar.gz", ".tar.bz2", ".tar.xz", ".tar.zst"]
        if not any(v.lower().endswith(ext) for ext in valid_extensions):
            raise ValueError(
                f"Archive must have one of these extensions: {valid_extensions}"
            )
        return v

    # Pydantic v2 config: v1 keys `allow_population_by_field_name` and
    # `schema_extra` were renamed in v2 to `populate_by_name` and
    # `json_schema_extra`; the v1 `class Config` form is deprecated.
    model_config = {
        "populate_by_name": True,
        "json_schema_extra": {
            "example": {
                "name": "ESP32 Arduino",
                "architecture": "esp32",
                "version": "2.0.5",
                "category": "ESP32",
                "url": "https://github.com/espressif/arduino-esp32/releases/download/2.0.5/esp32-2.0.5.zip",
                "archiveFileName": "esp32-2.0.5.zip",
                "checksum": "SHA-256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
                "size": "50000000",
                "boards": [],
                "toolsDependencies": [],
            }
        },
    }
class SystemDownload(BaseModel):
    """System-specific download information for tools."""

    host: str = Field(min_length=1, description="Target host system identifier")
    url: HttpUrl = Field(description="Download URL for this system")
    archive_filename: str = Field(
        alias="archiveFileName", description="Archive file name"
    )
    checksum: str = Field(
        pattern=r"^SHA-256:[a-fA-F0-9]{64}$", description="SHA-256 checksum"
    )
    size_mb: float = Field(gt=0, alias="size", description="Archive size in megabytes")

    @field_validator("size_mb", mode="before")
    @classmethod
    def convert_size_from_bytes(cls, v: Union[str, int, float]) -> float:
        """Convert size from bytes to megabytes if needed (same heuristic as Platform)."""
        if isinstance(v, str):
            try:
                size_bytes = int(v)
                return size_bytes / (1024 * 1024)
            except ValueError:
                raise ValueError(f"Invalid size format: {v}")
        elif isinstance(v, (int, float)):
            if v > 1024 * 1024:  # Assume bytes if > 1MB
                return v / (1024 * 1024)
            return v  # Already in MB
        return v

    @field_validator("host")
    @classmethod
    def validate_host_format(cls, v: str) -> str:
        """Validate host system identifier (e.g. x86_64-apple-darwin)."""
        # Common host patterns: i686-pc-linux-gnu, x86_64-apple-darwin, etc.
        if not v.strip():
            raise ValueError("Host identifier cannot be empty")
        return v.strip()

    # Pydantic v2 config (replaces deprecated v1 `class Config` keys).
    model_config = {
        "populate_by_name": True,
        "json_schema_extra": {
            "example": {
                "host": "x86_64-pc-linux-gnu",
                "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-2021r2-patch5/xtensa-esp32-elf-gcc8_4_0-esp-2021r2-patch5-linux-amd64.tar.gz",
                "archiveFileName": "xtensa-esp32-elf-gcc8_4_0-esp-2021r2-patch5-linux-amd64.tar.gz",
                "checksum": "SHA-256:abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890",
                "size": "150000000",
            }
        },
    }
class Tool(BaseModel):
    """Tool with system-specific downloads and enhanced functionality."""

    name: str = Field(pattern=r"^[a-zA-Z0-9_.-]+$", description="Tool name")
    version: str = Field(min_length=1, description="Tool version")
    systems: List[SystemDownload] = Field(
        min_length=1, description="System-specific downloads"
    )

    @field_validator("systems")
    @classmethod
    def validate_unique_systems(cls, v: List[SystemDownload]) -> List[SystemDownload]:
        """Ensure no duplicate host systems."""
        hosts = [system.host for system in v]
        if len(hosts) != len(set(hosts)):
            raise ValueError("Duplicate host systems found in tool downloads")
        return v

    def get_system_download(self, host_pattern: str) -> Optional[SystemDownload]:
        """Return the first system download whose host contains *host_pattern*."""
        for system in self.systems:
            if host_pattern in system.host:
                return system
        return None

    def get_compatible_systems(self) -> List[str]:
        """Get list of compatible host systems."""
        return [system.host for system in self.systems]

    # Pydantic v2 config (replaces deprecated v1 `class Config.schema_extra`).
    model_config = {
        "json_schema_extra": {
            "example": {
                "name": "xtensa-esp32-elf-gcc",
                "version": "esp-2021r2-patch5-8.4.0",
                "systems": [
                    {
                        "host": "x86_64-pc-linux-gnu",
                        "url": "https://github.com/espressif/crosstool-NG/releases/download/esp-2021r2-patch5/xtensa-esp32-elf-gcc8_4_0-esp-2021r2-patch5-linux-amd64.tar.gz",
                        "archiveFileName": "xtensa-esp32-elf-gcc8_4_0-esp-2021r2-patch5-linux-amd64.tar.gz",
                        "checksum": "SHA-256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
                        "size": "150000000",
                    }
                ],
            }
        }
    }
class Package(BaseModel):
    """Package containing platforms and tools with comprehensive validation."""

    name: str = Field(pattern=r"^[A-Za-z0-9_.-]+$", description="Package identifier")
    maintainer: str = Field(
        min_length=1, max_length=200, description="Package maintainer"
    )
    website_url: HttpUrl = Field(alias="websiteURL", description="Package website URL")
    email: EmailStr = Field(description="Maintainer contact email")
    help: Help = Field(description="Help and documentation links")
    platforms: List[Platform] = Field(
        default_factory=lambda: [], description="Available platforms"
    )
    tools: List[Tool] = Field(default_factory=lambda: [], description="Available tools")

    @field_validator("platforms")
    @classmethod
    def validate_unique_platforms(cls, v: List[Platform]) -> List[Platform]:
        """Ensure no duplicate platform architecture/version combinations."""
        seen: Set[tuple[str, str]] = set()
        for platform in v:
            key = (platform.architecture, platform.version)
            if key in seen:
                raise ValueError(
                    f"Duplicate platform found: {platform.architecture} v{platform.version}"
                )
            seen.add(key)
        return v

    @field_validator("tools")
    @classmethod
    def validate_unique_tools(cls, v: List[Tool]) -> List[Tool]:
        """Ensure no duplicate tool name/version combinations."""
        seen: Set[tuple[str, str]] = set()
        for tool in v:
            key = (tool.name, tool.version)
            if key in seen:
                raise ValueError(f"Duplicate tool found: {tool.name} v{tool.version}")
            seen.add(key)
        return v

    def find_platform(
        self, architecture: str, version: Optional[str] = None
    ) -> Optional[Platform]:
        """Find platform by architecture and optionally version."""
        for platform in self.platforms:
            if platform.architecture == architecture:
                if version is None or platform.version == version:
                    return platform
        return None

    def find_tool(self, name: str, version: Optional[str] = None) -> Optional[Tool]:
        """Find tool by name and optionally version."""
        for tool in self.tools:
            if tool.name == name:
                if version is None or tool.version == version:
                    return tool
        return None

    def get_latest_platform_version(self, architecture: str) -> Optional[str]:
        """Get the latest version for a given architecture.

        NOTE(review): uses lexicographic sort, so e.g. "10.0.0" < "9.0.0";
        proper semver ordering would need a version-aware key.
        """
        versions = [p.version for p in self.platforms if p.architecture == architecture]
        if not versions:
            return None
        # Simple version sorting - can be enhanced with proper semver parsing
        return sorted(versions)[-1]

    # Pydantic v2 config (replaces deprecated v1 `class Config` keys).
    model_config = {
        "populate_by_name": True,
        "json_schema_extra": {
            "example": {
                "name": "esp32",
                "maintainer": "Espressif Systems",
                "websiteURL": "https://github.com/espressif/arduino-esp32",
                "email": "hr@espressif.com",
                "help": {"online": "https://github.com/espressif/arduino-esp32"},
                "platforms": [],
                "tools": [],
            }
        },
    }
class PackageIndex(BaseModel):
    """Root package index containing multiple packages."""

    packages: List[Package] = Field(min_length=1, description="Available packages")

    @field_validator("packages")
    @classmethod
    def validate_unique_packages(cls, v: List[Package]) -> List[Package]:
        """Reject an index that lists the same package name twice."""
        seen: Set[str] = set()
        for pkg in v:
            if pkg.name in seen:
                raise ValueError("Duplicate package names found in index")
            seen.add(pkg.name)
        return v

    def find_package(self, name: str) -> Optional[Package]:
        """Return the package with the given name, or None if absent."""
        return next((pkg for pkg in self.packages if pkg.name == name), None)

    def get_all_platforms(self) -> List[Platform]:
        """Return every platform across all packages, flattened."""
        return [platform for pkg in self.packages for platform in pkg.platforms]

    def get_all_tools(self) -> List[Tool]:
        """Return every tool across all packages, flattened."""
        return [tool for pkg in self.packages for tool in pkg.tools]
# Enhanced Parser with Validation and Error Handling
class PackageIndexParser:
    """Enhanced parser with comprehensive validation and error handling."""

    def __init__(self, timeout: int = 30):
        """Initialize parser.

        Args:
            timeout: Network timeout in seconds for fetch operations.
        """
        self.timeout = timeout

    def parse_package_index(self, json_str: str) -> PackageIndex:
        """Parse and validate package index JSON.

        Raises:
            PackageParsingError: If the JSON is malformed or fails validation.
        """
        try:
            raw_data = json.loads(json_str)
            return PackageIndex(**raw_data)
        except ValidationError as e:
            raise PackageParsingError(f"Invalid package index format: {e}")
        except json.JSONDecodeError as e:
            raise PackageParsingError(f"Invalid JSON format: {e}")

    def parse_from_url(self, url: str) -> PackageIndex:
        """Fetch and parse package index from URL with validation."""
        try:
            print(f"Fetching package index from: {url}")
            with urlopen(url, timeout=self.timeout) as response:
                content = response.read()
                json_str = content.decode("utf-8")
                return self.parse_package_index(json_str)
        except PackageParsingError:
            # Fix: already a domain error from parse_package_index; re-raise
            # untouched instead of wrapping it in a second, nested message.
            raise
        except Exception as e:
            raise PackageParsingError(f"Error fetching package index from {url}: {e}")

    async def parse_from_url_async(self, url: str) -> PackageIndex:
        """Async version of URL parsing."""
        try:
            print(f"Fetching package index from: {url}")
            async with httpx.AsyncClient(timeout=self.timeout) as client:
                response = await client.get(url)
                response.raise_for_status()
                json_str = response.text
                return self.parse_package_index(json_str)
        except PackageParsingError:
            # Same fix as parse_from_url: don't double-wrap parse errors.
            raise
        except Exception as e:
            raise PackageParsingError(f"Error fetching package index from {url}: {e}")
# Package Manager Configuration
class PackageManagerConfig(BaseModel):
    """Configuration for package manager with validation."""

    cache_dir: Path = Field(default_factory=lambda: Path.home() / ".arduino_packages")
    sources: List[HttpUrl] = Field(
        default_factory=lambda: [], description="Package index URLs"
    )
    timeout: int = Field(
        default=30, gt=0, le=300, description="Request timeout in seconds"
    )
    max_retries: int = Field(
        default=3, ge=0, le=10, description="Maximum retry attempts"
    )
    verify_checksums: bool = Field(
        default=True, description="Verify download checksums"
    )
    allow_insecure: bool = Field(default=False, description="Allow insecure downloads")

    @field_validator("cache_dir")
    @classmethod
    def validate_cache_dir(cls, v: Path) -> Path:
        """Ensure the cache path, if it exists, is a directory."""
        if v.exists() and not v.is_dir():
            raise ValueError(f"Cache path exists but is not a directory: {v}")
        return v

    # Pydantic v2 config: `validate_assignment` keeps its name, but the v1
    # `class Config` form with `schema_extra` is deprecated under v2.
    model_config = {
        "validate_assignment": True,
        "json_schema_extra": {
            "example": {
                "cache_dir": "~/.arduino_packages",
                "sources": [
                    "https://espressif.github.io/arduino-esp32/package_esp32_index.json"
                ],
                "timeout": 30,
                "max_retries": 3,
                "verify_checksums": True,
                "allow_insecure": False,
            }
        },
    }
# Utility Functions for Enhanced Functionality
def verify_checksum(file_path: Path, expected_checksum: str) -> bool:
    """Check a file against an expected 'SHA-256:<hex>' checksum string.

    Raises ValueError if the checksum string lacks the 'SHA-256:' prefix.
    """
    prefix = "SHA-256:"
    if not expected_checksum.startswith(prefix):
        raise ValueError(f"Invalid checksum format: {expected_checksum}")
    expected_hash = expected_checksum[len(prefix) :]
    digest = hashlib.sha256()
    # Hash in 4 KiB chunks so large archives never load fully into memory.
    with open(file_path, "rb") as fh:
        while True:
            chunk = fh.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest().lower() == expected_hash.lower()
def _safe_extract_tar(archive_path: Path, mode: str, extract_to: Path) -> None:
    """Extract a tar archive, using the 'data' extraction filter when available.

    The filter (Python 3.12+, PEP 706) blocks path-traversal and other
    malicious members; older interpreters fall back to legacy extraction.
    """
    with tarfile.open(archive_path, mode) as tar_ref:
        try:
            tar_ref.extractall(extract_to, filter="data")
        except TypeError:
            # Python < 3.12: extractall() has no `filter` parameter.
            tar_ref.extractall(extract_to)


def extract_archive(archive_path: Path, extract_to: Path) -> bool:
    """Extract an archive into *extract_to*, detecting format from the name.

    Supports .zip, .tar.gz/.tgz, .tar.bz2 and .tar.xz.  Returns True on
    success, False (after printing the error) on any failure, including an
    unsupported format.

    NOTE(review): Platform.validate_archive_filename also accepts .tar.zst,
    which this function cannot extract (no stdlib zstd support before
    Python 3.14) -- such archives fail here with "Unsupported archive format".
    """
    try:
        archive_str = str(archive_path)
        if archive_path.suffix == ".zip":
            with zipfile.ZipFile(archive_path, "r") as zip_ref:
                zip_ref.extractall(extract_to)
        elif archive_str.endswith((".tar.gz", ".tgz")):
            _safe_extract_tar(archive_path, "r:gz", extract_to)
        elif archive_str.endswith(".tar.bz2"):
            _safe_extract_tar(archive_path, "r:bz2", extract_to)
        elif archive_str.endswith(".tar.xz"):
            _safe_extract_tar(archive_path, "r:xz", extract_to)
        else:
            raise ValueError(f"Unsupported archive format: {archive_path}")
        return True
    except Exception as e:
        print(f"Error extracting archive {archive_path}: {e}")
        return False
def format_size(size_mb: float) -> str:
    """Render a megabyte count as a human-readable KB/MB/GB string."""
    # Check from largest unit down; thresholds match the original ranges
    # (< 1 MB -> KB, < 1024 MB -> MB, otherwise GB).
    if size_mb >= 1024:
        return f"{size_mb / 1024:.1f} GB"
    if size_mb >= 1:
        return f"{size_mb:.1f} MB"
    return f"{size_mb * 1024:.1f} KB"
# Display Functions with Enhanced Formatting
def display_package_info(package: Package) -> None:
    """Display package information with enhanced formatting.

    Prints a package-level summary, then details for at most the first 3
    platforms (each capped at 5 boards and 3 tool dependencies).
    """
    print(f"\n📦 Package: {package.name}")
    print(f"👤 Maintainer: {package.maintainer}")
    print(f"🌐 Website: {package.website_url}")
    print(f"📧 Email: {package.email}")
    print(f"📚 Help: {package.help.online}")
    print(f"🛠️ Platforms: {len(package.platforms)}")
    print(f"🔧 Tools: {len(package.tools)}")
    # Show platform information
    for i, platform in enumerate(package.platforms[:3]):  # Show first 3 platforms
        print(f"\n 📋 Platform {i + 1}: {platform.name} v{platform.version}")
        print(f" Architecture: {platform.architecture}")
        print(f" Category: {platform.category}")
        print(f" Size: {format_size(platform.size_mb)}")
        print(f" Archive: {platform.archive_filename}")
        # Checksum is truncated to keep the line readable.
        print(f" Checksum: {platform.checksum[:24]}...")
        print(f" Help: {platform.help.online}")
        print(f" Boards: {len(platform.boards)}")
        # Show first 5 boards
        for board in platform.boards[:5]:
            print(f"{board.name}")
        if len(platform.boards) > 5:
            print(f" ... and {len(platform.boards) - 5} more")
        print(f" Tool Dependencies: {len(platform.tool_dependencies)}")
        for dep in platform.tool_dependencies[:3]:  # Show first 3 dependencies
            print(f"{dep.name} v{dep.version} ({dep.packager})")
        if len(platform.tool_dependencies) > 3:
            print(f" ... and {len(platform.tool_dependencies) - 3} more")
    if len(package.platforms) > 3:
        print(f"\n ... and {len(package.platforms) - 3} more platforms")
def display_validation_summary(package_index: PackageIndex) -> None:
    """Display validation summary for the package index.

    Aggregates package/platform/tool/board counts and the distinct set of
    architectures across every package in the index.
    """
    print(f"\n✅ VALIDATION SUMMARY")
    print(f" 📦 Total packages: {len(package_index.packages)}")
    # Totals are flattened across all packages (and platforms, for boards).
    total_platforms = sum(len(pkg.platforms) for pkg in package_index.packages)
    total_tools = sum(len(pkg.tools) for pkg in package_index.packages)
    total_boards = sum(
        len(platform.boards)
        for pkg in package_index.packages
        for platform in pkg.platforms
    )
    print(f" 🛠️ Total platforms: {total_platforms}")
    print(f" 🔧 Total tools: {total_tools}")
    print(f" 💾 Total boards: {total_boards}")
    # Show architectures
    architectures: Set[str] = set()
    for pkg in package_index.packages:
        for platform in pkg.platforms:
            architectures.add(platform.architecture)
    print(f" 🏗️ Architectures: {', '.join(sorted(architectures))}")
# Demonstration Functions
def demo_esp32_parsing() -> Optional[PackageIndex]:
    """Demonstrate parsing ESP32 package index with enhanced validation.

    Fetches the live Espressif index over the network, prints a validation
    summary and the first package's details, and returns the parsed index.
    On any failure the process exits with status 1 (it never returns None).
    """
    ESP32_URL = "https://espressif.github.io/arduino-esp32/package_esp32_index.json"
    try:
        parser = PackageIndexParser(timeout=30)
        package_index = parser.parse_from_url(ESP32_URL)
        print("🎉 Successfully parsed ESP32 package index with Pydantic validation!")
        display_validation_summary(package_index)
        # Display first package
        if package_index.packages:
            display_package_info(package_index.packages[0])
        return package_index
    except PackageParsingError as e:
        print(f"❌ Package parsing error: {e}")
        sys.exit(1)
    except Exception as e:
        print(f"❌ Unexpected error: {e}")
        sys.exit(1)
def demo_model_validation() -> None:
    """Demonstrate Pydantic model validation capabilities.

    Exercises three cases against the Platform model: a valid payload
    (including byte->MB size conversion), an invalid checksum, and an
    invalid URL.  Results are printed; nothing is returned.
    """
    print("\n🧪 TESTING PYDANTIC MODEL VALIDATION")
    # Define valid platform data
    valid_platform_data: Dict[str, Any] = {
        "name": "ESP32 Arduino",
        "architecture": "esp32",
        "version": "2.0.5",
        "category": "ESP32",
        "url": "https://github.com/espressif/arduino-esp32/releases/download/2.0.5/esp32-2.0.5.zip",
        "archiveFileName": "esp32-2.0.5.zip",
        "checksum": "SHA-256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
        "size": "50000000",  # Should convert to MB
        "boards": [],
        "toolsDependencies": [],
        "help": {"online": "https://github.com/espressif/arduino-esp32"},
    }
    # Test valid platform
    try:
        platform = Platform(**valid_platform_data)
        print(f"✅ Valid platform created: {platform.name} v{platform.version}")
        print(f" Size converted: {platform.size_mb:.1f} MB")
    except ValidationError as e:
        print(f"❌ Unexpected validation error: {e}")
    # Test invalid platform (checksum missing the SHA-256:<64 hex> shape)
    try:
        invalid_platform_data = valid_platform_data.copy()
        invalid_platform_data["checksum"] = "invalid-checksum-format"
        platform = Platform(**invalid_platform_data)
        print("❌ Should have failed validation!")
    except ValidationError as e:
        print(f"✅ Correctly caught invalid checksum: {e.errors()[0]['msg']}")
    # Test invalid URL (rejected by the HttpUrl field type)
    try:
        invalid_url_data = valid_platform_data.copy()
        invalid_url_data["url"] = "not-a-valid-url"
        platform = Platform(**invalid_url_data)
        print("❌ Should have failed URL validation!")
    except ValidationError as e:
        print(f"✅ Correctly caught invalid URL: {e.errors()[0]['msg']}")
def main() -> None:
    """Main function demonstrating enhanced package index functionality.

    Runs the validation demo, fetches the live ESP32 index, then offers a
    small interactive menu (tools listing / architecture search / exit).
    Ctrl-C inside the menu is absorbed; Ctrl-C during the demos exits 1.
    """
    print("🚀 ENHANCED ARDUINO PACKAGE INDEX WITH PYDANTIC")
    print("=" * 60)
    try:
        # Demo model validation
        demo_model_validation()
        # Demo ESP32 parsing
        package_index = demo_esp32_parsing()
        # Interactive options
        try:
            print(f"\n📋 AVAILABLE OPTIONS:")
            print("1. Show detailed tools information")
            print("2. Search for specific architecture")
            print("3. Exit")
            choice = input("\nEnter your choice (1-3): ").strip()
            if choice == "1" and package_index and package_index.packages:
                # Simple tools info display (without importing original)
                pkg = package_index.packages[0]
                print(f"\n🔧 TOOLS INFORMATION for {pkg.name}")
                print(f"Total tools: {len(pkg.tools)}")
                for tool in pkg.tools[:3]:  # Show first 3 tools
                    print(
                        f"{tool.name} v{tool.version} ({len(tool.systems)} systems)"
                    )
                if len(pkg.tools) > 3:
                    print(f" ... and {len(pkg.tools) - 3} more tools")
            elif choice == "2" and package_index:
                arch = input("Enter architecture to search for: ").strip()
                # Flatten all platforms across packages, filtered by architecture.
                platforms = [
                    p
                    for pkg in package_index.packages
                    for p in pkg.platforms
                    if p.architecture == arch
                ]
                if platforms:
                    print(
                        f"\n🔍 Found {len(platforms)} platforms for architecture '{arch}':"
                    )
                    for platform in platforms:
                        print(
                            f"{platform.name} v{platform.version} ({format_size(platform.size_mb)})"
                        )
                else:
                    print(f"❌ No platforms found for architecture '{arch}'")
            else:
                # Covers choice "3" and any unrecognized input.
                print("👋 Goodbye!")
        except KeyboardInterrupt:
            print("\n👋 Interrupted by user")
    except KeyboardInterrupt:
        print("\n👋 Interrupted by user")
        sys.exit(1)
# Script entry point: run the interactive demo only when executed directly.
if __name__ == "__main__":
    main()

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,937 @@
#!/usr/bin/env python3
"""
PlatformIO artifact cache implementation for speeding up CI builds.
This module implements the cache mechanism described in FEATURE_PIO_SPEEDUP.md,
providing functionality to:
1. Parse platformio.ini files with zip URLs
2. Download and cache platform/framework artifacts
3. Install artifacts via PlatformIO CLI
4. Modify platformio.ini in-place with resolved local paths
"""
import _thread
import configparser
import json
import logging
import os
import shutil
import subprocess
import tempfile
import threading
import time
import urllib.parse
import zipfile
from concurrent.futures import Future, ThreadPoolExecutor, as_completed
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from types import TracebackType
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import fasteners
import httpx
from ci.compiler.platformio_ini import PlatformIOIni
from ci.util.running_process import RunningProcess
from ci.util.url_utils import sanitize_url_for_path
@dataclass
class DownloadResult:
    """Result of a download operation."""

    url: str  # source URL that was downloaded/copied
    temp_path: Path  # local temporary file the payload was written to
    exception: Optional[BaseException] = None  # failure cause, None on success

    @property
    def success(self) -> bool:
        """True if download succeeded."""
        return self.exception is None
@dataclass
class ArtifactProcessingResult:
    """Result of processing an artifact."""

    url: str  # artifact source URL
    is_framework: bool  # True when the artifact is a framework (vs. platform)
    env_section: str  # platformio.ini env section the artifact belongs to
    resolved_path: Optional[str] = None  # local path after successful install
    exception: Optional[BaseException] = None  # failure cause, if any

    @property
    def success(self) -> bool:
        """True if processing succeeded."""
        # Success requires both no error and an actual resolved path.
        return self.exception is None and self.resolved_path is not None
@dataclass
class ZipUrlInfo:
    """Information about a zip URL found in platformio.ini."""

    url: str  # the zip URL value itself
    section_name: str  # ini section the URL was found in
    option_name: str  # ini option that holds the URL
@dataclass
class ManifestResult:
    """Result of manifest validation and type detection."""

    is_valid: bool  # True when a syntactically valid manifest was found
    manifest_path: Optional[Path]  # path to the manifest file, if any
    is_framework: bool  # True for framework/package.json, False for platform.json
def _get_remote_file_size(url: str) -> Optional[int]:
    """Return the Content-Length of an http(s) URL via HEAD, or None.

    Non-http(s) schemes, missing headers, and any network error all yield
    None; failures are only logged at debug level.
    """
    try:
        parsed = urllib.parse.urlparse(url)
        if parsed.scheme in ("http", "https"):
            with httpx.Client(follow_redirects=True) as client:
                response = client.head(
                    url, headers={"User-Agent": "PlatformIO-Cache/1.0"}
                )
                response.raise_for_status()
                content_length = response.headers.get("Content-Length")
                if content_length:
                    return int(content_length)
    except Exception as e:
        logger.debug(f"Failed to get file size for {url}: {e}")
    return None
def _format_file_size(size_bytes: Optional[int]) -> str:
"""Format file size in human-readable format."""
if size_bytes is None:
return "unknown size"
if size_bytes < 1024:
return f"{size_bytes} B"
elif size_bytes < 1024 * 1024:
return f"{size_bytes / 1024:.1f} KB"
elif size_bytes < 1024 * 1024 * 1024:
return f"{size_bytes / (1024 * 1024):.1f} MB"
else:
return f"{size_bytes / (1024 * 1024 * 1024):.1f} GB"
# Global cache to track PlatformIO installations in this session
# (reset via clear_session_cache(), e.g. between tests).
_session_installation_cache: set[str] = set()
# Global cancellation event for handling keyboard interrupts; checked by
# download workers so an interrupt aborts in-flight transfers.
_global_cancel_event = threading.Event()
# Lock for thread-safe operations
_download_lock = threading.Lock()
def _get_status_file(artifact_dir: Path, cache_key: str) -> Path:
    """Get the JSON status file path for an artifact.

    Note: ``cache_key`` is currently unused -- each artifact already lives in
    its own per-key directory, so a fixed filename suffices; the parameter is
    kept for call-site compatibility.
    """
    # Use simple descriptive filename since artifacts are already in unique directories
    return artifact_dir / "info.json"
def _read_status(status_file: Path) -> Optional[Dict[str, Any]]:
"""Read status from JSON file."""
if not status_file.exists():
return None
try:
with open(status_file, "r") as f:
return json.load(f)
except (json.JSONDecodeError, OSError):
return None
def _write_status(status_file: Path, status: Dict[str, Any]) -> None:
"""Write status to JSON file."""
try:
with open(status_file, "w") as f:
json.dump(status, f, indent=2)
except OSError as e:
logger.warning(f"Failed to write status file {status_file}: {e}")
def _is_processing_complete(status_file: Path) -> bool:
    """True when the status file records a finished ('complete') run."""
    status = _read_status(status_file)
    if status is None:
        return False
    return status.get("status") == "complete"
def _download_with_progress(
    url: str, temp_path: Path, cancel_event: threading.Event
) -> DownloadResult:
    """Download HTTP/HTTPS file with cancellation support.

    Streams the response to ``temp_path`` in 8 KiB chunks, checking
    ``cancel_event`` between chunks.  Never raises: every failure path
    (cancellation, KeyboardInterrupt, network error) is captured in the
    returned DownloadResult's ``exception`` field.
    """
    try:
        # HTTP/HTTPS download using httpx with streaming
        with httpx.Client(follow_redirects=True, timeout=30.0) as client:
            with client.stream(
                "GET", url, headers={"User-Agent": "PlatformIO-Cache/1.0"}
            ) as response:
                response.raise_for_status()
                with open(temp_path, "wb") as f:
                    for chunk in response.iter_bytes(chunk_size=8192):
                        # Check for cancellation periodically
                        if cancel_event.is_set():
                            cancelled_error = RuntimeError(
                                f"Download cancelled for {url}"
                            )
                            logger.warning(
                                f"Download cancelled for {url}",
                                exc_info=cancelled_error,
                            )
                            return DownloadResult(url, temp_path, cancelled_error)
                        f.write(chunk)
        return DownloadResult(url, temp_path)  # Success
    except KeyboardInterrupt as e:
        # Set cancel event and interrupt main thread
        # (worker threads don't receive Ctrl-C; forward it so the main
        # thread can abort the whole batch).
        cancel_event.set()
        _thread.interrupt_main()
        logger.warning(
            f"Download interrupted by KeyboardInterrupt for {url}", exc_info=e
        )
        return DownloadResult(url, temp_path, e)
    except Exception as e:
        if not cancel_event.is_set():
            logger.error(f"Download failed for {url}: {e}", exc_info=e)
        else:
            logger.warning(f"Download failed for {url} (cancelled): {e}", exc_info=e)
        return DownloadResult(url, temp_path, e)
def _copy_file_with_progress(
    url: str, temp_path: Path, cancel_event: threading.Event
) -> DownloadResult:
    """Copy local file (from a file:// URL) with cancellation support.

    Never raises: every failure path is captured in the returned
    DownloadResult's ``exception`` field.

    NOTE(review): the URL path is used as-is without percent-decoding, so a
    file:// URL containing e.g. %20 would not resolve -- confirm callers only
    produce unencoded local URLs.
    """
    try:
        parsed_url = urllib.parse.urlparse(url)
        # File URL - copy local file
        logger.debug(f"Parsing file URL: {url}")
        logger.debug(f"Parsed URL path: {parsed_url.path}")
        # Handle both Unix and Windows file URLs
        if os.name == "nt":  # Windows
            # On Windows, file:///C:/path becomes /C:/path, so remove leading slash
            if (
                parsed_url.path.startswith("/")
                and len(parsed_url.path) > 3
                and parsed_url.path[2] == ":"
            ):
                source_path = Path(parsed_url.path[1:])
            else:
                source_path = Path(parsed_url.path)
        else:  # Unix-like
            source_path = Path(parsed_url.path)
        logger.debug(f"Resolved file path: {source_path}")
        if not source_path.exists():
            raise FileNotFoundError(f"Source file not found: {source_path}")
        # Check for cancellation before copy
        if cancel_event.is_set():
            cancelled_error = RuntimeError(f"Copy cancelled for {url}")
            logger.warning(f"Copy cancelled for {url}", exc_info=cancelled_error)
            return DownloadResult(url, temp_path, cancelled_error)
        # copy2 preserves metadata (mtime etc.) alongside the contents.
        shutil.copy2(source_path, temp_path)
        return DownloadResult(url, temp_path)  # Success
    except KeyboardInterrupt as e:
        # Set cancel event and interrupt main thread so the batch aborts.
        cancel_event.set()
        _thread.interrupt_main()
        logger.warning(f"Copy interrupted by KeyboardInterrupt for {url}", exc_info=e)
        return DownloadResult(url, temp_path, e)
    except Exception as e:
        if not cancel_event.is_set():
            logger.error(f"Copy failed for {url}: {e}", exc_info=e)
        else:
            logger.warning(f"Copy failed for {url} (cancelled): {e}", exc_info=e)
        return DownloadResult(url, temp_path, e)
def clear_session_cache() -> None:
    """Clear the session installation cache. Useful for testing."""
    # `.clear()` mutates the set in place, so no `global` rebinding is needed.
    _session_installation_cache.clear()
    logger.debug("Cleared session installation cache")
# Configure logging (module-level logger, INFO by default)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Reduce httpx verbosity to avoid log spew from HTTP requests
logging.getLogger("httpx").setLevel(logging.WARNING)
class PlatformIOCache:
    """Enhanced cache manager for PlatformIO artifacts.

    Layout: each artifact gets its own directory under ``cache_dir`` (keyed
    by a sanitized URL) containing ``artifact.zip``, an ``extracted/``
    folder, an ``info.json`` status file and an inter-process lock file.
    """

    def __init__(self, cache_dir: Path):
        """Initialize cache manager with directory structure."""
        self.cache_dir = cache_dir
        # Simplified structure: each artifact gets its own directory directly in cache root
        # containing both the .zip and extracted/ folder
        # Create directory structure
        self.cache_dir.mkdir(parents=True, exist_ok=True)

    def _get_cache_key(self, url: str) -> str:
        """Generate cache key from URL - sanitized for filesystem use."""
        return str(sanitize_url_for_path(url))

    def download_artifact(self, url: str) -> str:
        """
        Download and cache an artifact from the given URL.
        Returns the absolute path to the cached zip file.

        Supported schemes: http/https (streamed download) and file (local
        copy).  A cached zip is reused only when its status file records a
        completed run; incomplete entries are purged and re-fetched.  An
        inter-process write lock serializes concurrent fetchers of the same
        artifact.
        """
        cache_key = self._get_cache_key(url)
        # Each artifact gets its own directory
        artifact_dir = self.cache_dir / cache_key
        artifact_dir.mkdir(parents=True, exist_ok=True)
        cached_path = artifact_dir / "artifact.zip"
        # Use read-write locking for concurrent safety
        # Start with write lock for downloading, can upgrade to read if cache hit
        lock_path = str(artifact_dir / "artifact.lock")
        rw_lock = fasteners.InterProcessReaderWriterLock(lock_path)
        with rw_lock.write_lock():
            if cached_path.exists():
                # Check if processing was completed successfully
                status_file = _get_status_file(artifact_dir, cache_key)
                if not _is_processing_complete(status_file):
                    logger.warning(
                        f"Cache incomplete (no completion status), re-downloading: {url}"
                    )
                    # Remove incomplete zip file
                    cached_path.unlink()
                    # Also remove any partial extraction
                    extracted_dir = artifact_dir / "extracted"
                    if extracted_dir.exists():
                        shutil.rmtree(extracted_dir)
                else:
                    print(f"Using cached artifact: {cached_path}")
                    # Cache hit - return the cached path (write lock will be released)
                    return str(cached_path)
            # Check URL scheme to determine action
            parsed_url = urllib.parse.urlparse(url)
            if parsed_url.scheme in ("http", "https"):
                # Download to temporary file first (atomic operation)
                file_size = _get_remote_file_size(url)
                size_str = _format_file_size(file_size)
                print(f"Downloading: {url} ({size_str})")
                temp_file_handle = tempfile.NamedTemporaryFile(
                    delete=False, suffix=".zip"
                )
                temp_path = Path(temp_file_handle.name)
                temp_file_handle.close()  # Close immediately to avoid Windows file locking issues
                # Create a thread-local cancel event
                thread_cancel_event = threading.Event()
                # Download directly (we're already in a thread from the main pool)
                download_result = _download_with_progress(
                    url, temp_path, thread_cancel_event
                )
            elif parsed_url.scheme == "file":
                # File URL - copy from local file
                print(f"Copying from local file: {url}")
                temp_file_handle = tempfile.NamedTemporaryFile(
                    delete=False, suffix=".zip"
                )
                temp_path = Path(temp_file_handle.name)
                temp_file_handle.close()  # Close immediately to avoid Windows file locking issues
                # Create a thread-local cancel event
                thread_cancel_event = threading.Event()
                # Copy directly (we're already in a thread from the main pool)
                download_result = _copy_file_with_progress(
                    url, temp_path, thread_cancel_event
                )
            else:
                raise ValueError(f"Unsupported URL scheme: {parsed_url.scheme}")
            try:
                # Check if global cancellation was requested
                if _global_cancel_event.is_set():
                    raise KeyboardInterrupt(
                        "Download cancelled due to global interrupt"
                    )
                # Process the download result
                if not download_result.success:
                    if download_result.exception is not None:
                        raise download_result.exception
                    else:
                        raise RuntimeError(
                            f"Download failed for unknown reason: {download_result.url}"
                        )
                else:
                    print(f"Download completed successfully: {download_result.url}")
                # Atomic move to final location
                shutil.move(str(temp_path), str(cached_path))
                print(f"Successfully cached: {cached_path}")
                return str(cached_path)
            except Exception as e:
                # Clean up the temp file so failed downloads leave no litter.
                logger.error(f"Download failed: {e}")
                if temp_path.exists():
                    temp_path.unlink()
                raise
def _is_zip_web_url(value: str) -> bool:
"""Enhanced URL detection for zip artifacts."""
if not isinstance(value, str):
return False
parsed = urllib.parse.urlparse(value)
# Direct zip URLs
if value.endswith(".zip"):
return parsed.scheme in ("http", "https")
return False
def validate_and_detect_manifest(
    content_path: Path,
) -> ManifestResult:
    """
    Validate manifest files and auto-detect artifact type.

    Framework manifests (framework.json, package.json) take priority over
    the platform manifest (platform.json); the first one that exists and
    parses as JSON wins.
    """
    # (filename, is_framework, label) candidates in priority order.
    candidates = [
        ("framework.json", True, "framework"),
        ("package.json", True, "framework"),
        ("platform.json", False, "platform"),
    ]
    for filename, is_framework, label in candidates:
        candidate_path = content_path / filename
        if not candidate_path.exists():
            continue
        try:
            with open(candidate_path, "r") as fh:
                json.load(fh)  # Validate JSON syntax
        except json.JSONDecodeError as e:
            logger.warning(f"Invalid JSON in {candidate_path}: {e}")
            continue
        print(f"Found valid {label} manifest: {candidate_path}")
        return ManifestResult(
            is_valid=True, manifest_path=candidate_path, is_framework=is_framework
        )
    return ManifestResult(is_valid=False, manifest_path=None, is_framework=False)
def get_platformio_command_path(path: Path) -> str:
    """Format *path* for use on the PlatformIO command line.

    Windows gets a plain native path because PlatformIO mishandles file://
    URLs for local directories there; every other platform gets a proper
    file:// URL built from the resolved POSIX path.
    """
    import platform

    absolute = path.resolve()
    if platform.system() != "Windows":
        return f"file://{absolute.as_posix()}"
    return str(absolute)
def get_proper_file_url(path: Path) -> str:
    """Convert a path to the form platformio.ini expects.

    Returns a native Windows path on Windows (PlatformIO parses those more
    reliably than file:// URLs there) and a proper file:// URL elsewhere.

    This contract is intentionally identical to get_platformio_command_path(),
    so we delegate to that single implementation instead of duplicating the
    platform branch in two places.
    """
    return get_platformio_command_path(path)
def unzip_and_install(
    cached_zip_path: Path,
    cache_manager: PlatformIOCache,
    is_framework: bool,
    env_section: str,
) -> bool:
    """
    Enhanced unzip and install with manifest validation and cleanup.

    Extracts *cached_zip_path* into a temp dir next to the zip, validates that
    a framework/platform manifest exists, moves the content to a sibling
    "extracted" directory, then installs it via PlatformIO.

    Args:
        cached_zip_path: Path to the previously downloaded zip artifact.
        cache_manager: NOTE(review): currently unused here — confirm before
            removing from the signature (callers pass it positionally).
        is_framework: NOTE(review): also unused — the framework/platform type
            is re-detected from the manifest instead.
        env_section: platformio.ini env section name, used for logging.

    Returns:
        True on successful extraction + installation, False on any failure.
    """
    cached_zip_path_obj = cached_zip_path
    # Extract to a directory alongside the zip file
    artifact_dir = cached_zip_path_obj.parent
    extracted_dir = artifact_dir / "extracted"
    temp_unzip_dir = artifact_dir / "temp_extract"
    try:
        # Clean extraction directory so stale partial extractions never leak in
        if temp_unzip_dir.exists():
            shutil.rmtree(temp_unzip_dir)
        temp_unzip_dir.mkdir(parents=True)
        print(f"Extracting {cached_zip_path_obj} to {temp_unzip_dir}")
        # Extract with better error handling
        try:
            with zipfile.ZipFile(cached_zip_path_obj, "r") as zip_ref:
                # Check for zip bombs (basic protection): sum of declared
                # uncompressed sizes must stay under the limit
                total_size = sum(info.file_size for info in zip_ref.infolist())
                if total_size > 500 * 1024 * 1024:  # 500MB limit
                    raise ValueError("Archive too large, possible zip bomb")
                zip_ref.extractall(temp_unzip_dir)
            print("Extraction completed successfully")
        except zipfile.BadZipFile:
            logger.error(f"Invalid zip file: {cached_zip_path_obj}")
            return False
        except Exception as e:
            logger.error(f"Extraction failed: {e}")
            return False
        # Find content directory (handle nested structures like GitHub archives)
        content_items = list(temp_unzip_dir.iterdir())
        if len(content_items) == 1 and content_items[0].is_dir():
            # Single root directory (common with GitHub/GitLab archives) - use it
            unzipped_content_path = content_items[0]
            print(f"Found nested directory structure: {content_items[0].name}")
        else:
            # Multiple items at root - use temp dir
            unzipped_content_path = temp_unzip_dir
            print("Using flat directory structure")
        # Validate manifest files and auto-detect type
        manifest_result = validate_and_detect_manifest(unzipped_content_path)
        if not manifest_result.is_valid:
            logger.error(f"No valid manifest found in {unzipped_content_path}")
            return False
        # Move to final location (replacing any previous extraction)
        if extracted_dir.exists():
            shutil.rmtree(extracted_dir)
        shutil.move(unzipped_content_path, extracted_dir)
        # Install via PlatformIO, using the auto-detected type (not the
        # is_framework argument)
        return install_with_platformio(
            extracted_dir, manifest_result.is_framework, env_section
        )
    finally:
        # Cleanup temporary extraction. When the flat-structure branch was
        # taken, temp_unzip_dir itself was moved away, so the exists() guard
        # makes this safe in both cases.
        if temp_unzip_dir.exists():
            print(f"Cleaning up extraction directory: {temp_unzip_dir}")
            shutil.rmtree(temp_unzip_dir)
def install_with_platformio(
    content_path: Path, is_framework: bool, env_section: str
) -> bool:
    """Install extracted content using appropriate PlatformIO command.

    Runs ``pio pkg install --global`` (with ``--platform`` for platform
    artifacts) on the extracted directory, streaming output as it arrives.
    Successful installs are recorded in the module-level
    ``_session_installation_cache`` so the same artifact is installed at most
    once per process.

    Args:
        content_path: Extracted artifact directory to install from.
        is_framework: True for framework packages, False for platforms;
            selects the pio command variant and the session-cache key prefix.
        env_section: platformio.ini env section name, used for logging only.

    Returns:
        True if pio exited 0 (or the install was already cached this session).
    """
    content_path_obj = Path(content_path)
    command_path = get_platformio_command_path(content_path_obj)
    # Create a cache key based on the installation type and path
    cache_key = f"{'framework' if is_framework else 'platform'}:{command_path}"
    # Check if we've already installed this in this session
    if cache_key in _session_installation_cache:
        print(
            f"Skipping installation for {env_section}: already installed in this session ({command_path})"
        )
        return True
    if is_framework:
        # Framework installation - use pkg install
        command = ["pio", "pkg", "install", "--global", command_path]
    else:
        # Platform installation - also use pkg install (new recommended way)
        command = ["pio", "pkg", "install", "--global", "--platform", command_path]
    try:
        print(f"Installing for {env_section}: {' '.join(command)}")
        # Use RunningProcess for streaming output
        process = RunningProcess(
            command,
            check=False,  # We'll handle errors ourselves
            timeout=300,  # 5 minute timeout
        )
        # Stream output in real-time
        for line in process.line_iter(timeout=300):
            line_str = cast(str, line)
            print(f"PIO: {line_str}")
        # Wait for completion and check result
        process.wait()
        if process.returncode == 0:
            print("PlatformIO installation successful")
            # Add to session cache only AFTER confirmed success, so failed
            # installs can be retried later in the same session
            _session_installation_cache.add(cache_key)
            return True
        else:
            logger.error(
                f"PlatformIO installation failed with return code: {process.returncode}"
            )
            return False
    except Exception as e:
        # Heuristic message match to detect a missing `pio` executable
        # (e.g. FileNotFoundError) - NOTE(review): string matching is fragile;
        # confirm against the exceptions RunningProcess actually raises.
        if "No such file or directory" in str(e) or "not found" in str(e).lower():
            logger.error("PlatformIO CLI not found. Is it installed and in PATH?")
        else:
            logger.error(f"PlatformIO installation failed: {e}")
        logger.error(f"Command: {' '.join(command)}")
        return False
def handle_zip_artifact(
    zip_source: str,
    cache_manager: PlatformIOCache,
    env_section: str,
) -> str:
    """
    Enhanced artifact handler with validation and error recovery.
    Returns the resolved local path for the artifact.

    Flow: download (cached) -> reuse an existing valid extraction when
    possible (skipping the pio install if a completion status file or the
    session cache says it was already done) -> otherwise extract fresh,
    validate the manifest, install via PlatformIO, and write a status file
    recording the outcome.

    NOTE(review): the fresh-extraction path largely duplicates
    unzip_and_install(); consider consolidating.

    Raises:
        Exception: any download/extraction/validation failure is logged with a
            traceback and re-raised to the caller.
    """
    try:
        # Download and cache the artifact
        cached_zip_path = cache_manager.download_artifact(zip_source)
        if not cached_zip_path or not Path(cached_zip_path).exists():
            raise FileNotFoundError(f"Download failed for {zip_source}")
        # Extract and get the content path
        # NOTE(review): this cache_key is unused here (recomputed below before
        # the status file is written) - confirm before removing.
        cache_key = cache_manager._get_cache_key(zip_source)
        cached_zip_path_obj = Path(cached_zip_path)
        # Extract to a directory alongside the zip file
        artifact_dir = cached_zip_path_obj.parent
        extracted_dir = artifact_dir / "extracted"
        temp_unzip_dir = artifact_dir / "temp_extract"
        # Check if already extracted
        if extracted_dir.exists():
            # Validate existing extraction and auto-detect type; if invalid we
            # fall through to a fresh extraction below
            manifest_result = validate_and_detect_manifest(extracted_dir)
            if manifest_result.is_valid:
                print(f"Using existing extraction: {extracted_dir}")
                # Check for completion status using URL-based cache key (consistent with status file creation)
                url_cache_key = cache_manager._get_cache_key(zip_source)
                status_file = _get_status_file(artifact_dir, url_cache_key)
                if _is_processing_complete(status_file):
                    print(
                        f"Skipping PlatformIO installation for {env_section}: found completion status"
                    )
                    return get_proper_file_url(extracted_dir)
                # Create a session cache key for this artifact
                session_cache_key = f"{'framework' if manifest_result.is_framework else 'platform'}:{get_platformio_command_path(extracted_dir)}"
                # If we've already handled this exact artifact in this session, skip PlatformIO entirely
                if session_cache_key in _session_installation_cache:
                    print(
                        f"Skipping PlatformIO installation for {env_section}: already processed in this session"
                    )
                    return get_proper_file_url(extracted_dir)
                # Otherwise, install via PlatformIO
                install_success = install_with_platformio(
                    extracted_dir, manifest_result.is_framework, env_section
                )
                # Status file will be created later using URL-based cache key
                if install_success:
                    print(f"Successfully installed {zip_source} for {env_section}")
                else:
                    logger.warning(
                        f"Installation completed with warnings for {zip_source}"
                    )
                return get_proper_file_url(extracted_dir)
        # Clean extraction directory for fresh extraction
        if temp_unzip_dir.exists():
            shutil.rmtree(temp_unzip_dir)
        temp_unzip_dir.mkdir(parents=True)
        # Extract the zip
        # NOTE(review): unlike unzip_and_install(), this path has no zip-bomb
        # size check - confirm whether that protection should apply here too.
        with zipfile.ZipFile(cached_zip_path, "r") as zip_ref:
            zip_ref.extractall(temp_unzip_dir)
        # Find content directory (handle nested structures)
        content_items = list(temp_unzip_dir.iterdir())
        if len(content_items) == 1 and content_items[0].is_dir():
            unzipped_content_path = content_items[0]
        else:
            unzipped_content_path = temp_unzip_dir
        # Validate manifest files and auto-detect type
        manifest_result = validate_and_detect_manifest(unzipped_content_path)
        if not manifest_result.is_valid:
            raise ValueError(f"No valid manifest found in {unzipped_content_path}")
        # Move to final location
        if extracted_dir.exists():
            shutil.rmtree(extracted_dir)
        shutil.move(unzipped_content_path, extracted_dir)
        # Clean up temp directory (already gone if the flat-structure branch
        # moved temp_unzip_dir itself)
        if temp_unzip_dir.exists():
            shutil.rmtree(temp_unzip_dir)
        # Install via PlatformIO
        install_success = install_with_platformio(
            extracted_dir, manifest_result.is_framework, env_section
        )
        # Create status file with processing results so future runs can skip
        # the pio install step entirely
        cache_key = cache_manager._get_cache_key(zip_source)
        status_file = _get_status_file(artifact_dir, cache_key)
        # Get zip file size
        zip_file = artifact_dir / "artifact.zip"
        zip_size = zip_file.stat().st_size if zip_file.exists() else 0
        status = {
            "status": "complete" if install_success else "warning",
            "timestamp": datetime.now().isoformat(),
            "url": zip_source,
            "env_section": env_section,
            "extracted_dir": str(extracted_dir.relative_to(artifact_dir)),
            "zip_size_bytes": zip_size,
        }
        _write_status(status_file, status)
        if install_success:
            print(f"Successfully installed {zip_source} for {env_section}")
        else:
            logger.warning(f"Installation completed with warnings for {zip_source}")
        # Return the file URL for the extracted directory
        return get_proper_file_url(extracted_dir)
    except Exception as e:
        import traceback
        traceback.print_exc()
        logger.error(f"Failed to handle artifact {zip_source}: {e}")
        raise
def _process_artifact(
    artifact_url: str,
    env_section: str,
    cache_manager: "PlatformIOCache",
) -> ArtifactProcessingResult:
    """Process a single artifact (download, extract, install).

    Never raises: failures are logged and returned as an
    ArtifactProcessingResult carrying the exception.
    """
    try:
        local_path = handle_zip_artifact(artifact_url, cache_manager, env_section)
    except Exception as err:
        logger.error(f"Failed to process {artifact_url}: {err}", exc_info=err)
        return ArtifactProcessingResult(
            url=artifact_url,
            is_framework=False,  # Actual type is determined during processing
            env_section=env_section,
            exception=err,
        )
    return ArtifactProcessingResult(
        url=artifact_url,
        is_framework=False,  # Actual type is determined during processing
        env_section=env_section,
        resolved_path=local_path,
    )
def _collect_all_zip_urls(pio_ini: PlatformIOIni) -> List[ZipUrlInfo]:
    """
    Collect all zip URLs from platformio.ini in a single pass.

    Platform URLs are gathered first, then framework URLs; only values that
    look like downloadable .zip web URLs are kept.
    """
    collected: List[ZipUrlInfo] = []
    sources = (
        ("platform", pio_ini.get_platform_urls()),
        ("framework", pio_ini.get_framework_urls()),
    )
    for label, url_entries in sources:
        for section_name, option_name, url in url_entries:
            if not _is_zip_web_url(url):
                continue
            print(f"Found {label} zip: {url} in {section_name}")
            collected.append(ZipUrlInfo(url, section_name, option_name))
    return collected
def _dedupe_urls(
all_urls: List[ZipUrlInfo],
) -> Dict[str, str]:
"""
Deduplicate URLs, keeping the first occurrence of each unique URL.
Returns dict mapping url -> env_section.
"""
unique_urls: Dict[str, str] = {}
for url_info in all_urls:
if url_info.url not in unique_urls:
unique_urls[url_info.url] = url_info.section_name
return unique_urls
def _download_and_process_urls(
    unique_urls: Dict[str, str], cache_manager: PlatformIOCache
) -> Dict[str, str]:
    """
    Download and process all URLs concurrently.
    Returns dict mapping original_url -> resolved_local_path.
    Raises exception immediately if any download fails.

    Uses a small thread pool (capped at 4 workers) and fails fast: the first
    failed artifact re-raises its exception rather than being skipped.
    KeyboardInterrupt sets the global cancel event so in-flight downloads can
    abort cooperatively.
    """
    if not unique_urls:
        return {}
    max_workers = min(4, len(unique_urls))  # Don't create more threads than needed
    with ThreadPoolExecutor(
        max_workers=max_workers, thread_name_prefix="download"
    ) as executor:
        # Submit all download tasks
        futures: List[Future[ArtifactProcessingResult]] = []
        for url, env_section in unique_urls.items():
            future = executor.submit(_process_artifact, url, env_section, cache_manager)
            futures.append(future)
        replacements: Dict[str, str] = {}
        try:
            for future in as_completed(futures):
                try:
                    result = future.result()
                    if result.success and result.resolved_path is not None:
                        replacements[result.url] = result.resolved_path
                        print(f"✅ Resolved {result.url} -> {result.resolved_path}")
                    else:
                        logger.error(
                            f"❌ Failed to process {result.url}: {result.exception}"
                        )
                        # Re-raise all exceptions - no downloads should fail silently
                        if result.exception:
                            raise result.exception
                except Exception as e:
                    # Also catches the exception re-raised just above, so a
                    # failed artifact is logged twice before propagating.
                    logger.error(
                        f"Future failed with unexpected error: {e}", exc_info=e
                    )
                    raise  # Re-raise unexpected exceptions
        except KeyboardInterrupt:
            logger.warning("Processing interrupted, cancelling remaining downloads...")
            _global_cancel_event.set()
            # The context manager will handle cleanup of the executor
            raise
    return replacements
def _replace_all_urls(
pio_ini: PlatformIOIni,
all_urls: List[ZipUrlInfo],
replacements: Dict[str, str],
) -> None:
"""
Replace all URLs in platformio.ini with their resolved local paths.
"""
for url_info in all_urls:
if url_info.url in replacements:
pio_ini.replace_url(
url_info.section_name,
url_info.option_name,
url_info.url,
replacements[url_info.url],
)
def _apply_board_specific_config(
    platformio_ini_path: Path, custom_zip_cache_dir: Path
) -> None:
    """
    Enhanced config parser with improved zip detection and error handling.
    Modifies platformio.ini in-place with resolved local paths.
    Uses concurrent downloads for better performance.

    Pipeline: parse ini -> collect zip URLs -> dedupe -> download/extract/
    install concurrently -> rewrite the ini with the local paths.
    A parse failure logs and returns (no-op); a download failure propagates
    from _download_and_process_urls.
    """
    cache_manager = PlatformIOCache(custom_zip_cache_dir)
    try:
        pio_ini = PlatformIOIni.parseFile(platformio_ini_path)
        print(f"Parsed platformio.ini: {platformio_ini_path}")
    except (configparser.Error, FileNotFoundError) as e:
        logger.error(f"Error reading platformio.ini: {e}")
        return
    # Step 1: Collect all URLs from platformio.ini
    all_urls = _collect_all_zip_urls(pio_ini)
    if not all_urls:
        print("No zip artifacts found to process")
        return
    # Step 2: Deduplicate URLs (the same artifact may appear in several envs)
    unique_urls = _dedupe_urls(all_urls)
    print(f"Found {len(all_urls)} total URLs, {len(unique_urls)} unique")
    # Step 3: Download and process all unique URLs
    replacements = _download_and_process_urls(unique_urls, cache_manager)
    # Step 4: Replace all URLs in platformio.ini with resolved local paths
    if replacements:
        _replace_all_urls(pio_ini, all_urls, replacements)
        # Write back atomically
        try:
            pio_ini.dump(platformio_ini_path)
            print(f"Updated platformio.ini with {len(replacements)} resolved artifacts")
        except Exception as e:
            logger.error(f"Failed to update platformio.ini: {e}")
            raise
# Public API function
def resolve_and_cache_platform_artifacts(
    platformio_ini_path: Path, cache_dir: Path
) -> None:
    """
    Main entry point for resolving and caching PlatformIO platform artifacts.

    Downloads every zip platform/framework artifact referenced by the ini,
    installs it via PlatformIO, and rewrites the ini in-place so those entries
    point at the local extractions.

    Args:
        platformio_ini_path: Path to the platformio.ini file to process
        cache_dir: Cache directory for storing artifacts
    """
    print(f"Starting platform artifact resolution for {platformio_ini_path}")
    print(f"Using cache directory: {cache_dir}")
    _apply_board_specific_config(platformio_ini_path, cache_dir)
    print("Platform artifact resolution completed")

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,983 @@
#!/usr/bin/env python3
"""
FastLED Test Build System - Python Compiler API Integration
This module provides a high-performance test compilation system built on the proven
ci.compiler.clang_compiler API that delivers 8x faster build times compared to CMake.
Key Features:
- Parallel test compilation using ThreadPoolExecutor
- 90%+ cache hit rates with proven compiler settings
- Integration with build_unit.toml configuration
- Support for STUB platform testing without hardware dependencies
- GDB-compatible debug symbol generation
- Consistent performance across all platforms
"""
import hashlib
import os
import shutil
import subprocess
import sys
import tempfile
import textwrap
import time
import tomllib
from concurrent.futures import Future, as_completed
from dataclasses import dataclass, field
from pathlib import Path
from types import TracebackType
from typing import Any, Callable, Dict, List, Optional
from ci.util.paths import PROJECT_ROOT
def optimize_python_command(cmd: list[str]) -> list[str]:
    """
    Optimize command list for subprocess execution in uv environment.

    Python invocations must go through 'uv run python' so that uv-managed
    packages (e.g. ziglang) are importable; invoking the bare interpreter
    would bypass the uv environment.

    Args:
        cmd: Command list that may start with 'python' or 'python3'

    Returns:
        list[str]: Command rewritten to run under 'uv run python' when the
        first element is a python interpreter, otherwise the input unchanged
    """
    if not cmd:
        return cmd
    if cmd[0] not in ("python", "python3"):
        return cmd
    # Both python and python3 are normalized to the uv-managed 'python'.
    return ["uv", "run", "python", *cmd[1:]]
from ci.compiler.clang_compiler import (
BuildFlags,
BuildTools,
Compiler,
CompilerOptions,
LinkOptions,
Result,
create_compiler_options_from_toml,
link_program_sync,
load_build_flags_from_toml,
)
@dataclass
class TestExecutable:
    """Represents a compiled test executable"""
    # Test identifier; presumably the stem without the "test_" prefix
    # (see _link_tests: stem.replace("test_", "")) - TODO confirm at creation site.
    name: str
    # Path to the linked executable to run.
    executable_path: Path
    # Path to the originating test_*.cpp source file.
    test_source_path: Path
@dataclass
class CompileResult:
    """Result of test compilation operation"""
    # True only when every test both compiled and linked successfully.
    success: bool
    # Number of test executables produced.
    compiled_count: int
    # Wall-clock duration of the compile+link phase, in seconds.
    duration: float
    # Per-test failures. default_factory=list gives each instance its own
    # fresh list (the previous `lambda: list()` was a redundant wrapper).
    errors: List["CompileError"] = field(default_factory=list)
@dataclass
class CompileError:
    """Compilation error for a specific test"""
    # Stem of the failing test file (e.g. "test_foo"); for linking failures,
    # the test name parsed from the linking_failures entry.
    test_name: str
    # Diagnostic text: compiler stderr, falling back to stdout, or a
    # "Linking failed: ..." message.
    message: str
class FastLEDTestCompiler:
"""
Test compiler built on proven Compiler API for 8x faster builds.
    This class leverages the proven ci.compiler.clang_compiler.Compiler API that has already
demonstrated 4x+ performance improvements in example compilation. By using the same
proven patterns and configurations, we inherit the optimized build flags, caching,
and parallel compilation capabilities.
"""
# Class variable to store existing instance for get_existing_instance()
_existing_instance: Optional["FastLEDTestCompiler"] = None
    def __init__(
        self,
        compiler: Compiler,
        build_dir: Path,
        project_root: Path,
        quick_build: bool = True,
        strict_mode: bool = False,
        no_unity: bool = False,
    ):
        """Store build configuration and register this instance globally.

        Args:
            compiler: Configured Compiler used for all compilation jobs.
            build_dir: Directory receiving object files and executables.
            project_root: FastLED repository root.
            quick_build: Select the quick (vs debug) flag set from build_unit.toml.
            strict_mode: Enable strict warning flags from build_unit.toml.
            no_unity: Disable unity builds (compile source files individually).
        """
        self.compiler = compiler
        self.build_dir = build_dir
        self.project_root = project_root
        self.quick_build = quick_build
        self.strict_mode = strict_mode
        self.no_unity = no_unity
        # Populated later by compile_all_tests()/_link_tests().
        self.compiled_tests: List[TestExecutable] = []
        self.linking_failures: List[str] = []
        # Must run after quick_build/strict_mode are set: _load_build_flags reads them.
        self.build_flags = self._load_build_flags()
        # Remember the most recent instance for get_existing_instance().
        FastLEDTestCompiler._existing_instance = self
def _load_build_flags(self) -> BuildFlags:
"""Load build flags from TOML configuration"""
toml_path = self.project_root / "ci" / "build_unit.toml"
return BuildFlags.parse(toml_path, self.quick_build, self.strict_mode)
    @classmethod
    def create_for_unit_tests(
        cls,
        project_root: Path,
        clean_build: bool = False,
        enable_static_analysis: bool = False,
        specific_test: str | None = None,
        quick_build: bool = True,
        strict_mode: bool = False,
        no_unity: bool = False,
    ) -> "FastLEDTestCompiler":
        """
        Create compiler configured for FastLED unit tests using TOML build flags.
        This configuration uses the new TOML-based build flag system:
        - ci/build_unit.toml for centralized flag configuration
        - Support for build modes (quick/debug) and strict mode
        - STUB platform for hardware-free testing
        - Precompiled headers for faster compilation
        - Parallel compilation with ThreadPoolExecutor

        NOTE(review): enable_static_analysis and specific_test are accepted
        but not referenced in this factory body - presumably test filtering
        happens later via discover_test_files(); confirm before removing.
        """
        # Set up build directory in .build/fled/unit for consistency
        build_dir = project_root / ".build" / "fled" / "unit"
        if clean_build and build_dir.exists():
            print("###########################")
            print("# CLEANING UNIT TEST BUILD DIR #")
            print("###########################")
            import errno
            import os
            import shutil
            import stat
            import time
            def handle_remove_readonly(
                func: Callable[[str], None],
                path: str,
                exc: tuple[type[BaseException], BaseException, TracebackType | None],
            ) -> None:
                """Error handler for Windows readonly files"""
                # Clear the read-only bit and retry the failed operation.
                if hasattr(exc[1], "errno") and exc[1].errno == errno.EACCES:  # type: ignore
                    os.chmod(path, stat.S_IWRITE)
                    func(path)
                else:
                    raise
            # Windows-compatible directory removal with retry
            max_retries = 3
            for attempt in range(max_retries):
                try:
                    # Use onerror parameter for compatibility across Python versions
                    shutil.rmtree(build_dir, onerror=handle_remove_readonly)
                    break
                except (OSError, PermissionError) as e:
                    if attempt < max_retries - 1:
                        print(
                            f"Warning: Failed to remove build directory (attempt {attempt + 1}): {e}"
                        )
                        time.sleep(0.01)  # Brief pause before retry
                        continue
                    else:
                        # Non-fatal: fall back to reusing the existing directory.
                        print(
                            f"Warning: Could not remove build directory after {max_retries} attempts: {e}"
                        )
                        print("Continuing with existing directory...")
                        break
        build_dir.mkdir(parents=True, exist_ok=True)
        # Additional compiler args beyond TOML
        additional_compiler_args = [
            f"-I{project_root}/src/platforms/stub",  # STUB platform headers
        ]
        # Load build flags using BuildFlags.parse() to get platform-specific configuration
        toml_path = project_root / "ci" / "build_unit.toml"
        build_flags = BuildFlags.parse(toml_path, quick_build, strict_mode)
        # Load TOML config for test-specific flags that aren't in BuildFlags
        with open(toml_path, "rb") as f:
            config = tomllib.load(f)
        # Get test-specific defines from TOML (not included in BuildFlags.parse)
        test_defines = config.get("test", {}).get("defines", [])
        # Create compiler args from BuildFlags (includes platform-specific flags)
        compiler_args: List[str] = []
        compiler_args.extend(
            build_flags.compiler_flags
        )  # Platform-aware compiler flags
        compiler_args.extend(build_flags.include_flags)  # Platform-aware include flags
        # Add strict mode flags if enabled (already handled by BuildFlags.parse)
        # Add additional compiler args
        if additional_compiler_args:
            compiler_args.extend(additional_compiler_args)
        # Extract defines without the "-D" prefix for CompilerOptions
        defines: List[str] = []
        for define in test_defines:
            if define.startswith("-D"):
                defines.append(define[2:])  # Remove "-D" prefix
            else:
                defines.append(define)
        # Get tools configuration from BuildFlags (respects build_unit.toml)
        # Use modern command-based approach
        compiler_command = build_flags.tools.cpp_compiler
        print(f"Using compiler from build_unit.toml: {compiler_command}")
        # Use compiler command from TOML + flags as final compiler args
        final_compiler_args = compiler_command + compiler_args
        # Create compiler options with TOML-loaded flags and tools
        settings = CompilerOptions(
            include_path=str(project_root / "src"),
            defines=defines,
            compiler_args=final_compiler_args,  # Use cache-aware compiler args
            # Note: archiver and compiler fields are now handled via command-based approach
        )
        compiler = Compiler(settings, build_flags)
        # Create final instance with proper compiler and flags
        instance = cls(
            compiler, build_dir, project_root, quick_build, strict_mode, no_unity
        )
        return instance
def discover_test_files(self, specific_test: str | None = None) -> List[Path]:
"""
Discover test_*.cpp files using proven patterns.
Uses the same file discovery patterns as the existing CMake system
to ensure 100% compatibility with current test discovery logic.
"""
tests_dir = Path(PROJECT_ROOT) / "tests"
test_files: List[Path] = []
# Find all test files in tests directory and subdirectories
for test_file in tests_dir.rglob("test_*.cpp"):
# Skip the doctest_main.cpp file (not a test file)
if test_file.name == "doctest_main.cpp":
continue
# Handle both "test_name" and "name" formats for compatibility
test_stem = test_file.stem
test_name = test_stem.replace("test_", "")
# Check if we should do fuzzy matching (if there's a * in the name)
if specific_test:
if "*" in specific_test:
# Convert glob pattern to regex pattern
import re
pattern = specific_test.replace("*", ".*")
if re.search(pattern, test_stem) or re.search(pattern, test_name):
test_files.append(test_file)
else:
# Exact matching - either the full name or with test_ prefix (case-insensitive)
if (
test_stem.lower() == specific_test.lower()
or test_stem.lower() == f"test_{specific_test}".lower()
or test_name.lower() == specific_test.lower()
):
test_files.append(test_file)
else:
test_files.append(test_file)
return test_files
    def compile_all_tests(self, specific_test: str | None = None) -> CompileResult:
        """
        Compile all tests in parallel using proven API patterns.
        This method uses the same parallel compilation patterns proven in
        example compilation that deliver 4x+ performance improvements.

        Args:
            specific_test: Optional name/glob filter forwarded to
                discover_test_files(); None compiles every discovered test.

        Returns:
            CompileResult with overall success (compile AND link), the number
            of linked executables, elapsed time, and per-test errors.
        """
        compile_start = time.time()
        # If specific_test is not provided in the method call, use the one from discover_test_files
        test_files = self.discover_test_files(specific_test)
        if not test_files:
            # Nothing matched: report success with zero work done.
            return CompileResult(success=True, compiled_count=0, duration=0.0)
        print(f"Compiling {len(test_files)} test files using proven Python API...")
        # Convert absolute path to relative for display
        rel_build_dir = os.path.relpath(self.build_dir)
        print(f"Build directory: {rel_build_dir}")
        # Always show build configuration in unit test mode
        print("\nFastLED Library Build Configuration:")
        if self.no_unity:
            print(" ❌ Unity build disabled - Compiling individual source files")
        else:
            print(" ✅ Unity build enabled - Using glob pattern:")
            print(" src/**/*.cpp")
        print("\nPrecompiled header status:")
        if self.compiler.settings.use_pch:
            print(" ✅ PCH enabled - Using precompiled headers for faster compilation")
            print(" PCH content:")
            print(textwrap.indent(self.compiler.generate_pch_header(), " "))
            # Create PCH for faster compilation
            print("\n🔧 Creating precompiled header...")
            pch_success = self.compiler.create_pch_file()
            if pch_success:
                print(" ✅ PCH created successfully")
            else:
                # Non-fatal: compilation proceeds without the PCH.
                print(" ❌ PCH creation failed - continuing with direct compilation")
        else:
            print(" ❌ PCH disabled - Not using precompiled headers")
        print("\nCompiler flags:")
        for flag in self.compiler.get_compiler_args():
            print(f" {flag}")
        print("\nLinker flags:")
        # NOTE(review): this path is only used to *display* linker args here;
        # the actual library is produced during _link_tests - confirm the two
        # stay in sync.
        for flag in self._get_platform_linker_args(self.build_dir / "libfastled.lib"):
            print(f" {flag}")
        print("")
        # Print list of test files being compiled
        print("Test files to compile:")
        for i, test_file in enumerate(test_files, 1):
            print(f" {i}. {test_file.name}")
        print("")
        # Submit parallel compilation jobs (proven pattern from example compilation)
        future_to_test: Dict[Future[Result], Path] = {}
        # Check if we're running in parallel or sequential mode
        if os.environ.get("NO_PARALLEL"):
            print("Starting sequential compilation of test files...")
        else:
            print("Starting parallel compilation of test files...")
        for test_file in test_files:
            # Compile to object file first (since compile_cpp_file uses -c flag)
            obj_path = self.build_dir / f"{test_file.stem}.o"
            # Convert absolute path to relative for display
            rel_obj_path = os.path.relpath(obj_path)
            print(f"Submitting compilation job for: {test_file.name} -> {rel_obj_path}")
            # Show compilation command if enabled via env var
            if os.environ.get("FASTLED_TEST_SHOW_COMPILE", "").lower() in (
                "1",
                "true",
                "yes",
            ):
                # Convert absolute path to relative for display
                rel_obj_path = os.path.relpath(obj_path)
                print(f"[COMPILE] {test_file.name} -> {rel_obj_path}")
            compile_future = self.compiler.compile_cpp_file(
                test_file,
                output_path=obj_path,
                additional_flags=[
                    "-I",
                    str(self.project_root / "src"),
                    "-I",
                    str(self.project_root / "tests"),
                    # NOTE: Compiler flags now come from build_unit.toml
                ],
            )
            future_to_test[compile_future] = test_file
        # Collect compilation results with progress reporting
        compiled_objects: List[Path] = []
        errors: List[CompileError] = []
        completed = 0
        print(f"Waiting for {len(future_to_test)} compilation jobs to complete...")
        try:
            for future in as_completed(future_to_test.keys()):
                test_file = future_to_test[future]
                result: Result = future.result()
                completed += 1
                if result.ok:
                    obj_path = self.build_dir / f"{test_file.stem}.o"
                    compiled_objects.append(obj_path)
                    print(f"[{completed}/{len(test_files)}] Compiled {test_file.name}")
                else:
                    errors.append(
                        CompileError(
                            test_name=test_file.stem,
                            message=result.stderr
                            or result.stdout
                            or "Compilation failed",
                        )
                    )
                    print(
                        f"[{completed}/{len(test_files)}] FAILED {test_file.name}: {result.stderr}"
                    )
        except KeyboardInterrupt:
            print("\nKeyboard interrupt detected during compilation")
            # Clean up any in-progress compilations
            for future in future_to_test.keys():
                future.cancel()
            import _thread
            # Propagate the interrupt to the main thread as well; presumably
            # needed because worker threads may swallow it - TODO confirm.
            _thread.interrupt_main()
            raise
        if errors:
            # Fail fast: don't attempt linking when any compile failed.
            duration = time.time() - compile_start
            print(f"Compilation failed with {len(errors)} errors in {duration:.2f}s")
            return CompileResult(
                success=False, compiled_count=0, duration=duration, errors=errors
            )
        print(f"All {len(compiled_objects)} object files compiled successfully")
        # Link each test to executable using proven linking API
        self.compiled_tests = self._link_tests(compiled_objects)
        # Check for linking failures and add them to errors
        if hasattr(self, "linking_failures") and self.linking_failures:
            for failure in self.linking_failures:
                # Parse the "name: error" failure string produced by _link_tests
                if ":" in failure:
                    test_name, error_msg = failure.split(":", 1)
                    errors.append(
                        CompileError(
                            test_name=test_name.strip(),
                            message=f"Linking failed: {error_msg.strip()}",
                        )
                    )
        duration = time.time() - compile_start
        # Success requires both compilation AND linking to succeed
        success = len(errors) == 0 and len(self.compiled_tests) > 0
        if success:
            print(
                f"SUCCESS: Compiled and linked {len(self.compiled_tests)} tests in {duration:.2f}s"
            )
            # List all the successful test executables
            print("Test executables created:")
            for i, test in enumerate(self.compiled_tests, 1):
                # Convert absolute path to relative for display
                rel_exe_path = os.path.relpath(test.executable_path)
                print(f" {i}. {test.name} -> {rel_exe_path}")
        else:
            print(f"FAILED: {len(errors)} total failures (compilation + linking)")
        return CompileResult(
            success=success,
            compiled_count=len(self.compiled_tests),
            duration=duration,
            errors=errors,
        )
def _link_tests(self, compiled_objects: List[Path]) -> List[TestExecutable]:
    """Link each compiled test object file into a runnable executable.

    Pipeline per test object:
      1. Build (or reuse) the FastLED static library.
      2. Compile ``tests/doctest_main.cpp`` once; it provides ``main()`` and
         the doctest implementation for tests without their own entry point.
      3. Compute a content-based cache key (library + object(s) + linker
         flags) and reuse a previously linked executable when possible.
      4. Otherwise link, cache the result, and copy it to ``tests/bin/``.

    Linking failures are collected in ``self.linking_failures`` rather than
    aborting the whole run, so the caller can report them in aggregate.

    Returns:
        List of TestExecutable records pointing at the copies in tests/bin/.
    """
    print(f"Linking {len(compiled_objects)} test executables...")
    # First, build a complete FastLED library similar to CMake approach
    fastled_lib_path = self._build_fastled_library()
    # Compile doctest_main.cpp once for all tests (provides main function and
    # doctest implementation); skipped when the object is already up to date
    doctest_main_path = self.project_root / "tests" / "doctest_main.cpp"
    doctest_obj_path = self.build_dir / "doctest_main.o"
    if not doctest_obj_path.exists():
        print("Compiling doctest_main.cpp...")
        doctest_future = self.compiler.compile_cpp_file(
            doctest_main_path,
            output_path=doctest_obj_path,
            additional_flags=["-c"],  # Compile only
        )
        doctest_result: Result = doctest_future.result()
        if not doctest_result.ok:
            # Without doctest_main no doctest-based test can link; bail out
            print(
                f"ERROR: Failed to compile doctest_main.cpp: {doctest_result.stderr}"
            )
            return []
    print(f"Compiled FastLED library and doctest_main")
    compiled_tests: List[TestExecutable] = []
    linking_failures: List[str] = []
    cache_hits = 0
    cache_misses = 0
    # Link each test with the FastLED library
    for obj_path in compiled_objects:
        # Derive test name from object file (strip the "test_" prefix)
        test_name = obj_path.stem.replace("test_", "")
        exe_path = self.build_dir / f"test_{test_name}.exe"
        # Define clean execution path in tests/bin/ (this is where tests
        # will actually run from)
        tests_bin_dir = self.project_root / "tests" / "bin"
        tests_bin_dir.mkdir(parents=True, exist_ok=True)
        clean_exe_path = tests_bin_dir / f"test_{test_name}.exe"
        # Check if this test has its own main() function; if so it is
        # linked standalone, without doctest_main.o
        test_source = self.project_root / "tests" / f"test_{test_name}.cpp"
        has_own_main = self._test_has_own_main(test_source)
        # Get linker args (needed for cache key calculation)
        linker_args = self._get_platform_linker_args(fastled_lib_path)
        # Calculate comprehensive cache key based on all linking inputs
        if has_own_main:
            # For standalone tests, only include test object file
            cache_key = self._calculate_link_cache_key(
                obj_path, fastled_lib_path, linker_args
            )
        else:
            # For doctest tests, include both test object and doctest_main
            # object: hash each file, then combine the two digests
            test_obj_hash = self._calculate_file_hash(obj_path)
            doctest_obj_hash = self._calculate_file_hash(doctest_obj_path)
            combined_obj_hash = hashlib.sha256(
                f"{test_obj_hash}+{doctest_obj_hash}".encode("utf-8")
            ).hexdigest()
            # Mirror _calculate_link_cache_key's format with the combined
            # object hash substituted for the single-object hash
            fastled_hash = self._calculate_file_hash(fastled_lib_path)
            linker_hash = self._calculate_linker_args_hash(linker_args)
            combined = f"fastled:{fastled_hash}|test:{combined_obj_hash}|flags:{linker_hash}"
            cache_key = hashlib.sha256(combined.encode("utf-8")).hexdigest()[:16]
        # Check for cached executable based on comprehensive cache key
        cached_exe = self._get_cached_executable(test_name, cache_key)
        if cached_exe:
            print(f" {test_name}: Using cached executable (link cache hit)")
            try:
                shutil.copy2(cached_exe, exe_path)
                # Also copy to clean execution location
                shutil.copy2(cached_exe, clean_exe_path)
                cache_hits += 1
                test_exe = TestExecutable(
                    name=test_name,
                    executable_path=clean_exe_path,  # Point to clean location for execution
                    test_source_path=test_source,
                )
                compiled_tests.append(test_exe)
                continue  # Skip linking, we have cached result
            except Exception as e:
                # Copy failure is non-fatal: fall through and relink
                print(
                    f" {test_name}: Warning - failed to copy cached executable, will relink: {e}"
                )
                # Fall through to linking logic
        if has_own_main:
            # Link standalone test without doctest_main.o
            link_options = LinkOptions(
                object_files=[obj_path],  # Only the test object file
                output_executable=exe_path,
                linker=f'"{sys.executable}" -m ziglang c++',  # Use optimized Python executable for linking
                linker_args=linker_args,  # Use pre-calculated args for consistency
            )
        else:
            # Link with the FastLED library instead of individual objects
            link_options = LinkOptions(
                object_files=[
                    obj_path,
                    doctest_obj_path,
                ],  # Test object + doctest main
                output_executable=exe_path,
                linker=f'"{sys.executable}" -m ziglang c++',  # Use optimized Python executable for linking
                linker_args=linker_args,  # Use pre-calculated args for consistency
            )
        # Show linking command if enabled via environment flag
        if os.environ.get("FASTLED_TEST_SHOW_LINK", "").lower() in (
            "1",
            "true",
            "yes",
        ):
            # Convert absolute path to relative for display
            rel_exe_path = os.path.relpath(exe_path)
            print(f"[LINK] {test_name} -> {rel_exe_path}")
        print(f"Linking test: {test_name}")
        # STRICT: Provide explicit BuildFlags - NO defaults allowed
        link_result: Result = link_program_sync(
            link_options, self.compiler.build_flags
        )
        if not link_result.ok:
            # Create more detailed error message showing link command context
            error_msg = f" {test_name}: Linking failed - Linker: {link_options.linker}, Output: {link_options.output_executable}, Objects: {len(link_options.object_files)} files"
            print(error_msg)
            print(f" Error details: {link_result.stderr}")
            linking_failures.append(f"{test_name}: {link_result.stderr}")
            continue
        print(f" {test_name}: Linking successful")
        cache_misses += 1
        # Cache the newly linked executable for future use
        self._cache_executable(test_name, cache_key, exe_path)
        # Copy to clean execution location (tests/bin/)
        test_exe = TestExecutable(
            name=test_name,
            executable_path=clean_exe_path,  # Point to clean location for execution
            test_source_path=test_source,
        )
        shutil.copy2(exe_path, clean_exe_path)
        compiled_tests.append(test_exe)
    if linking_failures:
        print(f"ERROR: {len(linking_failures)} linking failures:")
        for failure in linking_failures:
            print(f" {failure}")
        # Store linking failures for later reporting
        self.linking_failures = linking_failures
    else:
        self.linking_failures = []
    # Report link cache statistics
    total_tests = cache_hits + cache_misses
    if total_tests > 0:
        hit_rate = (cache_hits / total_tests) * 100
        print(
            f"Link cache: {cache_hits} hits, {cache_misses} misses ({hit_rate:.1f}% hit rate) - caching by lib+test+flags"
        )
    print(f"Successfully linked {len(compiled_tests)} test executables")
    return compiled_tests
def _test_has_own_main(self, test_source_path: Path) -> bool:
"""Check if a test file defines its own main() function"""
try:
if not test_source_path.exists():
return False
content = test_source_path.read_text(encoding="utf-8")
# Look for main function definition (simple pattern matching)
import re
# Match "int main(" allowing for whitespace and various formats
main_pattern = r"\bint\s+main\s*\("
return bool(re.search(main_pattern, content))
except Exception:
# If we can't read the file, assume it doesn't have main
return False
def _build_fastled_library(self) -> Path:
    """Build (or reuse) a complete FastLED static library like CMake does.

    Compiles every .cpp under src/** for the STUB platform (reusing object
    files that already exist) and archives the objects into a single static
    library with ``ziglang ar``.

    Returns:
        Path to the library archive.  NOTE: the path is returned even when
        archive creation reports a non-zero exit, matching the original
        best-effort behavior (link failures surface later).

    Fixes vs. previous revision:
      * Removed the dead ``ar_cmd`` (llvm-ar) command list that was built
        but never executed.
      * Replaced the manual alternating readline loop over stdout/stderr
        with ``communicate()``, which drains both pipes concurrently and
        cannot deadlock when one pipe's buffer fills.
    """
    print("Building FastLED static library...")
    # Define library path
    fastled_lib_path = self.build_dir / "libfastled.lib"
    # If library already exists, return it (for faster rebuilds)
    if fastled_lib_path.exists():
        # Convert absolute path to relative for display
        rel_lib_path = os.path.relpath(fastled_lib_path)
        print(f"Using existing FastLED library: {rel_lib_path}")
        return fastled_lib_path
    # Compile essential FastLED source files for STUB platform
    # User directive: Just glob ALL .cpp files in src/** - no exclusions needed
    import glob

    fastled_src_dir = self.project_root / "src"
    # Include ALL .cpp files recursively - no platform exclusions
    all_cpp_files: List[str] = glob.glob(
        str(fastled_src_dir / "**" / "*.cpp"), recursive=True
    )
    fastled_sources: List[Path] = [Path(cpp_file) for cpp_file in all_cpp_files]
    if self.no_unity:
        print(
            f"Individual compilation: Found {len(fastled_sources)} FastLED .cpp files for compilation"
        )
    else:
        print(
            f"Unity build mode: Found {len(fastled_sources)} FastLED .cpp files for compilation"
        )
    # Compile all FastLED source files to object files
    fastled_objects: List[Path] = []
    for src_file in fastled_sources:
        src_path = self.project_root / src_file
        # Create unique object file name including directory path to avoid
        # collisions between same-named files in different directories
        relative_path = src_path.relative_to(self.project_root / "src")
        safe_name = (
            str(relative_path.with_suffix("")).replace("/", "_").replace("\\", "_")
        )
        obj_path = self.build_dir / f"{safe_name}_fastled.o"
        if not obj_path.exists():
            future = self.compiler.compile_cpp_file(
                src_path,
                output_path=obj_path,
                additional_flags=[
                    "-c",  # Compile only
                    "-DFASTLED_STUB_IMPL",  # Essential for STUB platform
                    # NOTE: All other defines and compiler flags come from
                    # build_unit.toml ([test] section)
                ],
            )
            result: Result = future.result()
            if not result.ok:
                print(f"ERROR: Failed to compile {src_file}: {result.stderr}")
                # Continue with other files rather than failing completely
                continue
        fastled_objects.append(obj_path)
    print(f"Creating static library from {len(fastled_objects)} object files...")
    # Create static library using ziglang ar (archiver)
    print("Creating static library using ziglang ar...")
    lib_cmd: List[str] = [
        "python",
        "-m",
        "ziglang",
        "ar",
        "rcs",  # Create archive with symbol table
        str(fastled_lib_path),
    ] + [str(obj) for obj in fastled_objects]
    try:
        # Optimize command to use sys.executable instead of shell 'python' resolution
        python_exe = optimize_python_command(lib_cmd)
        process = subprocess.Popen(
            python_exe,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            encoding="utf-8",
            errors="replace",
            cwd=self.build_dir,
        )
        # communicate() drains both pipes concurrently - unlike alternating
        # blocking readline() calls, it cannot deadlock on a full pipe buffer
        _stdout_text, stderr_text = process.communicate()
        stderr_lines: list[str] = [
            line.rstrip() for line in (stderr_text or "").splitlines()
        ]
        if process.returncode != 0:
            print(f"ERROR: Failed to create static library: {stderr_lines}")
            return fastled_lib_path  # Return path even if creation failed
        # Convert absolute path to relative for display
        rel_lib_path = os.path.relpath(fastled_lib_path)
        print(f"Successfully created FastLED library: {rel_lib_path}")
        return fastled_lib_path
    except Exception as e:
        print(f"ERROR: Exception during library creation: {e}")
        raise
def get_test_executables(
    self, specific_test: str | None = None
) -> List[TestExecutable]:
    """Return compiled test executables, optionally filtered by name.

    A name containing ``*`` is treated as a glob pattern (matched against
    both the bare name and the ``test_``-prefixed name); otherwise matching
    is exact and case-insensitive, tolerant of a ``test_`` prefix either way.
    """
    if not specific_test:
        return self.compiled_tests
    if "*" in specific_test:
        # Glob-style fuzzy matching: translate '*' into regex '.*'
        import re

        regex = specific_test.replace("*", ".*")
        selected: List[TestExecutable] = []
        for candidate in self.compiled_tests:
            if re.search(regex, candidate.name) or re.search(
                regex, f"test_{candidate.name}"
            ):
                selected.append(candidate)
        return selected
    # Exact (case-insensitive) matching, with or without the test_ prefix
    acceptable = {
        specific_test.lower(),
        f"test_{specific_test}".lower(),
        specific_test.replace("test_", "").lower(),
    }
    return [c for c in self.compiled_tests if c.name.lower() in acceptable]
@property
def link_cache_dir(self) -> Path:
    """Directory holding cached linked executables (created on demand)."""
    directory = self.build_dir / "link_cache"
    directory.mkdir(exist_ok=True)
    return directory
def _calculate_file_hash(self, file_path: Path) -> str:
"""Calculate SHA256 hash of a file"""
if not file_path.exists():
return "no_file"
hash_sha256 = hashlib.sha256()
with open(file_path, "rb") as f:
# Read in chunks to handle large files efficiently
for chunk in iter(lambda: f.read(4096), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest()
def _calculate_linker_args_hash(self, linker_args: List[str]) -> str:
"""Calculate SHA256 hash of linker arguments"""
# Convert linker args to a stable string representation
args_str = "|".join(sorted(linker_args))
hash_sha256 = hashlib.sha256()
hash_sha256.update(args_str.encode("utf-8"))
return hash_sha256.hexdigest()
def _calculate_link_cache_key(
self, test_obj_path: Path, fastled_lib_path: Path, linker_args: List[str]
) -> str:
"""Calculate comprehensive cache key for linking"""
# Hash all the factors that affect linking output
fastled_hash = self._calculate_file_hash(fastled_lib_path)
test_obj_hash = self._calculate_file_hash(test_obj_path)
linker_hash = self._calculate_linker_args_hash(linker_args)
# Combine all hashes into a single cache key
combined = f"fastled:{fastled_hash}|test:{test_obj_hash}|flags:{linker_hash}"
final_hash = hashlib.sha256(combined.encode("utf-8")).hexdigest()
return final_hash[:16] # Use first 16 chars for readability
def _get_cached_executable(self, test_name: str, cache_key: str) -> Optional[Path]:
"""Check if a cached executable exists for this test and cache key"""
cached_exe = self.link_cache_dir / f"{test_name}_{cache_key}.exe"
return cached_exe if cached_exe.exists() else None
def _cache_executable(self, test_name: str, cache_key: str, exe_path: Path) -> None:
"""Cache an executable with the given cache key"""
if not exe_path.exists():
return
cached_exe = self.link_cache_dir / f"{test_name}_{cache_key}.exe"
try:
shutil.copy2(exe_path, cached_exe)
print(f" {test_name}: Cached executable for future use")
except Exception as e:
print(f" {test_name}: Warning - failed to cache executable: {e}")
def _get_platform_linker_args(self, fastled_lib_path: Path) -> List[str]:
"""Get platform-specific linker arguments using BuildFlags"""
# Use the build_flags that includes platform-specific configuration
args = self.build_flags.link_flags.copy()
# Load TOML config for test-specific and build mode flags
toml_path = self.project_root / "ci" / "build_unit.toml"
with open(toml_path, "rb") as f:
config = tomllib.load(f)
# Add test-specific linking flags if available
test_link_flags = config.get("linking", {}).get("test", {}).get("flags", [])
args.extend(test_link_flags)
# Add build mode specific flags (use test-specific debug mode)
mode = "quick" if self.quick_build else "test_debug"
mode_flags = config.get("build_modes", {}).get(mode, {}).get("link_flags", [])
args.extend(mode_flags)
# Add FastLED library
args.append(str(fastled_lib_path)) # Link against the FastLED static library
return args
@classmethod
def get_existing_instance(cls) -> Optional["FastLEDTestCompiler"]:
    """Return the compiler instance created earlier in this process, if any.

    Lets callers reuse an already-configured compiler (and its caches)
    instead of constructing a new one.  Returns None when no instance
    has been recorded in ``cls._existing_instance``.
    """
    return cls._existing_instance
def check_iwyu_available() -> bool:
    """Return True when an include-what-you-use binary is on PATH.

    Checks both the full name and the common short alias.
    """
    import shutil

    for tool_name in ("include-what-you-use", "iwyu"):
        if shutil.which(tool_name) is not None:
            return True
    return False
# For testing this module directly: discover the unit tests, compile and
# link them all, and print a summary of executables or errors.
if __name__ == "__main__":
    print("Testing FastLEDTestCompiler...")
    # Create test compiler (clean build so results are reproducible)
    test_compiler = FastLEDTestCompiler.create_for_unit_tests(
        project_root=Path(PROJECT_ROOT), clean_build=True
    )
    # Discover test files
    test_files = test_compiler.discover_test_files()
    print(f"Discovered {len(test_files)} test files")
    if test_files:
        # compile_all_tests() compiles every discovered test; the previous
        # unused "first 3 files" slice was dead code and has been removed.
        print(f"Testing compilation of {len(test_files)} test files...")
        compile_result = test_compiler.compile_all_tests()
        if compile_result.success:
            print(
                f"SUCCESS: Compiled {compile_result.compiled_count} tests in {compile_result.duration:.2f}s"
            )
            executables = test_compiler.get_test_executables()
            for exe in executables:
                print(f" Executable: {exe.name} -> {exe.executable_path}")
        else:
            print(f"FAILED: {len(compile_result.errors)} errors")
            for error in compile_result.errors:
                print(f" {error.test_name}: {error.message}")

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,171 @@
#!/usr/bin/env python3
import argparse
import os
import sys
from concurrent.futures import Future
from pathlib import Path
from typing import List, Sequence
from ci.compiler.clang_compiler import Compiler, CompilerOptions, Result
from ci.compiler.test_example_compilation import (
create_fastled_compiler,
get_fastled_core_sources,
)
def _sorted_src_files(files: Sequence[Path]) -> List[Path]:
project_root = Path.cwd()
def key_fn(p: Path) -> str:
try:
rel = p.relative_to(project_root)
except Exception:
rel = p
return rel.as_posix()
return sorted(files, key=key_fn)
def _partition(files: List[Path], chunks: int) -> List[List[Path]]:
total = len(files)
if chunks <= 0:
raise ValueError("chunks must be >= 1")
chunks = min(chunks, total) if total > 0 else 1
base = total // chunks
rem = total % chunks
partitions: List[List[Path]] = []
start = 0
for i in range(chunks):
size = base + (1 if i < rem else 0)
end = start + size
if size > 0:
partitions.append(files[start:end])
start = end
return partitions
def build_unity_chunks_and_archive(
    compiler: Compiler,
    chunks: int,
    output_archive: Path,
    unity_dir: Path,
    no_parallel: bool,
) -> Path:
    """Compile src/** as N unity chunks and archive the objects into a library.

    Args:
        compiler: Configured FastLED compiler whose settings are reused for
            the unity compilation (PCH is force-disabled below).
        chunks: Requested number of unity translation units; _partition
            clamps this to the number of available source files.
        output_archive: Destination path for the static archive.
        unity_dir: Directory receiving the generated unity{N}.cpp/.o files.
        no_parallel: When True, compile chunks one at a time.

    Returns:
        The path to the created archive.

    Raises:
        RuntimeError: If no sources are found, any chunk fails to compile,
            or archive creation fails.
    """
    unity_dir.mkdir(parents=True, exist_ok=True)
    # Gather and sort source files consistently with existing library logic
    files = get_fastled_core_sources()
    # Exclude any main-like TU so the library defines no entry point
    files = [p for p in files if p.name != "stub_main.cpp"]
    files = _sorted_src_files(files)
    if not files:
        raise RuntimeError("No source files found under src/** for unity build")
    partitions = _partition(files, chunks)
    # Prepare per-chunk unity paths: unity1.cpp, unity2.cpp, ... and their .o
    unity_cpp_paths: List[Path] = [
        unity_dir / f"unity{i + 1}.cpp" for i in range(len(partitions))
    ]
    unity_obj_paths: List[Path] = [p.with_suffix(".o") for p in unity_cpp_paths]
    # Compile chunks with options mirroring the main compiler's settings
    futures: List[Future[Result]] = []
    compile_opts = CompilerOptions(
        include_path=compiler.settings.include_path,
        compiler=compiler.settings.compiler,
        defines=compiler.settings.defines,
        std_version=compiler.settings.std_version,
        compiler_args=compiler.settings.compiler_args,
        use_pch=False,  # PCH is not useful for generated unity TUs
        additional_flags=["-c"],  # Compile only; archiving happens below
        parallel=not no_parallel,
    )

    def submit(idx: int) -> Future[Result]:
        # Kick off compilation of one unity chunk
        chunk_files = partitions[idx]
        unity_cpp = unity_cpp_paths[idx]
        return compiler.compile_unity(compile_opts, chunk_files, unity_cpp)

    if no_parallel:
        # Sequential execution: wait on each chunk before starting the next
        for i in range(len(partitions)):
            result = submit(i).result()
            if not result.ok:
                raise RuntimeError(f"Unity chunk {i + 1} failed: {result.stderr}")
    else:
        # Submit every chunk up front, then gather results in order
        futures = [submit(i) for i in range(len(partitions))]
        for i, fut in enumerate(futures):
            result = fut.result()
            if not result.ok:
                raise RuntimeError(f"Unity chunk {i + 1} failed: {result.stderr}")
    # Create archive from chunk objects
    from ci.compiler.clang_compiler import LibarchiveOptions

    output_archive.parent.mkdir(parents=True, exist_ok=True)
    archive_result = compiler.create_archive(
        unity_obj_paths, output_archive, LibarchiveOptions()
    ).result()
    if not archive_result.ok:
        raise RuntimeError(f"Archive creation failed: {archive_result.stderr}")
    return output_archive
def main() -> int:
    """CLI entry point for the chunked unity build.

    Parses command-line options, anchors the working directory at the
    project root, then builds the unity chunks and archive.

    Returns:
        0 on success, 1 on failure (the error is printed in red).
    """
    parser = argparse.ArgumentParser(
        description="Chunked UNITY build and archive for FastLED src/**"
    )
    parser.add_argument(
        "--chunks", type=int, default=1, help="Number of unity chunks (1-4 typical)"
    )
    parser.add_argument(
        "--no-parallel", action="store_true", help="Compile unity chunks sequentially"
    )
    parser.add_argument(
        "--output",
        type=str,
        default=str(Path(".build/fastled/libfastled.a")),
        help="Path to output archive libfastled.a",
    )
    parser.add_argument(
        "--unity-dir",
        type=str,
        default=str(Path(".build/fastled/unity")),
        help="Directory to place generated unity{N}.cpp/.o files",
    )
    parser.add_argument(
        "--use-pch",
        action="store_true",
        help="Enable PCH (not recommended for unity builds)",
    )
    args = parser.parse_args()
    # Anchor to project root (two levels up from this file) so relative
    # default paths like .build/fastled resolve consistently
    here = Path(__file__).resolve()
    project_root = here.parent.parent.parent
    os.chdir(project_root)
    try:
        compiler = create_fastled_compiler(
            use_pch=args.use_pch, parallel=not args.no_parallel
        )
        out = build_unity_chunks_and_archive(
            compiler=compiler,
            chunks=args.chunks,
            output_archive=Path(args.output),
            unity_dir=Path(args.unity_dir),
            no_parallel=args.no_parallel,
        )
        print(f"[UNITY] Created archive: {out}")
        return 0
    except KeyboardInterrupt:
        # Let Ctrl-C propagate so the process exits with the usual signal status
        raise
    except Exception as e:
        # ANSI escape codes render the failure message in red
        print(f"\x1b[31m[UNITY] FAILED: {e}\x1b[0m")
        return 1
if __name__ == "__main__":
    # Propagate main()'s status code as the process exit code
    sys.exit(main())