initial commit
This commit is contained in:
1
libraries/FastLED/ci/util/__init__.py
Normal file
1
libraries/FastLED/ci/util/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""FastLED CI tools and utilities - internal package."""
|
||||
426
libraries/FastLED/ci/util/bin_2_elf.py
Normal file
426
libraries/FastLED/ci/util/bin_2_elf.py
Normal file
@@ -0,0 +1,426 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Improved Binary to ELF Converter
|
||||
Handles platform-specific binary formats for ESP32, Uno, and other platforms.
|
||||
"""
|
||||
|
||||
import json
import shlex
import subprocess
from pathlib import Path
|
||||
|
||||
|
||||
def _run_command(command: list[str] | str, show_output: bool = False) -> str:
|
||||
"""
|
||||
Run a command using subprocess and capture the output.
|
||||
|
||||
Args:
|
||||
command (list or str): Command to run.
|
||||
show_output (bool): Print command and its output if True.
|
||||
|
||||
Returns:
|
||||
str: Standard output of the command.
|
||||
|
||||
Raises:
|
||||
RuntimeError: If the command fails.
|
||||
"""
|
||||
if isinstance(command, str):
|
||||
command = [command]
|
||||
|
||||
if show_output:
|
||||
print(f"Running command: {' '.join(command)}")
|
||||
|
||||
result = subprocess.run(command, capture_output=True, text=True)
|
||||
if result.returncode != 0:
|
||||
if show_output:
|
||||
print(f"Command failed: {' '.join(command)}")
|
||||
print(f"Error: {result.stderr}")
|
||||
raise RuntimeError(f"Command failed: {' '.join(command)}\n{result.stderr}")
|
||||
|
||||
if show_output and result.stdout:
|
||||
print(f"Command output: {result.stdout}")
|
||||
return result.stdout
|
||||
|
||||
|
||||
def _detect_platform_from_paths(as_path: Path, ld_path: Path):
|
||||
"""
|
||||
Detect the target platform from toolchain paths.
|
||||
|
||||
Returns:
|
||||
str: Platform name (esp32, avr, arm, etc.)
|
||||
"""
|
||||
path_str = str(as_path).lower()
|
||||
|
||||
if "xtensa-esp32" in path_str or "esp32" in path_str:
|
||||
return "esp32"
|
||||
elif "avr" in path_str:
|
||||
return "avr"
|
||||
elif "arm" in path_str:
|
||||
return "arm"
|
||||
else:
|
||||
return "unknown"
|
||||
|
||||
|
||||
def _analyze_binary_structure(bin_file: Path, platform: str):
|
||||
"""
|
||||
Analyze binary structure based on platform.
|
||||
|
||||
Args:
|
||||
bin_file (Path): Path to binary file
|
||||
platform (str): Target platform
|
||||
|
||||
Returns:
|
||||
dict: Binary analysis information
|
||||
"""
|
||||
print(f"Analyzing {platform} binary: {bin_file}")
|
||||
|
||||
if not bin_file.exists():
|
||||
raise RuntimeError(f"Binary file not found: {bin_file}")
|
||||
|
||||
with open(bin_file, "rb") as f:
|
||||
data = f.read()
|
||||
|
||||
analysis: dict[str, str | int | bytes | dict[str, str | int]] = {
|
||||
"platform": platform,
|
||||
"size": len(data),
|
||||
"header": data[:32] if len(data) >= 32 else data,
|
||||
}
|
||||
|
||||
if platform == "esp32":
|
||||
# ESP32 binary format analysis
|
||||
print(f"ESP32 binary size: {len(data)} bytes")
|
||||
if len(data) >= 32:
|
||||
# ESP32 image header
|
||||
magic = data[0]
|
||||
segments = data[1]
|
||||
flash_mode = data[2]
|
||||
flash_size_freq = data[3]
|
||||
analysis["esp32"] = {
|
||||
"magic": hex(magic),
|
||||
"segments": segments,
|
||||
"flash_mode": flash_mode,
|
||||
"flash_size_freq": flash_size_freq,
|
||||
}
|
||||
print(f"ESP32 image - Magic: {hex(magic)}, Segments: {segments}")
|
||||
|
||||
elif platform == "avr":
|
||||
# AVR/Arduino binary format (Intel HEX)
|
||||
print(f"AVR binary size: {len(data)} bytes")
|
||||
if bin_file.suffix.lower() == ".hex":
|
||||
print("Intel HEX format detected")
|
||||
analysis["format"] = "intel_hex"
|
||||
else:
|
||||
analysis["format"] = "binary"
|
||||
|
||||
# Print hex dump of first 64 bytes for analysis
|
||||
print(f"\nFirst 64 bytes of {platform} binary (hex):")
|
||||
for i in range(0, min(64, len(data)), 16):
|
||||
hex_bytes = " ".join(f"{b:02x}" for b in data[i : i + 16])
|
||||
ascii_chars = "".join(
|
||||
chr(b) if 32 <= b < 127 else "." for b in data[i : i + 16]
|
||||
)
|
||||
print(f"{i:08x}: {hex_bytes:<48} {ascii_chars}")
|
||||
|
||||
return analysis
|
||||
|
||||
|
||||
def _generate_platform_linker_script(map_file: Path, platform: str) -> Path:
|
||||
"""
|
||||
Generate a platform-specific linker script.
|
||||
|
||||
Args:
|
||||
map_file (Path): Path to the map file.
|
||||
platform (str): Target platform.
|
||||
|
||||
Returns:
|
||||
Path: Path to the generated linker script.
|
||||
"""
|
||||
print(f"Generating {platform} linker script...")
|
||||
|
||||
if platform == "esp32":
|
||||
# ESP32 memory layout
|
||||
linker_script_content = """
|
||||
MEMORY
|
||||
{
|
||||
iram0_0_seg : org = 0x40080000, len = 0x20000
|
||||
dram0_0_seg : org = 0x3FFB0000, len = 0x50000
|
||||
flash_seg : org = 0x400D0020, len = 0x330000
|
||||
}
|
||||
|
||||
SECTIONS
|
||||
{
|
||||
.iram0.text : {
|
||||
*(.iram0.literal .iram0.text)
|
||||
} > iram0_0_seg
|
||||
|
||||
.flash.text : {
|
||||
*(.literal .text .literal.* .text.*)
|
||||
} > flash_seg
|
||||
|
||||
.flash.rodata : {
|
||||
*(.rodata .rodata.*)
|
||||
} > flash_seg
|
||||
|
||||
.dram0.data : {
|
||||
*(.data .data.*)
|
||||
} > dram0_0_seg
|
||||
|
||||
.dram0.bss : {
|
||||
*(.bss .bss.*)
|
||||
} > dram0_0_seg
|
||||
}
|
||||
"""
|
||||
elif platform == "avr":
|
||||
# AVR memory layout
|
||||
linker_script_content = """
|
||||
MEMORY
|
||||
{
|
||||
text (rx) : ORIGIN = 0x0000, LENGTH = 0x8000
|
||||
data (rw!x) : ORIGIN = 0x800100, LENGTH = 0x800
|
||||
eeprom (rw!x) : ORIGIN = 0x810000, LENGTH = 0x400
|
||||
}
|
||||
|
||||
SECTIONS
|
||||
{
|
||||
.text : {
|
||||
*(.text)
|
||||
*(.text.*)
|
||||
} > text
|
||||
|
||||
.data : {
|
||||
*(.data)
|
||||
*(.data.*)
|
||||
} > data
|
||||
|
||||
.bss : {
|
||||
*(.bss)
|
||||
*(.bss.*)
|
||||
} > data
|
||||
}
|
||||
"""
|
||||
else:
|
||||
# Generic linker script
|
||||
linker_script_content = """
|
||||
SECTIONS
|
||||
{
|
||||
.text 0x00000000 : {
|
||||
*(.text)
|
||||
*(.text.*)
|
||||
}
|
||||
.data : {
|
||||
*(.data)
|
||||
*(.data.*)
|
||||
}
|
||||
.bss : {
|
||||
*(.bss)
|
||||
*(.bss.*)
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
linker_script_path = map_file.with_suffix(f".{platform}.ld")
|
||||
linker_script_path.write_text(linker_script_content)
|
||||
print(f"Generated {platform} linker script at: {linker_script_path}")
|
||||
return linker_script_path
|
||||
|
||||
|
||||
def _create_platform_object_file(as_path: Path, dummy_obj_path: Path, platform: str):
    """
    Create a platform-specific dummy object file.

    Writes a minimal assembly stub for the platform, assembles it with
    the given assembler, and always removes the temporary ``.s`` file —
    including when assembly fails (the original leaked it on error).

    Args:
        as_path (Path): Path to the assembler executable.
        dummy_obj_path (Path): Path to the dummy object file to be created.
        platform (str): Target platform.
    """
    print(f"Creating {platform} dummy object file...")

    if platform == "esp32":
        # ESP32/Xtensa assembly: infinite loop via `j`.
        assembly_code = """
.section .text
.global _start
.type _start, @function
_start:
    nop
    j _start
"""
    elif platform == "avr":
        # AVR assembly: infinite loop via relative jump.
        assembly_code = """
.section .text
.global _start
_start:
    nop
    rjmp _start
"""
    else:
        # Generic single-nop stub.
        assembly_code = """
.section .text
.global _start
_start:
    nop
"""

    asm_file = dummy_obj_path.with_suffix(".s")
    asm_file.write_text(assembly_code)

    try:
        command = [str(as_path), "-o", str(dummy_obj_path), str(asm_file)]
        print(f"Creating {platform} dummy object file: {dummy_obj_path}")
        _run_command(command, show_output=True)
    finally:
        # Clean up the temporary assembly file even if the assembler failed.
        if asm_file.exists():
            asm_file.unlink()
|
||||
|
||||
|
||||
def _create_dummy_elf(
    ld_path: Path, linker_script: Path, dummy_obj: Path, output_elf: Path, platform: str
):
    """
    Link the dummy object file into an ELF using the given linker script.

    Args:
        ld_path (Path): Path to the ld executable.
        linker_script (Path): Linker script to apply.
        dummy_obj (Path): Dummy object file to link.
        output_elf (Path): Destination ELF path.
        platform (str): Target platform (used for logging only).
    """
    print(f"Creating {platform} dummy ELF file...")

    link_cmd = [
        str(ld_path),
        str(dummy_obj),
        "-T",
        str(linker_script),
        "-o",
        str(output_elf),
    ]
    print(f"Creating {platform} dummy ELF file: {output_elf}")
    _run_command(link_cmd, show_output=True)
|
||||
|
||||
|
||||
def _update_elf_sections(
    objcopy_path: Path, bin_file: Path, elf_file: Path, platform: str
):
    """
    Update the ELF file sections with binary data (platform-specific).

    Tries the platform's preferred section first, then falls back to a
    list of common section names. As before, failures are reported but
    not raised (best-effort); additionally a final warning is now printed
    when every candidate section fails, instead of exiting silently.

    Args:
        objcopy_path (Path): Path to the objcopy executable.
        bin_file (Path): Path to the binary file.
        elf_file (Path): Path to the ELF file.
        platform (str): Target platform.
    """
    print(f"Updating {platform} ELF sections...")

    # Platform-specific preferred section names.
    section_mapping = {"esp32": ".flash.text", "avr": ".text", "arm": ".text"}
    section_name = section_mapping.get(platform, ".text")

    # Preferred section first, then the remaining common candidates.
    candidates = [section_name] + [
        s for s in (".text", ".data", ".rodata") if s != section_name
    ]

    for index, section in enumerate(candidates):
        command = [
            str(objcopy_path),
            "--update-section",
            f"{section}={bin_file}",
            str(elf_file),
        ]
        if index == 0:
            print(
                f"Updating {platform} ELF file '{elf_file}' section '{section}' with binary file '{bin_file}'"
            )
        else:
            print(f"Trying alternative section: {section}")
        try:
            _run_command(command, show_output=True)
            print(f"Successfully updated {section} section")
            return
        except RuntimeError as e:
            # Only the preferred section gets a detailed warning, matching
            # the original output; alternatives just move on.
            if index == 0:
                print(f"Warning: Could not update {section} section: {e}")

    # Previously this failure mode was silent.
    print(f"Warning: failed to update any ELF section in '{elf_file}'")
|
||||
|
||||
|
||||
def bin_to_elf(
    bin_file: Path,
    map_file: Path,
    as_path: Path,
    ld_path: Path,
    objcopy_path: Path,
    output_elf: Path,
):
    """
    Convert a binary file to ELF format with platform detection and analysis.

    Pipeline: detect the platform from toolchain paths, analyze the binary,
    generate a linker script, assemble and link a dummy ELF, then inject
    the binary into the ELF's sections. Temporary artifacts are removed
    and the analysis is saved next to the output ELF as JSON.

    Args:
        bin_file (Path): Path to the input binary file.
        map_file (Path): Path to the map file.
        as_path (Path): Path to the assembler executable.
        ld_path (Path): Path to the linker executable.
        objcopy_path (Path): Path to the objcopy executable.
        output_elf (Path): Path to the output ELF file.

    Returns:
        Path: Path to the generated ELF file.
    """
    banner = "=" * 80
    print(banner)
    print("IMPROVED BINARY TO ELF CONVERTER")
    print(banner)

    # Detect platform from toolchain paths.
    platform = _detect_platform_from_paths(as_path, ld_path)
    print(f"Detected platform: {platform}")

    # Inspect the input binary and build a platform-specific linker script.
    binary_analysis = _analyze_binary_structure(bin_file, platform)
    linker_script = _generate_platform_linker_script(map_file, platform)

    # Assemble a dummy object, link it into an ELF, then inject the binary.
    dummy_obj_path = bin_file.with_name(f"dummy_{platform}.o")
    _create_platform_object_file(as_path, dummy_obj_path, platform)
    _create_dummy_elf(ld_path, linker_script, dummy_obj_path, output_elf, platform)
    _update_elf_sections(objcopy_path, bin_file, output_elf, platform)

    # Remove intermediate artifacts.
    for temp_path, label in (
        (dummy_obj_path, "dummy object file"),
        (linker_script, "linker script"),
    ):
        if temp_path.exists():
            temp_path.unlink()
            print(f"Cleaned up {label}: {temp_path}")

    print(f"\n✅ Successfully created {platform} ELF file: {output_elf}")

    # Persist the binary analysis beside the ELF for later inspection.
    # default=str covers non-JSON values such as the raw header bytes.
    analysis_file = output_elf.with_suffix(".analysis.json")
    with open(analysis_file, "w") as f:
        json.dump(binary_analysis, f, indent=2, default=str)
    print(f"📊 Binary analysis saved to: {analysis_file}")

    return output_elf
|
||||
|
||||
|
||||
# For backward compatibility, keep the old function name
def _generate_linker_script(map_file: Path) -> Path:
    """Legacy wrapper kept for backward compatibility.

    Delegates to ``_generate_platform_linker_script`` with the
    "generic" platform.
    """
    return _generate_platform_linker_script(map_file, "generic")
|
||||
480
libraries/FastLED/ci/util/build_info_analyzer.py
Normal file
480
libraries/FastLED/ci/util/build_info_analyzer.py
Normal file
@@ -0,0 +1,480 @@
|
||||
#!/usr/bin/env python3
|
||||
# pyright: reportUnknownMemberType=false
|
||||
"""
|
||||
Build Info Analyzer - Tool for extracting platform information from build_info.json files.
|
||||
|
||||
This tool provides easy access to platform-specific defines, compiler flags, toolchain paths,
|
||||
and other build configuration information generated by PlatformIO.
|
||||
|
||||
Usage:
|
||||
python build_info_analyzer.py --board uno --show-defines
|
||||
python build_info_analyzer.py --board esp32dev --show-all
|
||||
python build_info_analyzer.py --list-boards
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
from dataclasses import asdict, dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
|
||||
@dataclass
class CompilerInfo:
    """Information about compilers and toolchain"""

    # Paths to the C and C++ compiler executables.
    cc_path: str = ""
    cxx_path: str = ""
    # Flags passed to the C and C++ compilers.
    cc_flags: List[str] = field(default_factory=list)
    cxx_flags: List[str] = field(default_factory=list)
    # Compiler family (e.g. gcc/clang) and build type (e.g. debug/release).
    compiler_type: str = ""
    build_type: str = ""
|
||||
|
||||
|
||||
@dataclass
class BuildInfo:
    """Complete build information for a platform"""

    # Board identifier (e.g. "uno", "esp32dev").
    board_name: str
    # Preprocessor define name -> value.
    defines: Dict[str, str] = field(default_factory=dict)
    # Compiler/toolchain details for this board.
    compiler_info: CompilerInfo = field(default_factory=CompilerInfo)
    # Toolchain tool name -> path (None when the tool is unavailable).
    aliases: Dict[str, Optional[str]] = field(default_factory=dict)
|
||||
|
||||
|
||||
class BuildInfoAnalyzer:
    """Analyzer for build_info.json files generated by PlatformIO builds."""

    def __init__(self, build_dir: str):
        """
        Initialize the analyzer.

        Args:
            build_dir: Directory containing platform build directories
        """
        self.build_dir = Path(build_dir)

    def list_available_boards(self) -> List[str]:
        """
        List all boards that have build_info.json files.

        Returns:
            Sorted list of board directory names that have been built.
        """
        if not self.build_dir.exists():
            return []
        return sorted(
            entry.name
            for entry in self.build_dir.iterdir()
            if entry.is_dir() and (entry / "build_info.json").exists()
        )

    def get_build_info_path(self, board_name: str) -> Optional[Path]:
        """
        Get the path to build_info.json for a specific board.

        Args:
            board_name: Name of the board (e.g., 'uno', 'esp32dev')

        Returns:
            Path to build_info.json file or None if not found
        """
        candidate = self.build_dir / board_name / "build_info.json"
        return candidate if candidate.exists() else None

    def load_build_info(self, board_name: str) -> Optional[Dict[str, Any]]:
        """
        Load and parse build_info.json for a board.

        Args:
            board_name: Name of the board

        Returns:
            Parsed JSON data, or None if the file is missing or unreadable.
        """
        info_path = self.get_build_info_path(board_name)
        if not info_path:
            return None
        try:
            with open(info_path, "r") as f:
                return json.load(f)
        except (json.JSONDecodeError, IOError) as e:
            print(f"Error loading build_info.json for {board_name}: {e}")
            return None

    def create_board_key_from_build_info(
        self, data: Dict[str, Any], board_name: str
    ) -> Optional[str]:
        """
        Get the actual board key used in build_info.json.

        Sometimes the board name differs from the directory name, so fall
        back to the sole key, then to a case-insensitive substring match.

        Args:
            data: Parsed build_info.json data
            board_name: Directory name of the board

        Returns:
            Actual board key used in JSON or None if not found
        """
        if board_name in data:
            return board_name

        keys = list(data.keys())
        if len(keys) == 1:
            return keys[0]

        lowered = board_name.lower()
        for key in keys:
            if lowered in key.lower() or key.lower() in lowered:
                return key
        return None

    def get_platform_defines(self, board_name: str) -> Tuple[bool, List[str], str]:
        """
        Get platform-specific preprocessor defines for a board.

        Args:
            board_name: Name of the board

        Returns:
            Tuple of (success, defines_list, error_message)
        """
        data = self.load_build_info(board_name)
        if not data:
            return False, [], f"Build info not found for {board_name}"

        board_key = self.create_board_key_from_build_info(data, board_name)
        if not board_key:
            available_keys = list(data.keys())
            return False, [], f"Board key not found. Available keys: {available_keys}"

        return True, data[board_key].get("defines", []), ""

    def get_compiler_info(self, board_name: str) -> Tuple[bool, CompilerInfo, str]:
        """
        Get compiler information for a board.

        Args:
            board_name: Name of the board

        Returns:
            Tuple of (success, compiler_info, error_message)
        """
        data = self.load_build_info(board_name)
        if not data:
            return False, CompilerInfo(), f"Build info not found for {board_name}"

        board_key = self.create_board_key_from_build_info(data, board_name)
        if not board_key:
            return False, CompilerInfo(), "Board key not found in build_info.json"

        board_data = data[board_key]
        info = CompilerInfo(
            cc_path=board_data.get("cc_path", ""),
            cxx_path=board_data.get("cxx_path", ""),
            cc_flags=board_data.get("cc_flags", []),
            cxx_flags=board_data.get("cxx_flags", []),
            compiler_type=board_data.get("compiler_type", ""),
            build_type=board_data.get("build_type", ""),
        )
        return True, info, ""

    def get_toolchain_aliases(
        self, board_name: str
    ) -> Tuple[bool, Dict[str, str], str]:
        """
        Get toolchain tool aliases for a board.

        Args:
            board_name: Name of the board

        Returns:
            Tuple of (success, aliases_dict, error_message)
        """
        data = self.load_build_info(board_name)
        if not data:
            return False, {}, f"Build info not found for {board_name}"

        board_key = self.create_board_key_from_build_info(data, board_name)
        if not board_key:
            return False, {}, "Board key not found in build_info.json"

        return True, data[board_key].get("aliases", {}), ""

    def get_all_info(self, board_name: str) -> Tuple[bool, Dict[str, Any], str]:
        """
        Get all available information for a board.

        Args:
            board_name: Name of the board

        Returns:
            Tuple of (success, all_info_dict, error_message)
        """
        data = self.load_build_info(board_name)
        if not data:
            return False, {}, f"Build info not found for {board_name}"

        board_key = self.create_board_key_from_build_info(data, board_name)
        if not board_key:
            return False, {}, "Board key not found in build_info.json"

        return True, data[board_key], ""

    def compare_defines(
        self, board1: str, board2: str
    ) -> Tuple[bool, Dict[str, Any], str]:
        """
        Compare platform defines between two boards.

        Args:
            board1: First board name
            board2: Second board name

        Returns:
            Tuple of (success, comparison_dict, error_message)
        """
        ok1, defines1, err1 = self.get_platform_defines(board1)
        ok2, defines2, err2 = self.get_platform_defines(board2)

        if not ok1:
            return False, {}, f"Error getting defines for {board1}: {err1}"
        if not ok2:
            return False, {}, f"Error getting defines for {board2}: {err2}"

        set1, set2 = set(defines1), set(defines2)
        comparison = {
            "board1": board1,
            "board2": board2,
            "board1_only": sorted(set1 - set2),
            "board2_only": sorted(set2 - set1),
            "common": sorted(set1 & set2),
            "board1_total": len(defines1),
            "board2_total": len(defines2),
            "common_count": len(set1 & set2),
        }
        return True, comparison, ""
|
||||
|
||||
|
||||
def print_defines(defines: List[str], board_name: str):
    """Pretty-print the platform defines for a board with a summary count."""
    print(f"\n📋 Platform Defines for {board_name.upper()}:")
    print("=" * 50)
    for entry in defines:
        print(f" {entry}")
    print(f"\nTotal: {len(defines)} defines")
|
||||
|
||||
|
||||
def print_compiler_info(compiler_info: CompilerInfo, board_name: str):
    """Pretty-print compiler and flag information for a board."""
    print(f"\n🔧 Compiler Information for {board_name.upper()}:")
    print("=" * 50)
    print(f"Compiler Type: {compiler_info.compiler_type or 'Unknown'}")
    print(f"Build Type: {compiler_info.build_type or 'Unknown'}")
    print(f"C Compiler: {compiler_info.cc_path or 'Unknown'}")
    print(f"C++ Compiler: {compiler_info.cxx_path or 'Unknown'}")

    # Flag lists are printed only when non-empty, each with a count header.
    for label, flags in (("C", compiler_info.cc_flags), ("C++", compiler_info.cxx_flags)):
        if flags:
            print(f"\n{label} Flags ({len(flags)}):")
            for flag in flags:
                print(f" {flag}")
|
||||
|
||||
|
||||
def print_toolchain_aliases(aliases: Dict[str, str], board_name: str):
    """Pretty-print toolchain tool aliases for a board."""
    print(f"\n⚙️ Toolchain Aliases for {board_name.upper()}:")
    print("=" * 50)
    for tool, tool_path in aliases.items():
        if tool_path:
            # Show just the tool's basename rather than the full path.
            print(f" {tool:10}: {Path(tool_path).name}")
        else:
            print(f" {tool:10}: Not available")
|
||||
|
||||
|
||||
def print_comparison(comparison: Dict[str, Any]):
    """Pretty-print a defines comparison produced by compare_defines()."""
    board1 = comparison["board1"]
    board2 = comparison["board2"]

    print("\n🔍 Platform Defines Comparison:")
    print("=" * 60)
    print(f"📊 {board1.upper()} vs {board2.upper()}")
    print(f" {board1}: {comparison['board1_total']} defines")
    print(f" {board2}: {comparison['board2_total']} defines")
    print(f" Common: {comparison['common_count']} defines")

    # Board-exclusive sections, then the shared defines.
    exclusive = (
        ("board1_only", "🔴", board1),
        ("board2_only", "🔵", board2),
    )
    for key, dot, name in exclusive:
        entries = comparison[key]
        if entries:
            print(f"\n{dot} Only in {name.upper()} ({len(entries)}):")
            for define in entries:
                print(f" {define}")

    if comparison["common"]:
        print(f"\n🟢 Common Defines ({len(comparison['common'])}):")
        for define in comparison["common"]:
            print(f" {define}")
|
||||
|
||||
|
||||
def main():
    """Command-line entry point: parse arguments and dispatch to the analyzer."""
    parser = argparse.ArgumentParser(
        description="Analyze build_info.json files to extract platform information",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s --list-boards                     # List all available boards
  %(prog)s --board uno --show-defines        # Show UNO platform defines
  %(prog)s --board esp32dev --show-compiler  # Show ESP32 compiler info
  %(prog)s --board teensy31 --show-all       # Show all info for Teensy 3.1
  %(prog)s --compare uno esp32dev            # Compare defines between boards
""",
    )
    parser.add_argument(
        "--build-dir",
        default=".build",
        help="Build directory containing platform subdirectories (default: .build)",
    )
    parser.add_argument(
        "--list-boards",
        action="store_true",
        help="List all boards with build_info.json files",
    )
    parser.add_argument(
        "--board", help="Board name to analyze (e.g., uno, esp32dev, teensy31)"
    )
    parser.add_argument(
        "--show-defines", action="store_true", help="Show platform preprocessor defines"
    )
    parser.add_argument(
        "--show-compiler", action="store_true", help="Show compiler information"
    )
    parser.add_argument(
        "--show-toolchain", action="store_true", help="Show toolchain aliases"
    )
    parser.add_argument(
        "--show-all", action="store_true", help="Show all available information"
    )
    parser.add_argument(
        "--compare",
        nargs=2,
        metavar=("BOARD1", "BOARD2"),
        help="Compare platform defines between two boards",
    )
    parser.add_argument(
        "--json", action="store_true", help="Output results in JSON format"
    )

    args = parser.parse_args()
    analyzer = BuildInfoAnalyzer(args.build_dir)

    # Mode 1: list every board with build artifacts.
    if args.list_boards:
        boards = analyzer.list_available_boards()
        if not boards:
            print("❌ No boards with build_info.json found in build directory")
            print(f" Directory: {analyzer.build_dir}")
            print(
                " Try running a compilation first: uv run python -m ci.ci-compile uno --examples Blink"
            )
            return 1
        print(f"📋 Available boards ({len(boards)}):")
        for board in boards:
            print(f" ✅ {board}")
        return 0

    # Mode 2: compare defines between two boards.
    if args.compare:
        board1, board2 = args.compare
        ok, comparison, error = analyzer.compare_defines(board1, board2)
        if not ok:
            print(f"❌ Error: {error}")
            return 1
        if args.json:
            print(json.dumps(comparison, indent=2))
        else:
            print_comparison(comparison)
        return 0

    # Mode 3: single-board analysis requires --board.
    if not args.board:
        print("❌ Error: --board is required (or use --list-boards or --compare)")
        return 1

    if args.show_defines or args.show_all:
        ok, defines, error = analyzer.get_platform_defines(args.board)
        if not ok:
            print(f"❌ Error getting defines: {error}")
            return 1
        if args.json:
            print(json.dumps({"defines": defines}, indent=2))
        else:
            print_defines(defines, args.board)

    if args.show_compiler or args.show_all:
        ok, compiler_info, error = analyzer.get_compiler_info(args.board)
        if not ok:
            print(f"❌ Error getting compiler info: {error}")
            return 1
        if args.json:
            print(json.dumps({"compiler": asdict(compiler_info)}, indent=2))
        else:
            print_compiler_info(compiler_info, args.board)

    if args.show_toolchain or args.show_all:
        ok, aliases, error = analyzer.get_toolchain_aliases(args.board)
        if not ok:
            print(f"❌ Error getting toolchain aliases: {error}")
            return 1
        if args.json:
            print(json.dumps({"toolchain": aliases}, indent=2))
        else:
            print_toolchain_aliases(aliases, args.board)

    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    raise SystemExit(main())
|
||||
328
libraries/FastLED/ci/util/cached_compiler.py
Normal file
328
libraries/FastLED/ci/util/cached_compiler.py
Normal file
@@ -0,0 +1,328 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Python-based cached compiler system for sccache integration.
|
||||
|
||||
This module provides Python scripts that act as cached compilers, wrapping the real
|
||||
toolchain with cache support. Unlike batch scripts, Python wrappers can:
|
||||
- Properly resolve toolchain paths from PlatformIO platform packages
|
||||
- Handle response files and complex argument parsing
|
||||
- Provide better error handling and debugging
|
||||
- Work consistently across platforms (Windows/Unix)
|
||||
"""
|
||||
|
||||
import os
|
||||
import platform
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
|
||||
def find_toolchain_compiler(
    compiler_name: str, platform_packages_paths: List[str]
) -> Optional[str]:
    """
    Find the real compiler binary in PlatformIO platform packages.

    Resolution order: the name taken as an absolute path, a PATH lookup,
    then a glob search of each package directory's bin/ folders (with and
    without a Windows .exe suffix).

    Args:
        compiler_name: Name of compiler (e.g., 'arm-none-eabi-gcc', 'gcc', 'clang')
        platform_packages_paths: List of paths to search for toolchain

    Returns:
        Absolute path to compiler binary if found, None otherwise
    """
    # 1. Already an existing absolute path?
    direct = Path(compiler_name)
    if direct.is_absolute() and direct.exists():
        return str(direct.resolve())

    # 2. Available on PATH?
    on_path = shutil.which(compiler_name)
    if on_path:
        return str(Path(on_path).resolve())

    # 3. Search common toolchain bin/ locations inside each package dir.
    patterns = (
        f"bin/{compiler_name}",
        f"bin/{compiler_name}.exe",
        f"**/bin/{compiler_name}",
        f"**/bin/{compiler_name}.exe",
    )
    for package_path in platform_packages_paths:
        root = Path(package_path)
        if not root.exists():
            continue
        for pattern in patterns:
            for match in root.glob(pattern):
                if match.is_file():
                    return str(match.resolve())

    return None
|
||||
|
||||
|
||||
def create_cached_compiler_script(
|
||||
compiler_name: str,
|
||||
cache_executable: str,
|
||||
real_compiler_path: str,
|
||||
output_dir: Path,
|
||||
debug: bool = False,
|
||||
) -> Path:
|
||||
"""
|
||||
Create a Python script that acts as a cached compiler.
|
||||
|
||||
Args:
|
||||
compiler_name: Name for the cached compiler (e.g., 'gcc', 'g++')
|
||||
cache_executable: Path to cache tool (sccache, ccache, etc.)
|
||||
real_compiler_path: Path to the real compiler to wrap
|
||||
output_dir: Directory to create the cached compiler script
|
||||
debug: Enable debug output
|
||||
|
||||
Returns:
|
||||
Path to the created cached compiler script
|
||||
"""
|
||||
script_base = f"cached_{compiler_name.replace('-', '_')}"
|
||||
script_py = output_dir / f"{script_base}.py"
|
||||
|
||||
script_content = f'''#!/usr/bin/env python3
|
||||
"""
|
||||
Cached compiler wrapper for {compiler_name}
|
||||
Generated automatically for sccache integration.
|
||||
|
||||
This script acts as a cached compiler that wraps the real {compiler_name}
|
||||
with cache support. It resolves the real compiler path and forwards
|
||||
all arguments to the cache tool.
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Configuration (set at generation time)
|
||||
CACHE_EXECUTABLE = r"{cache_executable}"
|
||||
REAL_COMPILER_PATH = r"{real_compiler_path}"
|
||||
DEBUG = {debug}
|
||||
|
||||
def debug_print(msg: str) -> None:
|
||||
"""Print debug message if debug mode is enabled."""
|
||||
if DEBUG:
|
||||
print(f"CACHED_COMPILER[{compiler_name}]: {{msg}}", file=sys.stderr)
|
||||
|
||||
def main() -> int:
|
||||
"""Main entry point for cached compiler."""
|
||||
try:
|
||||
# Verify real compiler exists
|
||||
if not Path(REAL_COMPILER_PATH).exists():
|
||||
print(f"ERROR: Real compiler not found: {{REAL_COMPILER_PATH}}", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
# Verify cache executable exists
|
||||
cache_path = CACHE_EXECUTABLE
|
||||
if not Path(cache_path).exists() and not shutil.which(cache_path.split()[0]):
|
||||
print(f"ERROR: Cache executable not found: {{cache_path}}", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
# Build command: cache_tool real_compiler args...
|
||||
if " " in cache_path:
|
||||
# Handle "python xcache.py" style commands
|
||||
cache_parts = cache_path.split()
|
||||
command = cache_parts + [REAL_COMPILER_PATH] + sys.argv[1:]
|
||||
else:
|
||||
# Handle simple "sccache" style commands
|
||||
command = [cache_path, REAL_COMPILER_PATH] + sys.argv[1:]
|
||||
|
||||
debug_print(f"Executing: {{' '.join(command)}}")
|
||||
|
||||
# Execute the cache tool with real compiler and arguments
|
||||
result = subprocess.run(command, cwd=os.getcwd())
|
||||
return result.returncode
|
||||
except KeyboardInterrupt:
|
||||
print(f"Keyboard interrupt in cached compiler {{compiler_name}}", file=sys.stderr)
|
||||
return 1
|
||||
except Exception as e:
|
||||
print(f"ERROR in cached compiler {{compiler_name}}: {{e}}", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
'''
|
||||
|
||||
# Write the script
|
||||
script_py.write_text(script_content, encoding="utf-8")
|
||||
|
||||
# Detect Windows reliably, including MSYS/Cygwin Git-Bash environments where
|
||||
# os.name may report 'posix'. In those cases we still need a .cmd shim so
|
||||
# that response files ("@file.tmp") are not misinterpreted by 'python'.
|
||||
is_windows = (
|
||||
os.name == "nt"
|
||||
or sys.platform.startswith("win")
|
||||
or sys.platform.startswith("cygwin")
|
||||
or sys.platform.startswith("msys")
|
||||
or platform.system().lower().startswith(("windows", "msys", "cygwin"))
|
||||
)
|
||||
|
||||
# Make executable script on Unix-like systems
|
||||
if not is_windows:
|
||||
script_py.chmod(script_py.stat().st_mode | 0o755)
|
||||
return script_py
|
||||
|
||||
# On Windows, create a .cmd shim so our wrapper is the program token
|
||||
script_cmd = output_dir / f"{script_base}.cmd"
|
||||
# Use system python; PlatformIO environment provides the right interpreter
|
||||
cmd_content = f'@echo off\r\npython "{script_py}" %*\r\nexit /b %ERRORLEVEL%\r\n'
|
||||
script_cmd.write_text(cmd_content, encoding="utf-8")
|
||||
return script_cmd
|
||||
|
||||
|
||||
def create_cached_toolchain(
    toolchain_info: Dict[str, str],
    cache_config: Dict[str, str],
    platform_packages_paths: List[str],
    output_dir: Path,
    debug: bool = False,
) -> Dict[str, str]:
    """
    Create a complete set of cached compiler scripts for a toolchain.

    Args:
        toolchain_info: Mapping of tool names to real paths/names
        cache_config: Cache configuration (CACHE_EXECUTABLE, etc.)
        platform_packages_paths: Paths to search for real compilers
        output_dir: Directory to create cached scripts
        debug: Enable debug output

    Returns:
        Mapping of tool names to cached script paths
    """
    output_dir.mkdir(parents=True, exist_ok=True)

    launcher = cache_config.get("CACHE_EXECUTABLE", "sccache")
    wrapped: Dict[str, str] = {}

    # Only compiler front-ends benefit from caching; everything else
    # (ar, ld, objcopy, ...) is left untouched.
    compiler_markers = {"CC", "CXX", "gcc", "g++", "clang", "clang++"}

    for tool_name, tool_target in toolchain_info.items():
        # Classify by the executable's basename (or the tool key itself
        # when no target path/name is given).
        basename = Path(tool_target).name if tool_target else tool_name
        if not any(marker in basename for marker in compiler_markers):
            if debug:
                print(f"Skipping non-cacheable tool: {tool_name} = {tool_target}")
            continue

        # Resolve the real compiler binary before wrapping it.
        resolved = find_toolchain_compiler(tool_target, platform_packages_paths)
        if not resolved:
            if debug:
                print(
                    f"WARNING: Could not find real compiler for {tool_name} = {tool_target}"
                )
            continue

        script_path = create_cached_compiler_script(
            compiler_name=tool_name,
            cache_executable=launcher,
            real_compiler_path=resolved,
            output_dir=output_dir,
            debug=debug,
        )

        # Use returned script path directly (Windows: .cmd shim; Unix: executable .py)
        wrapped[tool_name] = str(script_path)

        if debug:
            print(
                f"Created cached {tool_name}: {wrapped[tool_name]} -> {resolved}"
            )

    return wrapped
|
||||
|
||||
|
||||
def get_platform_packages_paths() -> List[str]:
    """
    Get list of platform package paths from PlatformIO.

    Queries ``pio system info`` for the PlatformIO home directory, then
    adds well-known fallback install locations. Results are de-duplicated
    while preserving discovery order (the previous version appended the
    fallback locations unconditionally, producing duplicate entries
    whenever the ``pio`` query succeeded).

    Returns:
        List of paths where platform packages are installed
    """
    paths: List[str] = []
    seen = set()

    def _add_packages(packages_dir: Path) -> None:
        # Append every package directory under packages_dir exactly once.
        if not packages_dir.exists():
            return
        for package_dir in packages_dir.iterdir():
            if package_dir.is_dir():
                entry = str(package_dir)
                if entry not in seen:
                    seen.add(entry)
                    paths.append(entry)

    # Preferred source of truth: ask PlatformIO where its home directory is.
    try:
        result = subprocess.run(
            ["pio", "system", "info", "--json"],
            capture_output=True,
            text=True,
            timeout=30,
        )
        if result.returncode == 0:
            import json

            info = json.loads(result.stdout)
            platformio_home = info.get("platformio_home_dir")
            if platformio_home:
                _add_packages(Path(platformio_home) / "packages")
    except KeyboardInterrupt:
        print("Keyboard interrupt in get_platform_packages_paths")
        sys.exit(1)
    except Exception:
        # Best-effort: fall through to the well-known locations below.
        pass

    # Fallback: common PlatformIO package locations. These may overlap with
    # the query result above; _add_packages de-duplicates.
    common_locations = [
        Path.home() / ".platformio" / "packages",
        Path.home() / ".fastled" / "packages",
        Path("C:/Users") / os.environ.get("USERNAME", "") / ".platformio" / "packages",
        Path("C:/Users") / os.environ.get("USERNAME", "") / ".fastled" / "packages",
    ]
    for location in common_locations:
        _add_packages(location)

    return paths
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Manual smoke test: exercises the full pipeline (package discovery,
    # compiler resolution, cached-wrapper generation) with console output.
    # Test the cached compiler system
    print("Testing cached compiler system...")

    # Get platform packages
    packages = get_platform_packages_paths()
    print(f"Found {len(packages)} platform packages")

    # Test finding a common compiler
    test_compiler = find_toolchain_compiler("gcc", packages)
    print(f"Found gcc at: {test_compiler}")

    # Create test cached compiler
    test_dir = Path(tempfile.mkdtemp())
    try:
        cached_tools = create_cached_toolchain(
            toolchain_info={"CC": "gcc", "CXX": "g++"},
            cache_config={"CACHE_EXECUTABLE": "sccache"},
            platform_packages_paths=packages,
            output_dir=test_dir,
            debug=True,
        )
        print(f"Created cached tools: {cached_tools}")
    finally:
        # Always remove the temp directory, even when the test fails.
        shutil.rmtree(test_dir, ignore_errors=True)
|
||||
124
libraries/FastLED/ci/util/ccache_config.py
Normal file
124
libraries/FastLED/ci/util/ccache_config.py
Normal file
@@ -0,0 +1,124 @@
|
||||
"""Configure CCACHE for PlatformIO builds."""
|
||||
|
||||
import os
|
||||
import platform
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Any, Protocol
|
||||
|
||||
|
||||
# ruff: noqa: F821
|
||||
# pyright: reportUndefinedVariable=false
|
||||
Import("env") # type: ignore # Import is provided by PlatformIO
|
||||
|
||||
|
||||
class PlatformIOEnv(Protocol):
    """Type information for PlatformIO environment.

    Structural (duck-typed) interface for the SCons construction
    environment that PlatformIO injects via ``Import("env")``; only the
    two members this module actually uses are declared.
    """

    def get(self, key: str, default: str | None = None) -> str | None:
        """Get a value from the environment (e.g. "CC", "PROJECT_DIR", "PIOENV")."""
        ...

    def Replace(self, **kwargs: Any) -> None:
        """Replace environment variables (SCons ``Environment.Replace``)."""
        ...
|
||||
|
||||
|
||||
def is_ccache_available() -> bool:
    """Return True when a working ``ccache`` binary can be invoked."""
    try:
        # A successful --version run is the cheapest availability probe.
        subprocess.run(["ccache", "--version"], capture_output=True, check=True)
    except (subprocess.CalledProcessError, FileNotFoundError):
        return False
    return True
|
||||
|
||||
|
||||
def get_ccache_path() -> str | None:
|
||||
"""Get the full path to ccache executable."""
|
||||
if platform.system() == "Windows":
|
||||
# On Windows, look in chocolatey's bin directory
|
||||
ccache_paths = [
|
||||
"C:\\ProgramData\\chocolatey\\bin\\ccache.exe",
|
||||
os.path.expanduser("~\\scoop\\shims\\ccache.exe"),
|
||||
]
|
||||
for path in ccache_paths:
|
||||
if os.path.exists(path):
|
||||
return path
|
||||
else:
|
||||
# On Unix-like systems, use which to find ccache
|
||||
try:
|
||||
return subprocess.check_output(["which", "ccache"]).decode().strip()
|
||||
except subprocess.CalledProcessError:
|
||||
pass
|
||||
return None
|
||||
|
||||
|
||||
def configure_ccache(env: PlatformIOEnv) -> None:  # type: ignore # env is provided by PlatformIO
    """Configure CCACHE for the build environment.

    Sets up a per-board cache directory, tunes CCACHE via environment
    variables, and wraps CC/CXX with the ccache launcher. No-op (with a
    message) when ccache is unavailable.

    Args:
        env: PlatformIO SCons construction environment.

    Raises:
        RuntimeError: if CC or CXX is not set in the environment.
    """
    if not is_ccache_available():
        print("CCACHE is not available. Skipping CCACHE configuration.")
        return

    ccache_path = get_ccache_path()
    if not ccache_path:
        print("Could not find CCACHE executable. Skipping CCACHE configuration.")
        return

    print(f"Found CCACHE at: {ccache_path}")

    # Set up CCACHE environment variables if not already set
    if "CCACHE_DIR" not in os.environ:
        project_dir = env.get("PROJECT_DIR")
        if project_dir is None:
            project_dir = os.getcwd()

        # Use board-specific ccache directory if PIOENV (board environment) is available
        board_name = env.get("PIOENV")
        if board_name:
            ccache_dir = os.path.join(project_dir, ".ccache", board_name)
        else:
            ccache_dir = os.path.join(project_dir, ".ccache", "default")

        os.environ["CCACHE_DIR"] = ccache_dir
        Path(ccache_dir).mkdir(parents=True, exist_ok=True)
        print(f"Using board-specific CCACHE directory: {ccache_dir}")

    # Configure CCACHE for this build
    project_dir = env.get("PROJECT_DIR")
    if project_dir is None:
        project_dir = os.getcwd()
    os.environ["CCACHE_BASEDIR"] = project_dir
    os.environ["CCACHE_COMPRESS"] = "true"
    os.environ["CCACHE_COMPRESSLEVEL"] = "6"
    os.environ["CCACHE_MAXSIZE"] = "400M"

    # Wrap compiler commands with ccache
    # STRICT: CC and CXX must be explicitly set - NO fallbacks allowed
    original_cc = env.get("CC")
    if not original_cc:
        raise RuntimeError(
            "CRITICAL: CC environment variable is required but not set. "
            "Please set CC to the C compiler path (e.g., gcc, clang)."
        )
    original_cxx = env.get("CXX")
    if not original_cxx:
        raise RuntimeError(
            "CRITICAL: CXX environment variable is required but not set. "
            "Please set CXX to the C++ compiler path (e.g., g++, clang++)."
        )

    # Don't wrap if already wrapped. (CLEANUP: the previous redundant
    # `original_cc is not None` test was dead code — the guard above
    # already raised when CC was falsy.)
    if "ccache" not in original_cc:
        env.Replace(
            CC=f"{ccache_path} {original_cc}",
            CXX=f"{ccache_path} {original_cxx}",
        )
        print(f"Wrapped CC: {env.get('CC')}")
        print(f"Wrapped CXX: {env.get('CXX')}")

    # Show CCACHE stats
    subprocess.run([ccache_path, "--show-stats"], check=False)
|
||||
|
||||
|
||||
# ruff: noqa: F821
|
||||
configure_ccache(env) # type: ignore # env is provided by PlatformIO
|
||||
217
libraries/FastLED/ci/util/check_files.py
Normal file
217
libraries/FastLED/ci/util/check_files.py
Normal file
@@ -0,0 +1,217 @@
|
||||
# pyright: reportUnknownMemberType=false
|
||||
import os
|
||||
from abc import ABC, abstractmethod
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
from ci.util.paths import PROJECT_ROOT
|
||||
|
||||
|
||||
SRC_ROOT = PROJECT_ROOT / "src"
|
||||
|
||||
NUM_WORKERS = 1 if os.environ.get("NO_PARALLEL") else (os.cpu_count() or 1) * 4
|
||||
|
||||
EXCLUDED_FILES = [
|
||||
"stub_main.cpp",
|
||||
]
|
||||
|
||||
|
||||
@dataclass
class FileContent:
    """Container for file content and metadata."""

    # Location of the file on disk
    path: str
    # Full text of the file
    content: str
    # Content split into lines; derived from `content` when passed empty
    lines: List[str]

    def __post_init__(self):
        # Callers may pass an empty list and have the line split done here.
        self.lines = self.lines or self.content.splitlines()
|
||||
|
||||
|
||||
class FileContentChecker(ABC):
    """Abstract base class for checking file content.

    Subclasses implement a cheap path-based filter (should_process_file)
    and a content check (check_file_content). MultiCheckerFileProcessor
    reads each file once and fans its content out to every interested
    checker.
    """

    @abstractmethod
    def should_process_file(self, file_path: str) -> bool:
        """Predicate to determine if a file should be processed.

        Args:
            file_path: Path to the file to check

        Returns:
            True if the file should be processed, False otherwise
        """
        pass

    @abstractmethod
    def check_file_content(self, file_content: FileContent) -> List[str]:
        """Check the file content and return any issues found.

        Args:
            file_content: FileContent object containing path, content, and lines

        Returns:
            List of error messages, empty if no issues found
        """
        pass
|
||||
|
||||
|
||||
class MultiCheckerFileProcessor:
    """Processor that can run multiple checkers on files.

    Each file is read from disk at most once, then handed to every
    checker whose should_process_file() predicate accepts it.
    """

    def __init__(self):
        pass

    def process_files_with_checkers(
        self, file_paths: List[str], checkers: List[FileContentChecker]
    ) -> Dict[str, List[str]]:
        """Process files with multiple checkers.

        Args:
            file_paths: List of file paths to process
            checkers: List of checker instances to run on the files

        Returns:
            Dictionary mapping checker class name to list of issues found
        """
        # One result bucket per checker, keyed by class name.
        results: Dict[str, List[str]] = {
            checker.__class__.__name__: [] for checker in checkers
        }

        for file_path in file_paths:
            # Only checkers whose predicate accepts this path get to see it.
            interested = [
                checker for checker in checkers
                if checker.should_process_file(file_path)
            ]
            if not interested:
                continue

            try:
                with open(file_path, "r", encoding="utf-8") as handle:
                    text = handle.read()

                payload = FileContent(
                    path=file_path, content=text, lines=text.splitlines()
                )

                # Fan the single read out to every interested checker.
                for checker in interested:
                    results[checker.__class__.__name__].extend(
                        checker.check_file_content(payload)
                    )

            except Exception as e:
                # Record the failure under every interested checker.
                error_msg = f"Error reading file {file_path}: {str(e)}"
                for checker in interested:
                    results[checker.__class__.__name__].append(error_msg)

        return results
|
||||
|
||||
|
||||
# Legacy compatibility classes
|
||||
class FileProcessorCallback(FileContentChecker):
    """Legacy compatibility wrapper - delegates to FileContentChecker methods."""

    def check_file_content_legacy(self, file_path: str, content: str) -> List[str]:
        """Legacy method signature for backward compatibility."""
        # Adapt the old (path, content) pair to the FileContent container;
        # __post_init__ derives the line list from the empty placeholder.
        wrapped = FileContent(path=file_path, content=content, lines=[])
        return self.check_file_content(wrapped)
|
||||
|
||||
|
||||
class GenericFileSearcher:
    """Generic file searcher that processes files using a callback pattern."""

    def __init__(self, max_workers: Optional[int] = None):
        # Fall back to the module-wide worker count when unspecified.
        self.max_workers = max_workers or NUM_WORKERS

    def search_directory(
        self, start_dir: str, callback: FileProcessorCallback
    ) -> List[str]:
        """Search a directory and process files using the provided callback.

        Args:
            start_dir: Directory to start searching from
            callback: Callback class to handle file processing

        Returns:
            List of all issues found across all files
        """
        # Phase 1: gather every path the callback wants to see.
        candidates: List[str] = []
        for root, _, names in os.walk(start_dir):
            for name in names:
                full_path = os.path.join(root, name)
                if callback.should_process_file(full_path):
                    candidates.append(full_path)

        # Phase 2: fan the work out to a thread pool and flatten results.
        issues: List[str] = []
        with ThreadPoolExecutor(max_workers=self.max_workers) as pool:
            pending = [
                pool.submit(self._process_single_file, path, callback)
                for path in candidates
            ]
            for task in pending:
                issues.extend(task.result())

        return issues

    def _process_single_file(
        self, file_path: str, callback: FileProcessorCallback
    ) -> List[str]:
        """Process a single file using the callback.

        Args:
            file_path: Path to the file to process
            callback: Callback to use for processing

        Returns:
            List of issues found in this file
        """
        try:
            with open(file_path, "r", encoding="utf-8") as handle:
                text = handle.read()
            record = FileContent(path=file_path, content=text, lines=[])
            return callback.check_file_content(record)
        except Exception as e:
            # Surface read/check failures as issues rather than crashing the pool.
            return [f"Error processing file {file_path}: {str(e)}"]
|
||||
|
||||
|
||||
def collect_files_to_check(
    test_directories: List[str], extensions: Optional[List[str]] = None
) -> List[str]:
    """Collect all files to check from the given directories.

    Walks each directory recursively, then adds the top-level (non-recursive)
    files of SRC_ROOT, keeping only paths with a matching extension.
    """
    # str.endswith accepts a tuple, replacing the per-extension any() loop.
    suffixes = tuple(extensions) if extensions is not None else (".cpp", ".h", ".hpp")

    found: List[str] = []

    # Walk each requested directory tree.
    for directory in test_directories:
        if not os.path.exists(directory):
            continue
        for root, _, names in os.walk(directory):
            for name in names:
                if name.endswith(suffixes):
                    found.append(os.path.join(root, name))

    # Also check the main src directory files (not subdirectories).
    for name in os.listdir(SRC_ROOT):
        candidate = os.path.join(SRC_ROOT, name)
        if os.path.isfile(candidate) and candidate.endswith(suffixes):
            found.append(candidate)

    return found
|
||||
477
libraries/FastLED/ci/util/check_implementation_files.py.disabled
Normal file
477
libraries/FastLED/ci/util/check_implementation_files.py.disabled
Normal file
@@ -0,0 +1,477 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Implementation Files Checker
|
||||
Scans src/fl/ and src/fx/ directories for *.hpp and *.cpp.hpp files.
|
||||
Provides statistics and can verify inclusion in the all-source build.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Set
|
||||
|
||||
|
||||
# Get project root directory
|
||||
PROJECT_ROOT = Path(__file__).parent.parent.parent
|
||||
SRC_ROOT = PROJECT_ROOT / "src"
|
||||
FL_DIR = SRC_ROOT / "fl"
|
||||
FX_DIR = SRC_ROOT / "fx"
|
||||
ALL_SOURCE_BUILD_FILE = SRC_ROOT / "fastled_compile.hpp.cpp"
|
||||
|
||||
# Hierarchical compile files
|
||||
HIERARCHICAL_FILES = [
|
||||
SRC_ROOT / "fl" / "fl_compile.hpp",
|
||||
SRC_ROOT / "fx" / "fx_compile.hpp",
|
||||
SRC_ROOT / "sensors" / "sensors_compile.hpp",
|
||||
SRC_ROOT / "platforms" / "platforms_compile.hpp",
|
||||
SRC_ROOT / "third_party" / "third_party_compile.hpp",
|
||||
SRC_ROOT / "src_compile.hpp",
|
||||
]
|
||||
|
||||
# Detect if running in CI/test environment for ASCII-only output
|
||||
USE_ASCII_ONLY = (
|
||||
os.environ.get("FASTLED_CI_NO_INTERACTIVE") == "true"
|
||||
or os.environ.get("GITHUB_ACTIONS") == "true"
|
||||
or os.environ.get("CI") == "true"
|
||||
)
|
||||
|
||||
|
||||
def collect_files_by_type(directory: Path) -> Dict[str, List[Path]]:
    """Collect files by type (.hpp vs .cpp.hpp) from a directory.

    Args:
        directory: Directory to scan recursively

    Returns:
        Dictionary with 'hpp' and 'cpp_hpp' keys containing lists of files
    """
    buckets: Dict[str, List[Path]] = {"hpp": [], "cpp_hpp": []}

    if not directory.exists():
        print(f"Warning: Directory {directory} does not exist")
        return buckets

    # "*.cpp.hpp" names also match the "*.hpp" glob, so classify by suffix.
    for candidate in directory.rglob("*.hpp"):
        key = "cpp_hpp" if candidate.name.endswith(".cpp.hpp") else "hpp"
        buckets[key].append(candidate)

    return buckets
|
||||
|
||||
|
||||
def get_all_source_build_includes() -> Set[str]:
    """Extract the list of #include statements from the all-source build files.

    This function handles the hierarchical structure by checking:
    1. The main all-source build file (fastled_compile.hpp.cpp)
    2. All hierarchical module compile files (fl_compile.hpp, fx_compile.hpp, etc.)

    Returns:
        Set of included file paths (relative to src/)
    """
    collected: Set[str] = set()

    # Check main all-source build file
    if not ALL_SOURCE_BUILD_FILE.exists():
        print(f"Warning: All-source build file {ALL_SOURCE_BUILD_FILE} does not exist")
        return collected

    def _includes_of(source_file: Path) -> Set[str]:
        # Parse local '#include "..."' targets from one file.
        found: Set[str] = set()
        if not source_file.exists():
            return found
        try:
            with open(source_file, "r", encoding="utf-8") as handle:
                for raw in handle:
                    stripped = raw.strip()
                    if stripped.startswith('#include "') and stripped.endswith('"'):
                        # Drop the '#include "' prefix (10 chars) and closing quote.
                        found.add(stripped[10:-1])
        except Exception as e:
            print(f"Error reading file {source_file}: {e}")
        return found

    # Main file first, then every hierarchical compile file that exists.
    collected.update(_includes_of(ALL_SOURCE_BUILD_FILE))
    for hierarchical_file in HIERARCHICAL_FILES:
        if hierarchical_file.exists():
            collected.update(_includes_of(hierarchical_file))

    return collected
|
||||
|
||||
|
||||
def check_inclusion_in_all_source_build(
    files: Dict[str, List[Path]], base_dir: Path
) -> Dict[str, Dict[str, bool]]:
    """Check which implementation files are included in the all-source build.

    Args:
        files: Dictionary of files by type ('hpp' / 'cpp_hpp')
        base_dir: Base directory (fl or fx) for relative path calculation

    Returns:
        Dictionary mapping file types to dictionaries of file -> included status
    """
    known_includes = get_all_source_build_includes()

    status: Dict[str, Dict[str, bool]] = {"hpp": {}, "cpp_hpp": {}}

    # Only .cpp.hpp implementation files are expected in the all-source build.
    for candidate in files.get("cpp_hpp", []):
        # Compare against includes using a src/-relative, forward-slash path.
        relative = candidate.relative_to(SRC_ROOT)
        normalized = str(relative).replace("\\", "/")  # normalize path separators
        status["cpp_hpp"][str(candidate)] = normalized in known_includes

    return status
|
||||
|
||||
|
||||
def print_file_list(
    files: List[Path], title: str, base_dir: Path, show_relative: bool = True
):
    """Print a formatted list of files.

    Args:
        files: List of file paths
        title: Title for the section
        base_dir: Base directory for relative path calculation
        show_relative: Whether to show relative paths
    """
    print(f"\n{title} ({len(files)} files):")
    print("-" * (len(title) + 20))

    if not files:
        print(" (none)")
        return

    # Stable ordering for reproducible output.
    for i, entry in enumerate(sorted(files, key=str), start=1):
        shown = entry.relative_to(base_dir) if show_relative else entry.name
        print(f" {i:2d}. {shown}")
|
||||
|
||||
|
||||
def print_inclusion_report(
    inclusion_status: Dict[str, Dict[str, bool]], base_dir: Path
):
    """Print a report of which implementation files are included in all-source build.

    Args:
        inclusion_status: Dictionary of inclusion status by file type
        base_dir: Base directory for relative path calculation
    """
    cpp_hpp_status = inclusion_status.get("cpp_hpp", {})
    if not cpp_hpp_status:
        print("\nNo .cpp.hpp files found to check for inclusion")
        return

    # Partition into included / missing, sorted for stable output.
    included = sorted(path for path, ok in cpp_hpp_status.items() if ok)
    missing = sorted(path for path, ok in cpp_hpp_status.items() if not ok)

    print("\nALL-SOURCE BUILD INCLUSION STATUS:")
    print("=" * 50)

    # Use ASCII or Unicode symbols based on environment
    check_symbol = "[+]" if USE_ASCII_ONLY else "✅"
    cross_symbol = "[-]" if USE_ASCII_ONLY else "❌"

    print(f"{check_symbol} Included in all-source build: {len(included)}")
    for entry in included:
        print(f" {check_symbol} {Path(entry).relative_to(SRC_ROOT)}")

    print(f"\n{cross_symbol} Missing from all-source build: {len(missing)}")
    for entry in missing:
        print(f" {cross_symbol} {Path(entry).relative_to(SRC_ROOT)}")
|
||||
|
||||
|
||||
def generate_summary_report(
    fl_files: Dict[str, List[Path]], fx_files: Dict[str, List[Path]]
) -> Dict:
    """Generate a summary report with statistics.

    Args:
        fl_files: Files found in fl/ directory
        fx_files: Files found in fx/ directory

    Returns:
        Dictionary containing summary statistics
    """

    def _stats(bucket: Dict[str, List[Path]]) -> Dict[str, int]:
        # Per-directory counts of header and implementation files.
        hpp_count = len(bucket["hpp"])
        cpp_hpp_count = len(bucket["cpp_hpp"])
        return {
            "hpp_files": hpp_count,
            "cpp_hpp_files": cpp_hpp_count,
            "total_files": hpp_count + cpp_hpp_count,
        }

    fl_stats = _stats(fl_files)
    fx_stats = _stats(fx_files)

    return {
        "fl_directory": fl_stats,
        "fx_directory": fx_stats,
        "totals": {
            "hpp_files": fl_stats["hpp_files"] + fx_stats["hpp_files"],
            "cpp_hpp_files": fl_stats["cpp_hpp_files"] + fx_stats["cpp_hpp_files"],
            "total_files": fl_stats["total_files"] + fx_stats["total_files"],
        },
    }
|
||||
|
||||
|
||||
def print_summary_report(summary: Dict):
    """Print a formatted summary report.

    Args:
        summary: Summary statistics dictionary as produced by
            generate_summary_report() (keys: 'fl_directory',
            'fx_directory', 'totals').
    """
    print("\n" + "=" * 80)
    print("IMPLEMENTATION FILES SUMMARY REPORT")
    print("=" * 80)

    # Use ASCII or Unicode symbols based on environment
    folder_symbol = "[DIR]" if USE_ASCII_ONLY else "📁"
    chart_symbol = "[STATS]" if USE_ASCII_ONLY else "📊"
    ratio_symbol = "[RATIO]" if USE_ASCII_ONLY else "📈"

    # Per-directory breakdowns, paths shown relative to the project root.
    print(f"\n{folder_symbol} FL DIRECTORY ({FL_DIR.relative_to(PROJECT_ROOT)}):")
    print(
        f" Header files (.hpp): {summary['fl_directory']['hpp_files']:3d}"
    )
    print(
        f" Implementation files (.cpp.hpp): {summary['fl_directory']['cpp_hpp_files']:3d}"
    )
    print(
        f" Total files: {summary['fl_directory']['total_files']:3d}"
    )

    print(f"\n{folder_symbol} FX DIRECTORY ({FX_DIR.relative_to(PROJECT_ROOT)}):")
    print(
        f" Header files (.hpp): {summary['fx_directory']['hpp_files']:3d}"
    )
    print(
        f" Implementation files (.cpp.hpp): {summary['fx_directory']['cpp_hpp_files']:3d}"
    )
    print(
        f" Total files: {summary['fx_directory']['total_files']:3d}"
    )

    print(f"\n{chart_symbol} TOTALS:")
    print(f" Header files (.hpp): {summary['totals']['hpp_files']:3d}")
    print(
        f" Implementation files (.cpp.hpp): {summary['totals']['cpp_hpp_files']:3d}"
    )
    print(f" Total files: {summary['totals']['total_files']:3d}")

    # Calculate ratios
    total_hpp = summary["totals"]["hpp_files"]
    total_cpp_hpp = summary["totals"]["cpp_hpp_files"]

    # Guard against division by zero when no headers were found.
    if total_hpp > 0:
        impl_ratio = (total_cpp_hpp / total_hpp) * 100
        print(f"\n{ratio_symbol} IMPLEMENTATION RATIO:")
        print(f" Implementation files per header: {impl_ratio:.1f}%")
        print(f" ({total_cpp_hpp} .cpp.hpp files for {total_hpp} .hpp files)")
|
||||
|
||||
|
||||
def _count_inclusions(
    fl_files: dict[str, list[Path]], fx_files: dict[str, list[Path]]
) -> tuple[int, int]:
    """Count .cpp.hpp files that appear in the all-source build includes.

    Args:
        fl_files: File lists for the fl/ directory (keys "hpp"/"cpp_hpp").
        fx_files: File lists for the fx/ directory (same shape).

    Returns:
        (included_count, total_impl_files): how many of the .cpp.hpp files
        are referenced by the all-source build, and how many exist overall.
    """
    all_cpp_hpp_files = fl_files["cpp_hpp"] + fx_files["cpp_hpp"]
    all_source_includes = get_all_source_build_includes()
    included_count = 0
    for file_path in all_cpp_hpp_files:
        # Normalize to forward slashes so comparison also works on Windows.
        relative_path = file_path.relative_to(SRC_ROOT)
        relative_path_str = str(relative_path).replace("\\", "/")
        if relative_path_str in all_source_includes:
            included_count += 1
    return included_count, len(all_cpp_hpp_files)


def main():
    """Main function to run the implementation files checker.

    Scans the fl/ and fx/ directories for *.hpp and *.cpp.hpp files,
    prints (or JSON-dumps) a summary, and exits with status 1 when any
    .cpp.hpp file is missing from the all-source build.
    """
    parser = argparse.ArgumentParser(
        description="Check *.hpp and *.cpp.hpp files in fl/ and fx/ directories"
    )
    parser.add_argument("--list", action="store_true", help="List all files found")
    parser.add_argument(
        "--check-inclusion",
        action="store_true",
        help="Check which .cpp.hpp files are included in all-source build",
    )
    parser.add_argument("--json", action="store_true", help="Output summary as JSON")
    parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
    parser.add_argument(
        "--ascii-only",
        action="store_true",
        help="Use ASCII-only output (no Unicode emoji)",
    )
    parser.add_argument(
        "--suppress-summary-on-100-percent",
        action="store_true",
        # BUGFIX: '%' must be doubled -- argparse %-formats help text, and a
        # bare trailing '%' raises ValueError when --help is rendered.
        help="Suppress summary report when inclusion percentage is 100%%",
    )

    args = parser.parse_args()

    # Override USE_ASCII_ONLY if command line flag is set
    global USE_ASCII_ONLY
    if args.ascii_only:
        USE_ASCII_ONLY = True

    # Collect files from both directories
    print("Scanning implementation files...")
    fl_files = collect_files_by_type(FL_DIR)
    fx_files = collect_files_by_type(FX_DIR)

    # Generate summary
    summary = generate_summary_report(fl_files, fx_files)

    # Define symbols once for all output modes
    search_symbol = "[SEARCH]" if USE_ASCII_ONLY else "🔍"
    stats_symbol = "[STATS]" if USE_ASCII_ONLY else "📊"
    config_symbol = "[CONFIG]" if USE_ASCII_ONLY else "🔧"

    # Calculate inclusion percentage to determine if we should suppress summary
    should_suppress_summary = False
    if args.suppress_summary_on_100_percent:
        included_count, total_impl_files = _count_inclusions(fl_files, fx_files)
        if total_impl_files > 0:
            inclusion_percentage = (included_count / total_impl_files) * 100
            should_suppress_summary = inclusion_percentage >= 100.0

    # Output based on requested format
    if args.json:
        # Add file lists to summary for JSON output
        summary["fl_files"] = {
            "hpp": [str(p.relative_to(SRC_ROOT)) for p in fl_files["hpp"]],
            "cpp_hpp": [str(p.relative_to(SRC_ROOT)) for p in fl_files["cpp_hpp"]],
        }
        summary["fx_files"] = {
            "hpp": [str(p.relative_to(SRC_ROOT)) for p in fx_files["hpp"]],
            "cpp_hpp": [str(p.relative_to(SRC_ROOT)) for p in fx_files["cpp_hpp"]],
        }
        print(json.dumps(summary, indent=2))
        return

    # Print summary report only if not suppressed
    if not should_suppress_summary:
        print_summary_report(summary)
    else:
        print("Summary report suppressed: 100% inclusion coverage achieved")

    # List files if requested
    if args.list:
        print("\n" + "=" * 80)
        print("DETAILED FILE LISTINGS")
        print("=" * 80)

        # FL directory files
        print_file_list(fl_files["hpp"], "FL Header Files (.hpp)", FL_DIR)
        print_file_list(
            fl_files["cpp_hpp"], "FL Implementation Files (.cpp.hpp)", FL_DIR
        )

        # FX directory files
        print_file_list(fx_files["hpp"], "FX Header Files (.hpp)", FX_DIR)
        print_file_list(
            fx_files["cpp_hpp"], "FX Implementation Files (.cpp.hpp)", FX_DIR
        )

    # Check inclusion in all-source build if requested
    if args.check_inclusion:
        # Only show inclusion check if not suppressing (i.e. below 100% coverage)
        if not should_suppress_summary:
            print("\n" + "=" * 80)
            print("ALL-SOURCE BUILD INCLUSION CHECK")
            print("=" * 80)

            fl_inclusion = check_inclusion_in_all_source_build(fl_files, FL_DIR)
            fx_inclusion = check_inclusion_in_all_source_build(fx_files, FX_DIR)

            print(f"\n{search_symbol} FL DIRECTORY INCLUSION:")
            print_inclusion_report(fl_inclusion, FL_DIR)

            print(f"\n{search_symbol} FX DIRECTORY INCLUSION:")
            print_inclusion_report(fx_inclusion, FX_DIR)

            # Overall inclusion statistics
            included_count, total_impl_files = _count_inclusions(fl_files, fx_files)
            if total_impl_files > 0:
                inclusion_percentage = (included_count / total_impl_files) * 100
                print(f"\n{stats_symbol} OVERALL INCLUSION STATISTICS:")
                print(f"  Total .cpp.hpp files found:     {total_impl_files}")
                print(f"  Included in all-source build:   {included_count}")
                print(f"  Inclusion percentage:           {inclusion_percentage:.1f}%")

    # Always check for missing files and exit with error if any are missing
    included_count, total_impl_files = _count_inclusions(fl_files, fx_files)
    total_missing = total_impl_files - included_count
    if total_missing > 0:
        # Print an explicit error message before exiting
        error_symbol = "[ERROR]" if USE_ASCII_ONLY else "🚨"
        print(
            f"\n{error_symbol} {total_missing} implementation file(s) are missing from the all-source build!"
        )
        print("  Failing script due to incomplete all-source build inclusion.")
        import sys

        sys.exit(1)

    if args.verbose:
        print(f"\n{config_symbol} CONFIGURATION:")
        print(f"  Project root: {PROJECT_ROOT}")
        print(f"  FL directory: {FL_DIR}")
        print(f"  FX directory: {FX_DIR}")
        print(f"  All-source build file: {ALL_SOURCE_BUILD_FILE}")
        print("  Hierarchical compile files:")
        for hfile in HIERARCHICAL_FILES:
            # NOTE(review): these glyphs are always Unicode, even in
            # --ascii-only mode -- possibly intentional; confirm.
            status = "✓" if hfile.exists() else "✗"
            print(f"      {status} {hfile.relative_to(PROJECT_ROOT)}")
||||
if __name__ == "__main__":
    # Allow running this checker directly as a script.
    main()
||||
250
libraries/FastLED/ci/util/concurrent_run.py
Normal file
250
libraries/FastLED/ci/util/concurrent_run.py
Normal file
@@ -0,0 +1,250 @@
|
||||
# pyright: reportUnknownMemberType=false
|
||||
"""
|
||||
Concurrent run utilities.
|
||||
"""
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import time
|
||||
from concurrent.futures import Future, ThreadPoolExecutor, as_completed
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from ci.boards import Board # type: ignore
|
||||
from ci.compiler.compile_for_board import compile_examples, errors_happened
|
||||
from ci.util.cpu_count import cpu_count
|
||||
from ci.util.create_build_dir import create_build_dir
|
||||
from ci.util.locked_print import locked_print
|
||||
|
||||
|
||||
# Board initialization doesn't take a lot of memory or cpu so it's safe to run in parallel
# NOTE: "PARRALLEL" is misspelled, but the identifier doubles as the
# environment-variable key below, so renaming it would silently break
# existing callers that set that variable.
PARRALLEL_PROJECT_INITIALIZATION = (
    os.environ.get("PARRALLEL_PROJECT_INITIALIZATION", "0") == "1"
)
|
||||
|
||||
def _banner_print(msg: str) -> None:
|
||||
"""Print a banner message."""
|
||||
# will produce
|
||||
#######
|
||||
# msg #
|
||||
#######
|
||||
lines = msg.splitlines()
|
||||
for line in lines:
|
||||
print("#" * (len(line) + 4))
|
||||
print(f"# {line} #")
|
||||
print("#" * (len(line) + 4))
|
||||
|
||||
|
||||
@dataclass
class ConcurrentRunArgs:
    """Bundle of parameters consumed by concurrent_run()."""

    # Boards to compile for; the first entry is used to prime the build dir.
    projects: list[Board]
    # Example sketch paths compiled for every board.
    examples: list[Path]
    # When True, skip installing project dependencies during init.
    skip_init: bool
    # Preprocessor defines forwarded to create_build_dir (-D build flags).
    defines: list[str]
    # Optional custom_sdkconfig value forwarded to create_build_dir.
    customsdk: str | None
    # Extra packages installed as lib_deps in the generated project.
    extra_packages: list[str]
    # Additional libraries passed to compile_examples, if any.
    libs: list[str] | None
    # Root build directory; a per-board subdirectory is created inside it.
    build_dir: str | None
    # Optional PlatformIO extra_scripts entry.
    extra_scripts: str | None
    # Working directory to chdir into for the duration of the run.
    cwd: str | None
    # Optional directory of board definitions copied into the build dir.
    board_dir: str | None
    # Raw build_flags project options, if any.
    build_flags: list[str] | None
    # Verbose compiler output.
    verbose: bool = False
    # Extra per-board examples appended to the common example list.
    extra_examples: dict[Board, list[Path]] | None = None
    # When True, run symbol analysis on each board after compiling.
    symbols: bool = False
||||
|
||||
def concurrent_run(
    args: ConcurrentRunArgs,
) -> int:
    """Initialize build directories and compile examples for every board.

    Phases:
      1. Prime the build directory serially using the first board.
      2. Initialize per-board build directories in a thread pool
         (one worker unless PARRALLEL_PROJECT_INITIALIZATION is set).
      3. Compile all examples per board in a CPU-bounded thread pool.
      4. Optionally run symbol analysis per board (args.symbols).

    Returns:
        0 on success, 1 if initialization or compilation fails.
    """
    projects = args.projects
    examples = args.examples
    skip_init = args.skip_init
    defines = args.defines
    customsdk = args.customsdk
    extra_packages = args.extra_packages
    build_dir = args.build_dir
    extra_scripts = args.extra_scripts
    cwd = args.cwd
    start_time = time.time()
    first_project = projects[0]
    prev_cwd: str | None = None
    board_dir = args.board_dir
    libs = args.libs
    extra_examples: dict[Board, list[Path]] = args.extra_examples or {}
    if cwd:
        # Remember the original cwd so we can restore it before returning.
        prev_cwd = os.getcwd()
        locked_print(f"Changing to directory {cwd}")
        os.chdir(cwd)

    # NOTE(review): start_time is re-assigned here, shadowing the value taken
    # above -- the first assignment is effectively dead.
    start_time = time.time()
    # Prime the build directory once (first board) before the parallel phase.
    create_build_dir(
        board=first_project,
        defines=defines,
        customsdk=customsdk,
        no_install_deps=skip_init,
        extra_packages=extra_packages,
        build_dir=build_dir,
        board_dir=board_dir,
        build_flags=args.build_flags,
        extra_scripts=extra_scripts,
    )
    diff = time.time() - start_time

    msg = f"Build directory created in {diff:.2f} seconds for board"
    locked_print(msg)

    verbose = args.verbose
    # This is not memory/cpu bound but is instead network bound so we can run one thread
    # per board to speed up the process.
    parallel_init_workers = 1 if not PARRALLEL_PROJECT_INITIALIZATION else len(projects)
    # Initialize the build directories for all boards
    locked_print(
        f"Initializing build directories for {len(projects)} boards with {parallel_init_workers} parallel workers"
    )
    with ThreadPoolExecutor(max_workers=parallel_init_workers) as executor:
        future_to_board: Dict[Future[Any], Board] = {}
        for board in projects:
            locked_print(
                f"Submitting build directory initialization for board: {board.board_name}"
            )
            future = executor.submit(
                create_build_dir,
                board,
                defines,
                customsdk,
                skip_init,
                extra_packages,
                build_dir,
                board_dir,
                args.build_flags,
                extra_scripts,
            )
            future_to_board[future] = board

        completed_boards = 0
        failed_boards = 0
        for future in as_completed(future_to_board):
            board = future_to_board[future]
            try:
                # create_build_dir returns (success, message).
                success, msg = future.result()
                if not success:
                    locked_print(
                        f"ERROR: Failed to initialize build_dir for board {board.board_name}:\n{msg}"
                    )
                    failed_boards += 1
                    # cancel all other tasks (fail fast on first error)
                    for f in future_to_board:
                        if not f.done():
                            f.cancel()
                    locked_print(
                        "Cancelled initialization for remaining boards due to failure"
                    )
                    return 1
                else:
                    completed_boards += 1
                    locked_print(
                        f"SUCCESS: Finished initializing build_dir for board {board.board_name} ({completed_boards}/{len(projects)})"
                    )
            except Exception as e:
                locked_print(
                    f"EXCEPTION: Build directory initialization failed for board {board.board_name}: {e}"
                )
                failed_boards += 1
                # cancel all other tasks
                for f in future_to_board:
                    if not f.done():
                        f.cancel()
                locked_print(
                    "Cancelled initialization for remaining boards due to exception"
                )
                return 1
    init_end_time = time.time()
    init_time = (init_end_time - start_time) / 60
    locked_print(f"\nAll build directories initialized in {init_time:.2f} minutes.")
    errors: list[str] = []
    # Run the compilation process; bound workers by available CPUs.
    num_cpus = max(1, min(cpu_count(), len(projects)))
    with ThreadPoolExecutor(max_workers=num_cpus) as executor:
        future_to_board: Dict[Future[Any], Board] = {
            executor.submit(
                compile_examples,
                board,
                examples + extra_examples.get(board, []),
                build_dir,
                verbose,
                libs=libs,
            ): board
            for board in projects
        }
        for future in as_completed(future_to_board):
            board = future_to_board[future]
            success, msg = future.result()
            if not success:
                msg = f"Compilation failed for board {board}: {msg}"
                errors.append(msg)
                locked_print(f"Compilation failed for board {board}: {msg}.\nStopping.")
                # Fail fast: cancel any compilations that have not started yet.
                for f in future_to_board:
                    f.cancel()
                break
    if prev_cwd:
        locked_print(f"Changing back to directory {prev_cwd}")
        os.chdir(prev_cwd)
    if errors_happened():
        locked_print("\nDone. Errors happened during compilation.")
        locked_print("\n".join(errors))
        return 1

    # Run symbol analysis if requested
    if args.symbols:
        locked_print("\nRunning symbol analysis on compiled outputs...")
        symbol_analysis_errors: List[str] = []

        for board in projects:
            try:
                locked_print(f"Running symbol analysis for board: {board.board_name}")

                # Run the symbol analysis tool for this board
                cmd = [
                    "uv",
                    "run",
                    "ci/util/symbol_analysis.py",
                    "--board",
                    board.board_name,
                ]

                result = subprocess.run(
                    cmd, capture_output=True, text=True, cwd=cwd or os.getcwd()
                )

                if result.returncode != 0:
                    error_msg = f"Symbol analysis failed for board {board.board_name}: {result.stderr}"
                    symbol_analysis_errors.append(error_msg)
                    locked_print(f"ERROR: {error_msg}")
                else:
                    locked_print(
                        f"Symbol analysis completed for board: {board.board_name}"
                    )
                    # Print the symbol analysis output
                    if result.stdout:
                        print(result.stdout)

            except Exception as e:
                error_msg = f"Exception during symbol analysis for board {board.board_name}: {e}"
                symbol_analysis_errors.append(error_msg)
                locked_print(f"ERROR: {error_msg}")

        # Symbol analysis failures are reported but do not fail the run.
        if symbol_analysis_errors:
            locked_print(
                f"\nSymbol analysis completed with {len(symbol_analysis_errors)} error(s):"
            )
            for error in symbol_analysis_errors:
                locked_print(f" - {error}")
        else:
            locked_print(
                f"\nSymbol analysis completed successfully for all {len(projects)} board(s)."
            )

    return 0
||||
27
libraries/FastLED/ci/util/console_utf8.py
Normal file
27
libraries/FastLED/ci/util/console_utf8.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
def configure_utf8_console() -> None:
    """Ensure stdout/stderr use UTF-8 encoding on Windows consoles.

    Safe no-op on non-Windows platforms and on environments where
    reconfigure is unavailable.
    """
    if os.name != "nt":
        # Only Windows consoles need the explicit UTF-8 switch.
        return

    try:
        for stream in (sys.stdout, sys.stderr):
            reconfigure = getattr(stream, "reconfigure", None)
            if callable(reconfigure):
                reconfigure(encoding="utf-8", errors="replace")  # type: ignore[attr-defined]
    except (AttributeError, OSError):
        # Older Python versions or redirected streams may not support reconfigure
        pass
||||
12
libraries/FastLED/ci/util/cpu_count.py
Normal file
12
libraries/FastLED/ci/util/cpu_count.py
Normal file
@@ -0,0 +1,12 @@
|
||||
import os
|
||||
|
||||
|
||||
def cpu_count() -> int:
    """Return the number of workers to use for parallel steps.

    Always 1 on GitHub Actions, or when the NO_PARALLEL environment
    variable is truthy; otherwise the machine's CPU count.
    """
    # GitHub Actions runners are resource-constrained: run sequentially.
    if "GITHUB_ACTIONS" in os.environ:
        return 1
    # Force sequential execution if NO_PARALLEL is set.
    if os.environ.get("NO_PARALLEL", "0") in ("1", "true", "True"):
        return 1
    return os.cpu_count() or 1
||||
590
libraries/FastLED/ci/util/create_build_dir.py
Normal file
590
libraries/FastLED/ci/util/create_build_dir.py
Normal file
@@ -0,0 +1,590 @@
|
||||
# pyright: reportUnknownMemberType=false
|
||||
"""
|
||||
Create build directory for project.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import time
|
||||
import warnings
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Dict
|
||||
|
||||
from ci.boards import Board # type: ignore
|
||||
from ci.util.locked_print import locked_print
|
||||
|
||||
|
||||
def _install_global_package(package: str) -> None:
    """Globally install a PlatformIO package via `pio pkg install -g -p`.

    Example package spec:
        https://github.com/maxgerhardt/platform-raspberrypi.git

    Args:
        package: Package name or URL understood by `pio pkg install -p`.

    Raises:
        subprocess.CalledProcessError: if the install command fails
            (check=True); stdout/stderr are captured on the exception.
    """
    locked_print(f"*** Installing {package} ***")
    cmd_list = [
        "pio",
        "pkg",
        "install",
        "-g",
        "-p",
        package,
    ]
    # Only used for logging; the command itself is executed as an argv list.
    cmd_str = subprocess.list2cmdline(cmd_list)
    locked_print(f"Running command:\n\n{cmd_str}\n\n")
    # Pass the argument list directly (shell=False): avoids shell-quoting
    # pitfalls for package URLs and is not vulnerable to shell injection.
    result = subprocess.run(
        cmd_list,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
        check=True,
    )
    locked_print(result.stdout)
    locked_print(f"*** Finished installing {package} ***")
||||
|
||||
|
||||
def insert_tool_aliases(meta_json: Dict[str, Dict[str, Any]]) -> None:
    """Augment each board entry in *meta_json* with an "aliases" mapping.

    For every board, resolve absolute paths for the common GCC/binutils
    tools (gcc, g++, ar, objcopy, ...) by inspecting the board's recorded
    "cc_path" (falling back to "gdb_path"), and probing the toolchain's
    bin directory. Tools that cannot be located map to None. Mutates
    meta_json in place; returns nothing.
    """
    for board in meta_json.keys():
        aliases: dict[str, str | None] = {}
        cc_path_value = meta_json[board].get("cc_path")
        resolved_cc_path: Path | None = None
        if cc_path_value:
            try:
                candidate = Path(str(cc_path_value))
                if candidate.is_absolute() and candidate.exists():
                    resolved_cc_path = candidate
                elif candidate.exists():
                    # Relative but existing: make it absolute.
                    resolved_cc_path = candidate.resolve()
                else:
                    # Not on disk: try to find it on PATH by basename.
                    which_result = shutil.which(
                        candidate.name if candidate.name else str(candidate)
                    )
                    if which_result:
                        resolved_cc_path = Path(which_result)
            except Exception:
                # Any resolution failure falls through to the gdb-based path.
                resolved_cc_path = None

        # Try to infer toolchain bin directory and prefix from either CC or GDB path
        tool_bin_dir: Path | None = None
        tool_prefix: str = ""
        tool_suffix: str = ""

        if resolved_cc_path and resolved_cc_path.exists():
            cc_base = resolved_cc_path.name
            # If cc_path points at a real gcc binary, derive prefix/suffix from it.
            # If it's a wrapper (e.g. cached_CC.cmd) without "gcc" in the name,
            # fall back to using gdb_path to derive the actual toolchain prefix/suffix.
            if "gcc" in cc_base:
                tool_bin_dir = resolved_cc_path.parent
                # e.g. "xtensa-esp32-elf-" from "xtensa-esp32-elf-gcc".
                tool_prefix = cc_base.split("gcc")[0]
                tool_suffix = resolved_cc_path.suffix
            else:
                resolved_cc_path = None  # Force gdb-based fallback below
        if resolved_cc_path is None:
            gdb_path_value = meta_json[board].get("gdb_path")
            if gdb_path_value:
                try:
                    gdb_path = Path(str(gdb_path_value))
                    if not gdb_path.exists():
                        which_gdb = shutil.which(gdb_path.name)
                        if which_gdb:
                            gdb_path = Path(which_gdb)
                    if gdb_path.exists():
                        tool_bin_dir = gdb_path.parent
                        gdb_base = gdb_path.name
                        # Derive prefix like 'arm-none-eabi-' from 'arm-none-eabi-gdb'
                        tool_prefix = (
                            gdb_base.split("gdb")[0] if "gdb" in gdb_base else ""
                        )
                        tool_suffix = gdb_path.suffix
                except Exception:
                    # Best-effort only; unresolved tools become None below.
                    pass

        # Tool basenames to resolve (combined with prefix/suffix above).
        tools = [
            "gcc",
            "g++",
            "ar",
            "objcopy",
            "objdump",
            "size",
            "nm",
            "ld",
            "as",
            "ranlib",
            "strip",
            "c++filt",
            "readelf",
            "addr2line",
        ]

        if tool_bin_dir is not None:
            for tool in tools:
                # e.g. "arm-none-eabi-" + "gcc" + ".exe"
                name = f"{tool_prefix}{tool}" + tool_suffix
                tool_path = tool_bin_dir / name
                if tool_path.exists():
                    aliases[tool] = str(tool_path)
                else:
                    which_result = shutil.which(name)
                    aliases[tool] = str(Path(which_result)) if which_result else None
        else:
            # Fallback: resolve via PATH only
            for tool in tools:
                which_result = shutil.which(tool)
                aliases[tool] = str(Path(which_result)) if which_result else None

        meta_json[board]["aliases"] = aliases
||||
|
||||
|
||||
def remove_readonly(func: Callable[..., Any], path: str, _: Any) -> None:
    """shutil.rmtree onerror handler: clear the read-only bit and retry.

    Args:
        func: The os function that failed (e.g. os.remove / os.rmdir);
            it is re-invoked on *path* after the permission fix.
        path: Filesystem path that could not be removed.
        _: Exception info tuple supplied by shutil.rmtree (unused).
    """
    import stat

    try:
        if os.name == "nt":
            # os.chmod with S_IWRITE clears the read-only attribute on
            # Windows. Unlike the previous os.system(f"attrib -r {path}"),
            # this is safe for paths containing spaces or shell
            # metacharacters and does not spawn a shell.
            os.chmod(path, stat.S_IWRITE)
        else:
            os.chmod(path, 0o777)
    except Exception:
        print(f"Error removing readonly attribute from {path}")

    # Retry the original removal now that permissions are fixed.
    func(path)
||||
|
||||
|
||||
def robust_rmtree(path: Path, max_retries: int, delay: float) -> bool:
    """Remove a directory tree, retrying on transient OS errors.

    Tolerates concurrent deletion by other processes and clears read-only
    entries via the remove_readonly onerror hook. Retries use exponential
    backoff starting at *delay* seconds.

    Args:
        path: Directory tree to delete.
        max_retries: Maximum number of removal attempts.
        delay: Base delay between retries in seconds (doubled each attempt).

    Returns:
        True once the directory is gone, False otherwise.
    """
    if not path.exists():
        locked_print(f"Directory {path} doesn't exist, skipping removal")
        return True

    final_attempt = max_retries - 1
    for attempt in range(max_retries):
        try:
            locked_print(
                f"Attempting to remove directory {path} (attempt {attempt + 1}/{max_retries})"
            )
            shutil.rmtree(path, onerror=remove_readonly)
        except OSError as e:
            if attempt == final_attempt:
                locked_print(
                    f"Failed to remove directory {path} after {max_retries} attempts: {e}"
                )
                return False

            # Log the specific error before deciding whether to retry.
            locked_print(
                f"Failed to remove directory {path} on attempt {attempt + 1}: {e}"
            )

            # A concurrent deletion by another process still counts as done.
            if not path.exists():
                locked_print(f"Directory {path} was removed by another process")
                return True

            # Exponential backoff before the next attempt.
            time.sleep(delay * (2**attempt))
        except Exception as e:
            locked_print(f"Unexpected error removing directory {path}: {e}")
            return False
        else:
            locked_print(f"Successfully removed directory {path}")
            return True

    return False
||||
|
||||
|
||||
def safe_file_removal(file_path: Path, max_retries: int) -> bool:
    """Remove a single file, retrying on transient OS errors.

    Tolerates concurrent deletion by other processes; retries use a
    linearly growing delay (0.1s, 0.2s, ...).

    Args:
        file_path: File to remove.
        max_retries: Maximum number of removal attempts.

    Returns:
        True once the file is gone, False otherwise.
    """
    if not file_path.exists():
        # Nothing to do -- treat an already-missing file as success.
        return True

    final_attempt = max_retries - 1
    for attempt in range(max_retries):
        try:
            file_path.unlink()
        except OSError as e:
            if attempt == final_attempt:
                locked_print(
                    f"Failed to remove file {file_path} after {max_retries} attempts: {e}"
                )
                return False

            locked_print(
                f"Failed to remove file {file_path} on attempt {attempt + 1}: {e}"
            )

            # A concurrent deletion by another process still counts as done.
            if not file_path.exists():
                locked_print(f"File {file_path} was removed by another process")
                return True

            # Back off a little longer on each retry.
            time.sleep(0.1 * (attempt + 1))
        except Exception as e:
            locked_print(f"Unexpected error removing file {file_path}: {e}")
            return False
        else:
            locked_print(f"Successfully removed file {file_path}")
            return True

    return False
||||
|
||||
|
||||
def create_build_dir(
|
||||
board: Board,
|
||||
defines: list[str],
|
||||
customsdk: str | None,
|
||||
no_install_deps: bool,
|
||||
extra_packages: list[str],
|
||||
build_dir: str | None,
|
||||
board_dir: str | None,
|
||||
build_flags: list[str] | None,
|
||||
extra_scripts: str | None,
|
||||
) -> tuple[bool, str]:
|
||||
"""Create the build directory for the given board."""
|
||||
import threading
|
||||
|
||||
# filter out "web" board because it's not a real board.
|
||||
if board.board_name == "web":
|
||||
locked_print(f"Skipping web target for board {board.board_name}")
|
||||
return True, ""
|
||||
if board.defines:
|
||||
defines.extend(board.defines)
|
||||
# remove duplicates
|
||||
defines = list(set(defines))
|
||||
board_name = board.board_name
|
||||
real_board_name = board.get_real_board_name()
|
||||
thread_id = threading.current_thread().ident
|
||||
locked_print(
|
||||
f"*** [Thread {thread_id}] Initializing environment for {board_name} ***"
|
||||
)
|
||||
# builddir = Path(build_dir) / board if build_dir else Path(".build") / board
|
||||
build_dir = build_dir or ".build"
|
||||
builddir = Path(build_dir) / board_name
|
||||
|
||||
locked_print(f"[Thread {thread_id}] Creating build directory: {builddir}")
|
||||
try:
|
||||
builddir.mkdir(parents=True, exist_ok=True)
|
||||
locked_print(
|
||||
f"[Thread {thread_id}] Successfully created build directory: {builddir}"
|
||||
)
|
||||
except Exception as e:
|
||||
locked_print(
|
||||
f"[Thread {thread_id}] Error creating build directory {builddir}: {e}"
|
||||
)
|
||||
return False, f"Failed to create build directory: {e}"
|
||||
# if lib directory (where FastLED lives) exists, remove it. This is necessary to run on
|
||||
# recycled build directories for fastled to update. This is a fast operation.
|
||||
srcdir = builddir / "lib"
|
||||
if srcdir.exists():
|
||||
locked_print(f"[Thread {thread_id}] Removing existing lib directory: {srcdir}")
|
||||
# STRICT: Explicit retry parameters - NO defaults allowed
|
||||
if not robust_rmtree(srcdir, max_retries=5, delay=0.1):
|
||||
locked_print(
|
||||
f"[Thread {thread_id}] Warning: Failed to remove lib directory {srcdir}, continuing anyway"
|
||||
)
|
||||
|
||||
platformio_ini = builddir / "platformio.ini"
|
||||
if platformio_ini.exists():
|
||||
locked_print(
|
||||
f"[Thread {thread_id}] Removing existing platformio.ini: {platformio_ini}"
|
||||
)
|
||||
# STRICT: Explicit retry parameter - NO defaults allowed
|
||||
if not safe_file_removal(platformio_ini, max_retries=3):
|
||||
locked_print(
|
||||
f"[Thread {thread_id}] Warning: Failed to remove {platformio_ini}, continuing anyway"
|
||||
)
|
||||
|
||||
if board_dir:
|
||||
dst_dir = builddir / "boards"
|
||||
locked_print(
|
||||
f"[Thread {thread_id}] Processing board directory: {board_dir} -> {dst_dir}"
|
||||
)
|
||||
|
||||
if dst_dir.exists():
|
||||
locked_print(
|
||||
f"[Thread {thread_id}] Removing existing boards directory: {dst_dir}"
|
||||
)
|
||||
# STRICT: Explicit retry parameters - NO defaults allowed
|
||||
if not robust_rmtree(dst_dir, max_retries=5, delay=0.1):
|
||||
locked_print(
|
||||
f"[Thread {thread_id}] Error: Failed to remove boards directory {dst_dir}"
|
||||
)
|
||||
return False, f"Failed to remove existing boards directory {dst_dir}"
|
||||
|
||||
try:
|
||||
locked_print(
|
||||
f"[Thread {thread_id}] Copying board directory: {board_dir} -> {dst_dir}"
|
||||
)
|
||||
shutil.copytree(str(board_dir), str(dst_dir))
|
||||
locked_print(
|
||||
f"[Thread {thread_id}] Successfully copied board directory to {dst_dir}"
|
||||
)
|
||||
except Exception as e:
|
||||
locked_print(f"[Thread {thread_id}] Error copying board directory: {e}")
|
||||
return False, f"Failed to copy board directory: {e}"
|
||||
if board.platform_needs_install:
|
||||
if board.platform:
|
||||
try:
|
||||
_install_global_package(board.platform)
|
||||
except subprocess.CalledProcessError as e:
|
||||
stdout = e.stdout
|
||||
return False, stdout
|
||||
else:
|
||||
warnings.warn("Platform install was specified but no platform was given.")
|
||||
|
||||
cmd_list = [
|
||||
"pio",
|
||||
"project",
|
||||
"init",
|
||||
"--project-dir",
|
||||
builddir.as_posix(),
|
||||
"--board",
|
||||
real_board_name,
|
||||
]
|
||||
if board.platform:
|
||||
cmd_list.append(f"--project-option=platform={board.platform}")
|
||||
if board.platform_packages:
|
||||
cmd_list.append(f"--project-option=platform_packages={board.platform_packages}")
|
||||
if board.framework:
|
||||
cmd_list.append(f"--project-option=framework={board.framework}")
|
||||
if board.board_build_core:
|
||||
cmd_list.append(f"--project-option=board_build.core={board.board_build_core}")
|
||||
if board.board_build_filesystem_size:
|
||||
cmd_list.append(
|
||||
f"--project-option=board_build.filesystem_size={board.board_build_filesystem_size}"
|
||||
)
|
||||
if build_flags is not None:
|
||||
for build_flag in build_flags:
|
||||
cmd_list.append(f"--project-option=build_flags={build_flag}")
|
||||
if defines:
|
||||
build_flags_str = " ".join(f"-D{define}" for define in defines)
|
||||
cmd_list.append(f"--project-option=build_flags={build_flags_str}")
|
||||
if board.customsdk:
|
||||
cmd_list.append(f"--project-option=custom_sdkconfig={customsdk}")
|
||||
if extra_packages:
|
||||
cmd_list.append(f"--project-option=lib_deps={','.join(extra_packages)}")
|
||||
if no_install_deps:
|
||||
cmd_list.append("--no-install-dependencies")
|
||||
|
||||
# Add CCACHE configuration script
|
||||
ccache_script = builddir / "ccache_config.py"
|
||||
if not ccache_script.exists():
|
||||
locked_print(
|
||||
f"[Thread {thread_id}] Creating CCACHE configuration script at {ccache_script}"
|
||||
)
|
||||
with open(ccache_script, "w") as f:
|
||||
f.write(
|
||||
'''"""Configure CCACHE for PlatformIO builds."""
|
||||
|
||||
import os
|
||||
import platform
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
Import("env")
|
||||
|
||||
def is_ccache_available():
|
||||
"""Check if ccache is available in the system."""
|
||||
try:
|
||||
subprocess.run(["ccache", "--version"], capture_output=True, check=True)
|
||||
return True
|
||||
except (subprocess.CalledProcessError, FileNotFoundError):
|
||||
return False
|
||||
|
||||
def get_ccache_path():
|
||||
"""Get the full path to ccache executable."""
|
||||
if platform.system() == "Windows":
|
||||
# On Windows, look in chocolatey's bin directory
|
||||
ccache_paths = [
|
||||
"C:\\ProgramData\\chocolatey\\bin\\ccache.exe",
|
||||
os.path.expanduser("~\\scoop\\shims\\ccache.exe")
|
||||
]
|
||||
for path in ccache_paths:
|
||||
if os.path.exists(path):
|
||||
return path
|
||||
else:
|
||||
# On Unix-like systems, use which to find ccache
|
||||
try:
|
||||
return subprocess.check_output(["which", "ccache"]).decode().strip()
|
||||
except subprocess.CalledProcessError:
|
||||
pass
|
||||
return None
|
||||
|
||||
def configure_ccache(env):
|
||||
"""Configure CCACHE for the build environment."""
|
||||
if not is_ccache_available():
|
||||
print("CCACHE is not available. Skipping CCACHE configuration.")
|
||||
return
|
||||
|
||||
ccache_path = get_ccache_path()
|
||||
if not ccache_path:
|
||||
print("Could not find CCACHE executable. Skipping CCACHE configuration.")
|
||||
return
|
||||
|
||||
print(f"Found CCACHE at: {ccache_path}")
|
||||
|
||||
# Set up CCACHE environment variables if not already set
|
||||
if "CCACHE_DIR" not in os.environ:
|
||||
# STRICT: PROJECT_DIR must be explicitly set - NO fallbacks allowed
|
||||
project_dir_for_ccache = env.get("PROJECT_DIR")
|
||||
if not project_dir_for_ccache:
|
||||
raise RuntimeError(
|
||||
"CRITICAL: PROJECT_DIR environment variable is required for CCACHE_DIR setup. "
|
||||
"Please set PROJECT_DIR to the project root directory."
|
||||
)
|
||||
ccache_dir = os.path.join(project_dir_for_ccache, ".ccache")
|
||||
os.environ["CCACHE_DIR"] = ccache_dir
|
||||
Path(ccache_dir).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Configure CCACHE for this build
|
||||
# STRICT: PROJECT_DIR must be explicitly set - NO fallbacks allowed
|
||||
project_dir = env.get("PROJECT_DIR")
|
||||
if not project_dir:
|
||||
raise RuntimeError(
|
||||
"CRITICAL: PROJECT_DIR environment variable is required but not set. "
|
||||
"Please set PROJECT_DIR to the project root directory."
|
||||
)
|
||||
os.environ["CCACHE_BASEDIR"] = project_dir
|
||||
os.environ["CCACHE_COMPRESS"] = "true"
|
||||
os.environ["CCACHE_COMPRESSLEVEL"] = "6"
|
||||
os.environ["CCACHE_MAXSIZE"] = "400M"
|
||||
|
||||
# Wrap compiler commands with ccache
|
||||
# STRICT: CC and CXX must be explicitly set - NO fallbacks allowed
|
||||
original_cc = env.get("CC")
|
||||
if not original_cc:
|
||||
raise RuntimeError(
|
||||
"CRITICAL: CC environment variable is required but not set. "
|
||||
"Please set CC to the C compiler path (e.g., gcc, clang)."
|
||||
)
|
||||
original_cxx = env.get("CXX")
|
||||
if not original_cxx:
|
||||
raise RuntimeError(
|
||||
"CRITICAL: CXX environment variable is required but not set. "
|
||||
"Please set CXX to the C++ compiler path (e.g., g++, clang++)."
|
||||
)
|
||||
|
||||
# Don't wrap if already wrapped
|
||||
if "ccache" not in original_cc:
|
||||
env.Replace(
|
||||
CC=f"{ccache_path} {original_cc}",
|
||||
CXX=f"{ccache_path} {original_cxx}",
|
||||
)
|
||||
print(f"Wrapped CC: {env.get('CC')}")
|
||||
print(f"Wrapped CXX: {env.get('CXX')}")
|
||||
|
||||
# Show CCACHE stats
|
||||
subprocess.run([ccache_path, "--show-stats"], check=False)
|
||||
|
||||
configure_ccache(env)'''
|
||||
)
|
||||
|
||||
# Get absolute paths for scripts
|
||||
project_root = Path.cwd()
|
||||
ci_flags_script = (project_root / "ci" / "ci-flags.py").resolve().as_posix()
|
||||
ccache_script = (builddir / "ccache_config.py").resolve().as_posix()
|
||||
|
||||
# Create a list of scripts with pre: prefix
|
||||
script_list = [f"pre:{ci_flags_script}", f"pre:{ccache_script}"]
|
||||
|
||||
# Add any additional scripts
|
||||
if extra_scripts:
|
||||
# Convert to absolute path and use Unix-style separators
|
||||
extra_scripts_path = str(Path(extra_scripts).resolve().as_posix())
|
||||
if not extra_scripts_path.startswith("pre:"):
|
||||
extra_scripts_path = f"pre:{extra_scripts_path}"
|
||||
script_list.append(extra_scripts_path)
|
||||
|
||||
# Add the scripts as a list
|
||||
cmd_list.append(f"--project-option=extra_scripts=[{','.join(script_list)}]")
|
||||
|
||||
cmd_str = subprocess.list2cmdline(cmd_list)
|
||||
locked_print(f"\n\nRunning command:\n {cmd_str}\n")
|
||||
result = subprocess.run(
|
||||
cmd_str,
|
||||
shell=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
text=True,
|
||||
check=False,
|
||||
)
|
||||
stdout = result.stdout
|
||||
locked_print(result.stdout)
|
||||
if result.returncode != 0:
|
||||
locked_print(
|
||||
f"*** [Thread {thread_id}] Error setting up board {board_name} ***"
|
||||
)
|
||||
return False, stdout
|
||||
locked_print(
|
||||
f"*** [Thread {thread_id}] Finished initializing environment for board {board_name} ***"
|
||||
)
|
||||
|
||||
# Print the location of the generated platformio.ini file
|
||||
platformio_ini_path = builddir / "platformio.ini"
|
||||
locked_print(f"Writing to platformio.ini {platformio_ini_path}")
|
||||
|
||||
# Print the contents of the generated platformio.ini file for debugging
|
||||
platformio_ini_path = builddir / "platformio.ini"
|
||||
if platformio_ini_path.exists():
|
||||
locked_print(
|
||||
f"\n*** Contents of {platformio_ini_path} after initialization ***"
|
||||
)
|
||||
try:
|
||||
with open(platformio_ini_path, "r") as f:
|
||||
ini_contents = f.read()
|
||||
locked_print(f"\n\n{ini_contents}\n\n")
|
||||
except Exception as e:
|
||||
locked_print(f"Error reading {platformio_ini_path}: {e}")
|
||||
locked_print(f"*** End of {platformio_ini_path} contents ***\n")
|
||||
else:
|
||||
locked_print(
|
||||
f"Warning: {platformio_ini_path} was not found after initialization"
|
||||
)
|
||||
|
||||
# dumping enviorment variables to help debug.
|
||||
# this is the command: pio run --target envdump
|
||||
cwd = str(builddir.resolve())
|
||||
cmd_list = [
|
||||
"pio",
|
||||
"project",
|
||||
"metadata",
|
||||
"--json-output",
|
||||
]
|
||||
cmd_str = subprocess.list2cmdline(cmd_list)
|
||||
stdout = subprocess.run(
|
||||
cmd_list,
|
||||
cwd=cwd,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
text=True,
|
||||
check=False,
|
||||
).stdout
|
||||
|
||||
try:
|
||||
data = json.loads(stdout)
|
||||
# now dump the values to the file at the root of the build directory.
|
||||
matadata_json = builddir / "build_info.json"
|
||||
try:
|
||||
insert_tool_aliases(data)
|
||||
formatted = json.dumps(data, indent=4, sort_keys=True)
|
||||
with open(matadata_json, "w") as f:
|
||||
f.write(formatted)
|
||||
except Exception:
|
||||
with open(matadata_json, "w") as f:
|
||||
f.write(stdout)
|
||||
except json.JSONDecodeError:
|
||||
msg = f"build_info.json will not be generated because of error because stdout does not look like a json file:\n#### STDOUT ####\n{stdout}\n#### END STDOUT ####\n"
|
||||
locked_print(msg)
|
||||
return True, stdout
|
||||
207
libraries/FastLED/ci/util/elf.py
Normal file
207
libraries/FastLED/ci/util/elf.py
Normal file
@@ -0,0 +1,207 @@
|
||||
# pyright: reportUnknownMemberType=false
|
||||
import subprocess
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List
|
||||
|
||||
|
||||
def run_command(command: List[str], show_output: bool = False) -> str:
    """Execute *command* and return its captured standard output.

    Args:
        command: Argument vector to execute.
        show_output: When True, echo the command line and its output.

    Returns:
        str: The command's standard output.

    Raises:
        RuntimeError: When the process exits with a non-zero status.
    """
    cmd_text = " ".join(command)
    if show_output:
        print(f"Running command: {cmd_text}")
    proc = subprocess.run(command, capture_output=True, text=True)
    if proc.returncode != 0:
        raise RuntimeError(f"Command failed: {cmd_text}\n{proc.stderr}")
    if show_output and proc.stdout:
        print(f"Command output: {proc.stdout}")
    return proc.stdout
|
||||
|
||||
|
||||
def analyze_elf_file(objdump_path: Path, cppfilt_path: Path, elf_file: Path):
    """Print the section headers of *elf_file*, then its symbol table.

    Args:
        objdump_path: Path to the objdump executable.
        cppfilt_path: Path to the c++filt executable.
        elf_file: ELF file to inspect.
    """
    # "-h" prints the section headers only.
    header_cmd = [str(objdump_path), "-h", str(elf_file)]
    print(f"Analyzing ELF file: {elf_file}")
    headers_text = run_command(header_cmd, show_output=True)
    print("\nELF File Analysis:")
    print(headers_text)
    list_symbols_and_sizes(objdump_path, cppfilt_path, elf_file)
|
||||
|
||||
|
||||
def cpp_filt(cppfilt_path: Path, input_text: str) -> str:
    """Demangle C++ symbols in *input_text* using c++filt.

    Args:
        cppfilt_path: Path to the c++filt executable.
        input_text: Text containing mangled symbols (one or many).

    Returns:
        str: The demangled text.

    Raises:
        RuntimeError: If c++filt exits with a non-zero status.
        OSError: If the executable cannot be launched.
    """
    # "-t" also demangles types; "-n" disables underscore stripping.
    command = [str(cppfilt_path), "-t", "-n"]
    print(f"Running c++filt on input text with {cppfilt_path}")
    # subprocess.run with input= replaces the manual Popen/communicate pair;
    # it cannot leak pipe handles if an exception fires mid-flight.
    result = subprocess.run(
        command,
        input=input_text,
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        raise RuntimeError(f"Error running c++filt: {result.stderr}")
    return result.stdout
|
||||
|
||||
|
||||
def dump_symbol_sizes(nm_path: Path, cpp_filt_path: Path, elf_file: Path) -> str:
    """Return a demangled symbol-size listing of *elf_file*, largest first.

    Runs ``nm -S --size-sort`` to collect sizes, pipes the output through
    c++filt for demangling, then re-sorts the lines descending by size.

    Raises:
        RuntimeError: If nm or c++filt exits with a non-zero status.
    """
    nm_cmd = [
        str(nm_path),
        "-S",
        "--size-sort",
        str(elf_file),
    ]
    print(f"Listing symbols and sizes in ELF file: {elf_file}")
    print("Running command: ", " ".join(nm_cmd))
    nm_proc = subprocess.run(
        nm_cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
    if nm_proc.returncode != 0:
        raise RuntimeError(f"Error running nm command: {nm_proc.stderr}")

    filt_cmd = [str(cpp_filt_path), "--no-strip-underscore"]
    print("Running c++filt command: ", " ".join(filt_cmd))
    filt_proc = subprocess.run(
        filt_cmd,
        input=nm_proc.stdout,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
    if filt_proc.returncode != 0:
        raise RuntimeError(f"Error running c++filt command: {filt_proc.stderr}")

    @dataclass
    class _Symbol:
        address: str
        size: int
        rest: str

    def _parse(raw: str) -> _Symbol:
        # nm prints: <address> <size-in-hex> <type> <name...>
        addr, size_hex, *tail = raw.split()
        return _Symbol(addr, int(size_hex, 16), " ".join(tail))

    # Re-sort descending by size and format for display.
    records: list[_Symbol] = [_parse(raw) for raw in filt_proc.stdout.splitlines()]
    records.sort(key=lambda s: s.size, reverse=True)
    return "\n".join(f"{s.size:6d} {s.rest}" for s in records)
|
||||
|
||||
|
||||
def demangle_symbol(cppfilt_path: Path, symbol: str) -> str:
    """Demangle a single C++ *symbol* via c++filt.

    Args:
        cppfilt_path: Path to the c++filt executable.
        symbol: Mangled symbol name.

    Returns:
        str: The demangled name with surrounding whitespace stripped.
    """
    demangle_cmd = [str(cppfilt_path), symbol]
    return run_command(demangle_cmd, show_output=False).strip()
|
||||
|
||||
|
||||
def list_symbols_and_sizes(objdump_path: Path, cppfilt_path: Path, elf_file: Path):
    """List all symbols and their sizes from the ELF file using objdump.

    Args:
        objdump_path: Path to the objdump executable.
        cppfilt_path: Path to the c++filt executable.
        elf_file: Path to the ELF file.
    """
    command = [
        str(objdump_path),
        "-t",
        str(elf_file),
    ]  # "-t" option lists symbols with sizes.
    print(f"Listing symbols and sizes in ELF file: {elf_file}")
    output = run_command(command, show_output=False)

    hex_digits = set("0123456789abcdefABCDEF")
    symbols: List[Dict[str, Any]] = []
    for line in output.splitlines():
        parts = line.split()
        # Expected parts length can vary; check that the size field (parts[2])
        # and section (parts[4]) look valid.  NOTE: the size field is printed
        # in HEX by objdump -t and is parsed with int(..., 16) below, so
        # testing it with str.isdigit() (as before) silently dropped every
        # symbol whose size contains the digits a-f.  Accept all hex digits.
        if (
            len(parts) > 5
            and parts[2]
            and all(c in hex_digits for c in parts[2])
            and parts[4].startswith(".")
        ):
            symbol = {
                "name": parts[-1],
                "size": int(parts[2], 16),  # size is in hex format
                "section": parts[4],
                "type": parts[3],
            }
            symbols.append(symbol)

    if symbols:
        print("\nSymbols and Sizes in ELF File:")
        for symbol in symbols:
            demangled_name = demangle_symbol(cppfilt_path, symbol["name"])
            print(
                f"Symbol: {demangled_name}, Size: {symbol['size']} bytes, Type: {symbol['type']}, Section: {symbol['section']}"
            )
    else:
        print("No symbols found or unable to parse symbols correctly.")
|
||||
|
||||
|
||||
def check_elf_format(objdump_path: Path, elf_file: Path):
    """Verify *elf_file* reads as a valid ELF by dumping its file header.

    Args:
        objdump_path: Path to the objdump executable.
        elf_file: Path to the ELF file.
    """
    # "-f" prints the overall file header (architecture, flags, start addr).
    format_cmd = [str(objdump_path), "-f", str(elf_file)]
    print(f"Checking ELF file format: {elf_file}")
    header_info = run_command(format_cmd, show_output=True)
    print("\nELF File Format Information:")
    print(header_info)
|
||||
|
||||
|
||||
def check_section_contents(objdump_path: Path, elf_file: Path):
    """Dump the raw contents of every section in *elf_file*.

    Args:
        objdump_path: Path to the objdump executable.
        elf_file: Path to the ELF file.
    """
    # "-s" dumps the full contents of all sections.
    dump_cmd = [str(objdump_path), "-s", str(elf_file)]
    print(f"Dumping all sections of ELF file: {elf_file}")
    sections_text = run_command(dump_cmd, show_output=True)
    print("\nELF File Sections Content:")
    print(sections_text)
|
||||
380
libraries/FastLED/ci/util/esp32_symbol_analysis.py
Normal file
380
libraries/FastLED/ci/util/esp32_symbol_analysis.py
Normal file
@@ -0,0 +1,380 @@
|
||||
#!/usr/bin/env python3
|
||||
# pyright: reportUnknownMemberType=false, reportMissingParameterType=false, reportUnknownLambdaType=false, reportArgumentType=false
|
||||
"""
|
||||
ESP32 Symbol Analysis Tool
|
||||
Analyzes the ESP32 ELF file to identify symbols that can be eliminated for binary size reduction.
|
||||
"""
|
||||
|
||||
import json
|
||||
import subprocess
|
||||
import sys
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Tuple
|
||||
|
||||
|
||||
@dataclass
class SymbolInfo:
    """Represents a symbol in the ESP32 binary"""

    address: str  # symbol address as printed by nm (decimal here, via --radix=d)
    size: int  # symbol size in bytes
    type: str  # nm symbol-type letter (e.g. T, D, B, W)
    name: str  # raw (possibly mangled) symbol name
    demangled_name: str  # c++filt output; equals `name` when not demanglable
    source: str  # STRICT: NO defaults - all callers must provide explicit source
|
||||
|
||||
|
||||
def run_command(cmd: str) -> str:
    """Run *cmd* through the shell and return its stdout ('' on failure)."""
    try:
        completed = subprocess.run(
            cmd, shell=True, capture_output=True, text=True, check=True
        )
    except subprocess.CalledProcessError as e:
        # Best-effort tool: report the failure and hand back an empty string.
        print(f"Error running command: {cmd}")
        print(f"Error: {e.stderr}")
        return ""
    return completed.stdout
|
||||
|
||||
|
||||
def demangle_symbol(mangled_name: str, cppfilt_path: str) -> str:
    """Demangle *mangled_name* via c++filt; return it unchanged on failure."""
    try:
        pipeline = f'echo "{mangled_name}" | "{cppfilt_path}"'
        completed = subprocess.run(
            pipeline, shell=True, capture_output=True, text=True, check=True
        )
        demangled = completed.stdout.strip()
        # c++filt echoes the input unchanged when it cannot demangle it.
        if demangled == mangled_name:
            return mangled_name
        return demangled
    except Exception as e:
        print(f"Error demangling symbol: {mangled_name}")
        print(f"Error: {e}")
        return mangled_name
|
||||
|
||||
|
||||
def analyze_symbols(
    elf_file: str, nm_path: str, cppfilt_path: str
) -> Tuple[List[SymbolInfo], List[SymbolInfo], List[SymbolInfo]]:
    """Analyze symbols in the ELF file using nm with C++ demangling.

    Args:
        elf_file: Path to the ELF file to inspect.
        nm_path: Path to the nm executable.
        cppfilt_path: Path to the c++filt executable.

    Returns:
        (all symbols, FastLED-related symbols, symbols larger than 100 bytes)
    """
    print("Analyzing symbols...")

    # nm with decimal radix and size-sorted output: one symbol per line as
    # "<address> <size> <type> <name...>".
    nm_output = run_command(
        f'"{nm_path}" --print-size --size-sort --radix=d "{elf_file}"'
    )

    all_symbols: List[SymbolInfo] = []
    fastled_related: List[SymbolInfo] = []
    oversized: List[SymbolInfo] = []

    # Keywords (checked against the lowercased demangled name) that mark a
    # symbol as FastLED-related.
    fastled_keywords = (
        "fastled",
        "cfastled",
        "crgb",
        "hsv",
        "pixel",
        "controller",
        "led",
        "rmt",
        "strip",
        "neopixel",
        "ws2812",
        "apa102",
    )

    print("Demangling C++ symbols...")

    for raw_line in nm_output.strip().split("\n"):
        if not raw_line.strip():
            continue

        fields = raw_line.split()
        if len(fields) < 4:
            continue

        mangled = " ".join(fields[3:])
        info = SymbolInfo(
            address=fields[0],
            size=int(fields[1]),
            type=fields[2],
            name=mangled,
            demangled_name=demangle_symbol(mangled, cppfilt_path),
            source="esp32_nm",
        )
        all_symbols.append(info)

        haystack = info.demangled_name.lower()
        if any(kw in haystack for kw in fastled_keywords):
            fastled_related.append(info)

        # Anything over 100 bytes counts as "large".
        if info.size > 100:
            oversized.append(info)

    return all_symbols, fastled_related, oversized
|
||||
|
||||
|
||||
def analyze_map_file(map_file: str) -> Dict[str, List[str]]:
    """Analyze the map file to understand module dependencies"""
    # Returns archive member name -> symbols that pulled that member in.
    # Returns {} if the map file does not exist.
    print("Analyzing map file...")

    dependencies: Dict[str, List[str]] = {}
    current_archive = None

    try:
        with open(map_file, "r") as f:
            for line in f:
                line = line.strip()

                # Look for archive member includes
                # NOTE(review): this prefix is hard-coded to the esp32dev
                # build directory; other boards produce no matches — confirm.
                if line.startswith(".pio/build/esp32dev/liba4c/libsrc.a("):
                    # Extract module name
                    start = line.find("(") + 1
                    end = line.find(")")
                    if start > 0 and end > start:
                        current_archive = line[start:end]
                        dependencies[current_archive] = []

                elif current_archive and line and not line.startswith(".pio"):
                    # This line shows what pulled in the module
                    if "(" in line and ")" in line:
                        # Extract the symbol that caused the inclusion
                        symbol_start = line.find("(") + 1
                        symbol_end = line.find(")")
                        if symbol_start > 0 and symbol_end > symbol_start:
                            symbol = line[symbol_start:symbol_end]
                            dependencies[current_archive].append(symbol)
                    # NOTE(review): resetting here records at most one
                    # follow-up line per archive member — presumably matching
                    # the GNU ld map layout; confirm against a real map file.
                    current_archive = None
    except FileNotFoundError:
        print(f"Map file not found: {map_file}")
        return {}

    return dependencies
|
||||
|
||||
|
||||
def generate_report(
    symbols: List[SymbolInfo],
    fastled_symbols: List[SymbolInfo],
    large_symbols: List[SymbolInfo],
    dependencies: Dict[str, List[str]],
) -> Dict[str, Any]:
    """Generate a comprehensive report

    Prints a human-readable size-reduction report to stdout and returns a
    summary dict: symbol counts, total FastLED size, the ten largest FastLED
    symbols, and the FastLED-related module names.
    """
    print("\n" + "=" * 80)
    print("ESP32 FASTLED SYMBOL ANALYSIS REPORT")
    print("=" * 80)

    # Summary statistics
    total_symbols = len(symbols)
    total_fastled = len(fastled_symbols)
    fastled_size = sum(s.size for s in fastled_symbols)

    print("\nSUMMARY:")
    print(f"  Total symbols: {total_symbols}")
    print(f"  FastLED symbols: {total_fastled}")
    print(f"  Total FastLED size: {fastled_size} bytes ({fastled_size / 1024:.1f} KB)")

    # Largest FastLED symbols (top 20 by size, descending)
    print("\nLARGEST FASTLED SYMBOLS (potential elimination targets):")
    fastled_sorted: List[SymbolInfo] = sorted(
        fastled_symbols, key=lambda x: x.size, reverse=True
    )
    for i, sym in enumerate(fastled_sorted[:20]):
        display_name = sym.demangled_name
        print(f"  {i + 1:2d}. {sym.size:6d} bytes - {display_name}")
        # Show the raw mangled form (truncated) when demangling changed it.
        if sym.demangled_name != sym.name:
            print(
                f"      (mangled: {sym.name[:80]}{'...' if len(sym.name) > 80 else ''})"
            )

    # FastLED modules analysis
    print("\nFASTLED MODULES PULLED IN:")
    fastled_modules: List[str] = [
        mod
        for mod in dependencies.keys()
        if any(kw in mod.lower() for kw in ["fastled", "crgb", "led", "rmt", "strip"])
    ]

    for module in sorted(fastled_modules):
        print(f"  {module}:")
        for symbol in dependencies[module][:5]:  # Show first 5 symbols
            print(f"    - {symbol}")
        if len(dependencies[module]) > 5:
            print(f"    ... and {len(dependencies[module]) - 5} more")

    # Largest overall symbols (non-FastLED)
    print("\nLARGEST NON-FASTLED SYMBOLS:")
    non_fastled: List[SymbolInfo] = [
        s
        for s in large_symbols
        if not any(
            keyword in s.demangled_name.lower()
            for keyword in ["fastled", "cfastled", "crgb", "hsv"]
        )
    ]
    non_fastled_sorted: List[SymbolInfo] = sorted(
        non_fastled, key=lambda x: x.size, reverse=True
    )

    for i, sym in enumerate(non_fastled_sorted[:15]):
        display_name = sym.demangled_name
        print(f"  {i + 1:2d}. {sym.size:6d} bytes - {display_name}")

    # Recommendations
    print("\nRECOMMENDATIONS FOR SIZE REDUCTION:")

    # Identify unused features
    # unused_features = []
    # Substring patterns (matched case-sensitively against demangled names)
    # that group symbols into removable feature buckets.
    feature_patterns = {
        "JSON functionality": ["json", "Json"],
        "Audio processing": ["audio", "fft", "Audio"],
        "2D effects": ["2d", "noise", "matrix"],
        "Video functionality": ["video", "Video"],
        "UI components": ["ui", "button", "slider"],
        "File system": ["file", "File", "fs_"],
        "Mathematical functions": ["sqrt", "sin", "cos", "math"],
        "String processing": ["string", "str", "String"],
    }

    for feature, patterns in feature_patterns.items():
        feature_symbols: List[SymbolInfo] = [
            s for s in fastled_symbols if any(p in s.demangled_name for p in patterns)
        ]
        if feature_symbols:
            total_size = sum(s.size for s in feature_symbols)
            print(f"  - {feature}: {len(feature_symbols)} symbols, {total_size} bytes")
            if total_size > 1000:  # Only show features with >1KB
                print(
                    f"    Consider removing if not needed (could save ~{total_size / 1024:.1f} KB)"
                )
                # Show a few example symbols
                for sym in feature_symbols[:3]:
                    display_name = sym.demangled_name[:60]
                    print(f"      * {sym.size} bytes: {display_name}")
                if len(feature_symbols) > 3:
                    print(f"      ... and {len(feature_symbols) - 3} more")

    return {
        "total_symbols": total_symbols,
        "fastled_symbols": total_fastled,
        "fastled_size": fastled_size,
        "largest_fastled": fastled_sorted[:10],
        "dependencies": fastled_modules,
    }
|
||||
|
||||
|
||||
def main():
    """Locate the ESP32 build output, analyze its symbols, and save a report.

    Probes a set of candidate build directories for the first ESP32 board
    with a build_info.json, runs nm/c++filt symbol analysis plus map-file
    dependency analysis, prints a report, and writes the detailed data to
    {board}_symbol_analysis.json inside the build directory.

    Exits with status 1 when no build directory or ESP32 board is found.
    """
    # Local import: only needed to serialize SymbolInfo dataclasses below.
    from dataclasses import asdict

    # Detect build directory and board - try multiple possible locations
    possible_build_dirs = [
        Path("../../.build"),
        Path("../.build"),
        Path(".build"),
        Path("../../build"),
        Path("../build"),
        Path("build"),
    ]

    build_dir = None
    for build_path in possible_build_dirs:
        if build_path.exists():
            build_dir = build_path
            break

    if not build_dir:
        print("Error: Could not find build directory (.build)")
        sys.exit(1)

    # Find ESP32 board directory
    esp32_boards = [
        "esp32dev",
        "esp32",
        "esp32s2",
        "esp32s3",
        "esp32c3",
        "esp32c6",
        "esp32h2",
        "esp32p4",
        "esp32c2",
    ]
    board_dir = None
    board_name = None

    for board in esp32_boards:
        candidate_dir = build_dir / board
        if candidate_dir.exists():
            build_info_file = candidate_dir / "build_info.json"
            if build_info_file.exists():
                board_dir = candidate_dir
                board_name = board
                break

    if not board_dir:
        print("Error: No ESP32 board with build_info.json found in build directory")
        print(f"Searched in: {build_dir}")
        print(f"Looking for boards: {esp32_boards}")
        sys.exit(1)

    build_info_path = board_dir / "build_info.json"
    print(f"Found ESP32 build info for {board_name}: {build_info_path}")

    with open(build_info_path) as f:
        build_info = json.load(f)

    # Use the detected board name instead of hardcoded "esp32dev"
    esp32_info = build_info[board_name]
    nm_path = esp32_info["aliases"]["nm"]
    elf_file = esp32_info["prog_path"]

    # The linker map sits next to the ELF with a .map suffix.
    map_file = Path(elf_file).with_suffix(".map")

    print(f"Analyzing ELF file: {elf_file}")
    print(f"Using nm tool: {nm_path}")
    print(f"Map file: {map_file}")

    # Analyze symbols
    cppfilt_path = esp32_info["aliases"]["c++filt"]
    symbols, fastled_symbols, large_symbols = analyze_symbols(
        elf_file, nm_path, cppfilt_path
    )

    # Analyze dependencies
    dependencies = analyze_map_file(map_file)

    # Generate report
    report = generate_report(symbols, fastled_symbols, large_symbols, dependencies)

    # Save detailed data to JSON (sorted by size, largest first)
    # Use board-specific filename and place it relative to build directory
    output_file = build_dir / f"{board_name}_symbol_analysis.json"
    detailed_data = {
        "summary": report,
        "all_fastled_symbols": sorted(
            fastled_symbols, key=lambda x: x.size, reverse=True
        ),
        "all_symbols_sorted_by_size": sorted(
            symbols, key=lambda x: x.size, reverse=True
        )[:100],  # Top 100 largest symbols
        "dependencies": dependencies,
        "large_symbols": sorted(large_symbols, key=lambda x: x.size, reverse=True)[
            :50
        ],  # Top 50 largest symbols
    }

    with open(output_file, "w") as f:
        # BUGFIX: SymbolInfo dataclass instances are not JSON-serializable,
        # so a plain json.dump raised TypeError; convert them with asdict.
        json.dump(detailed_data, f, indent=2, default=asdict)

    print(f"\nDetailed analysis saved to: {output_file}")
    print("You can examine this file to identify specific symbols to eliminate.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
12
libraries/FastLED/ci/util/locked_print.py
Normal file
12
libraries/FastLED/ci/util/locked_print.py
Normal file
@@ -0,0 +1,12 @@
|
||||
from threading import Lock
|
||||
|
||||
|
||||
# Serializes stdout access so concurrent threads do not interleave lines.
PRINT_LOCK = Lock()


def locked_print(string: str):
    """Print with a lock to prevent garbled output for multiple threads."""
    with PRINT_LOCK:
        # A single print can only emit so much; write line by line instead.
        for chunk in string.splitlines():
            print(chunk)
|
||||
19
libraries/FastLED/ci/util/map_dump.py
Normal file
19
libraries/FastLED/ci/util/map_dump.py
Normal file
@@ -0,0 +1,19 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def map_dump(map_file: Path) -> None:
    """Run a battery of fpvgcc analyses on *map_file*, printing each report."""
    # Each flag selects a different fpvgcc report: symbol sizes, load map,
    # usage by file, usage by region, usage by section, and linker analysis.
    flags = [
        "--sar",
        "--lmap root",
        "--uf",
        "--uregions",
        "--usections",
        "--la",
    ]
    for flag in flags:
        cmd = f"uv run fpvgcc {map_file} {flag}"
        print("\nRunning command: ", cmd)
        os.system(cmd)
|
||||
87
libraries/FastLED/ci/util/output_formatter.py
Normal file
87
libraries/FastLED/ci/util/output_formatter.py
Normal file
@@ -0,0 +1,87 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import time
|
||||
from typing import Protocol
|
||||
|
||||
|
||||
class OutputFormatter(Protocol):
    """Protocol for output formatters used with RunningProcess.

    Lifecycle: ``begin()`` once before any output, ``transform(line)`` for
    each output line, ``end()`` once after the stream closes.
    """

    def begin(self) -> None:
        """Called once before the first line is processed."""
        ...

    def transform(self, line: str) -> str:
        """Return the display form of one output *line*."""
        ...

    def end(self) -> None:
        """Called once after the last line has been processed."""
        ...
|
||||
|
||||
|
||||
class NullOutputFormatter:
    """No-op formatter: passes lines through untouched, no lifecycle effects."""

    def begin(self) -> None:
        """Nothing to set up."""
        return None

    def transform(self, line: str) -> str:
        """Return *line* exactly as given."""
        return line

    def end(self) -> None:
        """Nothing to tear down."""
        return None
|
||||
|
||||
|
||||
class _MultiPathSubstitutionFormatter:
    """Formatter that rewrites path fragments and prefixes elapsed seconds."""

    def __init__(self, substitutions: list[tuple[str, str, str]]) -> None:
        """Initialize with (needle, regex_pattern, replacement) tuples.

        The needle acts as a cheap substring pre-check so the regex only
        runs on lines that can possibly match.
        """
        self._substitutions: list[tuple[str, str, re.Pattern[str]]] = []
        for needle, pattern_text, replacement in substitutions:
            self._substitutions.append((needle, replacement, re.compile(pattern_text)))
        self._start_time: float = 0.0

    def begin(self) -> None:
        """Record the start time used for elapsed-seconds prefixes."""
        self._start_time = time.time()

    def transform(self, line: str) -> str:
        """Return *line* with paths rewritten, prefixed by elapsed seconds."""
        if not line:
            return line
        rewritten: str = self._format_paths(line)
        seconds: float = time.time() - self._start_time
        return f"{seconds:.2f} {rewritten}"

    def end(self) -> None:
        """No teardown required."""
        return None

    def _format_paths(self, line: str) -> str:
        # Apply each substitution in order; later rules see earlier rewrites.
        text = line
        for needle, replacement, pattern in self._substitutions:
            if needle in text and pattern.search(text):
                text = pattern.sub(replacement, text)
        return text
|
||||
|
||||
|
||||
def create_sketch_path_formatter(example: str) -> OutputFormatter:
    """Build a formatter that maps build paths back to source-tree paths.

    lib/FastLED/... is rewritten to src/..., src/sketch/... is rewritten to
    examples/{example}/..., and every line is prefixed with elapsed seconds.

    Args:
        example: Example name or path (e.g., "Pintest" or "examples/SmartMatrix").

    Returns:
        OutputFormatter: Configured formatter instance.
    """
    # Accept either a bare example name or a full (possibly Windows) path.
    if "/" in example or "\\" in example:
        example_display: str = example.replace("\\", "/")
    else:
        example_display = f"examples/{example}"

    path_rules = [
        # Map lib/FastLED/ paths back to src/ for better UX.
        ("lib/FastLED", r"lib[/\\]+FastLED[/\\]+", "src/"),
        # Map src/sketch/ paths back to the example directory.
        ("sketch", r"src[/\\]+sketch[/\\]+", f"{example_display}/"),
    ]
    return _MultiPathSubstitutionFormatter(path_rules)
|
||||
6
libraries/FastLED/ci/util/paths.py
Normal file
6
libraries/FastLED/ci/util/paths.py
Normal file
@@ -0,0 +1,6 @@
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
# Directory containing this file (ci/util/).
_HERE = Path(__file__).resolve().parent
# Repository root, two levels up from ci/util/.
PROJECT_ROOT = _HERE.parent.parent
# Shared PlatformIO build output directory.
BUILD = PROJECT_ROOT / ".build"
|
||||
381
libraries/FastLED/ci/util/process_status_display.py
Normal file
381
libraries/FastLED/ci/util/process_status_display.py
Normal file
@@ -0,0 +1,381 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Process status display classes for real-time monitoring."""
|
||||
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
from abc import ABC, abstractmethod
|
||||
from dataclasses import dataclass
|
||||
from typing import TYPE_CHECKING, List, Optional
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ci.util.running_process_group import GroupStatus, RunningProcessGroup
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class DisplayConfig:
    """Configuration for display output format."""

    format_type: str = "ascii"  # "ascii", "rich", "csv"
    use_colors: bool = True  # allow ANSI color codes in the output
    update_interval: float = 0.1  # seconds between display refreshes
    max_name_width: int = 20  # truncate process names beyond this width
    max_output_width: int = 40  # truncate output excerpts beyond this width
|
||||
|
||||
|
||||
class ProcessStatusDisplay(ABC):
    """Abstract base class for process status displays.

    Owns the background thread that periodically polls a RunningProcessGroup
    and prints throttled progress updates; subclasses implement
    ``format_status_line`` to render a full status snapshot.
    """

    def __init__(
        self, group: "RunningProcessGroup", config: Optional[DisplayConfig] = None
    ):
        self.group = group
        self.config = config or DisplayConfig()
        # Background thread running _display_loop; None until start_display().
        self._display_thread: Optional[threading.Thread] = None
        self._stop_event = threading.Event()

    @abstractmethod
    def format_status_line(
        self, group_status: "GroupStatus", spinner_index: int
    ) -> str:
        """Format the complete status display."""
        pass

    def start_display(self) -> threading.Thread:
        """Start the real-time display in a background thread.

        Idempotent: if a display thread is already running it is returned
        unchanged.
        """
        if self._display_thread and self._display_thread.is_alive():
            return self._display_thread

        self._stop_event.clear()
        self._display_thread = threading.Thread(target=self._display_loop, daemon=True)
        self._display_thread.start()
        return self._display_thread

    def stop_display(self) -> None:
        """Stop the display thread (bounded join so shutdown never hangs)."""
        if self._display_thread:
            self._stop_event.set()
            self._display_thread.join(timeout=1.0)

    def _display_loop(self) -> None:
        """Main display loop running in a background thread.

        Polls once per second but only prints a progress line every
        ``status_interval`` seconds while processes are still running.
        """
        spinner_index = 0
        last_status_time = 0
        status_interval = 2  # Show status every 2 seconds

        while not self._stop_event.is_set():
            try:
                # Wait for monitoring to start, or exit if stopped.
                # NOTE(review): reaches into the group's private
                # _status_monitoring_active flag -- consider a public accessor.
                if not self.group._status_monitoring_active:
                    time.sleep(0.1)
                    continue

                current_time = time.time()
                group_status = self.group.get_status()

                # Show periodic status updates instead of continuous display.
                if current_time - last_status_time >= status_interval:
                    running_count = sum(1 for p in group_status.processes if p.is_alive)
                    completed_count = group_status.completed_processes

                    if running_count > 0:
                        # BUGFIX: spinner frame was over-escaped ("\\\\"),
                        # which rendered two backslashes instead of one.
                        spinner_char = ["|", "/", "-", "\\"][spinner_index % 4]
                        print(
                            f"{spinner_char} Progress: {completed_count}/{group_status.total_processes} completed, {running_count} running..."
                        )
                        last_status_time = current_time

                spinner_index = (spinner_index + 1) % 4
                # Poll every second; printing is throttled by status_interval.
                time.sleep(1.0)

            except Exception as e:
                logger.warning(f"Display update error: {e}")
                time.sleep(1.0)
|
||||
|
||||
|
||||
class ASCIIStatusDisplay(ProcessStatusDisplay):
    """ASCII-compatible status display.

    Uses only 7-bit ASCII glyphs so output is safe on consoles without
    Unicode support.
    """

    def __init__(
        self, group: "RunningProcessGroup", config: Optional[DisplayConfig] = None
    ):
        if config is None:
            config = DisplayConfig(format_type="ascii")
        super().__init__(group, config)

        # BUGFIX: the spinner frame was over-escaped ("\\\\"), producing a
        # two-backslash frame instead of the conventional single backslash.
        self._spinner_chars = ["|", "/", "-", "\\"]
        self._status_chars = {
            "running": "|>",
            "done": "OK",
            "failed": "XX",
            "pending": "--",
        }

    def _status_char(self, proc_status, spinner_index: int) -> str:
        """Pick the status glyph for one process (alive takes precedence)."""
        if proc_status.is_alive:
            return self._spinner_chars[spinner_index % len(self._spinner_chars)]
        if proc_status.is_completed:
            if proc_status.return_value == 0:
                return self._status_chars["done"]
            return self._status_chars["failed"]
        return self._status_chars["pending"]

    def format_status_line(
        self, group_status: "GroupStatus", spinner_index: int
    ) -> str:
        """Format status display using ASCII characters."""
        lines: List[str] = []

        # Header
        lines.append(
            f"Process Group: {group_status.group_name} - Progress: {group_status.completed_processes}/{group_status.total_processes} ({group_status.completion_percentage:.1f}%)"
        )
        lines.append("-" * 80)

        # One row per process: glyph | name | state | duration | last output
        for proc_status in group_status.processes:
            status_char = self._status_char(proc_status, spinner_index)

            name = proc_status.name[: self.config.max_name_width].ljust(
                self.config.max_name_width
            )

            if proc_status.is_completed:
                status_text = f"DONE({proc_status.return_value or 0})"
            elif proc_status.is_alive:
                status_text = "RUNNING"
            else:
                status_text = "PENDING"

            duration = f"{proc_status.running_time_seconds:.1f}s"
            output = (proc_status.last_output_line or "")[
                : self.config.max_output_width
            ]

            lines.append(
                f"{status_char} {name} | {status_text:>8} | {duration:>8} | {output}"
            )

        # BUGFIX: previously joined with the two-character literal "\n"
        # ("\\n"), which put the whole display on one physical line.
        return "\n".join(lines)
|
||||
|
||||
|
||||
class RichStatusDisplay(ProcessStatusDisplay):
    """Rich library-based status display with enhanced formatting.

    Falls back to plain ASCII output when the ``rich`` package is not
    installed or when Rich rendering raises at runtime.
    """

    def __init__(
        self, group: "RunningProcessGroup", config: Optional[DisplayConfig] = None
    ):
        if config is None:
            config = DisplayConfig(format_type="rich")
        super().__init__(group, config)

        try:
            from rich.console import Console
            from rich.live import Live
            from rich.progress import (
                Progress,
                SpinnerColumn,
                TextColumn,
                TimeElapsedColumn,
            )
            from rich.table import Table

            # Keep the classes on the instance so rich stays an optional
            # dependency resolved at construction time.
            self.Progress = Progress
            self.Live = Live
            self.Table = Table
            self.Console = Console
            self._rich_available = True

            self._spinner_chars = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧"]
            self._status_chars = {
                "running": "⠋",
                "done": "✓",
                "failed": "✗",
                "pending": " ",
            }
        except ImportError:
            logger.warning("Rich not available, falling back to ASCII display")
            self._rich_available = False
            # BUGFIX: ASCII fallback spinner frame was over-escaped ("\\\\").
            self._spinner_chars = ["|", "/", "-", "\\"]
            self._status_chars = {
                "running": "|>",
                "done": "OK",
                "failed": "XX",
                "pending": "--",
            }

    def format_status_line(
        self, group_status: "GroupStatus", spinner_index: int
    ) -> str:
        """Format status display using Rich library features."""
        if not self._rich_available:
            # Fallback to ASCII formatting
            return self._format_ascii_fallback(group_status, spinner_index)

        try:
            return self._format_rich_display(group_status, spinner_index)
        except Exception as e:
            logger.warning(f"Rich formatting failed, using ASCII fallback: {e}")
            return self._format_ascii_fallback(group_status, spinner_index)

    def _format_rich_display(
        self, group_status: "GroupStatus", spinner_index: int
    ) -> str:
        """Format using Rich library (table rendered to a plain string)."""
        table = self.Table(title=f"Process Group: {group_status.group_name}")

        table.add_column("Status", width=8)
        table.add_column("Process", width=self.config.max_name_width)
        table.add_column("State", width=10)
        table.add_column("Duration", width=10)
        table.add_column("Last Output", width=self.config.max_output_width)

        for proc_status in group_status.processes:
            # Status glyph: alive -> spinner frame, else done/failed/pending.
            if proc_status.is_alive:
                status_char = self._spinner_chars[
                    spinner_index % len(self._spinner_chars)
                ]
            elif proc_status.is_completed:
                if proc_status.return_value == 0:
                    status_char = self._status_chars["done"]
                else:
                    status_char = self._status_chars["failed"]
            else:
                status_char = self._status_chars["pending"]

            # Format fields (Rich color markup on the state column).
            name = proc_status.name[: self.config.max_name_width]

            if proc_status.is_completed:
                status_text = f"DONE({proc_status.return_value or 0})"
                if proc_status.return_value == 0:
                    status_text = f"[green]{status_text}[/green]"
                else:
                    status_text = f"[red]{status_text}[/red]"
            elif proc_status.is_alive:
                status_text = "[yellow]RUNNING[/yellow]"
            else:
                status_text = "[dim]PENDING[/dim]"

            duration = f"{proc_status.running_time_seconds:.1f}s"
            output = (proc_status.last_output_line or "")[
                : self.config.max_output_width
            ]

            table.add_row(status_char, name, status_text, duration, output)

        # Render table to string via a capture console (no terminal control).
        console = self.Console(width=120, force_terminal=False)
        with console.capture() as capture:
            console.print(table)

        return capture.get()

    def _format_ascii_fallback(
        self, group_status: "GroupStatus", spinner_index: int
    ) -> str:
        """Fallback ASCII formatting when Rich fails."""
        lines: List[str] = []

        # Header
        lines.append(
            f"Process Group: {group_status.group_name} - Progress: {group_status.completed_processes}/{group_status.total_processes} ({group_status.completion_percentage:.1f}%)"
        )
        lines.append("-" * 80)

        # Process status lines
        for proc_status in group_status.processes:
            if proc_status.is_alive:
                status_char = self._spinner_chars[
                    spinner_index % len(self._spinner_chars)
                ]
            elif proc_status.is_completed:
                if proc_status.return_value == 0:
                    status_char = self._status_chars["done"]
                else:
                    status_char = self._status_chars["failed"]
            else:
                status_char = self._status_chars["pending"]

            name = proc_status.name[: self.config.max_name_width].ljust(
                self.config.max_name_width
            )

            if proc_status.is_completed:
                status_text = f"DONE({proc_status.return_value or 0})"
            elif proc_status.is_alive:
                status_text = "RUNNING"
            else:
                status_text = "PENDING"

            duration = f"{proc_status.running_time_seconds:.1f}s"
            output = (proc_status.last_output_line or "")[
                : self.config.max_output_width
            ]

            lines.append(
                f"{status_char} {name} | {status_text:>8} | {duration:>8} | {output}"
            )

        # BUGFIX: previously joined with the literal two-character "\n".
        return "\n".join(lines)
|
||||
|
||||
|
||||
def get_display_format() -> DisplayConfig:
    """Auto-detect best display format for current environment."""
    try:
        # Probe for Rich availability; the import is the test.
        import sys

        import rich  # noqa: F401

        encoding = (sys.stdout.encoding or "").lower()
        if encoding in ("utf-8", "utf8"):
            return DisplayConfig(format_type="rich")
    except ImportError:
        pass

    # No Rich, or a non-UTF-8 console: stick with plain ASCII output.
    return DisplayConfig(format_type="ascii")
|
||||
|
||||
|
||||
def create_status_display(
    group: "RunningProcessGroup", display_type: str = "auto"
) -> ProcessStatusDisplay:
    """Factory function to create best available display."""
    # Resolve "auto" to a concrete format first.
    if display_type == "auto":
        display_type = get_display_format().format_type

    if display_type == "rich":
        try:
            return RichStatusDisplay(group)
        except Exception as e:
            logger.warning(f"Rich display creation failed, falling back to ASCII: {e}")
            return ASCIIStatusDisplay(group)

    # Anything else (including "ascii") gets the ASCII display.
    return ASCIIStatusDisplay(group)
|
||||
|
||||
|
||||
def display_process_status(
    group: "RunningProcessGroup",
    display_type: str = "auto",
    update_interval: float = 0.1,
) -> threading.Thread:
    """Convenience function to start real-time process status display."""
    status_display = create_status_display(group, display_type)
    # Propagate the caller's refresh interval onto the chosen display.
    status_display.config.update_interval = update_interval
    return status_display.start_display()
|
||||
135
libraries/FastLED/ci/util/resumable_downloader.py
Normal file
135
libraries/FastLED/ci/util/resumable_downloader.py
Normal file
@@ -0,0 +1,135 @@
|
||||
"""Resumable HTTP downloader with chunked download support and automatic retry."""
|
||||
|
||||
import _thread
|
||||
import time
|
||||
import urllib.error
|
||||
import urllib.request
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
|
||||
class ResumableDownloader:
    """Downloader with resume capability for large files.

    Downloads are read in chunks; on connection errors the transfer is
    retried with exponential backoff, resuming from the bytes already on
    disk via an HTTP ``Range`` request.
    """

    def __init__(self, chunk_size: int = 8192, max_retries: int = 5):
        # Bytes read per iteration, and maximum retry attempts on failure.
        self.chunk_size = chunk_size
        self.max_retries = max_retries

    def download(self, url: str, file_path: Path) -> None:
        """Download with resume capability.

        Args:
            url: URL to download
            file_path: Path where to save the file

        Raises:
            urllib.error.URLError, ConnectionError, OSError: if the download
                still fails after ``max_retries`` attempts.
        """
        # (Cleanup: removed redundant function-local imports of time/urllib;
        # they are already imported at module level.)

        # Get the total file size (0 means "unknown").
        total_size = self._get_file_size(url)
        if total_size is None:
            print(f"WARNING: Could not determine file size for {url}")
            total_size = 0
        else:
            print(
                f"File size: {total_size:,} bytes ({total_size / (1024 * 1024):.1f} MB)"
            )

        # Check if partial file exists
        start_byte = 0
        if file_path.exists():
            start_byte = file_path.stat().st_size
            if start_byte == total_size:
                print(f"File already completely downloaded: {file_path}")
                return
            elif start_byte > 0:
                print(
                    f"Resuming download from byte {start_byte:,} ({start_byte / (1024 * 1024):.1f} MB)"
                )

        retry_count = 0
        while retry_count <= self.max_retries:
            try:
                self._download_range(url, file_path, start_byte, total_size)
                print(f"SUCCESS: Download completed successfully: {file_path}")
                return
            except (urllib.error.URLError, ConnectionError, OSError):
                retry_count += 1
                current_size = file_path.stat().st_size if file_path.exists() else 0

                if retry_count <= self.max_retries:
                    wait_time = min(2**retry_count, 30)  # Exponential backoff, max 30s
                    print(
                        f"\nCONNECTION LOST: At {current_size:,} bytes. Retry {retry_count}/{self.max_retries} in {wait_time}s..."
                    )
                    time.sleep(wait_time)
                    # Resume from whatever made it to disk before the failure.
                    start_byte = current_size
                else:
                    print(f"\nERROR: Download failed after {self.max_retries} retries")
                    raise
            except KeyboardInterrupt:
                # May run in a worker thread: forward Ctrl-C to the main thread.
                print("\nWARNING: Download interrupted by user")
                _thread.interrupt_main()
                raise

    def _get_file_size(self, url: str) -> Optional[int]:
        """Get the total file size via HEAD request.

        Returns None when the size cannot be determined (no Content-Length
        header, network error, ...).
        """
        try:
            req = urllib.request.Request(url, method="HEAD")
            with urllib.request.urlopen(req, timeout=30) as response:
                content_length = response.headers.get("Content-Length")
                return int(content_length) if content_length else None
        except KeyboardInterrupt:
            _thread.interrupt_main()
            raise
        except Exception:
            return None

    def _download_range(
        self, url: str, file_path: Path, start_byte: int, total_size: int
    ) -> None:
        """Download from start_byte to end of file, printing progress."""
        # Create range request (only when actually resuming).
        headers: dict[str, str] = {}
        if start_byte > 0:
            headers["Range"] = f"bytes={start_byte}-"

        req = urllib.request.Request(url, headers=headers)

        # Open file in append mode if resuming, write mode if starting fresh
        mode = "ab" if start_byte > 0 else "wb"

        with urllib.request.urlopen(req, timeout=30) as response:
            with open(file_path, mode) as f:
                downloaded = start_byte

                while True:
                    chunk = response.read(self.chunk_size)
                    if not chunk:
                        break

                    f.write(chunk)
                    downloaded += len(chunk)

                    # Progress reporting (percentage only when size is known).
                    if total_size > 0:
                        progress = downloaded / total_size * 100
                        mb_downloaded = downloaded / (1024 * 1024)
                        mb_total = total_size / (1024 * 1024)
                        print(
                            f"\rProgress: {progress:.1f}% ({mb_downloaded:.1f}/{mb_total:.1f} MB)",
                            end="",
                            flush=True,
                        )
                    else:
                        mb_downloaded = downloaded / (1024 * 1024)
                        print(
                            f"\rDownloaded: {mb_downloaded:.1f} MB", end="", flush=True
                        )

        print()  # New line after progress
|
||||
992
libraries/FastLED/ci/util/running_process.py
Normal file
992
libraries/FastLED/ci/util/running_process.py
Normal file
@@ -0,0 +1,992 @@
|
||||
# pyright: reportUnknownMemberType=false, reportMissingParameterType=false
|
||||
import _thread
|
||||
import os
|
||||
import queue
|
||||
import shlex
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
import warnings
|
||||
from pathlib import Path
|
||||
from queue import Queue
|
||||
from typing import Any, Callable, ContextManager, Iterator
|
||||
|
||||
|
||||
class EndOfStream(Exception):
    """Sentinel used to indicate end-of-stream from the reader.

    An instance is forwarded through the output queue (instead of a string)
    exactly once when the child process's stdout is exhausted.
    """
|
||||
|
||||
|
||||
from ci.util.output_formatter import NullOutputFormatter, OutputFormatter
|
||||
|
||||
|
||||
# Console UTF-8 configuration is now handled globally in ci/__init__.py
|
||||
|
||||
|
||||
class ProcessOutputReader:
    """Dedicated reader that drains a process's stdout and enqueues lines.

    This keeps the stdout pipe drained to prevent blocking and forwards
    transformed, non-empty lines to the provided output queue. It also invokes
    lifecycle callbacks for timing/unregister behaviors.
    """

    def __init__(
        self,
        proc: subprocess.Popen[Any],
        shutdown: threading.Event,
        output_formatter: OutputFormatter | None,
        on_output: Callable[[str | EndOfStream], None],
        on_end: Callable[[], None],
    ) -> None:
        # Substitute a no-op formatter so begin()/transform()/end() can be
        # called unconditionally below.
        output_formatter = output_formatter or NullOutputFormatter()
        self._proc = proc
        self._shutdown = shutdown
        self._output_formatter = output_formatter
        self._on_output = on_output
        self._on_end = on_end
        # Timestamp of the most recent stdout line (None until first line).
        self.last_stdout_ts: float | None = None
        # Guards against forwarding the EndOfStream sentinel more than once.
        self._eos_emitted: bool = False

    def _emit_eos_once(self) -> None:
        """Ensure EndOfStream is only forwarded a single time."""
        if not self._eos_emitted:
            self._eos_emitted = True
            self._on_output(EndOfStream())

    def run(self) -> None:
        """Continuously read stdout lines and forward them until EOF or shutdown."""
        try:
            # Begin formatter lifecycle within the reader context
            try:
                self._output_formatter.begin()
            except Exception as e:
                warnings.warn(f"Output formatter begin() failed: {e}")

            assert self._proc.stdout is not None

            try:
                for line in self._proc.stdout:
                    self.last_stdout_ts = time.time()
                    if self._shutdown.is_set():
                        break

                    # Skip blank lines; forward everything else through the
                    # formatter to the consumer callback.
                    line_stripped = line.rstrip()
                    if not line_stripped:
                        continue

                    transformed_line = self._output_formatter.transform(line_stripped)

                    self._on_output(transformed_line)

            except KeyboardInterrupt:
                # Per project rules, handle interrupts in threads explicitly
                thread_id = threading.current_thread().ident
                thread_name = threading.current_thread().name
                print(f"Thread {thread_id} ({thread_name}) caught KeyboardInterrupt")
                print(f"Stack trace for thread {thread_id}:")
                traceback.print_exc()
                # Try to ensure child process is terminated promptly
                try:
                    self._proc.kill()
                except Exception:
                    pass
                # Propagate to main thread and re-raise
                _thread.interrupt_main()
                # EOF
                self._emit_eos_once()
                raise

            except (ValueError, OSError) as e:
                # Normal shutdown scenarios include closed file descriptors.
                if "closed file" in str(e) or "Bad file descriptor" in str(e):
                    warnings.warn(f"Output reader encountered closed file: {e}")
                    pass
                else:
                    print(f"Warning: Output reader encountered error: {e}")
            finally:
                # Signal end-of-stream to consumers exactly once
                self._emit_eos_once()
        finally:
            # Cleanup stream and invoke completion callback
            if self._proc.stdout and not self._proc.stdout.closed:
                try:
                    self._proc.stdout.close()
                except (ValueError, OSError) as err:
                    warnings.warn(f"Output reader encountered error: {err}")
                    pass

            # Notify parent for timing/unregistration
            try:
                self._on_end()
            finally:
                # End formatter lifecycle within the reader context
                try:
                    self._output_formatter.end()
                except Exception as e:
                    warnings.warn(f"Output formatter end() failed: {e}")
|
||||
|
||||
|
||||
class ProcessWatcher:
    """Background watcher that polls a process until it terminates.

    Runs a daemon thread that enforces the RunningProcess's timeout (killing
    the child and optionally dumping a GDB stack trace) and exits as soon as
    the child terminates or shutdown is signalled.
    """

    def __init__(self, running_process: "RunningProcess") -> None:
        self._rp = running_process
        # Polling thread; None until start() is called.
        self._thread: threading.Thread | None = None

    def start(self) -> None:
        """Spawn the daemon polling thread, named after the child PID when known."""
        name: str = "RPWatcher"
        try:
            if self._rp.proc is not None and self._rp.proc.pid is not None:
                name = f"RPWatcher-{self._rp.proc.pid}"
        except Exception:
            pass

        self._thread = threading.Thread(target=self._run, name=name, daemon=True)
        self._thread.start()

    def _run(self) -> None:
        """Poll loop body executed on the watcher thread."""
        thread_id = threading.current_thread().ident
        thread_name = threading.current_thread().name
        try:
            while not self._rp.shutdown.is_set():
                # Enforce per-process timeout independently of wait()
                # NOTE(review): reads self._rp.start_time -- presumably a
                # public accessor for _start_time defined on RunningProcess;
                # confirm it exists.
                if (
                    self._rp.timeout is not None
                    and self._rp.start_time is not None
                    and (time.time() - self._rp.start_time) > self._rp.timeout
                ):
                    print(
                        f"Process timeout after {self._rp.timeout} seconds (watcher), killing: {self._rp.command}"
                    )
                    if self._rp.enable_stack_trace:
                        try:
                            print("\n" + "=" * 80)
                            print("STACK TRACE DUMP (GDB Output)")
                            print("=" * 80)
                            print(self._rp._dump_stack_trace())
                            print("=" * 80)
                        except Exception as e:
                            print(f"Watcher stack trace dump failed: {e}")
                    self._rp.kill()
                    break

                # Child exited on its own -- stop watching.
                rc: int | None = self._rp.poll()
                if rc is not None:
                    break
                time.sleep(0.1)
        except KeyboardInterrupt:
            # Per project rules: report and forward interrupts to main thread.
            print(f"Thread {thread_id} ({thread_name}) caught KeyboardInterrupt")
            print(f"Stack trace for thread {thread_id}:")
            traceback.print_exc()
            _thread.interrupt_main()
            raise
        except Exception as e:
            # Surface unexpected errors and keep behavior consistent
            print(f"Watcher thread error in {thread_name}: {e}")
            traceback.print_exc()

    @property
    def thread(self) -> threading.Thread | None:
        """The watcher thread, or None if start() was never called."""
        return self._thread
|
||||
|
||||
|
||||
class _RunningProcessLineIterator(ContextManager[Iterator[str]], Iterator[str]):
    """Context-managed iterator over a RunningProcess's output lines.

    Yields strings only, never None. Iteration stops when the stream's
    EndOfStream sentinel is observed; a per-line timeout (if given) is
    forwarded to get_next_line(), which raises when it expires.
    """

    def __init__(self, rp: "RunningProcess", timeout: float | None) -> None:
        self._process = rp
        self._line_timeout = timeout

    # -- context manager protocol -------------------------------------
    def __enter__(self) -> "_RunningProcessLineIterator":
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        tb: Any | None,
    ) -> bool:
        # Never suppress exceptions raised inside the with-block.
        return False

    # -- iterator protocol --------------------------------------------
    def __iter__(self) -> Iterator[str]:
        return self

    def __next__(self) -> str:
        item = self._process.get_next_line(timeout=self._line_timeout)
        if isinstance(item, EndOfStream):
            raise StopIteration
        # By contract, anything that is not EndOfStream is a string.
        return item
|
||||
|
||||
|
||||
class RunningProcess:
|
||||
"""
|
||||
A class to manage and stream output from a running subprocess.
|
||||
|
||||
This class provides functionality to execute shell commands, stream their output
|
||||
in real-time via a queue, and control the subprocess execution. It merges stderr
|
||||
into stdout and provides thread-safe access to process output.
|
||||
|
||||
Key features:
|
||||
- Real-time output streaming via queue
|
||||
- Thread-safe output consumption
|
||||
- Timeout protection with optional stack traces
|
||||
- Echo mode for immediate output printing
|
||||
- Process tree termination support
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
command: str | list[str],
|
||||
cwd: Path | None = None,
|
||||
check: bool = False,
|
||||
auto_run: bool = True,
|
||||
shell: bool | None = None,
|
||||
timeout: int | None = None, # None means no global timeout
|
||||
enable_stack_trace: bool = False, # Enable stack trace dumping on timeout
|
||||
on_complete: Callable[[], None]
|
||||
| None = None, # Callback to execute when process completes
|
||||
output_formatter: OutputFormatter | None = None,
|
||||
env: dict[str, str] | None = None, # Environment variables
|
||||
):
|
||||
"""
|
||||
Initialize the RunningProcess instance.
|
||||
|
||||
Note: stderr is automatically merged into stdout for unified output handling.
|
||||
|
||||
Args:
|
||||
command: The command to execute as string or list of arguments.
|
||||
cwd: Working directory to execute the command in.
|
||||
check: If True, raise CalledProcessError if command returns non-zero exit code.
|
||||
auto_run: If True, automatically start the command when instance is created.
|
||||
shell: Shell execution mode. None auto-detects based on command type.
|
||||
timeout: Global timeout in seconds for process execution. None disables timeout.
|
||||
enable_stack_trace: If True, dump GDB stack trace when process times out.
|
||||
on_complete: Callback function executed when process completes normally.
|
||||
output_formatter: Optional formatter for transforming output lines.
|
||||
env: Environment variables to pass to the subprocess. None uses current environment.
|
||||
"""
|
||||
if shell is None:
|
||||
# Default: use shell only when given a string, or when a list includes shell metachars
|
||||
if isinstance(command, str):
|
||||
shell = True
|
||||
elif isinstance(command, list):
|
||||
shell_meta = {"&&", "||", "|", ";", ">", "<", "2>", "&"}
|
||||
shell = any(part in shell_meta for part in command)
|
||||
else:
|
||||
shell = False
|
||||
self.command = command
|
||||
self.shell: bool = shell
|
||||
self.cwd = str(cwd) if cwd is not None else None
|
||||
self.env = env
|
||||
self.output_queue: Queue[str | EndOfStream] = Queue()
|
||||
self.accumulated_output: list[str] = [] # Store all output for later retrieval
|
||||
self.proc: subprocess.Popen[Any] | None = None
|
||||
self.check = check
|
||||
# Force auto_run to False if NO_PARALLEL is set
|
||||
self.auto_run = False if os.environ.get("NO_PARALLEL") else auto_run
|
||||
self.timeout = timeout
|
||||
self.enable_stack_trace = enable_stack_trace
|
||||
self.on_complete = on_complete
|
||||
# Always keep a non-None formatter
|
||||
self.output_formatter = (
|
||||
output_formatter if output_formatter is not None else NullOutputFormatter()
|
||||
)
|
||||
self.reader_thread: threading.Thread | None = None
|
||||
self.watcher_thread: threading.Thread | None = None
|
||||
self.shutdown: threading.Event = threading.Event()
|
||||
self._start_time: float | None = None
|
||||
self._end_time: float | None = None
|
||||
self._time_last_stdout_line: float | None = None
|
||||
self._termination_notified: bool = False
|
||||
if auto_run:
|
||||
self.run()
|
||||
|
||||
def get_command_str(self) -> str:
|
||||
return (
|
||||
subprocess.list2cmdline(self.command)
|
||||
if isinstance(self.command, list)
|
||||
else self.command
|
||||
)
|
||||
|
||||
def _dump_stack_trace(self) -> str:
|
||||
"""
|
||||
Dump stack trace of the running process using GDB.
|
||||
|
||||
Returns:
|
||||
str: GDB output containing stack trace information.
|
||||
"""
|
||||
if self.proc is None:
|
||||
return "No process to dump stack trace for."
|
||||
|
||||
try:
|
||||
# Get the process ID
|
||||
pid = self.proc.pid
|
||||
|
||||
# Create GDB script for attaching to running process
|
||||
with tempfile.NamedTemporaryFile(mode="w+", delete=False) as gdb_script:
|
||||
gdb_script.write("set pagination off\n")
|
||||
gdb_script.write(f"attach {pid}\n")
|
||||
gdb_script.write("bt full\n")
|
||||
gdb_script.write("info registers\n")
|
||||
gdb_script.write("x/16i $pc\n")
|
||||
gdb_script.write("thread apply all bt full\n")
|
||||
gdb_script.write("detach\n")
|
||||
gdb_script.write("quit\n")
|
||||
|
||||
# Run GDB to get stack trace
|
||||
gdb_command = f"gdb -batch -x {gdb_script.name}"
|
||||
gdb_process = subprocess.Popen(
|
||||
gdb_command,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
shell=True,
|
||||
text=True,
|
||||
)
|
||||
|
||||
gdb_output, _ = gdb_process.communicate(
|
||||
timeout=30
|
||||
) # 30 second timeout for GDB
|
||||
|
||||
# Clean up GDB script
|
||||
os.unlink(gdb_script.name)
|
||||
|
||||
return gdb_output
|
||||
|
||||
except Exception as e:
|
||||
return f"Failed to dump stack trace: {e}"
|
||||
|
||||
    def time_last_stdout_line(self) -> float | None:
        """Return the timestamp of the last recorded stdout line, or None
        if no line has been recorded."""
        return self._time_last_stdout_line
|
||||
|
||||
    def _handle_timeout(self, timeout: float, echo: bool = False) -> None:
        """Handle process timeout with optional stack trace and cleanup.

        Args:
            timeout: The timeout (seconds) that was exceeded; used in messages.
            echo: If True, print any still-queued output lines before killing.

        Raises:
            TimeoutError: always, after the process has been killed.
        """
        cmd_str = self.get_command_str()

        # Drain any remaining output before killing if echo is enabled
        if echo:
            remaining_lines = self.drain_stdout()
            for line in remaining_lines:
                print(
                    line, flush=(os.name == "nt")
                )  # Force flush only on Windows per-line
            if remaining_lines:
                print(
                    f"[Drained {len(remaining_lines)} final lines before timeout]",
                    flush=(os.name == "nt"),
                )

        if self.enable_stack_trace:
            print(f"\nProcess timeout after {timeout} seconds, dumping stack trace...")
            print(f"Command: {cmd_str}")
            # NOTE(review): assumes self.proc is not None here -- presumably
            # callers only invoke this after the process was started; confirm.
            print(f"Process ID: {self.proc.pid}")

            try:
                stack_trace = self._dump_stack_trace()
                print("\n" + "=" * 80)
                print("STACK TRACE DUMP (GDB Output)")
                print("=" * 80)
                print(stack_trace)
                print("=" * 80)
            except Exception as e:
                print(f"Failed to dump stack trace: {e}")

        print(f"Killing timed out process: {cmd_str}")
        self.kill()
        raise TimeoutError(f"Process timed out after {timeout} seconds: {cmd_str}")
|
||||
|
||||
def drain_stdout(self) -> list[str]:
|
||||
"""
|
||||
Drain all currently pending stdout lines without blocking.
|
||||
|
||||
Consumes all available lines from the output queue until either the queue
|
||||
is empty or EndOfStream is encountered. The EndOfStream sentinel is preserved
|
||||
by get_next_line() for other callers.
|
||||
|
||||
Returns:
|
||||
List of output lines that were available. Empty list if no output pending.
|
||||
"""
|
||||
lines: list[str] = []
|
||||
|
||||
while True:
|
||||
try:
|
||||
line = self.get_next_line(timeout=0)
|
||||
if isinstance(line, EndOfStream):
|
||||
break # get_next_line already handled EndOfStream preservation
|
||||
lines.append(line)
|
||||
except TimeoutError:
|
||||
break # Queue is empty
|
||||
|
||||
return lines
|
||||
|
||||
def has_pending_output(self) -> bool:
    """Report whether real output lines are waiting, without consuming them.

    Returns:
        True when the queue holds at least one actual line. False when the
        queue is empty, or when its only entry is the EndOfStream sentinel.
    """
    try:
        q = self.output_queue
        with q.mutex:
            pending = len(q.queue)
            if pending == 0:
                return False
            # A lone EndOfStream sentinel does not count as pending output.
            if pending == 1 and isinstance(q.queue[0], EndOfStream):
                return False
            return True
    except Exception:
        # Be conservative: any introspection failure reads as "nothing pending".
        return False
|
||||
|
||||
def run(self) -> None:
    """
    Spawn the command and start the background threads that pump its output.

    Side effects: creates self.proc (stderr merged into stdout, UTF-8 text
    mode), records self._start_time, registers with the global process
    manager (best effort), and starts a daemon reader thread plus a watcher
    thread. Does not wait for completion — see wait().

    Note: this method itself does not raise on a non-zero exit code; that is
    handled by callers that use wait()/check.
    """
    assert self.proc is None
    shell = self.shell
    popen_command: str | list[str]
    if shell and isinstance(self.command, list):
        # Convert list to a single shell string with proper quoting
        popen_command = subprocess.list2cmdline(self.command)
    else:
        popen_command = self.command

    self.proc = subprocess.Popen(
        popen_command,
        shell=shell,
        cwd=self.cwd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,  # Merge stderr into stdout
        text=True,  # Use text mode
        encoding="utf-8",  # Explicitly use UTF-8
        errors="replace",  # Replace invalid chars instead of failing
        env=self.env,  # Use provided environment variables
    )

    # Track start time after process is successfully created
    # This excludes process creation overhead from timing measurements
    self._start_time = time.time()

    # Register with global process manager (best effort; failure only warns)
    try:
        from ci.util.running_process_manager import (
            RunningProcessManagerSingleton,
        )

        RunningProcessManagerSingleton.register(self)
    except Exception as e:
        warnings.warn(f"RunningProcessManager.register failed: {e}")

    # Output formatter lifecycle is managed by ProcessOutputReader

    # Prepare output reader helper
    assert self.proc is not None

    def _on_reader_end() -> None:
        # Set end time when stdout pumper finishes; captures completion time of useful output
        if self._end_time is None:
            self._end_time = time.time()
        # Unregister when stdout is fully drained
        try:
            self._notify_terminated()
        except Exception as e:
            warnings.warn(f"RunningProcess termination notify (drain) failed: {e}")

    def _on_output(item: str | EndOfStream) -> None:
        # Forward to queue and capture text lines for accumulated output.
        # The EndOfStream sentinel is queued but NOT appended to
        # accumulated_output, so stdout stays pure text.
        if isinstance(item, EndOfStream):
            self.output_queue.put(item)
        else:
            # Track time of last stdout line observed
            self._time_last_stdout_line = time.time()
            self.output_queue.put(item)
            self.accumulated_output.append(item)

    reader = ProcessOutputReader(
        proc=self.proc,
        shutdown=self.shutdown,
        output_formatter=self.output_formatter,
        on_output=_on_output,
        on_end=_on_reader_end,
    )

    # Start output reader thread (daemon so it never blocks interpreter exit)
    self.reader_thread = threading.Thread(target=reader.run, daemon=True)
    self.reader_thread.start()

    # Start watcher thread via helper class and expose thread for compatibility
    self._watcher = ProcessWatcher(self)
    self._watcher.start()
    self.watcher_thread = self._watcher.thread
|
||||
|
||||
def get_next_line(self, timeout: float | None = None) -> str | EndOfStream:
    """
    Get the next line of output from the process.

    The EndOfStream sentinel is peeked (or put back) rather than consumed,
    so multiple callers can all observe end-of-stream.

    Args:
        timeout: How long to wait for the next line in seconds.
            None means wait forever, 0 means don't wait.

    Returns:
        str: The next line of output if available.
        EndOfStream: Process has finished, no more output will be available.

    Raises:
        TimeoutError: If timeout is reached before a line becomes available.
    """
    assert self.proc is not None

    # Fast non-blocking path: honor timeout==0 by peeking before raising
    if timeout == 0:
        # Peek EOS without consuming
        with self.output_queue.mutex:
            if len(self.output_queue.queue) > 0:
                head = self.output_queue.queue[0]
                if isinstance(head, EndOfStream):
                    return EndOfStream()
        # Try immediate get
        try:
            item_nb: str | EndOfStream = self.output_queue.get_nowait()
            if isinstance(item_nb, EndOfStream):
                # Race: sentinel arrived after the peek; restore it for others
                with self.output_queue.mutex:
                    self.output_queue.queue.appendleft(item_nb)
                return EndOfStream()
            return item_nb
        except queue.Empty:
            if self.finished:
                return EndOfStream()
            raise TimeoutError("Timeout after 0 seconds")

    expired_time = time.time() + timeout if timeout is not None else None

    while True:
        if expired_time is not None and time.time() > expired_time:
            raise TimeoutError(f"Timeout after {timeout} seconds")

        # Peek without popping if EndOfStream is at the front
        with self.output_queue.mutex:
            if len(self.output_queue.queue) > 0:
                head = self.output_queue.queue[0]
                if isinstance(head, EndOfStream):
                    return EndOfStream()

        # Nothing available yet; wait briefly in blocking mode (10ms poll)
        if self.output_queue.empty():
            time.sleep(0.01)
            if self.finished and self.output_queue.empty():
                return EndOfStream()
            continue

        try:
            # Safe to pop now; head is not EndOfStream
            item: str | EndOfStream = self.output_queue.get(timeout=0.1)
            if isinstance(item, EndOfStream):
                # In rare race conditions, EndOfStream could appear after peek; put back for other callers
                with self.output_queue.mutex:
                    self.output_queue.queue.appendleft(item)
                return EndOfStream()
            return item
        except queue.Empty:
            if self.finished:
                return EndOfStream()
            continue
|
||||
|
||||
def get_next_line_non_blocking(self) -> str | None | EndOfStream:
|
||||
"""
|
||||
Get the next line of output from the process without blocking.
|
||||
|
||||
Returns:
|
||||
str: Next line of output if available
|
||||
None: No output available right now (should continue polling)
|
||||
EndOfStream: Process has finished, no more output will be available
|
||||
"""
|
||||
try:
|
||||
line: str | EndOfStream = self.get_next_line(timeout=0)
|
||||
return line # get_next_line already handled EndOfStream preservation
|
||||
except TimeoutError:
|
||||
# Check if process finished while we were waiting
|
||||
if self.finished:
|
||||
return EndOfStream()
|
||||
return None
|
||||
|
||||
def poll(self) -> int | None:
|
||||
"""
|
||||
Check the return code of the process.
|
||||
"""
|
||||
if self.proc is None:
|
||||
return None
|
||||
rc = self.proc.poll()
|
||||
if rc is not None:
|
||||
# Ensure unregistration only happens once
|
||||
try:
|
||||
self._notify_terminated()
|
||||
except Exception as e:
|
||||
warnings.warn(f"RunningProcess termination notify (poll) failed: {e}")
|
||||
return rc
|
||||
|
||||
@property
def finished(self) -> bool:
    """Whether the underlying process has exited (an exit code is available)."""
    exit_code = self.poll()
    return exit_code is not None
|
||||
|
||||
def wait(self, echo: bool = False, timeout: float | None = None) -> int:
    """
    Wait for the process to complete with timeout protection.

    When echo=True, continuously drains and prints stdout lines while waiting.
    Performs final output drain after process completion and thread cleanup,
    invokes the on_complete callback, and fires the idempotent termination
    notification.

    Args:
        echo: If True, continuously print stdout lines as they become available.
        timeout: Overall timeout in seconds. If None, uses instance timeout.
            If both are None, waits indefinitely.

    Returns:
        Process exit code (1 when an interrupt-style exit code is detected,
        after interrupting the main thread).

    Raises:
        ValueError: If the process hasn't been started.
        TimeoutError: If the process exceeds the timeout duration (raised by
            _handle_timeout).
    """
    if self.proc is None:
        raise ValueError("Process is not running.")

    # Determine effective timeout: parameter > instance > none
    effective_timeout = timeout if timeout is not None else self.timeout

    # Use a timeout to prevent hanging
    start_time = time.time()

    while self.poll() is None:
        # Check overall timeout
        if (
            effective_timeout is not None
            and (time.time() - start_time) > effective_timeout
        ):
            self._handle_timeout(effective_timeout, echo=echo)

        # Echo: drain all available output, then sleep
        if echo:
            lines = self.drain_stdout()
            if lines:
                for line in lines:
                    # Use print flush=True for Windows compatibility, avoid separate flush calls
                    print(
                        line, flush=(os.name == "nt")
                    )  # Force flush only on Windows per-line
                # Additional flush for Unix systems for better performance
                if os.name != "nt":
                    sys.stdout.flush()
                continue  # Check for more output immediately

        time.sleep(0.01)  # Check every 10ms

    # Process completed - drain any remaining output if echo is enabled
    if echo:
        remaining_lines = self.drain_stdout()
        for line in remaining_lines:
            print(
                line, flush=(os.name == "nt")
            )  # Force flush only on Windows per-line
        if remaining_lines:
            print(
                f"[Drained {len(remaining_lines)} final lines after completion]",
                flush=(os.name == "nt"),
            )

    # Process has completed, get return code
    assert self.proc is not None  # For type checker
    rtn = self.proc.returncode
    assert rtn is not None  # Process has completed, so returncode exists

    # NOTE(review): 3221225786 is 0xC000013A (STATUS_CONTROL_C_EXIT on
    # Windows); -11 is SIGSEGV on POSIX, not SIGINT (-2) — confirm the -11
    # case is intentional.
    is_keyboard_interrupt = (rtn == -11) or (rtn == 3221225786)
    if is_keyboard_interrupt:
        import _thread

        print("Keyboard interrupt detected, interrupting main thread")
        _thread.interrupt_main()
        return 1

    # Record end time only if not already set by output reader
    # The output reader sets end time when stdout pumper finishes, which is more accurate
    if self._end_time is None:
        self._end_time = time.time()

    # Wait for reader thread to finish and cleanup
    if self.reader_thread is not None:
        self.reader_thread.join(
            timeout=0.05
        )  # 50ms should be plenty for thread cleanup
        if self.reader_thread.is_alive():
            # Reader thread didn't finish, force shutdown
            self.shutdown.set()
            self.reader_thread.join(timeout=0.05)  # 50ms for forced shutdown

    # Final drain after reader threads shut down - catch any remaining queued output
    if echo:
        final_lines = self.drain_stdout()
        for line in final_lines:
            print(
                line, flush=(os.name == "nt")
            )  # Force flush only on Windows per-line

    # Execute completion callback if provided (failure is reported, not raised)
    if self.on_complete is not None:
        try:
            self.on_complete()
        except Exception as e:
            print(f"Warning: on_complete callback failed: {e}")

    # Output formatter end is handled by ProcessOutputReader

    # Unregister from global process manager on normal completion
    try:
        self._notify_terminated()
    except Exception as e:
        warnings.warn(f"RunningProcess termination notify (wait) failed: {e}")

    return rtn
|
||||
|
||||
def kill(self) -> None:
    """
    Immediately terminate the process and all child processes.

    Signals reader threads to shutdown, kills the entire process tree to prevent
    orphaned processes, and waits for thread cleanup. Safe to call multiple times.

    Note: Does not raise if process is already terminated or was never started.
    On KeyboardInterrupt during the tree kill, a best-effort direct kill is
    attempted and the interrupt is re-raised.
    """
    if self.proc is None:
        return

    # Record end time when killed (only if not already set by output reader)
    if self._end_time is None:
        self._end_time = time.time()

    # Signal reader thread to stop
    self.shutdown.set()

    # Kill the entire process tree (parent + all children)
    # This prevents orphaned clang++ processes from hanging the system
    try:
        from ci.util.test_env import kill_process_tree

        kill_process_tree(self.proc.pid)
    except KeyboardInterrupt:
        print("Keyboard interrupt detected, interrupting main thread")
        _thread.interrupt_main()
        try:
            self.proc.kill()
        except (ProcessLookupError, PermissionError, OSError, ValueError) as e:
            print(f"Warning: Failed to kill process tree for {self.proc.pid}: {e}")
            pass
        raise
    except Exception as e:
        # Fallback to simple kill if tree kill fails
        print(f"Warning: Failed to kill process tree for {self.proc.pid}: {e}")
        try:
            self.proc.kill()
        except (ProcessLookupError, PermissionError, OSError, ValueError):
            pass  # Process might already be dead

    # Wait for reader thread to finish
    if self.reader_thread is not None:
        self.reader_thread.join(timeout=0.05)  # 50ms should be plenty for cleanup

    # Ensure unregistration even on forced kill
    try:
        from ci.util.running_process_manager import (
            RunningProcessManagerSingleton,
        )

        RunningProcessManagerSingleton.unregister(self)
    except Exception as e:
        warnings.warn(f"RunningProcessManager.unregister (kill) failed: {e}")
|
||||
|
||||
def _notify_terminated(self) -> None:
|
||||
"""Idempotent notification that the process has terminated.
|
||||
|
||||
Ensures unregister is called only once across multiple termination paths
|
||||
(poll, wait, stdout drain, watcher thread) and records end time when
|
||||
available.
|
||||
"""
|
||||
if self._termination_notified:
|
||||
return
|
||||
self._termination_notified = True
|
||||
|
||||
# Record end time only if not already set
|
||||
if self._end_time is None:
|
||||
self._end_time = time.time()
|
||||
|
||||
try:
|
||||
from ci.util.running_process_manager import RunningProcessManagerSingleton
|
||||
|
||||
RunningProcessManagerSingleton.unregister(self)
|
||||
except Exception as e:
|
||||
warnings.warn(f"RunningProcessManager.unregister notify failed: {e}")
|
||||
|
||||
def terminate(self) -> None:
    """Request a graceful shutdown (SIGTERM) of the running process.

    Signals the reader threads to stop first, then asks the OS to
    terminate the child.

    Raises:
        ValueError: If the process hasn't been started.
    """
    if self.proc is None:
        raise ValueError("Process is not running.")
    self.shutdown.set()
    self.proc.terminate()
|
||||
|
||||
@property
|
||||
def returncode(self) -> int | None:
|
||||
if self.proc is None:
|
||||
return None
|
||||
return self.proc.returncode
|
||||
|
||||
@property
|
||||
def start_time(self) -> float | None:
|
||||
"""Get the process start time"""
|
||||
return self._start_time
|
||||
|
||||
@property
|
||||
def end_time(self) -> float | None:
|
||||
"""Get the process end time"""
|
||||
return self._end_time
|
||||
|
||||
@property
|
||||
def duration(self) -> float | None:
|
||||
"""Get the process duration in seconds, or None if not completed"""
|
||||
if self._start_time is None or self._end_time is None:
|
||||
return None
|
||||
return self._end_time - self._start_time
|
||||
|
||||
@property
def stdout(self) -> str:
    """All output captured so far, joined with newlines.

    Built from accumulated_output, which the reader thread appends to as
    lines arrive, so this works even while the process is still running.

    Returns:
        Combined output; empty string if nothing has been captured yet.
    """
    captured = self.accumulated_output
    return "\n".join(captured)
|
||||
|
||||
def line_iter(self, timeout: float | None) -> _RunningProcessLineIterator:
    """Create a context-managed iterator over this process's stdout lines.

    Args:
        timeout: Per-line timeout in seconds; None waits indefinitely for
            each line.

    Returns:
        An iterator (usable in a ``with`` statement) yielding non-empty,
        transformed stdout lines.
    """
    iterator = _RunningProcessLineIterator(self, timeout)
    return iterator
|
||||
|
||||
|
||||
# NOTE: RunningProcessManager and its singleton live in ci/util/running_process_manager.py
|
||||
|
||||
|
||||
def subprocess_run(
    command: str | list[str],
    cwd: Path | None,
    check: bool,
    timeout: int,
    enable_stack_trace: bool,
) -> subprocess.CompletedProcess[str]:
    """
    Execute a command with robust stdout handling, emulating subprocess.run().

    Uses RunningProcess as the backend to provide:
    - Continuous stdout streaming to prevent pipe blocking
    - Merged stderr into stdout for unified output
    - Timeout protection with optional stack trace dumping
    - Standard subprocess.CompletedProcess return value

    Args:
        command: Command to execute as string or list of arguments.
        cwd: Working directory for command execution. Required parameter.
        check: If True, raise CalledProcessError for non-zero exit codes.
        timeout: Maximum execution time in seconds.
        enable_stack_trace: Enable GDB stack trace dumping on timeout.

    Returns:
        CompletedProcess with combined stdout and process return code.
        stderr field is None since it's merged into stdout.

    Raises:
        RuntimeError: If process times out (wraps the internal TimeoutError;
            any captured output remains available via the chained exception's
            originating RunningProcess).
        subprocess.CalledProcessError: If check=True and process exits with a
            non-zero code.
    """
    # Use RunningProcess for robust stdout pumping with merged stderr
    proc = RunningProcess(
        command=command,
        cwd=cwd,
        check=False,
        auto_run=True,
        timeout=timeout,
        enable_stack_trace=enable_stack_trace,
        on_complete=None,
        output_formatter=None,
    )

    try:
        return_code: int = proc.wait()
    except KeyboardInterrupt:
        # Propagate interrupt behavior consistent with subprocess.run
        raise
    except TimeoutError as e:
        # Fix: the previous version captured proc.stdout into an unused local
        # here; the chained TimeoutError preserves context instead.
        raise RuntimeError(
            f"CRITICAL: Process timed out after {timeout} seconds: {command}"
        ) from e

    combined_stdout: str = proc.stdout

    # Construct CompletedProcess (stderr is merged into stdout by design)
    completed = subprocess.CompletedProcess(
        args=command,
        returncode=return_code,
        stdout=combined_stdout,
        stderr=None,
    )

    if check and return_code != 0:
        # Raise the standard exception with captured output
        raise subprocess.CalledProcessError(
            returncode=return_code,
            cmd=command,
            output=combined_stdout,
            stderr=None,
        )

    return completed
|
||||
689	libraries/FastLED/ci/util/running_process_group.py	Normal file
@@ -0,0 +1,689 @@
|
||||
#!/usr/bin/env python3
|
||||
# pyright: reportUnknownMemberType=false, reportUnknownVariableType=false, reportUnknownArgumentType=false
|
||||
"""RunningProcessGroup - Unified process execution management for FastLED CI."""
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timedelta
|
||||
from enum import Enum
|
||||
from typing import Dict, List, Optional, cast
|
||||
|
||||
from ci.util.running_process import RunningProcess
|
||||
from ci.util.test_exceptions import (
|
||||
TestExecutionFailedException,
|
||||
TestFailureInfo,
|
||||
TestTimeoutException,
|
||||
)
|
||||
from ci.util.test_runner import (
|
||||
_GLOBAL_TIMEOUT,
|
||||
MAX_FAILURES_BEFORE_ABORT,
|
||||
ProcessOutputHandler,
|
||||
ProcessStuckMonitor,
|
||||
ProcessTiming,
|
||||
StuckProcessSignal,
|
||||
_extract_test_name,
|
||||
_get_friendly_test_name,
|
||||
_handle_stuck_processes,
|
||||
extract_error_snippet,
|
||||
)
|
||||
|
||||
|
||||
class ExecutionMode(Enum):
    """Execution mode for process groups.

    Values are the string identifiers used in logs/serialization; each mode
    is dispatched by RunningProcessGroup.run().
    """

    # Dispatched to _run_parallel(): all processes started and monitored together.
    PARALLEL = "parallel"
    # Dispatched to _run_sequential().
    SEQUENTIAL = "sequential"
    # Dispatched to _run_with_dependencies(), honoring registered dependencies.
    SEQUENTIAL_WITH_DEPENDENCIES = "sequential_with_dependencies"
|
||||
|
||||
|
||||
@dataclass
class ProcessStatus:
    """Point-in-time status snapshot for a single managed process."""

    name: str
    is_alive: bool
    is_completed: bool
    start_time: datetime
    running_duration: timedelta
    last_output_line: Optional[str] = None
    return_value: Optional[int] = None

    @property
    def running_time_seconds(self) -> float:
        """Running duration expressed in seconds, for display purposes."""
        return self.running_duration.total_seconds()
|
||||
|
||||
|
||||
@dataclass
class GroupStatus:
    """Aggregated status snapshot for every process in a group."""

    group_name: str
    processes: List[ProcessStatus]
    total_processes: int
    completed_processes: int
    failed_processes: int

    @property
    def completion_percentage(self) -> float:
        """Share of processes that have completed, as a value from 0 to 100."""
        total = self.total_processes
        # Treat an empty group as fully complete.
        return 100.0 if total == 0 else (self.completed_processes / total) * 100.0
|
||||
|
||||
|
||||
@dataclass
class ProcessExecutionConfig:
    """Configuration for how a process group should execute."""

    # Scheduling strategy; see ExecutionMode.
    execution_mode: ExecutionMode = ExecutionMode.PARALLEL
    # Global wall-clock limit for the whole group; when None, _run_parallel
    # derives it from the per-process timeouts (max + 60s buffer).
    timeout_seconds: Optional[int] = None
    # Abort remaining processes once this many have failed.
    max_failures_before_abort: int = MAX_FAILURES_BEFORE_ABORT
    # Forwarded to ProcessOutputHandler.
    verbose: bool = False
    # Enables the ProcessStuckMonitor thread in _run_parallel.
    enable_stuck_detection: bool = True
    # Passed to ProcessStuckMonitor; presumably seconds of inactivity before
    # a process counts as stuck — TODO confirm against ProcessStuckMonitor.
    stuck_timeout_seconds: int = _GLOBAL_TIMEOUT
    # Real-time display options
    display_type: str = "auto"  # "auto", "rich", "textual", "ascii"
    live_updates: bool = True
    update_interval: float = 0.1
|
||||
|
||||
|
||||
class RunningProcessGroup:
|
||||
"""Manages execution of a group of RunningProcess instances."""
|
||||
|
||||
def __init__(
    self,
    processes: Optional[List[RunningProcess]] = None,
    config: Optional[ProcessExecutionConfig] = None,
    name: str = "ProcessGroup",
):
    """Set up a process group.

    Args:
        processes: Initial RunningProcess instances to manage (may be empty).
        config: Execution behavior settings; defaults to a fresh
            ProcessExecutionConfig().
        name: Human-readable group name used in log output.
    """
    self.name = name
    self.processes = processes or []
    self.config = config or ProcessExecutionConfig()
    self._dependencies: Dict[RunningProcess, List[RunningProcess]] = {}

    # Bookkeeping for real-time status reporting.
    self._process_start_times: Dict[RunningProcess, datetime] = {}
    self._process_last_output: Dict[RunningProcess, str] = {}
    self._status_monitoring_active: bool = False
|
||||
def add_process(self, process: RunningProcess) -> None:
    """Register *process* with this group, ignoring duplicates.

    Args:
        process: The RunningProcess instance to manage.
    """
    if process in self.processes:
        return
    self.processes.append(process)
|
||||
|
||||
def add_dependency(
    self, process: RunningProcess, depends_on: RunningProcess
) -> None:
    """Record that *process* must not start until *depends_on* completes.

    Both processes are added to the group if not already present.

    Args:
        process: The dependent process.
        depends_on: The process that must complete first.
    """
    for member in (process, depends_on):
        if member not in self.processes:
            self.add_process(member)

    self._dependencies.setdefault(process, []).append(depends_on)
|
||||
|
||||
def add_sequential_chain(self, processes: List[RunningProcess]) -> None:
    """Register a list of processes that must execute in order.

    Args:
        processes: Processes to run sequentially, first to last.
    """
    for member in processes:
        self.add_process(member)

    # Each process depends on its immediate predecessor in the list.
    for earlier, later in zip(processes, processes[1:]):
        self.add_dependency(later, earlier)
|
||||
|
||||
def run(self) -> List[ProcessTiming]:
    """Execute all processes according to the configured execution mode.

    Returns:
        ProcessTiming results for the executed processes (empty if the
        group has no processes).

    Raises:
        ValueError: If the configured execution mode is unrecognized.
    """
    if not self.processes:
        return []

    print(f"Running process group: {self.name}")

    mode = self.config.execution_mode
    dispatch = {
        ExecutionMode.PARALLEL: self._run_parallel,
        ExecutionMode.SEQUENTIAL: self._run_sequential,
        ExecutionMode.SEQUENTIAL_WITH_DEPENDENCIES: self._run_with_dependencies,
    }
    runner = dispatch.get(mode)
    if runner is None:
        raise ValueError(f"Unknown execution mode: {mode}")
    return runner()
|
||||
|
||||
def _run_parallel(self) -> List[ProcessTiming]:
    """Execute processes in parallel (based on test_runner._run_processes_parallel).

    Starts every process, then polls in a monitor loop that enforces a
    global timeout, stuck-process detection, and a failure-count abort
    threshold. After the loop, aggregated failures are raised as typed
    exceptions.

    Returns:
        ProcessTiming entries for processes that completed.

    Raises:
        TestTimeoutException: If the global timeout expires.
        TestExecutionFailedException: If the failure threshold is exceeded,
            or any process exited non-zero, or was killed as stuck.
    """
    if not self.processes:
        return []

    # Create a shared output handler for formatting
    # NOTE(review): output_handler is never referenced below — presumably
    # consumed via _process_active_tests or a side effect; confirm.
    output_handler = ProcessOutputHandler(verbose=self.config.verbose)

    # Configure Windows console for UTF-8 output if needed
    if os.name == "nt":  # Windows
        if hasattr(sys.stdout, "reconfigure"):
            sys.stdout.reconfigure(encoding="utf-8", errors="replace")  # type: ignore
        if hasattr(sys.stderr, "reconfigure"):
            sys.stderr.reconfigure(encoding="utf-8", errors="replace")  # type: ignore

    # Track start times and enable status monitoring
    self._status_monitoring_active = True
    for proc in self.processes:
        self._track_process_start(proc)

    # Start processes that aren't already running
    for proc in self.processes:
        cmd_str = proc.get_command_str()
        if proc.proc is None:  # Only start if not already running
            proc.run()
            print(f"Started: {cmd_str}")
        else:
            print(f"Process already running: {cmd_str}")

    # Monitor all processes for output and completion
    active_processes = self.processes.copy()
    start_time = time.time()

    runner_timeouts: list[int] = [
        p.timeout for p in self.processes if p.timeout is not None
    ]
    global_timeout: int | None = self.config.timeout_seconds
    if global_timeout is None and runner_timeouts:
        global_timeout = max(runner_timeouts) + 60  # Add 1 minute buffer

    # Track last activity time for each process to detect stuck processes
    # NOTE(review): last_activity_time is never read below — stuck detection
    # is handled by ProcessStuckMonitor instead; likely vestigial.
    last_activity_time = {proc: time.time() for proc in active_processes}
    stuck_process_timeout = self.config.stuck_timeout_seconds

    # Track failed processes for proper error reporting
    failed_processes: list[str] = []  # Processes killed due to timeout/stuck
    exit_failed_processes: list[
        tuple[RunningProcess, int]
    ] = []  # Processes that failed with non-zero exit code

    # Track completed processes for timing summary
    completed_timings: List[ProcessTiming] = []

    # Create thread-based stuck process monitor if enabled
    stuck_monitor = None
    if self.config.enable_stuck_detection:
        stuck_monitor = ProcessStuckMonitor(stuck_process_timeout)

    try:
        # Start monitoring threads for each process
        if stuck_monitor:
            for proc in active_processes:
                stuck_monitor.start_monitoring(proc)

        def time_expired() -> bool:
            # True once the global wall-clock budget has been consumed.
            if global_timeout is None:
                return False
            return time.time() - start_time > global_timeout

        while active_processes:
            # Check global timeout
            if time_expired():
                print(f"\nGlobal timeout reached after {global_timeout} seconds")
                print("\033[91m###### ERROR ######\033[0m")
                print("Tests failed due to global timeout")
                failures: list[TestFailureInfo] = []
                for p in active_processes:
                    failed_processes.append(
                        subprocess.list2cmdline(p.command)
                    )  # Track all active processes as failed
                    p.kill()
                    failures.append(
                        TestFailureInfo(
                            test_name=_extract_test_name(p.command),
                            command=str(p.command),
                            return_code=1,
                            output="Process killed due to global timeout",
                            error_type="global_timeout",
                        )
                    )
                raise TestTimeoutException("Global timeout reached", failures)

            # Check for stuck processes (using threaded monitoring)
            if stuck_monitor:
                stuck_signals = stuck_monitor.check_for_stuck_processes()
                if stuck_signals:
                    self._handle_stuck_processes(
                        stuck_signals,
                        active_processes,
                        failed_processes,
                        stuck_monitor,
                    )

            # Early abort if failure threshold reached via stuck processes
            if (
                len(exit_failed_processes) + len(failed_processes)
            ) >= self.config.max_failures_before_abort:
                print(
                    f"\nExceeded failure threshold ({self.config.max_failures_before_abort}). Aborting remaining tests."
                )
                # Kill any remaining active processes
                for p in active_processes:
                    p.kill()
                # Build detailed failures
                failures = self._build_failure_list(
                    exit_failed_processes, failed_processes
                )
                raise TestExecutionFailedException(
                    "Exceeded failure threshold", failures
                )

            # Process each active test individually
            any_activity = self._process_active_tests(
                active_processes,
                exit_failed_processes,
                failed_processes,
                completed_timings,
                stuck_monitor,
            )

            # Brief sleep to avoid spinning if no activity
            if not any_activity:
                time.sleep(0.01)

    finally:
        # Clean up monitoring
        if stuck_monitor:
            for proc in self.processes:
                stuck_monitor.stop_monitoring(proc)
        self._status_monitoring_active = False

    # Check for processes that failed with non-zero exit codes
    if exit_failed_processes:
        print(f"\n\033[91m###### ERROR ######\033[0m")
        print(
            f"Tests failed due to {len(exit_failed_processes)} process(es) with non-zero exit codes:"
        )
        for proc, exit_code in exit_failed_processes:
            print(f" - {proc.command} (exit code {exit_code})")
        failures: list[TestFailureInfo] = []
        for proc, exit_code in exit_failed_processes:
            # Extract error snippet from process output
            error_snippet = extract_error_snippet(proc.accumulated_output)

            failures.append(
                TestFailureInfo(
                    test_name=_extract_test_name(proc.command),
                    command=str(proc.command),
                    return_code=exit_code,
                    output=error_snippet,
                    error_type="exit_failure",
                )
            )
        raise TestExecutionFailedException("Tests failed", failures)

    # Check for failed processes (killed due to timeout/stuck)
    if failed_processes:
        print(f"\n\033[91m###### ERROR ######\033[0m")
        print(f"Tests failed due to {len(failed_processes)} killed process(es):")
        for cmd in failed_processes:
            print(f" - {cmd}")
        print("Processes were killed due to timeout/stuck detection")
        failures: list[TestFailureInfo] = []
        for cmd in failed_processes:
            failures.append(
                TestFailureInfo(
                    test_name=_extract_test_name(cmd),
                    command=str(cmd),
                    return_code=1,
                    output="Process was killed due to timeout/stuck detection",
                    error_type="killed_process",
                )
            )
        raise TestExecutionFailedException("Processes were killed", failures)

    return completed_timings
|
||||
|
||||
def get_status(self) -> GroupStatus:
    """Build a GroupStatus snapshot describing every process in this group."""
    statuses: list[ProcessStatus] = []
    done_count = 0
    failed_count = 0

    for proc in self.processes:
        started = self._process_start_times.get(proc)
        if started is None:
            # The process has no recorded start yet; report zero runtime.
            started = datetime.now()
            elapsed = timedelta(0)
        else:
            elapsed = datetime.now() - started

        # Prefer an explicit name, then a friendly name derived from the
        # command, then a synthetic identifier.
        display_name = getattr(proc, "name", None)
        if not display_name:
            display_name = (
                _get_friendly_test_name(proc.command)
                if hasattr(proc, "command")
                else f"Process-{id(proc)}"
            )

        entry = ProcessStatus(
            name=display_name,
            is_alive=not proc.finished,
            is_completed=proc.finished,
            start_time=started,
            running_duration=elapsed,
            last_output_line=self._get_last_output_line(proc),
            return_value=proc.returncode,
        )
        statuses.append(entry)

        if entry.is_completed:
            done_count += 1
            if entry.return_value != 0:
                failed_count += 1

    return GroupStatus(
        group_name=self.name,
        processes=statuses,
        total_processes=len(self.processes),
        completed_processes=done_count,
        failed_processes=failed_count,
    )
|
||||
|
||||
def _get_last_output_line(self, process: RunningProcess) -> Optional[str]:
    """Return the most recent output line seen from *process*, if any."""
    # Serve the cached value when one exists.
    remembered = self._process_last_output.get(process)
    if remembered:
        return remembered

    # Otherwise derive it from the accumulated output and cache the result.
    output = process.accumulated_output
    if not output:
        return None
    newest = output[-1].strip()
    self._process_last_output[process] = newest
    return newest
|
||||
|
||||
def _track_process_start(self, process: RunningProcess) -> None:
    """Remember the wall-clock moment *process* began, for duration reporting."""
    now = datetime.now()
    self._process_start_times[process] = now
|
||||
|
||||
def _update_process_output(self, process: RunningProcess) -> None:
    """Refresh the cached most-recent output line for *process*."""
    output = process.accumulated_output
    if not output:
        return
    self._process_last_output[process] = output[-1].strip()
|
||||
|
||||
def _handle_stuck_processes(
    self,
    stuck_signals: list[StuckProcessSignal],
    active_processes: list[RunningProcess],
    failed_processes: list[str],
    stuck_monitor: ProcessStuckMonitor,
) -> None:
    """Handle stuck processes by forwarding to the module-level helper of the same name."""
    handler_args = (stuck_signals, active_processes, failed_processes, stuck_monitor)
    _handle_stuck_processes(*handler_args)
|
||||
|
||||
def _build_failure_list(
    self,
    exit_failed_processes: list[tuple[RunningProcess, int]],
    failed_processes: list[str],
) -> list[TestFailureInfo]:
    """Convert exit-code failures and killed processes into TestFailureInfo records."""
    result: list[TestFailureInfo] = []

    # Processes that completed but returned a non-zero exit code.
    for failed_proc, code in exit_failed_processes:
        snippet = extract_error_snippet(failed_proc.accumulated_output)
        result.append(
            TestFailureInfo(
                test_name=_extract_test_name(failed_proc.command),
                command=str(failed_proc.command),
                return_code=code,
                output=snippet,
                error_type="exit_failure",
            )
        )

    # Processes that never finished and were killed by stuck/timeout handling.
    for killed_cmd in failed_processes:
        command_text = (
            subprocess.list2cmdline(killed_cmd)
            if isinstance(killed_cmd, list)
            else str(killed_cmd)
        )
        result.append(
            TestFailureInfo(
                test_name=_extract_test_name(command_text),
                command=command_text,
                return_code=1,
                output="Process was killed due to timeout/stuck detection",
                error_type="killed_process",
            )
        )

    return result
|
||||
|
||||
def _process_active_tests(
    self,
    active_processes: list[RunningProcess],
    exit_failed_processes: list[tuple[RunningProcess, int]],
    failed_processes: list[str],
    completed_timings: List[ProcessTiming],
    stuck_monitor: Optional[ProcessStuckMonitor],
) -> bool:
    """Poll each active process once, harvesting output and completions.

    Drains verbose output, removes finished processes from
    *active_processes* (collecting timings into *completed_timings*), and
    records non-zero exits into *exit_failed_processes*.

    Returns:
        bool: True if any output was printed or any process finished.

    Raises:
        TestExecutionFailedException: once the combined failure count
            reaches ``self.config.max_failures_before_abort``; remaining
            active processes are killed first.
    """
    any_activity = False

    # Iterate backwards to safely remove processes from the list
    for i in range(len(active_processes) - 1, -1, -1):
        proc = active_processes[i]

        if self.config.verbose:
            # Stream any pending lines; each drained line counts as activity.
            with proc.line_iter(timeout=60) as line_iter:
                for line in line_iter:
                    print(line)
                    any_activity = True

        # Check if process has finished
        if proc.finished:
            any_activity = True
            # Get the exit code to check for failure
            exit_code = proc.wait()

            # Process completed, remove from active list
            active_processes.remove(proc)
            # Stop monitoring this process
            if stuck_monitor:
                stuck_monitor.stop_monitoring(proc)

            # Collect timing data
            if proc.duration is not None:
                timing = ProcessTiming(
                    name=_get_friendly_test_name(proc.command),
                    duration=proc.duration,
                    command=str(proc.command),
                )
                completed_timings.append(timing)

            # Check for non-zero exit code (failure)
            if exit_code != 0:
                print(f"Process failed with exit code {exit_code}: {proc.command}")
                exit_failed_processes.append((proc, exit_code))
                # Early abort if we reached the failure threshold
                if (
                    len(exit_failed_processes) + len(failed_processes)
                ) >= self.config.max_failures_before_abort:
                    print(
                        f"\nExceeded failure threshold ({self.config.max_failures_before_abort}). Aborting remaining tests."
                    )
                    # Kill remaining active processes
                    # (the current proc already finished, so skip it).
                    for p in active_processes:
                        if p is not proc:
                            p.kill()
                    # Prepare failures with snippets
                    failures = self._build_failure_list(
                        exit_failed_processes, failed_processes
                    )
                    raise TestExecutionFailedException(
                        "Exceeded failure threshold", failures
                    )

    return any_activity
|
||||
|
||||
def _run_sequential(self) -> List[ProcessTiming]:
    """Execute processes one at a time, in list order, failing fast.

    Returns:
        List[ProcessTiming]: timings for the processes that completed
        (duration data is recorded even for a process that then fails).

    Raises:
        TestExecutionFailedException: on the first non-zero exit code.
    """
    completed_timings: List[ProcessTiming] = []

    # Enable status monitoring
    self._status_monitoring_active = True
    try:
        for process in self.processes:
            print(f"Running: {process.get_command_str()}")

            # Track process start time
            self._track_process_start(process)

            # Start the process if not already running
            if process.proc is None:
                process.run()

            try:
                exit_code = process.wait()

                # Collect timing data
                if process.duration is not None:
                    timing = ProcessTiming(
                        name=_get_friendly_test_name(process.command),
                        duration=process.duration,
                        command=str(process.command),
                    )
                    completed_timings.append(timing)

                # Check for failure
                if exit_code != 0:
                    error_snippet = extract_error_snippet(
                        process.accumulated_output
                    )
                    failure = TestFailureInfo(
                        test_name=_extract_test_name(process.command),
                        command=str(process.command),
                        return_code=exit_code,
                        output=error_snippet,
                        error_type="exit_failure",
                    )
                    raise TestExecutionFailedException(
                        f"Process failed with exit code {exit_code}", [failure]
                    )

            # NOTE: this also catches the TestExecutionFailedException raised
            # above, so the failure is logged here before being re-raised.
            except Exception as e:
                print(f"Process failed: {process.get_command_str()} - {e}")
                raise
    finally:
        self._status_monitoring_active = False

    return completed_timings
|
||||
|
||||
def _run_with_dependencies(self) -> List[ProcessTiming]:
    """Execute processes one at a time in an order that respects dependencies.

    Repeatedly picks the first process whose dependencies (per
    ``self._dependencies``) have all completed and runs it to completion.

    Returns:
        List[ProcessTiming]: timings for completed processes.

    Raises:
        RuntimeError: if no remaining process is runnable (circular or
            missing dependency).
        TestExecutionFailedException: on the first non-zero exit code.
    """
    completed_timings: List[ProcessTiming] = []
    completed_processes: set[RunningProcess] = set()
    remaining_processes = self.processes.copy()

    # Enable status monitoring
    self._status_monitoring_active = True
    try:
        while remaining_processes:
            # Find processes that can run (all dependencies completed)
            ready_processes = []
            for process in remaining_processes:
                dependencies = self._dependencies.get(process, [])
                if all(dep in completed_processes for dep in dependencies):
                    ready_processes.append(process)

            if not ready_processes:
                # No processes can run - circular dependency or missing process
                remaining_names = [
                    _get_friendly_test_name(p.command) for p in remaining_processes
                ]
                raise RuntimeError(
                    f"Circular dependency or missing dependency detected for: {remaining_names}"
                )

            # Run the first ready process
            process = ready_processes[0]
            print(f"Running: {process.get_command_str()}")

            # Track process start time
            self._track_process_start(process)

            # Start the process if not already running
            if process.proc is None:
                process.run()

            try:
                exit_code = process.wait()

                # Collect timing data
                if process.duration is not None:
                    timing = ProcessTiming(
                        name=_get_friendly_test_name(process.command),
                        duration=process.duration,
                        command=str(process.command),
                    )
                    completed_timings.append(timing)

                # Mark as completed
                # (done before the failure check; a failure raises anyway,
                # so the bookkeeping is only observed on success).
                completed_processes.add(process)
                remaining_processes.remove(process)

                # Check for failure
                if exit_code != 0:
                    error_snippet = extract_error_snippet(
                        process.accumulated_output
                    )
                    failure = TestFailureInfo(
                        test_name=_extract_test_name(process.command),
                        command=str(process.command),
                        return_code=exit_code,
                        output=error_snippet,
                        error_type="exit_failure",
                    )
                    raise TestExecutionFailedException(
                        f"Process failed with exit code {exit_code}", [failure]
                    )

            # NOTE: this also catches the TestExecutionFailedException raised
            # above, so the failure is logged here before being re-raised.
            except Exception as e:
                print(f"Process failed: {process.get_command_str()} - {e}")
                raise
    finally:
        self._status_monitoring_active = False

    return completed_timings
|
||||
64
libraries/FastLED/ci/util/running_process_manager.py
Normal file
64
libraries/FastLED/ci/util/running_process_manager.py
Normal file
@@ -0,0 +1,64 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import threading
|
||||
from typing import List
|
||||
|
||||
# Import at runtime since this module is part of util package and used broadly
|
||||
from ci.util.running_process import RunningProcess
|
||||
|
||||
|
||||
class RunningProcessManager:
    """Thread-safe registry of currently running processes for diagnostics."""

    def __init__(self) -> None:
        # Re-entrant lock guarding the registry list.
        self._lock = threading.RLock()
        self._processes: list[RunningProcess] = []

    def register(self, proc: RunningProcess) -> None:
        """Add *proc* to the registry; registering twice is a no-op."""
        with self._lock:
            if proc in self._processes:
                return
            self._processes.append(proc)

    def unregister(self, proc: RunningProcess) -> None:
        """Remove *proc* from the registry; silently ignores unknown processes."""
        with self._lock:
            try:
                self._processes.remove(proc)
            except ValueError:
                pass

    def list_active(self) -> list[RunningProcess]:
        """Return a snapshot of registered processes that have not finished."""
        with self._lock:
            return [entry for entry in self._processes if not entry.finished]

    def dump_active(self) -> None:
        """Print a diagnostic listing of still-running subprocesses."""
        still_running: list[RunningProcess] = self.list_active()
        if not still_running:
            print("\nNO ACTIVE SUBPROCESSES DETECTED - MAIN PROCESS LIKELY HUNG")
            return

        print("\nSTUCK SUBPROCESS COMMANDS:")
        import time

        now = time.time()
        for idx, entry in enumerate(still_running, 1):
            pid: int | None = None
            try:
                if entry.proc is not None:
                    pid = entry.proc.pid
            except Exception:
                pid = None

            started = entry.start_time
            last_out = entry.time_last_stdout_line()
            duration_str = f"{(now - started):.1f}s" if started is not None else "?"
            since_out_str = (
                f"{(now - last_out):.1f}s" if last_out is not None else "no-output"
            )

            print(
                f" {idx}. cmd={entry.command} pid={pid} duration={duration_str} last_output={since_out_str}"
            )


# Global singleton instance for convenient access
RunningProcessManagerSingleton = RunningProcessManager()
|
||||
182
libraries/FastLED/ci/util/sccache_config.py
Normal file
182
libraries/FastLED/ci/util/sccache_config.py
Normal file
@@ -0,0 +1,182 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
SCCACHE Configuration for FastLED Builds
|
||||
|
||||
Provides sccache (distributed compiler cache) support for faster compilation.
|
||||
SCCACHE is faster and more reliable than ccache, especially for CI/CD environments.
|
||||
"""
|
||||
|
||||
import os
|
||||
import platform
|
||||
import shutil
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Any, Protocol
|
||||
|
||||
|
||||
class PlatformIOEnv(Protocol):
    """Structural type hint for a PlatformIO (SCons) environment object.

    Covers only the two operations this module uses: key lookup via
    ``get`` and variable replacement via ``Replace``.
    """

    def get(self, key: str) -> str | None:
        """Return the environment variable value for *key*, or None if unset."""
        ...

    def Replace(self, **kwargs: Any) -> None:
        """Replace environment construction variables with the given values."""
        ...
|
||||
|
||||
|
||||
def is_sccache_available() -> bool:
    """Return True when an ``sccache`` executable can be invoked successfully."""
    try:
        # Probe by asking for the version; any failure means "not usable".
        subprocess.run(["sccache", "--version"], capture_output=True, check=True)
    except (subprocess.CalledProcessError, FileNotFoundError):
        return False
    return True
|
||||
|
||||
|
||||
def get_sccache_path() -> str | None:
|
||||
"""Get the full path to sccache executable."""
|
||||
# Use shutil.which for cross-platform executable finding
|
||||
sccache_path = shutil.which("sccache")
|
||||
if sccache_path:
|
||||
return sccache_path
|
||||
|
||||
# Additional Windows-specific paths
|
||||
if platform.system() == "Windows":
|
||||
additional_paths = [
|
||||
"C:\\ProgramData\\chocolatey\\bin\\sccache.exe",
|
||||
os.path.expanduser("~\\scoop\\shims\\sccache.exe"),
|
||||
os.path.expanduser("~\\.cargo\\bin\\sccache.exe"),
|
||||
]
|
||||
for path in additional_paths:
|
||||
if os.path.exists(path):
|
||||
return path
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def configure_sccache(env: PlatformIOEnv) -> None:
    """Configure SCCACHE for the build environment.

    Creates a per-board cache directory under ``<project>/.sccache`` and
    exports the ``SCCACHE_*`` environment variables used by later compiler
    invocations. Returns silently (after a notice) when sccache is not
    installed, so builds still work without it.

    Args:
        env: PlatformIO/SCons environment; read for ``PROJECT_DIR`` and
            ``PIOENV``.
    """
    if not is_sccache_available():
        print("SCCACHE is not available. Skipping SCCACHE configuration.")
        return

    sccache_path = get_sccache_path()
    if not sccache_path:
        print("Could not find SCCACHE executable. Skipping SCCACHE configuration.")
        return

    print(f"Found SCCACHE at: {sccache_path}")

    # Resolve the project directory; fall back to the CWD for standalone runs.
    project_dir = env.get("PROJECT_DIR")
    if project_dir is None:
        project_dir = os.getcwd()

    # Use a board-specific sccache directory when PIOENV (board environment)
    # is available, so different toolchains do not share one cache.
    board_name = env.get("PIOENV")
    sccache_dir = os.path.join(project_dir, ".sccache", board_name or "default")

    # Create sccache directory
    Path(sccache_dir).mkdir(parents=True, exist_ok=True)
    print(f"Using board-specific SCCACHE directory: {sccache_dir}")

    # Configure SCCACHE environment variables for compiler processes
    # spawned later in the build.
    os.environ["SCCACHE_DIR"] = sccache_dir
    os.environ["SCCACHE_CACHE_SIZE"] = "2G"  # Larger cache for better hit rates

    # Optional: Configure distributed caching (Redis/Memcached) if available.
    # This can be enabled by setting environment variables before build:
    #   export SCCACHE_REDIS=redis://localhost:6379
    #   export SCCACHE_MEMCACHED=localhost:11211

    if platform.system() != "Windows":
        # Only set on Unix-like systems where it's more reliable.
        os.environ["SCCACHE_LOG"] = "info"

    # Note: compiler wrapping (CC/CXX) is not done here for PlatformIO;
    # it is handled via extra_scripts (see get_sccache_wrapper_script_content).
    # The previous version read env CC/CXX into unused locals; removed.

    print("SCCACHE configuration completed")
    print(f"Cache directory: {sccache_dir}")
    print("Cache size limit: 2G")

    # Show SCCACHE stats if available; never fail the build over statistics.
    try:
        result = subprocess.run(
            [sccache_path, "--show-stats"], capture_output=True, text=True, check=False
        )
        if result.returncode == 0:
            print("SCCACHE Statistics:")
            print(result.stdout)
    except Exception:
        pass  # Don't fail build if stats aren't available
|
||||
|
||||
|
||||
def get_sccache_wrapper_script_content() -> str:
    """Return the PlatformIO ``extra_scripts`` snippet that wraps CC/CXX with sccache."""
    wrapper_script = '''
# SCCACHE wrapper script for PlatformIO builds
# This script automatically wraps compiler commands with sccache

Import("env")

import os
import shutil
from pathlib import Path

def setup_sccache_wrapper():
    """Setup sccache wrapper for compiler commands."""

    # Check if sccache is available
    sccache_path = shutil.which("sccache")
    if not sccache_path:
        print("SCCACHE not found, compilation will proceed without caching")
        return

    print(f"Setting up SCCACHE wrapper: {sccache_path}")

    # Get current build environment
    project_dir = env.get("PROJECT_DIR", os.getcwd())
    board_name = env.get("PIOENV", "default")

    # Setup sccache directory
    sccache_dir = os.path.join(project_dir, ".sccache", board_name)
    Path(sccache_dir).mkdir(parents=True, exist_ok=True)

    # Configure sccache environment
    os.environ["SCCACHE_DIR"] = sccache_dir
    os.environ["SCCACHE_CACHE_SIZE"] = "2G"

    # Wrap compiler commands
    current_cc = env.get("CC", "gcc")
    current_cxx = env.get("CXX", "g++")

    # Only wrap if not already wrapped
    if "sccache" not in current_cc:
        env.Replace(
            CC=f'"{sccache_path}" {current_cc}',
            CXX=f'"{sccache_path}" {current_cxx}',
        )
        print(f"Wrapped CC with SCCACHE: {env.get('CC')}")
        print(f"Wrapped CXX with SCCACHE: {env.get('CXX')}")

# Setup sccache wrapper
setup_sccache_wrapper()
'''
    return wrapper_script
|
||||
|
||||
|
||||
# This script can be executed directly by PlatformIO as an extra_script
if __name__ == "__main__":
    # For direct execution by PlatformIO extra_scripts: configuration is
    # performed via the functions above, so direct execution does nothing.
    pass
|
||||
50
libraries/FastLED/ci/util/scrapers/README.md
Normal file
50
libraries/FastLED/ci/util/scrapers/README.md
Normal file
@@ -0,0 +1,50 @@
|
||||
# FastLED Web Scrapers
|
||||
|
||||
This directory contains web scraping tools for testing and capturing screenshots from the online FastLED tool.
|
||||
|
||||
## Contents
|
||||
|
||||
- **`scrape_festival_stick.py`** - Main web scraping script that navigates to the online FastLED tool and captures screenshots
|
||||
- **`run_fastled_scraper.py`** - Utility script for easy execution with different configurations
|
||||
- **`screenshots/`** - Directory containing captured screenshots with timestamps
|
||||
- **`fastled_python_investigation.md`** - Comprehensive documentation of the investigation and implementation
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Run from project root:
|
||||
```bash
|
||||
# Basic usage
|
||||
uv run python ci/util/scrapers/scrape_festival_stick.py

# With utility script options
uv run python ci/util/scrapers/run_fastled_scraper.py --headless --timeout 60
|
||||
```
|
||||
|
||||
### Run from scrapers directory:
|
||||
```bash
|
||||
cd ci/util/scrapers
|
||||
uv run python scrape_festival_stick.py
|
||||
uv run python run_fastled_scraper.py --help
|
||||
```
|
||||
|
||||
## Features
|
||||
|
||||
- **Automated browser navigation** to https://fastled.onrender.com/docs
|
||||
- **Smart element detection** for FastLED interface components
|
||||
- **Screenshot capture** with timestamp organization
|
||||
- **File upload attempts** for FastLED examples
|
||||
- **Error handling** with debug screenshots
|
||||
- **Headless and visible modes** for different use cases
|
||||
|
||||
## Requirements
|
||||
|
||||
The scrapers use the existing FastLED Python environment with:
|
||||
- `playwright` for browser automation
|
||||
- Python dependencies from `pyproject.toml`
|
||||
- Automatic Playwright browser installation
|
||||
|
||||
## Integration
|
||||
|
||||
These scrapers are integrated with the FastLED CI infrastructure and complement the existing web testing in `ci/wasm_test.py`.
|
||||
|
||||
For detailed information, see `fastled_python_investigation.md`.
|
||||
@@ -0,0 +1,197 @@
|
||||
# FastLED Python Code Investigation
|
||||
|
||||
## Overview
|
||||
This document summarizes the investigation of existing Python code in the FastLED project, particularly focusing on Playwright integration and web scraping capabilities for the FastLED online tool.
|
||||
|
||||
**Location**: All scripts are located in the `ci/ci/scrapers/` directory, organized within the CI infrastructure alongside the existing testing infrastructure, including the original Playwright implementation in `ci/wasm_test.py`.
|
||||
|
||||
## Existing Python Infrastructure
|
||||
|
||||
### Dependencies (pyproject.toml)
|
||||
The project already includes comprehensive dependencies for development and testing:
|
||||
- **Playwright**: `playwright` - for browser automation
|
||||
- **Testing**: `pytest`, `pytest-xdist` for parallel testing
|
||||
- **FastLED**: `fastled>=1.2.26` - the FastLED Python package
|
||||
- **FastLED WASM**: `fastled-wasm` - WebAssembly support
|
||||
- **HTTP Client**: `httpx` - for HTTP requests
|
||||
- **Build Tools**: `uv`, `ziglang`, `ninja`, `cmake`
|
||||
- **Code Quality**: `ruff`, `mypy`, `pyright`, `clang-format`, `isort`, `black`
|
||||
|
||||
### Existing Playwright Implementation (`ci/wasm_test.py`)
|
||||
|
||||
The project already has a sophisticated Playwright setup that:
|
||||
|
||||
1. **Automatic Browser Installation**:
|
||||
```python
|
||||
def install_playwright_browsers():
|
||||
os.system(f"{sys.executable} -m playwright install chromium")
|
||||
```
|
||||
|
||||
2. **FastLED WASM Testing**:
|
||||
- Starts an HTTP server for WASM examples
|
||||
- Tests browser automation with the FastLED.js library
|
||||
- Monitors `FastLED_onFrame` callback execution
|
||||
- Validates WebGL/WASM functionality
|
||||
|
||||
3. **Error Handling**:
|
||||
- Console log monitoring for errors
|
||||
- Timeout handling for page loads
|
||||
- Proper server cleanup
|
||||
|
||||
### MCP Server Integration (`mcp_server.py`)
|
||||
|
||||
The project includes a comprehensive MCP (Model Context Protocol) server with tools for:
|
||||
- Running tests with various options
|
||||
- Compiling examples for different platforms
|
||||
- Code fingerprinting and change detection
|
||||
- Linting and formatting
|
||||
- Project information and status
|
||||
|
||||
## FestivalStick Example Analysis
|
||||
|
||||
### Core Functionality
|
||||
The FestivalStick example (`examples/FestivalStick/`) is a sophisticated LED pattern demo featuring:
|
||||
|
||||
1. **Corkscrew LED Mapping**:
|
||||
- 19.25 turns, 288 LEDs
|
||||
- Maps 2D patterns to spiral LED positions
|
||||
- Uses `fl::Corkscrew` class for geometric calculations
|
||||
|
||||
2. **UI Controls**:
|
||||
- Speed, position, brightness controls
|
||||
- Noise pattern generation with customizable parameters
|
||||
- Color palette selection (Party, Heat, Ocean, Forest, Rainbow)
|
||||
- Rendering mode options (Noise, Position, Mixed)
|
||||
- Color boost with saturation/luminance functions
|
||||
|
||||
3. **Advanced Features**:
|
||||
- Multi-sampling for accurate LED positioning
|
||||
- Real-time noise generation with cylindrical mapping
|
||||
- Auto-advance mode with manual position override
|
||||
- ScreenMap integration for web visualization
|
||||
|
||||
### Key Components
|
||||
```cpp
|
||||
// Corkscrew configuration
|
||||
#define NUM_LEDS 288
|
||||
#define CORKSCREW_TURNS 19.25
|
||||
|
||||
// Runtime corkscrew with flexible configuration
|
||||
Corkscrew::Input corkscrewInput(CORKSCREW_TURNS, NUM_LEDS, 0);
|
||||
Corkscrew corkscrew(corkscrewInput);
|
||||
|
||||
// Frame buffer for 2D pattern drawing
|
||||
fl::Grid<CRGB> frameBuffer;
|
||||
|
||||
// ScreenMap for web interface visualization
|
||||
fl::ScreenMap corkscrewScreenMap = corkscrew.toScreenMap(0.2f);
|
||||
```
|
||||
|
||||
## Web Scraping Script Implementation
|
||||
|
||||
### Script Features (`scrape_festival_stick.py`)
|
||||
|
||||
1. **Robust Web Navigation**:
|
||||
- Navigates to https://fastled.onrender.com/docs
|
||||
- Handles dynamic content loading
|
||||
- Searches for multiple possible interface elements
|
||||
|
||||
2. **Smart Element Detection**:
|
||||
- Looks for example selection dropdowns
|
||||
- Detects file upload capabilities
|
||||
- Finds canvas/visualization elements
|
||||
- Searches for FestivalStick-specific content
|
||||
|
||||
3. **Screenshot Capabilities**:
|
||||
- Full page screenshots with timestamps
|
||||
- Focused canvas screenshots when available
|
||||
- Error screenshots for debugging
|
||||
- Multiple resolution support (1920x1080 default)
|
||||
|
||||
4. **File Upload Attempt**:
|
||||
- Automatically tries to upload `FestivalStick.ino`
|
||||
- Handles missing file scenarios gracefully
|
||||
- Waits for upload processing
|
||||
|
||||
### Script Workflow
|
||||
1. Install Playwright browsers automatically
|
||||
2. Launch visible browser with slow motion for debugging
|
||||
3. Navigate to online FastLED tool
|
||||
4. Search for example/upload functionality
|
||||
5. Attempt to load FestivalStick example
|
||||
6. Capture screenshots of the visualization
|
||||
7. Save timestamped results to `screenshots/` directory
|
||||
|
||||
## Project Testing Infrastructure
|
||||
|
||||
### Unit Tests
|
||||
- Location: `tests/` directory
|
||||
- Command: `bash test` (per user rules)
|
||||
- Comprehensive C++ unit tests for all components
|
||||
- Platform compilation tests
|
||||
- Code quality checks
|
||||
|
||||
### Example Compilation
|
||||
- Multi-platform support: `uno`, `esp32`, `teensy`, etc.
|
||||
- Command: `./compile <platform> --examples <example_name>`
|
||||
- Batch compilation for multiple platforms
|
||||
- Interactive and automated modes
|
||||
|
||||
## Key Findings
|
||||
|
||||
1. **Comprehensive Infrastructure**: The FastLED project already has extensive Python tooling with Playwright, testing, and web automation capabilities.
|
||||
|
||||
2. **Advanced LED Visualization**: The FestivalStick example represents sophisticated LED pattern generation with real-time parameter control and web visualization.
|
||||
|
||||
3. **Web Integration Ready**: The existing WASM testing infrastructure provides a solid foundation for web-based LED visualization and interaction.
|
||||
|
||||
4. **Documentation Gap**: While the code is well-implemented, there could be more comprehensive documentation of the web tooling capabilities.
|
||||
|
||||
## Recommendations
|
||||
|
||||
1. **Extend Web Scraping**: The created script could be enhanced to:
|
||||
- Test multiple examples automatically
|
||||
- Capture video recordings of animations
|
||||
- Perform parameter sweeps for different configurations
|
||||
|
||||
2. **Integration Testing**: Consider adding the web scraping script to the CI/CD pipeline for automated web interface testing.
|
||||
|
||||
3. **User Documentation**: Create user guides for the online FastLED tool and example usage.
|
||||
|
||||
## Results
|
||||
|
||||
✅ **Successfully captured screenshot**: `ci/ci/scrapers/screenshots/festival_stick_20250620_224055.png` (82KB)
|
||||
|
||||
The script successfully:
|
||||
1. Navigated to https://fastled.onrender.com/docs
|
||||
2. Detected and interacted with the FastLED web interface
|
||||
3. Captured a full-page screenshot of the FestivalStick example visualization
|
||||
4. Saved the result with timestamp for easy identification
|
||||
|
||||
## Files Created/Modified
|
||||
|
||||
- `ci/ci/scrapers/scrape_festival_stick.py` - Main web scraping script with Playwright automation
|
||||
- `ci/ci/scrapers/run_fastled_scraper.py` - Utility script for easy execution with different configurations
|
||||
- `ci/ci/scrapers/screenshots/` - Directory containing captured images
|
||||
- `ci/ci/scrapers/screenshots/festival_stick_20250620_224055.png` - Successfully captured screenshot (82KB)
|
||||
- `ci/ci/scrapers/fastled_python_investigation.md` - This documentation file
|
||||
|
||||
## Usage Examples
|
||||
|
||||
```bash
|
||||
# Run the scraper directly from project root
|
||||
uv run ci/ci/scrapers/scrape_festival_stick.py
|
||||
|
||||
# Use the utility script with options
|
||||
uv run ci/ci/scrapers/run_fastled_scraper.py --example FestivalStick --headless --timeout 60
|
||||
|
||||
# Make scripts executable and run from project root
|
||||
chmod +x ci/ci/scrapers/scrape_festival_stick.py ci/ci/scrapers/run_fastled_scraper.py
|
||||
./ci/ci/scrapers/scrape_festival_stick.py
|
||||
./ci/ci/scrapers/run_fastled_scraper.py --help
|
||||
|
||||
# Or run from within the scrapers directory
|
||||
cd ci/ci/scrapers
|
||||
uv run scrape_festival_stick.py
|
||||
uv run run_fastled_scraper.py --help
|
||||
```
|
||||
76
libraries/FastLED/ci/util/scrapers/run_fastled_scraper.py
Normal file
76
libraries/FastLED/ci/util/scrapers/run_fastled_scraper.py
Normal file
@@ -0,0 +1,76 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
FastLED Web Scraper Utility
|
||||
|
||||
This script provides an easy way to run the FastLED web scraper with different
|
||||
configurations and examples.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def run_scraper(
    example_name: str = "FestivalStick", headless: bool = False, timeout: int = 30
) -> int:
    """Run the FastLED web scraper and return its exit code (1 on setup errors)."""
    scraper = Path(__file__).parent / "scrape_festival_stick.py"

    if not scraper.exists():
        print(f"Error: Scraper script not found at {scraper}", file=sys.stderr)
        return 1

    print(f"Running FastLED web scraper for {example_name}...")
    print(f"Headless mode: {headless}")
    print(f"Timeout: {timeout} seconds")

    try:
        # For now, just run the existing script; the parameters are reported
        # but not yet forwarded (the underlying script takes no CLI args).
        completed = subprocess.run(
            [sys.executable, str(scraper)],
            check=False,
            capture_output=True,
            text=True,
        )

        if completed.returncode == 0:
            print("✅ Scraping completed successfully!")
            print(completed.stdout)
        else:
            print("❌ Scraping failed!")
            print("STDOUT:", completed.stdout)
            print("STDERR:", completed.stderr)

        return completed.returncode

    except Exception as e:
        print(f"Error running scraper: {e}", file=sys.stderr)
        return 1
||||
|
||||
|
||||
def main() -> int:
    """Command-line entry point: parse arguments and hand off to run_scraper."""
    arg_parser = argparse.ArgumentParser(description="Run FastLED web scraper")
    arg_parser.add_argument(
        "--example",
        "-e",
        default="FestivalStick",
        help="Example name to scrape (default: FestivalStick)",
    )
    arg_parser.add_argument(
        "--headless", action="store_true", help="Run browser in headless mode"
    )
    arg_parser.add_argument(
        "--timeout", "-t", type=int, default=30, help="Timeout in seconds (default: 30)"
    )

    opts = arg_parser.parse_args()
    return run_scraper(opts.example, opts.headless, opts.timeout)


if __name__ == "__main__":
    sys.exit(main())
|
||||
206
libraries/FastLED/ci/util/scrapers/scrape_festival_stick.py
Normal file
206
libraries/FastLED/ci/util/scrapers/scrape_festival_stick.py
Normal file
@@ -0,0 +1,206 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
from playwright.async_api import async_playwright # type: ignore
|
||||
|
||||
|
||||
# Filesystem anchors: this file lives under <root>/.../scrapers/, so three
# .parent hops are expected to reach the project root.
HERE = Path(__file__).parent
PROJECT_ROOT = HERE.parent.parent.parent  # scrapers is 3 levels down from project root
SCREENSHOTS_DIR = HERE / "screenshots"

# Ensure screenshots directory exists (import-time side effect)
SCREENSHOTS_DIR.mkdir(exist_ok=True)
|
||||
|
||||
|
||||
# Ensure Playwright browsers are installed
|
||||
def install_playwright_browsers():
    """Install the Chromium browser for Playwright, exiting the process on failure.

    Fix: the previous implementation wrapped os.system() in try/except, but
    os.system() reports failure through its return value rather than by
    raising, so install failures were silently ignored. Check the exit
    status explicitly instead.
    """
    print("Installing Playwright browsers...")
    exit_status = os.system(f"{sys.executable} -m playwright install chromium")
    if exit_status != 0:
        print(
            f"Failed to install Playwright browsers (exit status {exit_status})",
            file=sys.stderr,
        )
        sys.exit(1)
    print("Playwright browsers installed successfully.")
|
||||
|
||||
|
||||
async def scrape_festival_stick_example():
    """
    Navigate to the online FastLED tool and capture a screenshot of the FestivalStick example.

    Best-effort flow: install Playwright browsers, load the hosted FastLED
    page, probe a list of likely selectors for an example picker / file
    input / display canvas, optionally upload
    examples/FestivalStick/FestivalStick.ino if a file input exists, then
    save a timestamped full-page screenshot (plus a canvas-only screenshot
    when a display element was found) under SCREENSHOTS_DIR. On error a
    debug screenshot is attempted and the exception is re-raised; the
    browser is always closed.
    """
    install_playwright_browsers()

    # Online FastLED tool URL
    fastled_url = "https://fastled.onrender.com/docs"

    async with async_playwright() as p:
        # Launch browser with a visible window for debugging
        browser = await p.chromium.launch(headless=False, slow_mo=1000)
        page = await browser.new_page()

        # Set viewport size for consistent screenshots
        await page.set_viewport_size({"width": 1920, "height": 1080})

        try:
            print(f"Navigating to {fastled_url}...")
            await page.goto(fastled_url, timeout=30000)

            # Wait for the page to load
            await page.wait_for_load_state("networkidle")

            # Look for FastLED examples or upload functionality
            print("Looking for FastLED example functionality...")

            # Wait a bit for dynamic content to load
            await page.wait_for_timeout(3000)

            # Try to find any example selection or file upload elements.
            # Each selector is probed with a short timeout; the first hit wins.
            examples_selector = None
            possible_selectors = [
                "select[name*='example']",
                "select[id*='example']",
                ".example-selector",
                "input[type='file']",
                "button:has-text('Example')",
                "button:has-text('FestivalStick')",
                "a:has-text('Example')",
                "a:has-text('FestivalStick')",
            ]

            for selector in possible_selectors:
                try:
                    element = await page.wait_for_selector(selector, timeout=2000)
                    if element:
                        print(f"Found element with selector: {selector}")
                        examples_selector = selector
                        break
                except Exception:
                    continue

            if not examples_selector:
                # If no specific example selector found, look for text content
                print("Looking for FestivalStick text on page...")
                try:
                    await page.wait_for_selector("text=FestivalStick", timeout=5000)
                    print("Found FestivalStick text on page!")
                except Exception:
                    print(
                        "FestivalStick text not found, taking screenshot of current page..."
                    )

            # Try to interact with the FastLED tool interface
            print("Attempting to interact with FastLED interface...")

            # Look for common web interface elements that may hold the LED view
            interface_elements = [
                "canvas",
                ".led-display",
                ".visualization",
                "#fastled-canvas",
                ".fastled-viewer",
            ]

            canvas_found = False
            canvas = None
            for element_selector in interface_elements:
                try:
                    canvas_element = await page.wait_for_selector(
                        element_selector, timeout=2000
                    )
                    if canvas_element:
                        print(f"Found display element: {element_selector}")
                        canvas_found = True
                        canvas = canvas_element
                        break
                except Exception:
                    continue

            if not canvas_found:
                print("No specific display canvas found, capturing full page...")

            # If there's a file upload option, try to upload the FestivalStick example
            try:
                file_input = await page.query_selector("input[type='file']")
                if file_input:
                    print("Found file input, attempting to upload FestivalStick.ino...")
                    festival_stick_path = (
                        PROJECT_ROOT
                        / "examples"
                        / "FestivalStick"
                        / "FestivalStick.ino"
                    )
                    if festival_stick_path.exists():
                        await file_input.set_input_files(str(festival_stick_path))
                        await page.wait_for_timeout(3000)  # Wait for upload to process
                        print("FestivalStick.ino uploaded successfully!")
                    else:
                        print(f"FestivalStick.ino not found at {festival_stick_path}")
            except Exception as e:
                print(f"Could not upload file: {e}")

            # Wait for any animations or dynamic content to settle
            await page.wait_for_timeout(5000)

            # Take screenshot (timestamped so repeated runs don't overwrite)
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            screenshot_path = SCREENSHOTS_DIR / f"festival_stick_{timestamp}.png"

            print(f"Taking screenshot and saving to {screenshot_path}...")
            await page.screenshot(path=str(screenshot_path), full_page=True)

            print(f"Screenshot saved successfully to {screenshot_path}")

            # Also take a focused screenshot if we found a canvas
            if canvas_found and canvas is not None:
                try:
                    canvas_screenshot_path = (
                        SCREENSHOTS_DIR / f"festival_stick_canvas_{timestamp}.png"
                    )
                    await canvas.screenshot(path=str(canvas_screenshot_path))
                    print(f"Canvas screenshot saved to {canvas_screenshot_path}")
                except Exception as e:
                    print(f"Could not take canvas screenshot: {e}")

            # Keep browser open for a few seconds to see the result
            print("Keeping browser open for 10 seconds for inspection...")
            await page.wait_for_timeout(10000)

        except Exception as e:
            print(f"An error occurred during scraping: {e}", file=sys.stderr)

            # Take an error screenshot for debugging (best effort)
            error_screenshot_path = (
                SCREENSHOTS_DIR
                / f"error_{datetime.now().strftime('%Y%m%d_%H%M%S')}.png"
            )
            try:
                await page.screenshot(path=str(error_screenshot_path), full_page=True)
                print(f"Error screenshot saved to {error_screenshot_path}")
            except Exception:
                pass

            raise e

        finally:
            await browser.close()
|
||||
|
||||
|
||||
async def main():
    """Entry point: run the FestivalStick scrape and report success/failure."""
    try:
        await scrape_festival_stick_example()
    except Exception as e:
        print(f"Scraping failed: {e}", file=sys.stderr)
        return 1

    print("FastLED FestivalStick scraping completed successfully!")
    return 0


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    sys.exit(exit_code)
|
||||
898
libraries/FastLED/ci/util/symbol_analysis.py
Normal file
898
libraries/FastLED/ci/util/symbol_analysis.py
Normal file
@@ -0,0 +1,898 @@
|
||||
#!/usr/bin/env python3
|
||||
# pyright: reportUnknownMemberType=false, reportOperatorIssue=false, reportArgumentType=false
|
||||
"""
|
||||
Enhanced Symbol Analysis Tool with Function Call Graph Analysis
|
||||
Analyzes ELF files to identify symbols and function call relationships.
|
||||
Shows which functions call other functions (call graph analysis).
|
||||
Works with any platform (ESP32S3, UNO, etc.) that has build_info.json.
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
from collections import defaultdict
|
||||
from dataclasses import asdict, dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Iterator, List, Optional, Tuple
|
||||
|
||||
# Import board mapping system
|
||||
from ci.boards import create_board
|
||||
|
||||
|
||||
@dataclass
class SymbolInfo:
    """Represents a symbol in the binary"""

    address: str  # symbol address as reported by the tool (radix varies by source)
    size: int  # size in bytes; 0 when the reporting tool provided no size
    type: str  # symbol type code (e.g. T, D, B)
    name: str  # raw (possibly mangled) symbol name
    demangled_name: str  # name after c++filt demangling (== name if not mangled)
    source: str  # STRICT: NO defaults - all callers must provide explicit source
|
||||
|
||||
|
||||
@dataclass
class TypeBreakdown:
    """Breakdown of symbols by type"""

    type: str  # symbol type code this bucket aggregates
    count: int  # number of symbols of this type
    total_size: int  # sum of sizes (bytes) of symbols of this type
|
||||
|
||||
|
||||
@dataclass
class CallStats:
    """Statistics about function calls"""

    functions_with_calls: int  # number of functions observed making at least one call
    functions_called_by_others: int  # number of functions appearing as a call target
    # NOTE(review): generate_report() stores (name, caller/callee *list*) pairs
    # in these fields, so the declared Tuple[str, int] element type looks
    # inaccurate — confirm before relying on the annotation.
    most_called: List[Tuple[str, int]] = field(default_factory=lambda: [])
    most_calling: List[Tuple[str, int]] = field(default_factory=lambda: [])
|
||||
|
||||
|
||||
@dataclass
class AnalysisReport:
    """Complete symbol analysis report"""

    board: str  # board name the analysis was run for
    total_symbols: int  # total deduplicated symbol count
    total_size: int  # sum of sizes of sized symbols, in bytes
    largest_symbols: List[SymbolInfo] = field(default_factory=lambda: [])  # top-20 by size
    type_breakdown: List[TypeBreakdown] = field(default_factory=lambda: [])
    dependencies: Dict[str, List[str]] = field(default_factory=lambda: {})  # from map file
    # The three fields below are populated only in enhanced (call-graph) mode.
    call_graph: Optional[Dict[str, List[str]]] = None  # caller -> callees
    reverse_call_graph: Optional[Dict[str, List[str]]] = None  # callee -> callers
    call_stats: Optional[CallStats] = None
|
||||
|
||||
|
||||
@dataclass
class DetailedAnalysisData:
    """Complete detailed analysis data structure for JSON output"""

    summary: AnalysisReport  # the condensed report
    all_symbols_sorted_by_size: List[SymbolInfo]  # every symbol, largest first
    dependencies: Dict[str, List[str]]  # archive member -> pulling symbols
    call_graph: Optional[Dict[str, List[str]]] = None  # enhanced mode only
    reverse_call_graph: Optional[Dict[str, List[str]]] = None  # enhanced mode only
|
||||
|
||||
|
||||
@dataclass
class TypeStats:
    """Statistics for symbol types with dictionary-like functionality"""

    stats: Dict[str, TypeBreakdown] = field(default_factory=lambda: {})

    def add_symbol(self, symbol: SymbolInfo) -> None:
        """Accumulate one symbol into the per-type counters."""
        bucket = self.stats.setdefault(
            symbol.type, TypeBreakdown(type=symbol.type, count=0, total_size=0)
        )
        bucket.count += 1
        bucket.total_size += symbol.size

    def items(self) -> List[Tuple[str, TypeBreakdown]]:
        """(type, breakdown) pairs, largest total size first."""

        def by_total_size(entry: Tuple[str, TypeBreakdown]) -> int:
            return entry[1].total_size

        return sorted(self.stats.items(), key=by_total_size, reverse=True)

    def values(self) -> List[TypeBreakdown]:
        """All per-type breakdowns, in insertion order."""
        return [*self.stats.values()]

    def __getitem__(self, key: str) -> TypeBreakdown:
        """Dictionary-style access by type code."""
        return self.stats[key]

    def __contains__(self, key: str) -> bool:
        """Support the 'in' operator for type codes."""
        return key in self.stats
|
||||
|
||||
|
||||
def run_command(cmd: str) -> str:
    """Execute a shell command and return its stdout, or '' on failure."""
    try:
        proc = subprocess.run(
            cmd, shell=True, capture_output=True, text=True, check=True
        )
    except subprocess.CalledProcessError as err:
        print(f"Error running command: {cmd}")
        print(f"Error: {err.stderr}")
        return ""
    return proc.stdout
|
||||
|
||||
|
||||
def demangle_symbol(mangled_name: str, cppfilt_path: str) -> str:
    """Demangle a C++ symbol using c++filt.

    Fix: the symbol is fed to c++filt on stdin instead of being interpolated
    into a shell command line. Mangled names can contain characters such as
    '$', quotes, or backticks that the previous `echo "{name}" | c++filt`
    pipeline let the shell interpret, corrupting the result (or worse).

    Args:
        mangled_name: Raw symbol name to demangle.
        cppfilt_path: Path to the toolchain's c++filt executable.

    Returns:
        The demangled name, or the original name if demangling fails.
    """
    try:
        result = subprocess.run(
            [cppfilt_path],
            input=mangled_name,
            capture_output=True,
            text=True,
            check=True,
        )
        demangled = result.stdout.strip()
        # If demangling failed, c++filt echoes the input back unchanged
        return demangled if demangled != mangled_name else mangled_name
    except Exception as e:
        print(f"Error demangling symbol: {mangled_name}")
        print(f"Error: {e}")
        return mangled_name
|
||||
|
||||
|
||||
def analyze_symbols(
    elf_file: str, nm_path: str, cppfilt_path: str, readelf_path: Optional[str] = None
) -> List[SymbolInfo]:
    """Analyze ALL symbols in ELF file using both nm and readelf for comprehensive coverage.

    Three passes are merged, deduplicating by symbol name:
      1. ``readelf -s``          -- every symbol, including sizeless ones
      2. ``nm --print-size``     -- authoritative sizes (overwrites pass-1 data)
      3. ``nm -a``               -- remaining symbols (no size information)

    Args:
        elf_file: Path to the ELF binary.
        nm_path: Path to the toolchain's nm executable.
        cppfilt_path: Path to the toolchain's c++filt executable.
        readelf_path: Optional path to readelf; pass 1 is skipped when None.

    Returns:
        Deduplicated list of SymbolInfo records (size is 0 where no tool
        reported one).
    """
    print("Analyzing symbols with enhanced coverage...")

    symbols: List[SymbolInfo] = []  # rebound from symbols_dict at the end
    symbols_dict: Dict[str, SymbolInfo] = {}  # To deduplicate by address+name

    # Method 1: Use readelf to get ALL symbols (including those without size)
    if readelf_path:
        print("Getting all symbols using readelf...")
        readelf_cmd = f'"{readelf_path}" -s "{elf_file}"'
        output = run_command(readelf_cmd)

        if output:
            for line in output.strip().split("\n"):
                line = line.strip()
                # Skip header and empty lines
                if (
                    not line
                    or "Num:" in line
                    or "Symbol table" in line
                    or line.startswith("--")
                ):
                    continue

                # Parse readelf output format: Num: Value Size Type Bind Vis Ndx Name
                parts = line.split()
                if len(parts) >= 8:
                    try:
                        # Skip num (parts[0]) - not needed
                        addr = parts[1]
                        size = int(parts[2]) if parts[2].isdigit() else 0
                        symbol_type = parts[3]
                        bind = parts[4]  # parsed but currently unused
                        # Skip vis and ndx (parts[5], parts[6]) - not needed
                        name = " ".join(parts[7:]) if len(parts) > 7 else ""

                        # Skip empty names and section symbols
                        if not name or name.startswith(".") or symbol_type == "SECTION":
                            continue

                        # Create a unique key for deduplication (use name as primary key since addresses can vary)
                        key = name.strip()

                        if key not in symbols_dict:
                            # Demangle the symbol name
                            demangled_name = demangle_symbol(name, cppfilt_path)

                            symbol_info = SymbolInfo(
                                address=addr,
                                size=size,
                                type=symbol_type[0].upper(),  # T, D, B, etc.
                                name=name,
                                demangled_name=demangled_name,
                                source="readelf",
                            )

                            symbols_dict[key] = symbol_info

                    except (ValueError, IndexError):
                        continue  # Skip malformed lines

    # Method 2: Use nm with --print-size to get symbols with sizes (for accurate size info)
    print("Getting sized symbols using nm...")
    nm_cmd = f'"{nm_path}" --print-size --size-sort --radix=d "{elf_file}"'
    output = run_command(nm_cmd)

    if output:
        for line in output.strip().split("\n"):
            if not line.strip():
                continue

            # nm --print-size format: address size type name
            parts = line.split()
            if len(parts) >= 4:
                try:
                    addr = parts[0]
                    size = int(parts[1])
                    symbol_type = parts[2]
                    name = " ".join(parts[3:])

                    # Create a unique key for deduplication (use name as primary key)
                    key = name.strip()

                    # If we already have this symbol from readelf, update with accurate size
                    if key in symbols_dict:
                        symbols_dict[key].size = size
                        symbols_dict[key].type = symbol_type
                        symbols_dict[key].source = "nm+readelf"
                    else:
                        # New symbol not found by readelf
                        demangled_name = demangle_symbol(name, cppfilt_path)

                        symbol_info = SymbolInfo(
                            address=addr,
                            size=size,
                            type=symbol_type,
                            name=name,
                            demangled_name=demangled_name,
                            source="nm",
                        )

                        symbols_dict[key] = symbol_info

                except (ValueError, IndexError):
                    continue  # Skip malformed lines

    # Method 3: Use nm with -a to get all symbols including debugger-only
    print("Getting additional symbols using nm -a...")
    nm_all_cmd = f'"{nm_path}" -a --radix=d "{elf_file}"'
    output = run_command(nm_all_cmd)

    if output:
        for line in output.strip().split("\n"):
            if not line.strip():
                continue

            # nm -a format: address type name (no size column)
            parts = line.split()
            if len(parts) >= 3:
                try:
                    addr = parts[0]
                    symbol_type = parts[1]
                    name = " ".join(parts[2:])

                    # Skip empty names
                    if not name:
                        continue

                    # Create a unique key for deduplication (use name as primary key)
                    key = name.strip()

                    if key not in symbols_dict:
                        # New symbol not found by other methods
                        demangled_name = demangle_symbol(name, cppfilt_path)

                        symbol_info = SymbolInfo(
                            address=addr,
                            size=0,  # nm -a doesn't provide size
                            type=symbol_type,
                            name=name,
                            demangled_name=demangled_name,
                            source="nm-a",
                        )

                        symbols_dict[key] = symbol_info

                except (ValueError, IndexError):
                    continue  # Skip malformed lines

    # Convert dict to list
    symbols: List[SymbolInfo] = list(symbols_dict.values())

    print(f"Found {len(symbols)} total symbols using enhanced analysis")
    print(f" - Symbols with size info: {len([s for s in symbols if s.size > 0])}")
    print(f" - Symbols without size: {len([s for s in symbols if s.size == 0])}")

    return symbols
|
||||
|
||||
|
||||
def analyze_function_calls(
    elf_file: str, objdump_path: str, cppfilt_path: str
) -> Dict[str, List[str]]:
    """Analyze function calls using objdump to build a call graph.

    Args:
        elf_file: Path to the ELF binary to disassemble.
        objdump_path: Path to the toolchain's objdump executable.
        cppfilt_path: Path to the toolchain's c++filt executable.

    Returns:
        Mapping of caller (demangled name) -> list of callee names.
        Empty dict when no disassembly output was produced.
    """
    print("Analyzing function calls using objdump...")

    # Dump the symbol table of the binary
    cmd = f'"{objdump_path}" -t "{elf_file}"'
    print(f"Running: {cmd}")
    symbol_output = run_command(cmd)

    # Build symbol address map for function symbols.
    # Fix: the annotation previously claimed Dict[str, str], but the values
    # stored are dicts of {"name", "demangled"}; annotate what is stored.
    # NOTE(review): symbol_map is populated but not consulted below; kept for
    # parity with the original behavior.
    symbol_map: Dict[str, Dict[str, str]] = {}  # address -> name info
    function_symbols: set[str] = set()  # set of demangled function names

    for line in symbol_output.strip().split("\n"):
        if not line.strip():
            continue

        # Parse objdump symbol table output
        # Format: address flags section size name
        parts = line.split()
        if len(parts) >= 5 and ("F" in parts[1] or "f" in parts[1]):  # Function symbol
            try:
                address = parts[0]
                symbol_name = " ".join(parts[4:])

                # Demangle the symbol name
                demangled_name = demangle_symbol(symbol_name, cppfilt_path)

                symbol_map[address] = {"name": symbol_name, "demangled": demangled_name}
                function_symbols.add(demangled_name)
            except (ValueError, IndexError):
                continue

    print(f"Found {len(function_symbols)} function symbols")

    # Now disassemble text sections to find function calls
    cmd = f'"{objdump_path}" -d "{elf_file}"'
    print(f"Running: {cmd}")
    disasm_output = run_command(cmd)

    if not disasm_output:
        print("Warning: No disassembly output received")
        return {}

    # Parse disassembly to find function calls
    call_graph: defaultdict[str, set[str]] = defaultdict(
        set
    )  # caller -> set of callees
    current_function = None

    # Common call instruction patterns for different architectures
    call_patterns = [
        r"call\s+(\w+)",  # x86/x64 call
        r"bl\s+(\w+)",  # ARM branch with link
        r"jal\s+(\w+)",  # RISC-V jump and link
        r"callx?\d*\s+(\w+)",  # Xtensa call variations
    ]

    call_regex = re.compile(
        "|".join(f"(?:{pattern})" for pattern in call_patterns), re.IGNORECASE
    )

    function_start_pattern = re.compile(r"^([0-9a-f]+)\s+<([^>]+)>:")

    lines = disasm_output.split("\n")
    for line in lines:  # index was unused; plain iteration
        line = line.strip()

        # Check for function start
        func_match = function_start_pattern.match(line)
        if func_match:
            func_name = func_match.group(2)

            # Demangle function name
            current_function = demangle_symbol(func_name, cppfilt_path)
            continue

        # Look for call instructions (cheap substring pre-filter before regex)
        if current_function and (
            "call" in line.lower() or "bl " in line.lower() or "jal" in line.lower()
        ):
            call_match = call_regex.search(line)
            if call_match:
                # Extract the target function name
                for group in call_match.groups():
                    if group:
                        target_func = demangle_symbol(group, cppfilt_path)
                        call_graph[current_function].add(target_func)
                        break

    print(f"Built call graph with {len(call_graph)} calling functions")

    # Convert sets to lists for JSON serialization
    return {caller: list(callees) for caller, callees in call_graph.items()}
|
||||
|
||||
|
||||
def build_reverse_call_graph(call_graph: Dict[str, List[str]]) -> Dict[str, List[str]]:
    """Invert a call graph: map each function to the list of functions calling it."""
    inverted: Dict[str, List[str]] = {}

    for caller, callees in call_graph.items():
        for callee in callees:
            inverted.setdefault(callee, []).append(caller)

    return inverted
|
||||
|
||||
|
||||
def analyze_map_file(map_file: Path) -> Dict[str, List[str]]:
    """Analyze the map file to understand module dependencies.

    Returns a mapping of archive member name -> symbols that pulled it in
    (at most one symbol is recorded per member). Returns {} when the map
    file is missing or unreadable.
    """
    print(f"Analyzing map file: {map_file}")

    dependencies: Dict[str, List[str]] = {}
    pending_member: Optional[str] = None

    if not map_file.exists():
        print(f"Map file not found: {map_file}")
        return {}

    def _between_parens(text: str) -> Optional[str]:
        """Non-empty content between the first '(' and the first ')', else None."""
        open_idx = text.find("(")
        close_idx = text.find(")")
        if open_idx != -1 and close_idx > open_idx + 1:
            return text[open_idx + 1 : close_idx]
        return None

    try:
        with open(map_file, "r") as f:
            for raw_line in f:
                text = raw_line.strip()

                # Archive member include lines - handle both ESP32 and UNO formats
                if ".a(" in text and ")" in text:
                    member = _between_parens(text)
                    if member is not None:
                        pending_member = member
                        dependencies[pending_member] = []

                elif pending_member and text and not text.startswith((".", "/", "*")):
                    # This line shows what pulled in the module
                    if "(" in text and ")" in text:
                        symbol = _between_parens(text)
                        if symbol is not None:
                            dependencies[pending_member].append(symbol)
                            pending_member = None
    except Exception as e:
        print(f"Error reading map file: {e}")
        return {}

    return dependencies
|
||||
|
||||
|
||||
def generate_report(
    board_name: str,
    symbols: List[SymbolInfo],
    dependencies: Dict[str, List[str]],
    call_graph: Optional[Dict[str, List[str]]] = None,
    reverse_call_graph: Optional[Dict[str, List[str]]] = None,
    enhanced_mode: bool = False,
) -> AnalysisReport:
    """Generate a comprehensive report with optional call graph analysis.

    Prints a human-readable report to stdout and returns the same data as an
    AnalysisReport.

    Args:
        board_name: Board identifier used in headings and the returned report.
        symbols: All symbols collected for the board.
        dependencies: Archive-member -> pulling-symbols map from the map file.
        call_graph: caller -> callees map (used in enhanced mode only).
        reverse_call_graph: callee -> callers map (used in enhanced mode only).
        enhanced_mode: When True and both graphs are provided, also prints
            call-graph sections and attaches CallStats to the result.

    Returns:
        AnalysisReport populated with summary data (and call-graph data in
        enhanced mode).
    """
    print("\n" + "=" * 80)
    if enhanced_mode and call_graph and reverse_call_graph:
        print(f"{board_name.upper()} ENHANCED SYMBOL ANALYSIS REPORT")
    else:
        print(f"{board_name.upper()} SYMBOL ANALYSIS REPORT")
    print("=" * 80)

    # Summary statistics
    total_symbols = len(symbols)
    total_size = sum(s.size for s in symbols)
    symbols_with_size = [s for s in symbols if s.size > 0]
    symbols_without_size = [s for s in symbols if s.size == 0]

    print("\nSUMMARY:")
    print(f" Total symbols: {total_symbols}")
    print(f" Symbols with size info: {len(symbols_with_size)}")
    print(f" Symbols without size info: {len(symbols_without_size)}")
    print(
        f" Total symbol size: {total_size} bytes ({total_size / 1024:.1f} KB) [sized symbols only]"
    )

    if enhanced_mode and call_graph and reverse_call_graph:
        print(f" Functions with calls: {len(call_graph)}")
        print(f" Functions called by others: {len(reverse_call_graph)}")

    # Show source breakdown (which tool each symbol came from)
    source_stats: Dict[str, int] = {}
    for sym in symbols:
        source = sym.source
        if source not in source_stats:
            source_stats[source] = 0
        source_stats[source] += 1

    print("\nSYMBOL SOURCES:")
    for source, count in sorted(source_stats.items()):
        print(f" {source}: {count} symbols")

    # Largest symbols overall
    print("\nLARGEST SYMBOLS (all symbols, sorted by size):")
    symbols_sorted = sorted(symbols, key=lambda x: x.size, reverse=True)

    # Enhanced mode prints per-symbol caller info, so show fewer entries
    display_count = 30 if enhanced_mode else 50
    for i, sym in enumerate(symbols_sorted[:display_count]):
        display_name = sym.demangled_name
        print(f" {i + 1:2d}. {sym.size:6d} bytes - {display_name}")

        # Show what functions call this symbol (if enhanced mode and it's a function)
        if enhanced_mode and reverse_call_graph and display_name in reverse_call_graph:
            callers = reverse_call_graph[display_name]
            if callers:
                caller_names = [
                    name[:40] + "..." if len(name) > 40 else name
                    for name in callers[:3]
                ]
                print(f" Called by: {', '.join(caller_names)}")
                if len(callers) > 3:
                    print(f" ... and {len(callers) - 3} more")

        # Show mangled name if different (non-enhanced mode)
        elif (
            not enhanced_mode
            and hasattr(sym, "demangled_name")
            and sym.demangled_name
            and sym.demangled_name != sym.name
        ):
            print(
                f" (mangled: {sym.name[:80]}{'...' if len(sym.name) > 80 else ''})"
            )

    # Initialize variables for enhanced mode data
    most_called = []
    most_calling = []

    # Enhanced function call analysis
    if enhanced_mode and call_graph and reverse_call_graph:
        print("\n" + "=" * 80)
        print("FUNCTION CALL ANALYSIS")
        print("=" * 80)

        # Most called functions
        most_called = sorted(
            reverse_call_graph.items(), key=lambda x: len(x[1]), reverse=True
        )
        print("\nMOST CALLED FUNCTIONS (functions called by many others):")
        for i, (func_name, callers) in enumerate(most_called[:15]):
            short_name = func_name[:60] + "..." if len(func_name) > 60 else func_name
            print(f" {i + 1:2d}. {short_name}")
            print(f" Called by {len(callers)} functions")
            if len(callers) <= 5:
                for caller in callers:
                    caller_short = caller[:50] + "..." if len(caller) > 50 else caller
                    print(f" - {caller_short}")
            else:
                for caller in callers[:3]:
                    caller_short = caller[:50] + "..." if len(caller) > 50 else caller
                    print(f" - {caller_short}")
                print(f" ... and {len(callers) - 3} more")
            print()

        # Functions that call many others
        most_calling = sorted(call_graph.items(), key=lambda x: len(x[1]), reverse=True)
        print("\nFUNCTIONS THAT CALL MANY OTHERS:")
        for i, (func_name, callees) in enumerate(most_calling[:10]):
            short_name = func_name[:60] + "..." if len(func_name) > 60 else func_name
            print(f" {i + 1:2d}. {short_name}")
            print(f" Calls {len(callees)} functions")
            if len(callees) <= 5:
                for callee in callees:
                    callee_short = callee[:50] + "..." if len(callee) > 50 else callee
                    print(f" -> {callee_short}")
            else:
                for callee in callees[:3]:
                    callee_short = callee[:50] + "..." if len(callee) > 50 else callee
                    print(f" -> {callee_short}")
                print(f" ... and {len(callees) - 3} more")
            print()

    # Symbol type breakdown
    section_title = "\n" + "=" * 80 + "\n" if enhanced_mode else "\n"
    print(section_title + "SYMBOL TYPE BREAKDOWN:")
    type_stats = TypeStats()
    for sym in symbols:
        type_stats.add_symbol(sym)

    for sym_type, stats in type_stats.items():
        print(
            f" {sym_type}: {stats.count} symbols, {stats.total_size} bytes ({stats.total_size / 1024:.1f} KB)"
        )

    # Dependencies analysis
    if dependencies:
        print("\nMODULE DEPENDENCIES:")
        for module in sorted(dependencies.keys()):
            if dependencies[module]:  # Only show modules with dependencies
                print(f" {module}:")
                for symbol in dependencies[module][:5]:  # Show first 5 symbols
                    print(f" - {symbol}")
                if len(dependencies[module]) > 5:
                    print(f" ... and {len(dependencies[module]) - 5} more")

    # Build return data
    report_data = AnalysisReport(
        board=board_name,
        total_symbols=total_symbols,
        total_size=total_size,
        largest_symbols=symbols_sorted[:20],
        type_breakdown=list(type_stats.values()),
        dependencies=dependencies,
    )

    # Add enhanced data if available
    # NOTE(review): most_called/most_calling are (name, list) pairs here,
    # while CallStats annotates them as Tuple[str, int] — confirm intended.
    if enhanced_mode and call_graph and reverse_call_graph:
        report_data.call_graph = call_graph
        report_data.reverse_call_graph = reverse_call_graph
        report_data.call_stats = CallStats(
            functions_with_calls=len(call_graph),
            functions_called_by_others=len(reverse_call_graph),
            most_called=most_called[:10],
            most_calling=most_calling[:10],
        )

    return report_data
|
||||
|
||||
|
||||
def find_board_build_info(board_name: Optional[str] = None) -> Tuple[Path, str]:
    """Locate a board's build_info.json under the nearest .build directory.

    Walks upward from the current working directory until a `.build` folder
    is found, then looks for `<board>/build_info.json` either directly under
    it or under its `pio/` subdirectory. With no board name given, the first
    board discovered is returned. Exits the process with status 1 when
    nothing suitable exists.

    Returns:
        (path to build_info.json, board directory name)
    """
    # Walk up from the current directory until a .build folder appears.
    build_root: Optional[Path] = None
    probe = Path.cwd()
    while probe != probe.parent:
        if (probe / ".build").exists():
            build_root = probe / ".build"
            break
        probe = probe.parent

    if build_root is None:
        print("Error: Could not find build directory (.build)")
        sys.exit(1)

    # Specific board requested: check the direct and PlatformIO layouts.
    if board_name:
        for base in (build_root / board_name, build_root / "pio" / board_name):
            if base.exists() and (base / "build_info.json").exists():
                return base / "build_info.json", board_name
        print(f"Error: Board '{board_name}' not found or missing build_info.json")
        sys.exit(1)

    # No board requested: scan direct children, then .build/pio/* entries.
    discovered: List[Tuple[Path, str]] = []
    search_dirs = [build_root]
    pio_root = build_root / "pio"
    if pio_root.exists() and pio_root.is_dir():
        search_dirs.append(pio_root)

    for parent in search_dirs:
        for child in parent.iterdir():
            if child.is_dir():
                info_file = child / "build_info.json"
                if info_file.exists():
                    discovered.append((info_file, child.name))

    if not discovered:
        print(f"Error: No boards with build_info.json found in {build_root}")
        sys.exit(1)

    # Return the first available board
    return discovered[0]
|
||||
|
||||
|
||||
def main() -> None:
    """CLI entry point: analyze symbols (and optionally function calls) of a built ELF.

    Locates a board's build_info.json, resolves the toolchain paths
    (nm, c++filt, readelf, objdump), runs symbol/call-graph/map analysis,
    and writes a detailed JSON report next to the .build directory.
    Exits with status 1 when the board or its build info cannot be found.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description="Enhanced symbol analysis with optional function call analysis for any platform"
    )
    parser.add_argument(
        "--board", help="Board name to analyze (e.g., uno, esp32dev, esp32s3)"
    )
    parser.add_argument(
        "--no-enhanced",
        action="store_false",
        dest="enhanced",
        default=True,
        help="Disable enhanced analysis with function call graph (enhanced is default)",
    )
    parser.add_argument(
        "--show-calls-to",
        help="Show what functions call a specific function (enables enhanced mode)",
    )
    parser.add_argument(
        "--basic",
        action="store_true",
        help="Use basic nm-only symbol analysis (for backward compatibility). Default is comprehensive analysis with readelf + nm",
    )
    args = parser.parse_args()

    # Enable enhanced mode if show-calls-to is specified
    enhanced_mode = args.enhanced or args.show_calls_to

    # Use comprehensive symbol analysis by default, basic only if requested
    comprehensive_symbols = not args.basic

    # Find build info
    build_info_path, board_name = find_board_build_info(args.board)
    print(f"Found build info for {board_name}: {build_info_path}")

    with open(build_info_path) as f:
        build_info = json.load(f)

    # Get board info using proper board mapping
    board = create_board(board_name)
    real_board_name = board.get_real_board_name()

    # Try the real board name first, then fall back to directory name
    if real_board_name in build_info:
        board_info = build_info[real_board_name]
        actual_board_key = real_board_name
        if real_board_name != board_name:
            print(
                f"Note: Using board key '{real_board_name}' from board mapping (directory was '{board_name}')"
            )
    elif board_name in build_info:
        board_info = build_info[board_name]
        actual_board_key = board_name
    else:
        # Try to find the actual board key in the JSON as fallback
        board_keys = list(build_info.keys())
        if len(board_keys) == 1:
            actual_board_key = board_keys[0]
            board_info = build_info[actual_board_key]
            print(
                f"Note: Using only available board key '{actual_board_key}' from build_info.json (expected '{real_board_name}' or '{board_name}')"
            )
        else:
            print(
                f"Error: Could not find board '{real_board_name}' or '{board_name}' in build_info.json"
            )
            print(f"Available board keys: {board_keys}")
            sys.exit(1)

    # NOTE(review): actual_board_key is resolved above but only board_info is
    # consumed below; the report uses the directory-derived board_name.
    nm_path = board_info["aliases"]["nm"]
    cppfilt_path = board_info["aliases"]["c++filt"]
    elf_file = board_info["prog_path"]

    # Get readelf path (derive from nm path if not in aliases)
    if "readelf" in board_info["aliases"]:
        readelf_path = board_info["aliases"]["readelf"]
    else:
        # Derive readelf path from nm path (replace 'nm' with 'readelf')
        readelf_path = nm_path.replace("-nm", "-readelf")

    # Find map file
    map_file = Path(elf_file).with_suffix(".map")

    print(f"Analyzing ELF file: {elf_file}")
    print(f"Using nm tool: {nm_path}")
    print(f"Using c++filt tool: {cppfilt_path}")
    print(f"Using readelf tool: {readelf_path}")
    if enhanced_mode:
        objdump_path = board_info["aliases"]["objdump"]
        print(f"Using objdump tool: {objdump_path}")
    print(f"Map file: {map_file}")

    # Analyze symbols
    if comprehensive_symbols:
        symbols = analyze_symbols(elf_file, nm_path, cppfilt_path, readelf_path)
    else:
        symbols = analyze_symbols(elf_file, nm_path, cppfilt_path)

    # Analyze function calls if enhanced mode
    call_graph = {}
    reverse_call_graph = {}
    if enhanced_mode:
        objdump_path = board_info["aliases"]["objdump"]
        call_graph = analyze_function_calls(elf_file, objdump_path, cppfilt_path)
        reverse_call_graph = build_reverse_call_graph(call_graph)

    # Analyze dependencies
    dependencies = analyze_map_file(map_file)

    # Handle specific function query
    if args.show_calls_to:
        target_function = args.show_calls_to
        print("\n" + "=" * 80)
        print(f"FUNCTIONS THAT CALL: {target_function}")
        print("=" * 80)

        # Find functions that call the target (exact match first)
        exact_callers = reverse_call_graph.get(target_function, [])

        # Also search for partial matches
        partial_matches: Dict[str, List[str]] = {}
        for func_name, callers in reverse_call_graph.items():
            if (
                target_function.lower() in func_name.lower()
                and func_name != target_function
            ):
                partial_matches[func_name] = callers

        if exact_callers:
            print(f"\nExact match - Functions calling '{target_function}':")
            for i, caller in enumerate(exact_callers, 1):
                print(f" {i}. {caller}")

        if partial_matches:
            print(f"\nPartial matches - Functions containing '{target_function}':")
            for func_name, callers in partial_matches.items():
                print(f"\n Function: {func_name}")
                print(f" Called by {len(callers)} functions:")
                # Show at most five callers to keep the console report short.
                for caller in callers[:5]:
                    print(f" - {caller}")
                if len(callers) > 5:
                    print(f" ... and {len(callers) - 5} more")

        if not exact_callers and not partial_matches:
            print(f"No functions found that call '{target_function}'")
            print("Available functions (first 20):")
            available_funcs = sorted(reverse_call_graph.keys())[:20]
            for func in available_funcs:
                print(f" - {func}")

        return  # Exit early for specific query

    # Generate report using user-friendly board name
    report = generate_report(
        board_name.upper(),
        symbols,
        dependencies,
        call_graph,
        reverse_call_graph,
        enhanced_mode,
    )

    # Save detailed data to JSON (sorted by size, largest first)
    # Find the build directory (go up from wherever we are to find .build)
    current = Path.cwd()
    while current != current.parent:
        build_dir = current / ".build"
        if build_dir.exists():
            filename_suffix = (
                "_enhanced_symbol_analysis.json"
                if enhanced_mode
                else "_symbol_analysis.json"
            )
            output_file = build_dir / f"{board_name}{filename_suffix}"
            break
        current = current.parent
    else:
        # Fallback to current directory if .build not found
        filename_suffix = (
            "_enhanced_symbol_analysis.json"
            if enhanced_mode
            else "_symbol_analysis.json"
        )
        output_file = Path(f"{board_name}{filename_suffix}")

    # Create detailed analysis data structure
    detailed_data = DetailedAnalysisData(
        summary=report,
        all_symbols_sorted_by_size=sorted(symbols, key=lambda x: x.size, reverse=True),
        dependencies=dependencies,
        call_graph=call_graph if enhanced_mode else None,
        reverse_call_graph=reverse_call_graph if enhanced_mode else None,
    )

    with open(output_file, "w") as f:
        json.dump(asdict(detailed_data), f, indent=2)

    description = (
        "enhanced analysis with complete call graph"
        if enhanced_mode
        else "basic symbol analysis"
    )
    print(f"\nDetailed {description} saved to: {output_file}")

    if not enhanced_mode:
        print("This file contains ALL symbols without any filtering or classification.")
        print(
            "Enhanced mode with function call graph analysis is enabled by default. Use --no-enhanced to disable."
        )
    else:
        print("This file contains ALL symbols and complete call graph analysis.")
|
||||
|
||||
|
||||
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    main()
|
||||
263
libraries/FastLED/ci/util/test_args.py
Normal file
263
libraries/FastLED/ci/util/test_args.py
Normal file
@@ -0,0 +1,263 @@
|
||||
#!/usr/bin/env python3
|
||||
import argparse
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from typeguard import typechecked
|
||||
|
||||
from ci.util.test_types import TestArgs
|
||||
|
||||
|
||||
def _python_test_exists(test_name: str) -> bool:
|
||||
"""Check if a Python test file exists for the given test name"""
|
||||
# Check for the test file in ci/tests directory
|
||||
tests_dir = Path("ci/tests")
|
||||
|
||||
# Try various naming patterns for Python tests
|
||||
possible_names = [
|
||||
f"test_{test_name}.py",
|
||||
f"{test_name}.py",
|
||||
]
|
||||
|
||||
for name in possible_names:
|
||||
test_file = tests_dir / name
|
||||
if test_file.exists():
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def parse_args(args: Optional[list[str]] = None) -> TestArgs:
    """Parse command line arguments into a TestArgs record.

    Beyond plain argparse parsing this applies the project's auto-enable
    rules (a named test implies --py or --cpp; --check implies --cpp and
    --clang; --examples implies --cpp and --quick; Windows defaults to
    Clang) and validates mutually exclusive flags, exiting with status 1
    on --interactive/--no-interactive conflict.

    Args:
        args: Optional argv list; None means use sys.argv.
    """
    parser = argparse.ArgumentParser(description="Run FastLED tests")
    parser.add_argument(
        "--cpp",
        action="store_true",
        help="Run C++ tests only (equivalent to --unit --examples, suppresses Python tests)",
    )
    parser.add_argument("--unit", action="store_true", help="Run C++ unit tests only")
    parser.add_argument("--py", action="store_true", help="Run Python tests only")
    parser.add_argument(
        "test",
        type=str,
        nargs="?",
        default=None,
        help="Specific test to run (Python or C++)",
    )

    # Create mutually exclusive group for compiler selection
    compiler_group = parser.add_mutually_exclusive_group()
    compiler_group.add_argument(
        "--clang", action="store_true", help="Use Clang compiler"
    )
    compiler_group.add_argument(
        "--gcc", action="store_true", help="Use GCC compiler (default on non-Windows)"
    )

    parser.add_argument(
        "--clean", action="store_true", help="Clean build before compiling"
    )
    parser.add_argument(
        "--no-interactive",
        action="store_true",
        help="Force non-interactive mode (no confirmation prompts)",
    )
    parser.add_argument(
        "--interactive",
        action="store_true",
        help="Enable interactive mode (allows confirmation prompts)",
    )
    parser.add_argument(
        "--verbose",
        "-v",
        action="store_true",
        help="Enable verbose output showing all test details",
    )
    parser.add_argument(
        "--show-compile",
        action="store_true",
        help="Show compilation commands and output",
    )
    parser.add_argument(
        "--show-link",
        action="store_true",
        help="Show linking commands and output",
    )
    parser.add_argument(
        "--quick", action="store_true", help="Enable quick mode with FASTLED_ALL_SRC=1"
    )
    parser.add_argument(
        "--no-stack-trace",
        action="store_true",
        help="Disable stack trace dumping on timeout",
    )
    parser.add_argument(
        "--check",
        action="store_true",
        help="Enable static analysis (IWYU, clang-tidy) - auto-enables --cpp and --clang",
    )
    parser.add_argument(
        "--examples",
        nargs="*",
        help="Run example compilation tests only (optionally specify example names). Use with --full for complete compilation + linking + execution",
    )
    parser.add_argument(
        "--no-pch",
        action="store_true",
        help="Disable precompiled headers (PCH) when running example compilation tests",
    )
    parser.add_argument(
        "--unity",
        action="store_true",
        help="Enable UNITY build mode for examples - compile all source files as a single unit for improved performance",
    )
    parser.add_argument(
        "--no-unity",
        action="store_true",
        help="Disable unity builds for cpp tests and examples",
    )
    parser.add_argument(
        "--full",
        action="store_true",
        help="Run full integration tests including compilation + linking + program execution",
    )

    parser.add_argument(
        "--no-parallel",
        action="store_true",
        help="Force sequential test execution",
    )
    parser.add_argument(
        "--unity-chunks",
        type=int,
        default=1,
        help="Number of unity chunks when building libfastled.a (default: 1)",
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        help="Enable debug mode for C++ unit tests (e.g., full debug symbols)",
    )
    parser.add_argument(
        "--qemu",
        nargs="+",
        help="Run examples in QEMU emulation. Usage: --qemu esp32s3 [example_names...]",
    )

    parsed_args = parser.parse_args(args)

    # Convert argparse.Namespace to TestArgs dataclass
    test_args = TestArgs(
        cpp=parsed_args.cpp,
        unit=parsed_args.unit,
        py=parsed_args.py,
        test=parsed_args.test,
        clang=parsed_args.clang,
        gcc=parsed_args.gcc,
        clean=parsed_args.clean,
        no_interactive=parsed_args.no_interactive,
        interactive=parsed_args.interactive,
        verbose=parsed_args.verbose,
        show_compile=parsed_args.show_compile,
        show_link=parsed_args.show_link,
        quick=parsed_args.quick,
        no_stack_trace=parsed_args.no_stack_trace,
        check=parsed_args.check,
        examples=parsed_args.examples,
        no_pch=parsed_args.no_pch,
        unity=parsed_args.unity,
        no_unity=parsed_args.no_unity,
        full=parsed_args.full,
        no_parallel=parsed_args.no_parallel,
        unity_chunks=parsed_args.unity_chunks,
        debug=parsed_args.debug,
        qemu=parsed_args.qemu,
    )

    # Auto-enable --py or --cpp mode when a specific test is provided
    if test_args.test:
        # Check if this is a Python test first
        if _python_test_exists(test_args.test):
            # This is a Python test - enable Python mode
            if not test_args.py and not test_args.cpp:
                test_args.py = True
                print(
                    f"Auto-enabled --py mode for specific Python test: {test_args.test}"
                )
        else:
            # This is not a Python test - assume it's a C++ test
            if not test_args.cpp and not test_args.py:
                test_args.cpp = True
                print(f"Auto-enabled --cpp mode for specific test: {test_args.test}")
            # Also enable --unit when a specific C++ test is provided without any other flags
            if (
                not test_args.unit
                and not test_args.examples
                and not test_args.py
                and not test_args.full
            ):
                test_args.unit = True
                print(f"Auto-enabled --unit mode for specific test: {test_args.test}")

    # Auto-enable --verbose when running unit tests (disabled)
    # if test_args.unit and not test_args.verbose:
    #     test_args.verbose = True
    #     print("Auto-enabled --verbose mode for unit tests")

    # Auto-enable --cpp and --clang when --check is provided
    if test_args.check:
        if not test_args.cpp:
            test_args.cpp = True
            print("Auto-enabled --cpp mode for static analysis (--check)")
        if not test_args.clang and not test_args.gcc:
            test_args.clang = True
            print("Auto-enabled --clang compiler for static analysis (--check)")

    # Auto-enable --cpp and --quick when --examples is provided
    if test_args.examples is not None:
        if not test_args.cpp:
            test_args.cpp = True
            print("Auto-enabled --cpp mode for example compilation (--examples)")
        if not test_args.quick:
            test_args.quick = True
            print(
                "Auto-enabled --quick mode for faster example compilation (--examples)"
            )

    # Handle --full flag behavior
    if test_args.full:
        if test_args.examples is not None:
            # --examples --full: Run examples with full compilation+linking+execution
            print("Full examples mode: compilation + linking + program execution")
        else:
            # --full alone: Run integration tests
            if not test_args.cpp:
                test_args.cpp = True
                print("Auto-enabled --cpp mode for full integration tests (--full)")
            print("Full integration tests: compilation + linking + program execution")

    # Default to Clang on Windows unless --gcc is explicitly passed
    if sys.platform == "win32" and not test_args.gcc and not test_args.clang:
        test_args.clang = True
        print("Windows detected: defaulting to Clang compiler (use --gcc to override)")
    elif test_args.gcc:
        print("Using GCC compiler")
    elif test_args.clang:
        print("Using Clang compiler")

    # Validate conflicting arguments
    if test_args.no_interactive and test_args.interactive:
        print(
            "Error: --interactive and --no-interactive cannot be used together",
            file=sys.stderr,
        )
        sys.exit(1)

    # Set NO_PARALLEL environment variable if --no-parallel is used
    if test_args.no_parallel:
        os.environ["NO_PARALLEL"] = "1"
        print("Forcing sequential execution (NO_PARALLEL=1)")

    return test_args
|
||||
73
libraries/FastLED/ci/util/test_commands.py
Normal file
73
libraries/FastLED/ci/util/test_commands.py
Normal file
@@ -0,0 +1,73 @@
|
||||
#!/usr/bin/env python3
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, List
|
||||
|
||||
from ci.util.running_process import RunningProcess
|
||||
from ci.util.test_types import TestArgs
|
||||
|
||||
|
||||
def build_cpp_test_command(args: TestArgs) -> str:
    """Assemble the shell command line for the C++ test runner.

    Translates the relevant fields of *args* into CLI flags for
    ``ci.compiler.cpp_test_run`` and returns a single quoted string.
    """
    parts = ["uv", "run", "python", "-m", "ci.compiler.cpp_test_run"]

    if args.clang:
        parts.append("--clang")

    if args.test:
        parts.extend(["--test", args.test])

    # Boolean pass-through flags, forwarded only when enabled.
    flag_map = (
        (args.clean, "--clean"),
        (args.verbose, "--verbose"),
        (args.show_compile, "--show-compile"),
        (args.show_link, "--show-link"),
        (args.check, "--check"),
        (args.no_unity, "--no-unity"),
    )
    parts.extend(flag for enabled, flag in flag_map if enabled)

    return subprocess.list2cmdline(parts)
|
||||
|
||||
|
||||
def make_pio_check_cmd() -> List[str]:
    """Build the PlatformIO static-analysis invocation.

    Returns the argv list for ``pio check`` restricted to src/, reporting
    medium-or-worse findings and failing on high-severity defects.
    """
    base = ["pio", "check"]
    options = [
        "--skip-packages",
        "--src-filters=+<src/>",
        "--severity=medium",
        "--fail-on-defect=high",
        "--flags",
        "--inline-suppr --enable=all --std=c++17",
    ]
    return base + options
|
||||
|
||||
|
||||
def make_compile_uno_test_process(enable_stack_trace: bool = True) -> RunningProcess:
    """Start compilation of the Blink example for the uno board.

    Returns the already-running RunningProcess wrapper so the caller can
    poll or join it alongside other test work.
    """
    compile_cmd = [
        "uv",
        "run",
        "python",
        "-m",
        "ci.ci-compile",
        "uno",
        "--examples",
        "Blink",
        "--no-interactive",
    ]
    return RunningProcess(
        compile_cmd, auto_run=True, enable_stack_trace=enable_stack_trace
    )
|
||||
|
||||
|
||||
def run_command(cmd: List[str], **kwargs: Any) -> None:
    """Run *cmd*; on failure exit the interpreter with the child's code.

    Extra keyword arguments are forwarded verbatim to ``subprocess.run``.
    """
    try:
        subprocess.run(cmd, check=True, **kwargs)
    except subprocess.CalledProcessError as err:
        # Propagate the child's exit status instead of a Python traceback.
        sys.exit(err.returncode)
|
||||
165
libraries/FastLED/ci/util/test_env.py
Normal file
165
libraries/FastLED/ci/util/test_env.py
Normal file
@@ -0,0 +1,165 @@
|
||||
#!/usr/bin/env python3
|
||||
import os
|
||||
import platform
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
from typing import Protocol, cast
|
||||
|
||||
import psutil
|
||||
|
||||
from ci.util.test_types import TestArgs
|
||||
|
||||
|
||||
class ReconfigurableIO(Protocol):
    """Structural type for text streams that support ``reconfigure``.

    Matches ``sys.stdout``/``sys.stderr`` when backed by a TextIOWrapper,
    whose encoding/error handling can be changed after creation.
    """

    def reconfigure(self, *, encoding: str, errors: str) -> None: ...
|
||||
|
||||
|
||||
def setup_environment(args: TestArgs) -> None:
    """Export environment variables implied by the parsed test arguments.

    Side effects only: mutates ``os.environ`` for child build processes.
    """
    # Handle --quick flag: compile all sources as a single unit.
    if args.quick:
        os.environ["FASTLED_ALL_SRC"] = "1"
        print("Quick mode enabled. FASTLED_ALL_SRC=1")

    # Build-output visibility toggles, exported only when requested.
    for enabled, var in (
        (args.show_compile, "FASTLED_TEST_SHOW_COMPILE"),
        (args.show_link, "FASTLED_TEST_SHOW_LINK"),
    ):
        if enabled:
            os.environ[var] = "1"
|
||||
|
||||
|
||||
def setup_windows_console() -> None:
    """Switch stdout/stderr to UTF-8 on Windows so all glyphs print.

    No-op on other platforms or when a stream lacks ``reconfigure``;
    unencodable characters are replaced rather than raising.
    """
    if os.name != "nt":
        return
    for stream in (sys.stdout, sys.stderr):
        if hasattr(stream, "reconfigure"):
            # String form of the cast avoids needing the Protocol at runtime.
            cast("ReconfigurableIO", stream).reconfigure(
                encoding="utf-8", errors="replace"
            )
|
||||
|
||||
|
||||
def get_process_tree_info(pid: int) -> str:
    """Return a human-readable status dump for *pid* and its children.

    Collects name, status, CPU times and memory for the process plus every
    recursive child. Best-effort: any failure (process gone, access denied)
    yields a short fallback string instead of raising.

    Args:
        pid: Process id to inspect.

    Returns:
        Multi-line description, or a "Could not get process info" message.
    """
    try:
        process = psutil.Process(pid)
        info = [f"Process {pid} ({process.name()})"]
        info.append(f"Status: {process.status()}")
        info.append(f"CPU Times: {process.cpu_times()}")
        info.append(f"Memory: {process.memory_info()}")

        # Get child processes
        children = process.children(recursive=True)
        if children:
            info.append("\nChild processes:")
            for child in children:
                info.append(f" Child {child.pid} ({child.name()})")
                info.append(f" Status: {child.status()}")
                info.append(f" CPU Times: {child.cpu_times()}")
                info.append(f" Memory: {child.memory_info()}")

        return "\n".join(info)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrow it so interrupts still propagate.
        return f"Could not get process info for PID {pid}"
|
||||
|
||||
|
||||
def kill_process_tree(pid: int) -> None:
    """Kill a process and all its children.

    Escalation order: terminate every child, wait up to 3 seconds, force-kill
    survivors, then terminate (and if necessary kill) the parent itself.
    Processes that vanish mid-teardown are ignored.

    Args:
        pid: Root process id of the tree to tear down.
    """
    try:
        parent = psutil.Process(pid)
        children = parent.children(recursive=True)

        # First try graceful termination
        for child in children:
            try:
                child.terminate()
            except psutil.NoSuchProcess:
                # Child exited on its own; nothing to do.
                pass

        # Give them a moment to terminate
        _, alive = psutil.wait_procs(children, timeout=3)

        # Force kill any that are still alive
        for child in alive:
            try:
                child.kill()
            except psutil.NoSuchProcess:
                pass

        # Finally terminate the parent
        try:
            parent.terminate()
            parent.wait(3)  # Give it 3 seconds to terminate
        except (psutil.NoSuchProcess, psutil.TimeoutExpired):
            try:
                parent.kill()  # Force kill if still alive
            except psutil.NoSuchProcess:
                pass
    except psutil.NoSuchProcess:
        pass  # Process already gone
|
||||
|
||||
|
||||
def dump_thread_stacks() -> None:
    """Print a stack trace for every live thread, then the process tree.

    Diagnostic dump used when a watchdog or timeout fires.
    """
    print("\n=== MAIN THREAD STACK TRACE ===")
    for th in threading.enumerate():
        print(f"\nThread {th.name}:")
        if th.ident is None:
            continue
        frame = sys._current_frames().get(th.ident)
        if frame:
            traceback.print_stack(frame)
    print("=== END STACK TRACE ===\n")

    # Follow with a snapshot of this process and its children.
    print("\n=== PROCESS TREE INFO ===")
    print(get_process_tree_info(os.getpid()))
    print("=== END PROCESS TREE INFO ===\n")
|
||||
|
||||
|
||||
def setup_watchdog(timeout: int = 60) -> threading.Thread:
    """Arm a daemon watchdog that aborts the process after *timeout* seconds.

    Args:
        timeout: Seconds to wait before dumping diagnostics, killing the
            process tree and exiting with status 2 (timeout indicator).

    Returns:
        The started daemon thread; it dies silently with the interpreter
        when the program finishes in time.
    """

    def _expire() -> None:
        time.sleep(timeout)
        print(
            f"\n🚨 WATCHDOG TIMER EXPIRED - Process took too long! ({timeout} seconds)"
        )
        dump_thread_stacks()

        # Take down every child process first, then ourselves.
        kill_process_tree(os.getpid())

        time.sleep(1)  # give output a moment to flush
        os._exit(2)  # exit code 2 signals a timeout abort

    guard = threading.Thread(target=_expire, daemon=True, name="WatchdogTimer")
    guard.start()
    return guard
|
||||
|
||||
|
||||
def setup_force_exit() -> threading.Thread:
    """Start a daemon thread that hard-kills the process tree after 1 second.

    Last-resort exit path for when normal shutdown hangs; returns the
    started daemon thread.
    """

    def _kill_soon() -> None:
        time.sleep(1)
        print("Force exit daemon thread invoked")
        kill_process_tree(os.getpid())
        os._exit(1)

    watchdog = threading.Thread(target=_kill_soon, daemon=True, name="ForceExitDaemon")
    watchdog.start()
    return watchdog
|
||||
109
libraries/FastLED/ci/util/test_exceptions.py
Normal file
109
libraries/FastLED/ci/util/test_exceptions.py
Normal file
@@ -0,0 +1,109 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Custom exceptions for test failures that need to bubble up to callers."""
|
||||
|
||||
import subprocess
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Optional
|
||||
|
||||
|
||||
@dataclass
class TestFailureInfo:
    """Information about a single test failure"""

    test_name: str  # identifier of the failed test
    # Command that was executed. Declared str, but consumers tolerate an
    # argv list too (see FastLEDTestException.get_detailed_failure_info)
    # — NOTE(review): confirm whether callers pass lists and widen the hint.
    command: str
    return_code: int  # process exit status of the failed command
    output: str  # captured output shown in detailed failure reports
    error_type: str = "test_failure"  # category label for the failure
|
||||
|
||||
|
||||
class FastLEDTestException(Exception):
    """Base exception for FastLED test failures.

    Carries a list of TestFailureInfo records so callers can report every
    failure at once instead of only the first one encountered.
    """

    def __init__(self, message: str, failures: Optional[List[TestFailureInfo]] = None):
        super().__init__(message)
        self.message = message
        # Keep the caller's list object when given (aliasing preserved).
        self.failures = failures if failures else []

    def add_failure(self, failure: TestFailureInfo) -> None:
        """Record one more test failure on this exception."""
        self.failures.append(failure)

    def has_failures(self) -> bool:
        """Return True when at least one failure has been recorded."""
        return bool(self.failures)

    def get_failure_summary(self) -> str:
        """Return a short report: the message plus one line per failure."""
        if not self.failures:
            return self.message

        lines = [self.message, f"\nFailed tests ({len(self.failures)}):"]
        lines.extend(
            f" - {failure.test_name}: {failure.error_type} (exit code {failure.return_code})"
            for failure in self.failures
        )
        return "\n".join(lines)

    def get_detailed_failure_info(self) -> str:
        """Return a full report: command, error type, exit code and output."""
        if not self.failures:
            return self.message

        details = [
            self.message,
            f"\n{'=' * 50}",
            "DETAILED FAILURE INFORMATION",
            f"{'=' * 50}",
        ]

        for i, failure in enumerate(self.failures, 1):
            # The command may be stored as an argv list; render it as one string.
            if isinstance(failure.command, list):
                cmd_str: str = subprocess.list2cmdline(failure.command)
            else:
                cmd_str = failure.command
            assert isinstance(cmd_str, str)
            details.append(f"\n{i}. {failure.test_name}")
            details.append(f" Command: {cmd_str}")
            details.append(f" Error Type: {failure.error_type}")
            details.append(f" Exit Code: {failure.return_code}")
            details.append(f" Output:")
            # Indent each captured output line under its failure entry.
            details.extend(f" {line}" for line in failure.output.split("\n"))

        return "\n".join(details)
|
||||
|
||||
|
||||
class CompilationFailedException(FastLEDTestException):
    """Raised when one or more compilation steps fail.

    Behaviorally identical to the base class; exists so callers can catch
    compile errors separately from runtime test failures.
    """

    def __init__(
        self,
        message: str = "Compilation failed",
        failures: Optional[List[TestFailureInfo]] = None,
    ):
        super().__init__(message=message, failures=failures)
|
||||
|
||||
|
||||
class TestExecutionFailedException(FastLEDTestException):
    """Raised when compiled tests run but report failure.

    Behaviorally identical to the base class; the distinct type lets
    callers separate execution failures from compile errors.
    """

    def __init__(
        self,
        message: str = "Test execution failed",
        failures: Optional[List[TestFailureInfo]] = None,
    ):
        super().__init__(message=message, failures=failures)
|
||||
|
||||
|
||||
class TestTimeoutException(FastLEDTestException):
    """Raised when a test exceeds its allotted run time.

    Behaviorally identical to the base class; the distinct type lets
    callers treat timeouts differently from ordinary failures.
    """

    def __init__(
        self,
        message: str = "Test execution timed out",
        failures: Optional[List[TestFailureInfo]] = None,
    ):
        super().__init__(message=message, failures=failures)
|
||||
1457
libraries/FastLED/ci/util/test_runner.py
Normal file
1457
libraries/FastLED/ci/util/test_runner.py
Normal file
File diff suppressed because it is too large
Load Diff
321
libraries/FastLED/ci/util/test_types.py
Normal file
321
libraries/FastLED/ci/util/test_types.py
Normal file
@@ -0,0 +1,321 @@
|
||||
#!/usr/bin/env python3
|
||||
import hashlib
|
||||
import json
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum, auto
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from typeguard import typechecked
|
||||
|
||||
|
||||
class TestResultType(Enum):
    """Type of test result message.

    Used as the category tag on TestResult records; values are
    auto-assigned and carry no ordering significance.
    """

    SUCCESS = auto()
    ERROR = auto()
    WARNING = auto()
    INFO = auto()
    DEBUG = auto()
|
||||
|
||||
@typechecked
@dataclass
class TestResult:
    """Structured test result"""

    type: TestResultType  # category of this message (success/error/...)
    message: str  # human-readable description
    test_name: Optional[str] = None  # originating test, when known
    details: Optional[Dict[str, Any]] = None  # extra structured payload
    timestamp: float = 0.0  # epoch seconds; 0.0 means "stamp on creation"

    def __post_init__(self):
        # Default the timestamp to "now" when the caller did not supply one.
        if not self.timestamp:
            self.timestamp = time.time()
|
||||
|
||||
|
||||
@typechecked
@dataclass
class TestSuiteResult:
    """Results for a test suite"""

    name: str  # suite identifier
    results: List[TestResult]  # individual result messages
    start_time: float  # epoch seconds when the suite began
    end_time: Optional[float] = None  # None while the suite is running
    passed: bool = True  # overall pass/fail verdict

    @property
    def duration(self) -> float:
        """Get test duration in seconds"""
        # A suite that has not finished yet reports zero duration.
        if self.end_time is None:
            return 0.0
        return self.end_time - self.start_time
|
||||
|
||||
|
||||
@typechecked
@dataclass
class TestArgs:
    """Type-safe test arguments.

    Mirrors the CLI flags accepted by parse_args; see that function for
    the auto-enable interactions between the fields.
    """

    cpp: bool = False  # run C++ tests (unit + examples)
    unit: bool = False  # run C++ unit tests
    py: bool = False  # run Python tests
    test: Optional[str] = None  # a specific test name, when given
    clang: bool = False  # use the Clang compiler
    gcc: bool = False  # use the GCC compiler
    clean: bool = False  # clean build before compiling
    no_interactive: bool = False  # suppress confirmation prompts
    interactive: bool = False  # allow confirmation prompts
    verbose: bool = False  # show all test details
    show_compile: bool = False  # echo compilation commands/output
    show_link: bool = False  # echo linking commands/output
    quick: bool = False  # quick mode (FASTLED_ALL_SRC=1)
    no_stack_trace: bool = False  # disable stack dump on timeout
    check: bool = False  # static analysis (implies cpp+clang)
    examples: Optional[list[str]] = None  # example names; [] means all
    no_pch: bool = False  # disable precompiled headers
    unity: bool = False  # UNITY build mode for examples
    no_unity: bool = False  # Disable unity builds for cpp tests and examples
    full: bool = False  # full integration (compile+link+run)

    no_parallel: bool = False  # Force sequential test execution
    unity_chunks: int = 1  # Number of unity chunks for libfastled build
    debug: bool = False  # Enable debug mode for unit tests
    qemu: Optional[list[str]] = None  # Run examples in QEMU emulation
|
||||
|
||||
|
||||
@typechecked
@dataclass
class TestCategories:
    """Type-safe test category flags.

    Plain flags say a category runs; the ``*_only`` variants say it is
    the sole category requested. __post_init__ rejects non-bool values.
    """

    unit: bool
    examples: bool
    py: bool
    integration: bool
    unit_only: bool
    examples_only: bool
    py_only: bool
    integration_only: bool
    qemu_esp32s3: bool
    qemu_esp32s3_only: bool

    def __post_init__(self):
        # Reject non-bool values even when static checks were bypassed.
        names = (
            "unit",
            "examples",
            "py",
            "integration",
            "unit_only",
            "examples_only",
            "py_only",
            "integration_only",
            "qemu_esp32s3",
            "qemu_esp32s3_only",
        )
        for field_name in names:
            value = getattr(self, field_name)
            if not isinstance(value, bool):
                raise TypeError(f"{field_name} must be bool, got {type(value)}")
|
||||
|
||||
|
||||
@typechecked
@dataclass
class FingerprintResult:
    """Type-safe fingerprint result"""

    hash: str  # hex SHA-256 digest of the scanned tree ("" when scanning failed)
    elapsed_seconds: Optional[str] = None  # scan duration formatted "%.2f" (set by calculate_fingerprint)
    status: Optional[str] = None  # "error: ..." when fingerprint_code_base failed
|
||||
|
||||
|
||||
def process_test_flags(args: "TestArgs") -> "TestArgs":
    """Process and validate test execution flags.

    Mutates *args* in place and returns it. Resolution order:

    1. ``--cpp`` with no other category flag → unit tests + all examples,
       Python tests suppressed.
    2. Any specific category flag (unit / examples / py / full / qemu) →
       run ONLY the requested categories (exclusive behavior).
    3. No flags at all → run everything (backward compatibility).

    Fixes over the previous version: removed the unreachable duplicated
    ``return args`` at the end, the no-op ``if …: pass`` scaffolding, and
    the commented-out dead code.
    """
    # Count how many category-selecting flags the user passed explicitly.
    specific_flags = [
        args.unit,
        args.examples is not None,
        args.py,
        args.full,
        args.qemu is not None,
    ]
    specific_count = sum(bool(flag) for flag in specific_flags)

    # If --cpp is provided alone, default to --unit --examples (no Python).
    if args.cpp and specific_count == 0:
        args.unit = True
        args.examples = []  # Empty list means run all examples
        print("--cpp mode: Running unit tests and examples (Python tests suppressed)")
        return args

    # If any specific flags are provided, ONLY run those (exclusive behavior).
    if specific_count > 0:
        enabled_tests: list[str] = []
        if args.unit:
            enabled_tests.append("unit tests")
        if args.examples is not None:
            enabled_tests.append("examples")
        if args.py:
            enabled_tests.append("Python tests")
        if args.full:
            # --full combined with --examples means "full example integration".
            if args.examples is not None:
                enabled_tests.append("full example integration tests")
            else:
                enabled_tests.append("full integration tests")

        print(f"Specific test flags provided: Running only {', '.join(enabled_tests)}")
        return args

    # No specific flags: run everything (backward compatibility).
    args.unit = True
    args.examples = []  # Empty list means run all examples
    args.py = True
    print("No test flags specified: Running all tests (unit, examples, Python)")
    return args
|
||||
|
||||
|
||||
def determine_test_categories(args: TestArgs) -> TestCategories:
    """Determine which test categories should run based on flags."""
    # Resolve each category's enabled state from the parsed arguments.
    flags = {
        "unit": args.unit,
        "examples": args.examples is not None,
        "py": args.py,
        # Integration tests only run when --full is used alone (not with --examples).
        "integration": args.full and args.examples is None,
        "qemu_esp32s3": args.qemu is not None,
    }

    def only(name: str) -> bool:
        # True when *name* is the single enabled category.
        return flags[name] and not any(v for k, v in flags.items() if k != name)

    return TestCategories(
        unit=flags["unit"],
        examples=flags["examples"],
        py=flags["py"],
        integration=flags["integration"],
        qemu_esp32s3=flags["qemu_esp32s3"],
        unit_only=only("unit"),
        examples_only=only("examples"),
        py_only=only("py"),
        integration_only=only("integration"),
        qemu_esp32s3_only=only("qemu_esp32s3"),
    )
|
||||
|
||||
|
||||
def fingerprint_code_base(
    start_directory: Path, glob: str = "**/*.h,**/*.cpp,**/*.hpp"
) -> FingerprintResult:
    """
    Create a fingerprint of the code base by hashing file contents.

    Args:
        start_directory: The root directory to start scanning from
        glob: Comma-separated list of glob patterns to match files

    Returns:
        A FingerprintResult with hash and optional status
    """
    try:
        digest = hashlib.sha256()

        # Collect every file matching any comma-separated pattern, then sort
        # the combined list so the fingerprint is stable across runs.
        matched: list[Path] = []
        for raw_pattern in glob.split(","):
            matched.extend(sorted(start_directory.glob(raw_pattern.strip())))
        matched.sort()

        for candidate in matched:
            if not candidate.is_file():
                continue
            # Mix the relative path into the hash so renames are detected.
            digest.update(str(candidate.relative_to(start_directory)).encode("utf-8"))
            try:
                with open(candidate, "rb") as handle:
                    # Stream in fixed-size chunks to handle large files.
                    while chunk := handle.read(4096):
                        digest.update(chunk)
            except Exception as read_err:
                # Unreadable files still contribute deterministically.
                digest.update(f"ERROR:{str(read_err)}".encode("utf-8"))

        return FingerprintResult(hash=digest.hexdigest())
    except Exception as scan_err:
        return FingerprintResult(hash="", status=f"error: {str(scan_err)}")
|
||||
|
||||
|
||||
def calculate_fingerprint(root_dir: Path | None = None) -> FingerprintResult:
    """
    Calculate the code base fingerprint.

    Args:
        root_dir: The root directory to start scanning from. If None, uses src directory.

    Returns:
        The fingerprint result, with elapsed_seconds filled in.
    """
    target = root_dir if root_dir is not None else Path.cwd() / "src"

    started = time.time()
    result = fingerprint_code_base(target)
    # Record how long the scan took alongside the hash.
    result.elapsed_seconds = f"{time.time() - started:.2f}"
    return result
|
||||
211
libraries/FastLED/ci/util/tools.py
Normal file
211
libraries/FastLED/ci/util/tools.py
Normal file
@@ -0,0 +1,211 @@
|
||||
# pyright: reportUnknownMemberType=false
|
||||
"""
|
||||
Tools for working with build info and tool paths.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict
|
||||
|
||||
from ci.util.paths import BUILD
|
||||
|
||||
|
||||
@dataclass
class Tools:
    """Resolved paths to the toolchain binaries named in build_info.json."""

    as_path: Path
    ld_path: Path
    objcopy_path: Path
    objdump_path: Path
    cpp_filt_path: Path
    nm_path: Path


def load_tools(build_info_path: Path) -> Tools:
    """Load toolchain binary paths from a build_info.json file.

    Reads the aliases of the first board entry, appends ``.exe`` on Windows,
    and verifies each binary exists on disk.

    Raises:
        FileNotFoundError: if any resolved tool path does not exist.
    """
    build_info: Dict[str, Any] = json.loads(build_info_path.read_text())
    first_board = next(iter(build_info))
    aliases: Dict[str, str] = build_info[first_board]["aliases"]

    def resolve(alias: str) -> Path:
        # Windows binaries carry an .exe suffix that the aliases omit.
        tool = Path(aliases[alias])
        if sys.platform == "win32":
            tool = tool.with_suffix(".exe")
        return tool

    resolved = [
        resolve(alias)
        for alias in ("as", "ld", "objcopy", "objdump", "c++filt", "nm")
    ]
    for tool in resolved:
        if not tool.exists():
            raise FileNotFoundError(f"Tool not found: {tool}")
    return Tools(*resolved)
|
||||
|
||||
|
||||
def _list_builds() -> list[Path]:
    """Return every directory located directly under the BUILD root."""
    candidates = (BUILD / name for name in os.listdir(BUILD))
    return [entry for entry in candidates if entry.is_dir()]
|
||||
|
||||
|
||||
def _check_build(build: Path) -> bool:
|
||||
# 1. should contain a build_info.json file
|
||||
# 2. should contain a .pio/build directory
|
||||
has_build_info = (build / "build_info.json").exists()
|
||||
has_pio_build = (build / ".pio" / "build").exists()
|
||||
return has_build_info and has_pio_build
|
||||
|
||||
|
||||
def _prompt_build() -> Path:
    """Interactively ask the user to pick one of the available builds.

    Loops until a valid, complete build directory is selected; exits the
    process when no builds exist at all.

    Returns:
        The chosen build directory path.
    """
    builds = _list_builds()
    if not builds:
        print("Error: No builds found", file=sys.stderr)
        sys.exit(1)
    print("Select a build:")
    for i, build in enumerate(builds):
        print(f"  [{i}]: {build}")
    while True:
        try:
            which = int(input("Enter the number of the build to use: "))
        except ValueError:
            print("Error: Invalid input", file=sys.stderr)
            continue
        if not 0 <= which < len(builds):
            print("Error: Invalid selection", file=sys.stderr)
            continue
        # FIX: _list_builds() already returns full paths; the old
        # `BUILD / builds[which]` join only worked by accident because
        # joining onto an absolute path discards the left operand.
        candidate = builds[which]
        if _check_build(candidate):
            return candidate
        print("Error: Invalid build", file=sys.stderr)
|
||||
|
||||
|
||||
def _prompt_object_file(build: Path) -> Path:
    """Interactively ask the user to pick an object file from the build tree.

    Scans ``<build>/.pio/build`` recursively for ``.o`` files, excluding
    anything related to the Arduino framework, and loops until a valid index
    is entered. Exits the process when no object files are found.
    """
    # Look for object files in .pio/build directory
    build_dir = build / ".pio" / "build"
    object_files: list[Path] = []

    # Walk through build directory to find .o files
    for root, _, files in os.walk(build_dir):
        for file in files:
            # NOTE(review): the filename check already rejects names
            # containing "FrameworkArduino"; the path-parts check below
            # additionally skips files *inside* a FrameworkArduino directory.
            if file.endswith(".o") and "FrameworkArduino" not in file:
                full_path = Path(root) / file
                if "FrameworkArduino" not in full_path.parts:
                    object_files.append(full_path)

    if not object_files:
        print("Error: No object files found", file=sys.stderr)
        sys.exit(1)

    print("\nSelect an object file:")
    for i, obj_file in enumerate(object_files):
        # Show paths relative to the build dir to keep the menu readable.
        print(f"  [{i}]: {obj_file.relative_to(build_dir)}")

    while True:
        try:
            which = int(input("Enter the number of the object file to use: "))
            if 0 <= which < len(object_files):
                return object_files[which]
            print("Error: Invalid selection", file=sys.stderr)
        except ValueError:
            print("Error: Invalid input", file=sys.stderr)
            continue
|
||||
|
||||
|
||||
def cli() -> None:
    """Command-line entry point: dump symbol table and/or disassembly for a
    user-selected object file, using the toolchain recorded in the build's
    build_info.json.
    """
    import argparse
    import subprocess

    def run_tool(cmd: list[str]) -> None:
        # Echo the exact command before running it so users can reproduce it.
        print(f"Running command: {subprocess.list2cmdline(cmd)}")
        subprocess.run(cmd)

    parser = argparse.ArgumentParser(
        description="Dump object file information using build tools"
    )
    parser.add_argument(
        "build_path",
        type=Path,
        nargs="?",
        help="Path to build directory containing build info JSON file",
    )
    parser.add_argument(
        "--symbols", action="store_true", help="Dump symbol table using nm"
    )
    parser.add_argument(
        "--disassemble", action="store_true", help="Dump disassembly using objdump"
    )

    args = parser.parse_args()
    build_path = args.build_path
    symbols = args.symbols
    disassemble = args.disassemble

    # Resolve the build directory: prompt when omitted, validate when given.
    if build_path is None:
        build_path = _prompt_build()
    elif not _check_build(build_path):
        print("Error: Invalid build directory", file=sys.stderr)
        sys.exit(1)

    build_info_path = build_path / "build_info.json"
    assert build_info_path.exists(), f"File not found: {build_info_path}"

    tools = load_tools(build_info_path)

    # No action flags given: ask interactively until the user picks one
    # (or quits). After this loop at least one action is guaranteed.
    if not symbols and not disassemble:
        while True:
            print(
                "Error: Please specify at least one action to perform", file=sys.stderr
            )
            action = input(
                "Enter 's' to dump symbols, 'd' to disassemble, or 'q' to quit: "
            )
            if action == "s":
                symbols = True
                break
            elif action == "d":
                disassemble = True
                break
            elif action == "q":
                sys.exit(0)
            else:
                print("Error: Invalid action", file=sys.stderr)

    object_file = _prompt_object_file(build_path)
    if symbols:
        # BUG FIX: the echoed command previously included --syms but the
        # executed command omitted it, so the symbol table was never dumped.
        run_tool([str(tools.objdump_path), str(object_file), "--syms"])

    if disassemble:
        run_tool([str(tools.objdump_path), "-d", str(object_file)])
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Allow running this module directly; exit non-zero on Ctrl-C so callers
    # (shell scripts, CI) can detect the interruption.
    try:
        cli()
    except KeyboardInterrupt:
        print("Exiting...")
        sys.exit(1)
|
||||
60
libraries/FastLED/ci/util/url_utils.py
Normal file
60
libraries/FastLED/ci/util/url_utils.py
Normal file
@@ -0,0 +1,60 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
URL utility functions for PlatformIO and other tools.
|
||||
|
||||
This module provides utilities for working with URLs, particularly for
|
||||
sanitizing URLs to create filesystem-safe path names.
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def sanitize_url_for_path(url: str) -> Path:
    """
    Sanitize URL to create a valid filesystem path name.
    Simply strips protocol and replaces invalid filesystem characters.

    Args:
        url: URL to sanitize

    Returns:
        Sanitized Path suitable for use as a filesystem path component

    Example:
        >>> sanitize_url_for_path("https://github.com/owner/repo/releases/download/v1.0.0/file.zip")
        Path('github_com_owner_repo_releases_download_v1_0_0_file_zip')
    """
    # Drop the protocol prefix (http://, https://, ftp://, ...) if present.
    stripped = url.split("://", 1)[-1]

    # Map every character that is not alphanumeric to an underscore. This
    # covers filesystem-invalid characters, dots, spaces, and dashes alike.
    cleaned = "".join(ch if ch.isalnum() else "_" for ch in stripped)

    # Collapse runs of underscores and trim them from both ends in one pass.
    collapsed = "_".join(piece for piece in cleaned.split("_") if piece)

    # Cap the length at 100 chars, keeping a prefix plus a short content hash
    # of the protocol-stripped URL so long URLs stay unique.
    if len(collapsed) > 100:
        digest = hashlib.sha256(stripped.encode()).hexdigest()[:8]
        collapsed = collapsed[:70] + "_" + digest

    return Path(collapsed)
|
||||
326
libraries/FastLED/ci/util/xcache.py
Normal file
326
libraries/FastLED/ci/util/xcache.py
Normal file
@@ -0,0 +1,326 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
xcache.py - Enhanced sccache wrapper with response file support
|
||||
|
||||
This trampoline wrapper handles the ESP32S3 sccache problem where long command lines
|
||||
use response files (@tmpfile.tmp) that sccache doesn't understand. It creates
|
||||
temporary wrapper scripts that act as compiler aliases.
|
||||
|
||||
WARNING: Never use sys.stdout.flush() in this file!
|
||||
It causes blocking issues on Windows that hang subprocess processes.
|
||||
Python's default buffering behavior works correctly across platforms.
|
||||
|
||||
Usage:
|
||||
xcache.py <compiler> [args...]
|
||||
|
||||
When xcache detects response file arguments (@file.tmp), it:
|
||||
1. Creates a temporary wrapper script that acts as the compiler
|
||||
2. The wrapper script internally calls: sccache <actual_compiler> "$@"
|
||||
3. Executes the original command with response files intact
|
||||
4. The system handles response file expansion normally
|
||||
|
||||
This solves the ESP32S3 issue where commands are too long for direct execution
|
||||
but need caching support.
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import stat
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
|
||||
|
||||
# os.environ["XCACHE_DEBUG"] = "1"
|
||||
|
||||
|
||||
@dataclass
class XCacheConfig:
    """Configuration for xcache wrapper."""

    sccache_path: str  # path to the sccache executable
    compiler_path: str  # path to the real compiler being wrapped
    temp_dir: Path  # directory where temporary wrapper scripts are created
    debug: bool = False  # when True, emit XCACHE diagnostics to stderr
|
||||
|
||||
|
||||
def find_sccache() -> Optional[str]:
    """Locate the sccache executable.

    Checks PATH first, then a few conventional install locations, returning
    the first executable hit or None when sccache is not installed.
    """
    found = shutil.which("sccache")
    if found:
        return found

    # Fall back to well-known install locations not always on PATH.
    fallback_locations = (
        "/usr/local/bin/sccache",
        "/usr/bin/sccache",
        "/opt/local/bin/sccache",
        os.path.expanduser("~/.cargo/bin/sccache"),
    )
    for candidate in fallback_locations:
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate

    return None
|
||||
|
||||
|
||||
def detect_response_files(args: List[str]) -> List[str]:
    """Return the response-file paths (@file arguments) that exist on disk.

    Only arguments of the form ``@<path>`` where ``<path>`` is an existing
    file are returned; the leading ``@`` is stripped.
    """
    return [
        arg[1:]
        for arg in args
        if arg.startswith("@") and len(arg) > 1 and os.path.isfile(arg[1:])
    ]
|
||||
|
||||
|
||||
def create_compiler_wrapper_script(config: "XCacheConfig") -> Path:
    """Create a temporary wrapper script that acts as a compiler alias.

    The generated script forwards all arguments (including @response files)
    to ``sccache <actual_compiler>``, so the OS expands response files
    normally. The caller is responsible for deleting the returned script.

    Returns:
        Path to the executable wrapper script.
    """
    # Determine platform-appropriate script type
    is_windows = os.name == "nt"
    script_suffix = ".bat" if is_windows else ".sh"

    # BUG FIX: str(bool) renders "True"/"False", but the scripts compare
    # against lowercase "true" and POSIX `[` is case-sensitive, so the debug
    # echo never fired on Unix. Render the flag lowercase explicitly.
    debug_flag = "true" if config.debug else "false"

    # Create temporary script file
    script_fd, script_path = tempfile.mkstemp(
        suffix=script_suffix, prefix="xcache_compiler_", dir=config.temp_dir
    )

    try:
        if is_windows:
            # Create Windows batch file
            wrapper_content = f'''@echo off
REM Temporary xcache compiler wrapper script
REM Acts as alias for: sccache <actual_compiler>

if /i "{debug_flag}"=="true" (
    echo XCACHE: Wrapper executing: "{config.sccache_path}" "{config.compiler_path}" %* >&2
)

REM Execute sccache with the actual compiler and all arguments (including response files)
"{config.sccache_path}" "{config.compiler_path}" %*
'''
        else:
            # Create Unix shell script
            wrapper_content = f'''#!/bin/bash
# Temporary xcache compiler wrapper script
# Acts as alias for: sccache <actual_compiler>

if [ "{debug_flag}" = "true" ]; then
    echo "XCACHE: Wrapper executing: {config.sccache_path} {config.compiler_path} $@" >&2
fi

# Execute sccache with the actual compiler and all arguments (including response files)
exec "{config.sccache_path}" "{config.compiler_path}" "$@"
'''

        # Write wrapper script (fdopen closes the mkstemp descriptor for us).
        with os.fdopen(script_fd, "w", encoding="utf-8") as f:
            f.write(wrapper_content)

        # Make script executable (Unix only; Windows runs .bat by extension).
        script_path_obj = Path(script_path)
        if not is_windows:
            script_path_obj.chmod(script_path_obj.stat().st_mode | stat.S_IEXEC)

        return script_path_obj

    except Exception:
        # Best-effort cleanup of the half-created script on any failure.
        try:
            os.close(script_fd)
            os.unlink(script_path)
        except Exception:
            pass
        raise
|
||||
|
||||
|
||||
def execute_direct(config: "XCacheConfig", args: List[str]) -> int:
    """Execute sccache directly without response file handling.

    Streams the child's merged stdout/stderr line-by-line to our stdout
    (manual pumping avoids pipe-buffer deadlocks; see module docstring for
    why flush is avoided), and returns the child's exit code.

    Returns:
        The child's exit code; 127 when the command is missing, 1 on other
        execution failures.
    """
    command = [config.sccache_path, config.compiler_path] + args

    if config.debug:
        print(f"XCACHE: Direct execution: {' '.join(command)}", file=sys.stderr)

    try:
        # Use Popen to manually pump stdout/stderr and prevent hanging
        process = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # Merge stderr into stdout as requested
            text=True,
            bufsize=1,  # Line buffered
            universal_newlines=True,
        )

        # Manually pump output until process finishes
        assert process.stdout is not None  # guaranteed by stdout=PIPE
        while True:
            output = process.stdout.readline()
            if output == "" and process.poll() is not None:
                break
            if output:
                print(output.rstrip())  # Print to stdout, remove trailing newlines

        # Wait for process to complete and get return code
        return process.wait()

    except FileNotFoundError as e:
        # CONSISTENCY FIX: diagnostics now go to stderr like every other
        # XCACHE message in this module (previously printed to stdout).
        print(f"XCACHE ERROR: Command not found: {e}", file=sys.stderr)
        return 127
    except Exception as e:
        print(f"XCACHE ERROR: Execution failed: {e}", file=sys.stderr)
        return 1
|
||||
|
||||
|
||||
def execute_with_wrapper(config: XCacheConfig, args: List[str]) -> int:
    """Execute using wrapper script for response file handling.

    When *args* contains @response-file arguments, a temporary wrapper script
    (acting as an alias for ``sccache <compiler>``) is executed with the
    original arguments so the OS expands the response files; otherwise falls
    back to execute_direct(). Returns the child's exit code, or 1 on wrapper
    failure. The wrapper script is always cleaned up.
    """
    response_files = detect_response_files(args)

    if not response_files:
        # No response files, use direct execution
        return execute_direct(config, args)

    if config.debug:
        print(f"XCACHE: Detected response files: {response_files}", file=sys.stderr)

    # Create compiler wrapper script (acts as alias for sccache + compiler)
    # Pre-bind to None so the finally-block cleanup is safe even when
    # script creation itself raises.
    wrapper_script = None
    try:
        wrapper_script = create_compiler_wrapper_script(config)

        if config.debug:
            print(
                f"XCACHE: Created compiler wrapper: {wrapper_script}", file=sys.stderr
            )
            # Show wrapper script content for debugging
            try:
                with open(wrapper_script, "r") as f:
                    content = f.read()
                print(f"XCACHE: Wrapper script content:", file=sys.stderr)
                for i, line in enumerate(content.split("\n"), 1):
                    print(f"XCACHE: {i}: {line}", file=sys.stderr)
            except Exception as e:
                print(f"XCACHE: Could not read wrapper script: {e}", file=sys.stderr)

        # Execute the original command but replace the compiler with our wrapper
        # The wrapper script will handle: sccache <actual_compiler> "$@"
        # Response files are passed through and handled normally by the system
        command = [str(wrapper_script)] + args

        if config.debug:
            print(
                f"XCACHE: Executing with wrapper: {' '.join(command)}", file=sys.stderr
            )
            print(f"XCACHE: Wrapper script path: {wrapper_script}", file=sys.stderr)
            print(
                f"XCACHE: Wrapper script exists: {wrapper_script.exists()}",
                file=sys.stderr,
            )
            if wrapper_script.exists():
                print(
                    f"XCACHE: Wrapper script executable: {os.access(wrapper_script, os.X_OK)}",
                    file=sys.stderr,
                )

        # Use Popen to manually pump stdout/stderr and prevent hanging
        process = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # Merge stderr into stdout as requested
            text=True,
            bufsize=1,  # Line buffered
            universal_newlines=True,
        )

        # Manually pump output until process finishes
        while True:
            output = process.stdout.readline()
            if output == "" and process.poll() is not None:
                break
            if output:
                print(output.rstrip())  # Print to stdout, remove trailing newlines

        # Wait for process to complete and get return code
        return_code = process.wait()
        return return_code

    except Exception as e:
        print(f"XCACHE ERROR: Wrapper execution failed: {e}", file=sys.stderr)
        return 1
    finally:
        # Clean up wrapper script
        if wrapper_script and wrapper_script.exists():
            try:
                wrapper_script.unlink()
            except Exception:
                pass
|
||||
|
||||
|
||||
def main() -> int:
    """Main xcache entry point.

    Parses ``<compiler> [args...]`` from argv, bypasses the cache for pure
    preprocessor invocations, locates sccache, and delegates execution to
    execute_with_wrapper(). Returns a process exit code.
    """

    # Parse command line
    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} <compiler> [args...]", file=sys.stderr)
        print("", file=sys.stderr)
        print(
            "xcache is an enhanced sccache wrapper with response file support.",
            file=sys.stderr,
        )
        print(
            "It handles ESP32S3 long command lines that use @response.tmp files.",
            file=sys.stderr,
        )
        return 1

    compiler_path = sys.argv[1]
    compiler_args = sys.argv[2:]

    # Check for debug mode
    debug = os.environ.get("XCACHE_DEBUG", "").lower() in ("1", "true", "yes")

    # Some ESP-IDF build steps (e.g., linker script generation) invoke the
    # compiler purely as a preprocessor (e.g. `-E -P`) on linker scripts.
    # These operations are not real compilations and often confuse sccache,
    # so bypass the cache entirely in this situation.
    if "-E" in compiler_args and "-P" in compiler_args:
        cmd = [compiler_path] + compiler_args
        return subprocess.call(cmd)

    # Find sccache
    sccache_path = find_sccache()
    if not sccache_path:
        print("XCACHE ERROR: sccache not found in PATH", file=sys.stderr)
        print("Please install sccache or ensure it's in your PATH", file=sys.stderr)
        return 1

    # Set up temporary directory for generated wrapper scripts
    temp_dir = Path(tempfile.gettempdir()) / "xcache"
    temp_dir.mkdir(exist_ok=True)

    # Configure xcache
    config = XCacheConfig(
        sccache_path=sccache_path,
        compiler_path=compiler_path,
        temp_dir=temp_dir,
        debug=debug,
    )

    if debug:
        print(f"XCACHE: sccache={sccache_path}", file=sys.stderr)
        print(f"XCACHE: compiler={compiler_path}", file=sys.stderr)
        print(f"XCACHE: args={compiler_args}", file=sys.stderr)
        print(f"XCACHE: temp_dir={temp_dir}", file=sys.stderr)

    # Execute with wrapper handling
    return execute_with_wrapper(config, compiler_args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit code.
    sys.exit(main())
|
||||
Reference in New Issue
Block a user