skills/detection/yolo-detection-2026/deploy.bat (16 changes: 10 additions & 6 deletions)
@@ -120,9 +120,13 @@ if exist "%SKILL_DIR%\scripts\env_config.py" (
 if defined ENV_CONFIG_DIR (
     echo %LOG_PREFIX% Detecting hardware via env_config.py...>&2
 
-    REM Run env_config detection via Python
-    for /f "tokens=*" %%B in ('"%VPYTHON%" -c "import sys; sys.path.insert(0, r'!ENV_CONFIG_DIR!'); from env_config import HardwareEnv; env = HardwareEnv.detect(); print(env.backend)" 2^>nul') do (
-        set "DETECTED_BACKEND=%%B"
+    REM Run env_config detection via Python (temp file avoids for /f quoting issues)
+    set "ENV_CONFIG_DIR=!ENV_CONFIG_DIR!"
+    set "_TMPBACK=%TEMP%\_aegis_detect_backend.txt"
+    "%VPYTHON%" -c "import sys,os; sys.path.insert(0, os.environ['ENV_CONFIG_DIR']); from env_config import HardwareEnv; env = HardwareEnv.detect(); print(env.backend)" > "!_TMPBACK!" 2>nul
+    if exist "!_TMPBACK!" (
+        set /p DETECTED_BACKEND=<"!_TMPBACK!"
+        del "!_TMPBACK!" 2>nul
     )
 
     REM Validate backend value (Windows: only cuda, intel, cpu are realistic)
@@ -187,7 +191,7 @@ if "!BACKEND!" neq "cpu" (
     echo %LOG_PREFIX% Pre-converting model to optimized format for !BACKEND!...>&2
     echo {"event": "progress", "stage": "optimize", "message": "Converting model for !BACKEND! (~30-120s)..."}
 
-    "%VPYTHON%" -c "import sys; sys.path.insert(0, r'!ENV_CONFIG_DIR!'); from env_config import HardwareEnv; env = HardwareEnv.detect(); from ultralytics import YOLO; model = YOLO('yolo26n.pt'); result = env.export_model(model, 'yolo26n'); print(f'Optimized model exported: {result}' if result else 'Export skipped or failed')" 2>&1
+    "%VPYTHON%" -c "import sys,os; sys.path.insert(0, os.environ['ENV_CONFIG_DIR']); from env_config import HardwareEnv; env = HardwareEnv.detect(); from ultralytics import YOLO; model = YOLO('yolo26n.pt'); result = env.export_model(model, 'yolo26n'); print(f'Optimized model exported: {result}' if result else 'Export skipped or failed')" 2>&1
 
     if !errorlevel! equ 0 (
         echo {"event": "progress", "stage": "optimize", "message": "Model optimization complete"}
@@ -199,7 +203,7 @@ if "!BACKEND!" neq "cpu" (
     echo %LOG_PREFIX% Pre-converting model to ONNX for CPU...>&2
     echo {"event": "progress", "stage": "optimize", "message": "Converting model for cpu (~30-120s)..."}
 
-    "%VPYTHON%" -c "import sys; sys.path.insert(0, r'!ENV_CONFIG_DIR!'); from env_config import HardwareEnv; env = HardwareEnv.detect(); from ultralytics import YOLO; model = YOLO('yolo26n.pt'); result = env.export_model(model, 'yolo26n'); print(f'Optimized model exported: {result}' if result else 'Export skipped or failed')" 2>&1
+    "%VPYTHON%" -c "import sys,os; sys.path.insert(0, os.environ['ENV_CONFIG_DIR']); from env_config import HardwareEnv; env = HardwareEnv.detect(); from ultralytics import YOLO; model = YOLO('yolo26n.pt'); result = env.export_model(model, 'yolo26n'); print(f'Optimized model exported: {result}' if result else 'Export skipped or failed')" 2>&1
 
     if !errorlevel! equ 0 (
         echo {"event": "progress", "stage": "optimize", "message": "Model optimization complete"}
@@ -212,7 +216,7 @@ if "!BACKEND!" neq "cpu" (
 REM ─── Step 6: Verify installation ───────────────────────────────────────────
 
 echo %LOG_PREFIX% Verifying installation...>&2
-"%VPYTHON%" -c "import sys, json; sys.path.insert(0, r'!ENV_CONFIG_DIR!'); from env_config import HardwareEnv; env = HardwareEnv.detect(); print(json.dumps(env.to_dict(), indent=2))" 2>&1
+"%VPYTHON%" -c "import sys,os,json; sys.path.insert(0, os.environ['ENV_CONFIG_DIR']); from env_config import HardwareEnv; env = HardwareEnv.detect(); print(json.dumps(env.to_dict(), indent=2))" 2>&1
 
 echo {"event": "complete", "backend": "!BACKEND!", "message": "YOLO 2026 skill installed (!BACKEND! backend)"}
 echo %LOG_PREFIX% Done! Backend: !BACKEND!>&2
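Why the temp-file rewrite works: cmd.exe parses the for /f command string twice, so a -c payload full of quotes and r'...' literals is fragile; redirecting to a temp file and reading it back with set /p skips that second parse, and passing the directory through the environment keeps the path out of the quoted string entirely. A minimal Python sketch of the same pass-paths-via-environment pattern (the probe_backend helper and its 60s timeout are illustrative, not part of the PR; only the env_config import line mirrors deploy.bat):

import os
import subprocess

def probe_backend(python_exe: str, env_config_dir: str, timeout_s: int = 60):
    """Hypothetical helper mirroring deploy.bat's detection step: run the
    probe in a child interpreter and hand it the module path via the
    environment, so the path never has to survive shell quoting."""
    child_env = dict(os.environ, ENV_CONFIG_DIR=env_config_dir)
    code = (
        "import sys, os; sys.path.insert(0, os.environ['ENV_CONFIG_DIR']); "
        "from env_config import HardwareEnv; print(HardwareEnv.detect().backend)"
    )
    try:
        result = subprocess.run([python_exe, "-c", code], env=child_env,
                                capture_output=True, text=True, timeout=timeout_s)
    except subprocess.TimeoutExpired:
        return None
    return result.stdout.strip() or None  # e.g. "cuda", "intel", or "cpu"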
skills/detection/yolo-detection-2026/scripts/detect.py (21 changes: 15 additions & 6 deletions)
@@ -240,14 +240,22 @@ def main():
     target_classes = [c.strip() for c in target_classes.split(",")]
 
     # ── Hardware detection & optimized model loading ──
     emit({"event": "progress", "stage": "init", "message": "Detecting compute hardware..."})
     env = HardwareEnv.detect()
     perf = PerfTracker(interval=PERF_STATS_INTERVAL)
 
+    gpu_msg = f"{env.gpu_name} ({env.backend})" if env.gpu_name else env.backend
+    emit({"event": "progress", "stage": "init", "message": f"Hardware: {gpu_msg}"})
+
     try:
         emit({"event": "progress", "stage": "model", "message": f"Loading {model_name} model ({env.export_format} format)..."})
         model, model_format = env.load_optimized(model_name, use_optimized=use_optimized)
+        perf.model_load_ms = env.load_ms
+        perf.export_ms = env.export_ms
 
+        if env.export_ms > 0:
+            emit({"event": "progress", "stage": "model", "message": f"Model optimized in {env.export_ms:.0f}ms"})
+
         ready_event = {
             "event": "ready",
             "model": f"yolo2026{model_size[0]}",
@@ -268,18 +276,19 @@
         emit({"event": "error", "message": f"Failed to load model: {e}", "retriable": False})
         sys.exit(1)
 
-    # Graceful shutdown
-    running = True
+    # Graceful shutdown — exit immediately with code 0.
+    # The stdin read loop blocks, so setting a flag doesn't work;
+    # we must exit in the signal handler to avoid being killed (code null).
     def handle_signal(signum, frame):
-        nonlocal running
-        running = False
+        sig_name = "SIGTERM" if signum == signal.SIGTERM else "SIGINT"
+        log(f"Received {sig_name}, shutting down gracefully")
+        perf.emit_final()
+        sys.exit(0)
     signal.signal(signal.SIGTERM, handle_signal)
     signal.signal(signal.SIGINT, handle_signal)
 
     # Main loop: read frames from stdin, output detections to stdout
     for line in sys.stdin:
-        if not running:
-            break
 
         line = line.strip()
         if not line:
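The handler rewrite above is the behavioral heart of this file. With the old flag approach, the main loop only notices running == False after the next stdin line arrives; a process idling between frames never re-checks the flag, the supervisor's kill wins, and the exit code is reported as null. Raising SystemExit from inside the handler interrupts the blocked read immediately and exits 0. A stripped-down sketch of the pattern (the on_shutdown callback stands in for perf.emit_final() and is illustrative):

import signal
import sys

def install_exit_handler(on_shutdown):
    """Install SIGTERM/SIGINT handlers that flush final state and exit 0
    directly, rather than setting a flag a blocked stdin loop never sees."""
    def handle_signal(signum, frame):
        name = signal.Signals(signum).name
        print(f"Received {name}, shutting down gracefully", file=sys.stderr)
        on_shutdown()   # final stats, cleanup, etc.
        sys.exit(0)     # raises SystemExit in the main thread, ending the read
    signal.signal(signal.SIGTERM, handle_signal)
    signal.signal(signal.SIGINT, handle_signal)

if __name__ == "__main__":
    install_exit_handler(lambda: None)
    for line in sys.stdin:   # blocks between lines, like detect.py's frame loop
        pass                 # process the frame here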
skills/lib/env_config.py (82 changes: 65 additions & 17 deletions)
@@ -133,31 +133,79 @@ def detect() -> "HardwareEnv":
         return env
 
     def _try_cuda(self) -> bool:
-        """Detect NVIDIA GPU via nvidia-smi and torch."""
-        if not shutil.which("nvidia-smi"):
-            return False
+        """Detect NVIDIA GPU via nvidia-smi (with Windows path search) and WMI fallback."""
+        nvidia_smi = shutil.which("nvidia-smi")
+
+        # Windows: check well-known paths if not on PATH
+        if not nvidia_smi and platform.system() == "Windows":
+            for candidate in [
+                Path(os.environ.get("PROGRAMFILES", r"C:\Program Files"))
+                / "NVIDIA Corporation" / "NVSMI" / "nvidia-smi.exe",
+                Path(os.environ.get("WINDIR", r"C:\Windows"))
+                / "System32" / "nvidia-smi.exe",
+            ]:
+                if candidate.is_file():
+                    nvidia_smi = str(candidate)
+                    _log(f"Found nvidia-smi at {nvidia_smi}")
+                    break
+
+        if nvidia_smi:
+            try:
+                result = subprocess.run(
+                    [nvidia_smi, "--query-gpu=name,memory.total,driver_version",
+                     "--format=csv,noheader,nounits"],
+                    capture_output=True, text=True, timeout=10,
+                )
+                if result.returncode == 0:
+                    line = result.stdout.strip().split("\n")[0]
+                    parts = [p.strip() for p in line.split(",")]
+                    if len(parts) >= 3:
+                        self.backend = "cuda"
+                        self.device = "cuda"
+                        self.gpu_name = parts[0]
+                        self.gpu_memory_mb = int(float(parts[1]))
+                        self.driver_version = parts[2]
+                        self.detection_details["nvidia_smi"] = line
+                        _log(f"NVIDIA GPU: {self.gpu_name} ({self.gpu_memory_mb}MB, driver {self.driver_version})")
+                        return True
+            except (subprocess.TimeoutExpired, FileNotFoundError, ValueError) as e:
+                _log(f"nvidia-smi probe failed: {e}")
+
+        # Windows WMI fallback: detect NVIDIA GPU even without nvidia-smi on PATH
+        if platform.system() == "Windows":
+            return self._try_cuda_wmi()
+
+        return False
+
+    def _try_cuda_wmi(self) -> bool:
+        """Windows-only: detect NVIDIA GPU via WMI (wmic)."""
         try:
             result = subprocess.run(
-                ["nvidia-smi", "--query-gpu=name,memory.total,driver_version",
-                 "--format=csv,noheader,nounits"],
+                ["wmic", "path", "win32_VideoController", "get",
+                 "Name,AdapterRAM,DriverVersion", "/format:csv"],
                 capture_output=True, text=True, timeout=10,
            )
             if result.returncode != 0:
                 return False
 
-            line = result.stdout.strip().split("\n")[0]
-            parts = [p.strip() for p in line.split(",")]
-            if len(parts) >= 3:
-                self.backend = "cuda"
-                self.device = "cuda"
-                self.gpu_name = parts[0]
-                self.gpu_memory_mb = int(float(parts[1]))
-                self.driver_version = parts[2]
-                self.detection_details["nvidia_smi"] = line
-                _log(f"NVIDIA GPU: {self.gpu_name} ({self.gpu_memory_mb}MB, driver {self.driver_version})")
-                return True
+            for line in result.stdout.strip().split("\n"):
+                if "NVIDIA" in line.upper():
+                    parts = [p.strip() for p in line.split(",")]
+                    # CSV format: Node,AdapterRAM,DriverVersion,Name
+                    if len(parts) >= 4:
+                        self.backend = "cuda"
+                        self.device = "cuda"
+                        self.gpu_name = parts[3]
+                        try:
+                            self.gpu_memory_mb = int(int(parts[1]) / (1024 * 1024))
+                        except (ValueError, IndexError):
+                            pass
+                        self.driver_version = parts[2] if len(parts) > 2 else ""
+                        self.detection_details["wmi"] = line
+                        _log(f"NVIDIA GPU (WMI): {self.gpu_name} ({self.gpu_memory_mb}MB)")
+                        return True
         except (subprocess.TimeoutExpired, FileNotFoundError, ValueError) as e:
-            _log(f"nvidia-smi probe failed: {e}")
+            _log(f"WMI probe failed: {e}")
         return False
 
     def _try_rocm(self) -> bool:
Expand Down
Loading
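Two notes on the WMI fallback for the record. First, wmic /format:csv output starts with a blank line and a leading Node column, with the remaining properties sorted alphabetically, which is why Name lands at index 3; iterating over every line and filtering on "NVIDIA" sidesteps the blank line. Second, Win32_VideoController.AdapterRAM is a 32-bit field, so reported VRAM caps near 4 GB on larger cards; harmless here, since only backend selection depends on it. A self-contained sketch of the parsing against captured sample output (the sample row is invented):

# Parse wmic-style CSV the same way _try_cuda_wmi does, using a canned
# sample instead of a live wmic call (values below are made up).
sample = """
Node,AdapterRAM,DriverVersion,Name
DESKTOP-1,4293918720,32.0.15.6094,NVIDIA GeForce RTX 3060
"""

for line in sample.strip().split("\n"):
    if "NVIDIA" in line.upper():
        parts = [p.strip() for p in line.split(",")]
        if len(parts) >= 4:
            name = parts[3]
            vram_mb = int(int(parts[1]) / (1024 * 1024))  # AdapterRAM is bytes
            print(f"{name}: {vram_mb} MB")  # NVIDIA GeForce RTX 3060: 4095 MB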