87 changes: 87 additions & 0 deletions .github/scripts/analyze_profile.py
@@ -0,0 +1,87 @@
#!/usr/bin/env python3
"""Analyze CPU profile using GitHub Models API for performance insights."""

import os
import sys
from pathlib import Path


def analyze_profile_with_github_models(profile_path: str) -> str:
    """Analyze a CPU profile using GitHub Models API."""
    try:
        import requests
    except ImportError:
        return "⚠️ requests package not available for LLM analysis"

    github_token = os.getenv("GITHUB_TOKEN")
    if not github_token:
        return "⚠️ GITHUB_TOKEN not set"

    # Read profile (limit to first 50KB to avoid token limits)
    profile_content = Path(profile_path).read_text(encoding="utf-8")
    if len(profile_content) > 50_000:
        profile_content = profile_content[:50_000] + "\n... (truncated)"

    prompt = f"""Analyze this CPU profiling data from py-spy and provide actionable performance insights.

The profile is in "collapsed stack trace" format where each line shows:
- A semicolon-separated call stack (deepest function last)
- Followed by a space and sample count

Focus on:
1. **Hotspots**: Which functions consume the most CPU time?
2. **Patterns**: Are there inefficiencies like excessive I/O, loops, or imports?
3. **Recommendations**: Specific, actionable suggestions to improve performance

Profile data:
```
{profile_content}
```

Provide a concise analysis (2-3 paragraphs max) with the most important findings."""

    # Use GitHub Models API (available in GitHub Actions)
    # Reference: https://docs.github.com/en/github-models
    endpoint = "https://models.inference.ai.azure.com/chat/completions"

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {github_token}",
    }

    payload = {
        "model": "gpt-4o",  # GitHub Models supports gpt-4o, gpt-4o-mini, etc.
        "messages": [
            {"role": "user", "content": prompt}
        ],
        "max_tokens": 1024,
        "temperature": 0.7,
    }

    try:
        response = requests.post(endpoint, headers=headers, json=payload, timeout=30)
        response.raise_for_status()
        result = response.json()
        return result["choices"][0]["message"]["content"]
    except requests.exceptions.RequestException as e:
        return f"⚠️ Failed to analyze with GitHub Models: {e}"
    except (KeyError, IndexError) as e:
        return f"⚠️ Unexpected response format from GitHub Models: {e}"


def main():
    if len(sys.argv) != 2:
        print("Usage: analyze_profile.py <profile.txt>")
        sys.exit(1)

    profile_path = sys.argv[1]
    if not Path(profile_path).exists():
        print(f"Error: {profile_path} not found")
        sys.exit(1)

    analysis = analyze_profile_with_github_models(profile_path)
    print(analysis)


if __name__ == "__main__":
    main()
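Reviewer note: the collapsed-stack format the prompt describes can also be summarized locally without an LLM call. The following is a minimal sketch, not part of this PR; it assumes py-spy's raw output of one `stack;frames count` line per entry, and the `top_functions` helper name is illustrative.

#!/usr/bin/env python3
"""Sketch: rank leaf frames in a py-spy raw profile by sample count."""

import sys
from collections import Counter
from pathlib import Path


def top_functions(profile_path: str, n: int = 10) -> list[tuple[str, int]]:
    counts: Counter[str] = Counter()
    for line in Path(profile_path).read_text(encoding="utf-8").splitlines():
        stack, _, samples = line.rpartition(" ")  # sample count is the last field
        if not stack or not samples.isdigit():
            continue  # skip blank or truncated lines
        counts[stack.split(";")[-1]] += int(samples)  # deepest frame is last
    return counts.most_common(n)


if __name__ == "__main__":
    for frame, samples in top_functions(sys.argv[1]):
        print(f"{samples:>8}  {frame}")

Run against artifacts/cpu_profile.txt after a profiling run to spot-check what the AI analysis reports.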
71 changes: 71 additions & 0 deletions .github/workflows/integration_tests.yml
@@ -80,6 +80,77 @@ jobs:
          bash run.sh
          bash ../common/validate_output.sh

      - name: Upload testcase artifacts
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: artifacts-${{ matrix.testcase }}-${{ matrix.environment }}
          path: testcases/${{ matrix.testcase }}/artifacts/
          if-no-files-found: ignore

      - name: Analyze performance profile with LLM
        if: matrix.testcase == 'performance-testcase' && github.event_name == 'pull_request'
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          if [ -f testcases/${{ matrix.testcase }}/artifacts/cpu_profile.txt ]; then
            echo "## 🔍 Performance Profile Analysis - ${{ matrix.environment }}" > analysis.md
            echo "" >> analysis.md

            # Run LLM analysis using GitHub Models
            echo "### 🤖 AI Analysis (GitHub Models)" >> analysis.md
            echo "" >> analysis.md
            uv run --with requests python .github/scripts/analyze_profile.py testcases/${{ matrix.testcase }}/artifacts/cpu_profile.txt >> analysis.md || echo "_Analysis failed - GitHub Models may not be available in this repository_" >> analysis.md
            echo "" >> analysis.md

            # Add artifact links
            echo "### 📦 Artifacts" >> analysis.md
            echo "" >> analysis.md
            echo "Download profiling artifacts from this run:" >> analysis.md
            echo "" >> analysis.md
            echo "**CPU Profiling (py-spy):**" >> analysis.md
            echo "- 🔥 **Flamegraph SVG** - Visual representation (open in browser)" >> analysis.md
            echo "- 📊 **Speedscope JSON** - Interactive timeline viewer ([speedscope.app](https://speedscope.app))" >> analysis.md
            echo "- 📝 **Raw Profile** - Text format for custom analysis" >> analysis.md
            echo "" >> analysis.md
            echo "**Memory Profiling (memray):**" >> analysis.md
            echo "- 💾 **Memory Flamegraph HTML** - Memory allocation visualization" >> analysis.md
            echo "- 🔬 **Memory Profile Binary** - Raw memray data for custom analysis" >> analysis.md
            echo "" >> analysis.md
            echo "[View all artifacts →](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> analysis.md
            echo "" >> analysis.md

            # Add sample stack traces for quick preview
            echo "<details>" >> analysis.md
            echo "<summary>Top 20 Stack Traces (click to expand)</summary>" >> analysis.md
            echo "" >> analysis.md
            echo '```' >> analysis.md
            head -20 testcases/${{ matrix.testcase }}/artifacts/cpu_profile.txt >> analysis.md
            echo '```' >> analysis.md
            echo "</details>" >> analysis.md
          fi

      - name: Comment PR with performance analysis
        if: matrix.testcase == 'performance-testcase' && github.event_name == 'pull_request'
        uses: actions/github-script@v7
        with:
          script: |
            const fs = require('fs');
            const analysisFile = 'analysis.md';

            if (fs.existsSync(analysisFile)) {
              const analysis = fs.readFileSync(analysisFile, 'utf8');

              await github.rest.issues.createComment({
                issue_number: context.issue.number,
                owner: context.repo.owner,
                repo: context.repo.repo,
                body: analysis
              });
            }

  summarize-results:
    needs: [integration-tests]
    runs-on: ubuntu-latest
29 changes: 29 additions & 0 deletions testcases/performance-testcase/main.py
@@ -0,0 +1,29 @@
import logging
from dataclasses import dataclass


logger = logging.getLogger(__name__)


@dataclass
class EchoIn:
    message: str
    repeat: int | None = 1
    prefix: str | None = None


@dataclass
class EchoOut:
    message: str


def main(input: EchoIn) -> EchoOut:
    result = []

    # repeat is Optional, so guard against None before calling range()
    for _ in range(input.repeat or 1):
        line = input.message
        if input.prefix:
            line = f"{input.prefix}: {line}"
        result.append(line)

    return EchoOut(message="\n".join(result))
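Reviewer note: as a quick sanity check (illustrative, not part of the testcase), calling main directly with the payload that run.sh sends via `uipath run main` shows the expected echo behavior.

# Illustrative only: mirrors the input used in run.sh.
result = main(EchoIn(message="abc", repeat=2, prefix="xyz"))
assert result.message == "xyz: abc\nxyz: abc"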
12 changes: 12 additions & 0 deletions testcases/performance-testcase/pyproject.toml
@@ -0,0 +1,12 @@
[project]
name = "agent"
version = "0.0.1"
description = "agent"
authors = [{ name = "John Doe", email = "john.doe@myemail.com" }]
dependencies = [
    "uipath",
]
requires-python = ">=3.11"

[tool.uv.sources]
uipath = { path = "../../", editable = true }
34 changes: 34 additions & 0 deletions testcases/performance-testcase/run.sh
@@ -0,0 +1,34 @@
#!/bin/bash
set -e

echo "Syncing dependencies..."
uv sync

uv add py-spy memray

echo "Authenticating with UiPath..."
uv run uipath auth --client-id="$CLIENT_ID" --client-secret="$CLIENT_SECRET" --base-url="$BASE_URL"

echo "Run init..."
uv run uipath init

echo "Packing agent..."
uv run uipath pack

echo "Creating artifacts directory..."
mkdir -p artifacts

echo "Run agent with py-spy profiling (raw text format for LLM analysis)"
uv run py-spy record --subprocesses -f raw -o artifacts/cpu_profile.txt -- uv run uipath run main '{"message": "abc", "repeat": 2, "prefix": "xyz"}'

echo "Run agent with py-spy profiling (flamegraph SVG for visualization)"
uv run py-spy record --subprocesses -f flamegraph -o artifacts/cpu_profile.svg -- uv run uipath run main '{"message": "abc", "repeat": 2, "prefix": "xyz"}'

echo "Run agent with py-spy profiling (speedscope JSON for interactive viewing)"
uv run py-spy record --subprocesses -f speedscope -o artifacts/cpu_profile_speedscope.json -- uv run uipath run main '{"message": "abc", "repeat": 2, "prefix": "xyz"}'

# echo "Run agent with memray memory profiling"
# uv run memray run -o artifacts/memory_profile.bin --force -m uipath run main '{"message": "abc", "repeat": 2, "prefix": "xyz"}'

# echo "Generating memory flamegraph from memray profile"
# uv run memray flamegraph artifacts/memory_profile.bin -o artifacts/memory_flamegraph.html --force || echo "Failed to generate memory flamegraph"
32 changes: 32 additions & 0 deletions testcases/performance-testcase/src/assert.py
@@ -0,0 +1,32 @@
import json
import os

# Check NuGet package
uipath_dir = ".uipath"
assert os.path.exists(uipath_dir), "NuGet package directory (.uipath) not found"

nupkg_files = [f for f in os.listdir(uipath_dir) if f.endswith(".nupkg")]
assert nupkg_files, "NuGet package file (.nupkg) not found in .uipath directory"

print(f"NuGet package found: {nupkg_files[0]}")

# Check agent output file
output_file = "__uipath/output.json"
assert os.path.isfile(output_file), "Agent output file not found"

print("Agent output file found")

# Check status and required fields
with open(output_file, "r", encoding="utf-8") as f:
    output_data = json.load(f)

# Check status
status = output_data.get("status")
assert status == "successful", f"Agent execution failed with status: {status}"

print("Agent execution status: successful")

# Check required fields in the agent response
assert "output" in output_data, "Missing 'output' field in agent response"

print("Required fields validation passed")
5 changes: 5 additions & 0 deletions testcases/performance-testcase/uipath.json
@@ -0,0 +1,5 @@
{
"functions": {
"main": "main.py:main"
}
}