69 changes: 34 additions & 35 deletions .github/actions/analyzer/action.yml
@@ -1,82 +1,81 @@
name: 'Dependabot/Renovate Vulnerability Analyzer'
description: 'Analyze Dependabot/Renovate PRs for security vulnerabilities using NVD API and Claude AI'
author: 'Your Organization'
name: "Dependabot/Renovate Vulnerability Analyzer"
description: "Analyze Dependabot/Renovate PRs for security vulnerabilities using NVD API and Claude AI"
author: "Your Organization"

branding:
icon: 'shield'
color: 'red'
icon: "shield"
color: "red"

inputs:
repository:
description: 'Repository in the format owner/repo'
description: "Repository in the format owner/repo"
required: true
pr_number:
description: 'Pull request number to analyze'
description: "Pull request number to analyze"
required: true
additional_comment:
description: 'Additional instructions for analysis'
description: "Additional instructions for analysis"
required: false
default: ''
default: ""
include_previous:
description: 'Include previous analysis results for re-evaluation'
description: "Include previous analysis results for re-evaluation"
required: false
default: 'false'
default: "false"
package_name:
description: 'Specific package name to analyze (if empty, analyze all packages in the PR)'
description: "Specific package name to analyze (if empty, analyze all packages in the PR)"
required: false
default: ''
default: ""
working_directory:
description: 'Working directory for code analysis (relative to workspace root)'
description: "Working directory for code analysis (relative to workspace root)"
required: false
default: '.'
default: "."
ai_provider:
description: 'AI provider to use (claude-vertex/claude-direct/claude-bedrock/gemini-vertex/gemini-direct/openai)'
description: "AI provider to use (claude-vertex/claude-direct/claude-bedrock/gemini-vertex/gemini-direct/openai)"
required: false
default: 'claude-vertex'
default: "claude-vertex"
ai_model:
description: 'AI model to use (e.g., claude-sonnet-4-5@20250929, gemini-2.5-pro)'
description: "AI model to use (e.g., claude-sonnet-4-5@20250929, gemini-2.5-pro)"
required: false
ai_timeout:
description: 'Timeout for AI responses in seconds'
description: "Timeout for AI responses in seconds"
required: false
default: '360'
default: "360"
vertex_project_id:
description: 'GCP project ID for Vertex AI (required when ai_provider is claude-vertex or gemini-vertex)'
description: "GCP project ID for Vertex AI (required when ai_provider is claude-vertex or gemini-vertex)"
required: false
default: ''
default: ""
vertex_region:
description: 'GCP region for Vertex AI (us-east5 for Claude, us-central1 for Gemini)'
description: "GCP region for Vertex AI (us-east5 for Claude, us-central1 for Gemini)"
required: false
default: 'us-east5'
default: "us-east5"
anthropic_api_key:
description: 'Anthropic API Key (required when ai_provider is claude-direct)'
description: "Anthropic API Key (required when ai_provider is claude-direct)"
required: false
gemini_api_key:
description: 'Gemini API Key (required when ai_provider is gemini-direct)'
description: "Gemini API Key (required when ai_provider is gemini-direct)"
required: false
openai_api_key:
description: 'OpenAI API Key (required when ai_provider is openai)'
description: "OpenAI API Key (required when ai_provider is openai)"
required: false
aws_region:
description: 'AWS region for Bedrock (required when ai_provider is claude-bedrock)'
description: "AWS region for Bedrock (required when ai_provider is claude-bedrock)"
required: false
default: 'us-east-1'
default: "us-east-1"

outputs:
analysis_result:
description: 'Vulnerability analysis result'
risk_level:
description: 'Overall risk level (low/medium/high/critical)'
max_risk_level:
description: "Overall risk level (low/medium/high/critical)"

runs:
using: 'docker'
image: 'Dockerfile'
using: "docker"
image: "Dockerfile"
env:
INPUT_TARGET_REPOSITORY: ${{ inputs.repository }}
INPUT_TARGET_PR_NUMBER: ${{ inputs.pr_number }}
INPUT_ADDITIONAL_COMMENT: ${{ inputs.additional_comment }}
INPUT_INCLUDE_PREVIOUS: ${{ inputs.include_previous }}
INPUT_PACKAGE_NAME: ${{ inputs.package_name }}
INPUT_WORKING_DIRECTORY: ${{ inputs.working_directory }}

INPUT_AI_PROVIDER: ${{ inputs.ai_provider }}
INPUT_AI_MODEL: ${{ inputs.ai_model }}
1 change: 0 additions & 1 deletion .github/actions/analyzer/app/config.py
@@ -6,7 +6,6 @@

# Risk level icons
RISK_ICONS = {
"極低": "🟢",
"低": "🟢",
"中": "🟡",
"高": "🔴",
1 change: 0 additions & 1 deletion .github/actions/analyzer/app/entrypoint.sh
@@ -82,7 +82,6 @@ if [ -n "$INPUT_WORKING_DIRECTORY" ] && [ "$INPUT_WORKING_DIRECTORY" != "." ]; t
echo "Current directory: $(pwd)"
fi

cd "$GITHUB_WORKSPACE"
Contributor comment: This change enables the directory-specification feature (working_directory), which until now had effectively been disabled.

# Run the vulnerability analysis script
echo "Running vulnerability analysis..."
eval python3 -u /app/vulnerability_analyzer.py "$INPUT_TARGET_REPOSITORY" "$INPUT_TARGET_PR_NUMBER" $FLAGS
6 changes: 4 additions & 2 deletions .github/actions/analyzer/app/prompt_builder.py
@@ -165,6 +165,8 @@ def create_ai_analysis_prompt(vuln_data: List[Dict[str, Any]], version_info: Dic
- 管理者権限でのみ到達可能な箇所での使用

#### 低リスク条件
- 脆弱な機能を利用していない
- ユーザ入力値が脆弱な機能に渡されない
- 完全に内部データのみで使用
- 既に適切なバリデーション・サニタイズで保護済み

@@ -182,7 +184,7 @@ def create_ai_analysis_prompt(vuln_data: List[Dict[str, Any]], version_info: Dic
あなたの応答は必ず次の「---RISK_ASSESSMENT_START---」から「---RISK_ASSESSMENT_END---」までの構造化ヘッダーで開始してください。構造化ヘッダは必ずコメントアウト(<!-- 〜〜〜 -->)したうえで出力してください。この出力はPythonコードでパースされます。

---RISK_ASSESSMENT_START---
RISK_LEVEL: [極低/低/中/高/Critical のいずれか]
RISK_LEVEL: [低/中/高/Critical のいずれか]
CONFIDENCE: [高/中/低]
PRIMARY_REASON: [判定理由を1行(100文字以内)で簡潔に]
---RISK_ASSESSMENT_END---
@@ -191,7 +193,7 @@ def create_ai_analysis_prompt(vuln_data: List[Dict[str, Any]], version_info: Dic
**ヘッダーの記入例(ここから)**:
<!--
---RISK_ASSESSMENT_START---
RISK_LEVEL: 極低
RISK_LEVEL: 低
CONFIDENCE: 高
PRIMARY_REASON: 脆弱パッケージ未使用、既にパッチ適用済み、攻撃経路なし
---RISK_ASSESSMENT_END---
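The revised prompt asks the model to open its reply with a commented-out (`<!-- ... -->`) structured header delimited by `---RISK_ASSESSMENT_START---` and `---RISK_ASSESSMENT_END---`, carrying `RISK_LEVEL`, `CONFIDENCE`, and `PRIMARY_REASON` fields that are parsed on the Python side. The parsing logic itself is not part of this diff; the snippet below is only a minimal sketch of how such a header could be extracted, and the regex, function name, and return shape are assumptions rather than the project's actual code.

```python
# Hypothetical sketch of parsing the structured header; not code from this PR.
import re
from typing import Optional, Tuple

HEADER_RE = re.compile(
    r"---RISK_ASSESSMENT_START---\s*"
    r"RISK_LEVEL:\s*(?P<level>[^\n]+)\n"
    r"CONFIDENCE:\s*(?P<confidence>[^\n]+)\n"
    r"PRIMARY_REASON:\s*(?P<reason>[^\n]+)\n"
    r"\s*---RISK_ASSESSMENT_END---"
)

def parse_structured_header(ai_analysis: str) -> Optional[Tuple[str, str, str]]:
    """Return (risk_level, confidence, primary_reason) if the header is present."""
    match = HEADER_RE.search(ai_analysis)
    if match is None:
        return None  # caller falls back to keyword-based risk detection
    return (
        match.group("level").strip(),
        match.group("confidence").strip(),
        match.group("reason").strip(),
    )
```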
111 changes: 94 additions & 17 deletions .github/actions/analyzer/app/risk_assessment.py
@@ -9,8 +9,71 @@
from config import RISK_ICONS


def extract_risk_from_ai_analysis(ai_analysis: str, vuln_data: List[Dict[str, Any]], cves: List[str]) -> str:
"""AI分析結果から結論部分を抽出"""
# Risk level priority (a higher number means higher risk)
RISK_PRIORITY = {
"unknown": 0,
"low": 1,
"medium": 2,
"high": 3,
"critical": 4
}


def normalize_risk_level(risk_text: str) -> str:
"""リスクテキストを正規化してlow/medium/high/criticalに変換

Args:
risk_text: Risk assessment text returned from the AI analysis or extract_risk_from_ai_analysis

Returns:
Normalized risk level (low/medium/high/critical)
"""
risk_lower = risk_text.lower()

if "critical" in risk_lower or "緊急" in risk_lower:
return "critical"
elif "高" in risk_text or "high" in risk_lower:
return "high"
elif "中" in risk_text or "medium" in risk_lower:
return "medium"
elif "低" in risk_text or "low" in risk_lower:
return "low"

# Default to medium (err on the safe side)
return "medium"


def get_max_risk_level(risk_levels: List[str]) -> str:
"""複数のリスクレベルから最大値を取得

Args:
risk_levels: List of risk levels (may include 'unknown')

Returns:
Highest risk level (low/medium/high/critical/unknown)
"""

max_priority = 0
max_level = "unknown"

for level in risk_levels:
normalized = normalize_risk_level(level)
priority = RISK_PRIORITY.get(normalized, 0)
if priority > max_priority:
max_priority = priority
max_level = normalized

return max_level


def extract_risk_from_ai_analysis(ai_analysis: str, vuln_data: List[Dict[str, Any]], cves: List[str]) -> tuple[str, str]:
"""AI分析結果から結論部分を抽出

Returns:
tuple[str, str]: (formatted_text, risk_level)
- formatted_text: Formatted risk assessment text
- risk_level: Normalized risk level (low/medium/high/critical/unknown)
"""

debug_mode = os.getenv('DEBUG_MODE') == '1'

@@ -59,13 +122,17 @@ def extract_risk_from_ai_analysis(ai_analysis: str, vuln_data: List[Dict[str, An
actions_content = action_match.group(1)
actions_text = f"\n\n### 📋 推奨アクション\n" + actions_content

return f"""### {icon} 総合リスク判定: {risk_level}リスク
formatted_text = f"""### {icon} 総合リスク判定: {risk_level}リスク

**判定根拠**: {reason}{cve_info}{actions_text}

### 💡 重要
この評価は下記の詳細分析に基づく総合判断です。技術的根拠は詳細分析結果をご確認ください。"""

# Normalize risk_level before returning
normalized_risk = normalize_risk_level(risk_level)
return (formatted_text, normalized_risk)

# Fallback when no structured header is found
if debug_mode:
print("⚠️ DEBUG: 構造化ヘッダーが見つかりません。フォールバック処理に移行します。")
@@ -76,11 +143,12 @@ def extract_risk_from_ai_analysis(ai_analysis: str, vuln_data: List[Dict[str, An
print("⚠️ DEBUG: AI分析失敗を検出")

# If the AI analysis failed, return the failure message as-is
return f"""### ❌ リスク評価: 分析失敗
error_text = f"""### ❌ リスク評価: 分析失敗

AI分析が正常に完了しませんでした。分析を再実行するか、手動でのレビューを実施してください。

{ai_analysis}"""
return (error_text, "unknown")  # return 'unknown' on error

# Extract the "overall risk assessment" section from the AI analysis
conclusion_patterns = [
@@ -106,12 +174,19 @@ def extract_risk_from_ai_analysis(ai_analysis: str, vuln_data: List[Dict[str, An
# If a conclusion was found, use it as-is
if extracted_conclusion:
# Add an icon
if "低" in extracted_conclusion:
detected_risk = "medium" # デフォルト
if "低" in extracted_conclusion or "極低" in extracted_conclusion:
icon = "🟢"
detected_risk = "low"
elif "中" in extracted_conclusion:
icon = "🟡"
else:
detected_risk = "medium"
elif "高" in extracted_conclusion:
icon = "🔴"
detected_risk = "high"
elif "critical" in extracted_conclusion.lower() or "緊急" in extracted_conclusion:
icon = "🔴"
detected_risk = "critical"

# Extract recommended actions from the "推奨対策" (recommended countermeasures) section
action_match = re.search(r"### 推奨対策[^\n]*\n(.*?)(?=\n##|\n---|\Z)", ai_analysis, re.DOTALL)
@@ -133,26 +208,22 @@ def extract_risk_from_ai_analysis(ai_analysis: str, vuln_data: List[Dict[str, An

cve_info = f"\n**対象CVE**: {cve_list}\n**最大CVSS**: {max_cvss} (参考値)"

return f"### {icon} 総合リスク判定\n{extracted_conclusion.replace('### 総合リスク判定', '')}{cve_info}{actions_text}"
formatted_text = f"### {icon} 総合リスク判定\n{extracted_conclusion.replace('### 総合リスク判定', '')}{cve_info}{actions_text}"
return (formatted_text, detected_risk)

# Extract the key verdict from the whole AI analysis (fallback)
if debug_mode:
print("🔄 DEBUG: フォールバック判定ロジックを実行中...")

risk_level = "未評価"
normalized_risk = "unknown" # デフォルト

# Very low / zero risk (checked with highest priority)
if ("極低" in ai_analysis or "ゼロリスク" in ai_analysis or "ほぼゼロ" in ai_analysis or
"🟢 **極低リスク" in ai_analysis):
risk_level = "極低リスク"
icon = "🟢"
if debug_mode:
print("✅ DEBUG: 極低リスクを検出")
# Low risk
elif ("**低**" in ai_analysis or "低リスク" in ai_analysis or
# Low risk (including zero risk)
if ("**低**" in ai_analysis or "低リスク" in ai_analysis or "ゼロリスク" in ai_analysis or "ほぼゼロ" in ai_analysis or
"リスクレベル「低」" in ai_analysis or "リスクレベル:低" in ai_analysis or
"LOW" in ai_analysis):
risk_level = "低リスク"
normalized_risk = "low"
icon = "🟢"
if debug_mode:
print("✅ DEBUG: 低リスクを検出")
@@ -161,6 +232,7 @@ def extract_risk_from_ai_analysis(ai_analysis: str, vuln_data: List[Dict[str, An
"リスクレベル「中」" in ai_analysis or "リスクレベル:中" in ai_analysis or
"MEDIUM" in ai_analysis or "CVSS 5." in ai_analysis or "CVSS 6." in ai_analysis):
risk_level = "中リスク"
normalized_risk = "medium"
icon = "🟡"
if debug_mode:
print("⚠️ DEBUG: 中リスクを検出")
@@ -169,6 +241,7 @@ def extract_risk_from_ai_analysis(ai_analysis: str, vuln_data: List[Dict[str, An
"リスクレベル「高」" in ai_analysis or "リスクレベル:高" in ai_analysis or
"HIGH" in ai_analysis or "CVSS 7." in ai_analysis):
risk_level = "高リスク"
normalized_risk = "high"
icon = "🔴"
if debug_mode:
print("🚨 DEBUG: 高リスクを検出")
@@ -177,11 +250,13 @@ def extract_risk_from_ai_analysis(ai_analysis: str, vuln_data: List[Dict[str, An
"🚨 Critical" in ai_analysis or "緊急" in ai_analysis or
"CVSS 9." in ai_analysis or "CVSS 8." in ai_analysis):
risk_level = "Critical(緊急)"
normalized_risk = "critical"
icon = "🔴"
if debug_mode:
print("🔴 DEBUG: Critical(緊急)を検出")
else:
icon = "🔴"
normalized_risk = "unknown" # リスクレベルを特定できなかった場合は'unknown'
if debug_mode:
print("❓ DEBUG: リスクレベルを特定できませんでした")

@@ -217,7 +292,7 @@ def extract_risk_from_ai_analysis(ai_analysis: str, vuln_data: List[Dict[str, An
else:
actions = ["⚡ 早急な影響範囲確認", "🔍 攻撃可能性の詳細分析", "👥 セキュリティチームとの連携"]

return f"""### {icon} 総合リスク判定: {risk_level}
formatted_text = f"""### {icon} 総合リスク判定: {risk_level}

**判定根拠**: {reason_text}{cve_info}

@@ -226,3 +301,5 @@ def extract_risk_from_ai_analysis(ai_analysis: str, vuln_data: List[Dict[str, An

### 💡 重要
この評価は下記の詳細分析に基づく総合判断です。技術的根拠は詳細分析結果をご確認ください。"""

return (formatted_text, normalized_risk)
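Together, `normalize_risk_level` and `get_max_risk_level` let the analyzer collapse the per-package verdicts into the single `max_risk_level` output declared in action.yml. The following is a minimal usage sketch under the assumption that one risk text is produced per analyzed package; the sample inputs and the driver code around the two helpers are illustrative only and are not part of this PR.

```python
# Minimal usage sketch; sample inputs are invented for illustration.
from risk_assessment import get_max_risk_level, normalize_risk_level

# Per-package verdicts as they might come back from the AI analysis step;
# the wording can mix Japanese and English, and "unknown" marks a failed run.
per_package_levels = ["低リスク", "MEDIUM", "unknown", "リスクレベル:高"]

# normalize_risk_level maps free-form text onto the four canonical levels,
# defaulting anything it cannot classify (including "unknown") to "medium".
print([normalize_risk_level(level) for level in per_package_levels])
# -> ['low', 'medium', 'medium', 'high']

# get_max_risk_level picks the highest-priority level across all packages,
# which is what the action exposes as its max_risk_level output.
print(get_max_risk_level(per_package_levels))  # -> 'high'
```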