5 changes: 5 additions & 0 deletions form-flow-backend/routers/suggestions.py
@@ -156,6 +156,8 @@ async def get_smart_suggestions(
Requires: Bearer token authentication (for profile lookup)
"""
try:
logger.info(f"📥 [API] Smart suggestions request: {data.field_name} (Purpose: {data.form_purpose})")

# Get user from auth
user_id = None
profile_confidence = None
@@ -165,10 +167,13 @@
if token:
user = await auth.get_current_user(token, db)
user_id = user.id
logger.info(f"👤 [API] User identified: {user_id}")

# Get profile confidence if available
if hasattr(user, 'behavioral_profile') and user.behavioral_profile:
profile_confidence = user.behavioral_profile.confidence_score
else:
logger.info("👻 [API] User not identified, using anonymous mode")
except Exception as e:
logger.debug(f"Auth lookup failed: {e}")

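Reviewer note: with the added logging, each request now records the target field and form purpose up front. A hypothetical call against this endpoint, assuming it is mounted at POST /suggestions/smart and that the request model exposes field_name and form_purpose (neither the route path nor the model is shown in this diff):

curl -X POST http://localhost:8000/suggestions/smart \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"field_name": "position", "form_purpose": "Job Application"}'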
8 changes: 7 additions & 1 deletion form-flow-backend/services/ai/gemini.py
@@ -26,6 +26,9 @@
import os
import json
from typing import Dict, List, Any, Optional
from dotenv import load_dotenv # Load variables from a local .env file

load_dotenv()

from langchain_community.chat_models import ChatOpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
@@ -101,7 +104,7 @@ def __init__(self, api_key: Optional[str] = None, model: str = "gemini-2.0-flash
logger.info("Detected OpenRouter API key. Switching to OpenRouter provider.")
# Default to a free Gemma model if using OpenRouter and a default model was passed
if self.model in ("gemini-2.0-flash", "gemma-3-27b-it"):
self.model = "google/gemma-2-9b-it:free" # Use free tier
self.model = "google/gemma-3-27b-it:free" # Use free tier

self.llm = ChatOpenAI(
model=self.model,
@@ -341,6 +344,9 @@ async def fill(
def get_gemini_service() -> GeminiService:
"""Get singleton GeminiService instance."""
global _service_instance
print("Getting Gemini Service Instance")
api_key = os.getenv('GEMMA_API_KEY') or os.getenv('OPENROUTER_API_KEY') or os.getenv('GOOGLE_API_KEY')
print(f"Using API Key: {api_key[:8]}...") # Print first 8 chars for debugging
if _service_instance is None:
try:
_service_instance = GeminiService()
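Reviewer note: get_gemini_service() resolves the key as GEMMA_API_KEY, then OPENROUTER_API_KEY, then GOOGLE_API_KEY, while __init__ swaps the default model to the free Gemma tier when an OpenRouter key is detected. A minimal sketch of that selection in isolation, assuming detection is simply the presence of OPENROUTER_API_KEY (this diff does not show the actual detection logic):

import os
from dotenv import load_dotenv

load_dotenv()

def resolve_provider():
    """Sketch: mirror the diff's key-resolution order and OpenRouter model swap."""
    if os.getenv("OPENROUTER_API_KEY"):
        # OpenRouter keys route to the free Gemma tier, per __init__ above
        return "openrouter", "google/gemma-3-27b-it:free"
    key = os.getenv("GEMMA_API_KEY") or os.getenv("GOOGLE_API_KEY")
    return ("google", "gemini-2.0-flash") if key else (None, None)

print(resolve_provider())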
215 changes: 142 additions & 73 deletions form-flow-backend/services/ai/profile/suggestions.py
@@ -8,6 +8,8 @@
from typing import List, Dict, Any, Optional
from dataclasses import dataclass
from sqlalchemy.ext.asyncio import AsyncSession
from datetime import datetime
import json

from utils.logging import get_logger

@@ -21,6 +23,15 @@ class SuggestionTier(Enum):
PATTERN_ONLY = "pattern_only" # Tier 3: Fast fallback


from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from pydantic import BaseModel, Field

class SuggestionResponse(BaseModel):
"""Structured response for LLM suggestions."""
suggestions: List[str] = Field(description="List of suggested values")
reasoning: str = Field(description="Why these suggestions were made based on the profile")
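# Reviewer note: an illustrative example of the JSON shape this model validates
# (values are hypothetical, not from the profile data):
# {"suggestions": ["Senior Software Engineer", "Engineering Lead"],
#  "reasoning": "The profile lists a senior engineering role at the current employer."}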

@dataclass
class IntelligentSuggestion:
"""A single intelligent suggestion with context."""
@@ -56,44 +67,146 @@ async def get_suggestions(
) -> List[IntelligentSuggestion]:
"""
Generate intelligent suggestions for a form field.

Automatically selects the appropriate tier based on profile availability.
STRICT MODE: Only uses Tier 1 (Profile/LLM). Returns empty if no profile.
"""
field_name = field_context.get('name', 'unknown')
field_label = field_context.get('label', 'unknown')

logger.info(f"🟢 [Lifecycle] START: Request for User={user_id} Field='{field_name}' ({field_label})")

try:
# Try to get user profile
from .service import get_profile_service
profile_service = get_profile_service()
profile = await profile_service.get_profile(db, user_id)

if profile and hasattr(profile, 'confidence_score') and profile.confidence_score > 0.7:
# Tier 1: Full profile-based
return self._tier1_profile_based(profile, field_context, form_context, previous_answers)
elif profile:
# Tier 2: Blended
return self._tier2_blended(profile, field_context, form_context, previous_answers)
if profile:
# STRICT: Always use Tier 1 if profile exists. Ignore confidence score.
logger.info(f"👤 [Lifecycle] Profile Found. Confidence: {getattr(profile, 'confidence_score', 0)}")
logger.info("🚀 [Lifecycle] FORCING Tier 1: PROFILE_BASED (Ignoring confidence score)")
return await self._tier1_profile_based(profile, field_context, form_context, previous_answers)
else:
# Tier 3: Pattern only
return self._tier3_pattern_only(field_context, previous_answers)
# STRICT: No profile = No suggestions.
logger.warning("⛔ [Lifecycle] No Profile found. Skipping Tier 3 fallback (returning empty).")
return []

except Exception as e:
logger.warning(f"Profile suggestion failed, falling back to patterns: {e}")
return self._tier3_pattern_only(field_context, previous_answers)
logger.error(f"❌ [Lifecycle] CRITICAL ERROR: {str(e)}", exc_info=True)
return []

def _tier1_profile_based(
async def _tier1_profile_based(
self,
profile: Any,
field_context: Dict[str, Any],
form_context: Dict[str, Any],
previous_answers: Dict[str, str]
) -> List[IntelligentSuggestion]:
"""Tier 1: Full profile-based suggestions with LLM."""
# For now, fall back to pattern-based
suggestions = self._tier3_pattern_only(field_context, previous_answers)
# Upgrade tier
for s in suggestions:
s.tier = SuggestionTier.PROFILE_BASED
s.reasoning = "Based on your behavioral profile"
return suggestions
logger.info(f"🧠 [Lifecycle] Tier 1: Initiating LLM generation for '{field_context.get('name')}'")

# Try to generate suggestions via LLM
try:
llm_suggestions = await self._generate_llm_suggestions(profile, field_context, form_context)
if llm_suggestions:
logger.info(f"✅ [Lifecycle] Tier 1: LLM Success. Returned {len(llm_suggestions)} suggestions.")
return llm_suggestions
else:
logger.warning("⚠️ [Lifecycle] Tier 1: LLM returned empty results.")
return [] # STRICT: Return empty instead of fallback
except Exception as e:
logger.error(f"❌ [Lifecycle] Tier 1: LLM Failed ({str(e)})")
return [] # STRICT: Return empty instead of fallback

async def _generate_llm_suggestions(
self,
profile: Any,
field_context: Dict[str, Any],
form_context: Dict[str, Any]
) -> Optional[List[IntelligentSuggestion]]:
"""Generate suggestions using LLM and user profile."""
from services.ai.gemini import get_gemini_service
gemini = get_gemini_service()

if not gemini or not gemini.llm:
logger.error("❌ [Lifecycle] Gemini Service Unavailable")
return None

# Extract profile text safely
profile_text = getattr(profile, 'profile_text', str(profile))

# Context extraction
field_name = field_context.get("name", "unknown")
field_label = field_context.get("label", field_name)
form_purpose = form_context.get("purpose", "General Form")

logger.debug(f"🤖 [Lifecycle] LLM Prompting for '{field_label}'...")

# ---------------------------------------------------------
# 🧠 INTELLIGENT PROMPT ENGINEERING
# ---------------------------------------------------------
prompt = ChatPromptTemplate.from_messages([
("system", """You are an intelligent form-filling assistant.
Your goal is to infer the correct value for a specific form field based on a User Profile.

CONTEXT:
- **Field Label:** "{field_label}" (Internal Name: {field_name})
- **Form Context:** {form_purpose}
- **User Profile:** {profile}

INSTRUCTIONS:
1. **Analyze the Field:**
- If the field is "Position", "Role", or "Title", interpret it as **Job Title**.
- If the field is "Company" or "Organization", look for the user's **Employer**.
- If the field is "Address", look for the user's **Home Address**.

2. **Search Profile:** Look for direct matches or infer logical answers (e.g., infer State from City).

3. **Output:** Return a JSON object with a list of 1-3 suggestions and your reasoning.

FORMAT:
{{
"suggestions": ["String Value 1", "String Value 2"],
"reasoning": "Brief explanation of why this fits the profile"
}}
"""),
])

parser = JsonOutputParser(pydantic_object=SuggestionResponse)
chain = prompt | gemini.llm | parser

try:
start_time = datetime.now()

# Execute the prompt
result = await chain.ainvoke({
"profile": profile_text,
"form_purpose": form_purpose,
"field_label": field_label,
"field_name": field_name
})

duration = (datetime.now() - start_time).total_seconds()
logger.info(f"🤖 [Lifecycle] LLM Response ({duration:.2f}s): {json.dumps(result)}")

if result and result.get("suggestions"):
suggestions = []
for val in result["suggestions"]:
suggestions.append(IntelligentSuggestion(
value=val,
confidence=0.85,
tier=SuggestionTier.PROFILE_BASED,
reasoning=result.get("reasoning", "Inferred from profile"),
behavioral_match="llm_inference"
))
return suggestions
else:
logger.warning(f"⚠️ [Lifecycle] LLM returned valid JSON but empty suggestions.")

except Exception as e:
logger.error(f"❌ [Lifecycle] LLM Invocation Exception: {str(e)}")
return None

return None
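# Reviewer note: JsonOutputParser returns a parsed dict rather than a
# SuggestionResponse instance (the pydantic_object only shapes the format
# instructions), which is why the code above reads result.get("suggestions")
# instead of result.suggestions.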

def _tier2_blended(
self,
@@ -103,55 +216,20 @@ def _tier2_blended(
previous_answers: Dict[str, str]
) -> List[IntelligentSuggestion]:
"""Tier 2: Blended patterns + profile context."""
suggestions = self._tier3_pattern_only(field_context, previous_answers)
for s in suggestions:
s.tier = SuggestionTier.BLENDED
s.reasoning = "Based on common patterns and your preferences"
return suggestions
# Tier 3 fallback is disabled, so Tier 2 has no pattern source to blend from.
# Return empty until Tier 2 gets its own logic.
logger.info("🎨 [Lifecycle] Tier 2 requested but disabled in strict mode.")
return []

def _tier3_pattern_only(
self,
field_context: Dict[str, Any],
previous_answers: Dict[str, str]
) -> List[IntelligentSuggestion]:
"""Tier 3: Pattern-only suggestions for new users."""
field_name = field_context.get("name", "").lower()
field_type = field_context.get("type", "text").lower()
suggestions = []

# Generate pattern-based suggestions
if "email" in field_name or field_type == "email":
suggestions = [
IntelligentSuggestion(
value="",
confidence=0.5,
tier=SuggestionTier.PATTERN_ONLY,
reasoning="Enter your email address",
behavioral_match="common_pattern"
)
]
elif "name" in field_name:
suggestions = [
IntelligentSuggestion(
value="",
confidence=0.5,
tier=SuggestionTier.PATTERN_ONLY,
reasoning="Enter your full name",
behavioral_match="common_pattern"
)
]
elif "phone" in field_name or field_type == "tel":
suggestions = [
IntelligentSuggestion(
value="",
confidence=0.5,
tier=SuggestionTier.PATTERN_ONLY,
reasoning="Enter your phone number",
behavioral_match="common_pattern"
)
]

return suggestions
"""Tier 3: Intelligent Pattern-only suggestions."""
# DISABLED as per request
logger.info("🧩 [Lifecycle] Tier 3 requested but DISABLED.")
return []


# Singleton instance
@@ -175,15 +253,6 @@ async def get_intelligent_suggestions(
) -> List[IntelligentSuggestion]:
"""
Convenience function to get intelligent suggestions.

Usage:
suggestions = await get_intelligent_suggestions(
user_id=123,
field_context={"name": "email", "type": "email"},
form_context={"purpose": "Registration"},
previous_answers={},
db=session
)
"""
engine = get_profile_suggestion_engine()
return await engine.get_suggestions(
Expand All @@ -192,4 +261,4 @@ async def get_intelligent_suggestions(
form_context=form_context,
previous_answers=previous_answers,
db=db
)
)