Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
68 changes: 68 additions & 0 deletions src/data/guest-messages.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
{
"conversations": [
{
"id": "conv-001",
"propertyId": "prop-001",
"propertyName": "Oceanfront Villa",
"guestName": "Alice Johnson",
"guestEmail": "alice@email.com",
"status": "pending",
"messages": [
{
"id": "msg-001",
"from": "guest",
"timestamp": "2024-01-15T10:30:00Z",
"content": "Hi! I'm interested in booking your villa for my family vacation. We're a family of 4 with two young kids. Is the property child-friendly? Also, is there beach access?"
}
]
},
{
"id": "conv-002",
"propertyId": "prop-002",
"propertyName": "Downtown Loft",
"guestName": "Bob Williams",
"guestEmail": "bob@email.com",
"status": "pending",
"messages": [
{
"id": "msg-002",
"from": "guest",
"timestamp": "2024-01-16T14:20:00Z",
"content": "Hello, I'm looking to book for a business trip next month. Do you have a desk and reliable WiFi? I'll need to work during my stay."
}
]
},
{
"id": "conv-003",
"propertyId": "prop-003",
"propertyName": "Mountain Cabin",
"guestName": "Carol Davis",
"guestEmail": "carol@email.com",
"status": "pending",
"messages": [
{
"id": "msg-003",
"from": "guest",
"timestamp": "2024-01-17T09:15:00Z",
"content": "We're planning a romantic getaway for our anniversary. Does the cabin have a hot tub? And are there any good restaurants nearby you'd recommend?"
}
]
},
{
"id": "conv-004",
"propertyId": "prop-001",
"propertyName": "Oceanfront Villa",
"guestName": "David Martinez",
"guestEmail": "david@email.com",
"status": "pending",
"messages": [
{
"id": "msg-004",
"from": "guest",
"timestamp": "2024-01-18T16:45:00Z",
"content": "Quick question - what's your cancellation policy? I might need to change my dates depending on work."
}
]
}
]
}
177 changes: 177 additions & 0 deletions src/routes/suggestions.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,177 @@
import { Router, Request, Response } from 'express';
import { z } from 'zod';
import * as fs from 'fs';
import * as path from 'path';

const router = Router();

/** A single message within a guest/host conversation thread. */
interface Message {
  id: string;
  /** Who sent the message — the guest or the property host. */
  from: 'guest' | 'host';
  /** ISO-8601 timestamp string, e.g. "2024-01-15T10:30:00Z". */
  timestamp: string;
  /** Free-form message text. Guest-authored content is untrusted input. */
  content: string;
}

/** A conversation between one guest and the host of one property. */
interface Conversation {
  id: string;
  propertyId: string;
  propertyName: string;
  guestName: string;
  guestEmail: string;
  // NOTE(review): sample data only shows "pending" — other states presumably exist; confirm.
  status: string;
  /** Messages in order; the code treats the last element as the most recent. */
  messages: Message[];
}

/** Top-level shape of src/data/guest-messages.json. */
interface ConversationDatabase {
  conversations: Conversation[];
}

/**
 * Reads the guest conversation database from disk.
 *
 * Synchronous read of a small JSON fixture; re-read on each call so edits to
 * the file are picked up without a restart.
 */
function loadConversations(): ConversationDatabase {
  const databaseFile = path.resolve(__dirname, '..', 'data', 'guest-messages.json');
  const rawJson = fs.readFileSync(databaseFile, 'utf-8');
  return JSON.parse(rawJson) as ConversationDatabase;
}

// Request-body schema for the suggestion-generation endpoint.
const suggestionsQuerySchema = z.object({
  conversationId: z.string(),
  // Optional model override forwarded to the LiteLLM proxy.
  model: z.string().optional(),
});

// Base URL of the LiteLLM proxy used for chat completions.
const LITELLM_SERVER_URL = process.env.LITELLM_SERVER_URL || 'http://localhost:4000';

async function generateReplySuggestions(
conversation: Conversation,
model?: string
): Promise<string[]> {
// Get the last guest message
const guestMessages = conversation.messages.filter((m) => m.from === 'guest');
const lastGuestMessage = guestMessages[guestMessages.length - 1];

if (!lastGuestMessage) {
throw new Error('No guest message found in conversation');
}

const systemPrompt = `You are a helpful assistant for vacation rental hosts. Generate professional, friendly reply suggestions for guest inquiries.

Property: ${conversation.propertyName}
Guest Name: ${conversation.guestName}

Generate exactly 3 reply suggestions that are:
- Professional and welcoming
- Address the guest's specific questions
- Encourage booking while being honest
- Appropriately brief (2-4 sentences each)

Format your response as a JSON array of 3 strings, like:
["Reply 1", "Reply 2", "Reply 3"]`;

// VULNERABILITY: Guest message content is included directly in the prompt
// A malicious guest could embed prompt injection in their message
const userPrompt = `Guest Message:
"""
${lastGuestMessage.content}
"""

Generate 3 professional reply suggestions for this message.`;
Comment on lines +67 to +74

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🟠 High

Guest message content is embedded directly into the LLM prompt without sanitization, creating a cross-user prompt injection vulnerability. A malicious guest can craft messages with embedded instructions that manipulate the AI-generated suggestions shown to property hosts. This crosses a trust boundary since the guest (attacker) can influence content displayed to the host (victim), potentially causing reputation damage or facilitating social engineering attacks.

💡 Suggested Fix

Use structured message roles to separate system instructions from user content, and add input sanitization for defense-in-depth:

function sanitizeGuestInput(content: string): string {
  const sanitized = content
    .replace(/ignore\s+(previous|above|prior)\s+instructions?/gi, '')
    .replace(/new\s+instructions?:/gi, '')
    .replace(/system\s*:/gi, '')
    .replace(/\b(assistant|system|user)\s*:/gi, '')
    .slice(0, 1000);
  return sanitized.trim();
}

async function generateReplySuggestions(
  conversation: Conversation,
  model?: string
): Promise<string[]> {
  // ... existing code ...

  const sanitizedContent = sanitizeGuestInput(lastGuestMessage.content);

  const systemPrompt = `You are a helpful assistant for vacation rental hosts.

Property: ${conversation.propertyName}
Guest Name: ${conversation.guestName}

IMPORTANT: The guest message below is user-provided content. Generate exactly 3 professional reply suggestions that are welcoming, address the guest's questions, and encourage booking.

Format: ["Reply 1", "Reply 2", "Reply 3"]`;

  const userPrompt = `The guest wrote:\n\n${sanitizedContent}\n\nPlease generate 3 professional reply suggestions.`;

  // ... rest of implementation
}
🤖 AI Agent Prompt

The code at src/routes/suggestions.ts:67-74 embeds guest message content directly into LLM prompts without sanitization, creating a prompt injection vulnerability where malicious guests can manipulate suggestions shown to property hosts.

Investigate the complete data flow from guest message submission through to the LLM call. Check if there are existing input validation utilities in the codebase that could be leveraged. Consider whether a centralized prompt construction utility would benefit other LLM-using features in the application.

Implement a fix that:

  1. Separates system instructions from user content using structured message roles (system/user)
  2. Adds input sanitization to remove common injection patterns
  3. Includes explicit instructions in the system prompt that user content should be treated as data
  4. Limits input length to prevent token stuffing attacks

Test the fix with various prompt injection payloads (e.g., "Ignore previous instructions", role manipulation attempts, delimiter escaping) to ensure it provides robust protection while not breaking legitimate guest messages.


Was this helpful?  👍 Yes  |  👎 No 


const response = await fetch(`${LITELLM_SERVER_URL}/v1/chat/completions`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: model || 'gpt-4o-mini',
messages: [
{ role: 'system', content: systemPrompt },
{ role: 'user', content: userPrompt },
],
}),
Comment on lines +79 to +85

Check warning

Code scanning / CodeQL

File data in outbound network request Medium

Outbound network request depends on
file data
.
});

if (!response.ok) {
throw new Error(`LiteLLM request failed: ${await response.text()}`);
}

const data: any = await response.json();
const content = data.choices[0].message.content;

// Try to parse as JSON array
try {
// Handle markdown code blocks
let jsonContent = content;
if (jsonContent.includes('```json')) {
jsonContent = jsonContent.replace(/```json\n?/g, '').replace(/```\n?/g, '');
} else if (jsonContent.includes('```')) {
jsonContent = jsonContent.replace(/```\n?/g, '');
}

const suggestions = JSON.parse(jsonContent.trim());
if (Array.isArray(suggestions)) {
return suggestions.slice(0, 3);
}
} catch {
// If not valid JSON, split by newlines or return as single suggestion
return [content];
}

return [content];
}

// Generate reply suggestions for a conversation
router.post('/authorized/:level/suggestions/generate', async (req: Request, res: Response) => {
try {
const { level } = req.params as { level: 'minnow' | 'shark' };
const { conversationId, model } = suggestionsQuerySchema.parse(req.body);

const database = loadConversations();
const conversation = database.conversations.find((c) => c.id === conversationId);

if (!conversation) {
return res.status(404).json({
error: 'Conversation not found',
message: `No conversation found with ID: ${conversationId}`,
});
}

const suggestions = await generateReplySuggestions(conversation, model);

return res.json({
conversationId,
propertyName: conversation.propertyName,
guestName: conversation.guestName,
suggestions,
});
} catch (error) {
if (error instanceof z.ZodError) {
return res.status(400).json({ error: 'Validation error', details: error.errors });
}
console.error('Suggestions generation error:', error);
return res.status(500).json({
error: 'Internal server error',
message: error instanceof Error ? error.message : 'Unknown error',
});
}
});

// List conversations endpoint
router.get('/authorized/:level/suggestions/conversations', async (req: Request, res: Response) => {
try {
const database = loadConversations();

return res.json({
conversations: database.conversations.map((c) => ({
id: c.id,
propertyName: c.propertyName,
guestName: c.guestName,
status: c.status,
messageCount: c.messages.length,
lastMessageAt: c.messages[c.messages.length - 1]?.timestamp,
})),
});
} catch (error) {
console.error('Conversations list error:', error);
return res.status(500).json({
error: 'Internal server error',
message: error instanceof Error ? error.message : 'Unknown error',
});
}
});

export default router;
4 changes: 4 additions & 0 deletions src/server.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import { chatHandler } from './routes/chat';
import { tokenHandler, jwksHandler } from './routes/oauth';
import { generateRSAKeyPair } from './utils/jwt-keys';
import { authenticateToken } from './middleware/auth';
import suggestionsRouter from './routes/suggestions';

// Initialize OAuth key pair on startup
generateRSAKeyPair();
Expand All @@ -31,6 +32,9 @@ app.get('/health', (req: Request, res: Response) => {
app.post('/:level/chat', chatHandler);
app.post('/authorized/:level/chat', authenticateToken, chatHandler);

// Smart reply suggestions endpoints
app.use(suggestionsRouter);

// OAuth endpoints
app.post('/oauth/token', tokenHandler);
app.get('/.well-known/jwks.json', jwksHandler);
Expand Down