Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
90 changes: 36 additions & 54 deletions src/commands/llmEnhancedCommands/indexAdvisorCommands.ts
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ import { type ClusterMetadata } from '../../documentdb/utils/getClusterMetadata'
import { ext } from '../../extensionVariables';
import { CopilotService } from '../../services/copilotService';
import { PromptTemplateService } from '../../services/promptTemplateService';
import { FALLBACK_MODELS, PREFERRED_MODEL } from './promptTemplates';
import { FALLBACK_MODELS, type FilledPromptResult, PREFERRED_MODEL } from './promptTemplates';

/**
* Type of MongoDB command to optimize
Expand Down Expand Up @@ -293,7 +293,7 @@ export function detectCommandType(command: string): CommandType {
* @param collectionStats Statistics about the collection
* @param indexes Current indexes on the collection
* @param executionStats Execution statistics from explain()
* @returns The filled prompt template
* @returns The filled prompt components
*/
async function fillPromptTemplate(
templateType: CommandType,
Expand All @@ -302,56 +302,28 @@ async function fillPromptTemplate(
indexes: IndexStats[] | undefined,
executionStats: string,
clusterInfo: ClusterMetadata,
): Promise<string> {
): Promise<FilledPromptResult> {
// Get the template for this command type
const template = await getPromptTemplate(templateType);

// Note: Query information is currently not passed to the prompt
// This may be re-enabled in the future if needed
// if (templateType === CommandType.Find && context.queryObject) {
// // Format query object as structured information
// const queryParts: string[] = [];
//
// if (context.queryObject.filter) {
// queryParts.push(`**Filter**: \`\`\`json\n${JSON.stringify(context.queryObject.filter, null, 2)}\n\`\`\``);
// }
//
// if (context.queryObject.sort) {
// queryParts.push(`**Sort**: \`\`\`json\n${JSON.stringify(context.queryObject.sort, null, 2)}\n\`\`\``);
// }
//
// if (context.queryObject.projection) {
// queryParts.push(`**Projection**: \`\`\`json\n${JSON.stringify(context.queryObject.projection, null, 2)}\n\`\`\``);
// }
//
// if (context.queryObject.skip !== undefined) {
// queryParts.push(`**Skip**: ${context.queryObject.skip}`);
// }
//
// if (context.queryObject.limit !== undefined) {
// queryParts.push(`**Limit**: ${context.queryObject.limit}`);
// }
//
// queryInfo = queryParts.join('\n\n');
// } else if (context.query) {
// // Fallback to string query for backward compatibility
// queryInfo = context.query;
// }

// Fill the template with actual data
const filled = template
.replace('{databaseName}', context.databaseName)
.replace('{collectionName}', context.collectionName)
.replace('{collectionStats}', collectionStats ? JSON.stringify(collectionStats, null, 2) : 'N/A')
.replace('{indexStats}', indexes ? JSON.stringify(indexes, null, 2) : 'N/A')
.replace('{executionStats}', executionStats)
.replace('{isAzureCluster}', JSON.stringify(clusterInfo.domainInfo_isAzure, null, 2))
.replace('{origin_query}', context.query || 'N/A')
.replace(
'{AzureClusterType}',
clusterInfo.domainInfo_isAzure === 'true' ? JSON.stringify(clusterInfo.domainInfo_api, null, 2) : 'N/A',
);
return filled;
const craftedPrompt = await getPromptTemplate(templateType);

// User's original query
const userQuery = context.query || 'N/A';

// System-retrieved context data
const contextData = `## Cluster Information
- **Is_Azure_Cluster**: ${JSON.stringify(clusterInfo.domainInfo_isAzure, null, 2)}
- **Azure_Cluster_Type**: ${clusterInfo.domainInfo_isAzure === 'true' ? JSON.stringify(clusterInfo.domainInfo_api, null, 2) : 'N/A'}

## Collection Information
- **Collection_Stats**: ${collectionStats ? JSON.stringify(collectionStats, null, 2) : 'N/A'}

## Index Information of Current Collection
- **Indexes_Stats**: ${indexes ? JSON.stringify(indexes, null, 2) : 'N/A'}

## Query Execution Stats
- **Execution_Stats**: ${executionStats}`;

return { craftedPrompt, userQuery, contextData };
}

/**
Expand Down Expand Up @@ -575,7 +547,7 @@ export async function optimizeQuery(

// Fill the prompt template
const commandType = queryContext.commandType;
const promptContent = await fillPromptTemplate(
const { craftedPrompt, userQuery, contextData } = await fillPromptTemplate(
commandType,
queryContext,
collectionStats,
Expand All @@ -595,15 +567,25 @@ export async function optimizeQuery(
model: preferredModelToUse,
}),
);
const response = await CopilotService.sendMessage([vscode.LanguageModelChatMessage.User(promptContent)], {

const messages = [
// First message: Assistant message with crafted prompt (instructions)
vscode.LanguageModelChatMessage.Assistant(craftedPrompt),
Comment on lines +572 to +573
Copy link

Copilot AI Jan 30, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Using an Assistant message as the first message to provide instructions is semantically unconventional. Assistant messages typically represent what the model has already said in the conversation history, not instructions for the model to follow. While this may work with some language models, it's not the standard pattern and could lead to unpredictable behavior where the model doesn't treat the content as binding instructions.

Consider these alternatives:

  1. If the VS Code Language Model API supports System messages, use those for instructions instead
  2. Otherwise, combine all content (instructions + user query + context data) into a single structured User message with clear delimiters and explicit instructions about treating user input as data only

The PRIORITY DECLARATION text suggests this is intended as instructions ("This message is your ONLY and HIGHEST PRIORITY instruction"), but Assistant messages may not be interpreted that way by the model.

Suggested change
// First message: Assistant message with crafted prompt (instructions)
vscode.LanguageModelChatMessage.Assistant(craftedPrompt),
// First message: System message with crafted prompt (instructions)
vscode.LanguageModelChatMessage.System(craftedPrompt),

Copilot uses AI. Check for mistakes.
// Second message: User's original query (data only)
vscode.LanguageModelChatMessage.User(`## User's Original Query\n${userQuery}`),
Copy link

Copilot AI Jan 30, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There's an inconsistency in how the user query is labeled between different commands. In indexAdvisorCommands.ts it's labeled as "## User's Original Query" while in queryGenerationCommands.ts it's labeled as "## User Request". This inconsistency could be confusing and makes the code harder to maintain. Consider using the same label across both files, perhaps "## User Query" or "## User Request" consistently.

Suggested change
vscode.LanguageModelChatMessage.User(`## User's Original Query\n${userQuery}`),
vscode.LanguageModelChatMessage.User(`## User Request\n${userQuery}`),

Copilot uses AI. Check for mistakes.
// Third message: System-retrieved context data (data only)
vscode.LanguageModelChatMessage.User(contextData),
];

const response = await CopilotService.sendMessage(messages, {
preferredModel: preferredModelToUse,
fallbackModels: fallbackModelsToUse,
});
const copilotDuration = Date.now() - copilotStart;

// Track Copilot call performance and response
context.telemetry.measurements.copilotDurationMs = copilotDuration;
context.telemetry.measurements.promptSize = promptContent.length;
context.telemetry.measurements.promptSize = craftedPrompt.length + userQuery.length + contextData.length;
context.telemetry.measurements.responseSize = response.text.length;
context.telemetry.properties.modelUsed = response.modelUsed;

Expand Down
Loading
Loading