Skip to content

Commit 79ff661

Browse files
committed
Route requests directly to OpenAI for the gpt-5.1 model when the n parameter is set, and include the n parameter in the OpenAI request body
1 parent 54ff8d9 commit 79ff661

File tree

2 files changed

+11
-4
lines changed

2 files changed

+11
-4
lines changed

web/src/app/api/v1/chat/completions/_post.ts

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -250,7 +250,8 @@ export async function postChatCompletions(params: {
250250
OPENAI_SUPPORTED_MODELS.includes(shortModelName as any)
251251
// Only use OpenAI endpoint for OpenAI models with n parameter
252252
// All other models (including non-OpenAI with n parameter) should use OpenRouter
253-
const shouldUseOpenAIEndpoint = isOpenAIDirectModel && (body as any)?.n
253+
const shouldUseOpenAIEndpoint =
254+
isOpenAIDirectModel && (body as any)?.codebuff_metadata?.n
254255

255256
const result = await (shouldUseOpenAIEndpoint
256257
? handleOpenAINonStream({

web/src/llm-api/openai.ts

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -10,17 +10,20 @@ import type { UsageData } from './helpers'
1010
import type { InsertMessageBigqueryFn } from '@codebuff/common/types/contracts/bigquery'
1111
import type { Logger } from '@codebuff/common/types/contracts/logger'
1212

13-
export const OPENAI_SUPPORTED_MODELS = ['gpt-5'] as const
13+
export const OPENAI_SUPPORTED_MODELS = ['gpt-5', 'gpt-5.1'] as const
1414
export type OpenAIModel = (typeof OPENAI_SUPPORTED_MODELS)[number]
1515

1616
const INPUT_TOKEN_COSTS: Record<OpenAIModel, number> = {
1717
'gpt-5': 1.25,
18+
'gpt-5.1': 1.25,
1819
} as const
1920
const CACHED_INPUT_TOKEN_COSTS: Record<OpenAIModel, number> = {
2021
'gpt-5': 0.125,
22+
'gpt-5.1': 0.125,
2123
} as const
2224
const OUTPUT_TOKEN_COSTS: Record<OpenAIModel, number> = {
2325
'gpt-5': 10,
26+
'gpt-5.1': 10,
2427
} as const
2528

2629
type OpenAIUsage = {
@@ -75,7 +78,10 @@ export async function handleOpenAINonStream({
7578
insertMessageBigquery: InsertMessageBigqueryFn
7679
}) {
7780
const startTime = new Date()
78-
const { clientId, clientRequestId } = extractRequestMetadata({ body, logger })
81+
const { clientId, clientRequestId, n } = extractRequestMetadata({
82+
body,
83+
logger,
84+
})
7985

8086
const { model } = body
8187
const modelShortName =
@@ -94,6 +100,7 @@ export async function handleOpenAINonStream({
94100
...body,
95101
model: modelShortName,
96102
stream: false,
103+
...(n && { n }),
97104
}
98105

99106
// Transform max_tokens to max_completion_tokens
@@ -189,4 +196,3 @@ export async function handleOpenAINonStream({
189196

190197
return data
191198
}
192-

0 commit comments

Comments (0)