@@ -176,6 +176,36 @@ new CompletionAdapterOpenAIChatGPT({
176176```
177177You can specify any GPT model you need. The default is ` gpt-5-nano `
178178
179+ This adapter uses the OpenAI ` responses ` API and supports:
180+
181+ - regular text completion
182+ - ` json_schema ` structured output
183+ - reasoning effort control
184+ - streaming output chunks
185+ - streaming reasoning chunks
186+
187+ ### OpenAI adapter ` complete() ` signature
188+
189+ ``` ts
190+ complete (
191+ content : string ,
192+ maxTokens ?: number ,
193+ outputSchema ?: any ,
194+ reasoningEffort ?: " none" | " minimal" | " low" | " medium" | " high" | " xhigh" ,
195+ onChunk ?: (
196+ chunk : string ,
197+ event ?: {
198+ type: " output" | " reasoning" ;
199+ delta : string ;
200+ text : string ;
201+ source ?: " summary" | " text" ;
202+ },
203+ ) => void | Promise < void > ,
204+ )
205+ ```
206+
207+ ` reasoningEffort ` is optional and defaults to ` low ` in the adapter implementation.
208+
179209---
180210
181211### Google Gemini Completion Adapter
@@ -225,7 +255,7 @@ new CompletionAdapterGoogleGemini({
225255
226256### Using json_schema with adapter
227257
228- If you want use custom json schema for completion response - you can use outputSchema param for completion :
258+ If you want to use a custom ` json_schema ` for completion response, pass it as ` outputSchema ` :
229259
230260
231261``` ts
@@ -237,27 +267,79 @@ const openAi = new CompletionAdapterOpenAIChatGPT({
237267const prompt = ' What is the capital of France? return json' ;
238268
239269openAi .complete (
240- prompt ,
241- [],
242- 200 ,
243- {
244- json_schema: {
245- name: " capital_response" ,
246- schema: {
247- type: " object" ,
248- properties: {
249- capital: { type: " string" },
250- },
251- required: [" capital" ],
252- },
270+ prompt ,
271+ 200 ,
272+ {
273+ name: " capital_response" ,
274+ schema: {
275+ type: " object" ,
276+ properties: {
277+ capital: { type: " string" },
253278 },
279+ required: [" capital" ],
254280 },
255- ).then ((resp ) => {
256- console .log (resp );
281+ },
282+ ).then ((resp ) => {
283+ console .log (resp );
284+ });
285+
286+ ```
287+
288+ ### Using reasoning effort
289+
290+ If you want to explicitly test a reasoning-capable GPT-5 model, pass ` reasoningEffort ` as the 4th argument:
291+
292+ ``` ts
293+ const openAi = new CompletionAdapterOpenAIChatGPT ({
294+ openAiApiKey: process .env .OPENAI_API_KEY as string ,
295+ model: " gpt-5-mini" ,
296+ });
297+
298+ const resp = await openAi .complete (
299+ " Explain why the sky looks blue in 3 short paragraphs" ,
300+ 300 ,
301+ undefined ,
302+ " medium" ,
303+ );
304+
305+ console .log (resp .content );
306+ ```
307+
308+ ### Using streaming output and reasoning events
309+
310+ If you pass ` onChunk ` , the adapter switches to streaming mode automatically:
311+
312+ ``` ts
313+ const openAi = new CompletionAdapterOpenAIChatGPT ({
314+ openAiApiKey: process .env .OPENAI_API_KEY as string ,
315+ model: " gpt-5-mini" ,
257316});
258317
318+ await openAi .complete (
319+ " Think step by step and write a short answer about how ABS brakes work" ,
320+ 300 ,
321+ undefined ,
322+ " medium" ,
323+ async (chunk , event ) => {
324+ if (! event ) return ;
325+
326+ if (event .type === " reasoning" ) {
327+ console .log (" Reasoning chunk:" , event .delta );
328+ return ;
329+ }
330+
331+ console .log (" Output chunk:" , event .delta );
332+ },
333+ );
259334```
260335
336+ Note:
337+
338+ - ` event.type === "reasoning" ` means a reasoning chunk
339+ - ` event.type === "output" ` means a visible output text chunk
340+ - ` event.text ` contains accumulated text for that stream type
341+ - reasoning events depend on the selected OpenAI model and provider response
342+
261343Then the output will look like this:
262344```
263345{ content: '{"capital":"Paris"}', finishReason: 'stop' }
@@ -359,4 +441,3 @@ pnpm i @adminforth/login-captcha-adapter-cloudflare
359441```
360442pnpm i @adminforth/login-captcha-adapter-recaptcha
361443```
362-
0 commit comments