@@ -6,7 +6,7 @@ import { renderNudge } from "../prompts"
 import {
 	extractParameterKey,
 	buildToolIdList,
-	createSyntheticUserMessage,
+	createSyntheticTextPart,
 	createSyntheticToolPart,
 	isIgnoredUserMessage,
 } from "./utils"
@@ -182,18 +182,22 @@ export const insertPruneToolContext = (
 	}

 	const userInfo = lastUserMessage.info as UserMessage
-	const variant = state.variant ?? userInfo.variant

 	const lastNonIgnoredMessage = messages.findLast(
 		(msg) => !(msg.info.role === "user" && isIgnoredUserMessage(msg)),
 	)

+	if (!lastNonIgnoredMessage) {
+		return
+	}
+
 	// It's not safe to inject assistant role messages following a user message as models such
 	// as Claude expect the assistant "turn" to start with reasoning parts. Reasoning parts in many
 	// cases also cannot be faked as they may be encrypted by the model.
 	// Gemini only accepts synth reasoning text if it is "skip_thought_signature_validator"
-	if (!lastNonIgnoredMessage || lastNonIgnoredMessage.info.role === "user") {
-		messages.push(createSyntheticUserMessage(lastUserMessage, combinedContent, variant))
+	if (lastNonIgnoredMessage.info.role === "user") {
+		const textPart = createSyntheticTextPart(lastNonIgnoredMessage, combinedContent)
+		lastNonIgnoredMessage.parts.push(textPart)
 	} else {
 		// Append tool part to existing assistant message. This approach works universally across
 		// models including DeepSeek and Kimi which don't output reasoning parts following an
0 commit comments