@@ -3049,6 +3049,10 @@ def _process_mtmd_prompt(
30493049 self ,
30503050 llama : llama_core .Llama ,
30513051 messages : List [llama_types .ChatCompletionRequestMessage ],
3052+ functions : Optional [List [llama_types .ChatCompletionFunction ]] = None ,
3053+ function_call : Optional [llama_types .ChatCompletionRequestFunctionCall ] = None ,
3054+ tools : Optional [List [llama_types .ChatCompletionTool ]] = None ,
3055+ tool_choice : Optional [llama_types .ChatCompletionToolChoiceOption ] = None ,
30523056 ) -> Tuple [List [int ], List [tuple ], Any , List [Any ]]:
30533057 """
30543058 Core multimodal preprocessing pipeline.
@@ -3079,6 +3083,10 @@ def _process_mtmd_prompt(
30793083 add_generation_prompt = True ,
30803084 eos_token = self .mtmd_eos_token ,
30813085 bos_token = self .mtmd_bos_token ,
3086+ functions = functions ,
3087+ function_call = function_call ,
3088+ tools = tools ,
3089+ tool_choice = tool_choice ,
30823090 ** getattr (self , 'extra_template_arguments' , {})
30833091 )
30843092 # Replace image_url by media_marker in text
@@ -3263,7 +3271,14 @@ def __call__(
32633271 assert self .mtmd_ctx is not None
32643272
32653273 # 2. Concurrent Preprocessing & Ledger Construction
3266- full_prompt_ids , chunk_token_spans , chunks , bitmap_cleanup = self ._process_mtmd_prompt (llama , messages )
3274+ full_prompt_ids , chunk_token_spans , chunks , bitmap_cleanup = self ._process_mtmd_prompt (
3275+ llama = llama ,
3276+ messages = messages ,
3277+ functions = functions ,
3278+ function_call = function_call ,
3279+ tools = tools ,
3280+ tool_choice = tool_choice
3281+ )
32673282
32683283 if self .verbose :
32693284 print (f"{ self .log_prefix } (__call__): Prepared virtual token ledger of length { len (full_prompt_ids )} ." , file = sys .stderr )