5 changes: 5 additions & 0 deletions langfun/core/data/conversion/gemini.py
@@ -141,6 +141,11 @@ def from_value(self, value: dict[str, Any]) -> lf.Message:
         )
       elif 'functionCall' in part or 'functionResponse' in part:
         pass
+      elif 'executableCode' in part or 'codeExecutionResult' in part:
+        # Silently skip code execution parts. The model uses code execution
+        # as an internal reasoning tool; we don't surface intermediate code
+        # or execution output in the final AIMessage.
+        pass
       else:
         raise ValueError(f'Unsupported content part: {part!r}.')
     message = message_cls.from_chunks(chunks)
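For context, a minimal sketch of a model turn that interleaves code execution with text (the part names match those handled above and exercised in the test below; the values are illustrative). With this change, from_value keeps only the text parts instead of raising ValueError on the code-execution parts:

# Illustrative Gemini response content. The converter drops 'executableCode'
# and 'codeExecutionResult' parts and builds the message from the 'text'
# parts only.
model_turn = {
    'role': 'model',
    'parts': [
        {'text': 'Let me compute that.'},
        {'executableCode': {'language': 'PYTHON', 'code': 'print(2 + 2)'}},
        {'codeExecutionResult': {'outcome': 'OUTCOME_OK', 'output': '4\n'}},
        {'text': 'The answer is 4.'},
    ],
}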
18 changes: 13 additions & 5 deletions langfun/core/llms/gemini.py
@@ -891,6 +891,11 @@ class Gemini(rest.REST):
       + 'are more than plain text.',
   ] = None

+  enable_code_execution: pg.typing.Annotated[
+      bool,
+      'Whether to enable the code execution tool for the model.',
+  ] = False
+
   @functools.cached_property
   def model_info(self) -> GeminiModelInfo:
     return _SUPPORTED_MODELS_BY_ID[self.model]
@@ -932,11 +937,14 @@ def modality_conversion(chunk: str | lf.Modality) -> Any:
           prompt.as_format('gemini', chunk_preprocessor=modality_conversion)
       )
     request['contents'] = contents
-    request['toolConfig'] = {
-        'functionCallingConfig': {
-            'mode': 'NONE',
-        }
-    }
+    if self.enable_code_execution:
+      request['tools'] = [{'codeExecution': {}}]
+    else:
+      request['toolConfig'] = {
+          'functionCallingConfig': {
+              'mode': 'NONE',
+          }
+      }
     if sampling_options.extras:
       request.update(sampling_options.extras)
     return request
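A minimal usage sketch, assuming valid Vertex AI credentials and that VertexAIGemini31ProPreview (the model class used in the test below) is available; with the flag enabled, the outgoing request advertises the codeExecution tool instead of disabling function calling:

from langfun.core.llms import vertexai

# Assumptions: Application Default Credentials are configured; the project
# and location values are placeholders.
lm = vertexai.VertexAIGemini31ProPreview(
    project='my-project',
    location='global',
    enable_code_execution=True,
)

# The model may run Python internally; only the final text is surfaced,
# because the conversion change above skips executableCode and
# codeExecutionResult parts when building the response message.
print(lm('What is the sum of the first 100 primes? Use code.'))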
51 changes: 51 additions & 0 deletions langfun/core/llms/vertexai_test.py
@@ -20,6 +20,7 @@
 from google import auth
 from google.auth import exceptions
 import langfun.core as lf
+from langfun.core.data.conversion import gemini as gemini_conversion
 from langfun.core.llms import rest
 from langfun.core.llms import vertexai
 import pyglove as pg
@@ -67,6 +68,56 @@ def test_gemini_31_flash_lite_preview(self):
     del os.environ['VERTEXAI_PROJECT']
     del os.environ['VERTEXAI_LOCATION']

+  @mock.patch.object(vertexai.VertexAI, 'credentials', new=True)
+  def test_code_execution(self):
+    os.environ['VERTEXAI_PROJECT'] = 'langfun'
+    model = vertexai.VertexAIGemini31ProPreview(enable_code_execution=True)
+    self.assertTrue(model.enable_code_execution)
+
+    request = model.request(
+        lf.UserMessage('Calculate 2+2'),
+        lf.LMSamplingOptions(),
+    )
+    self.assertEqual(request['tools'], [{'codeExecution': {}}])
+    self.assertNotIn('toolConfig', request)
+
+    converter = gemini_conversion.GeminiMessageConverter()
+
+    # Without the new handler, unrecognized parts raise ValueError. This
+    # shows that executableCode/codeExecutionResult parts would crash
+    # without the fix, since they would hit the same else branch.
+    with self.assertRaises(ValueError):
+      converter.from_value({
+          'role': 'model',
+          'parts': [{'unknownPartType': {'data': 'foo'}}],
+      })
+
+    # With the fix, executableCode and codeExecutionResult are silently
+    # skipped; only text parts appear in the final message.
+    msg = converter.from_value({
+        'role': 'model',
+        'parts': [
+            {'text': 'Sure!'},
+            {'executableCode': {'code': 'print(2+2)', 'language': 'PYTHON'}},
+            {'codeExecutionResult': {'outcome': 'OUTCOME_OK', 'output': '4\n'}},
+            {'text': 'And the result is 4!'},
+        ]
+    })
+    self.assertEqual(msg.text, 'Sure! And the result is 4!')
+
+    # Test configuration via an lf.LanguageModel.get URI.
+    uri_model = lf.LanguageModel.get(
+        'gemini-3.1-pro-preview?project=langfun&location=global'
+        '&enable_code_execution=True'
+    )
+    self.assertTrue(uri_model.enable_code_execution)
+    req2 = uri_model.request(
+        lf.UserMessage('Calculate'), lf.LMSamplingOptions()
+    )
+    self.assertEqual(req2['tools'], [{'codeExecution': {}}])
+
+    del os.environ['VERTEXAI_PROJECT']
+
   @mock.patch.object(vertexai.VertexAI, 'credentials', new=True)
   def test_multi_project_support(self):
     # Test single project (backward compatibility)