
Commit c6f389d

GWeale authored and copybara-github committed
fix: Refine Ollama content flattening and provider checks
- Strip whitespace from custom LLM provider and model names when checking for "ollama_chat".
- Enhance `_flatten_ollama_content` to correctly handle content that is None, a string, a dictionary, or an iterable (like a tuple) of content blocks, not just lists. This aligns with LiteLLM's `OpenAIMessageContent` type being an `Iterable`.

Close #3928

Co-authored-by: George Weale <gweale@google.com>
PiperOrigin-RevId: 845848017
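As an illustration (not part of the commit), the refined `_flatten_ollama_content` should behave roughly as follows; the behavior is read off the diff below, and importing the private helper directly is an assumption for demonstration only:

    # Sketch only: exercising the refined flattening, assuming the private
    # helper stays importable at this path (the commit's own test imports it
    # the same way).
    from google.adk.models.lite_llm import _flatten_ollama_content

    # None and plain strings pass through unchanged.
    assert _flatten_ollama_content(None) is None
    assert _flatten_ollama_content("plain text") == "plain text"

    # Any iterable of content blocks is flattened, not just a list.
    blocks = (
        {"type": "text", "text": "first"},
        {"type": "text", "text": "second"},
    )
    assert _flatten_ollama_content(blocks) == "first\nsecond"

    # A bare dict falls back to its JSON serialization.
    assert _flatten_ollama_content({"a": 1}) == '{"a": 1}'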
1 parent 1add41e commit c6f389d

File tree

src/google/adk/models/lite_llm.py
tests/unittests/models/test_litellm.py

2 files changed: +33 -6 lines changed


src/google/adk/models/lite_llm.py

Lines changed: 22 additions & 6 deletions
@@ -628,9 +628,12 @@ def _is_ollama_chat_provider(
     model: Optional[str], custom_llm_provider: Optional[str]
 ) -> bool:
   """Returns True when requests should be normalized for ollama_chat."""
-  if custom_llm_provider and custom_llm_provider.lower() == "ollama_chat":
+  if (
+      custom_llm_provider
+      and custom_llm_provider.strip().lower() == "ollama_chat"
+  ):
     return True
-  if model and model.lower().startswith("ollama_chat"):
+  if model and model.strip().lower().startswith("ollama_chat"):
     return True
   return False

@@ -644,11 +647,24 @@ def _flatten_ollama_content(
   join them with newlines, and fall back to a JSON string for non-text content.
   If both text and non-text parts are present, only the text parts are kept.
   """
-  if not isinstance(content, list):
+  if content is None or isinstance(content, str):
     return content

+  # `OpenAIMessageContent` is typed as `Iterable[...]` in LiteLLM. Some
+  # providers or LiteLLM versions may hand back tuples or other iterables.
+  if isinstance(content, dict):
+    try:
+      return json.dumps(content)
+    except TypeError:
+      return str(content)
+
+  try:
+    blocks = list(content)
+  except TypeError:
+    return str(content)
+
   text_parts = []
-  for block in content:
+  for block in blocks:
     if isinstance(block, dict) and block.get("type") == "text":
       text_value = block.get("text")
       if text_value:

@@ -658,9 +674,9 @@ def _flatten_ollama_content(
     return _NEW_LINE.join(text_parts)

   try:
-    return json.dumps(content)
+    return json.dumps(blocks)
   except TypeError:
-    return str(content)
+    return str(blocks)


 def _normalize_ollama_chat_messages(
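For reference (again not part of the commit), the whitespace-stripping in `_is_ollama_chat_provider` means padded provider or model names are now recognized; a minimal sketch, assuming the private helper remains importable at this path:

    # Sketch only: stray whitespace no longer defeats ollama_chat detection.
    from google.adk.models.lite_llm import _is_ollama_chat_provider

    assert _is_ollama_chat_provider(model=None, custom_llm_provider=" ollama_chat ")
    assert _is_ollama_chat_provider(model="  ollama_chat/llama3", custom_llm_provider=None)
    assert not _is_ollama_chat_provider(model="openai/gpt-4o", custom_llm_provider=None)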

tests/unittests/models/test_litellm.py

Lines changed: 11 additions & 0 deletions
@@ -1549,6 +1549,17 @@ async def test_generate_content_async_custom_provider_flattens_content(
   assert "Describe this image." in message_content


+def test_flatten_ollama_content_accepts_tuple_blocks():
+  from google.adk.models.lite_llm import _flatten_ollama_content
+
+  content = (
+      {"type": "text", "text": "first"},
+      {"type": "text", "text": "second"},
+  )
+  flattened = _flatten_ollama_content(content)
+  assert flattened == "first\nsecond"
+
+
 @pytest.mark.asyncio
 async def test_content_to_message_param_user_message():
   content = types.Content(
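To run just the added test in isolation, pytest's standard node-id selection should work: pytest tests/unittests/models/test_litellm.py::test_flatten_ollama_content_accepts_tuple_blocks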
