1 change: 0 additions & 1 deletion src/google/adk/flows/llm_flows/audio_cache_manager.py
@@ -141,7 +141,6 @@ async def _flush_cache_to_services(
     Returns:
       True if the cache was successfully flushed, False otherwise.
     """
-    print('flush cache')
     if not invocation_context.artifact_service or not audio_cache:
       logger.debug('Skipping cache flush: no artifact service or empty cache')
       return False
15 changes: 13 additions & 2 deletions src/google/adk/models/gemini_llm_connection.py
@@ -164,8 +164,14 @@ async def receive(self) -> AsyncGenerator[LlmResponse, None]:
             message.server_content.input_transcription
             and message.server_content.input_transcription.text
         ):
+          user_text = message.server_content.input_transcription.text
+          parts = [
+              types.Part.from_text(
+                  text=user_text,
+              )
+          ]
           llm_response = LlmResponse(
-              input_transcription=message.server_content.input_transcription,
+              content=types.Content(role='user', parts=parts)
           )
           yield llm_response
         if (
@@ -180,8 +186,13 @@ async def receive(self) -> AsyncGenerator[LlmResponse, None]:
           # We rely on other control signals to determine when to yield the
           # full text response(turn_complete, interrupted, or tool_call).
           text += message.server_content.output_transcription.text
+          parts = [
+              types.Part.from_text(
+                  text=message.server_content.output_transcription.text
+              )
+          ]
           llm_response = LlmResponse(
-              output_transcription=message.server_content.output_transcription
+              content=types.Content(role='model', parts=parts), partial=True
           )
           yield llm_response

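For context on what the changed receive() code now emits, here is a minimal standalone sketch of the same construction, assuming google-adk and google-genai are installed. The helper name transcription_to_response is illustrative only and is not part of this PR; the diff above builds the responses inline inside receive().

# Minimal sketch (not part of the PR): builds an LlmResponse the same way the
# updated receive() code does, wrapping transcription text in Content/Part so
# it flows through the normal content path instead of a transcription field.
from google.adk.models.llm_response import LlmResponse
from google.genai import types


def transcription_to_response(
    text: str, role: str, partial: bool = False
) -> LlmResponse:
  # Wrap the raw transcription text in a Part, then a Content with the
  # appropriate role ('user' for input, 'model' for output transcription).
  parts = [types.Part.from_text(text=text)]
  return LlmResponse(
      content=types.Content(role=role, parts=parts),
      partial=partial,
  )


# Input (user) transcription: yielded as complete user content.
user_response = transcription_to_response('turn the lights off', role='user')
# Output (model) transcription: streamed incrementally, hence partial=True; the
# full text is yielded later on turn_complete/interrupted/tool_call signals.
model_response = transcription_to_response('Sure, turning', role='model', partial=True)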