Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
56 changes: 35 additions & 21 deletions src/google/adk/flows/llm_flows/contents.py
Original file line number Diff line number Diff line change
Expand Up @@ -234,6 +234,30 @@ def _contains_empty_content(event: Event) -> bool:
) and (not event.output_transcription and not event.input_transcription)


def _should_include_event_in_context(
    current_branch: Optional[str], event: Event
) -> bool:
  """Decides whether an event belongs in the LLM request context.

  An event is excluded when it carries no usable content (no text, function
  calls, or transcriptions), when it was produced on a branch other than the
  current agent's, or when it is an internal bookkeeping event such as an
  authentication or tool-confirmation request.

  Args:
    current_branch: The branch of the agent currently running, or None.
    event: The session event under consideration.

  Returns:
    True if the event should be forwarded to the LLM, False otherwise.
  """
  # Guard clauses: reject each exclusion reason in turn.
  if _contains_empty_content(event):
    return False
  if not _is_event_belongs_to_branch(current_branch, event):
    return False
  if _is_auth_event(event):
    return False
  return not _is_request_confirmation_event(event)


def _process_compaction_events(events: list[Event]) -> list[Event]:
"""Processes events by applying compaction.

Expand Down Expand Up @@ -314,6 +338,7 @@ def _get_contents(
# By iterating backward, when a rewind event is found, we skip all events
# from that point back to the `rewind_before_invocation_id`, thus removing
# them from the history used for the LLM request.
has_compaction_events = False
rewind_filtered_events = []
i = len(events) - 1
while i >= 0:
Expand All @@ -326,29 +351,18 @@ def _get_contents(
break
else:
rewind_filtered_events.append(event)
if event.actions and event.actions.compaction:
has_compaction_events = True
i -= 1
rewind_filtered_events.reverse()

# Parse the events, leaving the contents and the function calls and
# responses from the current agent.
raw_filtered_events = []
has_compaction_events = False
for event in rewind_filtered_events:
if _contains_empty_content(event):
continue
if not _is_event_belongs_to_branch(current_branch, event):
# Skip events not belong to current branch.
continue
if _is_auth_event(event):
# Skip auth events.
continue
if _is_request_confirmation_event(event):
# Skip request confirmation events.
continue

if event.actions and event.actions.compaction:
has_compaction_events = True
raw_filtered_events.append(event)
raw_filtered_events = [
e
for e in rewind_filtered_events
if _should_include_event_in_context(current_branch, e)
]

if has_compaction_events:
events_to_process = _process_compaction_events(raw_filtered_events)
Expand Down Expand Up @@ -441,9 +455,9 @@ def _get_current_turn_contents(
# Find the latest event that starts the current turn and process from there
for i in range(len(events) - 1, -1, -1):
event = events[i]
if not event.content:
continue
if event.author == 'user' or _is_other_agent_reply(agent_name, event):
if _should_include_event_in_context(current_branch, event) and (
event.author == 'user' or _is_other_agent_reply(agent_name, event)
):
return _get_contents(current_branch, events[i:], agent_name)

return []
Expand Down
52 changes: 52 additions & 0 deletions tests/unittests/flows/llm_flows/test_contents.py
Original file line number Diff line number Diff line change
Expand Up @@ -197,6 +197,58 @@ async def test_include_contents_none_multi_agent_current_turn():
assert llm_request.contents[1] == types.ModelContent("Current agent in turn")


@pytest.mark.asyncio
async def test_include_contents_none_multi_branch_current_turn():
  """Test current turn detection in multi-branch scenarios with include_contents='none'."""
  current_agent = Agent(
      model="gemini-2.5-flash", name="current_agent", include_contents="none"
  )
  request = LlmRequest(model="gemini-2.5-flash")
  ctx = await testing_utils.create_invocation_context(agent=current_agent)
  ctx.branch = "root.parent_agent"

  # Multi-branch history where the current turn starts from a user event.
  # This shape arises from a Parallel Agent containing two or more Sequential
  # Agents, each of which wraps two Llm Agents as sub agents.
  ctx.session.events = [
      Event(
          invocation_id="inv1",
          branch="root",
          author="user",
          content=types.UserContent("First user message"),
      ),
      Event(
          invocation_id="inv1",
          branch="root.parent_agent",
          author="sibling_agent",
          content=types.ModelContent("Sibling agent response"),
      ),
      Event(
          invocation_id="inv1",
          branch="root.uncle_agent",
          author="cousin_agent",
          content=types.ModelContent("Cousin agent response"),
      ),
  ]

  # Run the contents request processor over the session.
  async for _ in contents.request_processor.run_async(ctx, request):
    pass

  # Only the most recent other-agent message on the current branch should
  # seed the turn; the off-branch cousin event must be filtered out.
  assert len(request.contents) == 1
  assert request.contents[0].role == "user"
  expected_parts = [
      types.Part(text="For context:"),
      types.Part(text="[sibling_agent] said: Sibling agent response"),
  ]
  assert request.contents[0].parts == expected_parts


@pytest.mark.asyncio
async def test_authentication_events_are_filtered():
"""Test that authentication function calls and responses are filtered out."""
Expand Down