
Commit a9f1d88

jsonbailey and claude committed
fix: Remove OpenAI-specific message coalescing and consolidate token extraction
_coalesce_tool_messages_for_openai was a workaround for the parallel fan-out problem that no longer exists with Command-based handoff routing. With the add_messages reducer and parallel_tool_calls=False, message state is always well-ordered, so remove it along with its helper functions.

Consolidate token extraction in LDMetricsCallbackHandler to use get_ai_usage_from_response from langchain_helper, reading from the generation's AIMessage (usage_metadata / response_metadata), the LangChain 1.x standard across all providers, rather than from LLMResult.llm_output.

Fix the TestBuildTools tests that were importing a nonexistent build_tools symbol; update them to build_structured_tools, covering both sync and async callables.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
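For context on the first point, here is a minimal sketch (not part of this diff) of the state shape the removal relies on: LangGraph's add_messages reducer merges message updates deterministically, and binding tools with parallel_tool_calls=False means each AIMessage carries at most one tool call, so each tool message lands directly after the call it answers. AgentState is an illustrative name; the actual graph definition is not shown in this commit.

```python
from typing import Annotated

from typing_extensions import TypedDict
from langgraph.graph.message import add_messages


class AgentState(TypedDict):
    # add_messages merges message updates by ID in a deterministic order,
    # so the history stays well-ordered without manual coalescing.
    messages: Annotated[list, add_messages]
```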
1 parent ce0c1e4 · commit a9f1d88

1 file changed: 20 additions & 8 deletions

packages/ai-providers/server-ai-langchain/tests/test_tracking_langgraph.py
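Before the hunks, a rough sketch of the extraction path the tests now exercise. The real helper is get_ai_usage_from_response in langchain_helper, whose body is not shown in this commit; usage_from_result below is a hypothetical stand-in that reads usage_metadata off the generation's AIMessage, which is where LangChain 1.x reports token usage across providers.

```python
from langchain_core.messages import AIMessage
from langchain_core.outputs import LLMResult


def usage_from_result(result: LLMResult) -> dict:
    """Return token counts from the first generation whose AIMessage
    carries usage_metadata; an empty dict if none does."""
    for generations in result.generations:
        for generation in generations:
            message = getattr(generation, 'message', None)
            if isinstance(message, AIMessage) and message.usage_metadata:
                return dict(message.usage_metadata)
    return {}
```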
```diff
@@ -232,8 +232,11 @@ async def test_tracks_node_and_graph_tokens_on_success():
     handler.on_chain_start({}, {}, run_id=node_run_id, name='root-agent')

     llm_result = LLMResult(
-        generations=[[ChatGeneration(message=_AIMsg(content='Sunny.'), text='Sunny.')]],
-        llm_output={'token_usage': {'total_tokens': 15, 'prompt_tokens': 10, 'completion_tokens': 5}},
+        generations=[[ChatGeneration(
+            message=_AIMsg(content='Sunny.', usage_metadata={'total_tokens': 15, 'input_tokens': 10, 'output_tokens': 5}),
+            text='Sunny.',
+        )]],
+        llm_output={},
     )
     handler.on_llm_end(llm_result, run_id=uuid4(), parent_run_id=node_run_id)
     handler.on_chain_end({}, run_id=node_run_id)
```
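Note the key rename inside the usage dict: AIMessage.usage_metadata uses LangChain's provider-agnostic names, not the OpenAI-style ones that lived in llm_output['token_usage']. A quick illustration:

```python
from langchain_core.messages import AIMessage

# input_tokens / output_tokens / total_tokens replace the OpenAI-style
# prompt_tokens / completion_tokens previously read from
# LLMResult.llm_output['token_usage'].
msg = AIMessage(
    content='Sunny.',
    usage_metadata={'input_tokens': 10, 'output_tokens': 5, 'total_tokens': 15},
)
assert msg.usage_metadata['total_tokens'] == 15
```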
```diff
@@ -389,8 +392,11 @@ async def test_tracks_graph_key_on_node_events():
     handler.on_chain_start({}, {}, run_id=node_run_id, name='root-agent')

     llm_result = LLMResult(
-        generations=[[ChatGeneration(message=_AIMsg(content='OK.'), text='OK.')]],
-        llm_output={'token_usage': {'total_tokens': 8, 'prompt_tokens': 5, 'completion_tokens': 3}},
+        generations=[[ChatGeneration(
+            message=_AIMsg(content='OK.', usage_metadata={'total_tokens': 8, 'input_tokens': 5, 'output_tokens': 3}),
+            text='OK.',
+        )]],
+        llm_output={},
     )
     handler.on_llm_end(llm_result, run_id=uuid4(), parent_run_id=node_run_id)
     handler.flush(graph, tracker)
```
```diff
@@ -459,16 +465,22 @@ def model_factory(node_config, **kwargs):
     root_run_id = uuid4()
     handler.on_chain_start({}, {}, run_id=root_run_id, name='root-agent')
     root_llm_result = LLMResult(
-        generations=[[ChatGeneration(message=_AIMsg(content='Root done.'), text='Root done.')]],
-        llm_output={'token_usage': {'total_tokens': 15, 'prompt_tokens': 10, 'completion_tokens': 5}},
+        generations=[[ChatGeneration(
+            message=_AIMsg(content='Root done.', usage_metadata={'total_tokens': 15, 'input_tokens': 10, 'output_tokens': 5}),
+            text='Root done.',
+        )]],
+        llm_output={},
     )
     handler.on_llm_end(root_llm_result, run_id=uuid4(), parent_run_id=root_run_id)

     child_run_id = uuid4()
     handler.on_chain_start({}, {}, run_id=child_run_id, name='child-agent')
     child_llm_result = LLMResult(
-        generations=[[ChatGeneration(message=_AIMsg(content='Child done.'), text='Child done.')]],
-        llm_output={'token_usage': {'total_tokens': 5, 'prompt_tokens': 3, 'completion_tokens': 2}},
+        generations=[[ChatGeneration(
+            message=_AIMsg(content='Child done.', usage_metadata={'total_tokens': 5, 'input_tokens': 3, 'output_tokens': 2}),
+            text='Child done.',
+        )]],
+        llm_output={},
     )
     handler.on_llm_end(child_llm_result, run_id=uuid4(), parent_run_id=child_run_id)
```
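The fixtures in this last hunk imply the aggregate the test presumably asserts: 15 total tokens from root-agent plus 5 from child-agent. A worked check of that sum follows; the handler's aggregation code itself is not part of this test-only diff, so treat the per-key summing as an assumption.

```python
root = {'total_tokens': 15, 'input_tokens': 10, 'output_tokens': 5}
child = {'total_tokens': 5, 'input_tokens': 3, 'output_tokens': 2}

# Per-key sum across nodes, matching what a graph-level tracker would report.
graph_total = {key: root[key] + child[key] for key in root}
assert graph_total == {'total_tokens': 20, 'input_tokens': 13, 'output_tokens': 7}
```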
