Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
38 changes: 35 additions & 3 deletions src/agents/mcp/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ def __init__(
require_approval: RequireApprovalSetting = None,
failure_error_function: ToolErrorFunction | None | _UnsetType = _UNSET,
tool_meta_resolver: MCPToolMetaResolver | None = None,
tool_name_prefix: str | None = None,

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1 Badge Thread tool_name_prefix through concrete MCP server APIs

Adding tool_name_prefix only to MCPServer.__init__ makes the new feature unusable for the public server classes (MCPServerStdio, MCPServerSse, and MCPServerStreamableHttp), whose constructors (and _MCPServerWithClientSession.__init__) still reject that keyword; the usage shown in the commit message (MCPServerStdio(..., tool_name_prefix="fs1")) will raise TypeError before runtime, so users cannot actually enable collision avoidance.

Useful? React with 👍 / 👎.

):
"""
Args:
Expand All @@ -110,13 +111,17 @@ def __init__(
SDK default) will be used.
tool_meta_resolver: Optional callable that produces MCP request metadata (`_meta`) for
tool calls. It is invoked by the Agents SDK before calling `call_tool`.
tool_name_prefix: Optional prefix to add to tool names from this server. Useful for
avoiding name collisions when multiple MCP servers provide tools with the same name.
For example, with prefix "filesystem", tool "read_file" becomes "filesystem_read_file".
"""
self.use_structured_content = use_structured_content
self._needs_approval_policy = self._normalize_needs_approval(
require_approval=require_approval
)
self._failure_error_function = failure_error_function
self.tool_meta_resolver = tool_meta_resolver
self.tool_name_prefix = tool_name_prefix

@abc.abstractmethod
async def connect(self):
Expand Down Expand Up @@ -370,6 +375,23 @@ async def _apply_tool_filter(
raise UserError("run_context and agent are required for dynamic tool filtering")
return await self._apply_dynamic_tool_filter(tools, run_context, agent)

def _apply_tool_name_prefix(self, tools: list[MCPTool]) -> list[MCPTool]:
    """Prefix tool names to avoid collisions between multiple MCP servers.

    When ``tool_name_prefix`` is configured, every tool named ``X`` is
    exposed to the model as ``{prefix}_X`` (e.g. prefix "filesystem" turns
    "read_file" into "filesystem_read_file"). The prefix is stripped again
    in ``call_tool`` before the request is forwarded to the server.

    Args:
        tools: Tools as listed by the MCP server.

    Returns:
        The original list when no prefix is configured; otherwise new tool
        objects whose only changed field is the prefixed ``name``.
    """
    if not self.tool_name_prefix:
        return tools

    # Use model_copy(update=...) so every other MCPTool field (title,
    # annotations, outputSchema, meta, ...) is preserved. Reconstructing
    # the tool from name/description/inputSchema alone would silently drop
    # metadata that downstream code uses to derive model-facing tool
    # definitions.
    return [
        tool.model_copy(update={"name": f"{self.tool_name_prefix}_{tool.name}"})
        for tool in tools
    ]

def _apply_static_tool_filter(
self, tools: list[MCPTool], static_filter: ToolFilterStatic
) -> list[MCPTool]:
Expand Down Expand Up @@ -582,6 +604,11 @@ async def list_tools(
filtered_tools = tools
if self.tool_filter is not None:
filtered_tools = await self._apply_tool_filter(filtered_tools, run_context, agent)

# Apply tool name prefix if configured
if self.tool_name_prefix:
filtered_tools = self._apply_tool_name_prefix(filtered_tools)

return filtered_tools
except httpx.HTTPStatusError as e:
status_code = e.response.status_code
Expand All @@ -606,12 +633,17 @@ async def call_tool(
session = self.session
assert session is not None

# Strip prefix if configured and present
actual_tool_name = tool_name
if self.tool_name_prefix and tool_name.startswith(f"{self.tool_name_prefix}_"):
actual_tool_name = tool_name[len(self.tool_name_prefix) + 1:]

try:
self._validate_required_parameters(tool_name=tool_name, arguments=arguments)
self._validate_required_parameters(tool_name=actual_tool_name, arguments=arguments)
if meta is None:
return await self._run_with_retries(lambda: session.call_tool(tool_name, arguments))
return await self._run_with_retries(lambda: session.call_tool(actual_tool_name, arguments))
return await self._run_with_retries(
lambda: session.call_tool(tool_name, arguments, meta=meta)
lambda: session.call_tool(actual_tool_name, arguments, meta=meta)
)
except httpx.HTTPStatusError as e:
status_code = e.response.status_code
Expand Down
42 changes: 35 additions & 7 deletions src/agents/models/chatcmpl_converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -415,6 +415,37 @@ def extract_all_content(
raise UserError(f"Unknown content: {c}")
return out

@classmethod
def _supports_reasoning_content(
    cls,
    model: str,
    item_model: str | None,
    item_provider_data: dict | None,
) -> bool:
    """Return True if ``reasoning_content`` should be carried into assistant messages.

    Some chat-completions providers require a ``reasoning_content`` field on
    assistant messages that contain tool calls. This helper verifies both
    that the *target* model supports the field and that the history *item*
    originated from a compatible model, so reasoning text is not carried
    across mixed-model histories.

    Supported model families (matched by case-insensitive substring):
      - DeepSeek: deepseek-*
      - Zhipu GLM: glm-*, zhipu-*
      - Moonshot Kimi: kimi-*, moonshot-*

    Args:
        model: Target model name for the outgoing request.
        item_model: Model name recorded in the history item's provider
            metadata, if any.
        item_provider_data: Raw provider metadata dict from the history
            item; an empty dict means legacy data with no provenance.

    Returns:
        True when ``reasoning_content`` should be included for this item.
    """
    # Model families that require/support reasoning_content.
    reasoning_models = ("deepseek", "glm", "zhipu", "kimi", "moonshot")

    model_lower = model.lower()
    if not any(m in model_lower for m in reasoning_models):
        return False

    # Backward compatibility: legacy items carry an empty provider_data
    # dict, so provenance cannot be verified and a match is assumed.
    if item_provider_data == {}:
        return True

    if item_model:
        item_model_lower = item_model.lower()
        return any(m in item_model_lower for m in reasoning_models)

    # Provider metadata is present but records no model identifier: do NOT
    # assume the item came from a compatible model. (Falling through to
    # True here would bypass the model-match guard for mixed or legacy
    # histories whose provider_data has other keys but no model id.)
    return False
Comment on lines +444 to +447

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P2 Badge Enforce model origin check in reasoning-content helper

_supports_reasoning_content currently returns True when item_provider_data is non-empty but item_model is missing, which bypasses the model-match guard the caller relies on. That regresses the prior behavior (which only skipped model matching when provider metadata was entirely absent) and can incorrectly carry reasoning_content across mixed or legacy histories where provider metadata contains other keys but no model identifier.

Useful? React with 👍 / 👎.


@classmethod
def items_to_messages(
cls,
Expand Down Expand Up @@ -740,16 +771,13 @@ def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
# This preserves the original behavior
pending_thinking_blocks = reconstructed_thinking_blocks

# DeepSeek requires reasoning_content field in assistant messages with tool calls
# Items may not all originate from DeepSeek, so need to check for model match.
# DeepSeek/GLM/Kimi require reasoning_content field in assistant messages with tool calls
# Items may not all originate from the same model, so need to check for model match.
# For backward compatibility, if provider_data is missing, ignore the check.
# Supported models: deepseek, zhipu/glm, moonshot/kimi
elif (
model
and "deepseek" in model.lower()
and (
(item_model and "deepseek" in item_model.lower())
or item_provider_data == {}
)
and self._supports_reasoning_content(model, item_model, item_provider_data)

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1 Badge Call reasoning helper via cls in classmethod

items_to_messages is a @classmethod, but this branch calls self._supports_reasoning_content(...); self is undefined in this scope, so any reasoning item that reaches this condition (for DeepSeek/GLM/Kimi targets) will raise NameError and abort message conversion before the tool-call assistant message is emitted.

Useful? React with 👍 / 👎.

):
summary_items = reasoning_item.get("summary", [])
if summary_items:
Expand Down
Loading