Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "uipath-langchain"
version = "0.9.10"
version = "0.9.11"
description = "Python SDK that enables developers to build and deploy LangGraph agents to the UiPath Cloud Platform"
readme = { file = "README.md", content-type = "text/markdown" }
requires-python = ">=3.11"
Expand Down
1 change: 1 addition & 0 deletions src/uipath_langchain/agent/exceptions/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ class AgentRuntimeErrorCode(str, Enum):

UNEXPECTED_ERROR = "UNEXPECTED_ERROR"
HTTP_ERROR = "HTTP_ERROR"
LICENSE_NOT_AVAILABLE = "LICENSE_NOT_AVAILABLE"

# Routing
ROUTING_ERROR = "ROUTING_ERROR"
Expand Down
86 changes: 86 additions & 0 deletions src/uipath_langchain/agent/exceptions/licensing.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
"""Convert LLM provider HTTP errors into structured AgentRuntimeErrors.

Each LLM provider wraps HTTP errors in a different exception type:
- OpenAI: openai.PermissionDeniedError → e.status_code
- Vertex: google.genai.errors.ClientError → e.code
- Bedrock: botocore.exceptions.ClientError → e.response dict

This module extracts the HTTP status code from any of these and re-raises
as an AgentRuntimeError so that upstream error handling (exception mapper,
CAS bridge) can categorise by status code without provider-specific logic.
"""

from uipath.runtime.errors import UiPathErrorCategory

from uipath_langchain.agent.exceptions.exceptions import (
AgentRuntimeError,
AgentRuntimeErrorCode,
)


def _extract_status_code(e: BaseException) -> int | None:
"""Extract HTTP status code from any provider-specific exception.

Supports OpenAI (status_code), Vertex/google.genai (code), and
Bedrock/botocore (response dict). Walks __cause__ chain to handle
LangChain wrapper exceptions (e.g. ChatGoogleGenerativeAIError).
"""
# OpenAI: e.status_code
sc = getattr(e, "status_code", None)
if isinstance(sc, int):
return sc

# Vertex (google.genai.errors.APIError): e.code
sc = getattr(e, "code", None)
if isinstance(sc, int):
return sc

# Bedrock (botocore.exceptions.ClientError): e.response dict
resp = getattr(e, "response", None)
if isinstance(resp, dict):
sc = resp.get("ResponseMetadata", {}).get("HTTPStatusCode")
if isinstance(sc, int):
return sc

# Walk __cause__ chain
cause = getattr(e, "__cause__", None)
if cause is not None and cause is not e:
return _extract_status_code(cause)

return None


# Maps known LLM Gateway status codes to specific error codes.
# Unknown status codes fall back to HTTP_ERROR.
# NOTE(review): 403 gets the dedicated LICENSE_NOT_AVAILABLE code —
# presumably the gateway returns 403 when no licence/AI units are
# available for the tenant; confirm against the gateway's behaviour.
_LLM_STATUS_CODE_MAP: dict[int, AgentRuntimeErrorCode] = {
    403: AgentRuntimeErrorCode.LICENSE_NOT_AVAILABLE,
}


def raise_for_provider_http_error(e: BaseException) -> None:
    """Re-raise provider-specific HTTP errors as a structured AgentRuntimeError.

    Extracts the HTTP status code from any LLM provider exception and
    converts it to an AgentRuntimeError with the status code preserved.
    Known status codes (e.g. 403) get a specific error code and a
    meaningful title so upstream handlers (exception mapper, CAS bridge)
    can categorise and surface the error without provider-specific logic.
    Does nothing if no HTTP status code can be extracted.

    Args:
        e: The provider exception caught around an LLM invocation.

    Raises:
        AgentRuntimeError: When an HTTP status code could be extracted,
            chained (``from e``) to preserve the original traceback.
    """
    sc = _extract_status_code(e)
    if sc is None:
        return

    code = _LLM_STATUS_CODE_MAP.get(sc, AgentRuntimeErrorCode.HTTP_ERROR)

    # Known status codes carry a specific, human-meaningful title: the
    # exception mapper does not touch AgentRuntimeErrors and assumes their
    # messages are already correct, so the generic "HTTP {sc}" title is
    # reserved for status codes we do not recognise.
    if sc == 403:
        category = UiPathErrorCategory.DEPLOYMENT
        title = "LLM license not available"
    else:
        category = UiPathErrorCategory.UNKNOWN
        title = f"LLM provider returned HTTP {sc}"

    raise AgentRuntimeError(
        code=code,
        title=title,
        detail=str(e),
        category=category,
        status=sc,
    ) from e
10 changes: 9 additions & 1 deletion src/uipath_langchain/agent/react/llm_node.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
from uipath_langchain.chat.handlers import get_payload_handler

from ..exceptions import AgentRuntimeError, AgentRuntimeErrorCode
from ..exceptions.licensing import raise_for_provider_http_error
from ..messages.message_utils import replace_tool_calls
from ..tools.static_args import StaticArgsHandler
from .constants import (
Expand Down Expand Up @@ -112,7 +113,14 @@ async def llm_node(state: StateT):

llm = model.bind_tools(static_schema_tools, **binding_kwargs)

response = await llm.ainvoke(messages)
try:
response = await llm.ainvoke(messages)
except Exception as e:
# LLM errors arrive as provider-specific exceptions (OpenAI, Bedrock,
# Vertex). Convert to a structured AgentRuntimeError with the HTTP
# status code so upstream handlers can categorise (e.g. 403 → licensing).
raise_for_provider_http_error(e)
raise
if not isinstance(response, AIMessage):
raise AgentRuntimeError(
code=AgentRuntimeErrorCode.LLM_INVALID_RESPONSE,
Expand Down
2 changes: 1 addition & 1 deletion uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading