Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 6 additions & 6 deletions 8_BeeAIRequirement.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -142,7 +142,7 @@
"from beeai_framework.adapters.gemini import GeminiChatModel\n",
"\n",
"# If using Vertex AI\n",
"# from beeai_framework.adapters.vertexai import VertexAIChatModel\n",
"from beeai_framework.adapters.vertexai import VertexAIChatModel # noqa: F401\n",
"from beeai_framework.agents.requirement import RequirementAgent\n",
"from beeai_framework.agents.requirement.requirements.conditional import (\n",
" ConditionalRequirement,\n",
Expand Down Expand Up @@ -244,11 +244,11 @@
" allow_parallel_tool_calls=True,\n",
" ),\n",
" # If using Vertex AI\n",
" # llm=VertexAIChatModel(\n",
" # model_id=\"gemini-3-flash-preview\",\n",
" # project=project_id,\n",
" # location=\"global\",\n",
" # allow_parallel_tool_calls=True,\n",
" # llm = VertexAIChatModel(\n",
" # model_id=\"gemini-3-flash-preview\",\n",
" # project= os.environ.get(\"GOOGLE_CLOUD_PROJECT\"),\n",
" # location=\"global\",\n",
" # allow_parallel_tool_calls=True,\n",
" # ),\n",
" tools=[\n",
" thinktool := ThinkTool(),\n",
Expand Down
14 changes: 11 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ Follow these steps to set up your environment and run the example agents. Each n

Before running the examples, complete the following setup steps:

1. **Create a [Gemini API Key](https://ai.google.dev/gemini-api/docs/api-key) or [configure your environment for Vertex AI](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=adc).**
1. **Create a [Gemini API Key](https://ai.google.dev/gemini-api/docs/api-key) or [configure your environment for Vertex AI](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=adc).** If you choose to use Vertex AI, make sure to uncomment the corresponding code in the notebooks and Python files.

2. **Configure Environment Variables:**
- In the project root, make a copy of `example.env` and rename it to `.env`.
Expand All @@ -97,15 +97,15 @@ Before running the examples, complete the following setup steps:
uv sync
```

- **Notebooks / Google Colab:** If running in a notebook environment, you can install the dependencies by running the following in a cell:
- **Notebooks / Google Colab:** If running in a notebook environment, you can install the dependencies by running the following in a cell (or in the terminal):

```python
%pip install .
```

## Running the Agents

You can run the agent servers using `uv run`. Ensure you are in the project root.
You can run each agent server in a separate terminal using `uv run`. Ensure you are in the project root.

- **Policy Agent (Lesson 2):**

Expand All @@ -130,3 +130,11 @@ You can run the agent servers using `uv run`. Ensure you are in the project root
```sh
uv run a2a_healthcare_agent.py
```

To interact with these A2A agent servers, run the A2A clients defined in the provided notebooks, or write standalone Python client scripts and run them from a terminal. For example, run:

```sh
uv run a2a_healthcare_client.py
```

to interact with the Healthcare Concierge Agent.
10 changes: 8 additions & 2 deletions a2a_healthcare_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
from beeai_framework.adapters.a2a.agents import A2AAgent
from beeai_framework.adapters.a2a.serve.server import A2AServer, A2AServerConfig
from beeai_framework.adapters.gemini import GeminiChatModel
from beeai_framework.adapters.vertexai import VertexAIChatModel # noqa: F401
from beeai_framework.agents.requirement import RequirementAgent
from beeai_framework.agents.requirement.requirements.conditional import (
ConditionalRequirement,
Expand Down Expand Up @@ -66,10 +67,15 @@ def main() -> None:
description="A personal concierge for Healthcare Information, customized to your policy.",
llm=GeminiChatModel(
"gemini-3-flash-preview",
# For Vertex AI:
# "vertex_ai/gemini-3-flash-preview",
allow_parallel_tool_calls=True,
),
# If using Vertex AI
# llm = VertexAIChatModel(
# model_id="gemini-3-flash-preview",
# project= os.environ.get("GOOGLE_CLOUD_PROJECT"),
# location="global",
# allow_parallel_tool_calls=True,
# ),
tools=[
thinktool := ThinkTool(),
policy_tool := HandoffTool(
Expand Down
37 changes: 37 additions & 0 deletions a2a_healthcare_client.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
import asyncio
import os
from typing import Any

from beeai_framework.adapters.a2a.agents import A2AAgent
from beeai_framework.memory import UnconstrainedMemory
from beeai_framework.middleware.trajectory import EventMeta, GlobalTrajectoryMiddleware

from helpers import setup_env


class ConciseGlobalTrajectoryMiddleware(GlobalTrajectoryMiddleware):
    """Trajectory middleware that logs compact, one-line event traces.

    Keeps only each event's prefix (with trailing ':'/' ' characters
    removed) and suppresses the event payload entirely.
    """

    def _format_prefix(self, meta: EventMeta) -> str:
        # NOTE: rstrip(": ") strips any trailing run of ':' and ' '
        # characters, not just a single ": " suffix.
        base_prefix = super()._format_prefix(meta)
        return base_prefix.rstrip(": ")

    def _format_payload(self, value: Any) -> str:
        # Hide payloads so the trajectory stays one line per event.
        return ""


async def main() -> None:
    """Query the Healthcare Concierge A2A agent server once and print its reply.

    Reads the server address from the AGENT_HOST and HEALTHCARE_AGENT_PORT
    environment variables (loaded by setup_env). Raises a RuntimeError with
    a clear message when either variable is missing, instead of the opaque
    ``TypeError: int() argument must be ... not 'NoneType'`` the bare
    ``int(os.getenv(...))`` call would produce.
    """
    setup_env()

    host = os.getenv("AGENT_HOST")
    port_value = os.getenv("HEALTHCARE_AGENT_PORT")
    if not host or not port_value:
        raise RuntimeError(
            "AGENT_HOST and HEALTHCARE_AGENT_PORT must be set; "
            "copy example.env to .env and fill in the values."
        )
    healthcare_agent_port = int(port_value)

    agent = A2AAgent(
        url=f"http://{host}:{healthcare_agent_port}", memory=UnconstrainedMemory()
    )
    # Stream a concise trajectory while the agent runs, then print the final answer.
    response = await agent.run(
        "I'm based in Austin, TX. How do I get mental health therapy near me and what does my insurance cover?"
    ).middleware(ConciseGlobalTrajectoryMiddleware())
    print(response.last_message.text)


# Script entry point: run the one-shot A2A client on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
3 changes: 3 additions & 0 deletions example.env
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,9 @@ GEMINI_API_KEY="YOUR_GEMINI_API_KEY"
# GOOGLE_CLOUD_LOCATION="global"
# GOOGLE_GENAI_USE_VERTEXAI=True
# GOOGLE_APPLICATION_CREDENTIALS="credentials.json"
# LiteLLM Vertex AI settings
# VERTEXAI_PROJECT="YOUR_GOOGLE_CLOUD_PROJECT"
# VERTEXAI_LOCATION="global"

# Agent Network Configuration
AGENT_HOST="localhost"
Expand Down
2 changes: 0 additions & 2 deletions helpers.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import logging
import warnings

import nest_asyncio
Expand All @@ -12,7 +11,6 @@ def setup_env() -> None:
load_dotenv(override=True)
nest_asyncio.apply()

logging.disable(level=logging.WARNING)
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=FutureWarning)

Expand Down
7 changes: 4 additions & 3 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,13 +12,14 @@ dependencies = [
"agent-framework==1.0.0b251120",
"agent-framework-a2a==1.0.0b251120",
"langchain-mcp-adapters==0.1.11",
"langchain-litellm",
"langgraph==1.0.2",
"langchain==1.0.2",
"beeai-framework[a2a]==0.1.75",
"langchain_litellm",
"beeai-framework[a2a]==0.1.70",
"nest_asyncio",
"ipython",
"langgraph-a2a-server>=0.1.6",
"python-dotenv",
"langgraph-a2a-server==0.1.6",
]

[tool.ruff]
Expand Down
12 changes: 7 additions & 5 deletions uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.