Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
436 changes: 435 additions & 1 deletion .github/workflows/python-sample-validation.yml

Large diffs are not rendered by default.

16 changes: 9 additions & 7 deletions python/samples/02-agents/chat_client/built_in_chat_clients.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from random import randint
from typing import Annotated, Any, Literal

from agent_framework import SupportsChatGetResponse, tool
from agent_framework import Message, SupportsChatGetResponse, tool
from agent_framework.azure import (
AzureAIAgentClient,
AzureOpenAIAssistantsClient,
Expand Down Expand Up @@ -117,35 +117,37 @@ async def main(client_name: ClientName = "openai_chat") -> None:
client = get_client(client_name)

# 1. Configure prompt and streaming mode.
message = "What's the weather in Amsterdam and in Paris?"
message = Message("user", text="What's the weather in Amsterdam and in Paris?")
stream = os.getenv("STREAM", "false").lower() == "true"
print(f"Client: {client_name}")
print(f"User: {message}")
print(f"User: {message.text}")

# 2. Run with context-managed clients.
if isinstance(client, OpenAIAssistantsClient | AzureOpenAIAssistantsClient | AzureAIAgentClient):
Copy link

Copilot AI Mar 23, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

isinstance(client, OpenAIAssistantsClient | AzureOpenAIAssistantsClient | AzureAIAgentClient) is valid at runtime on Python 3.10+, where isinstance accepts PEP 604 union types; it only raises TypeError on Python 3.9 and earlier. Since these samples declare requires-python = ">=3.10", the union form is fine as written. If support for older Python versions were ever needed, use a tuple of classes instead (e.g., isinstance(client, (OpenAIAssistantsClient, AzureOpenAIAssistantsClient, AzureAIAgentClient))).

Suggested change
if isinstance(client, OpenAIAssistantsClient | AzureOpenAIAssistantsClient | AzureAIAgentClient):
if isinstance(client, (OpenAIAssistantsClient, AzureOpenAIAssistantsClient, AzureAIAgentClient)):

Copilot uses AI. Check for mistakes.
async with client:
if stream:
response_stream = client.get_response(message, stream=True, options={"tools": get_weather})
response_stream = client.get_response([message], stream=True, options={"tools": get_weather})
print("Assistant: ", end="")
async for chunk in response_stream:
if chunk.text:
print(chunk.text, end="")
print("")
else:
print(f"Assistant: {await client.get_response(message, stream=False, options={'tools': get_weather})}")
print(
f"Assistant: {await client.get_response([message], stream=False, options={'tools': get_weather})}"
)
return

# 3. Run with non-context-managed clients.
if stream:
response_stream = client.get_response(message, stream=True, options={"tools": get_weather})
response_stream = client.get_response([message], stream=True, options={"tools": get_weather})
print("Assistant: ", end="")
async for chunk in response_stream:
if chunk.text:
print(chunk.text, end="")
print("")
else:
print(f"Assistant: {await client.get_response(message, stream=False, options={'tools': get_weather})}")
print(f"Assistant: {await client.get_response([message], stream=False, options={'tools': get_weather})}")


if __name__ == "__main__":
Expand Down
Original file line number Diff line number Diff line change
@@ -1,25 +1,17 @@
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "autogen-agentchat",
# "autogen-ext[openai]",
# ]
# ///
# Run with any PEP 723 compatible runner, e.g.:
# uv run samples/autogen-migration/orchestrations/01_round_robin_group_chat.py

# Copyright (c) Microsoft. All rights reserved.
"""AutoGen RoundRobinGroupChat vs Agent Framework GroupChatBuilder/SequentialBuilder.

Demonstrates sequential agent orchestration where agents take turns processing
the task in a round-robin fashion.
"""

import asyncio

from agent_framework import Message
from dotenv import load_dotenv

"""AutoGen RoundRobinGroupChat vs Agent Framework GroupChatBuilder/SequentialBuilder.

Demonstrates sequential agent orchestration where agents take turns processing
the task in a round-robin fashion.
"""

# Load environment variables from .env file
load_dotenv()

Expand Down Expand Up @@ -98,7 +90,7 @@ async def run_agent_framework() -> None:
print("[Agent Framework] Sequential conversation:")
async for event in workflow.run("Create a brief summary about electric vehicles", stream=True):
if event.type == "output" and isinstance(event.data, list):
for message in event.data:
for message in event.data: # type: ignore
if isinstance(message, Message) and message.role == "assistant" and message.text:
print(f"---------- {message.author_name} ----------")
print(message.text)
Expand Down Expand Up @@ -144,9 +136,7 @@ async def check_approval(
if last_message and "APPROVED" in last_message.text:
await context.yield_output("Content approved.")
else:
await context.send_message(
AgentExecutorRequest(messages=response.full_conversation, should_respond=True)
)
await context.send_message(AgentExecutorRequest(messages=response.full_conversation, should_respond=True))

workflow = (
WorkflowBuilder(start_executor=researcher)
Expand Down
Original file line number Diff line number Diff line change
@@ -1,25 +1,17 @@
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "autogen-agentchat",
# "autogen-ext[openai]",
# ]
# ///
# Run with any PEP 723 compatible runner, e.g.:
# uv run samples/autogen-migration/orchestrations/02_selector_group_chat.py

# Copyright (c) Microsoft. All rights reserved.
"""AutoGen SelectorGroupChat vs Agent Framework GroupChatBuilder.

Demonstrates LLM-based speaker selection where an orchestrator decides
which agent should speak next based on the conversation context.
"""

import asyncio

from agent_framework import Message
from dotenv import load_dotenv

"""AutoGen SelectorGroupChat vs Agent Framework GroupChatBuilder.

Demonstrates LLM-based speaker selection where an orchestrator decides
which agent should speak next based on the conversation context.
"""

# Load environment variables from .env file
load_dotenv()

Expand Down Expand Up @@ -113,7 +105,7 @@ async def run_agent_framework() -> None:
print("[Agent Framework] Group chat conversation:")
async for event in workflow.run("How do I connect to a PostgreSQL database using Python?", stream=True):
if event.type == "output" and isinstance(event.data, list):
for message in event.data:
for message in event.data: # type: ignore
if isinstance(message, Message) and message.role == "assistant" and message.text:
print(f"---------- {message.author_name} ----------")
print(message.text)
Expand Down
21 changes: 6 additions & 15 deletions python/samples/autogen-migration/orchestrations/03_swarm.py
Original file line number Diff line number Diff line change
@@ -1,26 +1,17 @@
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "autogen-agentchat",
# "autogen-ext[openai]",
# ]
# ///
# Run with any PEP 723 compatible runner, e.g.:
# uv run samples/autogen-migration/orchestrations/03_swarm.py

# Copyright (c) Microsoft. All rights reserved.
"""AutoGen Swarm pattern vs Agent Framework HandoffBuilder.

Demonstrates agent handoff coordination where agents can transfer control
to other specialized agents based on the task requirements.
"""

import asyncio
from typing import Any

from agent_framework import AgentResponseUpdate, WorkflowEvent
from dotenv import load_dotenv

"""AutoGen Swarm pattern vs Agent Framework HandoffBuilder.

Demonstrates agent handoff coordination where agents can transfer control
to other specialized agents based on the task requirements.
"""

# Load environment variables from .env file
load_dotenv()

Expand Down
21 changes: 6 additions & 15 deletions python/samples/autogen-migration/orchestrations/04_magentic_one.py
Original file line number Diff line number Diff line change
@@ -1,19 +1,4 @@
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "autogen-agentchat",
# "autogen-ext[openai]",
# ]
# ///
# Run with any PEP 723 compatible runner, e.g.:
# uv run samples/autogen-migration/orchestrations/04_magentic_one.py

# Copyright (c) Microsoft. All rights reserved.
"""AutoGen MagenticOneGroupChat vs Agent Framework MagenticBuilder.

Demonstrates orchestrated multi-agent workflows with a central coordinator
managing specialized agents for complex tasks.
"""

import asyncio
import json
Expand All @@ -27,6 +12,12 @@
from agent_framework.orchestrations import MagenticProgressLedger
from dotenv import load_dotenv

"""AutoGen MagenticOneGroupChat vs Agent Framework MagenticBuilder.

Demonstrates orchestrated multi-agent workflows with a central coordinator
managing specialized agents for complex tasks.
"""

# Load environment variables from .env file
load_dotenv()

Expand Down
Original file line number Diff line number Diff line change
@@ -1,25 +1,16 @@
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "autogen-agentchat",
# "autogen-ext[openai]",
# ]
# ///
# Run with any PEP 723 compatible runner, e.g.:
# uv run samples/autogen-migration/single_agent/01_basic_assistant_agent.py

# Copyright (c) Microsoft. All rights reserved.

import asyncio

from dotenv import load_dotenv

"""Basic AutoGen AssistantAgent vs Agent Framework Agent.

Both samples expect OpenAI-compatible environment variables (OPENAI_API_KEY or
Azure OpenAI configuration). Update the prompts or client wiring to match your
model of choice before running.
"""

import asyncio

from dotenv import load_dotenv

# Load environment variables from .env file
load_dotenv()

Expand Down
Original file line number Diff line number Diff line change
@@ -1,24 +1,14 @@
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "autogen-agentchat",
# "autogen-core",
# "autogen-ext[openai]",
# ]
# ///
# Run with any PEP 723 compatible runner, e.g.:
# uv run samples/autogen-migration/single_agent/02_assistant_agent_with_tool.py

# Copyright (c) Microsoft. All rights reserved.
"""AutoGen AssistantAgent vs Agent Framework Agent with function tools.

Demonstrates how to create and attach tools to agents in both frameworks.
"""

import asyncio

from dotenv import load_dotenv

"""AutoGen AssistantAgent vs Agent Framework Agent with function tools.

Demonstrates how to create and attach tools to agents in both frameworks.
"""

# Load environment variables from .env file
load_dotenv()

Expand Down
Original file line number Diff line number Diff line change
@@ -1,23 +1,14 @@
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "autogen-agentchat",
# "autogen-ext[openai]",
# ]
# ///
# Run with any PEP 723 compatible runner, e.g.:
# uv run samples/autogen-migration/single_agent/03_assistant_agent_thread_and_stream.py

# Copyright (c) Microsoft. All rights reserved.
"""AutoGen vs Agent Framework: Thread management and streaming responses.

Demonstrates conversation state management and streaming in both frameworks.
"""

import asyncio

from dotenv import load_dotenv

"""AutoGen vs Agent Framework: Thread management and streaming responses.

Demonstrates conversation state management and streaming in both frameworks.
"""

# Load environment variables from .env file
load_dotenv()

Expand Down
20 changes: 6 additions & 14 deletions python/samples/autogen-migration/single_agent/04_agent_as_tool.py
Original file line number Diff line number Diff line change
@@ -1,24 +1,15 @@
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "autogen-agentchat",
# "autogen-ext[openai]",
# ]
# ///
# Run with any PEP 723 compatible runner, e.g.:
# uv run samples/autogen-migration/single_agent/04_agent_as_tool.py

# Copyright (c) Microsoft. All rights reserved.

import asyncio

from dotenv import load_dotenv

"""AutoGen vs Agent Framework: Agent-as-a-Tool pattern.

Demonstrates hierarchical agent architectures where one agent delegates
work to specialized sub-agents wrapped as tools.
"""

import asyncio

from dotenv import load_dotenv

# Load environment variables from .env file
load_dotenv()

Expand Down Expand Up @@ -107,6 +98,7 @@ async def run_agent_framework() -> None:
if content.type == "function_call":
# Accumulate function call content as it streams in
call_id = content.call_id
assert call_id is not None, "Function call content must have a call_id"
if call_id in accumulated_calls:
# Add to existing call (arguments stream in gradually)
accumulated_calls[call_id] = accumulated_calls[call_id] + content
Expand Down
11 changes: 5 additions & 6 deletions python/scripts/sample_validation/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -165,12 +165,11 @@ Produces:

## Report Status Codes

| Status | Label | Description |
| ------- | --------- | ----------------------------------------- |
| SUCCESS | [PASS] | Sample ran to completion with exit code 0 |
| FAILURE | [FAIL] | Sample exited with non-zero code |
| TIMEOUT | [TIMEOUT] | Sample exceeded timeout limit |
| ERROR | [ERROR] | Exception during execution |
| Status | Label | Description |
| ------------- | --------------- | ----------------------------------------- |
| SUCCESS | [PASS] | Sample ran to completion with exit code 0 |
| FAILURE | [FAIL] | Sample did not complete successfully (non-zero exit code) |
| MISSING_SETUP | [MISSING_SETUP] | Sample skipped due to missing setup |
Comment on lines +168 to +172
Copy link

Copilot AI Mar 23, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Documentation is inconsistent: the status code table only lists SUCCESS/FAILURE/MISSING_SETUP, but the Troubleshooting section below still says samples can be marked as ERROR. Either reintroduce an ERROR status in the models/reporting, or update the Troubleshooting guidance to match the new status taxonomy.

Copilot uses AI. Check for mistakes.

## Troubleshooting

Expand Down
10 changes: 9 additions & 1 deletion python/scripts/sample_validation/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,13 @@ def parse_arguments() -> argparse.Namespace:
help="Custom name for the report files (without extension). If not provided, uses timestamp.",
)

parser.add_argument(
"--exclude",
nargs="+",
type=str,
help="Subdirectory paths to exclude (relative to the search directory set by --subdir)",
)

return parser.parse_args()


Expand Down Expand Up @@ -104,6 +111,7 @@ async def main() -> int:
samples_dir=samples_dir,
python_root=python_root,
subdir=args.subdir,
exclude=args.exclude,
max_parallel_workers=max(1, args.max_parallel_workers),
)

Expand Down Expand Up @@ -138,7 +146,7 @@ async def main() -> int:
print(f" JSON: {json_path}")

# Return appropriate exit code
failed = report.failure_count + report.timeout_count + report.error_count
failed = report.failure_count + report.missing_setup_count
return 1 if failed > 0 else 0


Expand Down
Loading
Loading