Skip to content

Commit 7c2dae8

Browse files
giles17 and Copilot authored
Python: Fix sample bugs: incorrect API params, wrong client types, and invalid options (#4983)
* Fix sample bugs: incorrect API params, wrong client types, and invalid options - typed_options.py: Fix AnthropicClient model->model_id, wrap raw strings in Message objects for get_response(), fix reasoning_effort->reasoning dict, fix budget_tokens minimum (1024), use OpenAIChatClient not FoundryChatClient, remove unused import - client_reasoning.py: Fix deprecated model_id to model param - client_with_hosted_mcp.py: Remove invalid store=True kwarg from Agent.run() - code_defined_skill.py: Fix precision kwarg to use function_invocation_kwargs - Various other samples: Fix deprecated API usage and incorrect params Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> * Address PR review comments - client_with_hosted_mcp.py: Fix remaining store=True kwarg on line 68 to use options dict - client_with_session.py: Change store=True to store=False to match in-memory persistence demo intent - typed_options.py: Remove non-existent import and model key from docstring example Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> * new sample fixes --------- Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
1 parent 3d09337 commit 7c2dae8

13 files changed

Lines changed: 34 additions & 41 deletions

python/samples/02-agents/background_responses.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
import asyncio
44

55
from agent_framework import Agent
6-
from agent_framework.openai import OpenAIResponsesClient
6+
from agent_framework.openai import OpenAIChatClient
77
from dotenv import load_dotenv
88

99
# Load environment variables from .env file
@@ -29,7 +29,7 @@
2929
agent = Agent(
3030
name="researcher",
3131
instructions="You are a helpful research assistant. Be concise.",
32-
client=OpenAIResponsesClient(model="o3"),
32+
client=OpenAIChatClient(model="o3"),
3333
)
3434

3535

python/samples/02-agents/chat_client/chat_response_cancellation.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44

55
from agent_framework import Message
66
from agent_framework.foundry import FoundryChatClient
7+
from azure.identity import AzureCliCredential
78
from dotenv import load_dotenv
89

910
# Load environment variables from .env file
@@ -26,7 +27,7 @@ async def main() -> None:
2627
- OpenAI model ID: Use "model_id" parameter or "OPENAI_MODEL" environment variable
2728
- OpenAI API key: Use "api_key" parameter or "OPENAI_API_KEY" environment variable
2829
"""
29-
client = FoundryChatClient()
30+
client = FoundryChatClient(credential=AzureCliCredential())
3031

3132
try:
3233
task = asyncio.create_task(

python/samples/02-agents/providers/anthropic/anthropic_basic.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ async def non_streaming_example() -> None:
3535
print("=== Non-streaming Response Example ===")
3636

3737
agent = Agent(
38-
client=AnthropicClient(),
38+
client=AnthropicClient(model_id="claude-sonnet-4-5-20250929"),
3939
name="WeatherAgent",
4040
instructions="You are a helpful weather agent.",
4141
tools=get_weather,
@@ -52,7 +52,7 @@ async def streaming_example() -> None:
5252
print("=== Streaming Response Example ===")
5353

5454
agent = Agent(
55-
client=AnthropicClient(),
55+
client=AnthropicClient(model_id="claude-sonnet-4-5-20250929"),
5656
name="WeatherAgent",
5757
instructions="You are a helpful weather agent.",
5858
tools=get_weather,

python/samples/02-agents/providers/custom/custom_agent.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,9 +51,9 @@ def __init__(
5151
super().__init__(
5252
name=name,
5353
description=description,
54-
echo_prefix=echo_prefix, # type: ignore
5554
**kwargs,
5655
)
56+
self.echo_prefix = echo_prefix
5757

5858
def run(
5959
self,

python/samples/02-agents/providers/openai/client_reasoning.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525

2626

2727
agent = Agent(
28-
client=OpenAIChatClient[OpenAIChatOptions](model_id="gpt-5"),
28+
client=OpenAIChatClient[OpenAIChatOptions](model="gpt-5"),
2929
name="MathHelper",
3030
instructions="You are a personal math tutor. When asked a math question, "
3131
"reason over how best to approach the problem and share your thought process.",

python/samples/02-agents/providers/openai/client_with_hosted_mcp.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ async def handle_approvals_with_session(query: str, agent: "SupportsAgentRun", s
5050
"""Here we let the session deal with the previous responses, and we just rerun with the approval."""
5151
from agent_framework import Message
5252

53-
result = await agent.run(query, session=session, store=True)
53+
result = await agent.run(query, session=session, options={"store": True})
5454
while len(result.user_input_requests) > 0:
5555
new_input: list[Any] = []
5656
for user_input_needed in result.user_input_requests:
@@ -65,7 +65,7 @@ async def handle_approvals_with_session(query: str, agent: "SupportsAgentRun", s
6565
contents=[user_input_needed.to_function_approval_response(user_approval.lower() == "y")],
6666
)
6767
)
68-
result = await agent.run(new_input, session=session, store=True)
68+
result = await agent.run(new_input, session=session, options={"store": True})
6969
return result
7070

7171

python/samples/02-agents/providers/openai/client_with_session.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -75,19 +75,19 @@ async def example_with_session_persistence_in_memory() -> None:
7575
# First conversation
7676
query1 = "What's the weather like in Tokyo?"
7777
print(f"User: {query1}")
78-
result1 = await agent.run(query1, session=session, store=False)
78+
result1 = await agent.run(query1, session=session, options={"store": False})
7979
print(f"Agent: {result1.text}")
8080

8181
# Second conversation using the same session - maintains context
8282
query2 = "How about London?"
8383
print(f"\nUser: {query2}")
84-
result2 = await agent.run(query2, session=session, store=False)
84+
result2 = await agent.run(query2, session=session, options={"store": False})
8585
print(f"Agent: {result2.text}")
8686

8787
# Third conversation - agent should remember both previous cities
8888
query3 = "Which of the cities I asked about has better weather?"
8989
print(f"\nUser: {query3}")
90-
result3 = await agent.run(query3, session=session, store=False)
90+
result3 = await agent.run(query3, session=session, options={"store": False})
9191
print(f"Agent: {result3.text}")
9292
print("Note: The agent remembers context from previous messages in the same session.\n")
9393

python/samples/02-agents/skills/code_defined_skill/code_defined_skill.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -151,7 +151,7 @@ async def main() -> None:
151151
print("-" * 60)
152152
response = await agent.run(
153153
"How many kilometers is a marathon (26.2 miles)? And how many pounds is 75 kilograms?",
154-
precision=2,
154+
function_invocation_kwargs={"precision": 2},
155155
)
156156
print(f"Agent: {response}\n")
157157

python/samples/02-agents/tools/agent_as_tool_with_session_propagation.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
from collections.abc import Awaitable, Callable
55

66
from agent_framework import Agent, AgentContext, AgentSession, FunctionInvocationContext, tool
7-
from agent_framework.openai import OpenAIResponsesClient
7+
from agent_framework.openai import OpenAIChatClient
88
from dotenv import load_dotenv
99

1010
load_dotenv()
@@ -63,7 +63,7 @@ def recall_findings(ctx: FunctionInvocationContext) -> str:
6363
async def main() -> None:
6464
print("=== Agent-as-Tool: Session Propagation ===\n")
6565

66-
client = OpenAIResponsesClient()
66+
client = OpenAIChatClient()
6767

6868
research_agent = Agent(
6969
client=client,

python/samples/02-agents/tools/control_total_tool_executions.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
from typing import Annotated
55

66
from agent_framework import Agent, tool
7-
from agent_framework.openai import OpenAIResponsesClient
7+
from agent_framework.openai import OpenAIChatClient
88
from dotenv import load_dotenv
99

1010
# Load environment variables from .env file
@@ -81,7 +81,7 @@ async def scenario_max_iterations():
8181
print("Scenario 1: max_iterations — limit LLM roundtrips")
8282
print("=" * 60)
8383

84-
client = OpenAIResponsesClient()
84+
client = OpenAIChatClient()
8585

8686
# 1. Set max_iterations to 3 — the tool loop will run at most 3 roundtrips
8787
# to the model before forcing a text response.
@@ -116,7 +116,7 @@ async def scenario_max_function_calls():
116116
print("Scenario 2: max_function_calls — limit total tool executions")
117117
print("=" * 60)
118118

119-
client = OpenAIResponsesClient()
119+
client = OpenAIChatClient()
120120

121121
# 1. Allow many iterations but cap total function calls to 4.
122122
# If the model requests 3 parallel searches per iteration, after 2
@@ -158,7 +158,7 @@ async def scenario_max_invocations():
158158
print("=" * 60)
159159

160160
agent = Agent(
161-
client=OpenAIResponsesClient(),
161+
client=OpenAIChatClient(),
162162
name="APIAgent",
163163
instructions="Use call_expensive_api when asked to analyze something.",
164164
tools=[call_expensive_api],
@@ -214,7 +214,7 @@ def _do_lookup(query: Annotated[str, "Search query."]) -> str:
214214
agent_a_lookup = tool(name="lookup", approval_mode="never_require", max_invocations=2)(_do_lookup)
215215
agent_b_lookup = tool(name="lookup", approval_mode="never_require", max_invocations=5)(_do_lookup)
216216

217-
client = OpenAIResponsesClient()
217+
client = OpenAIChatClient()
218218
agent_a = Agent(
219219
client=client,
220220
name="AgentA",
@@ -259,7 +259,7 @@ async def scenario_combined():
259259
print("Scenario 5: Combined — all mechanisms together")
260260
print("=" * 60)
261261

262-
client = OpenAIResponsesClient()
262+
client = OpenAIChatClient()
263263

264264
# 1. Configure the client with both iteration and function call limits.
265265
client.function_invocation_configuration["max_iterations"] = 5 # max 5 LLM roundtrips

0 commit comments

Comments (0)