diff --git a/openai_agents/model_providers/run_gpt_oss_worker.py b/openai_agents/model_providers/run_gpt_oss_worker.py
index 88c85fca..59798a1d 100644
--- a/openai_agents/model_providers/run_gpt_oss_worker.py
+++ b/openai_agents/model_providers/run_gpt_oss_worker.py
@@ -3,7 +3,12 @@
 from datetime import timedelta
 from typing import Optional
 
-from agents import Model, ModelProvider, OpenAIChatCompletionsModel
+from agents import (
+    Model,
+    ModelProvider,
+    OpenAIChatCompletionsModel,
+    set_tracing_disabled,
+)
 from openai import AsyncOpenAI
 from temporalio.client import Client
 from temporalio.contrib.openai_agents import ModelActivityParameters, OpenAIAgentsPlugin
@@ -27,6 +32,11 @@ def get_model(self, model_name: Optional[str]) -> Model:
 
 
 async def main():
+    # Disable Agents SDK tracing — the default exporter sends traces to OpenAI's
+    # backend, which requires an OpenAI API key not available in these samples.
+    # Call here rather than in the workflow because it's a global side effect.
+    set_tracing_disabled(disabled=True)
+
     # Configure logging to show workflow debug messages
     logging.basicConfig(level=logging.WARNING)
     logging.getLogger("temporalio.workflow").setLevel(logging.DEBUG)
diff --git a/openai_agents/model_providers/run_litellm_provider_worker.py b/openai_agents/model_providers/run_litellm_provider_worker.py
index 5eb8b8b2..3441013c 100644
--- a/openai_agents/model_providers/run_litellm_provider_worker.py
+++ b/openai_agents/model_providers/run_litellm_provider_worker.py
@@ -1,6 +1,7 @@
 import asyncio
 from datetime import timedelta
 
+from agents import set_tracing_disabled
 from agents.extensions.models.litellm_provider import LitellmProvider
 from temporalio.client import Client
 from temporalio.contrib.openai_agents import ModelActivityParameters, OpenAIAgentsPlugin
@@ -12,6 +13,11 @@
 
 
 async def main():
+    # Disable Agents SDK tracing — the default exporter sends traces to OpenAI's
+    # backend, which requires an OpenAI API key not available in these samples.
+    # Call here rather than in the workflow because it's a global side effect.
+    set_tracing_disabled(disabled=True)
+
     # Create client connected to server at the given address
     client = await Client.connect(
         "localhost:7233",
diff --git a/openai_agents/model_providers/workflows/gpt_oss_workflow.py b/openai_agents/model_providers/workflows/gpt_oss_workflow.py
index d7ed8021..821844b3 100644
--- a/openai_agents/model_providers/workflows/gpt_oss_workflow.py
+++ b/openai_agents/model_providers/workflows/gpt_oss_workflow.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from agents import Agent, Runner, function_tool, set_tracing_disabled
+from agents import Agent, Runner, function_tool
 from temporalio import workflow
 
 
@@ -8,8 +8,6 @@
 class GptOssWorkflow:
     @workflow.run
     async def run(self, prompt: str) -> str:
-        set_tracing_disabled(disabled=True)
-
         @function_tool
         def get_weather(city: str):
             workflow.logger.debug(f"Getting weather for {city}")
diff --git a/openai_agents/model_providers/workflows/litellm_auto_workflow.py b/openai_agents/model_providers/workflows/litellm_auto_workflow.py
index 4a67ded4..9fe3d40b 100644
--- a/openai_agents/model_providers/workflows/litellm_auto_workflow.py
+++ b/openai_agents/model_providers/workflows/litellm_auto_workflow.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from agents import Agent, Runner, function_tool, set_tracing_disabled
+from agents import Agent, Runner, function_tool
 from temporalio import workflow
 
 
@@ -8,8 +8,6 @@
 class LitellmAutoWorkflow:
     @workflow.run
     async def run(self, prompt: str) -> str:
-        set_tracing_disabled(disabled=True)
-
         @function_tool
         def get_weather(city: str):
             return f"The weather in {city} is sunny."