Skip to content

Commit 4b9856e

Browse files
Python: updated azure ai inference sample (#5028)
* updated azure ai inference sample * openai multimodal fix * update language
1 parent acaadc9 commit 4b9856e

4 files changed

Lines changed: 37 additions & 12 deletions

File tree

python/packages/core/agent_framework/azure/__init__.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,10 @@
1515
"AzureAISearchContextProvider": ("agent_framework_azure_ai_search", "agent-framework-azure-ai-search"),
1616
"AzureAISearchSettings": ("agent_framework_azure_ai_search", "agent-framework-azure-ai-search"),
1717
"AzureAISettings": ("agent_framework_azure_ai", "agent-framework-azure-ai"),
18+
"AzureAIInferenceEmbeddingClient": ("agent_framework_azure_ai", "agent-framework-azure-ai"),
19+
"AzureAIInferenceEmbeddingOptions": ("agent_framework_azure_ai", "agent-framework-azure-ai"),
20+
"AzureAIInferenceEmbeddingSettings": ("agent_framework_azure_ai", "agent-framework-azure-ai"),
21+
"RawAzureAIInferenceEmbeddingClient": ("agent_framework_azure_ai", "agent-framework-azure-ai"),
1822
"AzureCredentialTypes": ("agent_framework_azure_ai", "agent-framework-azure-ai"),
1923
"AzureTokenProvider": ("agent_framework_azure_ai", "agent-framework-azure-ai"),
2024
"DurableAIAgent": ("agent_framework_durabletask", "agent-framework-durabletask"),

python/packages/core/agent_framework/azure/__init__.pyi

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,13 @@
44
# Install the relevant packages for full type support.
55

66
from agent_framework_azure_ai import (
7+
AzureAIInferenceEmbeddingClient,
8+
AzureAIInferenceEmbeddingOptions,
9+
AzureAIInferenceEmbeddingSettings,
710
AzureAISettings,
811
AzureCredentialTypes,
912
AzureTokenProvider,
13+
RawAzureAIInferenceEmbeddingClient,
1014
)
1115
from agent_framework_azure_ai_search import (
1216
AzureAISearchContextProvider,
@@ -26,6 +30,9 @@ __all__ = [
2630
"AgentCallbackContext",
2731
"AgentFunctionApp",
2832
"AgentResponseCallbackProtocol",
33+
"AzureAIInferenceEmbeddingClient",
34+
"AzureAIInferenceEmbeddingOptions",
35+
"AzureAIInferenceEmbeddingSettings",
2936
"AzureAISearchContextProvider",
3037
"AzureAISearchSettings",
3138
"AzureAISettings",
@@ -35,4 +42,5 @@ __all__ = [
3542
"DurableAIAgentClient",
3643
"DurableAIAgentOrchestrationContext",
3744
"DurableAIAgentWorker",
45+
"RawAzureAIInferenceEmbeddingClient",
3846
]

python/samples/02-agents/embeddings/azure_ai_inference_embeddings.py

Lines changed: 14 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
import pathlib
1313

1414
from agent_framework import Content
15-
from agent_framework_azure_ai import AzureAIInferenceEmbeddingClient
15+
from agent_framework.azure import AzureAIInferenceEmbeddingClient
1616
from dotenv import load_dotenv
1717

1818
load_dotenv()
@@ -24,8 +24,12 @@
2424
Images are passed as ``Content`` objects created with ``Content.from_data()``.
2525
2626
Prerequisites:
27-
Set the following environment variables or add them to a .env file:
28-
- AZURE_AI_INFERENCE_ENDPOINT: Your Azure AI model inference endpoint URL
27+
Deploy an embedding model in Azure AI Inference that supports image inputs, such as Cohere-embed-v3-english.
28+
29+
The details page for that model has a target URI and a Key, which should be set in environment variables or a .env
30+
file as follows; the target URI should have the `/models` path appended:
31+
- AZURE_AI_INFERENCE_ENDPOINT: Your Azure AI model inference endpoint URL, for instance:
32+
https://<apim-instance>.azure-api.net/<foundry-instance>/models
2933
- AZURE_AI_INFERENCE_API_KEY: Your API key
3034
- AZURE_AI_INFERENCE_EMBEDDING_MODEL_ID: The text embedding model name
3135
(e.g. "text-embedding-3-small")
@@ -73,15 +77,17 @@ async def main() -> None:
7377

7478

7579
"""
76-
Sample output (using Cohere-embed-v3-english):
80+
Sample output (using deployment: Cohere-embed-v3-english, which is Cohere's "embed-english-v3.0-image" model):
7781
Image embedding dimensions: 1024
78-
First 5 values: [0.023, -0.045, 0.067, -0.089, 0.011]
79-
Model: Cohere-embed-v3-english
80-
Usage: {'prompt_tokens': 1, 'total_tokens': 1}
82+
First 5 values: [0.029159546, -0.007926941, -0.0032978058, -0.0030403137, -0.012786865]
83+
Model: embed-english-v3.0-image
84+
Usage: {'input_token_count': 1000, 'output_token_count': 0}
8185
82-
Image+text (separate) results:
8386
Text embedding dimensions: 1536
87+
First 5 values: [-0.019439403, 0.015791258, 0.012358093, 0.0028533707, -0.01649483]
8488
Image embedding dimensions: 1024
89+
First 5 values: [0.029159546, -0.007926941, -0.0032978058, -0.0030403137, -0.012786865]
8590
8691
Document embedding dimensions: 1024
92+
First 5 values: [0.029159546, -0.007926941, -0.0032978058, -0.0030403137, -0.012786865]
8793
"""

python/samples/02-agents/multimodal_input/openai_chat_multimodal.py

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6,14 +6,21 @@
66
from pathlib import Path
77

88
from agent_framework import Content, Message
9-
from agent_framework.foundry import FoundryChatClient
9+
from agent_framework.openai import OpenAIChatClient, OpenAIChatCompletionClient
1010
from dotenv import load_dotenv
1111

1212
# Load environment variables from .env file
1313
load_dotenv()
1414

1515
ASSETS_DIR = Path(__file__).resolve().parents[2] / "shared" / "sample_assets"
1616

17+
"""
18+
Leverage multimodal capabilities of different models.
19+
20+
Uses the OpenAIChatClient and OpenAIChatCompletionClient to demonstrate multimodal input handling with the gpt-4o and gpt-4o-audio-preview models, respectively. The sample includes demonstrations for image, audio, and PDF inputs, showcasing how to create appropriate Content objects and send them in messages to the chat clients.
21+
22+
"""
23+
1724

1825
def load_sample_pdf() -> bytes:
1926
"""Read the bundled sample PDF for tests."""
@@ -46,7 +53,7 @@ def create_sample_audio() -> str:
4653

4754
async def test_image() -> None:
4855
"""Test image analysis with OpenAI."""
49-
client = FoundryChatClient(model="gpt-4o")
56+
client = OpenAIChatClient(model="gpt-4o")
5057

5158
image_uri = create_sample_image()
5259
message = Message(
@@ -63,7 +70,7 @@ async def test_image() -> None:
6370

6471
async def test_audio() -> None:
6572
"""Test audio analysis with OpenAI."""
66-
client = FoundryChatClient(model="gpt-4o-audio-preview")
73+
client = OpenAIChatCompletionClient(model="gpt-4o-audio-preview-2025-06-03")
6774

6875
audio_uri = create_sample_audio()
6976
message = Message(
@@ -80,7 +87,7 @@ async def test_audio() -> None:
8087

8188
async def test_pdf() -> None:
8289
"""Test PDF document analysis with OpenAI."""
83-
client = FoundryChatClient(model="gpt-4o")
90+
client = OpenAIChatClient(model="gpt-4o")
8491

8592
pdf_bytes = load_sample_pdf()
8693
message = Message(

0 commit comments

Comments
 (0)