Skip to content

Commit 2faa4b1

Browse files
authored
feat(anthropic): Set gen_ai.response.id span attribute (#5662)
Set the `gen_ai.response.id` property on spans created by the Anthropic integration. For non-streaming responses, the ID is read from `result.id` on the Message object. For streaming responses, it's captured from `event.message.id` in the `message_start` event and threaded through the iterator to be set when the stream completes. The `_collect_ai_data` function's return tuple is extended with the new `response_id` field, and `_set_output_data` accepts an optional `response_id` parameter to set on the span. Refs PY-2137 Closes #5659
1 parent a3ee8aa commit 2faa4b1

File tree

2 files changed: +25 additions, −3 deletions

sentry_sdk/integrations/anthropic.py

Lines changed: 18 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -126,7 +126,8 @@ def _collect_ai_data(
126126
model: "str | None",
127127
usage: "_RecordedUsage",
128128
content_blocks: "list[str]",
129-
) -> "tuple[str | None, _RecordedUsage, list[str]]":
129+
response_id: "str | None" = None,
130+
) -> "tuple[str | None, _RecordedUsage, list[str], str | None]":
130131
"""
131132
Collect model information, token usage, and collect content blocks from the AI streaming response.
132133
"""
@@ -146,6 +147,7 @@ def _collect_ai_data(
146147
# https://github.com/anthropics/anthropic-sdk-python/blob/9c485f6966e10ae0ea9eabb3a921d2ea8145a25b/src/anthropic/lib/streaming/_messages.py#L433-L518
147148
if event.type == "message_start":
148149
model = event.message.model or model
150+
response_id = event.message.id
149151

150152
incoming_usage = event.message.usage
151153
usage.output_tokens = incoming_usage.output_tokens
@@ -162,6 +164,7 @@ def _collect_ai_data(
162164
model,
163165
usage,
164166
content_blocks,
167+
response_id,
165168
)
166169

167170
# Counterintuitive, but message_delta contains cumulative token counts :)
@@ -190,12 +193,14 @@ def _collect_ai_data(
190193
model,
191194
usage,
192195
content_blocks,
196+
response_id,
193197
)
194198

195199
return (
196200
model,
197201
usage,
198202
content_blocks,
203+
response_id,
199204
)
200205

201206

@@ -349,10 +354,13 @@ def _set_output_data(
349354
cache_write_input_tokens: "int | None",
350355
content_blocks: "list[Any]",
351356
finish_span: bool = False,
357+
response_id: "str | None" = None,
352358
) -> None:
353359
"""
354360
Set output data for the span based on the AI response."""
355361
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, model)
362+
if response_id is not None:
363+
span.set_data(SPANDATA.GEN_AI_RESPONSE_ID, response_id)
356364
if should_send_default_pii() and integration.include_prompts:
357365
output_messages: "dict[str, list[Any]]" = {
358366
"response": [],
@@ -444,6 +452,7 @@ def _sentry_patched_create_common(f: "Any", *args: "Any", **kwargs: "Any") -> "A
444452
cache_write_input_tokens=cache_write_input_tokens,
445453
content_blocks=content_blocks,
446454
finish_span=True,
455+
response_id=getattr(result, "id", None),
447456
)
448457

449458
# Streaming response
@@ -454,17 +463,20 @@ def new_iterator() -> "Iterator[MessageStreamEvent]":
454463
model = None
455464
usage = _RecordedUsage()
456465
content_blocks: "list[str]" = []
466+
response_id = None
457467

458468
for event in old_iterator:
459469
(
460470
model,
461471
usage,
462472
content_blocks,
473+
response_id,
463474
) = _collect_ai_data(
464475
event,
465476
model,
466477
usage,
467478
content_blocks,
479+
response_id,
468480
)
469481
yield event
470482

@@ -486,23 +498,27 @@ def new_iterator() -> "Iterator[MessageStreamEvent]":
486498
cache_write_input_tokens=usage.cache_write_input_tokens,
487499
content_blocks=[{"text": "".join(content_blocks), "type": "text"}],
488500
finish_span=True,
501+
response_id=response_id,
489502
)
490503

491504
async def new_iterator_async() -> "AsyncIterator[MessageStreamEvent]":
492505
model = None
493506
usage = _RecordedUsage()
494507
content_blocks: "list[str]" = []
508+
response_id = None
495509

496510
async for event in old_iterator:
497511
(
498512
model,
499513
usage,
500514
content_blocks,
515+
response_id,
501516
) = _collect_ai_data(
502517
event,
503518
model,
504519
usage,
505520
content_blocks,
521+
response_id,
506522
)
507523
yield event
508524

@@ -524,6 +540,7 @@ async def new_iterator_async() -> "AsyncIterator[MessageStreamEvent]":
524540
cache_write_input_tokens=usage.cache_write_input_tokens,
525541
content_blocks=[{"text": "".join(content_blocks), "type": "text"}],
526542
finish_span=True,
543+
response_id=response_id,
527544
)
528545

529546
if str(type(result._iterator)) == "<class 'async_generator'>":

tests/integrations/anthropic/test_anthropic.py

Lines changed: 7 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -58,7 +58,7 @@ async def __call__(self, *args, **kwargs):
5858
ANTHROPIC_VERSION = package_version("anthropic")
5959

6060
EXAMPLE_MESSAGE = Message(
61-
id="id",
61+
id="msg_01XFDUDYJgAACzvnptvVoYEL",
6262
model="model",
6363
role="assistant",
6464
content=[TextBlock(type="text", text="Hi, I'm Claude.")],
@@ -135,6 +135,7 @@ def test_nonstreaming_create_message(
135135
assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20
136136
assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30
137137
assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False
138+
assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL"
138139

139140

140141
@pytest.mark.asyncio
@@ -206,6 +207,7 @@ async def test_nonstreaming_create_message_async(
206207
assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20
207208
assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30
208209
assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False
210+
assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL"
209211

210212

211213
@pytest.mark.parametrize(
@@ -309,6 +311,7 @@ def test_streaming_create_message(
309311
assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10
310312
assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20
311313
assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True
314+
assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL"
312315

313316

314317
@pytest.mark.asyncio
@@ -415,6 +418,7 @@ async def test_streaming_create_message_async(
415418
assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10
416419
assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20
417420
assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True
421+
assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL"
418422

419423

420424
@pytest.mark.skipif(
@@ -862,13 +866,14 @@ def test_collect_ai_data_with_input_json_delta():
862866

863867
content_blocks = []
864868

865-
model, new_usage, new_content_blocks = _collect_ai_data(
869+
model, new_usage, new_content_blocks, response_id = _collect_ai_data(
866870
event, model, usage, content_blocks
867871
)
868872
assert model is None
869873
assert new_usage.input_tokens == usage.input_tokens
870874
assert new_usage.output_tokens == usage.output_tokens
871875
assert new_content_blocks == ["test"]
876+
assert response_id is None
872877

873878

874879
@pytest.mark.skipif(

Commit comments: 0