Skip to content

Commit e5fc758

Browse files
authored
feat(firebaseai): implicit caching, add metadata (#17979)
* add metadata for caching
* fix analyzer
* fix format
1 parent b3caa54 commit e5fc758

3 files changed

Lines changed: 86 additions & 1 deletion

File tree

packages/firebase_ai/firebase_ai/example/lib/pages/token_count_page.dart

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,17 @@ class _TokenCountPageState extends State<TokenCountPage> {
7070
child: const Text('Count Tokens'),
7171
),
7272
),
73+
const SizedBox(width: 10),
74+
Expanded(
75+
child: ElevatedButton(
76+
onPressed: !_loading
77+
? () async {
78+
await _testUsageMetadata();
79+
}
80+
: null,
81+
child: const Text('Usage Metadata'),
82+
),
83+
),
7384
],
7485
),
7586
),
@@ -94,4 +105,41 @@ class _TokenCountPageState extends State<TokenCountPage> {
94105
_loading = false;
95106
});
96107
}
108+
109+
Future<void> _testUsageMetadata() async {
110+
setState(() {
111+
_loading = true;
112+
});
113+
114+
const prompt =
115+
'Tell a story about a magic backpack and the person who found it.';
116+
final content = [Content.text(prompt)];
117+
final response = await widget.model.generateContent(content);
118+
final usageMetadata = response.usageMetadata;
119+
120+
if (usageMetadata != null) {
121+
final message = '''
122+
Usage Metadata:
123+
- promptTokenCount: ${usageMetadata.promptTokenCount}
124+
- candidatesTokenCount: ${usageMetadata.candidatesTokenCount}
125+
- totalTokenCount: ${usageMetadata.totalTokenCount}
126+
- thoughtsTokenCount: ${usageMetadata.thoughtsTokenCount}
127+
- toolUsePromptTokenCount: ${usageMetadata.toolUsePromptTokenCount}
128+
- cachedContentTokenCount: ${usageMetadata.cachedContentTokenCount}
129+
- promptTokensDetails: ${usageMetadata.promptTokensDetails?.map((d) => '${d.modality}: ${d.tokenCount}')}
130+
- candidatesTokensDetails: ${usageMetadata.candidatesTokensDetails?.map((d) => '${d.modality}: ${d.tokenCount}')}
131+
- toolUsePromptTokensDetails: ${usageMetadata.toolUsePromptTokensDetails?.map((d) => '${d.modality}: ${d.tokenCount}')}
132+
- cacheTokensDetails: ${usageMetadata.cacheTokensDetails?.map((d) => '${d.modality}: ${d.tokenCount}')}
133+
''';
134+
_messages.add(MessageData(text: message, fromUser: false));
135+
} else {
136+
_messages.add(
137+
MessageData(text: 'No usage metadata available.', fromUser: false),
138+
);
139+
}
140+
141+
setState(() {
142+
_loading = false;
143+
});
144+
}
97145
}

packages/firebase_ai/firebase_ai/lib/src/api.dart

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -191,6 +191,8 @@ final class UsageMetadata {
191191
this.promptTokensDetails,
192192
this.candidatesTokensDetails,
193193
this.toolUsePromptTokensDetails,
194+
this.cacheTokensDetails,
195+
this.cachedContentTokenCount,
194196
});
195197

196198
/// Number of tokens in the prompt.
@@ -217,6 +219,14 @@ final class UsageMetadata {
217219
/// A list of tokens used by tools whose usage was triggered from a prompt,
218220
/// broken down by modality.
219221
final List<ModalityTokenCount>? toolUsePromptTokensDetails;
222+
223+
/// The number of tokens in the prompt that were served from the cache.
224+
/// If implicit caching is not active or no content was cached, this will be 0.
225+
final int? cachedContentTokenCount;
226+
227+
/// Detailed breakdown of the cached tokens by modality (e.g., text, image).
228+
/// This list provides granular insight into which parts of the content were cached.
229+
final List<ModalityTokenCount>? cacheTokensDetails;
220230
}
221231

222232
/// Response candidate generated from a [GenerativeModel].
@@ -1526,6 +1536,16 @@ UsageMetadata parseUsageMetadata(Object jsonObject) {
15261536
toolUsePromptTokensDetails.map(_parseModalityTokenCount).toList(),
15271537
_ => null,
15281538
};
1539+
final cachedContentTokenCount = switch (jsonObject) {
1540+
{'cachedContentTokenCount': final int cachedContentTokenCount} =>
1541+
cachedContentTokenCount,
1542+
_ => null,
1543+
};
1544+
final cacheTokensDetails = switch (jsonObject) {
1545+
{'cacheTokensDetails': final List<Object?> cacheTokensDetails} =>
1546+
cacheTokensDetails.map(_parseModalityTokenCount).toList(),
1547+
_ => null,
1548+
};
15291549
return UsageMetadata._(
15301550
promptTokenCount: promptTokenCount,
15311551
candidatesTokenCount: candidatesTokenCount,
@@ -1535,6 +1555,8 @@ UsageMetadata parseUsageMetadata(Object jsonObject) {
15351555
promptTokensDetails: promptTokensDetails,
15361556
candidatesTokensDetails: candidatesTokensDetails,
15371557
toolUsePromptTokensDetails: toolUsePromptTokensDetails,
1558+
cachedContentTokenCount: cachedContentTokenCount,
1559+
cacheTokensDetails: cacheTokensDetails,
15381560
);
15391561
}
15401562

packages/firebase_ai/firebase_ai/test/response_parsing_test.dart

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -711,7 +711,12 @@ void main() {
711711
"modality": "TEXT",
712712
"tokenCount": 76
713713
}],
714-
"toolUsePromptTokenCount": 5
714+
"toolUsePromptTokenCount": 5,
715+
"cachedContentTokenCount": 10,
716+
"cacheTokensDetails": [{
717+
"modality": "TEXT",
718+
"tokenCount": 10
719+
}]
715720
}
716721
}
717722
''';
@@ -722,6 +727,16 @@ void main() {
722727
generateContentResponse.text, 'Here is a description of the image:');
723728
expect(generateContentResponse.usageMetadata?.totalTokenCount, 1913);
724729
expect(generateContentResponse.usageMetadata?.toolUsePromptTokenCount, 5);
730+
expect(
731+
generateContentResponse.usageMetadata?.cachedContentTokenCount, 10);
732+
expect(
733+
generateContentResponse
734+
.usageMetadata?.cacheTokensDetails?.first.modality,
735+
ContentModality.text);
736+
expect(
737+
generateContentResponse
738+
.usageMetadata?.cacheTokensDetails?.first.tokenCount,
739+
10);
725740
expect(
726741
generateContentResponse
727742
.usageMetadata?.promptTokensDetails?[1].modality,

0 commit comments

Comments (0)