|
72 | 72 | ) |
73 | 73 |
|
74 | 74 |
|
75 | | -class GeminiModel(str, Enum): |
76 | | - """ |
77 | | - Gemini Model Names allowed by comfy-api |
78 | | - """ |
79 | | - |
80 | | - gemini_2_5_pro_preview_05_06 = "gemini-2.5-pro-preview-05-06" |
81 | | - gemini_2_5_flash_preview_04_17 = "gemini-2.5-flash-preview-04-17" |
82 | | - gemini_2_5_pro = "gemini-2.5-pro" |
83 | | - gemini_2_5_flash = "gemini-2.5-flash" |
84 | | - gemini_3_0_pro = "gemini-3-pro-preview" |
85 | | - |
86 | | - |
87 | 75 | class GeminiImageModel(str, Enum): |
88 | 76 | """ |
89 | 77 | Gemini Image Model Names allowed by comfy-api |
@@ -237,10 +225,14 @@ def calculate_tokens_price(response: GeminiGenerateContentResponse) -> float | N |
237 | 225 | input_tokens_price = 0.30 |
238 | 226 | output_text_tokens_price = 2.50 |
239 | 227 | output_image_tokens_price = 30.0 |
240 | | - elif response.modelVersion == "gemini-3-pro-preview": |
| 228 | + elif response.modelVersion in ("gemini-3-pro-preview", "gemini-3.1-pro-preview"): |
241 | 229 | input_tokens_price = 2 |
242 | 230 | output_text_tokens_price = 12.0 |
243 | 231 | output_image_tokens_price = 0.0 |
| 232 | + elif response.modelVersion == "gemini-3.1-flash-lite-preview": |
| 233 | + input_tokens_price = 0.25 |
| 234 | + output_text_tokens_price = 1.50 |
| 235 | + output_image_tokens_price = 0.0 |
244 | 236 | elif response.modelVersion == "gemini-3-pro-image-preview": |
245 | 237 | input_tokens_price = 2 |
246 | 238 | output_text_tokens_price = 12.0 |
@@ -292,8 +284,16 @@ def define_schema(cls): |
292 | 284 | ), |
293 | 285 | IO.Combo.Input( |
294 | 286 | "model", |
295 | | - options=GeminiModel, |
296 | | - default=GeminiModel.gemini_2_5_pro, |
| 287 | + options=[ |
| 288 | + "gemini-2.5-pro-preview-05-06", |
| 289 | + "gemini-2.5-flash-preview-04-17", |
| 290 | + "gemini-2.5-pro", |
| 291 | + "gemini-2.5-flash", |
| 292 | + "gemini-3-pro-preview", |
| 293 | + "gemini-3-1-pro", |
| 294 | + "gemini-3-1-flash-lite", |
| 295 | + ], |
| 296 | + default="gemini-3-1-pro", |
297 | 297 | tooltip="The Gemini model to use for generating responses.", |
298 | 298 | ), |
299 | 299 | IO.Int.Input( |
@@ -363,11 +363,16 @@ def define_schema(cls): |
363 | 363 | "usd": [0.00125, 0.01], |
364 | 364 | "format": { "approximate": true, "separator": "-", "suffix": " per 1K tokens" } |
365 | 365 | } |
366 | | - : $contains($m, "gemini-3-pro-preview") ? { |
| 366 | + : ($contains($m, "gemini-3-pro-preview") or $contains($m, "gemini-3-1-pro")) ? { |
367 | 367 | "type": "list_usd", |
368 | 368 | "usd": [0.002, 0.012], |
369 | 369 | "format": { "approximate": true, "separator": "-", "suffix": " per 1K tokens" } |
370 | 370 | } |
| 371 | + : $contains($m, "gemini-3-1-flash-lite") ? { |
| 372 | + "type": "list_usd", |
| 373 | + "usd": [0.00025, 0.0015], |
| 374 | + "format": { "approximate": true, "separator": "-", "suffix": " per 1K tokens" } |
| 375 | + } |
371 | 376 | : {"type":"text", "text":"Token-based"} |
372 | 377 | ) |
373 | 378 | """, |
@@ -436,12 +441,14 @@ async def execute( |
436 | 441 | files: list[GeminiPart] | None = None, |
437 | 442 | system_prompt: str = "", |
438 | 443 | ) -> IO.NodeOutput: |
439 | | - validate_string(prompt, strip_whitespace=False) |
| 444 | + if model == "gemini-3-pro-preview": |
| 445 | +        model = "gemini-3.1-pro-preview"  # model "gemini-3-pro-preview" will soon be deprecated by Google |
| 446 | + elif model == "gemini-3-1-pro": |
| 447 | + model = "gemini-3.1-pro-preview" |
| 448 | + elif model == "gemini-3-1-flash-lite": |
| 449 | + model = "gemini-3.1-flash-lite-preview" |
440 | 450 |
|
441 | | - # Create parts list with text prompt as the first part |
442 | 451 | parts: list[GeminiPart] = [GeminiPart(text=prompt)] |
443 | | - |
444 | | - # Add other modal parts |
445 | 452 | if images is not None: |
446 | 453 | parts.extend(await create_image_parts(cls, images)) |
447 | 454 | if audio is not None: |
|
0 commit comments