Skip to content

Commit e1d37af

Browse files
committed
feat: add Inception Labs mercury-2 model support
Register mercury-2 (128k context, tool-calling) and mercury-edit-2 in llm-info, add Inception Labs provider to the GUI Add Chat Model form, and flag mercury-2 as tool-capable in PROVIDER_TOOL_SUPPORT.
1 parent d971363 commit e1d37af

5 files changed

Lines changed: 77 additions & 0 deletions

File tree

core/llm/toolSupport.ts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -276,6 +276,11 @@ export const PROVIDER_TOOL_SUPPORT: Record<string, (model: string) => boolean> =
276276

277277
return false;
278278
},
279+
inception: (model) => {
  // https://docs.inceptionlabs.ai/ - mercury-2 supports tool calling.
  // Case-insensitive prefix match so dated/suffixed variants (e.g.
  // "mercury-2-2025...") are also flagged as tool-capable.
  return model.toLowerCase().startsWith("mercury-2");
},
279284
deepseek: (model) => {
280285
// https://api-docs.deepseek.com/quick_start/pricing
281286
// https://api-docs.deepseek.com/guides/function_calling

gui/src/pages/AddNewModel/configs/models.ts

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3270,4 +3270,20 @@ export const models: { [key: string]: ModelPackage } = {
32703270
icon: "mimo.png",
32713271
isOpenSource: false,
32723272
},
3273+
3274+
// Inception Labs models
// Entry for the GUI "Add Chat Model" form; `params` holds the values written
// into the user's model config when this package is selected.
mercury2: {
  title: "Mercury 2",
  description:
    "Inception Labs' fastest reasoning LLM with 128k context, tool calling, and structured outputs.",
  refUrl: "https://docs.inceptionlabs.ai/",
  params: {
    title: "Mercury 2",
    // Model id sent to the API; must match the prefix checked in
    // PROVIDER_TOOL_SUPPORT and the llm-info regex for this model.
    model: "mercury-2",
    contextLength: 128_000,
  },
  providerOptions: ["inception"],
  // NOTE(review): assumes "inception.png" exists in the GUI icon assets — confirm.
  icon: "inception.png",
  isOpenSource: false,
},
32733289
};

gui/src/pages/AddNewModel/configs/providers.ts

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -609,6 +609,28 @@ Select the \`GPT-4o\` model below to complete your provider configuration, but n
609609
],
610610
apiKeyUrl: "https://platform.minimax.io",
611611
},
612+
// Inception Labs provider card for the GUI "Add Chat Model" form.
inception: {
  title: "Inception Labs",
  // Provider id; must match the key used in PROVIDER_TOOL_SUPPORT and llm-info.
  provider: "inception",
  // NOTE(review): assumes "inception.png" exists in the GUI icon assets — confirm.
  icon: "inception.png",
  description:
    "Inception Labs provides Mercury, the fastest diffusion-based LLM family with 128k context and tool calling.",
  longDescription:
    "To get started with Inception Labs, obtain an API key from the [Inception Labs platform](https://platform.inceptionlabs.ai/). Their Mercury models are OpenAI-compatible and support chat, tool calling, and structured outputs.",
  tags: [ModelProviderTags.RequiresApiKey],
  // Inputs collected from the user before the provider can be configured.
  collectInputFor: [
    {
      inputType: "text",
      key: "apiKey",
      label: "API Key",
      placeholder: "Enter your Inception Labs API key",
      required: true,
    },
    // Shared completion-parameter inputs (temperature, etc.) used by all providers.
    ...completionParamsInputsConfigs,
  ],
  // Model packages offered for this provider (defined in models.ts).
  packages: [models.mercury2],
  apiKeyUrl: "https://platform.inceptionlabs.ai/",
},
612634
deepseek: {
613635
title: "DeepSeek",
614636
provider: "deepseek",

packages/llm-info/src/index.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ import { Bedrock } from "./providers/bedrock.js";
44
import { Cohere } from "./providers/cohere.js";
55
import { CometAPI } from "./providers/cometapi.js";
66
import { Gemini } from "./providers/gemini.js";
7+
import { Inception } from "./providers/inception.js";
78
import { MiniMax } from "./providers/minimax.js";
89
import { Mistral } from "./providers/mistral.js";
910
import { Ollama } from "./providers/ollama.js";
@@ -26,6 +27,7 @@ export const allModelProviders: ModelProvider[] = [
2627
Bedrock,
2728
Cohere,
2829
CometAPI,
30+
Inception,
2931
MiniMax,
3032
xAI,
3133
zAI,
Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
import { ModelProvider } from "../types.js";

// Inception Labs provider metadata for llm-info.
// https://docs.inceptionlabs.ai/ — the Mercury model family.
export const Inception: ModelProvider = {
  models: [
    {
      model: "mercury-2",
      displayName: "Mercury 2",
      contextLength: 128000,
      description:
        "Inception Labs' fastest reasoning LLM and their most powerful model, with tool calling and structured outputs support.",
      // Unanchored, case-insensitive substring match: matches any model id
      // containing "mercury-2" (e.g. dated variants).
      // NOTE(review): would also match ids like "mercury-20" — confirm intended.
      regex: /mercury-2/i,
      recommendedFor: ["chat"],
    },
    {
      model: "mercury-edit-2",
      displayName: "Mercury Edit 2",
      contextLength: 32000,
      description:
        "Inception Labs' code editing model for autocomplete, apply edit, and next edit suggestions.",
      regex: /mercury-edit-2/i,
      recommendedFor: ["autocomplete"],
    },
    {
      // Older coder model; no description or recommendation provided.
      model: "mercury-coder-small",
      displayName: "Mercury Coder Small",
      contextLength: 32000,
      regex: /mercury-coder-small/i,
    },
  ],
  // Provider id; must match the "inception" keys used elsewhere in the repo.
  id: "inception",
  displayName: "Inception Labs",
};

0 commit comments

Comments (0)