)}
@@ -288,7 +307,11 @@ function ToolUseRow({ tool }: { tool: ToolUse }) {
{availableTabs.length > 0 && (
<>
-
+
{availableTabs.map((tab) => (
);
}
+
+function SubAgentContent({ parts }: { parts: any[] }) {
+ // Extract sub-agent run ID from injected metadata part
+ const runPart = parts.find(
+ (p: any) => p.type === "data-subagent-run" && p.data?.runId
+ );
+ const subAgentRunId = runPart?.data?.runId as string | undefined;
+
+ return (
+
+ {subAgentRunId && (
+
+
+ View sub-agent run
+
+
+ )}
+ {parts.map((part: any, j: number) => {
+ const partType = part.type as string;
+
+ // Skip the injected metadata part — already rendered above
+ if (partType === "data-subagent-run") return null;
+
+ if (partType === "text" && part.text) {
+ return
;
+ }
+
+ if (partType === "step-start") {
+ return (
+
+ );
+ }
+
+ if (partType.startsWith("tool-")) {
+ const subToolName = partType.slice(5);
+ return (
+
+ );
+ }
+
+ if (partType === "reasoning" && part.text) {
+ return (
+
+ );
+ }
+
+ return null;
+ })}
+
+ );
+}
diff --git a/apps/webapp/app/components/runs/v3/ai/AISpanDetails.tsx b/apps/webapp/app/components/runs/v3/ai/AISpanDetails.tsx
index 5e8bb65688f..c243a1e4d9b 100644
--- a/apps/webapp/app/components/runs/v3/ai/AISpanDetails.tsx
+++ b/apps/webapp/app/components/runs/v3/ai/AISpanDetails.tsx
@@ -1,6 +1,7 @@
import { CheckIcon, ClipboardDocumentIcon } from "@heroicons/react/20/solid";
-import { lazy, Suspense, useState } from "react";
+import { Suspense, useState } from "react";
import { Button } from "~/components/primitives/Buttons";
+import { StreamdownRenderer } from "~/components/code/StreamdownRenderer";
import { Header3 } from "~/components/primitives/Headers";
import { Paragraph } from "~/components/primitives/Paragraph";
import { TabButton, TabContainer } from "~/components/primitives/Tabs";
@@ -20,16 +21,6 @@ import type { AISpanData, DisplayItem } from "./types";
import type { PromptSpanData } from "~/presenters/v3/SpanPresenter.server";
import { SpanHorizontalTimeline } from "~/components/runs/v3/SpanHorizontalTimeline";
-const StreamdownRenderer = lazy(() =>
- import("streamdown").then((mod) => ({
- default: ({ children }: { children: string }) => (
-
- {children}
-
- ),
- }))
-);
-
type AITab = "overview" | "messages" | "tools" | "prompt";
export function AISpanDetails({
diff --git a/apps/webapp/app/components/runs/v3/ai/types.ts b/apps/webapp/app/components/runs/v3/ai/types.ts
index bb0fd7e74b1..c59c87865d2 100644
--- a/apps/webapp/app/components/runs/v3/ai/types.ts
+++ b/apps/webapp/app/components/runs/v3/ai/types.ts
@@ -22,6 +22,11 @@ export type ToolUse = {
resultSummary?: string;
/** Full formatted result for display in a code block */
resultOutput?: string;
+ /** Sub-agent output — when the tool result is a UIMessage with parts */
+ subAgent?: {
+ parts: any[];
+ isStreaming: boolean;
+ };
};
// ---------------------------------------------------------------------------
diff --git a/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.$projectParam.env.$envParam.test.tasks.$taskParam/AIPayloadTabContent.tsx b/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.$projectParam.env.$envParam.test.tasks.$taskParam/AIPayloadTabContent.tsx
index 3d9302356cc..6fc50a41280 100644
--- a/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.$projectParam.env.$envParam.test.tasks.$taskParam/AIPayloadTabContent.tsx
+++ b/apps/webapp/app/routes/_app.orgs.$organizationSlug.projects.$projectParam.env.$envParam.test.tasks.$taskParam/AIPayloadTabContent.tsx
@@ -1,8 +1,9 @@
import { CheckIcon, XMarkIcon } from "@heroicons/react/20/solid";
import { AnimatePresence, motion } from "framer-motion";
-import { Suspense, lazy, useCallback, useEffect, useRef, useState } from "react";
+import { Suspense, useCallback, useEffect, useRef, useState } from "react";
import { SparkleListIcon } from "~/assets/icons/SparkleListIcon";
import { Button } from "~/components/primitives/Buttons";
+import { StreamdownRenderer } from "~/components/code/StreamdownRenderer";
import { Header3 } from "~/components/primitives/Headers";
import { Paragraph } from "~/components/primitives/Paragraph";
import { Spinner } from "~/components/primitives/Spinner";
@@ -11,16 +12,6 @@ import { useOrganization } from "~/hooks/useOrganizations";
import { useProject } from "~/hooks/useProject";
import { cn } from "~/utils/cn";
-const StreamdownRenderer = lazy(() =>
- import("streamdown").then((mod) => ({
- default: ({ children, isAnimating }: { children: string; isAnimating: boolean }) => (
-
- {children}
-
- ),
- }))
-);
-
type StreamEventType =
| { type: "thinking"; content: string }
| { type: "result"; success: true; payload: string }
@@ -31,11 +22,19 @@ export function AIPayloadTabContent({
payloadSchema,
taskIdentifier,
getCurrentPayload,
+ generateButtonLabel = "Generate payload",
+ placeholder,
+ examplePromptsOverride,
+ isAgent = false,
}: {
onPayloadGenerated: (payload: string) => void;
payloadSchema?: unknown;
taskIdentifier: string;
getCurrentPayload?: () => string;
+ generateButtonLabel?: string;
+ placeholder?: string;
+ examplePromptsOverride?: string[];
+ isAgent?: boolean;
}) {
const [prompt, setPrompt] = useState("");
const [isLoading, setIsLoading] = useState(false);
@@ -73,6 +72,7 @@ export function AIPayloadTabContent({
const formData = new FormData();
formData.append("prompt", queryPrompt);
formData.append("taskIdentifier", taskIdentifier);
+ formData.append("isAgent", isAgent ? "true" : "false");
if (payloadSchema) {
formData.append("payloadSchema", JSON.stringify(payloadSchema));
}
@@ -144,7 +144,7 @@ export function AIPayloadTabContent({
setIsLoading(false);
}
},
- [resourcePath, taskIdentifier, payloadSchema, getCurrentPayload]
+ [resourcePath, taskIdentifier, payloadSchema, getCurrentPayload, isAgent]
);
const processStreamEvent = useCallback(
@@ -191,7 +191,7 @@ export function AIPayloadTabContent({
}
}, [error]);
- const examplePrompts = payloadSchema
+ const examplePrompts = examplePromptsOverride ?? (payloadSchema
? [
"Generate a valid payload",
"Generate a payload with edge cases",
@@ -201,7 +201,7 @@ export function AIPayloadTabContent({
"Generate a simple JSON payload",
"Generate a payload with nested objects",
"Generate a payload with an array of items",
- ];
+ ]);
return (
@@ -215,9 +215,9 @@ export function AIPayloadTabContent({
ref={textareaRef}
name="prompt"
placeholder={
- payloadSchema
+ placeholder ?? (payloadSchema
? "e.g. generate a payload for a new user signup"
- : "e.g. generate a JSON payload with name, email, and age fields"
+ : "e.g. generate a JSON payload with name, email, and age fields")
}
value={prompt}
onChange={(e) => setPrompt(e.target.value)}
@@ -251,7 +251,7 @@ export function AIPayloadTabContent({
className={cn(!prompt.trim() && "opacity-50")}
onClick={() => handleSubmit()}
>
- Generate payload
+ {generateButtonLabel}
)}
diff --git a/apps/webapp/package.json b/apps/webapp/package.json
index 0afb011cce0..4c47de1cdcd 100644
--- a/apps/webapp/package.json
+++ b/apps/webapp/package.json
@@ -28,6 +28,7 @@
],
"dependencies": {
"@ai-sdk/openai": "^1.3.23",
+ "@ai-sdk/react": "^3.0.0",
"@ariakit/react": "^0.4.6",
"@ariakit/react-core": "^0.4.6",
"@aws-sdk/client-ecr": "^3.931.0",
@@ -218,7 +219,8 @@
"sonner": "^1.0.3",
"sql-formatter": "^15.4.10",
"sqs-consumer": "^7.4.0",
- "streamdown": "^1.4.0",
+ "@streamdown/code": "^1.1.1",
+ "streamdown": "^2.5.0",
"superjson": "^2.2.1",
"tailwind-merge": "^1.12.0",
"tailwind-scrollbar-hide": "^1.1.7",
diff --git a/apps/webapp/test/chat-snapshot-integration.test.ts b/apps/webapp/test/chat-snapshot-integration.test.ts
new file mode 100644
index 00000000000..3d157d58f9f
--- /dev/null
+++ b/apps/webapp/test/chat-snapshot-integration.test.ts
@@ -0,0 +1,235 @@
+// Plan F.3: integration test that round-trips a `ChatSnapshotV1` blob
+// through the SDK's snapshot helpers + a real MinIO backing store. Mirrors
+// the testcontainer pattern from `objectStore.test.ts`.
+//
+// What this verifies end-to-end:
+// - SDK's `writeChatSnapshot` calls `apiClient.createUploadPayloadUrl`
+// to mint a presigned PUT, then PUTs JSON to it.
+// - SDK's `readChatSnapshot` calls `apiClient.getPayloadUrl` to mint a
+// presigned GET, then fetches and parses.
+// - The webapp's `generatePresignedUrl` produces URLs MinIO accepts.
+// - The blob round-trips with `version: 1` shape preserved.
+// - 404 (no snapshot for a fresh session) returns `undefined`, not an
+// error.
+//
+// This is the integration safety net behind the unit tests in
+// `packages/trigger-sdk/test/chat-snapshot.test.ts` — those tests mock
+// `fetch`; this one drives a real S3-compatible backend.
+
+import { postgresAndMinioTest } from "@internal/testcontainers";
+import { apiClientManager } from "@trigger.dev/core/v3";
+import {
+ __readChatSnapshotProductionPathForTests as readChatSnapshot,
+ __writeChatSnapshotProductionPathForTests as writeChatSnapshot,
+ type ChatSnapshotV1,
+} from "@trigger.dev/sdk/ai";
+import type { UIMessage } from "ai";
+import { afterEach, describe, expect, vi } from "vitest";
+import { env } from "~/env.server";
+import { generatePresignedUrl } from "~/v3/objectStore.server";
+
+vi.setConfig({ testTimeout: 60_000 });
+
+// ── Helpers ────────────────────────────────────────────────────────────
+
+function makeSnapshot(opts: { messages?: UIMessage[]; lastOutEventId?: string } = {}): ChatSnapshotV1 {
+ return {
+ version: 1,
+ savedAt: 1_700_000_000_000,
+ messages: opts.messages ?? [
+ {
+ id: "u-1",
+ role: "user",
+ parts: [{ type: "text", text: "hello" }],
+ },
+ {
+ id: "a-1",
+ role: "assistant",
+ parts: [{ type: "text", text: "world" }],
+ },
+ ],
+ lastOutEventId: opts.lastOutEventId ?? "evt-42",
+ lastOutTimestamp: 1_700_000_000_500,
+ };
+}
+
+/**
+ * Stub `apiClientManager.clientOrThrow()` so the SDK helpers see a fake
+ * api client whose `getPayloadUrl` / `createUploadPayloadUrl` return
+ * presigned URLs minted by the webapp's real `generatePresignedUrl`
+ * (which signs against MinIO).
+ *
+ * The SDK helpers internally do `fetch(presignedUrl, ...)` to read/write
+ * the blob, so MinIO ends up holding the actual bytes.
+ */
+function stubApiClient(opts: { projectRef: string; envSlug: string }) {
+ vi.spyOn(apiClientManager, "clientOrThrow").mockReturnValue({
+ async getPayloadUrl(filename: string) {
+ const result = await generatePresignedUrl(opts.projectRef, opts.envSlug, filename, "GET");
+ if (!result.success) throw new Error(result.error);
+ return { presignedUrl: result.url };
+ },
+ async createUploadPayloadUrl(filename: string) {
+ const result = await generatePresignedUrl(opts.projectRef, opts.envSlug, filename, "PUT");
+ if (!result.success) throw new Error(result.error);
+ return { presignedUrl: result.url };
+ },
+ } as never);
+}
+
+// Suppress noisy warnings from logger.warn during error-path tests.
+let warnSpy: ReturnType<typeof vi.spyOn> | undefined;
+
+afterEach(() => {
+ vi.restoreAllMocks();
+ warnSpy?.mockRestore();
+});
+
+// ── Tests ──────────────────────────────────────────────────────────────
+
+describe("chat snapshot integration (MinIO + SDK helpers)", () => {
+ postgresAndMinioTest("round-trips a snapshot through real MinIO", async ({ minioConfig }) => {
+ env.OBJECT_STORE_BASE_URL = minioConfig.baseUrl;
+ env.OBJECT_STORE_ACCESS_KEY_ID = minioConfig.accessKeyId;
+ env.OBJECT_STORE_SECRET_ACCESS_KEY = minioConfig.secretAccessKey;
+ env.OBJECT_STORE_REGION = minioConfig.region;
+ env.OBJECT_STORE_DEFAULT_PROTOCOL = undefined;
+
+ stubApiClient({ projectRef: "proj_snap_rt", envSlug: "dev" });
+
+ const sessionId = "sess_round_trip_1";
+ const snapshot = makeSnapshot();
+
+ // Write through the SDK helper — should land in MinIO at
+ // `packets/proj_snap_rt/dev/sessions/sess_round_trip_1/snapshot.json`.
+ await writeChatSnapshot(sessionId, snapshot);
+
+ // Read back through the SDK helper — should reconstruct the original.
+ const result = await readChatSnapshot(sessionId);
+
+ expect(result).toEqual(snapshot);
+ });
+
+ postgresAndMinioTest("returns undefined for a fresh session with no snapshot", async ({ minioConfig }) => {
+ env.OBJECT_STORE_BASE_URL = minioConfig.baseUrl;
+ env.OBJECT_STORE_ACCESS_KEY_ID = minioConfig.accessKeyId;
+ env.OBJECT_STORE_SECRET_ACCESS_KEY = minioConfig.secretAccessKey;
+ env.OBJECT_STORE_REGION = minioConfig.region;
+ env.OBJECT_STORE_DEFAULT_PROTOCOL = undefined;
+
+ stubApiClient({ projectRef: "proj_snap_404", envSlug: "dev" });
+
+ warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {});
+
+ // Session never had a snapshot written — read returns undefined.
+ const result = await readChatSnapshot("sess_never_existed");
+ expect(result).toBeUndefined();
+ });
+
+ postgresAndMinioTest("overwrites a prior snapshot in place (single-writer)", async ({ minioConfig }) => {
+ // The runtime guarantees one attempt alive at a time, and
+ // `writeChatSnapshot` runs awaited after `onTurnComplete`. Verify
+ // that a second write to the same key replaces the first cleanly —
+ // the read-after-write reflects the latest blob.
+ env.OBJECT_STORE_BASE_URL = minioConfig.baseUrl;
+ env.OBJECT_STORE_ACCESS_KEY_ID = minioConfig.accessKeyId;
+ env.OBJECT_STORE_SECRET_ACCESS_KEY = minioConfig.secretAccessKey;
+ env.OBJECT_STORE_REGION = minioConfig.region;
+ env.OBJECT_STORE_DEFAULT_PROTOCOL = undefined;
+
+ stubApiClient({ projectRef: "proj_snap_overwrite", envSlug: "dev" });
+
+ const sessionId = "sess_overwrite";
+
+ const turn1 = makeSnapshot({
+ messages: [
+ { id: "u-1", role: "user", parts: [{ type: "text", text: "first" }] },
+ ],
+ lastOutEventId: "evt-turn1",
+ });
+ const turn2 = makeSnapshot({
+ messages: [
+ { id: "u-1", role: "user", parts: [{ type: "text", text: "first" }] },
+ { id: "a-1", role: "assistant", parts: [{ type: "text", text: "reply-1" }] },
+ { id: "u-2", role: "user", parts: [{ type: "text", text: "second" }] },
+ { id: "a-2", role: "assistant", parts: [{ type: "text", text: "reply-2" }] },
+ ],
+ lastOutEventId: "evt-turn2",
+ });
+
+ await writeChatSnapshot(sessionId, turn1);
+ await writeChatSnapshot(sessionId, turn2);
+
+ const result = await readChatSnapshot(sessionId);
+ expect(result).toEqual(turn2);
+ expect(result?.messages).toHaveLength(4);
+ expect(result?.lastOutEventId).toBe("evt-turn2");
+ });
+
+ postgresAndMinioTest("isolates snapshots by sessionId (no cross-talk)", async ({ minioConfig }) => {
+ env.OBJECT_STORE_BASE_URL = minioConfig.baseUrl;
+ env.OBJECT_STORE_ACCESS_KEY_ID = minioConfig.accessKeyId;
+ env.OBJECT_STORE_SECRET_ACCESS_KEY = minioConfig.secretAccessKey;
+ env.OBJECT_STORE_REGION = minioConfig.region;
+ env.OBJECT_STORE_DEFAULT_PROTOCOL = undefined;
+
+ stubApiClient({ projectRef: "proj_snap_iso", envSlug: "dev" });
+
+ const sessA = "sess_iso_A";
+ const sessB = "sess_iso_B";
+ const snapA = makeSnapshot({ lastOutEventId: "evt-A" });
+ const snapB = makeSnapshot({ lastOutEventId: "evt-B" });
+
+ await writeChatSnapshot(sessA, snapA);
+ await writeChatSnapshot(sessB, snapB);
+
+ const readA = await readChatSnapshot(sessA);
+ const readB = await readChatSnapshot(sessB);
+
+ expect(readA?.lastOutEventId).toBe("evt-A");
+ expect(readB?.lastOutEventId).toBe("evt-B");
+ // Distinct objects — modifying one shouldn't affect the other.
+ expect(readA?.lastOutEventId).not.toBe(readB?.lastOutEventId);
+ });
+
+ postgresAndMinioTest("handles snapshots with large message lists (~50 messages)", async ({ minioConfig }) => {
+ // Stress test: a 50-turn chat snapshot. Plan F.4 mentions the
+ // pre-change baseline grew past 512 KiB around turn 10-30 with tool
+ // use; the post-slim wire keeps wire payloads small but the snapshot
+ // itself can still get large. Verify the helpers handle a realistic
+ // payload size.
+ env.OBJECT_STORE_BASE_URL = minioConfig.baseUrl;
+ env.OBJECT_STORE_ACCESS_KEY_ID = minioConfig.accessKeyId;
+ env.OBJECT_STORE_SECRET_ACCESS_KEY = minioConfig.secretAccessKey;
+ env.OBJECT_STORE_REGION = minioConfig.region;
+ env.OBJECT_STORE_DEFAULT_PROTOCOL = undefined;
+
+ stubApiClient({ projectRef: "proj_snap_big", envSlug: "dev" });
+
+ const messages: UIMessage[] = [];
+ for (let i = 0; i < 50; i++) {
+ messages.push({
+ id: `u-${i}`,
+ role: "user",
+ parts: [{ type: "text", text: `user message ${i}: ${"x".repeat(200)}` }],
+ });
+ messages.push({
+ id: `a-${i}`,
+ role: "assistant",
+ parts: [{ type: "text", text: `assistant reply ${i}: ${"y".repeat(500)}` }],
+ });
+ }
+ const snapshot = makeSnapshot({ messages, lastOutEventId: "evt-50" });
+
+ await writeChatSnapshot("sess_big_chat", snapshot);
+ const result = await readChatSnapshot("sess_big_chat");
+
+ expect(result).toBeDefined();
+ expect(result!.messages).toHaveLength(100);
+ expect(result!.lastOutEventId).toBe("evt-50");
+ // Spot-check ordering integrity — the messages array round-tripped
+ // in the same order.
+ expect(result!.messages[0]!.id).toBe("u-0");
+ expect(result!.messages[99]!.id).toBe("a-49");
+ });
+});
diff --git a/apps/webapp/test/replay-after-crash.test.ts b/apps/webapp/test/replay-after-crash.test.ts
new file mode 100644
index 00000000000..f5c6842b194
--- /dev/null
+++ b/apps/webapp/test/replay-after-crash.test.ts
@@ -0,0 +1,315 @@
+// Plan F.3: integration test for the crash-recovery boot path. The
+// scenario it locks down:
+//
+// 1. Run A streams chunks to `session.out` and `onTurnComplete` fires.
+// 2. Run A crashes BEFORE `writeChatSnapshot` lands the post-turn
+// blob (or the write fails silently — both have the same effect).
+// 3. Run B boots: `readChatSnapshot` returns `undefined` (no snapshot
+// yet, or stale-from-prior-turn). Replay then drains
+// `session.out` from the snapshot's `lastOutEventId` (or seq 0)
+// and reduces the chunks back into UIMessage[].
+// 4. The accumulator is consistent — Run A's completed chunks reach
+// Run B's run loop without losing data.
+//
+// Plan section H.1 / H.4 spell out the "snapshot didn't make it before
+// crash" path; this test is the integration safety net behind the
+// unit tests in `packages/trigger-sdk/test/replay-session-out.test.ts`.
+//
+// We exercise the SDK's `__replaySessionOutTailProductionPathForTests`
+// against a stubbed `apiClient.readSessionStreamRecords` — the new
+// non-SSE records endpoint introduced in plan task #22. The replay path
+// is a single GET that returns whatever's already on the stream; no
+// long-poll. MinIO is provisioned to keep parity with
+// `chat-snapshot-integration.test.ts` (the snapshot read path runs
+// through it), even though the replay path itself doesn't read from S3.
+
+import { postgresAndMinioTest } from "@internal/testcontainers";
+import { apiClientManager } from "@trigger.dev/core/v3";
+import {
+ __readChatSnapshotProductionPathForTests as readChatSnapshot,
+ __replaySessionOutTailProductionPathForTests as replaySessionOutTail,
+ type ChatSnapshotV1,
+} from "@trigger.dev/sdk/ai";
+import type { UIMessageChunk } from "ai";
+import { afterEach, describe, expect, vi } from "vitest";
+import { env } from "~/env.server";
+import { generatePresignedUrl } from "~/v3/objectStore.server";
+
+vi.setConfig({ testTimeout: 60_000 });
+
+// ── Helpers ────────────────────────────────────────────────────────────
+
+function textTurn(id: string, text: string): UIMessageChunk[] {
+ return [
+ { type: "start", messageId: id, messageMetadata: { role: "assistant" } } as UIMessageChunk,
+ { type: "text-start", id: `${id}.t1` } as UIMessageChunk,
+ { type: "text-delta", id: `${id}.t1`, delta: text } as UIMessageChunk,
+ { type: "text-end", id: `${id}.t1` } as UIMessageChunk,
+ { type: "finish" } as UIMessageChunk,
+ ];
+}
+
+/**
+ * Stub `apiClientManager.clientOrThrow()` so:
+ * - `getPayloadUrl` / `createUploadPayloadUrl` mint MinIO presigned URLs
+ * via the webapp's real `generatePresignedUrl` (so snapshot reads
+ * hit a real S3-compatible backend).
+ * - `readSessionStreamRecords` returns the canonical
+ * `{ records: [{ data, id, seqNum }] }` shape — `data` is the
+ * JSON-encoded chunk body, mirroring the webapp's S2 record shape.
+ */
+function stubApiClient(opts: {
+ projectRef: string;
+ envSlug: string;
+ sessionOutChunks: unknown[];
+}) {
+ const records = opts.sessionOutChunks.map((chunk, i) => ({
+ data: typeof chunk === "string" ? chunk : JSON.stringify(chunk),
+ id: `evt-${i + 1}`,
+ seqNum: i + 1,
+ }));
+ const readRecordsSpy = vi.fn(
+ async (_id: string, _io: "in" | "out", _options?: { afterEventId?: string }) => ({
+ records,
+ })
+ );
+ vi.spyOn(apiClientManager, "clientOrThrow").mockReturnValue({
+ async getPayloadUrl(filename: string) {
+ const result = await generatePresignedUrl(opts.projectRef, opts.envSlug, filename, "GET");
+ if (!result.success) throw new Error(result.error);
+ return { presignedUrl: result.url };
+ },
+ async createUploadPayloadUrl(filename: string) {
+ const result = await generatePresignedUrl(opts.projectRef, opts.envSlug, filename, "PUT");
+ if (!result.success) throw new Error(result.error);
+ return { presignedUrl: result.url };
+ },
+ readSessionStreamRecords: readRecordsSpy,
+ } as never);
+ return readRecordsSpy;
+}
+
+let warnSpy: ReturnType<typeof vi.spyOn> | undefined;
+
+afterEach(() => {
+ vi.restoreAllMocks();
+ warnSpy?.mockRestore();
+});
+
+// ── Tests ──────────────────────────────────────────────────────────────
+
+describe("replay after crash (MinIO + SDK helpers)", () => {
+ postgresAndMinioTest(
+ "boot reconstructs accumulator from session.out replay when no snapshot exists",
+ async ({ minioConfig }) => {
+ env.OBJECT_STORE_BASE_URL = minioConfig.baseUrl;
+ env.OBJECT_STORE_ACCESS_KEY_ID = minioConfig.accessKeyId;
+ env.OBJECT_STORE_SECRET_ACCESS_KEY = minioConfig.secretAccessKey;
+ env.OBJECT_STORE_REGION = minioConfig.region;
+ env.OBJECT_STORE_DEFAULT_PROTOCOL = undefined;
+
+ warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {});
+
+ // The crashed run's session.out: two completed assistant turns, no
+ // snapshot ever written. Boot must recover both via replay.
+ const chunks = [...textTurn("a-1", "first turn"), ...textTurn("a-2", "second turn")];
+ stubApiClient({
+ projectRef: "proj_replay_crash",
+ envSlug: "dev",
+ sessionOutChunks: chunks,
+ });
+
+ // Step 1: read snapshot — returns undefined (fresh boot, no snap).
+ const snapshot = await readChatSnapshot("sess_no_snap");
+ expect(snapshot).toBeUndefined();
+
+ // Step 2: replay tail.
+ const replayed = await replaySessionOutTail("sess_no_snap");
+
+ expect(replayed).toHaveLength(2);
+ expect(replayed.map((m) => m.id)).toEqual(["a-1", "a-2"]);
+ const texts = replayed.flatMap((m) =>
+ (m.parts as Array<{ type: string; text?: string }>)
+ .filter((p) => p.type === "text")
+ .map((p) => p.text)
+ );
+ expect(texts).toEqual(["first turn", "second turn"]);
+ }
+ );
+
+ postgresAndMinioTest(
+ "boot replays only chunks AFTER snapshot.lastOutEventId (resume cursor)",
+ async ({ minioConfig }) => {
+ env.OBJECT_STORE_BASE_URL = minioConfig.baseUrl;
+ env.OBJECT_STORE_ACCESS_KEY_ID = minioConfig.accessKeyId;
+ env.OBJECT_STORE_SECRET_ACCESS_KEY = minioConfig.secretAccessKey;
+ env.OBJECT_STORE_REGION = minioConfig.region;
+ env.OBJECT_STORE_DEFAULT_PROTOCOL = undefined;
+
+ // The replay helper accepts the snapshot's `lastEventId` cursor
+ // and forwards it as `afterEventId` on the records endpoint —
+ // that's the cursor field name on the new non-SSE route. Here we
+ // feed only the post-snapshot chunks (modeling what the server
+ // returns for `afterEventId=evt-snapped`) and verify the helper
+ // threads the cursor through.
+ const readRecordsSpy = stubApiClient({
+ projectRef: "proj_replay_resume",
+ envSlug: "dev",
+ sessionOutChunks: textTurn("a-after-snap", "post-snapshot turn"),
+ });
+
+ const result = await replaySessionOutTail("sess_resume", { lastEventId: "evt-snapped" });
+
+ expect(readRecordsSpy).toHaveBeenCalledWith(
+ "sess_resume",
+ "out",
+ expect.objectContaining({ afterEventId: "evt-snapped" })
+ );
+ expect(result).toHaveLength(1);
+ expect(result[0]!.id).toBe("a-after-snap");
+ }
+ );
+
+ postgresAndMinioTest(
+ "boot returns [] when session.out is empty (first-ever turn, no snapshot)",
+ async ({ minioConfig }) => {
+ env.OBJECT_STORE_BASE_URL = minioConfig.baseUrl;
+ env.OBJECT_STORE_ACCESS_KEY_ID = minioConfig.accessKeyId;
+ env.OBJECT_STORE_SECRET_ACCESS_KEY = minioConfig.secretAccessKey;
+ env.OBJECT_STORE_REGION = minioConfig.region;
+ env.OBJECT_STORE_DEFAULT_PROTOCOL = undefined;
+
+ warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {});
+
+ stubApiClient({
+ projectRef: "proj_replay_empty",
+ envSlug: "dev",
+ sessionOutChunks: [],
+ });
+
+ const snapshot = await readChatSnapshot("sess_empty");
+ expect(snapshot).toBeUndefined();
+
+ const replayed = await replaySessionOutTail("sess_empty");
+ expect(replayed).toEqual([]);
+ }
+ );
+
+ postgresAndMinioTest(
+ "boot drops orphaned trailing tool parts (cleanupAbortedParts) — partial crash",
+ async ({ minioConfig }) => {
+ // Simulates a true mid-turn crash: assistant finished one turn,
+ // then started a tool-call but the run died before resolution.
+ // Replay must surface the completed turn but NOT include the
+ // orphaned tool part in `input-streaming` state.
+ env.OBJECT_STORE_BASE_URL = minioConfig.baseUrl;
+ env.OBJECT_STORE_ACCESS_KEY_ID = minioConfig.accessKeyId;
+ env.OBJECT_STORE_SECRET_ACCESS_KEY = minioConfig.secretAccessKey;
+ env.OBJECT_STORE_REGION = minioConfig.region;
+ env.OBJECT_STORE_DEFAULT_PROTOCOL = undefined;
+
+ stubApiClient({
+ projectRef: "proj_replay_partial",
+ envSlug: "dev",
+ sessionOutChunks: [
+ ...textTurn("a-complete", "I finished step 1"),
+ // Partial tool turn — no tool-input-end, no finish.
+ { type: "start", messageId: "a-orphan", messageMetadata: { role: "assistant" } } as UIMessageChunk,
+ { type: "tool-input-start", id: "tc-cut", toolName: "search" } as UIMessageChunk,
+ { type: "tool-input-delta", id: "tc-cut", delta: '{"q":"x"}' } as UIMessageChunk,
+ ],
+ });
+
+ const replayed = await replaySessionOutTail("sess_partial_crash");
+
+ // Completed turn always present.
+ expect(replayed.find((m) => m.id === "a-complete")).toBeTruthy();
+ // Orphaned tool-call never surfaces in `input-streaming` state.
+ const orphan = replayed.find((m) => m.id === "a-orphan");
+ if (orphan) {
+ const stillStreaming = (orphan.parts as Array<{ toolCallId?: string; state?: string }>).find(
+ (p) => p.toolCallId === "tc-cut" && p.state === "input-streaming"
+ );
+ expect(stillStreaming).toBeUndefined();
+ }
+ }
+ );
+
+ postgresAndMinioTest(
+ "snapshot+replay merge: snapshot supplies user msgs, replay supplies assistants",
+ async ({ minioConfig }) => {
+ // The boot orchestration calls
+ // `mergeByIdReplaceWins(snapshot.messages, replayed)`. The runtime
+ // contract is that user messages live in snapshot only (session.in
+ // never goes through replay) and assistants come from replay
+ // (which carries the freshest representation). Here we simulate
+ // the realistic split: snapshot has [u-1, a-1-stale], replay has
+ // [a-1-fresh, a-2-new]. After merge the accumulator should reflect
+ // the fresh assistant + new assistant, with the user message
+ // preserved.
+ //
+ // Note: this is a pre-merge round-trip — we drive the read and
+ // replay through real MinIO + stubbed S2 to confirm both arrive
+ // intact for the orchestration to merge.
+ env.OBJECT_STORE_BASE_URL = minioConfig.baseUrl;
+ env.OBJECT_STORE_ACCESS_KEY_ID = minioConfig.accessKeyId;
+ env.OBJECT_STORE_SECRET_ACCESS_KEY = minioConfig.secretAccessKey;
+ env.OBJECT_STORE_REGION = minioConfig.region;
+ env.OBJECT_STORE_DEFAULT_PROTOCOL = undefined;
+
+ // Pre-write a snapshot to MinIO via real apiClient stub.
+ const sessionId = "sess_merge_round_trip";
+ const snapshot: ChatSnapshotV1 = {
+ version: 1,
+ savedAt: 1_700_000_000_000,
+ messages: [
+ { id: "u-1", role: "user", parts: [{ type: "text", text: "hi" }] },
+ { id: "a-1", role: "assistant", parts: [{ type: "text", text: "stale-assistant" }] },
+ ],
+ lastOutEventId: "evt-prev",
+ lastOutTimestamp: 1_700_000_000_500,
+ };
+
+ // Use the SDK's own writer to lay the snapshot down, then swap
+ // the stub to also serve replay chunks for the read path.
+ stubApiClient({
+ projectRef: "proj_merge",
+ envSlug: "dev",
+ sessionOutChunks: [],
+ });
+ const { __writeChatSnapshotProductionPathForTests: writeSnapshot } = await import(
+ "@trigger.dev/sdk/ai"
+ );
+ await writeSnapshot(sessionId, snapshot);
+
+ // Restubbing for the boot phase: replay tail carries the fresh
+ // assistant for `a-1` plus a brand-new `a-2`. The orchestration's
+ // merge would replace `a-1` and append `a-2` after `u-1`.
+ vi.restoreAllMocks();
+ stubApiClient({
+ projectRef: "proj_merge",
+ envSlug: "dev",
+ sessionOutChunks: [
+ ...textTurn("a-1", "fresh-assistant"),
+ ...textTurn("a-2", "next-assistant"),
+ ],
+ });
+
+ const readBack = await readChatSnapshot(sessionId);
+ expect(readBack?.messages.map((m) => m.id)).toEqual(["u-1", "a-1"]);
+
+ const replayed = await replaySessionOutTail(sessionId, {
+ lastEventId: readBack?.lastOutEventId,
+ });
+ expect(replayed.map((m) => m.id)).toEqual(["a-1", "a-2"]);
+ // Replay's `a-1` carries the fresh content — when merge runs in
+ // the runtime, this version would replace the snapshot's stale
+ // `a-1`.
+ const replayedA1Text = (replayed[0]!.parts as Array<{ type: string; text?: string }>)
+ .filter((p) => p.type === "text")
+ .map((p) => p.text)
+ .join("");
+ expect(replayedA1Text).toBe("fresh-assistant");
+ }
+ );
+});
diff --git a/package.json b/package.json
index 30f27bade95..3da35e40e67 100644
--- a/package.json
+++ b/package.json
@@ -82,7 +82,8 @@
"@sentry/remix@9.46.0": "patches/@sentry__remix@9.46.0.patch",
"@upstash/ratelimit@1.1.3": "patches/@upstash__ratelimit.patch",
"antlr4ts@0.5.0-alpha.4": "patches/antlr4ts@0.5.0-alpha.4.patch",
- "@window-splitter/state@1.1.3": "patches/@window-splitter__state@1.1.3.patch"
+ "@window-splitter/state@1.1.3": "patches/@window-splitter__state@1.1.3.patch",
+ "streamdown@2.5.0": "patches/streamdown@2.5.0.patch"
},
"overrides": {
"typescript": "5.5.4",
diff --git a/packages/build/package.json b/packages/build/package.json
index 49a310e46e7..f172eeb7c6a 100644
--- a/packages/build/package.json
+++ b/packages/build/package.json
@@ -31,7 +31,8 @@
"./extensions/typescript": "./src/extensions/typescript.ts",
"./extensions/puppeteer": "./src/extensions/puppeteer.ts",
"./extensions/playwright": "./src/extensions/playwright.ts",
- "./extensions/lightpanda": "./src/extensions/lightpanda.ts"
+ "./extensions/lightpanda": "./src/extensions/lightpanda.ts",
+ "./extensions/secureExec": "./src/extensions/secureExec.ts"
},
"sourceDialects": [
"@triggerdotdev/source"
@@ -65,6 +66,9 @@
],
"extensions/lightpanda": [
"dist/commonjs/extensions/lightpanda.d.ts"
+ ],
+ "extensions/secureExec": [
+ "dist/commonjs/extensions/secureExec.d.ts"
]
}
},
@@ -207,6 +211,17 @@
"types": "./dist/commonjs/extensions/lightpanda.d.ts",
"default": "./dist/commonjs/extensions/lightpanda.js"
}
+ },
+ "./extensions/secureExec": {
+ "import": {
+ "@triggerdotdev/source": "./src/extensions/secureExec.ts",
+ "types": "./dist/esm/extensions/secureExec.d.ts",
+ "default": "./dist/esm/extensions/secureExec.js"
+ },
+ "require": {
+ "types": "./dist/commonjs/extensions/secureExec.d.ts",
+ "default": "./dist/commonjs/extensions/secureExec.js"
+ }
}
},
"main": "./dist/commonjs/index.js",
diff --git a/packages/build/src/extensions/secureExec.ts b/packages/build/src/extensions/secureExec.ts
new file mode 100644
index 00000000000..808bc666501
--- /dev/null
+++ b/packages/build/src/extensions/secureExec.ts
@@ -0,0 +1,172 @@
+import { BuildTarget } from "@trigger.dev/core/v3";
+import { BuildManifest } from "@trigger.dev/core/v3/schemas";
+import { BuildContext, BuildExtension } from "@trigger.dev/core/v3/build";
+import { dirname, resolve, join } from "node:path";
+import { readFileSync } from "node:fs";
+import { createRequire } from "node:module";
+import { readPackageJSON } from "pkg-types";
+
+export type SecureExecOptions = {
+ /**
+ * Packages available inside the sandbox at runtime.
+ *
+ * These are `require()`'d inside the V8 isolate at runtime — the bundler
+ * never sees them statically. They are marked external and installed as
+ * deploy dependencies.
+ *
+ * @example
+ * ```ts
+ * secureExec({ packages: ["jszip", "lodash"] })
+ * ```
+ */
+ packages?: string[];
+};
+
+/**
+ * Build extension for [secure-exec](https://secureexec.dev) — run untrusted
+ * JavaScript/TypeScript in V8 isolates with configurable permissions.
+ *
+ * Handles the esbuild workarounds needed for secure-exec's runtime
+ * `require.resolve` calls, native binaries, and module-scope resolution.
+ *
+ * @example
+ * ```ts
+ * import { secureExec } from "@trigger.dev/build/extensions/secureExec";
+ *
+ * export default defineConfig({
+ * build: {
+ * extensions: [secureExec()],
+ * },
+ * });
+ * ```
+ */
+export function secureExec(options?: SecureExecOptions): BuildExtension {
+ return new SecureExecExtension(options ?? {});
+}
+
+class SecureExecExtension implements BuildExtension {
+ public readonly name = "SecureExecExtension";
+
+ private userPackages: string[];
+
+ constructor(options: SecureExecOptions) {
+ this.userPackages = options.packages ?? [];
+ }
+
+ externalsForTarget(_target: BuildTarget) {
+ return [
+ // esbuild must not be bundled — it locates its native binary via a
+ // relative path from its JS API entry point. secure-exec uses esbuild
+ // at runtime to bundle polyfills for sandbox code.
+ "esbuild",
+ // User-specified packages are require()'d inside the V8 sandbox at
+ // runtime — the bundler never sees them statically.
+ ...this.userPackages,
+ ];
+ }
+
+ onBuildStart(context: BuildContext) {
+ context.logger.debug(`Adding ${this.name} esbuild plugins`);
+
+ // Plugin 1: Replace node-stdlib-browser with pre-resolved paths.
+ //
+ // Trigger's ESM shim anchors require.resolve() to the chunk path, so
+ // node-stdlib-browser's runtime require.resolve("./mock/empty.js") breaks.
+ // Fix: load the real node-stdlib-browser at build time (where require.resolve
+ // works), capture the resolved path map, and inline it as a static export.
+ const workingDir = context.workingDir;
+ context.registerPlugin({
+ name: "secure-exec-stdlib-resolver",
+ setup(build) {
+ build.onResolve({ filter: /^node-stdlib-browser$/ }, () => ({
+ path: "node-stdlib-browser",
+ namespace: "secure-exec-nsb-resolved",
+ }));
+ build.onLoad({ filter: /.*/, namespace: "secure-exec-nsb-resolved" }, () => {
+ const buildRequire = createRequire(join(workingDir, "package.json"));
+ const resolved = buildRequire("node-stdlib-browser");
+ return {
+ contents: `export default ${JSON.stringify(resolved)};`,
+ loader: "js",
+ };
+ });
+ },
+ });
+
+ // Plugin 2: Inline bridge.js at build time.
+ //
+ // bridge-loader.js in @secure-exec/node(js) uses __dirname and
+ // require.resolve("@secure-exec/core") at module scope to locate
+ // dist/bridge.js on disk. This fails in Trigger's bundled output.
+ // Fix: read bridge.js content at build time and inline it as a
+ // string literal so no runtime filesystem resolution is needed.
+ //
+ context.registerPlugin({
+ name: "secure-exec-bridge-inline",
+ setup(build) {
+ build.onLoad(
+ { filter: /[\\/]@secure-exec[\\/]node[\\/]dist[\\/]bridge-loader\.js$/ },
+ (args) => {
+ try {
+ const buildRequire = createRequire(args.path);
+ const coreEntry = buildRequire.resolve("@secure-exec/core");
+ const coreRoot = resolve(dirname(coreEntry), "..");
+ const bridgeCode = readFileSync(join(coreRoot, "dist", "bridge.js"), "utf8");
+
+ return {
+ contents: [
+ `import { getIsolateRuntimeSource } from "@secure-exec/core";`,
+ `const bridgeCodeCache = ${JSON.stringify(bridgeCode)};`,
+ `export function getRawBridgeCode() { return bridgeCodeCache; }`,
+ `export function getBridgeAttachCode() { return getIsolateRuntimeSource("bridgeAttach"); }`,
+ ].join("\n"),
+ loader: "js",
+ };
+ } catch {
+ // If we can't inline the bridge, let the normal loader handle it.
+ return undefined;
+ }
+ }
+ );
+ },
+ });
+ }
+
+ async onBuildComplete(context: BuildContext, _manifest: BuildManifest) {
+ if (context.target === "dev") {
+ return;
+ }
+
+ context.logger.debug(`Adding ${this.name} deploy dependencies`);
+
+ const dependencies: Record<string, string> = {};
+
+ // Resolve versions for user-specified sandbox packages
+ for (const pkg of this.userPackages) {
+ try {
+ const modulePath = await context.resolvePath(pkg);
+ if (!modulePath) {
+ dependencies[pkg] = "latest";
+ continue;
+ }
+
+ const packageJSON = await readPackageJSON(dirname(modulePath));
+ dependencies[pkg] = packageJSON.version ?? "latest";
+ } catch {
+ context.logger.warn(
+ `Could not resolve version for sandbox package ${pkg}, defaulting to latest`
+ );
+ dependencies[pkg] = "latest";
+ }
+ }
+
+ context.addLayer({
+ id: "secureExec",
+ dependencies,
+ image: {
+ // isolated-vm requires native compilation tools
+ pkgs: ["python3", "make", "g++"],
+ },
+ });
+ }
+}
diff --git a/packages/build/src/internal.ts b/packages/build/src/internal.ts
index 54f785a6106..0e1954c8b9e 100644
--- a/packages/build/src/internal.ts
+++ b/packages/build/src/internal.ts
@@ -1 +1,2 @@
export * from "./internal/additionalFiles.js";
+export * from "./internal/copyFiles.js";
diff --git a/packages/build/src/internal/additionalFiles.ts b/packages/build/src/internal/additionalFiles.ts
index a815b53c9aa..57a746c36b6 100644
--- a/packages/build/src/internal/additionalFiles.ts
+++ b/packages/build/src/internal/additionalFiles.ts
@@ -1,8 +1,10 @@
import { BuildManifest } from "@trigger.dev/core/v3";
import { BuildContext } from "@trigger.dev/core/v3/build";
-import { copyFile, mkdir } from "node:fs/promises";
-import { dirname, join, posix, relative } from "node:path";
-import { glob } from "tinyglobby";
+import {
+ copyMatcherResults,
+ findFilesByMatchers,
+ type MatcherResult,
+} from "./copyFiles.js";
export type AdditionalFilesOptions = {
files: string[];
@@ -14,12 +16,13 @@ export async function addAdditionalFilesToBuild(
context: BuildContext,
manifest: BuildManifest
) {
- // Copy any static assets to the destination
- const staticAssets = await findStaticAssetFiles(options.files ?? [], manifest.outputPath, {
- cwd: context.workingDir,
- });
+ const matcherResults: MatcherResult[] = await findFilesByMatchers(
+ options.files ?? [],
+ manifest.outputPath,
+ { cwd: context.workingDir }
+ );
- for (const { assets, matcher } of staticAssets) {
+ for (const { assets, matcher } of matcherResults) {
if (assets.length === 0) {
context.logger.warn(`[${source}] No files found for matcher`, matcher);
} else {
@@ -27,80 +30,7 @@ export async function addAdditionalFilesToBuild(
}
}
- await copyStaticAssets(staticAssets, source, context);
-}
-
-type MatchedStaticAssets = { source: string; destination: string }[];
-
-type FoundStaticAssetFiles = Array<{
- matcher: string;
- assets: MatchedStaticAssets;
-}>;
-
-async function findStaticAssetFiles(
- matchers: string[],
- destinationPath: string,
- options?: { cwd?: string; ignore?: string[] }
-): Promise<FoundStaticAssetFiles> {
- const result: FoundStaticAssetFiles = [];
-
- for (const matcher of matchers) {
- const assets = await findStaticAssetsForMatcher(matcher, destinationPath, options);
-
- result.push({ matcher, assets });
- }
-
- return result;
-}
-
-async function findStaticAssetsForMatcher(
- matcher: string,
- destinationPath: string,
- options?: { cwd?: string; ignore?: string[] }
-): Promise<MatchedStaticAssets> {
- const result: MatchedStaticAssets = [];
-
- const files = await glob({
- patterns: [matcher],
- cwd: options?.cwd,
- ignore: options?.ignore ?? [],
- onlyFiles: true,
- absolute: true,
+ await copyMatcherResults(matcherResults, (pair) => {
+ context.logger.debug(`[${source}] Copying ${pair.source} to ${pair.destination}`);
});
-
- let matches = 0;
-
- for (const file of files) {
- matches++;
-
- const pathInsideDestinationDir = relative(options?.cwd ?? process.cwd(), file)
- .split(posix.sep)
- .filter((p) => p !== "..")
- .join(posix.sep);
-
- const relativeDestinationPath = join(destinationPath, pathInsideDestinationDir);
-
- result.push({
- source: file,
- destination: relativeDestinationPath,
- });
- }
-
- return result;
-}
-
-async function copyStaticAssets(
- staticAssetFiles: FoundStaticAssetFiles,
- sourceName: string,
- context: BuildContext
-): Promise<void> {
- for (const { assets } of staticAssetFiles) {
- for (const { source, destination } of assets) {
- await mkdir(dirname(destination), { recursive: true });
-
- context.logger.debug(`[${sourceName}] Copying ${source} to ${destination}`);
-
- await copyFile(source, destination);
- }
- }
}
diff --git a/packages/build/src/internal/copyFiles.ts b/packages/build/src/internal/copyFiles.ts
new file mode 100644
index 00000000000..6fd3ede9545
--- /dev/null
+++ b/packages/build/src/internal/copyFiles.ts
@@ -0,0 +1,99 @@
+import { cp, copyFile, mkdir } from "node:fs/promises";
+import { dirname, join, posix, relative } from "node:path";
+import { glob } from "tinyglobby";
+
+/**
+ * A single matched asset — source file and its destination inside the
+ * build output directory.
+ */
+export type CopyPair = { source: string; destination: string };
+
+/**
+ * Result of a single matcher's glob, grouped with the matcher that
+ * produced it so callers can warn on empty matches.
+ */
+export type MatcherResult = {
+ matcher: string;
+ assets: CopyPair[];
+};
+
+/**
+ * Glob a set of matchers relative to `cwd` and return pairs describing
+ * where each matched file should be copied to under `destinationDir`.
+ *
+ * Relative paths are preserved under `destinationDir`. Leading `..`
+ * segments (from `../shared/file.txt` style patterns) are stripped so
+ * files always land inside the destination.
+ */
+export async function findFilesByMatchers(
+ matchers: string[],
+ destinationDir: string,
+ options?: { cwd?: string; ignore?: string[] }
+): Promise<MatcherResult[]> {
+ const result: MatcherResult[] = [];
+ const cwd = options?.cwd ?? process.cwd();
+
+ for (const matcher of matchers) {
+ const files = await glob({
+ patterns: [matcher],
+ cwd,
+ ignore: options?.ignore ?? [],
+ onlyFiles: true,
+ absolute: true,
+ });
+
+ const assets: CopyPair[] = files.map((file) => {
+ const pathInsideDestinationDir = relative(cwd, file)
+ .split(posix.sep)
+ .filter((p) => p !== "..")
+ .join(posix.sep);
+ return {
+ source: file,
+ destination: join(destinationDir, pathInsideDestinationDir),
+ };
+ });
+
+ result.push({ matcher, assets });
+ }
+
+ return result;
+}
+
+/**
+ * Copy a single file, creating parent directories as needed.
+ */
+export async function copyFileEnsuringDir(source: string, destination: string): Promise<void> {
+ await mkdir(dirname(destination), { recursive: true });
+ await copyFile(source, destination);
+}
+
+/**
+ * Copy every pair in the given matcher results. Parent directories are
+ * created automatically. Returns the total number of files copied.
+ */
+export async function copyMatcherResults(
+ matcherResults: MatcherResult[],
+ onCopy?: (pair: CopyPair) => void
+): Promise<number> {
+ let count = 0;
+ for (const { assets } of matcherResults) {
+ for (const pair of assets) {
+ onCopy?.(pair);
+ await copyFileEnsuringDir(pair.source, pair.destination);
+ count++;
+ }
+ }
+ return count;
+}
+
+/**
+ * Recursively copy a directory to another location. Preserves structure;
+ * overwrites existing files at the destination.
+ *
+ * Used by the built-in skill bundler — we copy entire skill folders as a
+ * unit, not file-by-file.
+ */
+export async function copyDirectoryRecursive(source: string, destination: string): Promise<void> {
+ await mkdir(destination, { recursive: true });
+ await cp(source, destination, { recursive: true, force: true });
+}
diff --git a/packages/core/package.json b/packages/core/package.json
index f58708dff92..34acdb91b89 100644
--- a/packages/core/package.json
+++ b/packages/core/package.json
@@ -41,6 +41,8 @@
"./v3/utils/omit": "./src/v3/utils/omit.ts",
"./v3/utils/retries": "./src/v3/utils/retries.ts",
"./v3/utils/structuredLogger": "./src/v3/utils/structuredLogger.ts",
+ "./v3/chat-client": "./src/v3/chat-client.ts",
+ "./v3/test": "./src/v3/test/index.ts",
"./v3/zodfetch": "./src/v3/zodfetch.ts",
"./v3/zodMessageHandler": "./src/v3/zodMessageHandler.ts",
"./v3/zodNamespace": "./src/v3/zodNamespace.ts",
@@ -87,6 +89,9 @@
"v3/errors": [
"dist/commonjs/v3/errors.d.ts"
],
+ "v3/chat-client": [
+ "dist/commonjs/v3/chat-client.d.ts"
+ ],
"v3/logger-api": [
"dist/commonjs/v3/logger-api.d.ts"
],
@@ -152,6 +157,9 @@
],
"v3/isomorphic": [
"dist/commonjs/v3/isomorphic/index.d.ts"
+ ],
+ "v3/test": [
+ "dist/commonjs/v3/test/index.d.ts"
]
}
},
@@ -446,6 +454,28 @@
"default": "./dist/commonjs/v3/utils/structuredLogger.js"
}
},
+ "./v3/chat-client": {
+ "import": {
+ "@triggerdotdev/source": "./src/v3/chat-client.ts",
+ "types": "./dist/esm/v3/chat-client.d.ts",
+ "default": "./dist/esm/v3/chat-client.js"
+ },
+ "require": {
+ "types": "./dist/commonjs/v3/chat-client.d.ts",
+ "default": "./dist/commonjs/v3/chat-client.js"
+ }
+ },
+ "./v3/test": {
+ "import": {
+ "@triggerdotdev/source": "./src/v3/test/index.ts",
+ "types": "./dist/esm/v3/test/index.d.ts",
+ "default": "./dist/esm/v3/test/index.js"
+ },
+ "require": {
+ "types": "./dist/commonjs/v3/test/index.d.ts",
+ "default": "./dist/commonjs/v3/test/index.js"
+ }
+ },
"./v3/zodfetch": {
"import": {
"@triggerdotdev/source": "./src/v3/zodfetch.ts",
diff --git a/packages/core/src/v3/chat-client.ts b/packages/core/src/v3/chat-client.ts
new file mode 100644
index 00000000000..b1c96146df6
--- /dev/null
+++ b/packages/core/src/v3/chat-client.ts
@@ -0,0 +1,207 @@
+/**
+ * Chat shared types used by backend (ai.ts) and frontend (chat.ts)
+ * code paths — primarily {@link ChatStoreChunk} + {@link applyChatStorePatch}
+ * for the `chat.store` primitive. Pre-Session transport also exported
+ * `CHAT_STREAM_KEY` / `CHAT_MESSAGES_STREAM_ID` / `CHAT_STOP_STREAM_ID`
+ * from here; those are gone — chat output and input both live on the
+ * backing Session now (see `@trigger.dev/sdk/sessions`).
+ */
+
+// ─── chat.store chunk types ────────────────────────────────────────
+//
+// First-class chunk types for `chat.store` — bidirectional shared data
+// between a chat.agent and its clients. Emitted on the same S2 output
+// stream as UIMessageChunks but intercepted by the transport (not
+// passed to the AI SDK).
+
+/**
+ * An RFC 6902 JSON Patch operation used by `chat.store.patch()` and
+ * emitted inside {@link ChatStoreDeltaChunk}.
+ *
+ * @see https://tools.ietf.org/html/rfc6902
+ */
+export type ChatStorePatchOperation =
+ | { op: "add"; path: string; value: unknown }
+ | { op: "remove"; path: string }
+ | { op: "replace"; path: string; value: unknown }
+ | { op: "move"; path: string; from: string }
+ | { op: "copy"; path: string; from: string }
+ | { op: "test"; path: string; value: unknown };
+
+/** Full-value snapshot — emitted by `chat.store.set(...)`. */
+export type ChatStoreSnapshotChunk = {
+ type: "store-snapshot";
+ value: unknown;
+};
+
+/** Incremental update — emitted by `chat.store.patch([...])`. */
+export type ChatStoreDeltaChunk = {
+ type: "store-delta";
+ operations: ChatStorePatchOperation[];
+};
+
+export type ChatStoreChunk = ChatStoreSnapshotChunk | ChatStoreDeltaChunk;
+
+// ─── RFC 6902 JSON Patch applier ───────────────────────────────────
+//
+// Minimal in-process implementation so we don't pull a runtime dep
+// into the SDK or webapp. Handles the six RFC 6902 ops with RFC 6901
+// JSON Pointer paths. Used by `chat.store.patch()` on the agent and
+// the matching client-side `applyStorePatch` on the transport.
+
+// Reject these segments at the parser to prevent prototype pollution: a
+// malicious patch like `{ op: "replace", path: "/__proto__/polluted", value: 1 }`
+// would otherwise mutate Object.prototype. Patches with these keys aren't
+// legitimate for chat.store, so reject the whole patch with a clear error.
+const FORBIDDEN_POINTER_SEGMENTS = new Set(["__proto__", "constructor", "prototype"]);
+
+function parseJsonPointer(path: string): string[] {
+ if (path === "") return [];
+ if (!path.startsWith("/")) {
+ throw new Error(`Invalid JSON Pointer (must start with "/"): ${path}`);
+ }
+ const tokens = path
+ .slice(1)
+ .split("/")
+ .map((segment) => segment.replace(/~1/g, "/").replace(/~0/g, "~"));
+ for (const token of tokens) {
+ if (FORBIDDEN_POINTER_SEGMENTS.has(token)) {
+ throw new Error(`Invalid JSON Pointer segment "${token}" in path "${path}"`);
+ }
+ }
+ return tokens;
+}
+
+function cloneValue<T>(value: T): T {
+ if (value === undefined || value === null) return value;
+ if (typeof structuredClone === "function") {
+ try {
+ return structuredClone(value);
+ } catch {
+ // Fall through for values that can't be structured-cloned
+ }
+ }
+ return JSON.parse(JSON.stringify(value));
+}
+
+function getParentAndKey(
+ doc: unknown,
+ tokens: string[]
+): { parent: any; lastToken: string } {
+ if (tokens.length === 0) {
+ throw new Error("Cannot get parent of root");
+ }
+ let parent: any = doc;
+ for (let i = 0; i < tokens.length - 1; i++) {
+ if (parent == null || typeof parent !== "object") {
+ throw new Error(`Path traversal failed at segment "${tokens[i]}"`);
+ }
+ const key = Array.isArray(parent) ? Number(tokens[i]) : tokens[i];
+ parent = (parent as any)[key as any];
+ }
+ return { parent, lastToken: tokens[tokens.length - 1]! };
+}
+
+function readPointer(doc: unknown, tokens: string[]): unknown {
+ if (tokens.length === 0) return doc;
+ let cursor: any = doc;
+ for (const token of tokens) {
+ if (cursor == null) return undefined;
+ const key = Array.isArray(cursor) ? Number(token) : token;
+ cursor = cursor[key];
+ }
+ return cursor;
+}
+
+function removeAt(parent: any, lastToken: string): void {
+ if (Array.isArray(parent)) {
+ parent.splice(Number(lastToken), 1);
+ } else if (parent && typeof parent === "object") {
+ if (
+ lastToken === "__proto__" ||
+ lastToken === "constructor" ||
+ lastToken === "prototype"
+ ) {
+ throw new Error(`Refusing to mutate forbidden key "${lastToken}"`);
+ }
+ delete parent[lastToken];
+ } else {
+ throw new Error("Cannot remove: parent is not a container");
+ }
+}
+
+function insertAt(parent: any, lastToken: string, value: unknown, op: "add" | "replace"): void {
+ if (Array.isArray(parent)) {
+ const idx = lastToken === "-" ? parent.length : Number(lastToken);
+ if (op === "add") parent.splice(idx, 0, value);
+ else parent[idx] = value;
+ } else if (parent && typeof parent === "object") {
+ if (
+ lastToken === "__proto__" ||
+ lastToken === "constructor" ||
+ lastToken === "prototype"
+ ) {
+ throw new Error(`Refusing to mutate forbidden key "${lastToken}"`);
+ }
+ parent[lastToken] = value;
+ } else {
+ throw new Error("Cannot insert: parent is not a container");
+ }
+}
+
+/**
+ * Apply an RFC 6902 JSON Patch to a document and return the new value.
+ * Never mutates the input.
+ */
+export function applyChatStorePatch(
+ doc: unknown,
+ operations: readonly ChatStorePatchOperation[]
+): unknown {
+ let result: any = doc === undefined ? undefined : cloneValue(doc);
+
+ for (const op of operations) {
+ const tokens = parseJsonPointer(op.path);
+
+ if (op.op === "test") {
+ const actual = readPointer(result, tokens);
+ if (JSON.stringify(actual) !== JSON.stringify(op.value)) {
+ throw new Error(`JSON Patch test failed at path "${op.path}"`);
+ }
+ continue;
+ }
+
+ if (op.op === "remove") {
+ if (tokens.length === 0) {
+ result = undefined;
+ continue;
+ }
+ const { parent, lastToken } = getParentAndKey(result, tokens);
+ removeAt(parent, lastToken);
+ continue;
+ }
+
+ // add / replace / move / copy all insert a value at `path`
+ let valueToInsert: unknown;
+ if (op.op === "add" || op.op === "replace") {
+ valueToInsert = cloneValue(op.value);
+ } else {
+ // move / copy — source must exist
+ const fromTokens = parseJsonPointer(op.from);
+ valueToInsert = cloneValue(readPointer(result, fromTokens));
+ if (op.op === "move" && fromTokens.length > 0) {
+ const { parent: fromParent, lastToken: fromLast } = getParentAndKey(result, fromTokens);
+ removeAt(fromParent, fromLast);
+ }
+ }
+
+ if (tokens.length === 0) {
+ result = valueToInsert;
+ continue;
+ }
+
+ const { parent, lastToken } = getParentAndKey(result, tokens);
+ insertAt(parent, lastToken, valueToInsert, op.op === "replace" ? "replace" : "add");
+ }
+
+ return result;
+}
diff --git a/packages/core/src/v3/resource-catalog/catalog.ts b/packages/core/src/v3/resource-catalog/catalog.ts
index 5b3ab023639..5c443b253cf 100644
--- a/packages/core/src/v3/resource-catalog/catalog.ts
+++ b/packages/core/src/v3/resource-catalog/catalog.ts
@@ -1,4 +1,11 @@
-import { PromptManifest, QueueManifest, TaskManifest, WorkerManifest } from "../schemas/index.js";
+import {
+ PromptManifest,
+ QueueManifest,
+ SkillManifest,
+ SkillMetadata,
+ TaskManifest,
+ WorkerManifest,
+} from "../schemas/index.js";
import { PromptMetadataWithFunctions, TaskMetadataWithFunctions, TaskSchema } from "../types/index.js";
export interface ResourceCatalog {
@@ -18,4 +25,7 @@ export interface ResourceCatalog {
 listPromptManifests(): Array<PromptManifest>;
getPrompt(id: string): PromptMetadataWithFunctions | undefined;
getPromptSchema(id: string): TaskSchema | undefined;
+ registerSkillMetadata(skill: SkillMetadata): void;
+ listSkillManifests(): Array<SkillManifest>;
+ getSkillManifest(id: string): SkillManifest | undefined;
}
diff --git a/packages/core/src/v3/resource-catalog/index.ts b/packages/core/src/v3/resource-catalog/index.ts
index 9ce7dee64cf..f809ede8135 100644
--- a/packages/core/src/v3/resource-catalog/index.ts
+++ b/packages/core/src/v3/resource-catalog/index.ts
@@ -1,6 +1,13 @@
const API_NAME = "resource-catalog";
-import { PromptManifest, QueueManifest, TaskManifest, WorkerManifest } from "../schemas/index.js";
+import {
+ PromptManifest,
+ QueueManifest,
+ SkillManifest,
+ SkillMetadata,
+ TaskManifest,
+ WorkerManifest,
+} from "../schemas/index.js";
import { PromptMetadataWithFunctions, TaskMetadataWithFunctions, TaskSchema } from "../types/index.js";
import { getGlobal, registerGlobal, unregisterGlobal } from "../utils/globals.js";
import { type ResourceCatalog } from "./catalog.js";
@@ -93,6 +100,18 @@ export class ResourceCatalogAPI {
return this.#getCatalog().getPromptSchema(id);
}
+ public registerSkillMetadata(skill: SkillMetadata): void {
+ this.#getCatalog().registerSkillMetadata(skill);
+ }
+
+ public listSkillManifests(): Array<SkillManifest> {
+ return this.#getCatalog().listSkillManifests();
+ }
+
+ public getSkillManifest(id: string): SkillManifest | undefined {
+ return this.#getCatalog().getSkillManifest(id);
+ }
+
#getCatalog(): ResourceCatalog {
return getGlobal(API_NAME) ?? NOOP_RESOURCE_CATALOG;
}
diff --git a/packages/core/src/v3/resource-catalog/noopResourceCatalog.ts b/packages/core/src/v3/resource-catalog/noopResourceCatalog.ts
index 8f77544f05c..5da74d4a9b1 100644
--- a/packages/core/src/v3/resource-catalog/noopResourceCatalog.ts
+++ b/packages/core/src/v3/resource-catalog/noopResourceCatalog.ts
@@ -1,4 +1,11 @@
-import { PromptManifest, QueueManifest, TaskManifest, WorkerManifest } from "../schemas/index.js";
+import {
+ PromptManifest,
+ QueueManifest,
+ SkillManifest,
+ SkillMetadata,
+ TaskManifest,
+ WorkerManifest,
+} from "../schemas/index.js";
import { type PromptMetadataWithFunctions, type TaskMetadataWithFunctions, type TaskSchema } from "../types/index.js";
import { ResourceCatalog } from "./catalog.js";
@@ -70,4 +77,16 @@ export class NoopResourceCatalog implements ResourceCatalog {
getPromptSchema(id: string): TaskSchema | undefined {
return undefined;
}
+
+ registerSkillMetadata(skill: SkillMetadata): void {
+ // noop
+ }
+
+ listSkillManifests(): Array<SkillManifest> {
+ return [];
+ }
+
+ getSkillManifest(id: string): SkillManifest | undefined {
+ return undefined;
+ }
}
diff --git a/packages/core/src/v3/resource-catalog/standardResourceCatalog.ts b/packages/core/src/v3/resource-catalog/standardResourceCatalog.ts
index ea134a45663..0a67a4fd9a4 100644
--- a/packages/core/src/v3/resource-catalog/standardResourceCatalog.ts
+++ b/packages/core/src/v3/resource-catalog/standardResourceCatalog.ts
@@ -1,6 +1,8 @@
import {
PromptManifest,
PromptMetadata,
+ SkillManifest,
+ SkillMetadata,
TaskFileMetadata,
TaskMetadata,
TaskManifest,
@@ -21,6 +23,8 @@ export class StandardResourceCatalog implements ResourceCatalog {
 private _promptSchemas: Map<string, TaskSchema> = new Map();
 private _currentFileContext?: Omit<TaskFileMetadata, "exportName">;
 private _queueMetadata: Map<string, QueueManifest> = new Map();
+ private _skillMetadata: Map<string, SkillMetadata> = new Map();
+ private _skillFileMetadata: Map<string, Omit<TaskFileMetadata, "exportName">> = new Map();
setCurrentFileContext(filePath: string, entryPoint: string) {
this._currentFileContext = { filePath, entryPoint };
@@ -86,25 +90,31 @@ export class StandardResourceCatalog implements ResourceCatalog {
}
 updateTaskMetadata(id: string, updates: Partial<TaskMetadataWithFunctions>): void {
+ const { fns, schema, ...metadataUpdates } = updates;
+
const existingMetadata = this._taskMetadata.get(id);
- if (existingMetadata) {
+ if (existingMetadata && Object.keys(metadataUpdates).length > 0) {
this._taskMetadata.set(id, {
...existingMetadata,
- ...updates,
+ ...metadataUpdates,
});
}
- if (updates.fns) {
+ if (fns) {
const existingFunctions = this._taskFunctions.get(id);
if (existingFunctions) {
this._taskFunctions.set(id, {
...existingFunctions,
- ...updates.fns,
+ ...fns,
});
}
}
+
+ if (schema) {
+ this._taskSchemas.set(id, schema);
+ }
}
// Return all the tasks, without the functions
@@ -233,6 +243,58 @@ export class StandardResourceCatalog implements ResourceCatalog {
};
}
+ registerSkillMetadata(skill: SkillMetadata): void {
+ if (!this._currentFileContext) {
+ return;
+ }
+
+ if (!skill.id) {
+ return;
+ }
+
+ const existing = this._skillMetadata.get(skill.id);
+ if (existing && existing.sourcePath !== skill.sourcePath) {
+ console.warn(
+ `Skill "${skill.id}" is defined twice with different paths. Keeping the first:\n` +
+ ` existing: ${existing.sourcePath}\n` +
+ ` ignored: ${skill.sourcePath}`
+ );
+ return;
+ }
+
+ this._skillFileMetadata.set(skill.id, {
+ ...this._currentFileContext,
+ });
+ this._skillMetadata.set(skill.id, skill);
+ }
+
+ listSkillManifests(): Array<SkillManifest> {
+ const result: Array<SkillManifest> = [];
+
+ for (const [id, metadata] of this._skillMetadata) {
+ const fileMetadata = this._skillFileMetadata.get(id);
+ if (!fileMetadata) continue;
+
+ result.push({
+ ...metadata,
+ ...fileMetadata,
+ });
+ }
+
+ return result;
+ }
+
+ getSkillManifest(id: string): SkillManifest | undefined {
+ const metadata = this._skillMetadata.get(id);
+ const fileMetadata = this._skillFileMetadata.get(id);
+ if (!metadata || !fileMetadata) return undefined;
+
+ return {
+ ...metadata,
+ ...fileMetadata,
+ };
+ }
+
disable() {
// noop
}
diff --git a/packages/core/src/v3/taskContext/index.test.ts b/packages/core/src/v3/taskContext/index.test.ts
new file mode 100644
index 00000000000..34d169a177c
--- /dev/null
+++ b/packages/core/src/v3/taskContext/index.test.ts
@@ -0,0 +1,86 @@
+import { afterEach, describe, expect, it } from "vitest";
+import { unregisterGlobal } from "../utils/globals.js";
+import { SemanticInternalAttributes } from "../semanticInternalAttributes.js";
+import { TaskContextAPI } from "./index.js";
+
+const FAKE_CTX = {
+ attempt: { id: "attempt_1", number: 1, startedAt: new Date(), status: "EXECUTING" as const },
+ run: {
+ id: "run_1",
+ payload: undefined,
+ payloadType: "application/json",
+ context: undefined,
+ createdAt: new Date(),
+ tags: [],
+ isTest: false,
+ isReplay: false,
+ startedAt: new Date(),
+ durationMs: 0,
+ costInCents: 0,
+ baseCostInCents: 0,
+ },
+ task: { id: "my-task", filePath: "src/trigger/task.ts", exportName: "myTask" },
+ queue: { id: "queue_1", name: "default" },
+ environment: { id: "env_1", slug: "dev", type: "DEVELOPMENT" as const },
+ organization: { id: "org_1", slug: "acme", name: "Acme" },
+ project: { id: "proj_1", ref: "proj_xyz", slug: "demo", name: "Demo" },
+ machine: {
+ name: "small-1x" as const,
+ cpu: 0.5,
+ memory: 0.5,
+ centsPerMs: 0.0001,
+ },
+} as never;
+
+const FAKE_WORKER = { id: "worker_1", version: "1.0.0", contentHash: "abc" } as never;
+
+describe("TaskContextAPI conversation id", () => {
+ afterEach(() => {
+ unregisterGlobal("task-context");
+ TaskContextAPI.getInstance().setConversationId(undefined);
+ });
+
+ it("returns no conversation attribute when setConversationId was never called", () => {
+ const api = TaskContextAPI.getInstance();
+ api.setGlobalTaskContext({ ctx: FAKE_CTX, worker: FAKE_WORKER });
+
+ expect(api.attributes[SemanticInternalAttributes.GEN_AI_CONVERSATION_ID]).toBeUndefined();
+ });
+
+ it("includes gen_ai.conversation.id after setConversationId", () => {
+ const api = TaskContextAPI.getInstance();
+ api.setGlobalTaskContext({ ctx: FAKE_CTX, worker: FAKE_WORKER });
+
+ api.setConversationId("chat_123");
+
+ expect(api.attributes[SemanticInternalAttributes.GEN_AI_CONVERSATION_ID]).toBe("chat_123");
+ });
+
+ it("clears the conversation attribute when called with undefined", () => {
+ const api = TaskContextAPI.getInstance();
+ api.setGlobalTaskContext({ ctx: FAKE_CTX, worker: FAKE_WORKER });
+ api.setConversationId("chat_123");
+
+ api.setConversationId(undefined);
+
+ expect(api.attributes[SemanticInternalAttributes.GEN_AI_CONVERSATION_ID]).toBeUndefined();
+ expect(api.conversationId).toBeUndefined();
+ });
+
+ it("returns no attributes when there is no task context", () => {
+ const api = TaskContextAPI.getInstance();
+ api.setConversationId("chat_123");
+
+ expect(api.attributes).toEqual({});
+ });
+
+ it("clears conversation id when a new task context is registered (warm restart)", () => {
+ const api = TaskContextAPI.getInstance();
+ api.setGlobalTaskContext({ ctx: FAKE_CTX, worker: FAKE_WORKER });
+ api.setConversationId("chat_old");
+
+ api.setGlobalTaskContext({ ctx: FAKE_CTX, worker: FAKE_WORKER });
+
+ expect(api.attributes[SemanticInternalAttributes.GEN_AI_CONVERSATION_ID]).toBeUndefined();
+ });
+});
diff --git a/packages/core/src/v3/taskContext/index.ts b/packages/core/src/v3/taskContext/index.ts
index 92e0194cde9..ecbfa184a6b 100644
--- a/packages/core/src/v3/taskContext/index.ts
+++ b/packages/core/src/v3/taskContext/index.ts
@@ -9,6 +9,7 @@ const API_NAME = "task-context";
export class TaskContextAPI {
private static _instance?: TaskContextAPI;
private _runDisabled = false;
+ private _conversationId?: string;
private constructor() {}
@@ -45,6 +46,7 @@ export class TaskContextAPI {
return {
...this.contextAttributes,
...this.workerAttributes,
+ ...this.conversationAttributes,
[SemanticInternalAttributes.WARM_START]: !!this.isWarmStart,
};
}
@@ -52,6 +54,19 @@ export class TaskContextAPI {
return {};
}
+ get conversationAttributes(): Attributes {
+ if (!this._conversationId) return {};
+ return { [SemanticInternalAttributes.GEN_AI_CONVERSATION_ID]: this._conversationId };
+ }
+
+ get conversationId(): string | undefined {
+ return this._conversationId;
+ }
+
+ public setConversationId(conversationId: string | undefined): void {
+ this._conversationId = conversationId || undefined;
+ }
+
get resourceAttributes(): Attributes {
if (this.ctx) {
return {
@@ -109,6 +124,11 @@ export class TaskContextAPI {
public setGlobalTaskContext(taskContext: TaskContext): boolean {
this._runDisabled = false;
+ // Each run boot re-registers the global; clear any conversation id
+ // left over from a previous run on this warm-restarted process so
+ // attributes don't bleed across runs that don't call
+ // `setConversationId` themselves.
+ this._conversationId = undefined;
return registerGlobal(API_NAME, taskContext, true);
}
diff --git a/packages/core/src/v3/taskContext/otelProcessors.ts b/packages/core/src/v3/taskContext/otelProcessors.ts
index 1c0958d655d..fc30e9d1145 100644
--- a/packages/core/src/v3/taskContext/otelProcessors.ts
+++ b/packages/core/src/v3/taskContext/otelProcessors.ts
@@ -36,6 +36,17 @@ export class TaskContextSpanProcessor implements SpanProcessor {
if (!taskContext.isRunDisabled && taskContext.ctx.run.tags?.length) {
span.setAttribute(SemanticInternalAttributes.RUN_TAGS, taskContext.ctx.run.tags);
}
+
+ // Stamp `gen_ai.conversation.id` (OTel GenAI semantic convention)
+ // directly on every span so it survives the OTLP ingest's `ctx.*`
+ // strip and lands in the stored attributes column without a schema
+ // migration.
+ if (taskContext.conversationId) {
+ span.setAttribute(
+ SemanticInternalAttributes.GEN_AI_CONVERSATION_ID,
+ taskContext.conversationId
+ );
+ }
}
if (!isPartialSpan(span) && !skipPartialSpan(span)) {
@@ -178,6 +189,11 @@ export class TaskContextMetricExporter implements PushMetricExporter {
contextAttrs[SemanticInternalAttributes.RUN_TAGS] = ctx.run.tags;
}
+ if (taskContext.conversationId) {
+ contextAttrs[SemanticInternalAttributes.GEN_AI_CONVERSATION_ID] =
+ taskContext.conversationId;
+ }
+
const modified: ResourceMetrics = {
resource: metrics.resource,
scopeMetrics: metrics.scopeMetrics.map((scope) => ({
diff --git a/packages/core/src/v3/test/index.ts b/packages/core/src/v3/test/index.ts
new file mode 100644
index 00000000000..402f618c01b
--- /dev/null
+++ b/packages/core/src/v3/test/index.ts
@@ -0,0 +1,9 @@
+export {
+ runInMockTaskContext,
+ type MockTaskContextDrivers,
+ type MockTaskContextOptions,
+} from "./mock-task-context.js";
+export { TestInputStreamManager } from "./test-input-stream-manager.js";
+export { TestRealtimeStreamsManager } from "./test-realtime-streams-manager.js";
+export { TestRunMetadataManager } from "./test-run-metadata-manager.js";
+export { TestSessionStreamManager } from "./test-session-stream-manager.js";
diff --git a/packages/core/src/v3/test/mock-task-context.ts b/packages/core/src/v3/test/mock-task-context.ts
new file mode 100644
index 00000000000..66e58490019
--- /dev/null
+++ b/packages/core/src/v3/test/mock-task-context.ts
@@ -0,0 +1,294 @@
+import { inputStreams } from "../input-streams-api.js";
+import { realtimeStreams } from "../realtime-streams-api.js";
+import { sessionStreams } from "../session-streams-api.js";
+import { localsAPI } from "../locals-api.js";
+import { runMetadata } from "../run-metadata-api.js";
+import { taskContext } from "../task-context-api.js";
+import { lifecycleHooks } from "../lifecycle-hooks-api.js";
+import { runtime } from "../runtime-api.js";
+import { StandardLocalsManager } from "../locals/manager.js";
+import { StandardLifecycleHooksManager } from "../lifecycleHooks/manager.js";
+import { NoopRuntimeManager } from "../runtime/noopRuntimeManager.js";
+import { unregisterGlobal } from "../utils/globals.js";
+import type { ServerBackgroundWorker, TaskRunContext } from "../schemas/index.js";
+import type { LocalsKey } from "../locals/types.js";
+import type { SessionChannelIO } from "../sessionStreams/types.js";
+import { TestInputStreamManager } from "./test-input-stream-manager.js";
+import { TestRealtimeStreamsManager } from "./test-realtime-streams-manager.js";
+import { TestRunMetadataManager } from "./test-run-metadata-manager.js";
+import { TestSessionStreamManager } from "./test-session-stream-manager.js";
+
+/**
+ * Shallow-partial overrides applied on top of the default mock
+ * `TaskRunContext`. Each sub-object is a partial of its real shape —
+ * unset fields get sensible defaults.
+ */
+export type MockTaskRunContextOverrides = {
+ task?: Partial<TaskRunContext["task"]>;
+ attempt?: Partial<TaskRunContext["attempt"]>;
+ run?: Partial<TaskRunContext["run"]>;
+ machine?: Partial<TaskRunContext["machine"]>;
+ queue?: Partial<TaskRunContext["queue"]>;
+ environment?: Partial<TaskRunContext["environment"]>;
+ organization?: Partial<TaskRunContext["organization"]>;
+ project?: Partial<TaskRunContext["project"]>;
+ batch?: TaskRunContext["batch"];
+};
+
+/**
+ * Options for overriding parts of the mock task context.
+ */
+export type MockTaskContextOptions = {
+ /** Overrides applied on top of the default mock `TaskRunContext`. */
+ ctx?: MockTaskRunContextOverrides;
+ /** Overrides applied on top of the default `ServerBackgroundWorker`. */
+ worker?: Partial;
+ /** Whether this is a warm start. */
+ isWarmStart?: boolean;
+};
+
+/**
+ * Drivers passed to the function running inside `runInMockTaskContext`.
+ */
+export type MockTaskContextDrivers = {
+ /** Push data into input streams — simulates realtime input from outside the task. */
+ inputs: {
+ /**
+ * Send `data` to the named input stream. Resolves when all `.on()`
+ * handlers have run.
+ */
+ send(streamId: string, data: unknown): Promise<void>;
+ /** Resolve any pending `.once()` waiters with a timeout error. */
+ close(streamId: string): void;
+ };
+ /** Inspect chunks written to output (realtime) streams. */
+ outputs: {
+ /** All chunks for a given stream, in the order they were written. */
+ chunks<T = unknown>(streamId: string): T[];
+ /** All chunks across every stream, keyed by stream id. */
+ all(): Record<string, unknown[]>;
+ /** Clear chunks for one stream, or all streams if no id is provided. */
+ clear(streamId?: string): void;
+ /**
+ * Register a listener fired for every chunk written to any stream.
+ * Returns an unsubscribe function.
+ */
+ onWrite(listener: (streamId: string, chunk: unknown) => void): () => void;
+ };
+ /** Read or seed locals for the run. */
+ locals: {
+ /** Read a local set by either the task or `set()` below. */
+ get<T>(key: LocalsKey<T>): T | undefined;
+ /**
+ * Pre-seed a local before the task runs. Use this for dependency
+ * injection — e.g. supply a test database client that the agent's
+ * hooks read via `locals.get()` instead of constructing the prod one.
+ */
+ set<T>(key: LocalsKey<T>, value: T): void;
+ };
+ /**
+ * Session-scoped channel drivers. The `.in` side is backed by a
+ * {@link TestSessionStreamManager} installed as the `sessionStreams`
+ * global — so the task's `session.in.on/once/peek/waitWithIdleTimeout`
+ * calls receive records sent through this driver.
+ */
+ sessions: {
+ in: {
+ /**
+ * Send a record onto `session.in` for the given session. Resolves
+ * pending `once()` waiters and fires all `on()` handlers.
+ */
+ send(sessionId: string, data: unknown, io?: SessionChannelIO): Promise<void>;
+ /** Close pending `once()` waiters with a timeout error. */
+ close(sessionId: string, io?: SessionChannelIO): void;
+ };
+ };
+ /** The mock `TaskRunContext` assembled from defaults + user overrides. */
+ ctx: TaskRunContext;
+};
+
+function defaultTaskRunContext(overrides?: MockTaskRunContextOverrides): TaskRunContext {
+ return {
+ task: {
+ id: "test-task",
+ filePath: "test-task.ts",
+ ...overrides?.task,
+ },
+ attempt: {
+ number: 1,
+ startedAt: new Date(),
+ ...overrides?.attempt,
+ },
+ run: {
+ id: "run_test",
+ tags: [],
+ isTest: false,
+ isReplay: false,
+ createdAt: new Date(),
+ startedAt: new Date(),
+ ...overrides?.run,
+ },
+ machine: {
+ name: "micro",
+ cpu: 1,
+ memory: 0.5,
+ centsPerMs: 0,
+ ...overrides?.machine,
+ },
+ queue: {
+ name: "test-queue",
+ id: "test-queue-id",
+ ...overrides?.queue,
+ },
+ environment: {
+ id: "test-env-id",
+ slug: "test-env",
+ type: "DEVELOPMENT",
+ ...overrides?.environment,
+ },
+ organization: {
+ id: "test-org-id",
+ slug: "test-org",
+ name: "Test Org",
+ ...overrides?.organization,
+ },
+ project: {
+ id: "test-project-id",
+ ref: "test-project-ref",
+ slug: "test-project",
+ name: "Test Project",
+ ...overrides?.project,
+ },
+ batch: overrides?.batch,
+ };
+}
+
+function defaultWorker(overrides?: Partial<ServerBackgroundWorker>): ServerBackgroundWorker {
+ return {
+ id: "test-worker-id",
+ version: "test-version",
+ contentHash: "test-content-hash",
+ engine: "V2",
+ ...overrides,
+ };
+}
+
+/**
+ * Run a function inside a fully mocked task runtime context.
+ *
+ * Installs in-memory test managers for `locals`, `inputStreams`,
+ * `realtimeStreams`, `lifecycleHooks`, and `runtime`, sets a mock
+ * `TaskContext`, and tears everything down when the function returns.
+ *
+ * Inside the function, any code that reads from `locals`, `inputStreams`,
+ * `realtimeStreams`, or `taskContext.ctx` will see the mock context —
+ * so you can directly invoke the internal `run` function of any task
+ * (including `chat.agent`) without hitting the Trigger.dev runtime.
+ *
+ * @example
+ * ```ts
+ * import { runInMockTaskContext } from "@trigger.dev/core/v3/test";
+ *
+ * await runInMockTaskContext(
+ * async ({ inputs, outputs, ctx }) => {
+ * // Fire an input stream from the "outside"
+ * setTimeout(() => {
+ * inputs.send("chat-messages", { messages: [], chatId: "c1" });
+ * }, 0);
+ *
+ * // Run task code that reads from inputStreams.once(...)
+ * await myTask.fns.run(payload, { ctx, signal: new AbortController().signal });
+ *
+ * // Inspect chunks written to the output stream
+ * expect(outputs.chunks("chat")).toContainEqual({ type: "text-delta", delta: "hi" });
+ * },
+ * { ctx: { run: { id: "run_abc" } } }
+ * );
+ * ```
+ */
+export async function runInMockTaskContext<T>(
+ fn: (drivers: MockTaskContextDrivers) => T | Promise<T>,
+ options?: MockTaskContextOptions
+): Promise<T> {
+ const ctx = defaultTaskRunContext(options?.ctx);
+ const worker = defaultWorker(options?.worker);
+
+ const localsManager = new StandardLocalsManager();
+ const lifecycleManager = new StandardLifecycleHooksManager();
+ const runtimeManager = new NoopRuntimeManager();
+ const metadataManager = new TestRunMetadataManager();
+ const inputManager = new TestInputStreamManager();
+ const outputManager = new TestRealtimeStreamsManager();
+ const sessionStreamManager = new TestSessionStreamManager();
+
+ // Unregister any previously-installed managers so `setGlobal*` wins —
+ // `registerGlobal` returns false silently if an entry already exists.
+ unregisterGlobal("locals");
+ unregisterGlobal("lifecycle-hooks");
+ unregisterGlobal("runtime");
+ unregisterGlobal("run-metadata");
+ unregisterGlobal("input-streams");
+ unregisterGlobal("realtime-streams");
+ unregisterGlobal("session-streams");
+ unregisterGlobal("task-context");
+
+ localsAPI.setGlobalLocalsManager(localsManager);
+ lifecycleHooks.setGlobalLifecycleHooksManager(lifecycleManager);
+ runtime.setGlobalRuntimeManager(runtimeManager);
+ runMetadata.setGlobalManager(metadataManager);
+ inputStreams.setGlobalManager(inputManager);
+ realtimeStreams.setGlobalManager(outputManager);
+ sessionStreams.setGlobalManager(sessionStreamManager);
+ taskContext.setGlobalTaskContext({
+ ctx,
+ worker,
+ isWarmStart: options?.isWarmStart ?? false,
+ });
+
+ const drivers: MockTaskContextDrivers = {
+ inputs: {
+ send: (streamId, data) => inputManager.__sendFromTest(streamId, data),
+ close: (streamId) => inputManager.__closeFromTest(streamId),
+ },
+ outputs: {
+ chunks: (streamId) => outputManager.__chunksFromTest(streamId),
+ all: () => outputManager.__allChunksFromTest(),
+ clear: (streamId) => outputManager.__clearFromTest(streamId),
+ onWrite: (listener) => outputManager.onWrite(listener),
+ },
+ locals: {
+ get: <T>(key: LocalsKey<T>) => localsManager.getLocal(key),
+ set: <TValue>(key: LocalsKey<TValue>, value: TValue) =>
+ localsManager.setLocal(key, value),
+ },
+ sessions: {
+ in: {
+ send: (sessionId, data, io = "in") =>
+ sessionStreamManager.__sendFromTest(sessionId, io, data),
+ close: (sessionId, io = "in") =>
+ sessionStreamManager.__closeFromTest(sessionId, io),
+ },
+ },
+ ctx,
+ };
+
+ try {
+ return await fn(drivers);
+ } finally {
+ localsAPI.disable();
+ lifecycleHooks.disable();
+ runtime.disable();
+ // taskContext.disable() only sets a flag — unregister the global so
+ // `taskContext.ctx` returns undefined after the harness returns.
+ unregisterGlobal("task-context");
+ unregisterGlobal("input-streams");
+ unregisterGlobal("realtime-streams");
+ unregisterGlobal("session-streams");
+ unregisterGlobal("run-metadata");
+ localsManager.reset();
+ inputManager.reset();
+ outputManager.reset();
+ sessionStreamManager.reset();
+ metadataManager.reset();
+ }
+}
diff --git a/packages/core/test/mockTaskContext.test.ts b/packages/core/test/mockTaskContext.test.ts
new file mode 100644
index 00000000000..5ea3685e466
--- /dev/null
+++ b/packages/core/test/mockTaskContext.test.ts
@@ -0,0 +1,226 @@
+import { describe, expect, it } from "vitest";
+import { runInMockTaskContext } from "../src/v3/test/index.js";
+import { inputStreams } from "../src/v3/input-streams-api.js";
+import { realtimeStreams } from "../src/v3/realtime-streams-api.js";
+import { locals } from "../src/v3/locals-api.js";
+import { taskContext } from "../src/v3/task-context-api.js";
+
+describe("runInMockTaskContext", () => {
+ it("installs a mock TaskRunContext with sensible defaults", async () => {
+ await runInMockTaskContext(async ({ ctx }) => {
+ expect(taskContext.ctx).toBeDefined();
+ expect(taskContext.ctx?.run.id).toBe("run_test");
+ expect(taskContext.ctx?.task.id).toBe("test-task");
+ expect(ctx.run.id).toBe("run_test");
+ });
+ });
+
+ it("applies ctx overrides on top of defaults", async () => {
+ await runInMockTaskContext(
+ async ({ ctx }) => {
+ expect(ctx.run.id).toBe("run_abc");
+ expect(ctx.task.id).toBe("my-chat-agent");
+ // Unspecified fields still use defaults
+ expect(ctx.queue.id).toBe("test-queue-id");
+ },
+ {
+ ctx: {
+ run: { id: "run_abc" },
+ task: { id: "my-chat-agent", filePath: "chat.ts" },
+ },
+ }
+ );
+ });
+
+ it("isolates locals from the surrounding context", async () => {
+ const key = locals.create<{ count: number }>("test.counter");
+
+ await runInMockTaskContext(async ({ locals: inspect }) => {
+ expect(inspect.get(key)).toBeUndefined();
+ locals.set(key, { count: 1 });
+ expect(inspect.get(key)).toEqual({ count: 1 });
+ });
+
+ // After the harness exits, the locals should be gone
+ expect(locals.get(key)).toBeUndefined();
+ });
+
+ it("tears down the task context after fn returns", async () => {
+ await runInMockTaskContext(async () => {
+ expect(taskContext.ctx).toBeDefined();
+ });
+
+ expect(taskContext.ctx).toBeUndefined();
+ });
+
+ it("tears down even when fn throws", async () => {
+ await expect(
+ runInMockTaskContext(async () => {
+ throw new Error("boom");
+ })
+ ).rejects.toThrow("boom");
+
+ expect(taskContext.ctx).toBeUndefined();
+ });
+
+ it("returns the value returned by fn", async () => {
+ const result = await runInMockTaskContext(async () => "hello");
+ expect(result).toBe("hello");
+ });
+
+ describe("input streams driver", () => {
+ it("resolves inputStreams.once() when test sends data", async () => {
+ await runInMockTaskContext(async ({ inputs }) => {
+ const pending = inputStreams.once("chat-messages");
+ setTimeout(() => inputs.send("chat-messages", { hello: "world" }), 0);
+ const result = await pending;
+ expect(result.ok).toBe(true);
+ if (result.ok) {
+ expect(result.output).toEqual({ hello: "world" });
+ }
+ });
+ });
+
+ it("fires inputStreams.on() handlers when test sends data", async () => {
+ await runInMockTaskContext(async ({ inputs }) => {
+ const received: unknown[] = [];
+ inputStreams.on("chat-messages", (data) => {
+ received.push(data);
+ });
+
+ await inputs.send("chat-messages", { n: 1 });
+ await inputs.send("chat-messages", { n: 2 });
+
+ expect(received).toEqual([{ n: 1 }, { n: 2 }]);
+ });
+ });
+
+ it("fires multiple on() handlers on the same stream", async () => {
+ await runInMockTaskContext(async ({ inputs }) => {
+ const a: unknown[] = [];
+ const b: unknown[] = [];
+ inputStreams.on("chat-messages", (data) => a.push(data));
+ inputStreams.on("chat-messages", (data) => b.push(data));
+
+ await inputs.send("chat-messages", "hi");
+ expect(a).toEqual(["hi"]);
+ expect(b).toEqual(["hi"]);
+ });
+ });
+
+ it("off() unsubscribes a handler", async () => {
+ await runInMockTaskContext(async ({ inputs }) => {
+ const received: unknown[] = [];
+ const sub = inputStreams.on("chat-messages", (data) => received.push(data));
+
+ await inputs.send("chat-messages", 1);
+ sub.off();
+ await inputs.send("chat-messages", 2);
+
+ expect(received).toEqual([1]);
+ });
+ });
+
+ it("times out once() after timeoutMs", async () => {
+ await runInMockTaskContext(async () => {
+ const result = await inputStreams.once("chat-messages", { timeoutMs: 10 });
+ expect(result.ok).toBe(false);
+ });
+ });
+
+ it("peek() returns the latest sent value", async () => {
+ await runInMockTaskContext(async ({ inputs }) => {
+ expect(inputStreams.peek("chat-messages")).toBeUndefined();
+ await inputs.send("chat-messages", { latest: true });
+ expect(inputStreams.peek("chat-messages")).toEqual({ latest: true });
+ });
+ });
+
+ it("close() rejects pending once() waiters with a timeout error", async () => {
+ await runInMockTaskContext(async ({ inputs }) => {
+ const pending = inputStreams.once("chat-messages");
+ inputs.close("chat-messages");
+ const result = await pending;
+ expect(result.ok).toBe(false);
+ });
+ });
+
+ it("resolves multiple concurrent once() waiters from a single send", async () => {
+ await runInMockTaskContext(async ({ inputs }) => {
+ const a = inputStreams.once("chat-messages");
+ const b = inputStreams.once("chat-messages");
+ await inputs.send("chat-messages", "shared");
+ const [ra, rb] = await Promise.all([a, b]);
+ expect(ra.ok && ra.output).toBe("shared");
+ expect(rb.ok && rb.output).toBe("shared");
+ });
+ });
+ });
+
+ describe("realtime streams driver", () => {
+ it("collects chunks from realtimeStreams.append()", async () => {
+ await runInMockTaskContext(async ({ outputs }) => {
+ await realtimeStreams.append("chat", "chunk-1" as unknown as BodyInit);
+ await realtimeStreams.append("chat", "chunk-2" as unknown as BodyInit);
+
+ expect(outputs.chunks("chat")).toEqual(["chunk-1", "chunk-2"]);
+ });
+ });
+
+ it("collects chunks from realtimeStreams.pipe()", async () => {
+ await runInMockTaskContext(async ({ outputs }) => {
+ const source = (async function* () {
+ yield "a";
+ yield "b";
+ yield "c";
+ })();
+
+ const instance = realtimeStreams.pipe("chat", source);
+
+ // Drain the returned stream — that's what feeds the buffer
+ for await (const _ of instance.stream) {
+ // no-op
+ }
+
+ expect(outputs.chunks("chat")).toEqual(["a", "b", "c"]);
+ });
+ });
+
+ it("separates chunks by stream id", async () => {
+ await runInMockTaskContext(async ({ outputs }) => {
+ await realtimeStreams.append("chat", "a" as unknown as BodyInit);
+ await realtimeStreams.append("stop", "halt" as unknown as BodyInit);
+
+ expect(outputs.chunks("chat")).toEqual(["a"]);
+ expect(outputs.chunks("stop")).toEqual(["halt"]);
+ expect(outputs.all()).toEqual({ chat: ["a"], stop: ["halt"] });
+ });
+ });
+
+ it("clear() empties one stream or all streams", async () => {
+ await runInMockTaskContext(async ({ outputs }) => {
+ await realtimeStreams.append("chat", "a" as unknown as BodyInit);
+ await realtimeStreams.append("stop", "halt" as unknown as BodyInit);
+
+ outputs.clear("chat");
+ expect(outputs.chunks("chat")).toEqual([]);
+ expect(outputs.chunks("stop")).toEqual(["halt"]);
+
+ outputs.clear();
+ expect(outputs.chunks("stop")).toEqual([]);
+ });
+ });
+ });
+
+ it("tears down input/output managers so consecutive calls are isolated", async () => {
+ await runInMockTaskContext(async ({ inputs }) => {
+ await inputs.send("chat-messages", "first-run");
+ });
+
+ await runInMockTaskContext(async ({ outputs }) => {
+ expect(outputs.chunks("chat-messages")).toEqual([]);
+ // inputs.peek should NOT see "first-run" from the prior harness
+ expect(inputStreams.peek("chat-messages")).toBeUndefined();
+ });
+ });
+});
diff --git a/packages/core/test/skillCatalog.test.ts b/packages/core/test/skillCatalog.test.ts
new file mode 100644
index 00000000000..3f1d29bf572
--- /dev/null
+++ b/packages/core/test/skillCatalog.test.ts
@@ -0,0 +1,74 @@
+import { describe, expect, it, vi } from "vitest";
+import { StandardResourceCatalog } from "../src/v3/resource-catalog/standardResourceCatalog.js";
+
+describe("StandardResourceCatalog — skills", () => {
+ it("registers and lists a skill manifest", () => {
+ const catalog = new StandardResourceCatalog();
+ catalog.setCurrentFileContext("trigger/chat.ts", "chat");
+
+ catalog.registerSkillMetadata({ id: "pdf-processing", sourcePath: "./skills/pdf-processing" });
+
+ const manifests = catalog.listSkillManifests();
+ expect(manifests).toHaveLength(1);
+ expect(manifests[0]).toMatchObject({
+ id: "pdf-processing",
+ sourcePath: "./skills/pdf-processing",
+ filePath: "trigger/chat.ts",
+ entryPoint: "chat",
+ });
+ });
+
+ it("getSkillManifest returns the registered skill", () => {
+ const catalog = new StandardResourceCatalog();
+ catalog.setCurrentFileContext("trigger/chat.ts", "chat");
+ catalog.registerSkillMetadata({ id: "a", sourcePath: "./skills/a" });
+
+ expect(catalog.getSkillManifest("a")?.sourcePath).toBe("./skills/a");
+ expect(catalog.getSkillManifest("missing")).toBeUndefined();
+ });
+
+ it("skips registration without a file context", () => {
+ const catalog = new StandardResourceCatalog();
+
+ catalog.registerSkillMetadata({ id: "pdf", sourcePath: "./skills/pdf" });
+
+ expect(catalog.listSkillManifests()).toHaveLength(0);
+ });
+
+ it("warns and ignores when the same id is registered with a different path", () => {
+ const catalog = new StandardResourceCatalog();
+ catalog.setCurrentFileContext("trigger/chat.ts", "chat");
+
+ const warn = vi.spyOn(console, "warn").mockImplementation(() => {});
+
+ catalog.registerSkillMetadata({ id: "pdf", sourcePath: "./skills/pdf" });
+ catalog.registerSkillMetadata({ id: "pdf", sourcePath: "./skills/other-pdf" });
+
+ const manifests = catalog.listSkillManifests();
+ expect(manifests).toHaveLength(1);
+ expect(manifests[0]?.sourcePath).toBe("./skills/pdf");
+ expect(warn).toHaveBeenCalledWith(expect.stringContaining("defined twice"));
+
+ warn.mockRestore();
+ });
+
+ it("re-registering the same id + path is idempotent", () => {
+ const catalog = new StandardResourceCatalog();
+ catalog.setCurrentFileContext("trigger/chat.ts", "chat");
+
+ catalog.registerSkillMetadata({ id: "pdf", sourcePath: "./skills/pdf" });
+ catalog.registerSkillMetadata({ id: "pdf", sourcePath: "./skills/pdf" });
+
+ expect(catalog.listSkillManifests()).toHaveLength(1);
+ });
+
+ it("registers multiple distinct skills", () => {
+ const catalog = new StandardResourceCatalog();
+ catalog.setCurrentFileContext("trigger/chat.ts", "chat");
+
+ catalog.registerSkillMetadata({ id: "pdf", sourcePath: "./skills/pdf" });
+ catalog.registerSkillMetadata({ id: "researcher", sourcePath: "./skills/researcher" });
+
+ expect(catalog.listSkillManifests().map((s) => s.id).sort()).toEqual(["pdf", "researcher"]);
+ });
+});
diff --git a/packages/trigger-sdk/package.json b/packages/trigger-sdk/package.json
index 9a1b90b059e..e2661b91719 100644
--- a/packages/trigger-sdk/package.json
+++ b/packages/trigger-sdk/package.json
@@ -24,7 +24,12 @@
"./package.json": "./package.json",
".": "./src/v3/index.ts",
"./v3": "./src/v3/index.ts",
- "./ai": "./src/v3/ai.ts"
+ "./ai": "./src/v3/ai.ts",
+ "./ai/skills-runtime": "./src/v3/agentSkillsRuntime.ts",
+ "./ai/test": "./src/v3/test/index.ts",
+ "./chat": "./src/v3/chat.ts",
+ "./chat/react": "./src/v3/chat-react.ts",
+ "./chat-server": "./src/v3/chat-server.ts"
},
"sourceDialects": [
"@triggerdotdev/source"
@@ -37,6 +42,21 @@
],
"ai": [
"dist/commonjs/v3/ai.d.ts"
+ ],
+ "ai/skills-runtime": [
+ "dist/commonjs/v3/agentSkillsRuntime.d.ts"
+ ],
+ "ai/test": [
+ "dist/commonjs/v3/test/index.d.ts"
+ ],
+ "chat": [
+ "dist/commonjs/v3/chat.d.ts"
+ ],
+ "chat/react": [
+ "dist/commonjs/v3/chat-react.d.ts"
+ ],
+ "chat-server": [
+ "dist/commonjs/v3/chat-server.d.ts"
]
}
},
@@ -63,11 +83,13 @@
"ws": "^8.11.0"
},
"devDependencies": {
+ "@ai-sdk/provider": "3.0.8",
"@arethetypeswrong/cli": "^0.15.4",
"@types/debug": "^4.1.7",
+ "@types/react": "^19.2.14",
"@types/slug": "^5.0.3",
"@types/ws": "^8.5.3",
- "ai": "^6.0.0",
+ "ai": "^6.0.116",
"encoding": "^0.1.13",
"rimraf": "^6.0.1",
"tshy": "^3.0.2",
@@ -76,12 +98,16 @@
"zod": "3.25.76"
},
"peerDependencies": {
- "zod": "^3.0.0 || ^4.0.0",
- "ai": "^4.2.0 || ^5.0.0 || ^6.0.0"
+ "ai": "^5.0.0 || ^6.0.0",
+ "react": "^18.0 || ^19.0",
+ "zod": "^3.0.0 || ^4.0.0"
},
"peerDependenciesMeta": {
"ai": {
"optional": true
+ },
+ "react": {
+ "optional": true
}
},
"engines": {
@@ -121,6 +147,61 @@
"types": "./dist/commonjs/v3/ai.d.ts",
"default": "./dist/commonjs/v3/ai.js"
}
+ },
+ "./ai/skills-runtime": {
+ "import": {
+ "@triggerdotdev/source": "./src/v3/agentSkillsRuntime.ts",
+ "types": "./dist/esm/v3/agentSkillsRuntime.d.ts",
+ "default": "./dist/esm/v3/agentSkillsRuntime.js"
+ },
+ "require": {
+ "types": "./dist/commonjs/v3/agentSkillsRuntime.d.ts",
+ "default": "./dist/commonjs/v3/agentSkillsRuntime.js"
+ }
+ },
+ "./ai/test": {
+ "import": {
+ "@triggerdotdev/source": "./src/v3/test/index.ts",
+ "types": "./dist/esm/v3/test/index.d.ts",
+ "default": "./dist/esm/v3/test/index.js"
+ },
+ "require": {
+ "types": "./dist/commonjs/v3/test/index.d.ts",
+ "default": "./dist/commonjs/v3/test/index.js"
+ }
+ },
+ "./chat": {
+ "import": {
+ "@triggerdotdev/source": "./src/v3/chat.ts",
+ "types": "./dist/esm/v3/chat.d.ts",
+ "default": "./dist/esm/v3/chat.js"
+ },
+ "require": {
+ "types": "./dist/commonjs/v3/chat.d.ts",
+ "default": "./dist/commonjs/v3/chat.js"
+ }
+ },
+ "./chat/react": {
+ "import": {
+ "@triggerdotdev/source": "./src/v3/chat-react.ts",
+ "types": "./dist/esm/v3/chat-react.d.ts",
+ "default": "./dist/esm/v3/chat-react.js"
+ },
+ "require": {
+ "types": "./dist/commonjs/v3/chat-react.d.ts",
+ "default": "./dist/commonjs/v3/chat-react.js"
+ }
+ },
+ "./chat-server": {
+ "import": {
+ "@triggerdotdev/source": "./src/v3/chat-server.ts",
+ "types": "./dist/esm/v3/chat-server.d.ts",
+ "default": "./dist/esm/v3/chat-server.js"
+ },
+ "require": {
+ "types": "./dist/commonjs/v3/chat-server.d.ts",
+ "default": "./dist/commonjs/v3/chat-server.js"
+ }
}
},
"main": "./dist/commonjs/v3/index.js",
diff --git a/packages/trigger-sdk/src/v3/agentSkillsRuntime.ts b/packages/trigger-sdk/src/v3/agentSkillsRuntime.ts
new file mode 100644
index 00000000000..31501ca4aef
--- /dev/null
+++ b/packages/trigger-sdk/src/v3/agentSkillsRuntime.ts
@@ -0,0 +1,127 @@
+import { spawn } from "node:child_process";
+import * as fs from "node:fs/promises";
+import * as nodePath from "node:path";
+
+/**
+ * Server-only runtime for the auto-injected skill tools
+ * (`loadSkill` / `readFile` / `bash`) that `chat.agent({ skills })`
+ * wires up. Split off from `./ai.ts` so the chat-agent surface in
+ * `@trigger.dev/sdk/ai` stays importable from client bundles —
+ * Next.js + Webpack reject top-level `node:*` imports anywhere in a
+ * client graph, even when a consumer only pulls in types.
+ *
+ * The SDK's `ai.ts` loads this module via a computed-string dynamic
+ * import inside each tool's `execute` — webpack treats the
+ * expression as an unknown dependency and skips static tracing, so
+ * the node-only symbols here never surface in a client build. The
+ * module resolves fine at runtime on a server worker because the
+ * relative path (`./agentSkillsRuntime.js`) lands next to `ai.js` in
+ * the emitted dist.
+ *
+ * Public subpath: `@trigger.dev/sdk/ai/skills-runtime`. Customers
+ * who want to eagerly bundle the runtime server-side (e.g. warming
+ * it on worker bootstrap) can import from there.
+ */
+
+const DEFAULT_BASH_OUTPUT_BYTES = 64 * 1024;
+const DEFAULT_READ_FILE_BYTES = 1024 * 1024;
+
+export type BashSkillInput = {
+ /** Absolute path to the skill's root (used as `cwd`). */
+ skillPath: string;
+ /** The bash command to run. */
+ command: string;
+ /** Optional abort signal forwarded to `spawn()`. */
+ abortSignal?: AbortSignal;
+};
+
+export type BashSkillResult =
+ | { exitCode: number | null; stdout: string; stderr: string }
+ | { error: string };
+
+export type ReadFileInSkillInput = {
+ /** Absolute path to the skill's root — the relative path must resolve inside it. */
+ skillPath: string;
+ /** Relative path the tool caller supplied. */
+ relativePath: string;
+};
+
+export type ReadFileInSkillResult = { content: string } | { error: string };
+
+function truncate(s: string, limit: number): string {
+ if (s.length <= limit) return s;
+ return s.slice(0, limit) + `\n…[truncated ${s.length - limit} bytes]`;
+}
+
+/**
+ * Path-traversal guard: confirm `relative` resolves inside `root`.
+ * Throws if it escapes via `..` or an absolute prefix. Returns the
+ * absolute resolved path.
+ */
+function safeJoinInside(root: string, relative: string): string {
+ if (nodePath.isAbsolute(relative)) {
+ throw new Error(`Path must be relative to the skill directory: ${relative}`);
+ }
+ const resolved = nodePath.resolve(root, relative);
+ const normalized = nodePath.resolve(root) + nodePath.sep;
+ if (resolved !== nodePath.resolve(root) && !resolved.startsWith(normalized)) {
+ throw new Error(`Path escapes the skill directory: ${relative}`);
+ }
+ return resolved;
+}
+
+export async function readFileInSkill({
+ skillPath,
+ relativePath,
+}: ReadFileInSkillInput): Promise<ReadFileInSkillResult> {
+ let absolute: string;
+ try {
+ absolute = safeJoinInside(skillPath, relativePath);
+ } catch (err) {
+ return { error: (err as Error).message };
+ }
+ try {
+ const content = await fs.readFile(absolute, "utf8");
+ return { content: truncate(content, DEFAULT_READ_FILE_BYTES) };
+ } catch (err) {
+ return { error: (err as Error).message };
+ }
+}
+
+export async function runBashInSkill({
+ skillPath,
+ command,
+ abortSignal,
+}: BashSkillInput): Promise<BashSkillResult> {
+ return new Promise<BashSkillResult>((resolvePromise) => {
+ let child;
+ try {
+ child = spawn("bash", ["-c", command], {
+ cwd: skillPath,
+ signal: abortSignal,
+ });
+ } catch (err) {
+ resolvePromise({ error: (err as Error).message });
+ return;
+ }
+
+ let stdout = "";
+ let stderr = "";
+ child.stdout?.on("data", (chunk: Buffer | string) => {
+ stdout += chunk.toString();
+ });
+ child.stderr?.on("data", (chunk: Buffer | string) => {
+ stderr += chunk.toString();
+ });
+ child.once("close", (code: number | null) => {
+ resolvePromise({
+ exitCode: code,
+ stdout: truncate(stdout, DEFAULT_BASH_OUTPUT_BYTES),
+ stderr: truncate(stderr, DEFAULT_BASH_OUTPUT_BYTES),
+ });
+ });
+ child.once("error", (err: Error) => {
+ resolvePromise({ error: err.message });
+ });
+ });
+}
diff --git a/packages/trigger-sdk/src/v3/ai-shared.ts b/packages/trigger-sdk/src/v3/ai-shared.ts
new file mode 100644
index 00000000000..7161385764f
--- /dev/null
+++ b/packages/trigger-sdk/src/v3/ai-shared.ts
@@ -0,0 +1,210 @@
+/**
+ * Browser-safe primitives shared between `@trigger.dev/sdk/ai` (server) and
+ * `@trigger.dev/sdk/chat` / `@trigger.dev/sdk/chat/react` (client).
+ *
+ * This module exists to keep `ai.ts` reachable only from the server graph.
+ * `ai.ts` weighs in at ~7000 lines and statically imports the agent-skills
+ * runtime (which uses `node:child_process` / `node:fs/promises`). When a
+ * browser bundle imports a runtime value from `ai.ts` — historically the
+ * `PENDING_MESSAGE_INJECTED_TYPE` constant in `chat-react.ts` — the bundler
+ * traces `ai.ts`'s entire module graph into the client chunk and hits the
+ * `node:` builtins, which Turbopack rejects outright (and webpack flags as
+ * a "Critical dependency" warning).
+ *
+ * Anything in this file MUST stay free of `node:*` imports and free of any
+ * import from `ai.ts`.
+ */
+
+import type { Task, AnyTask } from "@trigger.dev/core/v3";
+import type { ModelMessage, UIMessage } from "ai";
+
+/**
+ * Message-part `type` value for the pending-message data part the agent
+ * injects when a follow-up message arrives mid-turn.
+ */
+export const PENDING_MESSAGE_INJECTED_TYPE = "data-pending-message-injected" as const;
+
+/**
+ * The wire payload shape sent by `TriggerChatTransport`.
+ * Uses `metadata` to match the AI SDK's `ChatRequestOptions` field name.
+ *
+ * Slim wire: at most ONE message per record. The agent runtime
+ * reconstructs prior history at run boot from a durable S3 snapshot +
+ * `session.out` replay (or `hydrateMessages` if registered). The wire is
+ * delta-only — see plan `vivid-humming-bonbon.md`.
+ */
+export type ChatTaskWirePayload<TMessage extends UIMessage = UIMessage, TMetadata = unknown> = {
+ /**
+ * The single message being delivered on this trigger. Set for:
+ * - `submit-message`: the new user message OR a tool-approval-responded
+ * assistant message (with `state: "approval-responded"` tool parts).
+ * - `regenerate-message`: omitted (the agent slices its own history).
+ * - `preload` / `close` / `action`: omitted.
+ * - `handover-prepare`: omitted (use `headStartMessages` instead).
+ */
+ message?: TMessage;
+ /**
+ * Bespoke escape hatch for `chat.headStart`. The customer's HTTP route
+ * handler ships full `UIMessage[]` history at the very first turn — before
+ * any snapshot exists. The route handler isn't subject to the
+ * `MAX_APPEND_BODY_BYTES` cap on `/in/append` because it goes through the
+ * customer's own HTTP endpoint. Used ONLY by `trigger: "handover-prepare"`.
+ * Ignored on every other trigger.
+ */
+ headStartMessages?: TMessage[];
+ chatId: string;
+ trigger:
+ | "submit-message"
+ | "regenerate-message"
+ | "preload"
+ | "close"
+ | "action"
+ /**
+ * The customer's `chat.handover` route handler kicked us off in
+ * parallel with the first-turn `streamText` running in the warm
+ * Next.js process. The run sits idle on `session.in` waiting for
+ * a `kind: "handover"` (continue from tool execution) or
+ * `kind: "handover-skip"` (handler finished pure-text, exit
+ * cleanly). See `chat.handover` in `@trigger.dev/sdk/chat-server`.
+ */
+ | "handover-prepare";
+ messageId?: string;
+ metadata?: TMetadata;
+ /** Custom action payload when `trigger` is `"action"`. Validated against `actionSchema` on the backend. */
+ action?: unknown;
+ /** Whether this run is continuing an existing chat whose previous run ended. */
+ continuation?: boolean;
+ /** The run ID of the previous run (only set when `continuation` is true). */
+ previousRunId?: string;
+ /** Override idle timeout for this run (seconds). Set by transport.preload(). */
+ idleTimeoutInSeconds?: number;
+ /**
+ * The friendlyId of the Session primitive backing this chat. The
+ * transport opens (or lazy-creates) the session with
+ * `externalId = chatId` on first message, then sends this friendlyId
+ * through to the run so the agent can attach to `.in` / `.out`
+ * without needing to round-trip through the control plane again.
+ * Optional for backward-compat while the migration is in flight;
+ * required once the legacy run-scoped stream path is removed.
+ */
+ sessionId?: string;
+ /**
+ * Client-side `chat.store` value sent by the transport. Applied at turn
+ * start before `run()` fires, overwriting any in-memory store value on the
+ * agent (last-write-wins).
+ *
+ * The transport queues this via `setStore` / `applyStorePatch` and flushes
+ * it with the next `sendMessage`. On the agent you typically don't read
+ * this directly — it's applied into `chat.store` transparently.
+ */
+ incomingStore?: unknown;
+};
+
+/**
+ * One chunk on the chat input stream. `kind` discriminates the variants —
+ * a single ordered stream now carries all the signals the old three-stream
+ * split did (`chat-messages`, `chat-stop`, plus action messages piggybacked
+ * on `chat-messages`).
+ */
+export type ChatInputChunk =
+ | {
+ kind: "message";
+ /**
+ * Full wire payload for a new user message or regeneration. Mirrors
+ * what the legacy `chat-messages` input stream carried.
+ */
+ payload: ChatTaskWirePayload;
+ }
+ | {
+ kind: "stop";
+ /** Optional human-readable reason. Maps to the legacy `chat-stop` record. */
+ message?: string;
+ }
+ | {
+ /**
+ * Sent by `chat.headStart` when the customer's first-turn
+ * `streamText` finishes. The agent run (currently parked in
+ * `handover-prepare`) wakes, seeds its accumulators with
+ * `partialAssistantMessage`, and runs the normal turn loop
+ * (`onChatStart` → `onTurnStart` → … → `onTurnComplete`).
+ *
+ * What happens after that depends on `isFinal`:
+ *
+ * - `isFinal: false` — step 1 ended with `finishReason:
+ * "tool-calls"`. The partial carries the assistant's
+ * tool-call(s) wrapped in AI SDK's tool-approval round. The
+ * agent's `streamText` runs the approved tools and continues
+ * from step 2.
+ * - `isFinal: true` — step 1 ended pure-text (no tool calls).
+ * The partial carries the final assistant text. The agent
+ * skips the LLM call entirely (the response is already
+ * complete on the customer side) and runs `onTurnComplete`
+ * with the partial as `responseMessage` so persistence and
+ * any post-turn work fire normally.
+ */
+ kind: "handover";
+ /** Customer's step-1 response messages (ModelMessage form). */
+ partialAssistantMessage: ModelMessage[];
+ /**
+ * The UI messageId the customer's handler used for its step-1
+ * assistant message. The agent reuses this so any post-handover
+ * chunks (tool-output-available, step-2 text, data-* parts
+ * written by hooks) merge into the SAME assistant message on
+ * the browser side instead of starting a new one.
+ */
+ messageId?: string;
+ /**
+ * Whether the customer's step 1 is the final response. See
+ * `kind` description above for the two branches.
+ */
+ isFinal: boolean;
+ }
+ | {
+ /**
+ * Sent by `chat.headStart` only when the customer's handler
+ * ABORTS before producing a finishReason (e.g., dispatch error,
+ * stream cancelled before any tokens). The agent run exits
+ * cleanly without firing turn hooks. Normal pure-text and
+ * tool-call finishes go through `kind: "handover"` with the
+ * appropriate `isFinal` flag.
+ */
+ kind: "handover-skip";
+ };
+
+/**
+ * Extracts the client-data (`metadata`) type from a chat task.
+ *
+ * @example
+ * ```ts
+ * import type { InferChatClientData } from "@trigger.dev/sdk/ai";
+ * import type { myChat } from "@/trigger/chat";
+ *
+ * type MyClientData = InferChatClientData<typeof myChat>;
+ * ```
+ */
+export type InferChatClientData<TTask extends AnyTask> = TTask extends Task<
+  string,
+  ChatTaskWirePayload<any, infer TMetadata>,
+  any
+>
+  ? TMetadata
+  : unknown;
+
+/**
+ * Extracts the UI message type from a chat task (wire payload `message` items).
+ *
+ * @example
+ * ```ts
+ * import type { InferChatUIMessage } from "@trigger.dev/sdk/ai";
+ * import type { myChat } from "@/trigger/chat";
+ *
+ * type Msg = InferChatUIMessage<typeof myChat>;
+ * ```
+ */
+export type InferChatUIMessage<TTask extends AnyTask> = TTask extends Task<
+  string,
+  ChatTaskWirePayload<infer TUIM, any>,
+  any
+>
+  ? TUIM
+  : UIMessage;
diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts
index 59afa2fe21a..1b0fa19e390 100644
--- a/packages/trigger-sdk/src/v3/ai.ts
+++ b/packages/trigger-sdk/src/v3/ai.ts
@@ -1,38 +1,837 @@
import {
+ accessoryAttributes,
AnyTask,
+ apiClientManager,
+ getSchemaParseFn,
+ InputStreamOncePromise,
+ type InputStreamOnceOptions,
+ type InputStreamWaitOptions,
+ type InputStreamWaitWithIdleTimeoutOptions,
isSchemaZodEsque,
+ logger,
+ type MachinePresetName,
+ ManualWaitpointPromise,
+ OutOfMemoryError,
+ sessionStreams,
+ type PipeStreamResult,
+ type RealtimeDefinedInputStream,
+ type RealtimeDefinedStream,
+ type ReadStreamOptions,
+ SemanticInternalAttributes,
+ type SendInputStreamOptions,
Task,
+ taskContext,
+ type AppendStreamOptions,
+ type InputStreamOnceResult,
type inferSchemaIn,
+ type inferSchemaOut,
+ type PipeStreamOptions,
+ type TaskIdentifier,
+ type TaskOptions,
type TaskSchema,
+ type TaskRunContext,
type TaskWithSchema,
+ type WriterStreamOptions,
} from "@trigger.dev/core/v3";
-import { dynamicTool, jsonSchema, JSONSchema7, Schema, Tool, ToolCallOptions, zodSchema } from "ai";
+import type {
+ FinishReason,
+ ModelMessage,
+ ToolSet,
+ UIMessage,
+ UIMessageChunk,
+ UIMessageStreamOptions,
+ LanguageModelUsage,
+} from "ai";
+import type { StreamWriteResult } from "@trigger.dev/core/v3";
+import {
+ convertToModelMessages,
+ dynamicTool,
+ generateId as generateMessageId,
+ getToolName,
+ isToolUIPart,
+ jsonSchema,
+ JSONSchema7,
+ readUIMessageStream,
+ Schema,
+ tool as aiTool,
+ Tool,
+ ToolCallOptions,
+ zodSchema,
+} from "ai";
+import { type Attributes, trace } from "@opentelemetry/api";
+import { auth } from "./auth.js";
+import { locals } from "./locals.js";
import { metadata } from "./metadata.js";
+import type { ResolvedPrompt } from "./prompt.js";
+import type { ResolvedSkill } from "./skill.js";
+// Bash-skill runtime lives in `./agentSkillsRuntime.ts` (exposed as
+// the `@trigger.dev/sdk/ai/skills-runtime` subpath). It's a normal
+// static import — `ai.ts` is server-only by reachability now that
+// browser-side primitives (PENDING_MESSAGE_INJECTED_TYPE and the
+// chat-task wire types) live in `./ai-shared.ts`. Any browser bundle
+// that wants those primitives imports `./ai-shared.js` directly and
+// never touches `ai.ts`'s module graph, so the `node:*` builtins
+// pulled in transitively here never reach a client chunk.
+import { runBashInSkill, readFileInSkill } from "./agentSkillsRuntime.js";
+import { streams } from "./streams.js";
+import {
+ sessions,
+ type SessionHandle,
+ type SessionInputChannel,
+ type SessionOutputChannel,
+ type SessionPipeStreamOptions,
+ type SessionSubscribeOptions,
+} from "./sessions.js";
+import { createTask } from "./shared.js";
+import { resourceCatalog, type SessionTriggerConfig } from "@trigger.dev/core/v3";
+import { tracer } from "./tracer.js";
+
+/** Re-export for typing `ctx` in `chat.agent` hooks without importing `@trigger.dev/core`. */
+export type { TaskRunContext } from "@trigger.dev/core/v3";
+import {
+ applyChatStorePatch,
+ type ChatStoreChunk,
+ type ChatStoreDeltaChunk,
+ type ChatStorePatchOperation,
+ type ChatStoreSnapshotChunk,
+} from "@trigger.dev/core/v3/chat-client";
const METADATA_KEY = "tool.execute.options";
-export type ToolCallExecutionOptions = Omit;
+/**
+ * Wrapper around `convertToModelMessages` that always passes
+ * `ignoreIncompleteToolCalls: true` to prevent failures from
+ * stopped/aborted conversations with partial tool parts.
+ */
+function toModelMessages(messages: UIMessage[]): Promise<ModelMessage[]> {
+ return convertToModelMessages(messages, { ignoreIncompleteToolCalls: true });
+}
+
+export type ToolCallExecutionOptions = {
+ toolCallId: string;
+ experimental_context?: unknown;
+ /** Chat context — only present when the tool runs inside a chat.agent turn. */
+ chatId?: string;
+ turn?: number;
+ continuation?: boolean;
+ clientData?: unknown;
+ /** Serialized chat.local values from the parent run. @internal */
+  chatLocals?: Record<string, unknown>;
+};
+
+/** Chat context stored in locals during each chat.agent turn for auto-detection. */
+type ChatTurnContext<TClientData = unknown> = {
+ chatId: string;
+ turn: number;
+ continuation: boolean;
+ clientData?: TClientData;
+};
+const chatTurnContextKey = locals.create<ChatTurnContext>("chat.turnContext");
+
+/**
+ * Per-run slot holding the Session handle that backs this chat's `.in` /
+ * `.out` channels. Populated at the top of `chatAgent`'s run function from
+ * `payload.sessionId`; read by every module-level helper (`chatStream`,
+ * `messagesInput`, `stopInput`) so the chat.agent internals can remain
+ * the same module-level shape they were when the I/O was run-scoped.
+ * @internal
+ */
+const chatSessionHandleKey = locals.create<SessionHandle>("chat.sessionHandle");
+
+/**
+ * Scan `session.out` for the latest `trigger:turn-complete` chunk and
+ * return its SSE timestamp. Used at OOM-retry boot to derive a
+ * lower-bound timestamp for the `session.in` filter — records older
+ * than `T_last_complete` belong to turns that already completed on the
+ * prior attempt and are dropped before they reach the turn loop.
+ *
+ * Implementation is a streaming scan: subscribes via the existing SSE
+ * endpoint with a short `timeoutInSeconds`, processes each part inline,
+ * and discards the chunk body so memory stays O(1) regardless of how
+ * many records are on `session.out`. Bandwidth scales linearly with
+ * stream length but the scan only fires on retry — a rare event.
+ *
+ * Returns `undefined` if no `trigger:turn-complete` chunk has been
+ * written yet (first-turn OOM, no completed turns to dedup against).
+ * @internal
+ */
+async function findLatestTurnCompleteTimestamp(
+ chatId: string
+): Promise<number | undefined> {
+ const apiClient = apiClientManager.clientOrThrow();
+ let latestTs: number | undefined;
+ const stream = await apiClient.subscribeToSessionStream(chatId, "out", {
+ timeoutInSeconds: 1,
+ onPart: (part) => {
+ let chunk: unknown = part.chunk;
+ if (typeof chunk === "string") {
+ try {
+ chunk = JSON.parse(chunk);
+ } catch {
+ return;
+ }
+ }
+ if (chunk && typeof chunk === "object" && (chunk as { type?: unknown }).type === "trigger:turn-complete") {
+ latestTs = part.timestamp;
+ }
+ },
+ });
+ // Drain the stream to drive `onPart`. We don't accumulate the chunks —
+ // each iteration discards the data immediately, so a long session.out
+ // doesn't blow memory on the retry-boot worker.
+ for await (const _ of stream) {
+ // intentionally empty
+ }
+ return latestTs;
+}
+
+/**
+ * Versioned blob written to S3 after every turn completes (when no
+ * `hydrateMessages` hook is registered). Read at run boot to seed the
+ * accumulator with prior conversation state, replacing the old wire-borne
+ * full-history seed. Only the runtime owns this format — customers never
+ * touch it.
+ *
+ * `lastOutEventId` is the SSE Last-Event-ID after the snapshot's final
+ * chunk, used to resume `session.out` replay from precisely after the
+ * snapshot. `lastOutTimestamp` is the same chunk's timestamp, used to
+ * skip `findLatestTurnCompleteTimestamp` on OOM retry boot.
+ *
+ * @internal
+ */
+export type ChatSnapshotV1<TUIMessage extends UIMessage = UIMessage> = {
+ version: 1;
+ savedAt: number;
+ messages: TUIMessage[];
+ lastOutEventId?: string;
+ lastOutTimestamp?: number;
+};
+
+/**
+ * S3 key suffix for a session's snapshot blob. The webapp's presigned-URL
+ * routes prefix this with `packets/{projectRef}/{envSlug}/` server-side, so
+ * the final S3 key lands at
+ * `packets/{projectRef}/{envSlug}/sessions/{sessionId}/snapshot.json`.
+ *
+ * Stable per session: the friendlyId persists across `chat.requestUpgrade`
+ * continuations and idle-suspend restarts.
+ * @internal
+ */
+function snapshotFilename(sessionId: string): string {
+ return `sessions/${sessionId}/snapshot.json`;
+}
+
+/**
+ * Test-only override hook — `mockChatAgent` installs a fake to return
+ * synthetic snapshots without hitting S3. Mirrors the `__set*ImplForTests`
+ * pattern in `sessions.ts`. Not part of the public API.
+ * @internal
+ */
+type ReadChatSnapshotImpl = (
+ sessionId: string
+) => Promise<ChatSnapshotV1 | undefined> | ChatSnapshotV1 | undefined;
+let readChatSnapshotImpl: ReadChatSnapshotImpl | undefined;
+
+export function __setReadChatSnapshotImplForTests(impl: ReadChatSnapshotImpl | undefined): void {
+ readChatSnapshotImpl = impl;
+}
+
+/**
+ * Test-only override hook — see `__setReadChatSnapshotImplForTests`. The
+ * mock harness records writes for assertion via this setter. Not public.
+ * @internal
+ */
+type WriteChatSnapshotImpl = (
+ sessionId: string,
+ snapshot: ChatSnapshotV1
+) => Promise<void> | void;
+let writeChatSnapshotImpl: WriteChatSnapshotImpl | undefined;
+
+export function __setWriteChatSnapshotImplForTests(impl: WriteChatSnapshotImpl | undefined): void {
+ writeChatSnapshotImpl = impl;
+}
+
+/**
+ * Read the persisted snapshot for a session. Returns `undefined` on:
+ * - missing object (404 from the presigned GET — fresh session, never
+ * persisted)
+ * - presign failure (network/auth issue)
+ * - malformed JSON
+ * - version mismatch (forward-compat — older runtimes ignore newer blobs)
+ *
+ * Always swallows errors via `logger.warn`. The agent boot loop must stay
+ * available even if S3 hiccups; the worst case is replaying more of
+ * `session.out` than strictly necessary.
+ * @internal
+ */
+async function readChatSnapshot(
+ sessionId: string
+): Promise<ChatSnapshotV1 | undefined> {
+ if (readChatSnapshotImpl) {
+ return (await readChatSnapshotImpl(sessionId)) ?? undefined;
+ }
+ const apiClient = apiClientManager.clientOrThrow();
+ let presignedUrl: string;
+ try {
+ const resp = await apiClient.getPayloadUrl(snapshotFilename(sessionId));
+ presignedUrl = resp.presignedUrl;
+ } catch (error) {
+ logger.warn("chat.agent: snapshot presign (read) failed; continuing without snapshot", {
+ error: error instanceof Error ? error.message : String(error),
+ sessionId,
+ });
+ return undefined;
+ }
+ let response: Response;
+ try {
+ response = await fetch(presignedUrl, { method: "GET" });
+ } catch (error) {
+ logger.warn("chat.agent: snapshot fetch failed; continuing without snapshot", {
+ error: error instanceof Error ? error.message : String(error),
+ sessionId,
+ });
+ return undefined;
+ }
+ if (response.status === 404) {
+ // First-ever boot for this session — no snapshot yet. Caller falls
+ // through to replay-only.
+ return undefined;
+ }
+ if (!response.ok) {
+ logger.warn("chat.agent: snapshot fetch returned non-OK; continuing without snapshot", {
+ status: response.status,
+ sessionId,
+ });
+ return undefined;
+ }
+ let parsed: unknown;
+ try {
+ parsed = await response.json();
+ } catch (error) {
+ logger.warn("chat.agent: snapshot JSON parse failed; continuing without snapshot", {
+ error: error instanceof Error ? error.message : String(error),
+ sessionId,
+ });
+ return undefined;
+ }
+ if (!parsed || typeof parsed !== "object") return undefined;
+  const candidate = parsed as Partial<ChatSnapshotV1>;
+ if (candidate.version !== 1 || !Array.isArray(candidate.messages)) {
+ logger.warn("chat.agent: snapshot version/shape mismatch; ignoring", {
+ version: candidate.version,
+ sessionId,
+ });
+ return undefined;
+ }
+ return candidate as ChatSnapshotV1;
+}
+
+/**
+ * Persist the snapshot for a session. Awaited by callers immediately after
+ * `onTurnComplete` — the agent may suspend right after this point, and
+ * fire-and-forget promises don't reliably complete on suspend.
+ *
+ * Errors are swallowed via `logger.warn`. A failed write means the next
+ * boot replays slightly more of `session.out` (back to the previous
+ * snapshot's cursor) instead of failing — the conversation stays
+ * coherent, only the boot path does marginally more work.
+ * @internal
+ */
+async function writeChatSnapshot(
+ sessionId: string,
+ snapshot: ChatSnapshotV1
+): Promise<void> {
+ if (writeChatSnapshotImpl) {
+ await writeChatSnapshotImpl(sessionId, snapshot);
+ return;
+ }
+ const apiClient = apiClientManager.clientOrThrow();
+ let presignedUrl: string;
+ try {
+ const resp = await apiClient.createUploadPayloadUrl(snapshotFilename(sessionId));
+ presignedUrl = resp.presignedUrl;
+ } catch (error) {
+ logger.warn("chat.agent: snapshot presign (write) failed; next run will replay further", {
+ error: error instanceof Error ? error.message : String(error),
+ sessionId,
+ });
+ return;
+ }
+ let response: Response;
+ try {
+ response = await fetch(presignedUrl, {
+ method: "PUT",
+ headers: { "content-type": "application/json" },
+ body: JSON.stringify(snapshot),
+ });
+ } catch (error) {
+ logger.warn("chat.agent: snapshot upload failed; next run will replay further", {
+ error: error instanceof Error ? error.message : String(error),
+ sessionId,
+ });
+ return;
+ }
+ if (!response.ok) {
+ logger.warn("chat.agent: snapshot upload returned non-OK; next run will replay further", {
+ status: response.status,
+ sessionId,
+ });
+ }
+}
+
+/**
+ * Test-only entry point that bypasses `__setReadChatSnapshotImplForTests`
+ * and reaches the real `apiClient.getPayloadUrl` + `fetch` + JSON-parse path.
+ * Used by `chat-snapshot.test.ts` to verify 404 / 500 / malformed JSON /
+ * version-mismatch / network-error behavior end-to-end. Tests mock global
+ * `fetch` and the api-client config; this wrapper lets them drive the
+ * production code without the override hook short-circuiting.
+ *
+ * Not part of the public API. The `__` prefix and `ForTests` suffix mirror
+ * the override-hook setters above.
+ * @internal
+ */
+export async function __readChatSnapshotProductionPathForTests(
+ sessionId: string
+): Promise<ChatSnapshotV1 | undefined> {
+ const saved = readChatSnapshotImpl;
+ readChatSnapshotImpl = undefined;
+ try {
+ return await readChatSnapshot(sessionId);
+ } finally {
+ readChatSnapshotImpl = saved;
+ }
+}
+
+/**
+ * Test-only entry point that bypasses `__setWriteChatSnapshotImplForTests`
+ * and reaches the real `apiClient.createUploadPayloadUrl` + `fetch` PUT
+ * path. Pairs with `__readChatSnapshotProductionPathForTests` — see that
+ * function's note for the rationale.
+ *
+ * Not part of the public API.
+ * @internal
+ */
+export async function __writeChatSnapshotProductionPathForTests(
+ sessionId: string,
+ snapshot: ChatSnapshotV1
+): Promise<void> {
+ const saved = writeChatSnapshotImpl;
+ writeChatSnapshotImpl = undefined;
+ try {
+ await writeChatSnapshot(sessionId, snapshot);
+ } finally {
+ writeChatSnapshotImpl = saved;
+ }
+}
+
+/**
+ * Merge two `UIMessage[]` lists by `id`, with the second list winning on
+ * collision. Used at run boot to combine the snapshot's persisted history
+ * with the replayed `session.out` tail — replay produces the freshest
+ * representation of any assistant message that landed after the snapshot's
+ * cursor, so it should overwrite the older copy from the snapshot.
+ *
+ * Order: items unique to `a` keep their original positions; items unique to
+ * `b` are appended at the end in their `b` order; collisions take `b`'s
+ * value but keep the position they had in `a`.
+ *
+ * @internal
+ */
+function mergeByIdReplaceWins<TUIMessage extends UIMessage>(
+ a: TUIMessage[],
+ b: TUIMessage[]
+): TUIMessage[] {
+ if (b.length === 0) return [...a];
+ if (a.length === 0) return [...b];
+  const indexById = new Map<string, number>();
+ for (let i = 0; i < a.length; i++) {
+ const id = a[i]!.id;
+ if (typeof id === "string" && id.length > 0) indexById.set(id, i);
+ }
+ const result = [...a];
+ for (const next of b) {
+ const id = next.id;
+ if (typeof id === "string" && id.length > 0 && indexById.has(id)) {
+ result[indexById.get(id)!] = next;
+ } else {
+ const newIdx = result.length;
+ result.push(next);
+ if (typeof id === "string" && id.length > 0) indexById.set(id, newIdx);
+ }
+ }
+ return result;
+}
+
+/**
+ * Test-only entry point for `mergeByIdReplaceWins`. The merge helper is the
+ * one piece of slim-wire boot logic that's purely functional, so it earns a
+ * direct unit test that exercises empty inputs, id collisions, no-id append,
+ * order preservation, and the replay-wins-on-collision invariant. Mirrors
+ * the `__*ProductionPathForTests` pattern used for the snapshot/replay
+ * helpers above.
+ *
+ * Not part of the public API.
+ * @internal
+ */
+export function __mergeByIdReplaceWinsForTests<TUIMessage extends UIMessage>(
+ a: TUIMessage[],
+ b: TUIMessage[]
+): TUIMessage[] {
+ return mergeByIdReplaceWins(a, b);
+}
+
+/**
+ * Test-only override hook — `mockChatAgent` installs a fake replay that
+ * returns a synthetic `UIMessage[]` so unit tests can drive the boot loop
+ * without an SSE subscription. Mirrors the snapshot setters above. Not
+ * part of the public API.
+ * @internal
+ */
+type ReplaySessionOutTailImpl = (
+ sessionId: string,
+ options?: { lastEventId?: string }
+) => Promise<UIMessage[]>;
+let replaySessionOutTailImpl: ReplaySessionOutTailImpl | undefined;
+
+export function __setReplaySessionOutTailImplForTests(
+ impl: ReplaySessionOutTailImpl | undefined
+): void {
+ replaySessionOutTailImpl = impl;
+}
+
+/**
+ * Drain `session.out` from `lastEventId` (or the start) and reduce the
+ * remaining `UIMessageChunk`s back into `UIMessage[]`. Used at run boot to
+ * catch any chunks that landed AFTER the last persisted snapshot — typically
+ * the chunks from the turn whose `onTurnComplete` ran but whose snapshot
+ * write didn't make it to S3 before the run crashed / suspended.
+ *
+ * Implementation:
+ * 1. `apiClient.readSessionStreamRecords` — non-SSE, `wait=0` drain.
+ * Returns immediately with whatever records exist after the cursor.
+ * The previous SSE-subscribe path paid a fixed ~1s long-poll tax on
+ * every fresh chat (timeout duration on empty streams) — unacceptable
+ * for the first-message TTFC budget.
+ * 2. Filter out the agent's control chunks (`type: "trigger:*"`) — they
+ * ride on the same stream as the user-visible UIMessageChunks.
+ * 3. Split chunks at `start`/`finish` boundaries so each segment is a
+ * single message, then feed each segment through the AI SDK's
+ * `readUIMessageStream` reducer (the same one `useChat` uses on the
+ * browser side) and grab the final emitted snapshot.
+ * 4. The trailing message — if it never received a `finish` chunk —
+ * goes through `cleanupAbortedParts` so partial in-flight parts
+ * don't leak into the next turn's accumulator. Drop it entirely
+ * if cleanup empties it.
+ *
+ * Errors are propagated to the caller (the boot loop wraps in try/catch and
+ * `logger.warn`s); we don't swallow here so test code can observe failures
+ * directly.
+ * @internal
+ */
+async function replaySessionOutTail<TUIMessage extends UIMessage>(
+  sessionId: string,
+  options?: { lastEventId?: string }
+): Promise<TUIMessage[]> {
+ if (replaySessionOutTailImpl) {
+ return await replaySessionOutTailImpl(sessionId, options);
+ }
+ const apiClient = apiClientManager.clientOrThrow();
+ const response = await apiClient.readSessionStreamRecords(sessionId, "out", {
+ afterEventId: options?.lastEventId,
+ });
+ const collected: UIMessageChunk[] = [];
+ for (const record of response.records) {
+ // Each record's `data` is the JSON-encoded chunk body the agent
+ // wrote at append time. The records endpoint returns it as an
+ // opaque string so the parsing cost is paid here, not on the
+ // server's hot path.
+ let chunk: unknown;
+ try {
+ chunk = JSON.parse(record.data);
+ } catch {
+ continue;
+ }
+ if (!chunk || typeof chunk !== "object") continue;
+ const type = (chunk as { type?: unknown }).type;
+ if (typeof type !== "string") continue;
+ // Drop agent control chunks (`trigger:turn-complete`, `trigger:upgrade-required`,
+ // session-state telemetry, etc.). They ride the same stream but aren't part
+ // of the UIMessageChunk discriminated union and would confuse the reducer.
+ if (type.startsWith("trigger:")) continue;
+ collected.push(chunk as UIMessageChunk);
+ }
+ if (collected.length === 0) return [];
+
+ // Split chunks into per-message segments. A `start` chunk demarcates the
+ // beginning of an assistant message; chunks before any `start` (rare —
+ // but possible if the stream begins mid-message after a resume) get
+ // bundled into a leading "implicit" segment so we don't drop them silently.
+ type Segment = { chunks: UIMessageChunk[]; closed: boolean };
+ const segments: Segment[] = [];
+ let current: Segment | undefined;
+ for (const chunk of collected) {
+ if (chunk.type === "start") {
+ current = { chunks: [chunk], closed: false };
+ segments.push(current);
+ continue;
+ }
+ if (!current) {
+ // Chunk arrived before any `start`. Synthesize a segment so the reducer
+ // has something to work with — `readUIMessageStream` tolerates a missing
+ // `start` because we pass `message: undefined`.
+ current = { chunks: [], closed: false };
+ segments.push(current);
+ }
+ current.chunks.push(chunk);
+ if (chunk.type === "finish") {
+ current.closed = true;
+ current = undefined;
+ }
+ }
+
+ const messages: TUIMessage[] = [];
+ for (let i = 0; i < segments.length; i++) {
+ const seg = segments[i]!;
+ const isTrailing = i === segments.length - 1 && !seg.closed;
+    const segmentStream = new ReadableStream<UIMessageChunk>({
+ start(controller) {
+ for (const c of seg.chunks) controller.enqueue(c);
+ controller.close();
+ },
+ });
+ let last: UIMessage | undefined;
+ try {
+ for await (const snapshot of readUIMessageStream({ stream: segmentStream })) {
+ last = snapshot;
+ }
+ } catch (error) {
+ // Reducer error — the segment is malformed. Skip it and keep going so a
+ // single corrupt chunk doesn't sink the entire replay.
+ logger.warn("chat.agent: replay reducer failed for segment; skipping", {
+ sessionId,
+ segmentIndex: i,
+ error: error instanceof Error ? error.message : String(error),
+ });
+ continue;
+ }
+ if (!last) continue;
+ if (isTrailing) {
+ const cleaned = cleanupAbortedParts(last as TUIMessage);
+ if (cleaned.parts.length === 0) continue;
+ messages.push(cleaned);
+ } else {
+ messages.push(last as TUIMessage);
+ }
+ }
+ return messages;
+}
+
+/**
+ * Test-only entry point that bypasses `__setReplaySessionOutTailImplForTests`
+ * and reaches the real `apiClient.readSessionStreamRecords` + chunk-segment
+ * splitter + `readUIMessageStream` reducer. Pairs with the snapshot
+ * production-path wrappers above. Lets `replay-session-out.test.ts` drive
+ * synthetic chunk sequences through the real reducer to lock down chunk-
+ * stream → `UIMessage[]` correctness — if the AI SDK's chunk semantics
+ * shift in a future version, the test catches it before customers do.
+ *
+ * Tests should mock `apiClient.readSessionStreamRecords` (e.g. via
+ * `vi.spyOn(apiClient, ...)`) to return synthetic records.
+ *
+ * Not part of the public API.
+ * @internal
+ */
+export async function __replaySessionOutTailProductionPathForTests<
+ TUIMessage extends UIMessage,
+>(
+ sessionId: string,
+ options?: { lastEventId?: string }
+): Promise<TUIMessage[]> {
+ const saved = replaySessionOutTailImpl;
+ replaySessionOutTailImpl = undefined;
+ try {
+ return await replaySessionOutTail(sessionId, options);
+ } finally {
+ replaySessionOutTailImpl = saved;
+ }
+}
+
+/**
+ * Resolve the Session handle for the current chat.agent run. Throws if
+ * called outside of a chat.agent `run()` — every internal consumer is
+ * inside the run, and every external consumer goes through the public
+ * `sessions.open(id)` entry point.
+ * @internal
+ */
+function getChatSession(): SessionHandle {
+ const handle = locals.get(chatSessionHandleKey);
+ if (!handle) {
+ throw new Error(
+ "chat.agent session handle is not initialized. This indicates a chat.agent helper was used outside of a chat.agent run, or the transport did not send a sessionId."
+ );
+ }
+ return handle;
+}
+
+/**
+ * Stamp `gen_ai.conversation.id` on the active span at chat-run boot.
+ * The run-level span is already alive when the run callback fires, so
+ * `TaskContextSpanProcessor.onStart` (which stamps subsequent spans
+ * automatically) won't catch it — set explicitly here.
+ */
+function stampConversationIdOnActiveSpan(
+ conversationId: string | undefined,
+ span = trace.getActiveSpan()
+): void {
+ if (!span || !conversationId) return;
+ span.setAttribute(SemanticInternalAttributes.GEN_AI_CONVERSATION_ID, conversationId);
+}
type ToolResultContent = Array<
| {
- type: "text";
- text: string;
- }
+ type: "text";
+ text: string;
+ }
| {
- type: "image";
- data: string;
- mimeType?: string;
- }
+ type: "image";
+ data: string;
+ mimeType?: string;
+ }
>;
export type ToolOptions = {
experimental_toToolResultContent?: (result: TResult) => ToolResultContent;
};
+/** Satisfies AI SDK `ToolSet` index signature alongside concrete `Tool` input/output types. */
+type ToolSetCompatible> = T & NonNullable;
+
+function assertTaskUsableAsTool(task: AnyTask): void {
+ if (("schema" in task && !task.schema) || ("jsonSchema" in task && !task.jsonSchema)) {
+ throw new Error(
+ "Cannot convert this task to a tool because the task has no schema. Make sure to either use schemaTask or a task with an input jsonSchema."
+ );
+ }
+}
+
+/**
+ * Shared implementation: run a task as a tool invocation (`triggerAndSubscribe` + tool metadata).
+ * Used by {@link toolExecute} and the deprecated `ai.tool()` wrapper.
+ */
+function createTaskToolExecuteHandler<
+ TIdentifier extends string,
+ TTaskSchema extends TaskSchema | undefined = undefined,
+ TInput = void,
+ TOutput = unknown,
+>(
+ task: TaskWithSchema | Task
+): (input: unknown, toolOpts: ToolCallOptions | undefined) => Promise {
+ assertTaskUsableAsTool(task);
+
+ return async function taskToolExecuteHandler(
+ input: unknown,
+ toolOpts: ToolCallOptions | undefined
+ ): Promise {
+ const toolMeta: ToolCallExecutionOptions = {
+ toolCallId: toolOpts?.toolCallId ?? "",
+ };
+ if (toolOpts?.experimental_context !== undefined) {
+ try {
+ toolMeta.experimental_context = JSON.parse(JSON.stringify(toolOpts.experimental_context));
+ } catch {
+ /* non-serializable */
+ }
+ }
+
+ const chatCtx = locals.get(chatTurnContextKey);
+ if (chatCtx) {
+ toolMeta.chatId = chatCtx.chatId;
+ toolMeta.turn = chatCtx.turn;
+ toolMeta.continuation = chatCtx.continuation;
+ toolMeta.clientData = chatCtx.clientData;
+ }
+
+ const chatLocals: Record = {};
+ for (const entry of chatLocalRegistry) {
+ const value = locals.get(entry.key);
+ if (value !== undefined) {
+ chatLocals[entry.id] = value;
+ }
+ }
+ if (Object.keys(chatLocals).length > 0) {
+ toolMeta.chatLocals = chatLocals;
+ }
+
+ return await task
+ .triggerAndSubscribe(input as inferSchemaIn, {
+ metadata: {
+ [METADATA_KEY]: toolMeta as any,
+ },
+ tags: toolOpts?.toolCallId ? [`toolCallId:${toolOpts.toolCallId}`] : undefined,
+ signal: toolOpts?.abortSignal,
+ })
+ .unwrap();
+ };
+}
+
+/**
+ * Returns an `execute` function for the AI SDK `tool()` helper (or any compatible tool definition).
+ * Preferred API for task-backed tools: the same Trigger wiring as the deprecated `ai.tool()`
+ * (`triggerAndSubscribe`, tool-call metadata, chat context, `chat.local` serialization) without
+ * building the tool object. You supply `description`, `inputSchema`, and any AI-SDK-only options
+ * (e.g. `experimental_toToolResultContent`) on `tool()` yourself.
+ *
+ * @example
+ * ```ts
+ * import { tool } from "ai";
+ * import { z } from "zod";
+ * import { ai } from "@trigger.dev/sdk/ai";
+ * import { myTask } from "./trigger/myTask";
+ *
+ * export const myTool = tool({
+ * description: myTask.description ?? "",
+ * inputSchema: z.object({ id: z.string() }),
+ * execute: ai.toolExecute(myTask),
+ * });
+ * ```
+ */
+function toolExecute(
+ task: Task
+): (input: TInput, toolOpts: ToolCallOptions) => Promise;
+function toolExecute<
+ TIdentifier extends string,
+ TTaskSchema extends TaskSchema | undefined = undefined,
+ TOutput = unknown,
+>(
+ task: TaskWithSchema
+): (input: inferSchemaIn, toolOpts: ToolCallOptions) => Promise;
+function toolExecute<
+ TIdentifier extends string,
+ TTaskSchema extends TaskSchema | undefined = undefined,
+ TInput = void,
+ TOutput = unknown,
+>(
+ task: TaskWithSchema | Task
+): (
+ input: TTaskSchema extends TaskSchema ? inferSchemaIn : TInput,
+ toolOpts: ToolCallOptions
+) => Promise {
+ return createTaskToolExecuteHandler(task) as (
+ input: TTaskSchema extends TaskSchema ? inferSchemaIn : TInput,
+ toolOpts: ToolCallOptions
+ ) => Promise;
+}
+
+/**
+ * @deprecated Use `tool()` from the `ai` package with `execute: ai.toolExecute(task)` instead.
+ * This helper may be removed in a future major release.
+ */
function toolFromTask(
task: Task,
options?: ToolOptions
-): Tool;
+): ToolSetCompatible>;
+/** @deprecated Use `tool()` from `ai` with `execute: ai.toolExecute(task)`. */
function toolFromTask<
TIdentifier extends string,
TTaskSchema extends TaskSchema | undefined = undefined,
@@ -40,7 +839,8 @@ function toolFromTask<
>(
task: TaskWithSchema,
options?: ToolOptions
-): Tool, TOutput>;
+): ToolSetCompatible, TOutput>>;
+/** @deprecated Use `tool()` from `ai` with `execute: ai.toolExecute(task)`. */
function toolFromTask<
TIdentifier extends string,
TTaskSchema extends TaskSchema | undefined = undefined,
@@ -49,35 +849,41 @@ function toolFromTask<
>(
task: TaskWithSchema | Task,
options?: ToolOptions
-): TTaskSchema extends TaskSchema
- ? Tool, TOutput>
- : Tool {
- if (("schema" in task && !task.schema) || ("jsonSchema" in task && !task.jsonSchema)) {
- throw new Error(
- "Cannot convert this task to to a tool because the task has no schema. Make sure to either use schemaTask or a task with an input jsonSchema."
- );
+): ToolSetCompatible<
+ TTaskSchema extends TaskSchema ? Tool, TOutput> : Tool
+> {
+ const executeFromTaskInput = createTaskToolExecuteHandler(task);
+
+ // Zod-backed tasks: use static `tool()` so runtime shape matches `ToolSet`. Generic task context
+ // prevents `tool()` overloads from inferring input; `as any` is localized to this call only.
+ if ("schema" in task && task.schema && isSchemaZodEsque(task.schema)) {
+ const staticTool = aiTool({
+ description: task.description ?? "",
+ inputSchema: zodSchema(task.schema as any),
+ execute: async (input: unknown, toolOpts: ToolCallOptions) =>
+ executeFromTaskInput(input, toolOpts),
+ ...(options?.experimental_toToolResultContent !== undefined
+ ? { experimental_toToolResultContent: options.experimental_toToolResultContent }
+ : {}),
+ } as any);
+ return staticTool as unknown as ToolSetCompatible<
+ TTaskSchema extends TaskSchema ? Tool, TOutput> : Tool
+ >;
}
const toolDefinition = dynamicTool({
description: task.description,
inputSchema: convertTaskSchemaToToolParameters(task),
- execute: async (input, options) => {
- const serializedOptions = options ? JSON.parse(JSON.stringify(options)) : undefined;
-
- return await task
- .triggerAndWait(input as inferSchemaIn, {
- metadata: {
- [METADATA_KEY]: serializedOptions,
- },
- })
- .unwrap();
- },
- ...options,
+ ...(options?.experimental_toToolResultContent !== undefined
+ ? { experimental_toToolResultContent: options.experimental_toToolResultContent }
+ : {}),
+ execute: async (input: unknown, toolOpts: ToolCallOptions) =>
+ executeFromTaskInput(input, toolOpts),
});
- return toolDefinition as TTaskSchema extends TaskSchema
- ? Tool, TOutput>
- : Tool;
+ return toolDefinition as unknown as ToolSetCompatible<
+ TTaskSchema extends TaskSchema ? Tool, TOutput> : Tool
+ >;
}
function getToolOptionsFromMetadata(): ToolCallExecutionOptions | undefined {
@@ -88,6 +894,61 @@ function getToolOptionsFromMetadata(): ToolCallExecutionOptions | undefined {
return tool as ToolCallExecutionOptions;
}
+/**
+ * Get the current tool call ID from inside a subtask invoked via `ai.toolExecute()` (or legacy `ai.tool()`).
+ * Returns `undefined` if not running as a tool subtask.
+ */
+function getToolCallId(): string | undefined {
+ return getToolOptionsFromMetadata()?.toolCallId;
+}
+
+/**
+ * Get the chat context from inside a subtask invoked via `ai.toolExecute()` (or legacy `ai.tool()`) within a `chat.agent`.
+ * Pass `typeof yourChatTask` as the type parameter to get typed `clientData`.
+ * Returns `undefined` if the parent is not a chat task.
+ *
+ * @example
+ * ```ts
+ * const ctx = ai.chatContext();
+ * // ctx?.clientData is typed based on myChat's clientDataSchema
+ * ```
+ */
+function getToolChatContext():
+ | ChatTurnContext>
+ | undefined {
+ const opts = getToolOptionsFromMetadata();
+ if (!opts?.chatId) return undefined;
+ return {
+ chatId: opts.chatId,
+ turn: opts.turn ?? 0,
+ continuation: opts.continuation ?? false,
+ clientData: opts.clientData as InferChatClientData,
+ };
+}
+
+/**
+ * Get the chat context from inside a subtask, throwing if not in a chat context.
+ * Pass `typeof yourChatTask` as the type parameter to get typed `clientData`.
+ *
+ * @example
+ * ```ts
+ * const ctx = ai.chatContextOrThrow();
+ * // ctx.chatId, ctx.clientData are guaranteed non-null
+ * ```
+ */
+function getToolChatContextOrThrow(): ChatTurnContext<
+ InferChatClientData
+> {
+ const ctx = getToolChatContext();
+ if (!ctx) {
+ throw new Error(
+ "ai.chatContextOrThrow() called outside of a chat.agent context. " +
+ "This helper can only be used inside a subtask invoked via ai.toolExecute() (or legacy ai.tool()) from a chat.agent."
+ );
+ }
+ return ctx;
+}
+
function convertTaskSchemaToToolParameters(
task: AnyTask | TaskWithSchema
): Schema {
@@ -113,6 +974,7805 @@ function convertTaskSchemaToToolParameters(
}
export const ai = {
+ /**
+ * @deprecated Use `tool()` from the `ai` package with `execute: ai.toolExecute(task)` instead.
+ */
tool: toolFromTask,
+ /**
+ * Preferred: returns the function to use as the `execute` field of AI SDK `tool()`. Keeps Trigger subtask and
+ * metadata behavior without coupling to a specific `ai` version’s `Tool` / `ToolSet` types.
+ */
+ toolExecute,
currentToolOptions: getToolOptionsFromMetadata,
+ /** Get the tool call ID from inside a subtask invoked via `ai.toolExecute()` (or legacy `ai.tool()`). */
+ toolCallId: getToolCallId,
+ /** Get chat context (chatId, turn, clientData, etc.) from inside a subtask of a `chat.agent`. Returns undefined if not in a chat context. */
+ chatContext: getToolChatContext,
+ /** Get chat context or throw if not in a chat context. Pass `typeof yourChatTask` for typed clientData. */
+ chatContextOrThrow: getToolChatContextOrThrow,
+};
+
+/**
+ * Creates a public access token for a chat task.
+ *
+ * This is a convenience helper that creates a multi-use trigger public token
+ * scoped to the given task. Use it in a server action to provide the frontend
+ * `TriggerChatTransport` with an `accessToken`.
+ *
+ * @example
+ * ```ts
+ * // actions.ts
+ * "use server";
+ * import { chat } from "@trigger.dev/sdk/ai";
+ * import type { myChat } from "@/trigger/chat";
+ *
+ * export const getChatToken = () => chat.createAccessToken("my-chat");
+ * ```
+ */
+function createChatAccessToken(
+ taskId: TaskIdentifier
+): Promise {
+ return auth.createTriggerPublicToken(taskId as string, { expirationTime: "24h" });
+}
+
+// ---------------------------------------------------------------------------
+// Chat transport helpers — backend side
+// ---------------------------------------------------------------------------
+
+/**
+ * Typed chat output stream — `.writer()`, `.pipe()`, `.append()`, and
+ * `.read()` methods pre-bound to this run's Session `.out` channel and
+ * typed to `UIMessageChunk`.
+ *
+ * Use from within a `chat.agent` run to write custom chunks:
+ * ```ts
+ * const { waitUntilComplete } = chat.stream.writer({
+ * execute: ({ write }) => {
+ * write({ type: "text-start", id: "status-1" });
+ * write({ type: "text-delta", id: "status-1", delta: "Processing..." });
+ * write({ type: "text-end", id: "status-1" });
+ * },
+ * });
+ * await waitUntilComplete();
+ * ```
+ *
+ * Backed by the Session primitive so a chat's output outlives any single
+ * run — subscribers (browser transport, server-side `ChatStream`) read
+ * the session's `.out`, not a per-run stream. Run-scoped `target`
+ * options on `.pipe()` are honoured as no-ops; the session is the target.
+ */
+const chatStream: RealtimeDefinedStream = {
+ // Stable opaque label for the run-scoped `RealtimeDefinedStream` shape.
+ // `chatStream` is backed by the Session's `.out` channel — this id is
+ // not the real addressing key (the session is). Kept as a literal so
+ // the facade type stays satisfied without re-introducing a top-level
+ // constant; dashboards/telemetry that already read "chat" keep working.
+ id: "chat",
+ pipe(value, options) {
+ const { target: _target, ...sessionOptions } = (options ?? {}) as PipeStreamOptions;
+ return getChatSession().out.pipe(
+ value,
+ sessionOptions as SessionPipeStreamOptions
+ );
+ },
+ async read(_runId, options) {
+ // Session channels don't need a runId — the session is the address.
+ // Keep the signature for backward compatibility with the run-scoped
+ // RealtimeDefinedStream shape, but ignore the argument.
+ return getChatSession().out.read(
+ options as SessionSubscribeOptions | undefined
+ );
+ },
+ async append(value, options) {
+ const { target: _target, ...sessionOptions } = (options ?? {}) as AppendStreamOptions;
+ return getChatSession().out.append(value, sessionOptions as SessionPipeStreamOptions);
+ },
+ writer(options) {
+ return getChatSession().out.writer(options);
+ },
+};
+
+// ---------------------------------------------------------------------------
+// chat.response — write data parts that persist to the response message
+// ---------------------------------------------------------------------------
+
+/**
+ * Write data parts that both stream to the frontend AND persist in
+ * `onTurnComplete`'s `responseMessage` and `uiMessages`.
+ *
+ * Non-transient data chunks (`type` starts with `data-`, no `transient: true`)
+ * are queued for accumulation into the assistant response message.
+ * Transient or non-data chunks are streamed only (same as `chat.stream`).
+ *
+ * @example
+ * ```ts
+ * // Persists to responseMessage.parts
+ * chat.response.write({ type: "data-handover", data: { context: summary } });
+ *
+ * // Transient — streams only, not in responseMessage
+ * chat.response.write({ type: "data-progress", data: { percent: 50 }, transient: true });
+ * ```
+ */
+const chatResponse = {
+ /**
+ * Write a single chunk. Non-transient data parts are accumulated into the
+ * response message; everything else is stream-only.
+ */
+ write(part: UIMessageChunk): void {
+ queueResponsePart(part);
+ const { waitUntilComplete } = chatStream.writer({
+ spanName: "chat.response.write",
+ collapsed: true,
+ execute: ({ write }) => {
+ write(part);
+ },
+ });
+ waitUntilComplete().catch(() => {});
+ },
+};
+
+// ---------------------------------------------------------------------------
+// chat.store — typed, bidirectional shared data between agent and clients
+// ---------------------------------------------------------------------------
+
+/**
+ * Listener fired when the store value changes. `operations` is present for
+ * `patch()` updates and absent for `set()` (which is a full snapshot).
+ */
+export type ChatStoreChangeListener = (
+ value: TStore,
+ operations?: ChatStorePatchOperation[]
+) => void;
+
+/**
+ * @internal Holder for the current store value. We wrap in an object so
+ * `undefined` (cleared) is distinguishable from "never set".
+ */
+type ChatStoreSlot = { value: unknown };
+
+/** @internal */
+const chatStoreSlotKey = locals.create("chat.store.slot");
+
+/** @internal */
+const chatStoreListenersKey = locals.create>(
+ "chat.store.listeners"
+);
+
+/** @internal — write a store chunk onto the chat output stream. */
+function writeStoreChunk(chunk: ChatStoreChunk): void {
+ const { waitUntilComplete } = chatStream.writer({
+ spanName: chunk.type === "store-snapshot" ? "chat.store.set" : "chat.store.patch",
+ collapsed: true,
+ execute: ({ write }) => {
+ write(chunk as unknown as UIMessageChunk);
+ },
+ });
+ waitUntilComplete().catch(() => {});
+}
+
+/** @internal — fire all listeners, swallowing per-listener errors. */
+function fireStoreListeners(
+ value: unknown,
+ operations?: ChatStorePatchOperation[]
+): void {
+ const listeners = locals.get(chatStoreListenersKey);
+ if (!listeners || listeners.size === 0) return;
+ for (const listener of listeners) {
+ try {
+ listener(value, operations);
+ } catch {
+ // non-fatal — listener errors don't break the agent
+ }
+ }
+}
+
+/**
+ * Replace the entire store value with `value`. Emits a `store-snapshot`
+ * chunk on the chat output stream and fires all `onChange` listeners.
+ */
+function chatStoreSet(value: TStore): void {
+ locals.set(chatStoreSlotKey, { value });
+ writeStoreChunk({ type: "store-snapshot", value } satisfies ChatStoreSnapshotChunk);
+ fireStoreListeners(value);
+}
+
+/**
+ * Apply RFC 6902 JSON Patch operations to the current store value.
+ * Emits a `store-delta` chunk on the chat output stream and fires all
+ * `onChange` listeners with the new value and the operations.
+ */
+function chatStorePatch(operations: ChatStorePatchOperation[]): void {
+ const slot = locals.get(chatStoreSlotKey);
+ const current = slot?.value;
+ const next = applyChatStorePatch(current, operations);
+ locals.set(chatStoreSlotKey, { value: next });
+ writeStoreChunk({
+ type: "store-delta",
+ operations,
+ } satisfies ChatStoreDeltaChunk);
+ fireStoreListeners(next, operations);
+}
+
+/** Get the current store value. Returns `undefined` if no value has been set. */
+function chatStoreGet(): TStore | undefined {
+ return locals.get(chatStoreSlotKey)?.value as TStore | undefined;
+}
+
+/**
+ * Subscribe to store changes for the current run. Returns an
+ * unsubscribe function.
+ */
+function chatStoreOnChange(
+ listener: ChatStoreChangeListener
+): () => void {
+ let listeners = locals.get(chatStoreListenersKey);
+ if (!listeners) {
+ listeners = new Set();
+ locals.set(chatStoreListenersKey, listeners);
+ }
+ listeners.add(listener as ChatStoreChangeListener);
+ return () => {
+ listeners!.delete(listener as ChatStoreChangeListener);
+ };
+}
+
+/**
+ * @internal — set the value without emitting a chunk. Used when applying
+ * `hydrateStore` results / `incomingStore` at turn start; the emitted
+ * snapshot is written separately so we don't double-emit.
+ */
+function chatStoreSetSilent(value: unknown): void {
+ locals.set(chatStoreSlotKey, { value });
+}
+
+/**
+ * @internal — emit the current value as a snapshot without touching the
+ * slot. Used at turn start after hydration so clients observing the stream
+ * see the initial value.
+ */
+function chatStoreEmitSnapshot(value: unknown): void {
+ writeStoreChunk({ type: "store-snapshot", value } satisfies ChatStoreSnapshotChunk);
+}
+
+// ---------------------------------------------------------------------------
+// ChatWriter — stream writer for callbacks
+// ---------------------------------------------------------------------------
+
+/**
+ * A stream writer passed to chat lifecycle callbacks (`onPreload`, `onChatStart`,
+ * `onTurnStart`, `onTurnComplete`, `onCompacted`).
+ *
+ * Write custom `UIMessageChunk` parts (e.g. `data-*` parts) directly to the chat
+ * stream without the ceremony of `chat.stream.writer({ execute })`.
+ *
+ * The writer is lazy — no stream overhead if you don't call `write()` or `merge()`.
+ *
+ * @example
+ * ```ts
+ * onTurnStart: async ({ writer }) => {
+ * writer.write({ type: "data-status", data: { loading: true } });
+ * },
+ * onTurnComplete: async ({ writer, uiMessages }) => {
+ * writer.write({ type: "data-analytics", data: { messageCount: uiMessages.length } });
+ * },
+ * ```
+ */
+export type ChatWriter = {
+ /** Write a single UIMessageChunk to the chat stream. */
+ write(part: UIMessageChunk): void;
+ /** Merge another stream's chunks into the chat stream. */
+ merge(stream: ReadableStream): void;
+};
+
+/**
+ * Creates a lazy ChatWriter that only opens a realtime stream on first use.
+ * Call `flush()` after the callback returns to await stream completion.
+ * @internal
+ */
+function createLazyChatWriter(): { writer: ChatWriter; flush: () => Promise } {
+ let writeImpl: ((part: UIMessageChunk) => void) | null = null;
+ let mergeImpl: ((stream: ReadableStream) => void) | null = null;
+ let waitPromise: (() => Promise) | null = null;
+ let resolveExecute: (() => void) | null = null;
+
+ function ensureInitialized() {
+ if (writeImpl) return;
+
+ const executePromise = new Promise((resolve) => {
+ resolveExecute = resolve;
+ });
+
+ const { waitUntilComplete } = chatStream.writer({
+ collapsed: true,
+ spanName: "callback writer",
+ execute: ({ write, merge }) => {
+ writeImpl = write;
+ mergeImpl = merge;
+ return executePromise; // Keep execute alive until flush()
+ },
+ });
+ waitPromise = waitUntilComplete;
+ }
+
+ return {
+ writer: {
+ write(part: UIMessageChunk) {
+ ensureInitialized();
+ queueResponsePart(part);
+ writeImpl!(part);
+ },
+ merge(stream: ReadableStream) {
+ ensureInitialized();
+ mergeImpl!(stream);
+ },
+ },
+ async flush() {
+ if (resolveExecute) {
+ resolveExecute(); // Signal execute to complete
+ await waitPromise!(); // Wait for stream to finish piping
+ }
+ },
+ };
+}
+
+/**
+ * Runs a callback with a lazy ChatWriter, flushing the stream after completion.
+ * @internal
+ */
+async function withChatWriter(fn: (writer: ChatWriter) => Promise | T): Promise {
+ const { writer, flush } = createLazyChatWriter();
+ const result = await fn(writer);
+ await flush();
+ return result;
+}
+
+// `ChatTaskWirePayload` and `ChatInputChunk` live in `./ai-shared.ts` so
+// browser bundles (which import them via `chat-client.ts` / `chat.ts`)
+// can pull the types without dragging `ai.ts` into the client graph.
+// Re-exported here so `@trigger.dev/sdk/ai` consumers see them.
+import type { ChatTaskWirePayload, ChatInputChunk } from "./ai-shared.js";
+export type { ChatTaskWirePayload, ChatInputChunk } from "./ai-shared.js";
+
+/**
+ * The payload shape passed to the `chatAgent` run function.
+ *
+ * - `messages` contains model-ready messages (converted via `convertToModelMessages`) —
+ * pass these directly to `streamText`.
+ * - `clientData` contains custom data from the frontend (the `metadata` field from `sendMessage()`).
+ *
+ * The backend accumulates the full conversation history across turns, so the frontend
+ * only needs to send new messages after the first turn.
+ */
+export type ChatTaskPayload = {
+ /** Model-ready messages — pass directly to `streamText({ messages })`. */
+ messages: ModelMessage[];
+
+ /** The unique identifier for the chat session */
+ chatId: string;
+
+ /**
+ * The trigger type:
+ * - `"submit-message"`: A new user message
+ * - `"regenerate-message"`: Regenerate the last assistant response
+ * - `"preload"`: Run was preloaded before the first message (only on turn 0)
+ * - `"action"`: A typed action from the frontend (see `actionSchema` + `onAction`).
+ * The action has already been applied before `run()` fires — check `trigger === "action"`
+ * to short-circuit the LLM call when an action doesn't need a response.
+ * - `"close"`: The chat session is being closed (internal; `run()` is not called).
+ */
+ trigger: "submit-message" | "regenerate-message" | "preload" | "action" | "close";
+
+ /** The ID of the message to regenerate (only for `"regenerate-message"`) */
+ messageId?: string;
+
+ /** Custom data from the frontend (passed via `metadata` on `sendMessage()` or the transport). */
+ clientData?: TClientData;
+
+ /** Whether this run is continuing an existing chat (previous run timed out or was cancelled). False for brand new chats. */
+ continuation: boolean;
+ /** The run ID of the previous run (only set when `continuation` is true). */
+ previousRunId?: string;
+ /** Whether this run was preloaded before the first message. */
+ preloaded: boolean;
+ /**
+ * The friendlyId of the Session primitive backing this chat. Use with
+ * `sessions.open(sessionId)` when you need direct access to the session's
+ * `.in` / `.out` channels outside the hooks the agent already wires for
+ * you. Undefined only for legacy transports that predate the sessions
+ * migration.
+ */
+ sessionId?: string;
+};
+
+/**
+ * Abort signals provided to the `chatAgent` run function.
+ */
+export type ChatTaskSignals = {
+ /** Combined signal — fires on run cancel OR stop generation. Pass to `streamText`. */
+ signal: AbortSignal;
+ /** Fires only when the run is cancelled, expired, or exceeds maxDuration. */
+ cancelSignal: AbortSignal;
+ /** Fires only when the frontend stops generation for this turn (per-turn, reset each turn). */
+ stopSignal: AbortSignal;
+};
+
+/**
+ * The full payload passed to a `chatAgent` run function.
+ * Extends `ChatTaskPayload` (the wire payload) with abort signals.
+ */
+export type ChatTaskRunPayload = ChatTaskPayload &
+ ChatTaskSignals & {
+ /**
+ * Task run context — same object as the `ctx` passed to a standard `task({ run })` handler’s second argument.
+ * Use for tags, metadata, parent run links, or any API that needs the full run record.
+ */
+ ctx: TaskRunContext;
+ /** Token usage from the previous turn. Undefined on turn 0. */
+ previousTurnUsage?: LanguageModelUsage;
+ /** Cumulative token usage across all completed turns so far. */
+ totalUsage: LanguageModelUsage;
+ };
+
+// Input streams for bidirectional chat communication
+//
+// Both `messagesInput` and `stopInput` are thin facades over the current
+// run's Session `.in` channel. The Session carries a single tagged stream
+// (`ChatInputChunk`); these facades filter by `kind` so existing call
+// sites (both internal and exposed via `chat.messages` / `chat.createStopSignal`)
+// keep their original shape. Each accessor resolves the session handle
+// lazily via `getChatSession()` so the module-level references stay
+// compatible with the pre-migration wiring.
+const messagesInput: RealtimeDefinedInputStream = {
+ id: "chat-messages",
+ on(handler) {
+ return getChatSession().in.on((chunk) => {
+ if (chunk.kind === "message") {
+ return handler(chunk.payload);
+ }
+ });
+ },
+ once(options) {
+ const ctx = taskContext.ctx;
+ const runId = ctx?.run.id;
+
+ return new InputStreamOncePromise((resolve, reject) => {
+ tracer
+ .startActiveSpan(
+ options?.spanName ?? `chat.messages.once()`,
+ async () => {
+ while (true) {
+ const result = await getChatSession().in.once(options);
+ if (!result.ok) {
+ resolve(result as InputStreamOnceResult);
+ return;
+ }
+ if (result.output.kind === "message") {
+ resolve({ ok: true, output: result.output.payload });
+ return;
+ }
+ // Non-message chunks (stops) are handled by the stopInput
+ // facade's persistent listener; loop and wait for the next.
+ }
+ },
+ {
+ attributes: {
+ [SemanticInternalAttributes.STYLE_ICON]: "streams",
+ [SemanticInternalAttributes.ENTITY_TYPE]: "input-stream",
+ ...(runId
+ ? {
+ [SemanticInternalAttributes.ENTITY_ID]: `${runId}:chat-messages`,
+ }
+ : {}),
+ streamId: "chat-messages",
+ ...accessoryAttributes({
+ items: [{ text: "chat-messages", variant: "normal" }],
+ style: "codepath",
+ }),
+ },
+ }
+ )
+ .catch(reject);
+ });
+ },
+ peek() {
+ const chunk = getChatSession().in.peek();
+ if (chunk && chunk.kind === "message") return chunk.payload;
+ return undefined;
+ },
+ wait(options) {
+ return new ManualWaitpointPromise(async (resolve, reject) => {
+ try {
+ while (true) {
+ const result = await getChatSession().in.wait(options);
+ if (!result.ok) {
+ resolve(result);
+ return;
+ }
+ if (result.output.kind === "message") {
+ resolve({ ok: true, output: result.output.payload });
+ return;
+ }
+ // Stop chunks are handled by the stopInput facade's persistent
+ // listener; loop back into the suspending wait.
+ }
+ } catch (error) {
+ reject(error);
+ }
+ });
+ },
+ async waitWithIdleTimeout(options) {
+ while (true) {
+ const result = await getChatSession().in.waitWithIdleTimeout(options);
+ if (!result.ok) return result;
+ if (result.output.kind === "message") {
+ return { ok: true, output: result.output.payload };
+ }
+ // Swallow stop-kind chunks — persistent stop listener already handled
+ // the abort; we just loop for the next message.
+ }
+ },
+ async send(_runId, data, options) {
+ // The `runId` argument is kept for signature parity with
+ // `RealtimeDefinedInputStream` but ignored — sessions are addressed
+ // by sessionId, not runId. Callers producing messages from outside
+ // the run should prefer the transport's `session.in.send(...)` path.
+ await getChatSession().in.send(
+ { kind: "message", payload: data } satisfies ChatInputChunk,
+ options?.requestOptions
+ );
+ },
+};
+
+const stopInput: RealtimeDefinedInputStream<{ stop: true; message?: string }> = {
+ id: "chat-stop",
+ on(handler) {
+ return getChatSession().in.on((chunk) => {
+ if (chunk.kind === "stop") {
+ return handler({ stop: true, message: chunk.message });
+ }
+ });
+ },
+ once(options) {
+ const ctx = taskContext.ctx;
+ const runId = ctx?.run.id;
+
+ return new InputStreamOncePromise<{ stop: true; message?: string }>((resolve, reject) => {
+ tracer
+ .startActiveSpan(
+ options?.spanName ?? `chat.stop.once()`,
+ async () => {
+ while (true) {
+ const result = await getChatSession().in.once(options);
+ if (!result.ok) {
+ resolve(result as InputStreamOnceResult<{ stop: true; message?: string }>);
+ return;
+ }
+ if (result.output.kind === "stop") {
+ resolve({
+ ok: true,
+ output: { stop: true, message: result.output.message },
+ });
+ return;
+ }
+ }
+ },
+ {
+ attributes: {
+ [SemanticInternalAttributes.STYLE_ICON]: "streams",
+ [SemanticInternalAttributes.ENTITY_TYPE]: "input-stream",
+ ...(runId
+ ? {
+ [SemanticInternalAttributes.ENTITY_ID]: `${runId}:chat-stop`,
+ }
+ : {}),
+ streamId: "chat-stop",
+ ...accessoryAttributes({
+ items: [{ text: "chat-stop", variant: "normal" }],
+ style: "codepath",
+ }),
+ },
+ }
+ )
+ .catch(reject);
+ });
+ },
+ peek() {
+ const chunk = getChatSession().in.peek();
+ if (chunk && chunk.kind === "stop") {
+ return { stop: true, message: chunk.message };
+ }
+ return undefined;
+ },
+ wait(options) {
+ return new ManualWaitpointPromise<{ stop: true; message?: string }>(async (resolve, reject) => {
+ try {
+ while (true) {
+ const result = await getChatSession().in.wait(options);
+ if (!result.ok) {
+ resolve(result);
+ return;
+ }
+ if (result.output.kind === "stop") {
+ resolve({
+ ok: true,
+ output: { stop: true, message: result.output.message },
+ });
+ return;
+ }
+ }
+ } catch (error) {
+ reject(error);
+ }
+ });
+ },
+ async waitWithIdleTimeout(options) {
+ while (true) {
+ const result = await getChatSession().in.waitWithIdleTimeout(options);
+ if (!result.ok) return result;
+ if (result.output.kind === "stop") {
+ return { ok: true, output: { stop: true, message: result.output.message } };
+ }
+ }
+ },
+ async send(_runId, data, options) {
+ await getChatSession().in.send(
+ { kind: "stop", message: data?.message } satisfies ChatInputChunk,
+ options?.requestOptions
+ );
+ },
+};
+
+/**
+ * Signal received by a `handover-prepare` agent run waiting on
+ * `session.in`. Either the customer's first-turn `streamText` finished
+ * with pending tool calls (`"handover"` — agent picks up from tool
+ * execution), or it finished pure-text (`"handover-skip"` — agent
+ * exits cleanly without making an LLM call).
+ * @internal
+ */
+type HandoverSignal =
+ | {
+ kind: "handover";
+ partialAssistantMessage: ModelMessage[];
+ messageId?: string;
+ /**
+ * Whether the customer's step 1 is the final response. When
+ * true, the agent's turn loop runs hooks but skips the LLM
+ * call (the partial IS the response). When false, the agent
+ * runs `streamText` which executes pending tool-calls via the
+ * approval round and continues from step 2.
+ */
+ isFinal: boolean;
+ }
+ | { kind: "handover-skip" };
+
+/**
+ * Internal facade for waiting on the handover signal. Mirrors
+ * `messagesInput` / `stopInput` so the wait paths and tracing
+ * attributes stay consistent across all input-stream branches.
+ * @internal
+ */
+const handoverInput = {
+  async waitWithIdleTimeout(options: {
+    idleTimeoutInSeconds: number;
+    timeout?: string;
+    spanName?: string;
+    skipSuspend?: boolean;
+  }) {
+    while (true) {
+      const result = await getChatSession().in.waitWithIdleTimeout(options);
+      if (!result.ok) return result; // idle-timeout / failure: propagate to the caller unchanged
+      if (
+        result.output.kind === "handover" ||
+        result.output.kind === "handover-skip"
+      ) {
+        // `as const` keeps `ok` narrowed to the literal `true` for callers.
+        return { ok: true as const, output: result.output as HandoverSignal };
+      }
+      // Other kinds (message, stop) are not expected during handover-prepare.
+      // Loop back; the message and stop facades have their own listeners
+      // running so signals on those kinds aren't lost.
+    }
+  },
};
+
+/**
+ * Per-turn deferred promises. Registered via `chat.defer()`, awaited
+ * before `onTurnComplete` fires. Reset each turn.
+ * @internal
+ */
+const chatDeferKey = locals.create<Array<Promise<void>>>("chat.defer"); // NOTE(review): type arg reconstructed (angle-brackets stripped in transit; `>>` leftover implies triple nesting) — confirm element type
+
+/**
+ * Run-scoped slot holding the partial assistant message handed over by
+ * `chat.handover` from a customer's first-turn `streamText`. Appended
+ * to `accumulatedMessages` during turn 0 setup so `streamText` resumes
+ * at tool execution. Cleared (read once) after consumption.
+ * @internal
+ */
+const chatHandoverPartialKey = locals.create<ModelMessage[]>("chat.handoverPartial"); // type arg restored — matches HandoverSignal.partialAssistantMessage
+
+/**
+ * Run-scoped slot holding the assistant `messageId` the customer's
+ * `chat.handover` handler used for its step-1 stream. The agent reuses
+ * it on the agent-side `toUIMessageStream` (and the synthesized
+ * partial UIMessage in `originalMessages`) so all chunks merge into a
+ * single assistant message on the browser side.
+ * @internal
+ */
+const chatHandoverMessageIdKey = locals.create<string>("chat.handoverMessageId"); // type arg restored — matches HandoverSignal.messageId
+
+/**
+ * Run-scoped slot indicating that the customer's step-1 head-start
+ * response is the FINAL turn response. When true, turn 0 runs through
+ * the full turn-loop hooks but SKIPS the `userRun` / `streamText`
+ * call — the customer's partial already IS the response. The agent's
+ * `onTurnComplete` fires with that partial so persistence + any
+ * post-turn work happens normally. Cleared after consumption.
+ * @internal
+ */
+const chatHandoverIsFinalKey = locals.create<boolean>("chat.handoverIsFinal"); // type arg restored — matches HandoverSignal.isFinal
+
+/**
+ * Build a UIMessage representation of a `chat.handover` partial so AI
+ * SDK's `processUIMessageStream` can transition `tool-output-available`
+ * chunks (emitted by the initial-tool-execution branch when the
+ * approval round runs) onto the existing tool-call. Without this,
+ * `state.message.parts` is empty when the agent's `streamText`
+ * finishes, and AI SDK throws
+ * `UIMessageStreamError: No tool invocation found`.
+ *
+ * Only the assistant message matters — the synthesized
+ * `tool-approval-response` rows are AI-SDK-internal and don't need a
+ * UIMessage representation. We map:
+ * - `text` parts → `{ type: "text", text }`
+ * - `tool-call` parts → `{ type: "tool-${name}", toolCallId,
+ *   state: "input-available", input }`
+ * - `tool-approval-request` parts → skipped (AI SDK derives the
+ *   approval state from chunks during processing)
+ *
+ * @param partial Model messages handed over from the customer's step-1 stream.
+ * @param messageId The customer's step-1 assistant message id, if the signal carried one.
+ * @returns The synthesized assistant UIMessage, or `undefined` when no
+ *   assistant message with structured (array) content is present or it
+ *   yields no mappable parts.
+ * @internal
+ */
+function synthesizeHandoverUIMessage(
+  partial: ModelMessage[],
+  messageId?: string
+): UIMessage | undefined {
+  // First assistant message only; string content has no structured
+  // parts to mirror, so there is nothing to synthesize.
+  const assistant = partial.find((m) => m.role === "assistant");
+  if (!assistant || typeof assistant.content === "string") return undefined;
+
+  const parts: UIMessage["parts"] = [];
+  for (const part of assistant.content as Array<{
+    type: string;
+    text?: string;
+    toolCallId?: string;
+    toolName?: string;
+    input?: unknown;
+  }>) {
+    if (part.type === "text" && typeof part.text === "string") {
+      parts.push({ type: "text", text: part.text } as UIMessage["parts"][number]);
+    } else if (part.type === "tool-call" && part.toolCallId && part.toolName) {
+      // The dynamic `tool-${name}` part type can't be expressed as a
+      // literal, hence the double-cast below.
+      parts.push({
+        type: `tool-${part.toolName}`,
+        toolCallId: part.toolCallId,
+        state: "input-available", // pending call — output arrives via later stream chunks
+        input: part.input,
+      } as unknown as UIMessage["parts"][number]);
+    }
+    // tool-approval-request parts intentionally skipped — they're an
+    // AI-SDK protocol detail, not a UI surface.
+  }
+
+  if (parts.length === 0) return undefined; // no mappable content
+
+  // Use the customer's step-1 messageId if provided (so the agent's
+  // post-handover chunks merge into the same assistant message on the
+  // browser). Fall back to a fresh id only if the handover signal
+  // didn't carry one.
+  return {
+    id: messageId ?? generateMessageId(),
+    role: "assistant",
+    parts,
+  } as UIMessage;
+}
+
+/**
+ * Per-turn background context queue. Messages added via `chat.backgroundWork.inject()`
+ * are drained at the next `prepareStep` boundary and appended to the model messages.
+ * @internal
+ */
+const chatBackgroundQueueKey = locals.create<ModelMessage[]>("chat.backgroundQueue"); // NOTE(review): type arg reconstructed — queue entries are appended to the model messages; confirm
+
+/**
+ * Run-scoped pipe counter. Stored in locals so concurrent runs in the
+ * same worker don't share state.
+ * @internal
+ */
+const chatPipeCountKey = locals.create<number>("chat.pipeCount"); // counter ⇒ number (type arg restored)
+const chatStopControllerKey = locals.create<AbortController>("chat.stopController"); // NOTE(review): type arg reconstructed from the key name — confirm controller type
+/** Static (task-level) UIMessageStream options, set once during chatAgent setup. @internal */
+const chatUIStreamStaticKey = locals.create<Partial<UIMessageStreamOptions>>( // NOTE(review): type arg reconstructed (`>` leftover implies a doubly-nested generic) — confirm the options type name
+  "chat.uiMessageStreamOptions.static"
+);
+/** Per-turn UIMessageStream options, set via chat.setUIMessageStreamOptions(). @internal */
+const chatUIStreamPerTurnKey = locals.create<Partial<UIMessageStreamOptions>>( // NOTE(review): type arg reconstructed to mirror chatUIStreamStaticKey — confirm the options type name
+  "chat.uiMessageStreamOptions.perTurn"
+);
+
+/**
+ * Run-scoped `toolCallId → assistant messageId` map. Records the head
+ * assistant id whenever the accumulator absorbs an assistant message
+ * containing tool parts. Used as a fallback in the id-merge for
+ * incoming tool-answer messages — if the AI SDK regenerates the
+ * assistant id on a HITL `addToolOutput` resume, we look up the
+ * original head id by `toolCallId` and rewrite it before the merge.
+ *
+ * Customer-side workaround for the same case is documented in Arena
+ * AI's chat-agent task; lifting it into the SDK so customers don't
+ * have to. See TRI-9137.
+ * @internal
+ */
+const chatToolCallToMessageIdKey = locals.create