diff --git a/.github/workflows/preview-sdks.yml b/.github/workflows/preview-sdks.yml
index b42d63901..297e51730 100644
--- a/.github/workflows/preview-sdks.yml
+++ b/.github/workflows/preview-sdks.yml
@@ -17,6 +17,11 @@ jobs:
- name: Setup node
uses: actions/setup-node@v3
+ - name: Setup pnpm
+ uses: pnpm/action-setup@v2
+ with:
+ version: 8
+
- name: Download Fern
run: npm install -g fern-api
@@ -30,9 +35,9 @@ jobs:
env:
FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
run: |
- cd fern/apis/api/.preview/fern-typescript-node-sdk
- yarn install
- yarn build
+ cd fern/apis/api/.preview/fern-typescript-sdk
+ pnpm install
+ pnpm build
preview-python:
runs-on: ubuntu-latest
diff --git a/.github/workflows/release-php-sdk.yml b/.github/workflows/release-php-sdk.yml
new file mode 100644
index 000000000..78341822a
--- /dev/null
+++ b/.github/workflows/release-php-sdk.yml
@@ -0,0 +1,25 @@
+name: Release PHP SDK
+
+on:
+ workflow_dispatch:
+ inputs:
+ version:
+ description: "The version of the PHP SDK that you would like to release"
+ required: true
+ type: string
+
+jobs:
+ release:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v4
+
+ - name: Download Fern
+ run: npm install -g fern-api
+
+ - name: Release PHP SDK
+ env:
+ FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
+ run: |
+ fern generate --group php-sdk --version ${{ inputs.version }} --log-level debug
diff --git a/.github/workflows/release-swift-sdk.yml b/.github/workflows/release-swift-sdk.yml
new file mode 100644
index 000000000..85cab59b3
--- /dev/null
+++ b/.github/workflows/release-swift-sdk.yml
@@ -0,0 +1,25 @@
+name: Release Swift SDK
+
+on:
+ workflow_dispatch:
+ inputs:
+ version:
+ description: "The version of the Swift SDK that you would like to release"
+ required: true
+ type: string
+
+jobs:
+ release:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v4
+
+ - name: Download Fern
+ run: npm install -g fern-api
+
+ - name: Release Swift SDK
+ env:
+ FERN_TOKEN: ${{ secrets.FERN_TOKEN }}
+ run: |
+ fern generate --group swift-sdk --version ${{ inputs.version }} --log-level debug
diff --git a/.github/workflows/update-openapi.yml b/.github/workflows/update-openapi.yml
index 7d83b663d..8dc3484a8 100644
--- a/.github/workflows/update-openapi.yml
+++ b/.github/workflows/update-openapi.yml
@@ -16,9 +16,21 @@ jobs:
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Update OpenAPI Spec
- uses: fern-api/sync-openapi@v2
+ id: sync-openapi
+ uses: fern-api/sync-openapi@v4
with:
token: ${{ secrets.GITHUB_TOKEN }}
branch: 'update-openapi-spec'
update_from_source: true
- add_timestamp: true
\ No newline at end of file
+ add_timestamp: true
+ - name: Enable auto-merge
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ PR_NUMBER=$(gh pr list --json number,headRefName --jq '[.[] | select(.headRefName | startswith("update-openapi-spec"))] | sort_by(.number) | last | .number')
+ if [ -n "$PR_NUMBER" ] && [ "$PR_NUMBER" != "null" ]; then
+ echo "Found PR #$PR_NUMBER, enabling auto-merge"
+ gh pr merge "$PR_NUMBER" --auto --squash
+ else
+ echo "No PR found for branch starting with update-openapi-spec"
+ fi
diff --git a/.gitignore b/.gitignore
index 74231d1ff..a2eb222c9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,4 +3,5 @@
node_modules/
dist/
.env
-.DS_Store
\ No newline at end of file
+.DS_Store
+.tool-versions
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 000000000..4a728d29b
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Vapi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
index 393cf32a0..f5fdbeae5 100644
--- a/README.md
+++ b/README.md
@@ -27,6 +27,7 @@ You can suggest edits by making a pull request.
To run a local development server with hot-reloading you can run the following command
```sh
+npm install -g fern-api
fern docs dev
```
diff --git a/fern/advanced/sip/sip-chime.mdx b/fern/advanced/sip/sip-chime.mdx
new file mode 100644
index 000000000..4e3ee9941
--- /dev/null
+++ b/fern/advanced/sip/sip-chime.mdx
@@ -0,0 +1,305 @@
+---
+title: Amazon Chime SDK SIP Integration
+subtitle: How to integrate Amazon Chime SDK Voice Connector with Vapi
+slug: advanced/sip/amazon-chime
+---
+
+This guide walks you through setting up both outbound and inbound SIP trunking between Amazon Chime SDK and Vapi using a Voice Connector.
+
+This is a **Voice Connector-only** integration — inbound and outbound calls work with no Lambda functions or custom logic required. Vapi handles the AI assistant entirely. This approach is best for straightforward AI assistants on a phone number where no additional integration is needed.
+
+
+This integration does not support passing custom SIP headers, metadata, or enriched escalation data (e.g., human transfer with SIP header context). For those use cases, use a **SIP Media Application** with **CallAndBridge** instead.
+
+
+## Prerequisites
+
+- An AWS account with access to the [Amazon Chime SDK console](https://console.aws.amazon.com/chime-sdk/)
+- A Vapi account with a private API key
+- AWS CLI configured, or access to the Chime SDK console
+- A phone number provisioned in Amazon Chime SDK (or the ability to order one)
+- A Vapi assistant already created (for inbound calls)
+
+## Outbound calls (Vapi to Chime SDK)
+
+### Chime SDK configuration
+
+
+
+
+
+In the Amazon Chime SDK console, navigate to **Voice Connectors** and create a new one.
+
+Configure the following settings:
+- **Encryption:** Enabled (default)
+- **Network Type:** IPV4_ONLY
+
+
+
+Save the **Outbound host name** from the Voice Connector details — you need it when configuring the Vapi SIP trunk.
+
+
+
+
+
+Navigate to the **Termination** tab of your Voice Connector and enable it.
+
+Add Vapi's static IP addresses to the allowed host list:
+
+
+
+
+
+- `44.229.228.186/32`
+- `44.238.177.138/32`
+
+
+
+
+
+In the **Termination** tab, scroll to the calling plan section and select the countries you want to allow outbound calls to.
+
+
+
+
+
+Still in the **Termination** tab, create a new credential with a username and password. Save these credentials — you need them for the Vapi SIP trunk configuration.
+
+
+
+
+
+
+
+Navigate to the **Phone numbers** tab and click **Assign from inventory** to attach a phone number to this Voice Connector.
+
+
+
+Select the phone number you want to assign and confirm.
+
+
+
+
+If you don't have any phone numbers in your inventory, order them from **Amazon Chime SDK → Phone Number Management → Orders → Provision Phone Numbers**.
+
+
+
+
+
+
+### Vapi configuration
+
+
+
+
+
+Log in to your [Vapi dashboard](https://dashboard.vapi.ai) and retrieve your API key from the **Organization Settings**.
+
+
+
+
+
+Use the following API call to create a SIP trunk credential. Replace the placeholders with your Chime SDK Voice Connector details:
+
+```bash
+curl -X POST https://api.vapi.ai/credential \
+-H "Content-Type: application/json" \
+-H "Authorization: Bearer YOUR_VAPI_API_KEY" \
+-d '{
+ "provider": "byo-sip-trunk",
+ "name": "Chime SDK Trunk",
+ "outboundLeadingPlusEnabled": true,
+ "outboundAuthenticationPlan": {
+ "authUsername": "YOUR_CHIME_CREDENTIAL_USERNAME",
+ "authPassword": "YOUR_CHIME_CREDENTIAL_PASSWORD"
+ },
+ "gateways": [
+ {
+ "ip": "YOUR_CHIME_OUTBOUND_HOSTNAME",
+ "outboundEnabled": true,
+ "outboundProtocol": "tls/srtp",
+ "inboundEnabled": false,
+ "optionsPingEnabled": true
+ }
+ ]
+}'
+```
+
+Note the `id` (credential ID) from the response for the next step.
+
+
+The `outboundProtocol` must be set to `tls/srtp` when encryption is enabled on the Voice Connector (the default).
+
+
+
+
+
+
+Associate your Chime SDK phone number with the Vapi SIP trunk:
+
+```bash
+curl -X POST https://api.vapi.ai/phone-number \
+-H "Content-Type: application/json" \
+-H "Authorization: Bearer YOUR_VAPI_API_KEY" \
+-d '{
+ "provider": "byo-phone-number",
+ "name": "Chime SDK SIP Number",
+ "number": "YOUR_CHIME_PHONE_NUMBER",
+ "numberE164CheckEnabled": true,
+ "credentialId": "YOUR_CREDENTIAL_ID"
+}'
+```
+
+Note the phone number ID from the response for making calls.
+
+
+The phone number must be in E.164 format (e.g., `+18312168445`).
+
+
+
+
+
+
+You can make outbound calls in two ways:
+
+**Using the Vapi Dashboard:**
+
+The phone number appears in your dashboard. Select your assistant and enter the destination number you want to call.
+
+**Using the API:**
+
+```bash
+curl -X POST https://api.vapi.ai/call/phone \
+-H "Content-Type: application/json" \
+-H "Authorization: Bearer YOUR_VAPI_API_KEY" \
+-d '{
+ "assistantId": "YOUR_ASSISTANT_ID",
+ "customer": {
+ "number": "DESTINATION_PHONE_NUMBER",
+ "numberE164CheckEnabled": false
+ },
+ "phoneNumberId": "YOUR_PHONE_NUMBER_ID"
+}'
+```
+
+
+
+
+
+## Inbound calls (Chime SDK to Vapi)
+
+For inbound calls, a caller dials your Chime SDK phone number. The Voice Connector routes the call to Vapi through its origination settings — no Lambda or SIP Media Application required:
+
+```mermaid
+graph LR
+ A[Caller] --> B[Chime Phone Number]
+ B --> C[Voice Connector]
+ C --> D[sip.vapi.ai]
+ D --> E[Vapi AI Assistant]
+```
+
+### Vapi configuration
+
+
+
+
+
+Vapi needs to know which IP addresses are allowed to send SIP traffic to it. Since the Voice Connector originates calls from its regional signaling IPs, you must register those IPs as a BYO SIP trunk credential in Vapi.
+
+Look up the **SIP signaling subnet** for your Voice Connector's region from the [Chime SDK Voice Connector network configuration docs](https://docs.aws.amazon.com/chime-sdk/latest/ag/network-config.html).
+
+Create the credential via the Vapi API:
+
+```bash
+curl -X POST https://api.vapi.ai/credential \
+-H "Content-Type: application/json" \
+-H "Authorization: Bearer YOUR_VAPI_API_KEY" \
+-d '{
+ "provider": "byo-sip-trunk",
+ "name": "Amazon Chime SDK Trunk",
+ "gateways": [
+ {
+ "ip": "YOUR_VOICE_CONNECTOR_SIGNALING_IP",
+ "netmask": 24,
+ "inboundEnabled": true,
+ "outboundEnabled": false,
+ "outboundProtocol": "tls/srtp",
+ "optionsPingEnabled": true
+ }
+ ]
+}'
+```
+
+Replace the `ip` and `netmask` with the values for your Voice Connector's region. For example:
+- **US West (Oregon):** `99.77.253.0` with netmask `24`
+- **US East (N. Virginia):** `3.80.16.0` with netmask `23` (NOTE: `23` is below the `24`–`32` netmask range documented in the gateway configuration reference — confirm with Vapi support, or split the subnet into two `/24` gateway entries)
+
+Set `outboundProtocol` to `tls/srtp` if your Voice Connector has encryption enabled (the default), or `udp` if not.
+
+Save the returned `id` — this is your **Credential ID** used in the following steps.
+
+
+
+
+
+Register your Chime SDK phone number in Vapi, linking it to the credential and your assistant:
+
+```bash
+curl -X POST https://api.vapi.ai/phone-number \
+-H "Content-Type: application/json" \
+-H "Authorization: Bearer YOUR_VAPI_API_KEY" \
+-d '{
+ "provider": "byo-phone-number",
+ "name": "Chime SDK Number",
+ "number": "YOUR_CHIME_PHONE_NUMBER",
+ "numberE164CheckEnabled": true,
+ "credentialId": "YOUR_CREDENTIAL_ID",
+ "assistantId": "YOUR_ASSISTANT_ID"
+}'
+```
+
+
+The `number` field must exactly match the E.164 phone number assigned to your Voice Connector (e.g., `+18312168445`). Inbound calls will fail to route if the numbers don't match.
+
+
+
+
+
+
+### Chime SDK configuration
+
+
+
+
+
+Navigate to your Voice Connector's **Origination** tab and set **Origination status** to **Enabled**.
+
+
+
+Click **New** to add an inbound route pointing to Vapi's SIP server:
+
+- **Host:** `YOUR_CREDENTIAL_ID.sip.vapi.ai`
+- **Port:** `5061` (for encrypted connections)
+- **Protocol:** TCP
+
+
+
+
+
+
+
+Call your Chime SDK phone number from any phone. The call routes through the Voice Connector's origination settings to `sip.vapi.ai`, where your Vapi assistant answers.
+
+To debug issues, enable **SIP logging** on the Voice Connector (under the **Logging** tab) for detailed SIP message traces.
+
+
+
+
+
+## Next steps
+
+Now that you have Amazon Chime SDK SIP trunking configured:
+
+- **[SIP trunking overview](/advanced/sip/sip-trunk):** Learn more about SIP trunk concepts and configuration options.
+- **[Networking and firewall](/advanced/sip/sip-networking):** Review network requirements and firewall rules.
+- **[Troubleshoot SIP trunk credential errors](/advanced/sip/troubleshoot-sip-trunk-credential-errors):** Debug common SIP integration issues.
diff --git a/fern/advanced/sip/sip-networking.mdx b/fern/advanced/sip/sip-networking.mdx
new file mode 100644
index 000000000..13f1208e3
--- /dev/null
+++ b/fern/advanced/sip/sip-networking.mdx
@@ -0,0 +1,134 @@
+---
+title: SIP networking and firewall configuration
+subtitle: Learn to configure your network to allow SIP signalling and media traffic with Vapi
+slug: advanced/sip/sip-networking
+---
+
+## Overview
+
+When you integrate a SIP trunk with Vapi, your firewall and network infrastructure must allow SIP signalling and media (RTP) traffic to flow between your environment and Vapi's SIP servers. This page provides the complete set of IP addresses, ports, and protocols you need to configure.
+
+**In this reference, you'll find:**
+
+- All IP addresses and ports used by Vapi for SIP signalling
+- RTP media port ranges, directionality details, and dynamic IP behavior
+- Recommended firewall rules for inbound and outbound traffic
+
+
+ These networking details apply to **all** SIP trunk integrations with Vapi, regardless of your SIP provider. For provider-specific setup instructions, see the [SIP trunking](/advanced/sip/sip-trunk) guide.
+
+
+## Quick reference
+
+The table below summarizes every IP address, port, and protocol you need to allowlist.
+
+| Traffic type | IP addresses | Ports | Protocol | Direction |
+| --- | --- | --- | --- | --- |
+| SIP signalling | `44.229.228.186`, `44.238.177.138` | `5060` | UDP | Bidirectional |
+| SIP signalling (TLS) | `44.229.228.186`, `44.238.177.138` | `5061` | TLS | Bidirectional |
+| RTP media | No static IPs (dynamic) | `40000`-`60000` | UDP | Bidirectional |
+
+You can also use the DNS hostname `sip.vapi.ai`, which resolves to the SIP signalling IP addresses listed above.
+
+## SIP signalling
+
+Vapi's SIP infrastructure uses two static IP addresses for all signalling traffic:
+
+- `44.229.228.186/32`
+- `44.238.177.138/32`
+
+These are the public IPs of Vapi's SBC (Session Border Controller) nodes. All SIP `INVITE`, `REGISTER`, `BYE`, and other signalling messages originate from and are received at these addresses.
+
+### Ports
+
+| Port | Protocol | Use case |
+| --- | --- | --- |
+| **5060** | UDP | Default SIP signalling |
+| **5061** | TLS | SIP over TLS (SIPS) for encrypted signalling |
+
+Use port **5060** unless your provider or security requirements mandate encrypted signalling, in which case use port **5061** with TLS.
+
+### DNS resolution
+
+The hostname `sip.vapi.ai` resolves to both signalling IP addresses. You can configure your SIP client or PBX to point to `sip.vapi.ai` instead of using the IP addresses directly.
+
+
+ If your firewall rules are IP-based, allowlist both IP addresses explicitly rather than relying on DNS resolution. DNS-based rules may not update immediately if the resolution changes.
+
+
+## SIP media (RTP)
+
+Vapi does not use static IP addresses for RTP media (voice audio). The media source IP addresses are dynamically assigned and may change between calls. Because of this, you should not rely on allowlisting specific IPs for RTP media traffic.
+
+
+ Unlike SIP signalling, RTP media does **not** originate from a fixed set of IP addresses. Your firewall rules for RTP media should allow traffic based on port ranges rather than specific source IPs.
+
+
+### Port range
+
+Vapi uses **UDP ports 40000 through 60000** for RTP media traffic.
+
+| Setting | Value |
+| --- | --- |
+| Local RTP port range | `40000`-`60000` (UDP) |
+| Direction | Bidirectional |
+
+- **Inbound RTP**: Vapi listens on ports `40000`-`60000` for incoming media packets.
+- **Outbound RTP**: Vapi sends media from ports in the `40000`-`60000` range. The destination IP and port are determined by the remote SDP offer/answer, so Vapi can send to any IP and port your provider advertises.
+
+
+ Vapi does not restrict the remote RTP port range. Your provider may use any port for its RTP traffic. The `40000`-`60000` range applies only to Vapi's local ports.
+
+
+## Firewall rules
+
+Configure your firewall to allow the following traffic. Both SIP signalling IP addresses must be allowlisted, as Vapi may use either one for any given call. For RTP media, allow traffic on the full port range without IP restrictions since Vapi uses dynamic IPs for media.
+
+### Inbound rules (traffic from Vapi to your network)
+
+Allow these if your SIP provider or PBX needs to receive traffic from Vapi:
+
+| Rule | Source IP | Destination | Port(s) | Protocol |
+| --- | --- | --- | --- | --- |
+| SIP signalling | `44.229.228.186`, `44.238.177.138` | Your SIP server | `5060` | UDP |
+| SIP signalling (TLS) | `44.229.228.186`, `44.238.177.138` | Your SIP server | `5061` | TLS |
+| RTP media | Any (dynamic) | Your media server | `40000`-`60000` | UDP |
+
+### Outbound rules (traffic from your network to Vapi)
+
+Allow these if your firewall restricts outbound connections:
+
+| Rule | Source | Destination IP | Port(s) | Protocol |
+| --- | --- | --- | --- | --- |
+| SIP signalling | Your SIP server | `44.229.228.186`, `44.238.177.138` | `5060` | UDP |
+| SIP signalling (TLS) | Your SIP server | `44.229.228.186`, `44.238.177.138` | `5061` | TLS |
+| RTP media | Your media server | Any (dynamic) | `40000`-`60000` | UDP |
+
+
+ Both SIP signalling IP addresses must be allowed in your firewall rules. Vapi may use either address for signalling on any given call. Missing one address can cause intermittent call failures. For RTP media, since Vapi uses dynamic IPs, configure your firewall to allow the full port range (`40000`-`60000` UDP) without restricting by source or destination IP.
+
+
+## FAQ
+
+
+
+ The hostname `sip.vapi.ai` resolves to both Vapi SIP signalling IP addresses. However, if your firewall supports only IP-based rules, add both `44.229.228.186` and `44.238.177.138` explicitly for signalling. DNS-based firewall rules depend on TTL and caching behavior, which can lead to gaps during DNS updates. Note that this DNS hostname applies to SIP signalling only; RTP media uses dynamic IPs that cannot be resolved via DNS.
+
+
+ Yes. Vapi's RTP stack dynamically allocates ports within this range for each call. You cannot predict which specific port a given call will use, so the entire range must be open for reliable media flow.
+
+
+ No. Vapi's SIP signalling uses the static IP addresses `44.229.228.186` and `44.238.177.138`, but RTP media does not use static IP addresses. Media source IPs are dynamically assigned and may vary between calls.
+
+
+ Vapi supports TLS for SIP signalling on port 5061. For encrypted media (SRTP), configure your SIP trunk gateway with the `tls/srtp` outbound protocol option. See the [gateway configuration reference](/advanced/sip/troubleshoot-sip-trunk-credential-errors#gateway-configuration-reference) for details.
+
+
+
+## Next steps
+
+Now that you have your network configured for Vapi SIP traffic:
+
+- **Set up a SIP trunk:** Follow the [SIP trunking](/advanced/sip/sip-trunk) guide to create your trunk credential and phone number
+- **Configure a provider:** Set up with [Twilio](/advanced/sip/twilio), [Telnyx](/advanced/sip/telnyx), [Plivo](/advanced/sip/plivo), or [Zadarma](/advanced/sip/zadarma)
+- **Troubleshoot errors:** Resolve gateway issues with the [SIP trunk credential troubleshooting](/advanced/sip/troubleshoot-sip-trunk-credential-errors) guide
diff --git a/fern/advanced/sip/sip-telnyx.mdx b/fern/advanced/sip/sip-telnyx.mdx
index 693f22e6d..de8062642 100644
--- a/fern/advanced/sip/sip-telnyx.mdx
+++ b/fern/advanced/sip/sip-telnyx.mdx
@@ -85,6 +85,9 @@ Integrate your Telnyx SIP trunk with Vapi to enable your AI voice assistants to
Use the Vapi API to create a SIP trunk credential:
+
+ Use IP addresses in `gateways` when inbound is enabled. FQDNs like `sip.telnyx.com` return a `400 Bad Request` for inbound-enabled gateways; hostnames are accepted only for outbound-only gateways.
+
```bash
curl -X POST https://api.vapi.ai/credential \
-H "Content-Type: application/json" \
@@ -94,20 +97,26 @@ Integrate your Telnyx SIP trunk with Vapi to enable your AI voice assistants to
"name": "Telnyx Trunk",
"gateways": [
{
- "ip": "sip.telnyx.com",
- "inboundEnabled": false
+ "ip": "192.76.120.10",
+ "inboundEnabled": true
+ },
+ {
+ "ip": "64.16.250.10",
+ "inboundEnabled": true
}
],
"outboundAuthenticationPlan": {
"authUsername": "YOUR_SIP_USERNAME",
"authPassword": "YOUR_SIP_PASSWORD",
"sipRegisterPlan": {
- "realm": "sip.telnyx.com"
- }
+ "realm": "sip.telnyx.com"
+ }
}
}'
```
Replace `YOUR_VAPI_PRIVATE_KEY`, `YOUR_SIP_USERNAME`, and `YOUR_SIP_PASSWORD` with your actual credentials.
+ Replace the gateway IPs with the Telnyx gateway IPs assigned to your trunk.
+ Set `inboundEnabled` to `false` if you only need outbound calls.
If successful, the response will include an `id` for the created credential, which you'll use in the next step.
diff --git a/fern/advanced/sip/sip-trunk.mdx b/fern/advanced/sip/sip-trunk.mdx
index 0e40db6fb..e119ac6f3 100644
--- a/fern/advanced/sip/sip-trunk.mdx
+++ b/fern/advanced/sip/sip-trunk.mdx
@@ -13,7 +13,7 @@ To allow SIP signaling and media between Vapi and your SIP provider, you must al
- 44.229.228.186/32
- 44.238.177.138/32
-These IPs are used exclusively for SIP traffic.
+These IPs are used exclusively for SIP traffic. For the complete list of ports, RTP ranges, and firewall configuration details, see the [networking and firewall](/advanced/sip/sip-networking) reference.
We generally don't recommend IP-based authentication for SIP trunks as it can lead to routing issues. Since our servers are shared by many customers, if your telephony provider has multiple customers using IP-based authentication, calls may be routed incorrectly. IP-based authentication works reliably only when your SIP provider offers a unique termination URI or a dedicated SIP server for each customer, as is the case with Plivo and Twilio integrations.
diff --git a/fern/advanced/sip/sip-twilio.mdx b/fern/advanced/sip/sip-twilio.mdx
index 0a7136fbe..98c3f1ea6 100644
--- a/fern/advanced/sip/sip-twilio.mdx
+++ b/fern/advanced/sip/sip-twilio.mdx
@@ -137,7 +137,7 @@ This guide walks you through setting up both outbound and inbound SIP trunking b

- Add your Vapi SIP URI in the following format: `sip:YOUR_PHONE_NUMBER@sip.vapi.ai`, where "YOUR_PHONE_NUMBER" is your chosen SIP number that you will attach to this trunk.
+ Add your Vapi SIP URI in the following format: `sip:YOUR_PHONE_NUMBER@YOUR_CREDENTIAL_ID.sip.vapi.ai`, where "YOUR_PHONE_NUMBER" is your chosen SIP number that you will attach to this trunk and "YOUR_CREDENTIAL_ID" is the ID of your Vapi SIP trunk credential.

diff --git a/fern/advanced/sip/troubleshoot-sip-trunk-credential-errors.mdx b/fern/advanced/sip/troubleshoot-sip-trunk-credential-errors.mdx
new file mode 100644
index 000000000..a3134046b
--- /dev/null
+++ b/fern/advanced/sip/troubleshoot-sip-trunk-credential-errors.mdx
@@ -0,0 +1,207 @@
+---
+title: "Troubleshoot SIP trunk credential errors"
+subtitle: "Learn to resolve gateway creation failures when setting up a BYO SIP trunk"
+slug: advanced/sip/troubleshoot-sip-trunk-credential-errors
+---
+
+## Overview
+
+This guide helps you resolve the `Couldn't validate SIP trunk credential. SIP gateway creation failed.` error when creating a BYO SIP trunk credential in Vapi.
+
+This error occurs during the gateway creation step of SIP trunk provisioning. Vapi's SBC (Session Border Controller) rejects the gateway configuration you provided. The sections below cover the most common causes and how to fix each one.
+
+**In this guide, you'll learn to:**
+
+- Identify the three most common causes of SIP trunk credential validation failures
+- Understand when hostnames work (outbound) and when they don't (inbound)
+- Resolve hostname-vs-IP, inbound-flag, and IP-allowlist issues
+- Verify your gateway configuration against the full parameter reference
+
+
+ This guide focuses on the specific `SIP gateway creation failed` error. For
+ general SIP trunk setup instructions, see the
+ [SIP trunking](/advanced/sip/sip-trunk) page.
+
+
+## Prerequisites
+
+Before you start troubleshooting, ensure you have:
+
+- A Vapi account with API access
+- Your SIP provider's server address, username, and password
+- Access to your SIP provider's admin panel (to check IP whitelisting)
+
+## Using a hostname for an inbound gateway
+
+This is the most common cause of this error.
+
+### What happens
+
+The `gateways[].ip` field accepts both hostnames (for example, `sip.example.com`) and IPv4 addresses (for example, `203.0.113.10`). However, the behavior differs depending on the call direction:
+
+- **Outbound gateways** — Hostnames and IPv4 addresses both work. Vapi resolves the hostname when routing outbound calls to your SIP provider.
+- **Inbound gateways** — Only IPv4 addresses work. The SBC needs a numeric IP address to match incoming SIP requests to your trunk. When you provide a hostname with `inboundEnabled: true`, the SBC rejects the gateway configuration.
+
+### How to check
+
+Look at your gateway configuration. If `inboundEnabled` is `true` (or omitted, since it defaults to `true`) and the `ip` field contains a hostname (for example, `sip.example.com`), this is the cause of the error.
+
+### How to fix
+
+You have two options depending on whether you need inbound calling:
+
+**If you need inbound calling**, resolve the hostname to its IPv4 address:
+
+
+
+
+Run one of the following commands to resolve your SIP provider's hostname to an IPv4 address:
+
+```bash title="Terminal"
+dig +short sip.example.com A
+```
+
+```bash title="Terminal (alternative)"
+nslookup sip.example.com
+```
+
+This returns one or more IPv4 addresses, for example `203.0.113.10`.
+
+
+
+
+
+Replace the hostname with the numeric IPv4 address in your gateway configuration:
+
+```json title="Gateway configuration"
+{
+ "provider": "byo-sip-trunk",
+ "name": "my sip trunk",
+ "gateways": [
+ {
+ "ip": "203.0.113.10",
+ "port": 5060,
+ "outboundEnabled": true,
+ "inboundEnabled": true
+ }
+ ]
+}
+```
+
+
+
+
+
+ If your provider's IP address changes, you need to update the gateway
+ configuration with the new address.
+
+
+**If you only need outbound calling**, you can keep the hostname and disable inbound:
+
+```json title="Gateway configuration"
+{
+ "provider": "byo-sip-trunk",
+ "name": "my sip trunk",
+ "gateways": [
+ {
+ "ip": "sip.example.com",
+ "port": 5060,
+ "outboundEnabled": true,
+ "inboundEnabled": false
+ }
+ ]
+}
+```
+
+## Inbound enabled on an outbound-only trunk
+
+### What happens
+
+The `inboundEnabled` gateway option defaults to `true`. If your SIP trunk is outbound-only (you only make calls through it, you do not receive inbound calls through Vapi), having inbound enabled can cause gateway creation to fail with some providers.
+
+### How to check
+
+Look at your API request. If you did not set `inboundEnabled` explicitly, it defaulted to `true`. If you only need outbound calling, this is likely the problem.
+
+### How to fix
+
+Set `inboundEnabled` to `false` in your gateway configuration:
+
+```json title="Gateway configuration"
+{
+ "provider": "byo-sip-trunk",
+ "name": "my sip trunk",
+ "gateways": [
+ {
+ "ip": "203.0.113.10",
+ "port": 5060,
+ "outboundEnabled": true,
+ "inboundEnabled": false
+ }
+ ]
+}
+```
+
+
+ If you are using the Vapi dashboard, uncheck the **Inbound** option when
+ configuring the gateway.
+
+
+## Carrier IP allowlist not configured
+
+### What happens
+
+Your SIP provider needs to allow traffic from Vapi's SBC IP addresses. If these IPs are not on the allowlist, the SBC's registration and signaling requests to your provider are blocked, and gateway creation fails.
+
+### How to check
+
+Ask your SIP provider whether the following IP addresses are on their allowlist:
+
+- `44.229.228.186/32`
+- `44.238.177.138/32`
+
+### How to fix
+
+Ask your SIP provider to add both Vapi SBC IP addresses to their allowlist:
+
+| IP address | Netmask |
+| ------------------- | ------- |
+| `44.229.228.186` | `/32` |
+| `44.238.177.138` | `/32` |
+
+
+ Both addresses must be allowed. Vapi may use either one for signaling, so
+ missing one can cause intermittent failures.
+
+
+## Gateway configuration reference
+
+The table below lists all available options for each entry in the `gateways` array.
+
+| Option | Type | Default | Description |
+| -------------------- | ------- | --------- | ------------------------------------------------------------------------------ |
+| `ip` | string | (required)| IPv4 address or hostname of your SIP gateway. Hostnames work for outbound-only gateways. Inbound gateways require a numeric IPv4 address.|
+| `port` | number | `5060` | SIP signaling port. |
+| `netmask` | number | `32` | Subnet mask for inbound IP matching. Valid range: 24 to 32. |
+| `inboundEnabled` | boolean | `true` | Whether this gateway accepts inbound calls. Set to `false` for outbound-only trunks. |
+| `outboundEnabled` | boolean | `true` | Whether outbound calls route through this gateway. |
+| `outboundProtocol` | string | `"udp"` | Signaling protocol. Options: `udp`, `tcp`, `tls`, `tls/srtp`. |
+| `optionsPingEnabled` | boolean | `false` | Whether to send SIP OPTIONS pings to check if the gateway is reachable. |
+
+## If the error persists
+
+If none of the above resolves your issue, gather the following information and contact Vapi support:
+
+- Your organization ID
+- The exact error message you received
+- The full request payload you sent (redact the password)
+- Your SIP provider name and server address
+- Whether you are setting up for inbound calls, outbound calls, or both
+
+## Next steps
+
+Now that you can troubleshoot SIP trunk credential errors:
+
+- **Review SIP trunk setup:** Follow the complete [SIP trunking](/advanced/sip/sip-trunk) guide to verify your configuration end-to-end
+- **Configure a provider:** Set up your SIP trunk with a specific provider such as [Twilio](/advanced/sip/sip-twilio), [Telnyx](/advanced/sip/sip-telnyx), [Zadarma](/advanced/sip/sip-zadarma), or [Plivo](/advanced/sip/sip-plivo)
+- **Learn about SIP telephony:** Explore the [SIP telephony](/advanced/sip/sip) overview for broader SIP integration options
diff --git a/fern/apis/api/generators.yml b/fern/apis/api/generators.yml
index 4a3b5c409..d8751add0 100644
--- a/fern/apis/api/generators.yml
+++ b/fern/apis/api/generators.yml
@@ -1,3 +1,4 @@
+
api:
specs:
- openapi: ./openapi.json
@@ -11,13 +12,13 @@ groups:
python-sdk:
generators:
- name: fernapi/fern-python-sdk
- version: 4.23.2
+ version: 5.3.3
api:
settings:
- unions: v1
+ prefer-undiscriminated-unions-with-literals: true
output:
location: pypi
- package-name: "vapi_server_sdk"
+ package-name: vapi_server_sdk
token: ${PYPI_TOKEN}
github:
repository: VapiAI/server-sdk-python
@@ -25,17 +26,18 @@ groups:
pydantic_config:
skip_validation: true
client_class_name: Vapi
+ smart-casing: false
ts-sdk:
generators:
- - name: fernapi/fern-typescript-node-sdk
- version: 2.1.0
+ - name: fernapi/fern-typescript-sdk
+ version: 3.63.0
api:
settings:
- unions: v1
+ prefer-undiscriminated-unions-with-literals: true
output:
location: npm
- package-name: "@vapi-ai/server-sdk"
- token: ${NPM_TOKEN}
+ package-name: '@vapi-ai/server-sdk'
+ token: OIDC
github:
repository: VapiAI/server-sdk-typescript
config:
@@ -45,54 +47,39 @@ groups:
includeApiReference: true
noSerdeLayer: true
omitUndefined: true
+ enableInlineTypes: false
smart-casing: true
- java-sdk:
- generators:
- - name: fernapi/fern-java-sdk
- version: 2.38.6
- disable-examples: true
- output:
- location: maven
- coordinate: dev.vapi:server-sdk
- username: ${MAVEN_USERNAME}
- password: ${MAVEN_PASSWORD}
- signature:
- keyId: ${MAVEN_CENTRAL_SECRET_KEY_KEY_ID}
- password: ${MAVEN_CENTRAL_SECRET_KEY_PASSWORD}
- secretKey: ${MAVEN_CENTRAL_SECRET_KEY}
- github:
- repository: VapiAI/server-sdk-java
- config:
- client-class-name: Vapi
go-sdk:
generators:
- name: fernapi/fern-go-sdk
- version: 1.4.0
+ version: 1.33.5
disable-examples: true
api:
settings:
- unions: v1
+ prefer-undiscriminated-unions-with-literals: true
github:
repository: VapiAI/server-sdk-go
config:
union: v1
+ smart-casing: false
ruby-sdk:
generators:
- name: fernapi/fern-ruby-sdk
- version: 0.8.2
+ version: 1.1.13
disable-examples: true
github:
repository: VapiAI/server-sdk-ruby
output:
location: rubygems
- package-name: vapi-server-sdk
- api-key: ${RUBYGEMS_API_KEY}
+ package-name: vapi_server_sdk
config:
clientClassName: Vapi
+ rubocopVariableNumberStyle: disabled
+ smart-casing: false
csharp-sdk:
generators:
- name: fernapi/fern-csharp-sdk
- version: 2.0.2
+ version: 2.58.0
disable-examples: true
github:
repository: VapiAI/server-sdk-csharp
@@ -109,3 +96,26 @@ groups:
inline-path-parameters: false
simplify-object-dictionaries: true
use-discriminated-unions: false
+ smart-casing: false
+ php-sdk:
+ generators:
+ - name: fernapi/fern-php-sdk
+ version: 2.4.0
+ github:
+ repository: VapiAI/server-sdk-php
+ config:
+ namespace: Vapi
+ client-class-name: VapiClient
+ smart-casing: false
+ swift-sdk:
+ generators:
+ - name: fernapi/fern-swift-sdk
+ version: 0.31.0
+ disable-examples: true
+ github:
+ repository: VapiAI/server-sdk-swift
+ config:
+ clientClassName: VapiClient
+ moduleName: Vapi
+ environmentEnumName: VapiEnvironment
+ smart-casing: false
diff --git a/fern/apis/api/openapi-overrides.yml b/fern/apis/api/openapi-overrides.yml
index 598d91ed1..32dbd8d2a 100644
--- a/fern/apis/api/openapi-overrides.yml
+++ b/fern/apis/api/openapi-overrides.yml
@@ -196,23 +196,28 @@ paths:
get:
x-fern-sdk-group-name:
- knowledgeBases
+ x-fern-ignore: true
x-fern-sdk-method-name: list
post:
x-fern-sdk-group-name:
- knowledgeBases
+ x-fern-ignore: true
x-fern-sdk-method-name: create
/knowledge-base/{id}:
get:
x-fern-sdk-group-name:
- knowledgeBases
+ x-fern-ignore: true
x-fern-sdk-method-name: get
delete:
x-fern-sdk-group-name:
- knowledgeBases
+ x-fern-ignore: true
x-fern-sdk-method-name: delete
patch:
x-fern-sdk-group-name:
- knowledgeBases
+ x-fern-ignore: true
x-fern-sdk-method-name: update
/analytics:
post:
@@ -224,6 +229,7 @@ paths:
/logs:
get:
x-fern-pagination: true
+ x-fern-ignore: true
x-fern-sdk-group-name:
- logs
x-fern-sdk-method-name: get
@@ -233,8 +239,6 @@ components:
properties:
serverMessages:
items:
- enum:
- - "transcript[transcriptType='final']"
x-fern-enum:
"transcript[transcriptType='final']":
name: FinalTranscript
@@ -242,8 +246,6 @@ components:
properties:
serverMessages:
items:
- enum:
- - "transcript[transcriptType='final']"
x-fern-enum:
"transcript[transcriptType='final']":
name: FinalTranscript
@@ -251,8 +253,6 @@ components:
properties:
serverMessages:
items:
- enum:
- - "transcript[transcriptType='final']"
x-fern-enum:
"transcript[transcriptType='final']":
name: FinalTranscript
@@ -260,8 +260,6 @@ components:
properties:
serverMessages:
items:
- enum:
- - "transcript[transcriptType='final']"
x-fern-enum:
"transcript[transcriptType='final']":
name: FinalTranscript
@@ -269,8 +267,6 @@ components:
title: ClientMessageTranscript
properties:
type:
- enum:
- - "transcript[transcriptType='final']"
x-fern-enum:
"transcript[transcriptType='final']":
name: FinalTranscript
@@ -278,8 +274,6 @@ components:
title: ServerMessageTranscript
properties:
type:
- enum:
- - "transcript[transcriptType='final']"
x-fern-enum:
"transcript[transcriptType='final']":
name: FinalTranscript
@@ -287,8 +281,6 @@ components:
properties:
serverMessages:
items:
- enum:
- - "transcript[transcriptType='final']"
x-fern-enum:
"transcript[transcriptType='final']":
name: FinalTranscript
@@ -426,6 +418,14 @@ components:
name: Asterisk
"#":
name: Hash
+ items:
+ x-fern-enum:
+ "":
+ name: Empty
+ "*":
+ name: Asterisk
+ "#":
+ name: Hash
NeuphonicVoice:
title: NeuphonicVoice
properties:
@@ -581,6 +581,7 @@ components:
DeepgramTranscriberModel:
type: string
enum:
+ - "flux-general-en"
- "nova-3"
- "nova-3-general"
- "nova-3-medical"
@@ -612,7 +613,10 @@ components:
- "base-conversationalai"
- "base-voicemail"
- "base-video"
+ - "whisper"
x-fern-enum:
+ "flux-general-en":
+ name: fluxGeneralEn
"nova-3":
name: nova3
"nova-3-general":
@@ -675,6 +679,8 @@ components:
name: baseVoicemail
"base-video":
name: baseVideo
+ "whisper":
+ name: whisper
FallbackDeepgramTranscriber:
title: FallbackDeepgramTranscriber
properties:
@@ -785,6 +791,7 @@ components:
FallbackDeepgramTranscriberModel:
type: string
enum:
+ - "flux-general-en"
- "nova-3"
- "nova-3-general"
- "nova-3-medical"
@@ -816,7 +823,10 @@ components:
- "base-conversationalai"
- "base-voicemail"
- "base-video"
+ - "whisper"
x-fern-enum:
+ "flux-general-en":
+ name: fluxGeneralEn
"nova-3":
name: nova3
"nova-3-general":
@@ -879,6 +889,8 @@ components:
name: baseVoicemail
"base-video":
name: baseVideo
+ "whisper":
+ name: whisper
TransferDestinationAssistant:
title: TransferDestinationAssistant
properties:
diff --git a/fern/apis/api/openapi.json b/fern/apis/api/openapi.json
index 0ac803577..7748d5731 100644
--- a/fern/apis/api/openapi.json
+++ b/fern/apis/api/openapi.json
@@ -1,17 +1,17 @@
{
"openapi": "3.0.0",
"paths": {
- "/call": {
+ "/assistant": {
"post": {
- "operationId": "CallController_create",
- "summary": "Create Call",
+ "operationId": "AssistantController_create",
+ "summary": "Create Assistant",
"parameters": [],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/CreateCallDTO"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
}
}
@@ -22,21 +22,14 @@
"content": {
"application/json": {
"schema": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/Call"
- },
- {
- "$ref": "#/components/schemas/CallBatchResponse"
- }
- ]
+ "$ref": "#/components/schemas/Assistant"
}
}
}
}
},
"tags": [
- "Calls"
+ "Assistants"
],
"security": [
{
@@ -45,36 +38,9 @@
]
},
"get": {
- "operationId": "CallController_findAll",
- "summary": "List Calls",
+ "operationId": "AssistantController_findAll",
+ "summary": "List Assistants",
"parameters": [
- {
- "name": "id",
- "required": false,
- "in": "query",
- "description": "This is the unique identifier for the call.",
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "assistantId",
- "required": false,
- "in": "query",
- "description": "This will return calls with the specified assistantId.",
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "phoneNumberId",
- "required": false,
- "in": "query",
- "description": "This is the phone number that will be used for the call. To use a transient number, use `phoneNumber` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
- "schema": {
- "type": "string"
- }
- },
{
"name": "limit",
"required": false,
@@ -175,7 +141,7 @@
"schema": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/Call"
+ "$ref": "#/components/schemas/Assistant"
}
}
}
@@ -183,7 +149,7 @@
}
},
"tags": [
- "Calls"
+ "Assistants"
],
"security": [
{
@@ -192,10 +158,10 @@
]
}
},
- "/call/{id}": {
+ "/assistant/{id}": {
"get": {
- "operationId": "CallController_findOne",
- "summary": "Get Call",
+ "operationId": "AssistantController_findOne",
+ "summary": "Get Assistant",
"parameters": [
{
"name": "id",
@@ -212,14 +178,14 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Call"
+ "$ref": "#/components/schemas/Assistant"
}
}
}
}
},
"tags": [
- "Calls"
+ "Assistants"
],
"security": [
{
@@ -228,8 +194,8 @@
]
},
"patch": {
- "operationId": "CallController_update",
- "summary": "Update Call",
+ "operationId": "AssistantController_update",
+ "summary": "Update Assistant",
"parameters": [
{
"name": "id",
@@ -245,7 +211,7 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/UpdateCallDTO"
+ "$ref": "#/components/schemas/UpdateAssistantDTO"
}
}
}
@@ -256,14 +222,14 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Call"
+ "$ref": "#/components/schemas/Assistant"
}
}
}
}
},
"tags": [
- "Calls"
+ "Assistants"
],
"security": [
{
@@ -272,8 +238,8 @@
]
},
"delete": {
- "operationId": "CallController_deleteCallData",
- "summary": "Delete Call Data",
+ "operationId": "AssistantController_remove",
+ "summary": "Delete Assistant",
"parameters": [
{
"name": "id",
@@ -290,14 +256,14 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Call"
+ "$ref": "#/components/schemas/Assistant"
}
}
}
}
},
"tags": [
- "Calls"
+ "Assistants"
],
"security": [
{
@@ -306,61 +272,46 @@
]
}
},
- "/chat": {
- "get": {
- "operationId": "ChatController_listChats",
- "summary": "List Chats",
- "parameters": [
- {
- "name": "assistantId",
- "required": false,
- "in": "query",
- "description": "This is the unique identifier for the assistant that will be used for the chat.",
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "workflowId",
- "required": false,
- "in": "query",
- "description": "This is the unique identifier for the workflow that will be used for the chat.",
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "sessionId",
- "required": false,
- "in": "query",
- "description": "This is the unique identifier for the session that will be used for the chat.",
- "schema": {
- "type": "string"
+ "/squad": {
+ "post": {
+ "operationId": "SquadController_create",
+ "summary": "Create Squad",
+ "parameters": [],
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/CreateSquadDTO"
+ }
}
- },
- {
- "name": "page",
- "required": false,
- "in": "query",
- "description": "This is the page number to return. Defaults to 1.",
- "schema": {
- "minimum": 1,
- "type": "number"
+ }
+ },
+ "responses": {
+ "201": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/Squad"
+ }
+ }
}
- },
+ }
+ },
+ "tags": [
+ "Squads"
+ ],
+ "security": [
{
- "name": "sortOrder",
- "required": false,
- "in": "query",
- "description": "This is the sort order for pagination. Defaults to 'DESC'.",
- "schema": {
- "enum": [
- "ASC",
- "DESC"
- ],
- "type": "string"
- }
- },
+ "bearer": []
+ }
+ ]
+ },
+ "get": {
+ "operationId": "SquadController_findAll",
+ "summary": "List Squads",
+ "parameters": [
{
"name": "limit",
"required": false,
@@ -459,79 +410,63 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/ChatPaginatedResponse"
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/Squad"
+ }
}
}
}
}
},
"tags": [
- "Chats"
+ "Squads"
],
"security": [
{
"bearer": []
}
]
- },
- "post": {
- "operationId": "ChatController_createChat",
- "summary": "Create Chat",
- "description": "Creates a new chat. Requires at least one of: assistantId/assistant, sessionId, or previousChatId. Note: sessionId and previousChatId are mutually exclusive.",
- "parameters": [],
- "requestBody": {
- "required": true,
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/CreateChatDTO"
- }
+ }
+ },
+ "/squad/{id}": {
+ "get": {
+ "operationId": "SquadController_findOne",
+ "summary": "Get Squad",
+ "parameters": [
+ {
+ "name": "id",
+ "required": true,
+ "in": "path",
+ "schema": {
+ "type": "string"
}
}
- },
+ ],
"responses": {
"200": {
- "description": "Chat response - either non-streaming chat or streaming",
- "content": {
- "application/json": {
- "schema": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/Chat"
- },
- {
- "$ref": "#/components/schemas/CreateChatStreamResponse"
- }
- ]
- }
- }
- }
- },
- "201": {
"description": "",
"content": {
"application/json": {
"schema": {
- "type": "object"
+ "$ref": "#/components/schemas/Squad"
}
}
}
}
},
"tags": [
- "Chats"
+ "Squads"
],
"security": [
{
"bearer": []
}
]
- }
- },
- "/chat/{id}": {
- "get": {
- "operationId": "ChatController_getChat",
- "summary": "Get Chat",
+ },
+ "patch": {
+ "operationId": "SquadController_update",
+ "summary": "Update Squad",
"parameters": [
{
"name": "id",
@@ -542,20 +477,30 @@
}
}
],
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/UpdateSquadDTO"
+ }
+ }
+ }
+ },
"responses": {
"200": {
"description": "",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Chat"
+ "$ref": "#/components/schemas/Squad"
}
}
}
}
},
"tags": [
- "Chats"
+ "Squads"
],
"security": [
{
@@ -564,8 +509,8 @@
]
},
"delete": {
- "operationId": "ChatController_deleteChat",
- "summary": "Delete Chat",
+ "operationId": "SquadController_remove",
+ "summary": "Delete Squad",
"parameters": [
{
"name": "id",
@@ -582,14 +527,14 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Chat"
+ "$ref": "#/components/schemas/Squad"
}
}
}
}
},
"tags": [
- "Chats"
+ "Squads"
],
"security": [
{
@@ -598,98 +543,42 @@
]
}
},
- "/chat/responses": {
+ "/call": {
"post": {
- "operationId": "ChatController_createOpenAIChat",
- "summary": "Create Chat (OpenAI Compatible)",
+ "operationId": "CallController_create",
+ "summary": "Create Call",
"parameters": [],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/OpenAIResponsesRequest"
+ "$ref": "#/components/schemas/CreateCallDTO"
}
}
}
},
"responses": {
- "200": {
- "description": "OpenAI Responses API format - either non-streaming or streaming",
+ "201": {
+ "description": "",
"content": {
"application/json": {
"schema": {
"oneOf": [
{
- "$ref": "#/components/schemas/ResponseObject"
- },
- {
- "$ref": "#/components/schemas/ResponseTextDeltaEvent"
- },
- {
- "$ref": "#/components/schemas/ResponseTextDoneEvent"
- },
- {
- "$ref": "#/components/schemas/ResponseCompletedEvent"
+ "$ref": "#/components/schemas/Call"
},
{
- "$ref": "#/components/schemas/ResponseErrorEvent"
+ "$ref": "#/components/schemas/CallBatchResponse"
}
]
}
}
}
- },
- "201": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "type": "object"
- }
- }
- }
- }
- },
- "tags": [
- "Chats"
- ],
- "security": [
- {
- "bearer": []
- }
- ]
- }
- },
- "/campaign": {
- "post": {
- "operationId": "CampaignController_create",
- "summary": "Create Campaign",
- "parameters": [],
- "requestBody": {
- "required": true,
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/CreateCampaignDTO"
- }
- }
- }
- },
- "responses": {
- "201": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/Campaign"
- }
- }
- }
}
},
"tags": [
- "Campaigns"
+ "Calls"
],
"security": [
{
@@ -698,50 +587,33 @@
]
},
"get": {
- "operationId": "CampaignController_findAll",
- "summary": "List Campaigns",
+ "operationId": "CallController_findAll",
+ "summary": "List Calls",
"parameters": [
{
"name": "id",
"required": false,
"in": "query",
+ "description": "This is the unique identifier for the call.",
"schema": {
"type": "string"
}
},
{
- "name": "status",
+ "name": "assistantId",
"required": false,
"in": "query",
+ "description": "This will return calls with the specified assistantId.",
"schema": {
- "enum": [
- "scheduled",
- "in-progress",
- "ended"
- ],
"type": "string"
}
},
{
- "name": "page",
- "required": false,
- "in": "query",
- "description": "This is the page number to return. Defaults to 1.",
- "schema": {
- "minimum": 1,
- "type": "number"
- }
- },
- {
- "name": "sortOrder",
+ "name": "phoneNumberId",
"required": false,
"in": "query",
- "description": "This is the sort order for pagination. Defaults to 'DESC'.",
+ "description": "This is the phone number that will be used for the call. To use a transient number, use `phoneNumber` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
"schema": {
- "enum": [
- "ASC",
- "DESC"
- ],
"type": "string"
}
},
@@ -843,14 +715,17 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/CampaignPaginatedResponse"
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/Call"
+ }
}
}
}
}
},
"tags": [
- "Campaigns"
+ "Calls"
],
"security": [
{
@@ -859,10 +734,10 @@
]
}
},
- "/campaign/{id}": {
+ "/call/{id}": {
"get": {
- "operationId": "CampaignController_findOne",
- "summary": "Get Campaign",
+ "operationId": "CallController_findOne",
+ "summary": "Get Call",
"parameters": [
{
"name": "id",
@@ -879,14 +754,14 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Campaign"
+ "$ref": "#/components/schemas/Call"
}
}
}
}
},
"tags": [
- "Campaigns"
+ "Calls"
],
"security": [
{
@@ -895,8 +770,8 @@
]
},
"patch": {
- "operationId": "CampaignController_update",
- "summary": "Update Campaign",
+ "operationId": "CallController_update",
+ "summary": "Update Call",
"parameters": [
{
"name": "id",
@@ -912,7 +787,7 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/UpdateCampaignDTO"
+ "$ref": "#/components/schemas/UpdateCallDTO"
}
}
}
@@ -923,14 +798,14 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Campaign"
+ "$ref": "#/components/schemas/Call"
}
}
}
}
},
"tags": [
- "Campaigns"
+ "Calls"
],
"security": [
{
@@ -939,85 +814,51 @@
]
},
"delete": {
- "operationId": "CampaignController_remove",
- "summary": "Delete Campaign",
- "parameters": [
- {
- "name": "id",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/Campaign"
- }
- }
- }
- }
- },
- "tags": [
- "Campaigns"
- ],
- "security": [
- {
- "bearer": []
- }
- ]
- }
- },
- "/session": {
- "post": {
- "operationId": "SessionController_create",
- "summary": "Create Session",
+ "operationId": "CallController_deleteCallData",
+ "summary": "Delete Call",
"parameters": [],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/CreateSessionDTO"
+ "$ref": "#/components/schemas/DeleteCallDTO"
}
}
}
},
"responses": {
- "201": {
+ "200": {
"description": "",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Session"
+ "$ref": "#/components/schemas/Call"
}
}
}
}
},
"tags": [
- "Sessions"
+ "Calls"
],
"security": [
{
"bearer": []
}
]
- },
+ }
+ },
+ "/chat": {
"get": {
- "operationId": "SessionController_findAllPaginated",
- "summary": "List Sessions",
+ "operationId": "ChatController_listChats",
+ "summary": "List Chats",
"parameters": [
{
- "name": "name",
+ "name": "id",
"required": false,
"in": "query",
- "description": "This is the name of the session to filter by.",
+ "description": "This is the unique identifier for the chat to filter by.",
"schema": {
"type": "string"
}
@@ -1026,16 +867,44 @@
"name": "assistantId",
"required": false,
"in": "query",
- "description": "This is the ID of the assistant to filter sessions by.",
+ "description": "This is the unique identifier for the assistant that will be used for the chat.",
"schema": {
"type": "string"
}
},
{
- "name": "workflowId",
+ "name": "assistantIdAny",
"required": false,
"in": "query",
- "description": "This is the ID of the workflow to filter sessions by.",
+ "description": "Filter by multiple assistant IDs. Provide as comma-separated values.",
+ "schema": {
+ "example": "assistant-1,assistant-2,assistant-3",
+ "type": "string"
+ }
+ },
+ {
+ "name": "squadId",
+ "required": false,
+ "in": "query",
+ "description": "This is the unique identifier for the squad that will be used for the chat.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "sessionId",
+ "required": false,
+ "in": "query",
+ "description": "This is the unique identifier for the session that will be used for the chat.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "previousChatId",
+ "required": false,
+ "in": "query",
+ "description": "This is the unique identifier for the previous chat to filter by.",
"schema": {
"type": "string"
}
@@ -1161,14 +1030,67 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/SessionPaginatedResponse"
+ "$ref": "#/components/schemas/ChatPaginatedResponse"
}
}
}
}
},
"tags": [
- "Sessions"
+ "Chats"
+ ],
+ "security": [
+ {
+ "bearer": []
+ }
+ ]
+ },
+ "post": {
+ "operationId": "ChatController_createChat",
+ "summary": "Create Chat",
+ "description": "Creates a new chat with optional SMS delivery via transport field. Requires at least one of: assistantId/assistant, sessionId, or previousChatId. Note: sessionId and previousChatId are mutually exclusive. Transport field enables SMS delivery with two modes: (1) New conversation - provide transport.phoneNumberId and transport.customer to create a new session, (2) Existing conversation - provide sessionId to use existing session data. Cannot specify both sessionId and transport fields together. The transport.useLLMGeneratedMessageForOutbound flag controls whether input is processed by LLM (true, default) or forwarded directly as SMS (false).",
+ "parameters": [],
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/CreateChatDTO"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "Chat response - either non-streaming chat or streaming",
+ "content": {
+ "application/json": {
+ "schema": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/Chat"
+ },
+ {
+ "$ref": "#/components/schemas/CreateChatStreamResponse"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "type": "object"
+ }
+ }
+ }
+ }
+ },
+ "tags": [
+ "Chats"
],
"security": [
{
@@ -1177,10 +1099,10 @@
]
}
},
- "/session/{id}": {
+ "/chat/{id}": {
"get": {
- "operationId": "SessionController_findOne",
- "summary": "Get Session",
+ "operationId": "ChatController_getChat",
+ "summary": "Get Chat",
"parameters": [
{
"name": "id",
@@ -1197,14 +1119,14 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Session"
+ "$ref": "#/components/schemas/Chat"
}
}
}
}
},
"tags": [
- "Sessions"
+ "Chats"
],
"security": [
{
@@ -1212,9 +1134,9 @@
}
]
},
- "patch": {
- "operationId": "SessionController_update",
- "summary": "Update Session",
+ "delete": {
+ "operationId": "ChatController_deleteChat",
+ "summary": "Delete Chat",
"parameters": [
{
"name": "id",
@@ -1225,64 +1147,83 @@
}
}
],
- "requestBody": {
- "required": true,
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/UpdateSessionDTO"
- }
- }
- }
- },
"responses": {
"200": {
"description": "",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Session"
+ "$ref": "#/components/schemas/Chat"
}
}
}
}
},
"tags": [
- "Sessions"
+ "Chats"
],
"security": [
{
"bearer": []
}
]
- },
- "delete": {
- "operationId": "SessionController_remove",
- "summary": "Delete Session",
- "parameters": [
- {
- "name": "id",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
+ }
+ },
+ "/chat/responses": {
+ "post": {
+ "operationId": "ChatController_createOpenAIChat",
+ "summary": "Create Chat (OpenAI Compatible)",
+ "parameters": [],
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/OpenAIResponsesRequest"
+ }
}
}
- ],
+ },
"responses": {
"200": {
+ "description": "OpenAI Responses API format - either non-streaming or streaming",
+ "content": {
+ "application/json": {
+ "schema": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ResponseObject"
+ },
+ {
+ "$ref": "#/components/schemas/ResponseTextDeltaEvent"
+ },
+ {
+ "$ref": "#/components/schemas/ResponseTextDoneEvent"
+ },
+ {
+ "$ref": "#/components/schemas/ResponseCompletedEvent"
+ },
+ {
+ "$ref": "#/components/schemas/ResponseErrorEvent"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "201": {
"description": "",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Session"
+ "type": "object"
}
}
}
}
},
"tags": [
- "Sessions"
+ "Chats"
],
"security": [
{
@@ -1291,17 +1232,17 @@
]
}
},
- "/assistant": {
+ "/campaign": {
"post": {
- "operationId": "AssistantController_create",
- "summary": "Create Assistant",
+ "operationId": "CampaignController_create",
+ "summary": "Create Campaign",
"parameters": [],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/CreateAssistantDTO"
+ "$ref": "#/components/schemas/CreateCampaignDTO"
}
}
}
@@ -1312,14 +1253,14 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Assistant"
+ "$ref": "#/components/schemas/Campaign"
}
}
}
}
},
"tags": [
- "Assistants"
+ "Campaigns"
],
"security": [
{
@@ -1328,9 +1269,53 @@
]
},
"get": {
- "operationId": "AssistantController_findAll",
- "summary": "List Assistants",
+ "operationId": "CampaignController_findAll",
+ "summary": "List Campaigns",
"parameters": [
+ {
+ "name": "id",
+ "required": false,
+ "in": "query",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "status",
+ "required": false,
+ "in": "query",
+ "schema": {
+ "enum": [
+ "scheduled",
+ "in-progress",
+ "ended"
+ ],
+ "type": "string"
+ }
+ },
+ {
+ "name": "page",
+ "required": false,
+ "in": "query",
+ "description": "This is the page number to return. Defaults to 1.",
+ "schema": {
+ "minimum": 1,
+ "type": "number"
+ }
+ },
+ {
+ "name": "sortOrder",
+ "required": false,
+ "in": "query",
+ "description": "This is the sort order for pagination. Defaults to 'DESC'.",
+ "schema": {
+ "enum": [
+ "ASC",
+ "DESC"
+ ],
+ "type": "string"
+ }
+ },
{
"name": "limit",
"required": false,
@@ -1429,17 +1414,14 @@
"content": {
"application/json": {
"schema": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/Assistant"
- }
+ "$ref": "#/components/schemas/CampaignPaginatedResponse"
}
}
}
}
},
"tags": [
- "Assistants"
+ "Campaigns"
],
"security": [
{
@@ -1448,10 +1430,10 @@
]
}
},
- "/assistant/{id}": {
+ "/campaign/{id}": {
"get": {
- "operationId": "AssistantController_findOne",
- "summary": "Get Assistant",
+ "operationId": "CampaignController_findOne",
+ "summary": "Get Campaign",
"parameters": [
{
"name": "id",
@@ -1468,14 +1450,14 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Assistant"
+ "$ref": "#/components/schemas/Campaign"
}
}
}
}
},
"tags": [
- "Assistants"
+ "Campaigns"
],
"security": [
{
@@ -1484,8 +1466,8 @@
]
},
"patch": {
- "operationId": "AssistantController_update",
- "summary": "Update Assistant",
+ "operationId": "CampaignController_update",
+ "summary": "Update Campaign",
"parameters": [
{
"name": "id",
@@ -1501,7 +1483,7 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/UpdateAssistantDTO"
+ "$ref": "#/components/schemas/UpdateCampaignDTO"
}
}
}
@@ -1512,14 +1494,14 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Assistant"
+ "$ref": "#/components/schemas/Campaign"
}
}
}
}
},
"tags": [
- "Assistants"
+ "Campaigns"
],
"security": [
{
@@ -1528,8 +1510,8 @@
]
},
"delete": {
- "operationId": "AssistantController_remove",
- "summary": "Delete Assistant",
+ "operationId": "CampaignController_remove",
+ "summary": "Delete Campaign",
"parameters": [
{
"name": "id",
@@ -1546,14 +1528,14 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Assistant"
+ "$ref": "#/components/schemas/Campaign"
}
}
}
}
},
"tags": [
- "Assistants"
+ "Campaigns"
],
"security": [
{
@@ -1562,48 +1544,17 @@
]
}
},
- "/phone-number": {
+ "/session": {
"post": {
- "operationId": "PhoneNumberController_create",
- "summary": "Create Phone Number",
+ "operationId": "SessionController_create",
+ "summary": "Create Session",
"parameters": [],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/CreateByoPhoneNumberDTO",
- "title": "ByoPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/CreateTwilioPhoneNumberDTO",
- "title": "TwilioPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/CreateVonagePhoneNumberDTO",
- "title": "VonagePhoneNumber"
- },
- {
- "$ref": "#/components/schemas/CreateVapiPhoneNumberDTO",
- "title": "VapiPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/CreateTelnyxPhoneNumberDTO",
- "title": "TelnyxPhoneNumber"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "byo-phone-number": "#/components/schemas/CreateByoPhoneNumberDTO",
- "twilio": "#/components/schemas/CreateTwilioPhoneNumberDTO",
- "vonage": "#/components/schemas/CreateVonagePhoneNumberDTO",
- "vapi": "#/components/schemas/CreateVapiPhoneNumberDTO",
- "telnyx": "#/components/schemas/CreateTelnyxPhoneNumberDTO"
- }
- }
+ "$ref": "#/components/schemas/CreateSessionDTO"
}
}
}
@@ -1614,46 +1565,14 @@
"content": {
"application/json": {
"schema": {
- "title": "PhoneNumber",
- "oneOf": [
- {
- "$ref": "#/components/schemas/ByoPhoneNumber",
- "title": "ByoPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/TwilioPhoneNumber",
- "title": "TwilioPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/VonagePhoneNumber",
- "title": "VonagePhoneNumber"
- },
- {
- "$ref": "#/components/schemas/VapiPhoneNumber",
- "title": "VapiPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/TelnyxPhoneNumber",
- "title": "TelnyxPhoneNumber"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "byo-phone-number": "#/components/schemas/ByoPhoneNumber",
- "twilio": "#/components/schemas/TwilioPhoneNumber",
- "vonage": "#/components/schemas/VonagePhoneNumber",
- "vapi": "#/components/schemas/VapiPhoneNumber",
- "telnyx": "#/components/schemas/TelnyxPhoneNumber"
- }
- }
+ "$ref": "#/components/schemas/Session"
}
}
}
}
},
"tags": [
- "Phone Numbers"
+ "Sessions"
],
"security": [
{
@@ -1662,9 +1581,201 @@
]
},
"get": {
- "operationId": "PhoneNumberController_findAll",
- "summary": "List Phone Numbers",
+ "operationId": "SessionController_findAllPaginated",
+ "summary": "List Sessions",
"parameters": [
+ {
+ "name": "id",
+ "required": false,
+ "in": "query",
+ "description": "This is the unique identifier for the session to filter by.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "name",
+ "required": false,
+ "in": "query",
+ "description": "This is the name of the session to filter by.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "assistantId",
+ "required": false,
+ "in": "query",
+ "description": "This is the ID of the assistant to filter sessions by.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "assistantIdAny",
+ "required": false,
+ "in": "query",
+ "description": "Filter by multiple assistant IDs. Provide as comma-separated values.",
+ "schema": {
+ "example": "assistant-1,assistant-2,assistant-3",
+ "type": "string"
+ }
+ },
+ {
+ "name": "squadId",
+ "required": false,
+ "in": "query",
+ "description": "This is the ID of the squad to filter sessions by.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "workflowId",
+ "required": false,
+ "in": "query",
+ "description": "This is the ID of the workflow to filter sessions by.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "required": false,
+ "description": "This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.\n\nUse cases:\n- `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.\n- `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.\n\nIf `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\\+?[a-zA-Z0-9]+$/`).\n\n@default true (E164 check is enabled)",
+ "name": "numberE164CheckEnabled",
+ "in": "query",
+ "schema": {
+ "default": true,
+ "type": "boolean"
+ }
+ },
+ {
+ "required": false,
+ "description": "This is the extension that will be dialed after the call is answered.",
+ "name": "extension",
+ "in": "query",
+ "schema": {
+ "maxLength": 10,
+ "example": null,
+ "type": "string"
+ }
+ },
+ {
+ "name": "assistantOverrides",
+ "required": false,
+ "description": "These are the overrides for the assistant's settings and template variables specific to this customer.\nThis allows customization of the assistant's behavior for individual customers in batch calls.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides"
+ }
+ ],
+ "in": "query",
+ "schema": {}
+ },
+ {
+ "required": false,
+ "description": "This is the number of the customer.",
+ "name": "number",
+ "in": "query",
+ "schema": {
+ "minLength": 3,
+ "maxLength": 40,
+ "type": "string"
+ }
+ },
+ {
+ "required": false,
+ "description": "This is the SIP URI of the customer.",
+ "name": "sipUri",
+ "in": "query",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "required": false,
+ "description": "This is the name of the customer. This is just for your own reference.\n\nFor SIP inbound calls, this is extracted from the `From` SIP header with format `\"Display Name\" `.",
+ "name": "name",
+ "in": "query",
+ "schema": {
+ "maxLength": 40,
+ "type": "string"
+ }
+ },
+ {
+ "required": false,
+ "description": "This is the email of the customer.",
+ "name": "email",
+ "in": "query",
+ "schema": {
+ "maxLength": 40,
+ "type": "string"
+ }
+ },
+ {
+ "required": false,
+ "description": "This is the external ID of the customer.",
+ "name": "externalId",
+ "in": "query",
+ "schema": {
+ "maxLength": 40,
+ "type": "string"
+ }
+ },
+ {
+ "name": "customerNumberAny",
+ "required": false,
+ "in": "query",
+ "description": "Filter by any of the specified customer phone numbers (comma-separated).",
+ "schema": {
+ "example": "+1234567890,+0987654321",
+ "type": "string"
+ }
+ },
+ {
+ "name": "phoneNumberId",
+ "required": false,
+ "in": "query",
+ "description": "This will return sessions with the specified phoneNumberId.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "phoneNumberIdAny",
+ "required": false,
+ "in": "query",
+ "description": "This will return sessions with any of the specified phoneNumberIds.",
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ },
+ {
+ "name": "page",
+ "required": false,
+ "in": "query",
+ "description": "This is the page number to return. Defaults to 1.",
+ "schema": {
+ "minimum": 1,
+ "type": "number"
+ }
+ },
+ {
+ "name": "sortOrder",
+ "required": false,
+ "in": "query",
+ "description": "This is the sort order for pagination. Defaults to 'DESC'.",
+ "schema": {
+ "enum": [
+ "ASC",
+ "DESC"
+ ],
+ "type": "string"
+ }
+ },
{
"name": "limit",
"required": false,
@@ -1763,49 +1874,14 @@
"content": {
"application/json": {
"schema": {
- "type": "array",
- "items": {
- "title": "PhoneNumber",
- "oneOf": [
- {
- "$ref": "#/components/schemas/ByoPhoneNumber",
- "title": "ByoPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/TwilioPhoneNumber",
- "title": "TwilioPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/VonagePhoneNumber",
- "title": "VonagePhoneNumber"
- },
- {
- "$ref": "#/components/schemas/VapiPhoneNumber",
- "title": "VapiPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/TelnyxPhoneNumber",
- "title": "TelnyxPhoneNumber"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "byo-phone-number": "#/components/schemas/ByoPhoneNumber",
- "twilio": "#/components/schemas/TwilioPhoneNumber",
- "vonage": "#/components/schemas/VonagePhoneNumber",
- "vapi": "#/components/schemas/VapiPhoneNumber",
- "telnyx": "#/components/schemas/TelnyxPhoneNumber"
- }
- }
- }
+ "$ref": "#/components/schemas/SessionPaginatedResponse"
}
}
}
}
},
"tags": [
- "Phone Numbers"
+ "Sessions"
],
"security": [
{
@@ -1814,10 +1890,10 @@
]
}
},
- "/phone-number/{id}": {
+ "/session/{id}": {
"get": {
- "operationId": "PhoneNumberController_findOne",
- "summary": "Get Phone Number",
+ "operationId": "SessionController_findOne",
+ "summary": "Get Session",
"parameters": [
{
"name": "id",
@@ -1834,46 +1910,14 @@
"content": {
"application/json": {
"schema": {
- "title": "PhoneNumber",
- "oneOf": [
- {
- "$ref": "#/components/schemas/ByoPhoneNumber",
- "title": "ByoPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/TwilioPhoneNumber",
- "title": "TwilioPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/VonagePhoneNumber",
- "title": "VonagePhoneNumber"
- },
- {
- "$ref": "#/components/schemas/VapiPhoneNumber",
- "title": "VapiPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/TelnyxPhoneNumber",
- "title": "TelnyxPhoneNumber"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "byo-phone-number": "#/components/schemas/ByoPhoneNumber",
- "twilio": "#/components/schemas/TwilioPhoneNumber",
- "vonage": "#/components/schemas/VonagePhoneNumber",
- "vapi": "#/components/schemas/VapiPhoneNumber",
- "telnyx": "#/components/schemas/TelnyxPhoneNumber"
- }
- }
+ "$ref": "#/components/schemas/Session"
}
}
}
}
},
"tags": [
- "Phone Numbers"
+ "Sessions"
],
"security": [
{
@@ -1882,8 +1926,8 @@
]
},
"patch": {
- "operationId": "PhoneNumberController_update",
- "summary": "Update Phone Number",
+ "operationId": "SessionController_update",
+ "summary": "Update Session",
"parameters": [
{
"name": "id",
@@ -1899,38 +1943,7 @@
"content": {
"application/json": {
"schema": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/UpdateByoPhoneNumberDTO",
- "title": "ByoPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/UpdateTwilioPhoneNumberDTO",
- "title": "TwilioPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/UpdateVonagePhoneNumberDTO",
- "title": "VonagePhoneNumber"
- },
- {
- "$ref": "#/components/schemas/UpdateVapiPhoneNumberDTO",
- "title": "VapiPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/UpdateTelnyxPhoneNumberDTO",
- "title": "TelnyxPhoneNumber"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "byo-phone-number": "#/components/schemas/UpdateByoPhoneNumberDTO",
- "twilio": "#/components/schemas/UpdateTwilioPhoneNumberDTO",
- "vonage": "#/components/schemas/UpdateVonagePhoneNumberDTO",
- "vapi": "#/components/schemas/UpdateVapiPhoneNumberDTO",
- "telnyx": "#/components/schemas/UpdateTelnyxPhoneNumberDTO"
- }
- }
+ "$ref": "#/components/schemas/UpdateSessionDTO"
}
}
}
@@ -1941,46 +1954,14 @@
"content": {
"application/json": {
"schema": {
- "title": "PhoneNumber",
- "oneOf": [
- {
- "$ref": "#/components/schemas/ByoPhoneNumber",
- "title": "ByoPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/TwilioPhoneNumber",
- "title": "TwilioPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/VonagePhoneNumber",
- "title": "VonagePhoneNumber"
- },
- {
- "$ref": "#/components/schemas/VapiPhoneNumber",
- "title": "VapiPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/TelnyxPhoneNumber",
- "title": "TelnyxPhoneNumber"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "byo-phone-number": "#/components/schemas/ByoPhoneNumber",
- "twilio": "#/components/schemas/TwilioPhoneNumber",
- "vonage": "#/components/schemas/VonagePhoneNumber",
- "vapi": "#/components/schemas/VapiPhoneNumber",
- "telnyx": "#/components/schemas/TelnyxPhoneNumber"
- }
- }
+ "$ref": "#/components/schemas/Session"
}
}
}
}
},
"tags": [
- "Phone Numbers"
+ "Sessions"
],
"security": [
{
@@ -1989,8 +1970,8 @@
]
},
"delete": {
- "operationId": "PhoneNumberController_remove",
- "summary": "Delete Phone Number",
+ "operationId": "SessionController_remove",
+ "summary": "Delete Session",
"parameters": [
{
"name": "id",
@@ -2007,46 +1988,14 @@
"content": {
"application/json": {
"schema": {
- "title": "PhoneNumber",
- "oneOf": [
- {
- "$ref": "#/components/schemas/ByoPhoneNumber",
- "title": "ByoPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/TwilioPhoneNumber",
- "title": "TwilioPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/VonagePhoneNumber",
- "title": "VonagePhoneNumber"
- },
- {
- "$ref": "#/components/schemas/VapiPhoneNumber",
- "title": "VapiPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/TelnyxPhoneNumber",
- "title": "TelnyxPhoneNumber"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "byo-phone-number": "#/components/schemas/ByoPhoneNumber",
- "twilio": "#/components/schemas/TwilioPhoneNumber",
- "vonage": "#/components/schemas/VonagePhoneNumber",
- "vapi": "#/components/schemas/VapiPhoneNumber",
- "telnyx": "#/components/schemas/TelnyxPhoneNumber"
- }
- }
+ "$ref": "#/components/schemas/Session"
}
}
}
}
},
"tags": [
- "Phone Numbers"
+ "Sessions"
],
"security": [
{
@@ -2055,10 +2004,10 @@
]
}
},
- "/tool": {
+ "/phone-number": {
"post": {
- "operationId": "ToolController_create",
- "summary": "Create Tool",
+ "operationId": "PhoneNumberController_create",
+ "summary": "Create Phone Number",
"parameters": [],
"requestBody": {
"required": true,
@@ -2067,109 +2016,34 @@
"schema": {
"oneOf": [
{
- "$ref": "#/components/schemas/CreateApiRequestToolDTO",
- "title": "ApiRequestTool"
- },
- {
- "$ref": "#/components/schemas/CreateDtmfToolDTO",
- "title": "DtmfTool"
- },
- {
- "$ref": "#/components/schemas/CreateEndCallToolDTO",
- "title": "EndCallTool"
- },
- {
- "$ref": "#/components/schemas/CreateFunctionToolDTO",
- "title": "FunctionTool"
- },
- {
- "$ref": "#/components/schemas/CreateTransferCallToolDTO",
- "title": "TransferCallTool"
- },
- {
- "$ref": "#/components/schemas/CreateHandoffToolDTO",
- "title": "HandoffTool"
- },
- {
- "$ref": "#/components/schemas/CreateBashToolDTO",
- "title": "BashTool"
- },
- {
- "$ref": "#/components/schemas/CreateComputerToolDTO",
- "title": "ComputerTool"
- },
- {
- "$ref": "#/components/schemas/CreateTextEditorToolDTO",
- "title": "TextEditorTool"
- },
- {
- "$ref": "#/components/schemas/CreateQueryToolDTO",
- "title": "QueryTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
- "title": "GoogleCalendarCreateEventTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
- "title": "GoogleSheetsRowAppendTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
- "title": "GoogleCalendarCheckAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
- "title": "SlackSendMessageTool"
- },
- {
- "$ref": "#/components/schemas/CreateSmsToolDTO",
- "title": "SmsSendTool"
- },
- {
- "$ref": "#/components/schemas/CreateMcpToolDTO",
- "title": "McpTool"
+ "$ref": "#/components/schemas/CreateByoPhoneNumberDTO",
+ "title": "ByoPhoneNumber"
},
{
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
- "title": "GoHighLevelCalendarAvailabilityTool"
+ "$ref": "#/components/schemas/CreateTwilioPhoneNumberDTO",
+ "title": "TwilioPhoneNumber"
},
{
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
- "title": "GoHighLevelCalendarEventCreateTool"
+ "$ref": "#/components/schemas/CreateVonagePhoneNumberDTO",
+ "title": "VonagePhoneNumber"
},
{
- "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
- "title": "GoHighLevelContactCreateTool"
+ "$ref": "#/components/schemas/CreateVapiPhoneNumberDTO",
+ "title": "VapiPhoneNumber"
},
{
- "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
- "title": "GoHighLevelContactGetTool"
+ "$ref": "#/components/schemas/CreateTelnyxPhoneNumberDTO",
+ "title": "TelnyxPhoneNumber"
}
],
"discriminator": {
- "propertyName": "type",
+ "propertyName": "provider",
"mapping": {
- "apiRequest": "#/components/schemas/CreateApiRequestToolDTO",
- "dtmf": "#/components/schemas/CreateDtmfToolDTO",
- "endCall": "#/components/schemas/CreateEndCallToolDTO",
- "function": "#/components/schemas/CreateFunctionToolDTO",
- "transferCall": "#/components/schemas/CreateTransferCallToolDTO",
- "handoff": "#/components/schemas/CreateHandoffToolDTO",
- "bash": "#/components/schemas/CreateBashToolDTO",
- "computer": "#/components/schemas/CreateComputerToolDTO",
- "textEditor": "#/components/schemas/CreateTextEditorToolDTO",
- "query": "#/components/schemas/CreateQueryToolDTO",
- "google.calendar.event.create": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
- "google.sheets.row.append": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
- "google.calendar.availability.check": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
- "slack.message.send": "#/components/schemas/CreateSlackSendMessageToolDTO",
- "sms": "#/components/schemas/CreateSmsToolDTO",
- "mcp": "#/components/schemas/CreateMcpToolDTO",
- "gohighlevel.calendar.availability.check": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
- "gohighlevel.calendar.event.create": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
- "gohighlevel.contact.create": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
- "gohighlevel.contact.get": "#/components/schemas/CreateGoHighLevelContactGetToolDTO"
+ "byo-phone-number": "#/components/schemas/CreateByoPhoneNumberDTO",
+ "twilio": "#/components/schemas/CreateTwilioPhoneNumberDTO",
+ "vonage": "#/components/schemas/CreateVonagePhoneNumberDTO",
+ "vapi": "#/components/schemas/CreateVapiPhoneNumberDTO",
+ "telnyx": "#/components/schemas/CreateTelnyxPhoneNumberDTO"
}
}
}
@@ -2182,115 +2056,37 @@
"content": {
"application/json": {
"schema": {
+ "title": "PhoneNumber",
"oneOf": [
{
- "$ref": "#/components/schemas/ApiRequestTool",
- "title": "ApiRequestTool"
- },
- {
- "$ref": "#/components/schemas/DtmfTool",
- "title": "DtmfTool"
- },
- {
- "$ref": "#/components/schemas/EndCallTool",
- "title": "EndCallTool"
- },
- {
- "$ref": "#/components/schemas/FunctionTool",
- "title": "FunctionTool"
- },
- {
- "$ref": "#/components/schemas/GhlTool",
- "title": "GhlTool"
- },
- {
- "$ref": "#/components/schemas/TransferCallTool",
- "title": "TransferCallTool"
- },
- {
- "$ref": "#/components/schemas/HandoffTool",
- "title": "HandoffTool"
- },
- {
- "$ref": "#/components/schemas/BashTool",
- "title": "BashTool"
- },
- {
- "$ref": "#/components/schemas/ComputerTool",
- "title": "ComputerTool"
- },
- {
- "$ref": "#/components/schemas/TextEditorTool",
- "title": "TextEditorTool"
- },
- {
- "$ref": "#/components/schemas/QueryTool",
- "title": "QueryTool"
- },
- {
- "$ref": "#/components/schemas/GoogleCalendarCreateEventTool",
- "title": "GoogleCalendarCreateEventTool"
- },
- {
- "$ref": "#/components/schemas/GoogleSheetsRowAppendTool",
- "title": "GoogleSheetsRowAppendTool"
- },
- {
- "$ref": "#/components/schemas/GoogleCalendarCheckAvailabilityTool",
- "title": "GoogleCalendarCheckAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/SlackSendMessageTool",
- "title": "SlackSendMessageTool"
- },
- {
- "$ref": "#/components/schemas/SmsTool",
- "title": "SmsSendTool"
- },
- {
- "$ref": "#/components/schemas/McpTool",
- "title": "McpTool"
+ "$ref": "#/components/schemas/ByoPhoneNumber",
+ "title": "ByoPhoneNumber"
},
{
- "$ref": "#/components/schemas/GoHighLevelCalendarAvailabilityTool",
- "title": "GoHighLevelCalendarAvailabilityTool"
+ "$ref": "#/components/schemas/TwilioPhoneNumber",
+ "title": "TwilioPhoneNumber"
},
{
- "$ref": "#/components/schemas/GoHighLevelCalendarEventCreateTool",
- "title": "GoHighLevelCalendarEventCreateTool"
+ "$ref": "#/components/schemas/VonagePhoneNumber",
+ "title": "VonagePhoneNumber"
},
{
- "$ref": "#/components/schemas/GoHighLevelContactCreateTool",
- "title": "GoHighLevelContactCreateTool"
+ "$ref": "#/components/schemas/VapiPhoneNumber",
+ "title": "VapiPhoneNumber"
},
{
- "$ref": "#/components/schemas/GoHighLevelContactGetTool",
- "title": "GoHighLevelContactGetTool"
+ "$ref": "#/components/schemas/TelnyxPhoneNumber",
+ "title": "TelnyxPhoneNumber"
}
],
"discriminator": {
- "propertyName": "type",
+ "propertyName": "provider",
"mapping": {
- "apiRequest": "#/components/schemas/ApiRequestTool",
- "dtmf": "#/components/schemas/DtmfTool",
- "endCall": "#/components/schemas/EndCallTool",
- "function": "#/components/schemas/FunctionTool",
- "transferCall": "#/components/schemas/TransferCallTool",
- "handoff": "#/components/schemas/HandoffTool",
- "bash": "#/components/schemas/BashTool",
- "computer": "#/components/schemas/ComputerTool",
- "textEditor": "#/components/schemas/TextEditorTool",
- "query": "#/components/schemas/QueryTool",
- "google.calendar.event.create": "#/components/schemas/GoogleCalendarCreateEventTool",
- "google.sheets.row.append": "#/components/schemas/GoogleSheetsRowAppendTool",
- "google.calendar.availability.check": "#/components/schemas/GoogleCalendarCheckAvailabilityTool",
- "slack.message.send": "#/components/schemas/SlackSendMessageTool",
- "sms": "#/components/schemas/SmsTool",
- "mcp": "#/components/schemas/McpTool",
- "gohighlevel.calendar.availability.check": "#/components/schemas/GoHighLevelCalendarAvailabilityTool",
- "gohighlevel.calendar.event.create": "#/components/schemas/GoHighLevelCalendarEventCreateTool",
- "gohighlevel.contact.create": "#/components/schemas/GoHighLevelContactCreateTool",
- "gohighlevel.contact.get": "#/components/schemas/GoHighLevelContactGetTool"
+ "byo-phone-number": "#/components/schemas/ByoPhoneNumber",
+ "twilio": "#/components/schemas/TwilioPhoneNumber",
+ "vonage": "#/components/schemas/VonagePhoneNumber",
+ "vapi": "#/components/schemas/VapiPhoneNumber",
+ "telnyx": "#/components/schemas/TelnyxPhoneNumber"
}
}
}
@@ -2299,7 +2095,7 @@
}
},
"tags": [
- "Tools"
+ "Phone Numbers"
],
"security": [
{
@@ -2308,8 +2104,8 @@
]
},
"get": {
- "operationId": "ToolController_findAll",
- "summary": "List Tools",
+ "operationId": "PhoneNumberController_findAll",
+ "summary": "List Phone Numbers",
"parameters": [
{
"name": "limit",
@@ -2411,115 +2207,37 @@
"schema": {
"type": "array",
"items": {
+ "title": "PhoneNumber",
"oneOf": [
{
- "$ref": "#/components/schemas/ApiRequestTool",
- "title": "ApiRequestTool"
- },
- {
- "$ref": "#/components/schemas/DtmfTool",
- "title": "DtmfTool"
- },
- {
- "$ref": "#/components/schemas/EndCallTool",
- "title": "EndCallTool"
- },
- {
- "$ref": "#/components/schemas/FunctionTool",
- "title": "FunctionTool"
- },
- {
- "$ref": "#/components/schemas/GhlTool",
- "title": "GhlTool"
- },
- {
- "$ref": "#/components/schemas/TransferCallTool",
- "title": "TransferCallTool"
- },
- {
- "$ref": "#/components/schemas/HandoffTool",
- "title": "HandoffTool"
- },
- {
- "$ref": "#/components/schemas/BashTool",
- "title": "BashTool"
- },
- {
- "$ref": "#/components/schemas/ComputerTool",
- "title": "ComputerTool"
- },
- {
- "$ref": "#/components/schemas/TextEditorTool",
- "title": "TextEditorTool"
- },
- {
- "$ref": "#/components/schemas/QueryTool",
- "title": "QueryTool"
- },
- {
- "$ref": "#/components/schemas/GoogleCalendarCreateEventTool",
- "title": "GoogleCalendarCreateEventTool"
- },
- {
- "$ref": "#/components/schemas/GoogleSheetsRowAppendTool",
- "title": "GoogleSheetsRowAppendTool"
- },
- {
- "$ref": "#/components/schemas/GoogleCalendarCheckAvailabilityTool",
- "title": "GoogleCalendarCheckAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/SlackSendMessageTool",
- "title": "SlackSendMessageTool"
- },
- {
- "$ref": "#/components/schemas/SmsTool",
- "title": "SmsSendTool"
- },
- {
- "$ref": "#/components/schemas/McpTool",
- "title": "McpTool"
+ "$ref": "#/components/schemas/ByoPhoneNumber",
+ "title": "ByoPhoneNumber"
},
{
- "$ref": "#/components/schemas/GoHighLevelCalendarAvailabilityTool",
- "title": "GoHighLevelCalendarAvailabilityTool"
+ "$ref": "#/components/schemas/TwilioPhoneNumber",
+ "title": "TwilioPhoneNumber"
},
{
- "$ref": "#/components/schemas/GoHighLevelCalendarEventCreateTool",
- "title": "GoHighLevelCalendarEventCreateTool"
+ "$ref": "#/components/schemas/VonagePhoneNumber",
+ "title": "VonagePhoneNumber"
},
{
- "$ref": "#/components/schemas/GoHighLevelContactCreateTool",
- "title": "GoHighLevelContactCreateTool"
+ "$ref": "#/components/schemas/VapiPhoneNumber",
+ "title": "VapiPhoneNumber"
},
{
- "$ref": "#/components/schemas/GoHighLevelContactGetTool",
- "title": "GoHighLevelContactGetTool"
+ "$ref": "#/components/schemas/TelnyxPhoneNumber",
+ "title": "TelnyxPhoneNumber"
}
],
"discriminator": {
- "propertyName": "type",
+ "propertyName": "provider",
"mapping": {
- "apiRequest": "#/components/schemas/ApiRequestTool",
- "dtmf": "#/components/schemas/DtmfTool",
- "endCall": "#/components/schemas/EndCallTool",
- "function": "#/components/schemas/FunctionTool",
- "transferCall": "#/components/schemas/TransferCallTool",
- "handoff": "#/components/schemas/HandoffTool",
- "bash": "#/components/schemas/BashTool",
- "computer": "#/components/schemas/ComputerTool",
- "textEditor": "#/components/schemas/TextEditorTool",
- "query": "#/components/schemas/QueryTool",
- "google.calendar.event.create": "#/components/schemas/GoogleCalendarCreateEventTool",
- "google.sheets.row.append": "#/components/schemas/GoogleSheetsRowAppendTool",
- "google.calendar.availability.check": "#/components/schemas/GoogleCalendarCheckAvailabilityTool",
- "slack.message.send": "#/components/schemas/SlackSendMessageTool",
- "sms": "#/components/schemas/SmsTool",
- "mcp": "#/components/schemas/McpTool",
- "gohighlevel.calendar.availability.check": "#/components/schemas/GoHighLevelCalendarAvailabilityTool",
- "gohighlevel.calendar.event.create": "#/components/schemas/GoHighLevelCalendarEventCreateTool",
- "gohighlevel.contact.create": "#/components/schemas/GoHighLevelContactCreateTool",
- "gohighlevel.contact.get": "#/components/schemas/GoHighLevelContactGetTool"
+ "byo-phone-number": "#/components/schemas/ByoPhoneNumber",
+ "twilio": "#/components/schemas/TwilioPhoneNumber",
+ "vonage": "#/components/schemas/VonagePhoneNumber",
+ "vapi": "#/components/schemas/VapiPhoneNumber",
+ "telnyx": "#/components/schemas/TelnyxPhoneNumber"
}
}
}
@@ -2529,7 +2247,7 @@
}
},
"tags": [
- "Tools"
+ "Phone Numbers"
],
"security": [
{
@@ -2538,19 +2256,171 @@
]
}
},
- "/tool/{id}": {
+ "/v2/phone-number": {
"get": {
- "operationId": "ToolController_findOne",
- "summary": "Get Tool",
+ "operationId": "PhoneNumberController_findAllPaginated",
+ "summary": "List Phone Numbers",
"parameters": [
{
- "name": "id",
- "required": true,
- "in": "path",
+ "name": "search",
+ "required": false,
+ "in": "query",
+ "description": "This will search phone numbers by name, number, or SIP URI (partial match, case-insensitive).",
"schema": {
+ "maxLength": 100,
"type": "string"
}
- }
+ },
+ {
+ "name": "page",
+ "required": false,
+ "in": "query",
+ "description": "This is the page number to return. Defaults to 1.",
+ "schema": {
+ "minimum": 1,
+ "type": "number"
+ }
+ },
+ {
+ "name": "sortOrder",
+ "required": false,
+ "in": "query",
+ "description": "This is the sort order for pagination. Defaults to 'DESC'.",
+ "schema": {
+ "enum": [
+ "ASC",
+ "DESC"
+ ],
+ "type": "string"
+ }
+ },
+ {
+ "name": "limit",
+ "required": false,
+ "in": "query",
+ "description": "This is the maximum number of items to return. Defaults to 100.",
+ "schema": {
+ "minimum": 0,
+ "maximum": 1000,
+ "type": "number"
+ }
+ },
+ {
+ "name": "createdAtGt",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the createdAt is greater than the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ },
+ {
+ "name": "createdAtLt",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the createdAt is less than the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ },
+ {
+ "name": "createdAtGe",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the createdAt is greater than or equal to the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ },
+ {
+ "name": "createdAtLe",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the createdAt is less than or equal to the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ },
+ {
+ "name": "updatedAtGt",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the updatedAt is greater than the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ },
+ {
+ "name": "updatedAtLt",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the updatedAt is less than the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ },
+ {
+ "name": "updatedAtGe",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the updatedAt is greater than or equal to the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ },
+ {
+ "name": "updatedAtLe",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the updatedAt is less than or equal to the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/PhoneNumberPaginatedResponse"
+ }
+ }
+ }
+ }
+ },
+ "tags": [
+ "Phone Numbers"
+ ],
+ "security": [
+ {
+ "bearer": []
+ }
+ ]
+ }
+ },
+ "/phone-number/{id}": {
+ "get": {
+ "operationId": "PhoneNumberController_findOne",
+ "summary": "Get Phone Number",
+ "parameters": [
+ {
+ "name": "id",
+ "required": true,
+ "in": "path",
+ "schema": {
+ "type": "string"
+ }
+ }
],
"responses": {
"200": {
@@ -2558,115 +2428,144 @@
"content": {
"application/json": {
"schema": {
+ "title": "PhoneNumber",
"oneOf": [
{
- "$ref": "#/components/schemas/ApiRequestTool",
- "title": "ApiRequestTool"
- },
- {
- "$ref": "#/components/schemas/DtmfTool",
- "title": "DtmfTool"
- },
- {
- "$ref": "#/components/schemas/EndCallTool",
- "title": "EndCallTool"
- },
- {
- "$ref": "#/components/schemas/FunctionTool",
- "title": "FunctionTool"
- },
- {
- "$ref": "#/components/schemas/GhlTool",
- "title": "GhlTool"
- },
- {
- "$ref": "#/components/schemas/TransferCallTool",
- "title": "TransferCallTool"
- },
- {
- "$ref": "#/components/schemas/HandoffTool",
- "title": "HandoffTool"
- },
- {
- "$ref": "#/components/schemas/BashTool",
- "title": "BashTool"
- },
- {
- "$ref": "#/components/schemas/ComputerTool",
- "title": "ComputerTool"
- },
- {
- "$ref": "#/components/schemas/TextEditorTool",
- "title": "TextEditorTool"
- },
- {
- "$ref": "#/components/schemas/QueryTool",
- "title": "QueryTool"
- },
- {
- "$ref": "#/components/schemas/GoogleCalendarCreateEventTool",
- "title": "GoogleCalendarCreateEventTool"
+ "$ref": "#/components/schemas/ByoPhoneNumber",
+ "title": "ByoPhoneNumber"
},
{
- "$ref": "#/components/schemas/GoogleSheetsRowAppendTool",
- "title": "GoogleSheetsRowAppendTool"
+ "$ref": "#/components/schemas/TwilioPhoneNumber",
+ "title": "TwilioPhoneNumber"
},
{
- "$ref": "#/components/schemas/GoogleCalendarCheckAvailabilityTool",
- "title": "GoogleCalendarCheckAvailabilityTool"
+ "$ref": "#/components/schemas/VonagePhoneNumber",
+ "title": "VonagePhoneNumber"
},
{
- "$ref": "#/components/schemas/SlackSendMessageTool",
- "title": "SlackSendMessageTool"
+ "$ref": "#/components/schemas/VapiPhoneNumber",
+ "title": "VapiPhoneNumber"
},
{
- "$ref": "#/components/schemas/SmsTool",
- "title": "SmsSendTool"
- },
+ "$ref": "#/components/schemas/TelnyxPhoneNumber",
+ "title": "TelnyxPhoneNumber"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "provider",
+ "mapping": {
+ "byo-phone-number": "#/components/schemas/ByoPhoneNumber",
+ "twilio": "#/components/schemas/TwilioPhoneNumber",
+ "vonage": "#/components/schemas/VonagePhoneNumber",
+ "vapi": "#/components/schemas/VapiPhoneNumber",
+ "telnyx": "#/components/schemas/TelnyxPhoneNumber"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "tags": [
+ "Phone Numbers"
+ ],
+ "security": [
+ {
+ "bearer": []
+ }
+ ]
+ },
+ "patch": {
+ "operationId": "PhoneNumberController_update",
+ "summary": "Update Phone Number",
+ "parameters": [
+ {
+ "name": "id",
+ "required": true,
+ "in": "path",
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/UpdateByoPhoneNumberDTO",
+ "title": "ByoPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateTwilioPhoneNumberDTO",
+ "title": "TwilioPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateVonagePhoneNumberDTO",
+ "title": "VonagePhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateVapiPhoneNumberDTO",
+ "title": "VapiPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateTelnyxPhoneNumberDTO",
+ "title": "TelnyxPhoneNumber"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "provider",
+ "mapping": {
+ "byo-phone-number": "#/components/schemas/UpdateByoPhoneNumberDTO",
+ "twilio": "#/components/schemas/UpdateTwilioPhoneNumberDTO",
+ "vonage": "#/components/schemas/UpdateVonagePhoneNumberDTO",
+ "vapi": "#/components/schemas/UpdateVapiPhoneNumberDTO",
+ "telnyx": "#/components/schemas/UpdateTelnyxPhoneNumberDTO"
+ }
+ }
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "title": "PhoneNumber",
+ "oneOf": [
{
- "$ref": "#/components/schemas/McpTool",
- "title": "McpTool"
+ "$ref": "#/components/schemas/ByoPhoneNumber",
+ "title": "ByoPhoneNumber"
},
{
- "$ref": "#/components/schemas/GoHighLevelCalendarAvailabilityTool",
- "title": "GoHighLevelCalendarAvailabilityTool"
+ "$ref": "#/components/schemas/TwilioPhoneNumber",
+ "title": "TwilioPhoneNumber"
},
{
- "$ref": "#/components/schemas/GoHighLevelCalendarEventCreateTool",
- "title": "GoHighLevelCalendarEventCreateTool"
+ "$ref": "#/components/schemas/VonagePhoneNumber",
+ "title": "VonagePhoneNumber"
},
{
- "$ref": "#/components/schemas/GoHighLevelContactCreateTool",
- "title": "GoHighLevelContactCreateTool"
+ "$ref": "#/components/schemas/VapiPhoneNumber",
+ "title": "VapiPhoneNumber"
},
{
- "$ref": "#/components/schemas/GoHighLevelContactGetTool",
- "title": "GoHighLevelContactGetTool"
+ "$ref": "#/components/schemas/TelnyxPhoneNumber",
+ "title": "TelnyxPhoneNumber"
}
],
"discriminator": {
- "propertyName": "type",
+ "propertyName": "provider",
"mapping": {
- "apiRequest": "#/components/schemas/ApiRequestTool",
- "dtmf": "#/components/schemas/DtmfTool",
- "endCall": "#/components/schemas/EndCallTool",
- "function": "#/components/schemas/FunctionTool",
- "transferCall": "#/components/schemas/TransferCallTool",
- "handoff": "#/components/schemas/HandoffTool",
- "bash": "#/components/schemas/BashTool",
- "computer": "#/components/schemas/ComputerTool",
- "textEditor": "#/components/schemas/TextEditorTool",
- "query": "#/components/schemas/QueryTool",
- "google.calendar.event.create": "#/components/schemas/GoogleCalendarCreateEventTool",
- "google.sheets.row.append": "#/components/schemas/GoogleSheetsRowAppendTool",
- "google.calendar.availability.check": "#/components/schemas/GoogleCalendarCheckAvailabilityTool",
- "slack.message.send": "#/components/schemas/SlackSendMessageTool",
- "sms": "#/components/schemas/SmsTool",
- "mcp": "#/components/schemas/McpTool",
- "gohighlevel.calendar.availability.check": "#/components/schemas/GoHighLevelCalendarAvailabilityTool",
- "gohighlevel.calendar.event.create": "#/components/schemas/GoHighLevelCalendarEventCreateTool",
- "gohighlevel.contact.create": "#/components/schemas/GoHighLevelContactCreateTool",
- "gohighlevel.contact.get": "#/components/schemas/GoHighLevelContactGetTool"
+ "byo-phone-number": "#/components/schemas/ByoPhoneNumber",
+ "twilio": "#/components/schemas/TwilioPhoneNumber",
+ "vonage": "#/components/schemas/VonagePhoneNumber",
+ "vapi": "#/components/schemas/VapiPhoneNumber",
+ "telnyx": "#/components/schemas/TelnyxPhoneNumber"
}
}
}
@@ -2675,7 +2574,7 @@
}
},
"tags": [
- "Tools"
+ "Phone Numbers"
],
"security": [
{
@@ -2683,9 +2582,9 @@
}
]
},
- "patch": {
- "operationId": "ToolController_update",
- "summary": "Update Tool",
+ "delete": {
+ "operationId": "PhoneNumberController_remove",
+ "summary": "Delete Phone Number",
"parameters": [
{
"name": "id",
@@ -2696,6 +2595,65 @@
}
}
],
+ "responses": {
+ "200": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "title": "PhoneNumber",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ByoPhoneNumber",
+ "title": "ByoPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/TwilioPhoneNumber",
+ "title": "TwilioPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/VonagePhoneNumber",
+ "title": "VonagePhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/VapiPhoneNumber",
+ "title": "VapiPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/TelnyxPhoneNumber",
+ "title": "TelnyxPhoneNumber"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "provider",
+ "mapping": {
+ "byo-phone-number": "#/components/schemas/ByoPhoneNumber",
+ "twilio": "#/components/schemas/TwilioPhoneNumber",
+ "vonage": "#/components/schemas/VonagePhoneNumber",
+ "vapi": "#/components/schemas/VapiPhoneNumber",
+ "telnyx": "#/components/schemas/TelnyxPhoneNumber"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "tags": [
+ "Phone Numbers"
+ ],
+ "security": [
+ {
+ "bearer": []
+ }
+ ]
+ }
+ },
+ "/tool": {
+ "post": {
+ "operationId": "ToolController_create",
+ "summary": "Create Tool",
+ "parameters": [],
"requestBody": {
"required": true,
"content": {
@@ -2703,109 +2661,123 @@
"schema": {
"oneOf": [
{
- "$ref": "#/components/schemas/UpdateApiRequestToolDTO",
+ "$ref": "#/components/schemas/CreateApiRequestToolDTO",
"title": "ApiRequestTool"
},
{
- "$ref": "#/components/schemas/UpdateDtmfToolDTO",
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDtmfToolDTO",
"title": "DtmfTool"
},
{
- "$ref": "#/components/schemas/UpdateEndCallToolDTO",
+ "$ref": "#/components/schemas/CreateEndCallToolDTO",
"title": "EndCallTool"
},
{
- "$ref": "#/components/schemas/UpdateFunctionToolDTO",
+ "$ref": "#/components/schemas/CreateFunctionToolDTO",
"title": "FunctionTool"
},
{
- "$ref": "#/components/schemas/UpdateTransferCallToolDTO",
+ "$ref": "#/components/schemas/CreateTransferCallToolDTO",
"title": "TransferCallTool"
},
{
- "$ref": "#/components/schemas/UpdateHandoffToolDTO",
+ "$ref": "#/components/schemas/CreateHandoffToolDTO",
"title": "HandoffTool"
},
{
- "$ref": "#/components/schemas/UpdateBashToolDTO",
+ "$ref": "#/components/schemas/CreateBashToolDTO",
"title": "BashTool"
},
{
- "$ref": "#/components/schemas/UpdateComputerToolDTO",
+ "$ref": "#/components/schemas/CreateComputerToolDTO",
"title": "ComputerTool"
},
{
- "$ref": "#/components/schemas/UpdateTextEditorToolDTO",
+ "$ref": "#/components/schemas/CreateTextEditorToolDTO",
"title": "TextEditorTool"
},
{
- "$ref": "#/components/schemas/UpdateQueryToolDTO",
+ "$ref": "#/components/schemas/CreateQueryToolDTO",
"title": "QueryTool"
},
{
- "$ref": "#/components/schemas/UpdateGoogleCalendarCreateEventToolDTO",
+ "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
"title": "GoogleCalendarCreateEventTool"
},
{
- "$ref": "#/components/schemas/UpdateGoogleSheetsRowAppendToolDTO",
+ "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
"title": "GoogleSheetsRowAppendTool"
},
{
- "$ref": "#/components/schemas/UpdateGoogleCalendarCheckAvailabilityToolDTO",
+ "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
"title": "GoogleCalendarCheckAvailabilityTool"
},
{
- "$ref": "#/components/schemas/UpdateSlackSendMessageToolDTO",
+ "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
"title": "SlackSendMessageTool"
},
{
- "$ref": "#/components/schemas/UpdateSmsToolDTO",
+ "$ref": "#/components/schemas/CreateSmsToolDTO",
"title": "SmsSendTool"
},
{
- "$ref": "#/components/schemas/UpdateMcpToolDTO",
+ "$ref": "#/components/schemas/CreateMcpToolDTO",
"title": "McpTool"
},
{
- "$ref": "#/components/schemas/UpdateGoHighLevelCalendarAvailabilityToolDTO",
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
"title": "GoHighLevelCalendarAvailabilityTool"
},
{
- "$ref": "#/components/schemas/UpdateGoHighLevelCalendarEventCreateToolDTO",
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
"title": "GoHighLevelCalendarEventCreateTool"
},
{
- "$ref": "#/components/schemas/UpdateGoHighLevelContactCreateToolDTO",
+ "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
"title": "GoHighLevelContactCreateTool"
},
{
- "$ref": "#/components/schemas/UpdateGoHighLevelContactGetToolDTO",
+ "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
"title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
],
"discriminator": {
"propertyName": "type",
"mapping": {
- "apiRequest": "#/components/schemas/UpdateApiRequestToolDTO",
- "dtmf": "#/components/schemas/UpdateDtmfToolDTO",
- "endCall": "#/components/schemas/UpdateEndCallToolDTO",
- "function": "#/components/schemas/UpdateFunctionToolDTO",
- "transferCall": "#/components/schemas/UpdateTransferCallToolDTO",
- "handoff": "#/components/schemas/UpdateHandoffToolDTO",
- "bash": "#/components/schemas/UpdateBashToolDTO",
- "computer": "#/components/schemas/UpdateComputerToolDTO",
- "textEditor": "#/components/schemas/UpdateTextEditorToolDTO",
- "query": "#/components/schemas/UpdateQueryToolDTO",
- "google.calendar.event.create": "#/components/schemas/UpdateGoogleCalendarCreateEventToolDTO",
- "google.sheets.row.append": "#/components/schemas/UpdateGoogleSheetsRowAppendToolDTO",
- "google.calendar.availability.check": "#/components/schemas/UpdateGoogleCalendarCheckAvailabilityToolDTO",
- "slack.message.send": "#/components/schemas/UpdateSlackSendMessageToolDTO",
- "sms": "#/components/schemas/UpdateSmsToolDTO",
- "mcp": "#/components/schemas/UpdateMcpToolDTO",
- "gohighlevel.calendar.availability.check": "#/components/schemas/UpdateGoHighLevelCalendarAvailabilityToolDTO",
- "gohighlevel.calendar.event.create": "#/components/schemas/UpdateGoHighLevelCalendarEventCreateToolDTO",
- "gohighlevel.contact.create": "#/components/schemas/UpdateGoHighLevelContactCreateToolDTO",
- "gohighlevel.contact.get": "#/components/schemas/UpdateGoHighLevelContactGetToolDTO"
+ "apiRequest": "#/components/schemas/CreateApiRequestToolDTO",
+ "dtmf": "#/components/schemas/CreateDtmfToolDTO",
+ "endCall": "#/components/schemas/CreateEndCallToolDTO",
+ "function": "#/components/schemas/CreateFunctionToolDTO",
+ "transferCall": "#/components/schemas/CreateTransferCallToolDTO",
+ "handoff": "#/components/schemas/CreateHandoffToolDTO",
+ "bash": "#/components/schemas/CreateBashToolDTO",
+ "computer": "#/components/schemas/CreateComputerToolDTO",
+ "textEditor": "#/components/schemas/CreateTextEditorToolDTO",
+ "query": "#/components/schemas/CreateQueryToolDTO",
+ "google.calendar.event.create": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
+ "google.sheets.row.append": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
+ "google.calendar.availability.check": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
+ "slack.message.send": "#/components/schemas/CreateSlackSendMessageToolDTO",
+ "sms": "#/components/schemas/CreateSmsToolDTO",
+ "mcp": "#/components/schemas/CreateMcpToolDTO",
+ "gohighlevel.calendar.availability.check": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
+ "gohighlevel.calendar.event.create": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
+ "gohighlevel.contact.create": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
+ "gohighlevel.contact.get": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
+ "sipRequest": "#/components/schemas/CreateSipRequestToolDTO",
+ "voicemail": "#/components/schemas/CreateVoicemailToolDTO"
}
}
}
@@ -2813,7 +2785,7 @@
}
},
"responses": {
- "200": {
+ "201": {
"description": "",
"content": {
"application/json": {
@@ -2823,6 +2795,10 @@
"$ref": "#/components/schemas/ApiRequestTool",
"title": "ApiRequestTool"
},
+ {
+ "$ref": "#/components/schemas/CodeTool",
+ "title": "CodeTool"
+ },
{
"$ref": "#/components/schemas/DtmfTool",
"title": "DtmfTool"
@@ -2902,12 +2878,21 @@
{
"$ref": "#/components/schemas/GoHighLevelContactGetTool",
"title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/SipRequestTool",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/VoicemailTool",
+ "title": "VoicemailTool"
}
],
"discriminator": {
"propertyName": "type",
"mapping": {
"apiRequest": "#/components/schemas/ApiRequestTool",
+ "code": "#/components/schemas/CodeTool",
"dtmf": "#/components/schemas/DtmfTool",
"endCall": "#/components/schemas/EndCallTool",
"function": "#/components/schemas/FunctionTool",
@@ -2926,7 +2911,9 @@
"gohighlevel.calendar.availability.check": "#/components/schemas/GoHighLevelCalendarAvailabilityTool",
"gohighlevel.calendar.event.create": "#/components/schemas/GoHighLevelCalendarEventCreateTool",
"gohighlevel.contact.create": "#/components/schemas/GoHighLevelContactCreateTool",
- "gohighlevel.contact.get": "#/components/schemas/GoHighLevelContactGetTool"
+ "gohighlevel.contact.get": "#/components/schemas/GoHighLevelContactGetTool",
+ "sipRequest": "#/components/schemas/SipRequestTool",
+ "voicemail": "#/components/schemas/VoicemailTool"
}
}
}
@@ -2943,9 +2930,256 @@
}
]
},
- "delete": {
- "operationId": "ToolController_remove",
- "summary": "Delete Tool",
+ "get": {
+ "operationId": "ToolController_findAll",
+ "summary": "List Tools",
+ "parameters": [
+ {
+ "name": "limit",
+ "required": false,
+ "in": "query",
+ "description": "This is the maximum number of items to return. Defaults to 100.",
+ "schema": {
+ "minimum": 0,
+ "maximum": 1000,
+ "type": "number"
+ }
+ },
+ {
+ "name": "createdAtGt",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the createdAt is greater than the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ },
+ {
+ "name": "createdAtLt",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the createdAt is less than the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ },
+ {
+ "name": "createdAtGe",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the createdAt is greater than or equal to the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ },
+ {
+ "name": "createdAtLe",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the createdAt is less than or equal to the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ },
+ {
+ "name": "updatedAtGt",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the updatedAt is greater than the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ },
+ {
+ "name": "updatedAtLt",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the updatedAt is less than the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ },
+ {
+ "name": "updatedAtGe",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the updatedAt is greater than or equal to the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ },
+ {
+ "name": "updatedAtLe",
+ "required": false,
+ "in": "query",
+ "description": "This will return items where the updatedAt is less than or equal to the specified value.",
+ "schema": {
+ "format": "date-time",
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ApiRequestTool",
+ "title": "ApiRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CodeTool",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/DtmfTool",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/EndCallTool",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/FunctionTool",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/GhlTool",
+ "title": "GhlTool"
+ },
+ {
+ "$ref": "#/components/schemas/TransferCallTool",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/HandoffTool",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/BashTool",
+ "title": "BashTool"
+ },
+ {
+ "$ref": "#/components/schemas/ComputerTool",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/TextEditorTool",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/QueryTool",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleCalendarCreateEventTool",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleSheetsRowAppendTool",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleCalendarCheckAvailabilityTool",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/SlackSendMessageTool",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/SmsTool",
+ "title": "SmsSendTool"
+ },
+ {
+ "$ref": "#/components/schemas/McpTool",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoHighLevelCalendarAvailabilityTool",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoHighLevelCalendarEventCreateTool",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoHighLevelContactCreateTool",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoHighLevelContactGetTool",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/SipRequestTool",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/VoicemailTool",
+ "title": "VoicemailTool"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "apiRequest": "#/components/schemas/ApiRequestTool",
+ "code": "#/components/schemas/CodeTool",
+ "dtmf": "#/components/schemas/DtmfTool",
+ "endCall": "#/components/schemas/EndCallTool",
+ "function": "#/components/schemas/FunctionTool",
+ "transferCall": "#/components/schemas/TransferCallTool",
+ "handoff": "#/components/schemas/HandoffTool",
+ "bash": "#/components/schemas/BashTool",
+ "computer": "#/components/schemas/ComputerTool",
+ "textEditor": "#/components/schemas/TextEditorTool",
+ "query": "#/components/schemas/QueryTool",
+ "google.calendar.event.create": "#/components/schemas/GoogleCalendarCreateEventTool",
+ "google.sheets.row.append": "#/components/schemas/GoogleSheetsRowAppendTool",
+ "google.calendar.availability.check": "#/components/schemas/GoogleCalendarCheckAvailabilityTool",
+ "slack.message.send": "#/components/schemas/SlackSendMessageTool",
+ "sms": "#/components/schemas/SmsTool",
+ "mcp": "#/components/schemas/McpTool",
+ "gohighlevel.calendar.availability.check": "#/components/schemas/GoHighLevelCalendarAvailabilityTool",
+ "gohighlevel.calendar.event.create": "#/components/schemas/GoHighLevelCalendarEventCreateTool",
+ "gohighlevel.contact.create": "#/components/schemas/GoHighLevelContactCreateTool",
+ "gohighlevel.contact.get": "#/components/schemas/GoHighLevelContactGetTool",
+ "sipRequest": "#/components/schemas/SipRequestTool",
+ "voicemail": "#/components/schemas/VoicemailTool"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "tags": [
+ "Tools"
+ ],
+ "security": [
+ {
+ "bearer": []
+ }
+ ]
+ }
+ },
+ "/tool/{id}": {
+ "get": {
+ "operationId": "ToolController_findOne",
+ "summary": "Get Tool",
"parameters": [
{
"name": "id",
@@ -2967,6 +3201,10 @@
"$ref": "#/components/schemas/ApiRequestTool",
"title": "ApiRequestTool"
},
+ {
+ "$ref": "#/components/schemas/CodeTool",
+ "title": "CodeTool"
+ },
{
"$ref": "#/components/schemas/DtmfTool",
"title": "DtmfTool"
@@ -3046,12 +3284,21 @@
{
"$ref": "#/components/schemas/GoHighLevelContactGetTool",
"title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/SipRequestTool",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/VoicemailTool",
+ "title": "VoicemailTool"
}
],
"discriminator": {
"propertyName": "type",
"mapping": {
"apiRequest": "#/components/schemas/ApiRequestTool",
+ "code": "#/components/schemas/CodeTool",
"dtmf": "#/components/schemas/DtmfTool",
"endCall": "#/components/schemas/EndCallTool",
"function": "#/components/schemas/FunctionTool",
@@ -3070,7 +3317,9 @@
"gohighlevel.calendar.availability.check": "#/components/schemas/GoHighLevelCalendarAvailabilityTool",
"gohighlevel.calendar.event.create": "#/components/schemas/GoHighLevelCalendarEventCreateTool",
"gohighlevel.contact.create": "#/components/schemas/GoHighLevelContactCreateTool",
- "gohighlevel.contact.get": "#/components/schemas/GoHighLevelContactGetTool"
+ "gohighlevel.contact.get": "#/components/schemas/GoHighLevelContactGetTool",
+ "sipRequest": "#/components/schemas/SipRequestTool",
+ "voicemail": "#/components/schemas/VoicemailTool"
}
}
}
@@ -3086,114 +3335,10 @@
"bearer": []
}
]
- }
- },
- "/file": {
- "post": {
- "operationId": "FileController_create",
- "summary": "Upload File",
- "parameters": [],
- "requestBody": {
- "required": true,
- "content": {
- "multipart/form-data": {
- "schema": {
- "$ref": "#/components/schemas/CreateFileDTO"
- }
- }
- }
- },
- "responses": {
- "201": {
- "description": "File uploaded successfully",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/File"
- }
- }
- }
- },
- "400": {
- "description": "Invalid file"
- }
- },
- "tags": [
- "Files"
- ],
- "security": [
- {
- "bearer": []
- }
- ]
- },
- "get": {
- "operationId": "FileController_findAll",
- "summary": "List Files",
- "parameters": [],
- "responses": {
- "200": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/File"
- }
- }
- }
- }
- }
- },
- "tags": [
- "Files"
- ],
- "security": [
- {
- "bearer": []
- }
- ]
- }
- },
- "/file/{id}": {
- "get": {
- "operationId": "FileController_findOne",
- "summary": "Get File",
- "parameters": [
- {
- "name": "id",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/File"
- }
- }
- }
- }
- },
- "tags": [
- "Files"
- ],
- "security": [
- {
- "bearer": []
- }
- ]
},
"patch": {
- "operationId": "FileController_update",
- "summary": "Update File",
+ "operationId": "ToolController_update",
+ "summary": "Update Tool",
"parameters": [
{
"name": "id",
@@ -3209,119 +3354,261 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/UpdateFileDTO"
- }
- }
- }
- },
- "responses": {
- "200": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/File"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/UpdateApiRequestToolDTO",
+ "title": "ApiRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateCodeToolDTO",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateDtmfToolDTO",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateEndCallToolDTO",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateFunctionToolDTO",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateTransferCallToolDTO",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateHandoffToolDTO",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateBashToolDTO",
+ "title": "BashTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateComputerToolDTO",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateTextEditorToolDTO",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateQueryToolDTO",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateGoogleCalendarCreateEventToolDTO",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateGoogleSheetsRowAppendToolDTO",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateGoogleCalendarCheckAvailabilityToolDTO",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateSlackSendMessageToolDTO",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateSmsToolDTO",
+ "title": "SmsSendTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateMcpToolDTO",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateGoHighLevelCalendarAvailabilityToolDTO",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateGoHighLevelCalendarEventCreateToolDTO",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateGoHighLevelContactCreateToolDTO",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateGoHighLevelContactGetToolDTO",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateVoicemailToolDTO",
+ "title": "VoicemailTool"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "apiRequest": "#/components/schemas/UpdateApiRequestToolDTO",
+ "dtmf": "#/components/schemas/UpdateDtmfToolDTO",
+ "endCall": "#/components/schemas/UpdateEndCallToolDTO",
+ "function": "#/components/schemas/UpdateFunctionToolDTO",
+ "transferCall": "#/components/schemas/UpdateTransferCallToolDTO",
+ "handoff": "#/components/schemas/UpdateHandoffToolDTO",
+ "bash": "#/components/schemas/UpdateBashToolDTO",
+ "computer": "#/components/schemas/UpdateComputerToolDTO",
+ "textEditor": "#/components/schemas/UpdateTextEditorToolDTO",
+ "query": "#/components/schemas/UpdateQueryToolDTO",
+ "google.calendar.event.create": "#/components/schemas/UpdateGoogleCalendarCreateEventToolDTO",
+ "google.sheets.row.append": "#/components/schemas/UpdateGoogleSheetsRowAppendToolDTO",
+ "google.calendar.availability.check": "#/components/schemas/UpdateGoogleCalendarCheckAvailabilityToolDTO",
+ "slack.message.send": "#/components/schemas/UpdateSlackSendMessageToolDTO",
+ "sms": "#/components/schemas/UpdateSmsToolDTO",
+ "mcp": "#/components/schemas/UpdateMcpToolDTO",
+ "gohighlevel.calendar.availability.check": "#/components/schemas/UpdateGoHighLevelCalendarAvailabilityToolDTO",
+ "gohighlevel.calendar.event.create": "#/components/schemas/UpdateGoHighLevelCalendarEventCreateToolDTO",
+ "gohighlevel.contact.create": "#/components/schemas/UpdateGoHighLevelContactCreateToolDTO",
+ "gohighlevel.contact.get": "#/components/schemas/UpdateGoHighLevelContactGetToolDTO",
+ "sipRequest": "#/components/schemas/UpdateSipRequestToolDTO",
+ "voicemail": "#/components/schemas/UpdateVoicemailToolDTO"
+ }
}
}
}
}
},
- "tags": [
- "Files"
- ],
- "security": [
- {
- "bearer": []
- }
- ]
- },
- "delete": {
- "operationId": "FileController_remove",
- "summary": "Delete File",
- "parameters": [
- {
- "name": "id",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- }
- ],
"responses": {
"200": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/File"
- }
- }
- }
- }
- },
- "tags": [
- "Files"
- ],
- "security": [
- {
- "bearer": []
- }
- ]
- }
- },
- "/knowledge-base": {
- "post": {
- "operationId": "KnowledgeBaseController_create",
- "summary": "Create Knowledge Base",
- "parameters": [],
- "requestBody": {
- "required": true,
- "content": {
- "application/json": {
- "schema": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/CreateTrieveKnowledgeBaseDTO",
- "title": "TrieveKnowledgeBaseDTO"
- },
- {
- "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
- "title": "CustomKnowledgeBaseDTO"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "trieve": "#/components/schemas/CreateTrieveKnowledgeBaseDTO",
- "custom-knowledge-base": "#/components/schemas/CreateCustomKnowledgeBaseDTO"
- }
- }
- }
- }
- }
- },
- "responses": {
- "201": {
"description": "",
"content": {
"application/json": {
"schema": {
"oneOf": [
{
- "$ref": "#/components/schemas/TrieveKnowledgeBase",
- "title": "TrieveKnowledgeBase"
+ "$ref": "#/components/schemas/ApiRequestTool",
+ "title": "ApiRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CodeTool",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/DtmfTool",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/EndCallTool",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/FunctionTool",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/GhlTool",
+ "title": "GhlTool"
+ },
+ {
+ "$ref": "#/components/schemas/TransferCallTool",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/HandoffTool",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/BashTool",
+ "title": "BashTool"
+ },
+ {
+ "$ref": "#/components/schemas/ComputerTool",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/TextEditorTool",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/QueryTool",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleCalendarCreateEventTool",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleSheetsRowAppendTool",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleCalendarCheckAvailabilityTool",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/SlackSendMessageTool",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/SmsTool",
+ "title": "SmsSendTool"
+ },
+ {
+ "$ref": "#/components/schemas/McpTool",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoHighLevelCalendarAvailabilityTool",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoHighLevelCalendarEventCreateTool",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoHighLevelContactCreateTool",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoHighLevelContactGetTool",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/SipRequestTool",
+ "title": "SipRequestTool"
},
{
- "$ref": "#/components/schemas/CustomKnowledgeBase",
- "title": "CustomKnowledgeBase"
+ "$ref": "#/components/schemas/VoicemailTool",
+ "title": "VoicemailTool"
}
],
"discriminator": {
- "propertyName": "provider",
+ "propertyName": "type",
"mapping": {
- "trieve": "#/components/schemas/TrieveKnowledgeBase",
- "custom-knowledge-base": "#/components/schemas/CustomKnowledgeBase"
+ "apiRequest": "#/components/schemas/ApiRequestTool",
+ "code": "#/components/schemas/CodeTool",
+ "dtmf": "#/components/schemas/DtmfTool",
+ "endCall": "#/components/schemas/EndCallTool",
+ "function": "#/components/schemas/FunctionTool",
+ "transferCall": "#/components/schemas/TransferCallTool",
+ "handoff": "#/components/schemas/HandoffTool",
+ "bash": "#/components/schemas/BashTool",
+ "computer": "#/components/schemas/ComputerTool",
+ "textEditor": "#/components/schemas/TextEditorTool",
+ "query": "#/components/schemas/QueryTool",
+ "google.calendar.event.create": "#/components/schemas/GoogleCalendarCreateEventTool",
+ "google.sheets.row.append": "#/components/schemas/GoogleSheetsRowAppendTool",
+ "google.calendar.availability.check": "#/components/schemas/GoogleCalendarCheckAvailabilityTool",
+ "slack.message.send": "#/components/schemas/SlackSendMessageTool",
+ "sms": "#/components/schemas/SmsTool",
+ "mcp": "#/components/schemas/McpTool",
+ "gohighlevel.calendar.availability.check": "#/components/schemas/GoHighLevelCalendarAvailabilityTool",
+ "gohighlevel.calendar.event.create": "#/components/schemas/GoHighLevelCalendarEventCreateTool",
+ "gohighlevel.contact.create": "#/components/schemas/GoHighLevelContactCreateTool",
+ "gohighlevel.contact.get": "#/components/schemas/GoHighLevelContactGetTool",
+ "sipRequest": "#/components/schemas/SipRequestTool",
+ "voicemail": "#/components/schemas/VoicemailTool"
}
}
}
@@ -3330,7 +3617,7 @@
}
},
"tags": [
- "Knowledge Base"
+ "Tools"
],
"security": [
{
@@ -3338,147 +3625,9 @@
}
]
},
- "get": {
- "operationId": "KnowledgeBaseController_findAll",
- "summary": "List Knowledge Bases",
- "parameters": [
- {
- "name": "limit",
- "required": false,
- "in": "query",
- "description": "This is the maximum number of items to return. Defaults to 100.",
- "schema": {
- "minimum": 0,
- "maximum": 1000,
- "type": "number"
- }
- },
- {
- "name": "createdAtGt",
- "required": false,
- "in": "query",
- "description": "This will return items where the createdAt is greater than the specified value.",
- "schema": {
- "format": "date-time",
- "type": "string"
- }
- },
- {
- "name": "createdAtLt",
- "required": false,
- "in": "query",
- "description": "This will return items where the createdAt is less than the specified value.",
- "schema": {
- "format": "date-time",
- "type": "string"
- }
- },
- {
- "name": "createdAtGe",
- "required": false,
- "in": "query",
- "description": "This will return items where the createdAt is greater than or equal to the specified value.",
- "schema": {
- "format": "date-time",
- "type": "string"
- }
- },
- {
- "name": "createdAtLe",
- "required": false,
- "in": "query",
- "description": "This will return items where the createdAt is less than or equal to the specified value.",
- "schema": {
- "format": "date-time",
- "type": "string"
- }
- },
- {
- "name": "updatedAtGt",
- "required": false,
- "in": "query",
- "description": "This will return items where the updatedAt is greater than the specified value.",
- "schema": {
- "format": "date-time",
- "type": "string"
- }
- },
- {
- "name": "updatedAtLt",
- "required": false,
- "in": "query",
- "description": "This will return items where the updatedAt is less than the specified value.",
- "schema": {
- "format": "date-time",
- "type": "string"
- }
- },
- {
- "name": "updatedAtGe",
- "required": false,
- "in": "query",
- "description": "This will return items where the updatedAt is greater than or equal to the specified value.",
- "schema": {
- "format": "date-time",
- "type": "string"
- }
- },
- {
- "name": "updatedAtLe",
- "required": false,
- "in": "query",
- "description": "This will return items where the updatedAt is less than or equal to the specified value.",
- "schema": {
- "format": "date-time",
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "type": "array",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TrieveKnowledgeBase",
- "title": "TrieveKnowledgeBase"
- },
- {
- "$ref": "#/components/schemas/CustomKnowledgeBase",
- "title": "CustomKnowledgeBase"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "trieve": "#/components/schemas/TrieveKnowledgeBase",
- "custom-knowledge-base": "#/components/schemas/CustomKnowledgeBase"
- }
- }
- }
- }
- }
- }
- }
- },
- "tags": [
- "Knowledge Base"
- ],
- "security": [
- {
- "bearer": []
- }
- ]
- }
- },
- "/knowledge-base/{id}": {
- "get": {
- "operationId": "KnowledgeBaseController_findOne",
- "summary": "Get Knowledge Base",
+ "delete": {
+ "operationId": "ToolController_remove",
+ "summary": "Delete Tool",
"parameters": [
{
"name": "id",
@@ -3497,19 +3646,128 @@
"schema": {
"oneOf": [
{
- "$ref": "#/components/schemas/TrieveKnowledgeBase",
- "title": "TrieveKnowledgeBase"
+ "$ref": "#/components/schemas/ApiRequestTool",
+ "title": "ApiRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CodeTool",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/DtmfTool",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/EndCallTool",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/FunctionTool",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/GhlTool",
+ "title": "GhlTool"
+ },
+ {
+ "$ref": "#/components/schemas/TransferCallTool",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/HandoffTool",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/BashTool",
+ "title": "BashTool"
+ },
+ {
+ "$ref": "#/components/schemas/ComputerTool",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/TextEditorTool",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/QueryTool",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleCalendarCreateEventTool",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleSheetsRowAppendTool",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleCalendarCheckAvailabilityTool",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/SlackSendMessageTool",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/SmsTool",
+                        "title": "SmsTool"
+ },
+ {
+ "$ref": "#/components/schemas/McpTool",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoHighLevelCalendarAvailabilityTool",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoHighLevelCalendarEventCreateTool",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoHighLevelContactCreateTool",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/GoHighLevelContactGetTool",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/SipRequestTool",
+ "title": "SipRequestTool"
},
{
- "$ref": "#/components/schemas/CustomKnowledgeBase",
- "title": "CustomKnowledgeBase"
+ "$ref": "#/components/schemas/VoicemailTool",
+ "title": "VoicemailTool"
}
],
"discriminator": {
- "propertyName": "provider",
+ "propertyName": "type",
"mapping": {
- "trieve": "#/components/schemas/TrieveKnowledgeBase",
- "custom-knowledge-base": "#/components/schemas/CustomKnowledgeBase"
+ "apiRequest": "#/components/schemas/ApiRequestTool",
+ "code": "#/components/schemas/CodeTool",
+ "dtmf": "#/components/schemas/DtmfTool",
+ "endCall": "#/components/schemas/EndCallTool",
+ "function": "#/components/schemas/FunctionTool",
+ "transferCall": "#/components/schemas/TransferCallTool",
+ "handoff": "#/components/schemas/HandoffTool",
+ "bash": "#/components/schemas/BashTool",
+ "computer": "#/components/schemas/ComputerTool",
+ "textEditor": "#/components/schemas/TextEditorTool",
+ "query": "#/components/schemas/QueryTool",
+ "google.calendar.event.create": "#/components/schemas/GoogleCalendarCreateEventTool",
+ "google.sheets.row.append": "#/components/schemas/GoogleSheetsRowAppendTool",
+ "google.calendar.availability.check": "#/components/schemas/GoogleCalendarCheckAvailabilityTool",
+ "slack.message.send": "#/components/schemas/SlackSendMessageTool",
+ "sms": "#/components/schemas/SmsTool",
+ "mcp": "#/components/schemas/McpTool",
+ "gohighlevel.calendar.availability.check": "#/components/schemas/GoHighLevelCalendarAvailabilityTool",
+ "gohighlevel.calendar.event.create": "#/components/schemas/GoHighLevelCalendarEventCreateTool",
+ "gohighlevel.contact.create": "#/components/schemas/GoHighLevelContactCreateTool",
+ "gohighlevel.contact.get": "#/components/schemas/GoHighLevelContactGetTool",
+ "sipRequest": "#/components/schemas/SipRequestTool",
+ "voicemail": "#/components/schemas/VoicemailTool"
}
}
}
@@ -3518,83 +3776,47 @@
}
},
"tags": [
- "Knowledge Base"
+ "Tools"
],
"security": [
{
"bearer": []
}
]
- },
- "patch": {
- "operationId": "KnowledgeBaseController_update",
- "summary": "Update Knowledge Base",
- "parameters": [
- {
- "name": "id",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- }
- ],
+ }
+ },
+ "/file": {
+ "post": {
+ "operationId": "FileController_create",
+ "summary": "Upload File",
+ "parameters": [],
"requestBody": {
"required": true,
"content": {
- "application/json": {
+ "multipart/form-data": {
"schema": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/UpdateTrieveKnowledgeBaseDTO",
- "title": "UpdateTrieveKnowledgeBaseDTO"
- },
- {
- "$ref": "#/components/schemas/UpdateCustomKnowledgeBaseDTO",
- "title": "UpdateCustomKnowledgeBaseDTO"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "trieve": "#/components/schemas/UpdateTrieveKnowledgeBaseDTO",
- "custom-knowledge-base": "#/components/schemas/UpdateCustomKnowledgeBaseDTO"
- }
- }
+ "$ref": "#/components/schemas/CreateFileDTO"
}
}
}
},
"responses": {
- "200": {
- "description": "",
+ "201": {
+ "description": "File uploaded successfully",
"content": {
"application/json": {
"schema": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TrieveKnowledgeBase",
- "title": "TrieveKnowledgeBase"
- },
- {
- "$ref": "#/components/schemas/CustomKnowledgeBase",
- "title": "CustomKnowledgeBase"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "trieve": "#/components/schemas/TrieveKnowledgeBase",
- "custom-knowledge-base": "#/components/schemas/CustomKnowledgeBase"
- }
- }
+ "$ref": "#/components/schemas/File"
}
}
}
+ },
+ "400": {
+ "description": "Invalid file"
}
},
"tags": [
- "Knowledge Base"
+ "Files"
],
"security": [
{
@@ -3602,41 +3824,19 @@
}
]
},
- "delete": {
- "operationId": "KnowledgeBaseController_remove",
- "summary": "Delete Knowledge Base",
- "parameters": [
- {
- "name": "id",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- }
- ],
+ "get": {
+ "operationId": "FileController_findAll",
+ "summary": "List Files",
+ "parameters": [],
"responses": {
"200": {
"description": "",
"content": {
"application/json": {
"schema": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TrieveKnowledgeBase",
- "title": "TrieveKnowledgeBase"
- },
- {
- "$ref": "#/components/schemas/CustomKnowledgeBase",
- "title": "CustomKnowledgeBase"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "trieve": "#/components/schemas/TrieveKnowledgeBase",
- "custom-knowledge-base": "#/components/schemas/CustomKnowledgeBase"
- }
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/File"
}
}
}
@@ -3644,7 +3844,7 @@
}
},
"tags": [
- "Knowledge Base"
+ "Files"
],
"security": [
{
@@ -3653,28 +3853,34 @@
]
}
},
- "/workflow": {
+ "/file/{id}": {
"get": {
- "operationId": "WorkflowController_findAll",
- "summary": "Get Workflows",
- "parameters": [],
+ "operationId": "FileController_findOne",
+ "summary": "Get File",
+ "parameters": [
+ {
+ "name": "id",
+ "required": true,
+ "in": "path",
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
"responses": {
"200": {
"description": "",
"content": {
"application/json": {
"schema": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/Workflow"
- }
+ "$ref": "#/components/schemas/File"
}
}
}
}
},
"tags": [
- "Workflow"
+ "Files"
],
"security": [
{
@@ -3682,46 +3888,53 @@
}
]
},
- "post": {
- "operationId": "WorkflowController_create",
- "summary": "Create Workflow",
- "parameters": [],
+ "patch": {
+ "operationId": "FileController_update",
+ "summary": "Update File",
+ "parameters": [
+ {
+ "name": "id",
+ "required": true,
+ "in": "path",
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/CreateWorkflowDTO"
+ "$ref": "#/components/schemas/UpdateFileDTO"
}
}
}
},
"responses": {
- "201": {
+ "200": {
"description": "",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Workflow"
+ "$ref": "#/components/schemas/File"
}
}
}
}
},
"tags": [
- "Workflow"
+ "Files"
],
"security": [
{
"bearer": []
}
]
- }
- },
- "/workflow/{id}": {
- "get": {
- "operationId": "WorkflowController_findOne",
- "summary": "Get Workflow",
+ },
+ "delete": {
+ "operationId": "FileController_remove",
+ "summary": "Delete File",
"parameters": [
{
"name": "id",
@@ -3738,140 +3951,68 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Workflow"
+ "$ref": "#/components/schemas/File"
}
}
}
}
},
"tags": [
- "Workflow"
+ "Files"
],
"security": [
{
"bearer": []
}
]
- },
- "delete": {
- "operationId": "WorkflowController_delete",
- "summary": "Delete Workflow",
+ }
+ },
+ "/structured-output": {
+ "get": {
+ "operationId": "StructuredOutputController_findAll",
+ "summary": "List Structured Outputs",
"parameters": [
{
"name": "id",
- "required": true,
- "in": "path",
+ "required": false,
+ "in": "query",
+ "description": "This will return structured outputs where the id matches the specified value.",
"schema": {
"type": "string"
}
- }
- ],
- "responses": {
- "200": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/Workflow"
- }
- }
- }
- }
- },
- "tags": [
- "Workflow"
- ],
- "security": [
- {
- "bearer": []
- }
- ]
- },
- "patch": {
- "operationId": "WorkflowController_update",
- "summary": "Update Workflow",
- "parameters": [
+ },
{
- "name": "id",
- "required": true,
- "in": "path",
+ "name": "name",
+ "required": false,
+ "in": "query",
+ "description": "This will return structured outputs where the name matches the specified value.",
"schema": {
"type": "string"
}
- }
- ],
- "requestBody": {
- "required": true,
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/UpdateWorkflowDTO"
- }
- }
- }
- },
- "responses": {
- "200": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/Workflow"
- }
- }
- }
- }
- },
- "tags": [
- "Workflow"
- ],
- "security": [
+ },
{
- "bearer": []
- }
- ]
- }
- },
- "/squad": {
- "post": {
- "operationId": "SquadController_create",
- "summary": "Create Squad",
- "parameters": [],
- "requestBody": {
- "required": true,
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/CreateSquadDTO"
- }
- }
- }
- },
- "responses": {
- "201": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/Squad"
- }
- }
+ "name": "page",
+ "required": false,
+ "in": "query",
+ "description": "This is the page number to return. Defaults to 1.",
+ "schema": {
+ "minimum": 1,
+ "type": "number"
}
- }
- },
- "tags": [
- "Squads"
- ],
- "security": [
+ },
{
- "bearer": []
- }
- ]
- },
- "get": {
- "operationId": "SquadController_findAll",
- "summary": "List Squads",
- "parameters": [
+ "name": "sortOrder",
+ "required": false,
+ "in": "query",
+ "description": "This is the sort order for pagination. Defaults to 'DESC'.",
+ "schema": {
+ "enum": [
+ "ASC",
+ "DESC"
+ ],
+ "type": "string"
+ }
+ },
{
"name": "limit",
"required": false,
@@ -3970,53 +4111,14 @@
"content": {
"application/json": {
"schema": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/Squad"
- }
- }
- }
- }
- }
- },
- "tags": [
- "Squads"
- ],
- "security": [
- {
- "bearer": []
- }
- ]
- }
- },
- "/squad/{id}": {
- "get": {
- "operationId": "SquadController_findOne",
- "summary": "Get Squad",
- "parameters": [
- {
- "name": "id",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/Squad"
+ "$ref": "#/components/schemas/StructuredOutputPaginatedResponse"
}
}
}
}
},
"tags": [
- "Squads"
+ "Structured Outputs"
],
"security": [
{
@@ -4024,53 +4126,46 @@
}
]
},
- "patch": {
- "operationId": "SquadController_update",
- "summary": "Update Squad",
- "parameters": [
- {
- "name": "id",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- }
- ],
+ "post": {
+ "operationId": "StructuredOutputController_create",
+ "summary": "Create Structured Output",
+ "parameters": [],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/UpdateSquadDTO"
+ "$ref": "#/components/schemas/CreateStructuredOutputDTO"
}
}
}
},
"responses": {
- "200": {
+ "201": {
"description": "",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Squad"
+ "$ref": "#/components/schemas/StructuredOutput"
}
}
}
}
},
"tags": [
- "Squads"
+ "Structured Outputs"
],
"security": [
{
"bearer": []
}
]
- },
- "delete": {
- "operationId": "SquadController_remove",
- "summary": "Delete Squad",
+ }
+ },
+ "/structured-output/{id}": {
+ "get": {
+ "operationId": "StructuredOutputController_findOne",
+ "summary": "Get Structured Output",
"parameters": [
{
"name": "id",
@@ -4087,203 +4182,76 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/Squad"
+ "$ref": "#/components/schemas/StructuredOutput"
}
}
}
}
},
"tags": [
- "Squads"
+ "Structured Outputs"
],
"security": [
{
"bearer": []
}
]
- }
- },
- "/test-suite": {
- "get": {
- "operationId": "TestSuiteController_findAllPaginated",
- "summary": "List Test Suites",
+ },
+ "patch": {
+ "operationId": "StructuredOutputController_update",
+ "summary": "Update Structured Output",
"parameters": [
{
- "name": "page",
- "required": false,
- "in": "query",
- "description": "This is the page number to return. Defaults to 1.",
- "schema": {
- "minimum": 1,
- "type": "number"
- }
- },
- {
- "name": "sortOrder",
- "required": false,
- "in": "query",
- "description": "This is the sort order for pagination. Defaults to 'DESC'.",
- "schema": {
- "enum": [
- "ASC",
- "DESC"
- ],
- "type": "string"
- }
- },
- {
- "name": "limit",
- "required": false,
- "in": "query",
- "description": "This is the maximum number of items to return. Defaults to 100.",
- "schema": {
- "minimum": 0,
- "maximum": 1000,
- "type": "number"
- }
- },
- {
- "name": "createdAtGt",
- "required": false,
- "in": "query",
- "description": "This will return items where the createdAt is greater than the specified value.",
- "schema": {
- "format": "date-time",
- "type": "string"
- }
- },
- {
- "name": "createdAtLt",
- "required": false,
- "in": "query",
- "description": "This will return items where the createdAt is less than the specified value.",
- "schema": {
- "format": "date-time",
- "type": "string"
- }
- },
- {
- "name": "createdAtGe",
- "required": false,
- "in": "query",
- "description": "This will return items where the createdAt is greater than or equal to the specified value.",
- "schema": {
- "format": "date-time",
- "type": "string"
- }
- },
- {
- "name": "createdAtLe",
- "required": false,
- "in": "query",
- "description": "This will return items where the createdAt is less than or equal to the specified value.",
- "schema": {
- "format": "date-time",
- "type": "string"
- }
- },
- {
- "name": "updatedAtGt",
- "required": false,
- "in": "query",
- "description": "This will return items where the updatedAt is greater than the specified value.",
- "schema": {
- "format": "date-time",
- "type": "string"
- }
- },
- {
- "name": "updatedAtLt",
- "required": false,
- "in": "query",
- "description": "This will return items where the updatedAt is less than the specified value.",
- "schema": {
- "format": "date-time",
- "type": "string"
- }
- },
- {
- "name": "updatedAtGe",
- "required": false,
- "in": "query",
- "description": "This will return items where the updatedAt is greater than or equal to the specified value.",
+ "name": "id",
+ "required": true,
+ "in": "path",
"schema": {
- "format": "date-time",
"type": "string"
}
},
{
- "name": "updatedAtLe",
- "required": false,
+ "name": "schemaOverride",
+ "required": true,
"in": "query",
- "description": "This will return items where the updatedAt is less than or equal to the specified value.",
"schema": {
- "format": "date-time",
"type": "string"
}
}
],
- "responses": {
- "200": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/TestSuitesPaginatedResponse"
- }
- }
- }
- }
- },
- "tags": [
- "Test Suites"
- ],
- "security": [
- {
- "bearer": []
- }
- ]
- },
- "post": {
- "operationId": "TestSuiteController_create",
- "summary": "Create Test Suite",
- "parameters": [],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/CreateTestSuiteDto"
+ "$ref": "#/components/schemas/UpdateStructuredOutputDTO"
}
}
}
},
"responses": {
- "201": {
+ "200": {
"description": "",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/TestSuite"
+ "$ref": "#/components/schemas/StructuredOutput"
}
}
}
}
},
"tags": [
- "Test Suites"
+ "Structured Outputs"
],
"security": [
{
"bearer": []
}
]
- }
- },
- "/test-suite/{id}": {
- "get": {
- "operationId": "TestSuiteController_findOne",
- "summary": "Get Test Suite",
+ },
+ "delete": {
+ "operationId": "StructuredOutputController_remove",
+ "summary": "Delete Structured Output",
"parameters": [
{
"name": "id",
@@ -4300,40 +4268,33 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/TestSuite"
+ "$ref": "#/components/schemas/StructuredOutput"
}
}
}
}
},
"tags": [
- "Test Suites"
+ "Structured Outputs"
],
"security": [
{
"bearer": []
}
]
- },
- "patch": {
- "operationId": "TestSuiteController_update",
- "summary": "Update Test Suite",
- "parameters": [
- {
- "name": "id",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- }
- ],
+ }
+ },
+ "/structured-output/run": {
+ "post": {
+ "operationId": "StructuredOutputController_run",
+ "summary": "Run Structured Output",
+ "parameters": [],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/UpdateTestSuiteDto"
+ "$ref": "#/components/schemas/StructuredOutputRunDTO"
}
}
}
@@ -4344,65 +4305,124 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/TestSuite"
+ "$ref": "#/components/schemas/StructuredOutput"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "type": "object"
}
}
}
}
},
"tags": [
- "Test Suites"
+ "Structured Outputs"
],
"security": [
{
"bearer": []
}
]
- },
- "delete": {
- "operationId": "TestSuiteController_remove",
- "summary": "Delete Test Suite",
- "parameters": [
- {
- "name": "id",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
+ }
+ },
+ "/reporting/insight": {
+ "post": {
+ "operationId": "InsightController_create",
+ "summary": "Create Insight",
+ "parameters": [],
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateBarInsightFromCallTableDTO",
+ "title": "CreateBarInsightFromCallTableDTO"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePieInsightFromCallTableDTO",
+ "title": "CreatePieInsightFromCallTableDTO"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLineInsightFromCallTableDTO",
+ "title": "CreateLineInsightFromCallTableDTO"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTextInsightFromCallTableDTO",
+ "title": "CreateTextInsightFromCallTableDTO"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "bar": "#/components/schemas/CreateBarInsightFromCallTableDTO",
+ "pie": "#/components/schemas/CreatePieInsightFromCallTableDTO",
+ "line": "#/components/schemas/CreateLineInsightFromCallTableDTO",
+ "text": "#/components/schemas/CreateTextInsightFromCallTableDTO"
+ }
+ }
+ }
}
}
- ],
+ },
"responses": {
- "200": {
+ "201": {
"description": "",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/TestSuite"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/BarInsight"
+ },
+ {
+ "$ref": "#/components/schemas/PieInsight"
+ },
+ {
+ "$ref": "#/components/schemas/LineInsight"
+ },
+ {
+ "$ref": "#/components/schemas/TextInsight"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "bar": "#/components/schemas/BarInsight",
+ "pie": "#/components/schemas/PieInsight",
+ "line": "#/components/schemas/LineInsight",
+ "text": "#/components/schemas/TextInsight"
+ }
+ }
}
}
}
}
},
"tags": [
- "Test Suites"
+ "Insight"
],
"security": [
{
"bearer": []
}
]
- }
- },
- "/test-suite/{testSuiteId}/test": {
+ },
"get": {
- "operationId": "TestSuiteTestController_findAllPaginated",
- "summary": "List Tests",
+ "operationId": "InsightController_findAll",
+ "summary": "Get Insights",
"parameters": [
{
- "name": "testSuiteId",
- "required": true,
- "in": "path",
+ "name": "id",
+ "required": false,
+ "in": "query",
"schema": {
"type": "string"
}
@@ -4528,27 +4548,29 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/TestSuiteTestsPaginatedResponse"
+ "$ref": "#/components/schemas/InsightPaginatedResponse"
}
}
}
}
},
"tags": [
- "Test Suite Tests"
+ "Insight"
],
"security": [
{
"bearer": []
}
]
- },
- "post": {
- "operationId": "TestSuiteTestController_create",
- "summary": "Create Test",
+ }
+ },
+ "/reporting/insight/{id}": {
+ "patch": {
+ "operationId": "InsightController_update",
+ "summary": "Update Insight",
"parameters": [
{
- "name": "testSuiteId",
+ "name": "id",
"required": true,
"in": "path",
"schema": {
@@ -4563,19 +4585,29 @@
"schema": {
"oneOf": [
{
- "$ref": "#/components/schemas/CreateTestSuiteTestVoiceDto",
- "title": "TestSuiteTestVoice"
+ "$ref": "#/components/schemas/UpdateBarInsightFromCallTableDTO",
+ "title": "UpdateBarInsightFromCallTableDTO"
+ },
+ {
+ "$ref": "#/components/schemas/UpdatePieInsightFromCallTableDTO",
+ "title": "UpdatePieInsightFromCallTableDTO"
},
{
- "$ref": "#/components/schemas/CreateTestSuiteTestChatDto",
- "title": "TestSuiteTestChat"
+ "$ref": "#/components/schemas/UpdateLineInsightFromCallTableDTO",
+ "title": "UpdateLineInsightFromCallTableDTO"
+ },
+ {
+ "$ref": "#/components/schemas/UpdateTextInsightFromCallTableDTO",
+ "title": "UpdateTextInsightFromCallTableDTO"
}
],
"discriminator": {
"propertyName": "type",
"mapping": {
- "voice": "#/components/schemas/CreateTestSuiteTestVoiceDto",
- "chat": "#/components/schemas/CreateTestSuiteTestChatDto"
+ "bar": "#/components/schemas/UpdateBarInsightFromCallTableDTO",
+ "pie": "#/components/schemas/UpdatePieInsightFromCallTableDTO",
+ "line": "#/components/schemas/UpdateLineInsightFromCallTableDTO",
+ "text": "#/components/schemas/UpdateTextInsightFromCallTableDTO"
}
}
}
@@ -4583,26 +4615,32 @@
}
},
"responses": {
- "201": {
+ "200": {
"description": "",
"content": {
"application/json": {
"schema": {
"oneOf": [
{
- "$ref": "#/components/schemas/TestSuiteTestVoice",
- "title": "Voice"
+ "$ref": "#/components/schemas/BarInsight"
},
{
- "$ref": "#/components/schemas/TestSuiteTestChat",
- "title": "Chat"
+ "$ref": "#/components/schemas/PieInsight"
+ },
+ {
+ "$ref": "#/components/schemas/LineInsight"
+ },
+ {
+ "$ref": "#/components/schemas/TextInsight"
}
],
"discriminator": {
"propertyName": "type",
"mapping": {
- "voice": "#/components/schemas/TestSuiteTestVoice",
- "chat": "#/components/schemas/TestSuiteTestChat"
+ "bar": "#/components/schemas/BarInsight",
+ "pie": "#/components/schemas/PieInsight",
+ "line": "#/components/schemas/LineInsight",
+ "text": "#/components/schemas/TextInsight"
}
}
}
@@ -4611,28 +4649,18 @@
}
},
"tags": [
- "Test Suite Tests"
+ "Insight"
],
"security": [
{
"bearer": []
}
]
- }
- },
- "/test-suite/{testSuiteId}/test/{id}": {
+ },
"get": {
- "operationId": "TestSuiteTestController_findOne",
- "summary": "Get Test",
+ "operationId": "InsightController_findOne",
+ "summary": "Get Insight",
"parameters": [
- {
- "name": "testSuiteId",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- },
{
"name": "id",
"required": true,
@@ -4650,19 +4678,25 @@
"schema": {
"oneOf": [
{
- "$ref": "#/components/schemas/TestSuiteTestVoice",
- "title": "Voice"
+ "$ref": "#/components/schemas/BarInsight"
+ },
+ {
+ "$ref": "#/components/schemas/PieInsight"
},
{
- "$ref": "#/components/schemas/TestSuiteTestChat",
- "title": "Chat"
+ "$ref": "#/components/schemas/LineInsight"
+ },
+ {
+ "$ref": "#/components/schemas/TextInsight"
}
],
"discriminator": {
"propertyName": "type",
"mapping": {
- "voice": "#/components/schemas/TestSuiteTestVoice",
- "chat": "#/components/schemas/TestSuiteTestChat"
+ "bar": "#/components/schemas/BarInsight",
+ "pie": "#/components/schemas/PieInsight",
+ "line": "#/components/schemas/LineInsight",
+ "text": "#/components/schemas/TextInsight"
}
}
}
@@ -4671,7 +4705,7 @@
}
},
"tags": [
- "Test Suite Tests"
+ "Insight"
],
"security": [
{
@@ -4679,18 +4713,10 @@
}
]
},
- "patch": {
- "operationId": "TestSuiteTestController_update",
- "summary": "Update Test",
+ "delete": {
+ "operationId": "InsightController_remove",
+ "summary": "Delete Insight",
"parameters": [
- {
- "name": "testSuiteId",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- },
{
"name": "id",
"required": true,
@@ -4700,32 +4726,6 @@
}
}
],
- "requestBody": {
- "required": true,
- "content": {
- "application/json": {
- "schema": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/UpdateTestSuiteTestVoiceDto",
- "title": "TestSuiteTestVoice"
- },
- {
- "$ref": "#/components/schemas/UpdateTestSuiteTestChatDto",
- "title": "TestSuiteTestChat"
- }
- ],
- "discriminator": {
- "propertyName": "type",
- "mapping": {
- "voice": "#/components/schemas/UpdateTestSuiteTestVoiceDto",
- "chat": "#/components/schemas/UpdateTestSuiteTestChatDto"
- }
- }
- }
- }
- }
- },
"responses": {
"200": {
"description": "",
@@ -4734,19 +4734,25 @@
"schema": {
"oneOf": [
{
- "$ref": "#/components/schemas/TestSuiteTestVoice",
- "title": "Voice"
+ "$ref": "#/components/schemas/BarInsight"
},
{
- "$ref": "#/components/schemas/TestSuiteTestChat",
- "title": "Chat"
- }
+ "$ref": "#/components/schemas/PieInsight"
+ },
+ {
+ "$ref": "#/components/schemas/LineInsight"
+ },
+ {
+ "$ref": "#/components/schemas/TextInsight"
+ }
],
"discriminator": {
"propertyName": "type",
"mapping": {
- "voice": "#/components/schemas/TestSuiteTestVoice",
- "chat": "#/components/schemas/TestSuiteTestChat"
+ "bar": "#/components/schemas/BarInsight",
+ "pie": "#/components/schemas/PieInsight",
+ "line": "#/components/schemas/LineInsight",
+ "text": "#/components/schemas/TextInsight"
}
}
}
@@ -4755,26 +4761,20 @@
}
},
"tags": [
- "Test Suite Tests"
+ "Insight"
],
"security": [
{
"bearer": []
}
]
- },
- "delete": {
- "operationId": "TestSuiteTestController_remove",
- "summary": "Delete Test",
+ }
+ },
+ "/reporting/insight/{id}/run": {
+ "post": {
+ "operationId": "InsightController_run",
+ "summary": "Run Insight",
"parameters": [
- {
- "name": "testSuiteId",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- },
{
"name": "id",
"required": true,
@@ -4784,36 +4784,113 @@
}
}
],
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/InsightRunDTO"
+ }
+ }
+ }
+ },
"responses": {
"200": {
"description": "",
"content": {
"application/json": {
"schema": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TestSuiteTestVoice",
- "title": "Voice"
- },
- {
- "$ref": "#/components/schemas/TestSuiteTestChat",
- "title": "Chat"
- }
- ],
- "discriminator": {
- "propertyName": "type",
- "mapping": {
- "voice": "#/components/schemas/TestSuiteTestVoice",
- "chat": "#/components/schemas/TestSuiteTestChat"
- }
+ "$ref": "#/components/schemas/InsightRunResponse"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/InsightRunResponse"
+ }
+ }
+ }
+ }
+ },
+ "tags": [
+ "Insight"
+ ],
+ "security": [
+ {
+ "bearer": []
+ }
+ ]
+ }
+ },
+ "/reporting/insight/preview": {
+ "post": {
+ "operationId": "InsightController_preview",
+ "summary": "Preview Insight",
+ "parameters": [],
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateBarInsightFromCallTableDTO",
+ "title": "CreateBarInsightFromCallTableDTO"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePieInsightFromCallTableDTO",
+ "title": "CreatePieInsightFromCallTableDTO"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLineInsightFromCallTableDTO",
+ "title": "CreateLineInsightFromCallTableDTO"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTextInsightFromCallTableDTO",
+ "title": "CreateTextInsightFromCallTableDTO"
}
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "bar": "#/components/schemas/CreateBarInsightFromCallTableDTO",
+ "pie": "#/components/schemas/CreatePieInsightFromCallTableDTO",
+ "line": "#/components/schemas/CreateLineInsightFromCallTableDTO",
+ "text": "#/components/schemas/CreateTextInsightFromCallTableDTO"
+ }
+ }
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/InsightRunResponse"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/InsightRunResponse"
}
}
}
}
},
"tags": [
- "Test Suite Tests"
+ "Insight"
],
"security": [
{
@@ -4822,15 +4899,50 @@
]
}
},
- "/test-suite/{testSuiteId}/run": {
+ "/eval": {
+ "post": {
+ "operationId": "EvalController_create",
+ "summary": "Create Eval",
+ "parameters": [],
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/CreateEvalDTO"
+ }
+ }
+ }
+ },
+ "responses": {
+ "201": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/Eval"
+ }
+ }
+ }
+ }
+ },
+ "tags": [
+ "Eval"
+ ],
+ "security": [
+ {
+ "bearer": []
+ }
+ ]
+ },
"get": {
- "operationId": "TestSuiteRunController_findAllPaginated",
- "summary": "List Test Suite Runs",
+ "operationId": "EvalController_getPaginated",
+ "summary": "List Evals",
"parameters": [
{
- "name": "testSuiteId",
- "required": true,
- "in": "path",
+ "name": "id",
+ "required": false,
+ "in": "query",
"schema": {
"type": "string"
}
@@ -4956,27 +5068,29 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/TestSuiteRunsPaginatedResponse"
+ "$ref": "#/components/schemas/EvalPaginatedResponse"
}
}
}
}
},
"tags": [
- "Test Suite Runs"
+ "Eval"
],
"security": [
{
"bearer": []
}
]
- },
- "post": {
- "operationId": "TestSuiteRunController_create",
- "summary": "Create Test Suite Run",
+ }
+ },
+ "/eval/{id}": {
+ "patch": {
+ "operationId": "EvalController_update",
+ "summary": "Update Eval",
"parameters": [
{
- "name": "testSuiteId",
+ "name": "id",
"required": true,
"in": "path",
"schema": {
@@ -4989,46 +5103,36 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/CreateTestSuiteRunDto"
+ "$ref": "#/components/schemas/UpdateEvalDTO"
}
}
}
},
"responses": {
- "201": {
+ "200": {
"description": "",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/TestSuiteRun"
+ "$ref": "#/components/schemas/Eval"
}
}
}
}
},
"tags": [
- "Test Suite Runs"
+ "Eval"
],
"security": [
{
"bearer": []
}
]
- }
- },
- "/test-suite/{testSuiteId}/run/{id}": {
- "get": {
- "operationId": "TestSuiteRunController_findOne",
- "summary": "Get Test Suite Run",
+ },
+ "delete": {
+ "operationId": "EvalController_remove",
+ "summary": "Delete Eval",
"parameters": [
- {
- "name": "testSuiteId",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- },
{
"name": "id",
"required": true,
@@ -5044,14 +5148,14 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/TestSuiteRun"
+ "$ref": "#/components/schemas/Eval"
}
}
}
}
},
"tags": [
- "Test Suite Runs"
+ "Eval"
],
"security": [
{
@@ -5059,18 +5163,10 @@
}
]
},
- "patch": {
- "operationId": "TestSuiteRunController_update",
- "summary": "Update Test Suite Run",
+ "get": {
+ "operationId": "EvalController_get",
+ "summary": "Get Eval",
"parameters": [
- {
- "name": "testSuiteId",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- },
{
"name": "id",
"required": true,
@@ -5080,49 +5176,67 @@
}
}
],
- "requestBody": {
- "required": true,
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/UpdateTestSuiteRunDto"
- }
- }
- }
- },
"responses": {
"200": {
"description": "",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/TestSuiteRun"
+ "$ref": "#/components/schemas/Eval"
}
}
}
}
},
"tags": [
- "Test Suite Runs"
+ "Eval"
],
"security": [
{
"bearer": []
}
]
- },
+ }
+ },
+ "/eval/run/{id}": {
"delete": {
- "operationId": "TestSuiteRunController_remove",
- "summary": "Delete Test Suite Run",
+ "operationId": "EvalController_removeRun",
+ "summary": "Delete Eval Run",
"parameters": [
{
- "name": "testSuiteId",
+ "name": "id",
"required": true,
"in": "path",
"schema": {
"type": "string"
}
- },
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/EvalRun"
+ }
+ }
+ }
+ }
+ },
+ "tags": [
+ "Eval"
+ ],
+ "security": [
+ {
+ "bearer": []
+ }
+ ]
+ },
+ "get": {
+ "operationId": "EvalController_getRun",
+ "summary": "Get Eval Run",
+ "parameters": [
{
"name": "id",
"required": true,
@@ -5138,14 +5252,14 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/TestSuiteRun"
+ "$ref": "#/components/schemas/EvalRun"
}
}
}
}
},
"tags": [
- "Test Suite Runs"
+ "Eval"
],
"security": [
{
@@ -5154,120 +5268,53 @@
]
}
},
- "/analytics": {
+ "/eval/run": {
"post": {
- "operationId": "AnalyticsController_query",
- "summary": "Create Analytics Queries",
+ "operationId": "EvalController_run",
+ "summary": "Create Eval Run",
"parameters": [],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/AnalyticsQueryDTO"
+ "$ref": "#/components/schemas/CreateEvalRunDTO"
}
}
}
},
"responses": {
"200": {
+ "description": ""
+ },
+ "201": {
"description": "",
"content": {
"application/json": {
"schema": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/AnalyticsQueryResult"
- }
+ "type": "object"
}
}
}
- },
- "201": {
- "description": ""
}
},
"tags": [
- "Analytics"
+ "Eval"
],
"security": [
{
"bearer": []
}
]
- }
- },
- "/logs": {
+ },
"get": {
- "operationId": "LoggingController_logsQuery",
- "summary": "Get Logs",
- "deprecated": true,
+ "operationId": "EvalController_getRunsPaginated",
+ "summary": "List Eval Runs",
"parameters": [
{
- "name": "type",
- "required": false,
- "in": "query",
- "description": "This is the type of the log.",
- "schema": {
- "enum": [
- "API",
- "Webhook",
- "Call",
- "Provider"
- ],
- "type": "string"
- }
- },
- {
- "name": "webhookType",
- "required": false,
- "in": "query",
- "description": "This is the type of the webhook, given the log is from a webhook.",
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "assistantId",
- "required": false,
- "in": "query",
- "description": "This is the ID of the assistant.",
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "phoneNumberId",
- "required": false,
- "in": "query",
- "description": "This is the ID of the phone number.",
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "customerId",
- "required": false,
- "in": "query",
- "description": "This is the ID of the customer.",
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "squadId",
- "required": false,
- "in": "query",
- "description": "This is the ID of the squad.",
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "callId",
+ "name": "id",
"required": false,
"in": "query",
- "description": "This is the ID of the call.",
"schema": {
"type": "string"
}
@@ -5393,93 +5440,128 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/LogsPaginatedResponse"
+ "$ref": "#/components/schemas/EvalRunPaginatedResponse"
}
}
}
}
},
"tags": [
- "Logs"
+ "Eval"
],
"security": [
{
"bearer": []
}
]
- },
- "delete": {
- "operationId": "LoggingController_logsDeleteQuery",
- "summary": "Delete Logs",
- "deprecated": true,
+ }
+ },
+ "/observability/scorecard/{id}": {
+ "get": {
+ "operationId": "ScorecardController_get",
+ "summary": "Get Scorecard",
"parameters": [
{
- "name": "type",
- "required": false,
- "in": "query",
- "description": "This is the type of the log.",
+ "name": "id",
+ "required": true,
+ "in": "path",
"schema": {
- "enum": [
- "API",
- "Webhook",
- "Call",
- "Provider"
- ],
"type": "string"
}
- },
- {
- "name": "assistantId",
- "required": false,
- "in": "query",
- "schema": {
- "type": "string"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/Scorecard"
+ }
+ }
}
- },
+ }
+ },
+ "tags": [
+ "Observability/Scorecard"
+ ],
+ "security": [
{
- "name": "phoneNumberId",
- "required": false,
- "in": "query",
- "description": "This is the ID of the phone number.",
- "schema": {
- "type": "string"
- }
- },
+ "bearer": []
+ }
+ ]
+ },
+ "patch": {
+ "operationId": "ScorecardController_update",
+ "summary": "Update Scorecard",
+ "parameters": [
{
- "name": "customerId",
- "required": false,
- "in": "query",
- "description": "This is the ID of the customer.",
+ "name": "id",
+ "required": true,
+ "in": "path",
"schema": {
"type": "string"
}
- },
- {
- "name": "squadId",
- "required": false,
- "in": "query",
- "description": "This is the ID of the squad.",
- "schema": {
- "type": "string"
+ }
+ ],
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/UpdateScorecardDTO"
+ }
}
- },
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/Scorecard"
+ }
+ }
+ }
+ }
+ },
+ "tags": [
+ "Observability/Scorecard"
+ ],
+ "security": [
{
- "name": "callId",
- "required": false,
- "in": "query",
- "description": "This is the ID of the call.",
+ "bearer": []
+ }
+ ]
+ },
+ "delete": {
+ "operationId": "ScorecardController_remove",
+ "summary": "Delete Scorecard",
+ "parameters": [
+ {
+ "name": "id",
+ "required": true,
+ "in": "path",
"schema": {
"type": "string"
}
}
],
"responses": {
- "202": {
- "description": ""
+ "200": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/Scorecard"
+ }
+ }
+ }
}
},
"tags": [
- "Logs"
+ "Observability/Scorecard"
],
"security": [
{
@@ -5488,25 +5570,15 @@
]
}
},
- "/structured-output": {
+ "/observability/scorecard": {
"get": {
- "operationId": "StructuredOutputController_findAll",
- "summary": "List Structured Outputs",
+ "operationId": "ScorecardController_getPaginated",
+ "summary": "List Scorecards",
"parameters": [
{
"name": "id",
"required": false,
"in": "query",
- "description": "This will return structured outputs where the id matches the specified value.",
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "name",
- "required": false,
- "in": "query",
- "description": "This will return structured outputs where the name matches the specified value.",
"schema": {
"type": "string"
}
@@ -5632,14 +5704,14 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/StructuredOutputPaginatedResponse"
+ "$ref": "#/components/schemas/ScorecardPaginatedResponse"
}
}
}
}
},
"tags": [
- "Structured Outputs"
+ "Observability/Scorecard"
],
"security": [
{
@@ -5648,15 +5720,15 @@
]
},
"post": {
- "operationId": "StructuredOutputController_create",
- "summary": "Create Structured Output",
+ "operationId": "ScorecardController_create",
+ "summary": "Create Scorecard",
"parameters": [],
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/CreateStructuredOutputDTO"
+ "$ref": "#/components/schemas/CreateScorecardDTO"
}
}
}
@@ -5667,136 +5739,14 @@
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/StructuredOutput"
- }
- }
- }
- }
- },
- "tags": [
- "Structured Outputs"
- ],
- "security": [
- {
- "bearer": []
- }
- ]
- }
- },
- "/structured-output/{id}": {
- "get": {
- "operationId": "StructuredOutputController_findOne",
- "summary": "Get Structured Output",
- "parameters": [
- {
- "name": "id",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/StructuredOutput"
- }
- }
- }
- }
- },
- "tags": [
- "Structured Outputs"
- ],
- "security": [
- {
- "bearer": []
- }
- ]
- },
- "patch": {
- "operationId": "StructuredOutputController_update",
- "summary": "Update Structured Output",
- "parameters": [
- {
- "name": "id",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "schemaOverride",
- "required": true,
- "in": "query",
- "schema": {
- "type": "string"
- }
- }
- ],
- "requestBody": {
- "required": true,
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/UpdateStructuredOutputDTO"
- }
- }
- }
- },
- "responses": {
- "200": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/StructuredOutput"
- }
- }
- }
- }
- },
- "tags": [
- "Structured Outputs"
- ],
- "security": [
- {
- "bearer": []
- }
- ]
- },
- "delete": {
- "operationId": "StructuredOutputController_remove",
- "summary": "Delete Structured Output",
- "parameters": [
- {
- "name": "id",
- "required": true,
- "in": "path",
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/StructuredOutput"
+ "$ref": "#/components/schemas/Scorecard"
}
}
}
}
},
"tags": [
- "Structured Outputs"
+ "Observability/Scorecard"
],
"security": [
{
@@ -5825,6 +5775,7 @@
"description": "The provider (e.g., 11labs)",
"schema": {
"enum": [
+ "cartesia",
"11labs"
],
"type": "string"
@@ -5875,6 +5826,7 @@
"description": "The provider (e.g., 11labs)",
"schema": {
"enum": [
+ "cartesia",
"11labs"
],
"type": "string"
@@ -6057,6 +6009,7 @@
"description": "The provider (e.g., 11labs)",
"schema": {
"enum": [
+ "cartesia",
"11labs"
],
"type": "string"
@@ -6119,6 +6072,7 @@
"description": "The provider (e.g., 11labs)",
"schema": {
"enum": [
+ "cartesia",
"11labs"
],
"type": "string"
@@ -6181,6 +6135,7 @@
"description": "The provider (e.g., 11labs)",
"schema": {
"enum": [
+ "cartesia",
"11labs"
],
"type": "string"
@@ -6232,6 +6187,49 @@
}
]
}
+ },
+ "/analytics": {
+ "post": {
+ "operationId": "AnalyticsController_query",
+ "summary": "Create Analytics Queries",
+ "parameters": [],
+ "requestBody": {
+ "required": true,
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/AnalyticsQueryDTO"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "",
+ "content": {
+ "application/json": {
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/AnalyticsQueryResult"
+ }
+ }
+ }
+ }
+ },
+ "201": {
+ "description": ""
+ }
+ },
+ "tags": [
+ "Analytics"
+ ],
+ "security": [
+ {
+ "bearer": []
+ }
+ ]
+ }
}
},
"info": {
@@ -6256,495 +6254,6 @@
}
},
"schemas": {
- "AnalysisCostBreakdown": {
- "type": "object",
- "properties": {
- "summary": {
- "type": "number",
- "description": "This is the cost to summarize the call."
- },
- "summaryPromptTokens": {
- "type": "number",
- "description": "This is the number of prompt tokens used to summarize the call."
- },
- "summaryCompletionTokens": {
- "type": "number",
- "description": "This is the number of completion tokens used to summarize the call."
- },
- "structuredData": {
- "type": "number",
- "description": "This is the cost to extract structured data from the call."
- },
- "structuredDataPromptTokens": {
- "type": "number",
- "description": "This is the number of prompt tokens used to extract structured data from the call."
- },
- "structuredDataCompletionTokens": {
- "type": "number",
- "description": "This is the number of completion tokens used to extract structured data from the call."
- },
- "successEvaluation": {
- "type": "number",
- "description": "This is the cost to evaluate if the call was successful."
- },
- "successEvaluationPromptTokens": {
- "type": "number",
- "description": "This is the number of prompt tokens used to evaluate if the call was successful."
- },
- "successEvaluationCompletionTokens": {
- "type": "number",
- "description": "This is the number of completion tokens used to evaluate if the call was successful."
- },
- "structuredOutput": {
- "type": "number",
- "description": "This is the cost to evaluate structuredOutputs from the call."
- },
- "structuredOutputPromptTokens": {
- "type": "number",
- "description": "This is the number of prompt tokens used to evaluate structuredOutputs from the call."
- },
- "structuredOutputCompletionTokens": {
- "type": "number",
- "description": "This is the number of completion tokens used to evaluate structuredOutputs from the call."
- }
- }
- },
- "CostBreakdown": {
- "type": "object",
- "properties": {
- "transport": {
- "type": "number",
- "description": "This is the cost of the transport provider, like Twilio or Vonage."
- },
- "stt": {
- "type": "number",
- "description": "This is the cost of the speech-to-text service."
- },
- "llm": {
- "type": "number",
- "description": "This is the cost of the language model."
- },
- "tts": {
- "type": "number",
- "description": "This is the cost of the text-to-speech service."
- },
- "vapi": {
- "type": "number",
- "description": "This is the cost of Vapi."
- },
- "chat": {
- "type": "number",
- "description": "This is the cost of chat interactions."
- },
- "total": {
- "type": "number",
- "description": "This is the total cost of the call."
- },
- "llmPromptTokens": {
- "type": "number",
- "description": "This is the LLM prompt tokens used for the call."
- },
- "llmCompletionTokens": {
- "type": "number",
- "description": "This is the LLM completion tokens used for the call."
- },
- "ttsCharacters": {
- "type": "number",
- "description": "This is the TTS characters used for the call."
- },
- "analysisCostBreakdown": {
- "description": "This is the cost of the analysis.",
- "allOf": [
- {
- "$ref": "#/components/schemas/AnalysisCostBreakdown"
- }
- ]
- }
- }
- },
- "TranscriptPlan": {
- "type": "object",
- "properties": {
- "enabled": {
- "type": "boolean",
- "description": "This determines whether the transcript is stored in `call.artifact.transcript`. Defaults to true.\n\n@default true",
- "example": true
- },
- "assistantName": {
- "type": "string",
- "description": "This is the name of the assistant in the transcript. Defaults to 'AI'.\n\nUsage:\n- If you want to change the name of the assistant in the transcript, set this. Example, here is what the transcript would look like with `assistantName` set to 'Buyer':\n```\nUser: Hello, how are you?\nBuyer: I'm fine.\nUser: Do you want to buy a car?\nBuyer: No.\n```\n\n@default 'AI'"
- },
- "userName": {
- "type": "string",
- "description": "This is the name of the user in the transcript. Defaults to 'User'.\n\nUsage:\n- If you want to change the name of the user in the transcript, set this. Example, here is what the transcript would look like with `userName` set to 'Seller':\n```\nSeller: Hello, how are you?\nAI: I'm fine.\nSeller: Do you want to buy a car?\nAI: No.\n```\n\n@default 'User'"
- }
- }
- },
- "ArtifactPlan": {
- "type": "object",
- "properties": {
- "recordingEnabled": {
- "type": "boolean",
- "description": "This determines whether assistant's calls are recorded. Defaults to true.\n\nUsage:\n- If you don't want to record the calls, set this to false.\n- If you want to record the calls when `assistant.hipaaEnabled` (deprecated) or `assistant.compliancePlan.hipaaEnabled` explicity set this to true and make sure to provide S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nYou can find the recording at `call.artifact.recordingUrl` and `call.artifact.stereoRecordingUrl` after the call is ended.\n\n@default true",
- "example": true
- },
- "recordingFormat": {
- "type": "string",
- "description": "This determines the format of the recording. Defaults to `wav;l16`.\n\n@default 'wav;l16'",
- "enum": [
- "wav;l16",
- "mp3"
- ]
- },
- "videoRecordingEnabled": {
- "type": "boolean",
- "description": "This determines whether the video is recorded during the call. Defaults to false. Only relevant for `webCall` type.\n\nYou can find the video recording at `call.artifact.videoRecordingUrl` after the call is ended.\n\n@default false",
- "example": false
- },
- "pcapEnabled": {
- "type": "boolean",
- "description": "This determines whether the SIP packet capture is enabled. Defaults to true. Only relevant for `phone` type calls where phone number's provider is `vapi` or `byo-phone-number`.\n\nYou can find the packet capture at `call.artifact.pcapUrl` after the call is ended.\n\n@default true",
- "example": true
- },
- "pcapS3PathPrefix": {
- "type": "string",
- "description": "This is the path where the SIP packet capture will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nIf credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it.\n\nUsage:\n- If you want to upload the packet capture to a specific path, set this to the path. Example: `/my-assistant-captures`.\n- If you want to upload the packet capture to the root of the bucket, set this to `/`.\n\n@default '/'",
- "example": "/pcaps"
- },
- "loggingEnabled": {
- "type": "boolean",
- "description": "This determines whether the call logs are enabled. Defaults to true.\n\n@default true",
- "example": true
- },
- "transcriptPlan": {
- "description": "This is the plan for `call.artifact.transcript`. To disable, set `transcriptPlan.enabled` to false.",
- "allOf": [
- {
- "$ref": "#/components/schemas/TranscriptPlan"
- }
- ]
- },
- "recordingPath": {
- "type": "string",
- "description": "This is the path where the recording will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nIf credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it.\n\nUsage:\n- If you want to upload the recording to a specific path, set this to the path. Example: `/my-assistant-recordings`.\n- If you want to upload the recording to the root of the bucket, set this to `/`.\n\n@default '/'"
- },
- "structuredOutputIds": {
- "description": "This is an array of structured output IDs to be calculated during the call.\nThe outputs will be extracted and stored in `call.artifact.structuredOutputs` after the call is ended.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "loggingPath": {
- "type": "string",
- "description": "This is the path where the call logs will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nIf credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it.\n\nUsage:\n- If you want to upload the call logs to a specific path, set this to the path. Example: `/my-assistant-logs`.\n- If you want to upload the call logs to the root of the bucket, set this to `/`.\n\n@default '/'"
- }
- }
- },
- "Analysis": {
- "type": "object",
- "properties": {
- "summary": {
- "type": "string",
- "description": "This is the summary of the call. Customize by setting `assistant.analysisPlan.summaryPrompt`."
- },
- "structuredData": {
- "type": "object",
- "description": "This is the structured data extracted from the call. Customize by setting `assistant.analysisPlan.structuredDataPrompt` and/or `assistant.analysisPlan.structuredDataSchema`."
- },
- "structuredDataMulti": {
- "description": "This is the structured data catalog of the call. Customize by setting `assistant.analysisPlan.structuredDataMultiPlan`.",
- "type": "array",
- "items": {
- "type": "object"
- }
- },
- "successEvaluation": {
- "type": "string",
- "description": "This is the evaluation of the call. Customize by setting `assistant.analysisPlan.successEvaluationPrompt` and/or `assistant.analysisPlan.successEvaluationRubric`."
- }
- }
- },
- "Monitor": {
- "type": "object",
- "properties": {
- "listenUrl": {
- "type": "string",
- "description": "This is the URL where the assistant's calls can be listened to in real-time. To enable, set `assistant.monitorPlan.listenEnabled` to `true`."
- },
- "controlUrl": {
- "type": "string",
- "description": "This is the URL where the assistant's calls can be controlled in real-time. To enable, set `assistant.monitorPlan.controlEnabled` to `true`."
- }
- }
- },
- "OpenAIMessage": {
- "type": "object",
- "properties": {
- "content": {
- "type": "string",
- "nullable": true,
- "maxLength": 100000000
- },
- "role": {
- "type": "string",
- "enum": [
- "assistant",
- "function",
- "user",
- "system",
- "tool"
- ]
- }
- },
- "required": [
- "content",
- "role"
- ]
- },
- "Mono": {
- "type": "object",
- "properties": {
- "combinedUrl": {
- "type": "string",
- "description": "This is the combined recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`."
- },
- "assistantUrl": {
- "type": "string",
- "description": "This is the mono recording url for the assistant. To enable, set `assistant.artifactPlan.recordingEnabled`."
- },
- "customerUrl": {
- "type": "string",
- "description": "This is the mono recording url for the customer. To enable, set `assistant.artifactPlan.recordingEnabled`."
- }
- }
- },
- "Recording": {
- "type": "object",
- "properties": {
- "stereoUrl": {
- "type": "string",
- "description": "This is the stereo recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`."
- },
- "videoUrl": {
- "type": "string",
- "description": "This is the video recording url for the call. To enable, set `assistant.artifactPlan.videoRecordingEnabled`."
- },
- "videoRecordingStartDelaySeconds": {
- "type": "number",
- "description": "This is video recording start delay in ms. To enable, set `assistant.artifactPlan.videoRecordingEnabled`. This can be used to align the playback of the recording with artifact.messages timestamps."
- },
- "mono": {
- "description": "This is the mono recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Mono"
- }
- ]
- }
- }
- },
- "NodeArtifact": {
- "type": "object",
- "properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that were spoken during the node.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
- },
- {
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/BotMessage",
- "title": "BotMessage"
- },
- {
- "$ref": "#/components/schemas/ToolCallMessage",
- "title": "ToolCallMessage"
- },
- {
- "$ref": "#/components/schemas/ToolCallResultMessage",
- "title": "ToolCallResultMessage"
- }
- ]
- }
- },
- "nodeName": {
- "type": "string",
- "description": "This is the node name."
- },
- "variableValues": {
- "type": "object",
- "description": "These are the variable values that were extracted from the node."
- }
- }
- },
- "TurnLatency": {
- "type": "object",
- "properties": {
- "modelLatency": {
- "type": "number",
- "description": "This is the model latency for the first token."
- },
- "voiceLatency": {
- "type": "number",
- "description": "This is the voice latency from the model output."
- },
- "transcriberLatency": {
- "type": "number",
- "description": "This is the transcriber latency from the user speech."
- },
- "endpointingLatency": {
- "type": "number",
- "description": "This is the endpointing latency."
- },
- "turnLatency": {
- "type": "number",
- "description": "This is the latency for the whole turn."
- }
- }
- },
- "PerformanceMetrics": {
- "type": "object",
- "properties": {
- "turnLatencies": {
- "description": "These are the individual latencies for each turn.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/TurnLatency"
- }
- },
- "modelLatencyAverage": {
- "type": "number",
- "description": "This is the average latency for the model to output the first token."
- },
- "voiceLatencyAverage": {
- "type": "number",
- "description": "This is the average latency for the text to speech."
- },
- "transcriberLatencyAverage": {
- "type": "number",
- "description": "This is the average latency for the transcriber."
- },
- "endpointingLatencyAverage": {
- "type": "number",
- "description": "This is the average latency for the endpointing."
- },
- "turnLatencyAverage": {
- "type": "number",
- "description": "This is the average latency for complete turns."
- }
- }
- },
- "Artifact": {
- "type": "object",
- "properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that were spoken during the call.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
- },
- {
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/BotMessage",
- "title": "BotMessage"
- },
- {
- "$ref": "#/components/schemas/ToolCallMessage",
- "title": "ToolCallMessage"
- },
- {
- "$ref": "#/components/schemas/ToolCallResultMessage",
- "title": "ToolCallResultMessage"
- }
- ]
- }
- },
- "messagesOpenAIFormatted": {
- "description": "These are the messages that were spoken during the call, formatted for OpenAI.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIMessage"
- }
- },
- "recordingUrl": {
- "type": "string",
- "description": "This is the recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`.",
- "deprecated": true
- },
- "stereoRecordingUrl": {
- "type": "string",
- "description": "This is the stereo recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`.",
- "deprecated": true
- },
- "videoRecordingUrl": {
- "type": "string",
- "description": "This is video recording url for the call. To enable, set `assistant.artifactPlan.videoRecordingEnabled`.",
- "deprecated": true
- },
- "videoRecordingStartDelaySeconds": {
- "type": "number",
- "description": "This is video recording start delay in ms. To enable, set `assistant.artifactPlan.videoRecordingEnabled`. This can be used to align the playback of the recording with artifact.messages timestamps.",
- "deprecated": true
- },
- "recording": {
- "description": "This is the recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Recording"
- }
- ]
- },
- "transcript": {
- "type": "string",
- "description": "This is the transcript of the call. This is derived from `artifact.messages` but provided for convenience."
- },
- "pcapUrl": {
- "type": "string",
- "description": "This is the packet capture url for the call. This is only available for `phone` type calls where phone number's provider is `vapi` or `byo-phone-number`."
- },
- "logUrl": {
- "type": "string",
- "description": "This is the url for the call logs. This includes all logging output during the call for debugging purposes."
- },
- "nodes": {
- "description": "This is the history of workflow nodes that were executed during the call.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/NodeArtifact"
- }
- },
- "variableValues": {
- "type": "object",
- "description": "These are the variable values at the end of the workflow execution."
- },
- "performanceMetrics": {
- "description": "This is the performance metrics for the call. It contains the turn latency, broken down by component.",
- "allOf": [
- {
- "$ref": "#/components/schemas/PerformanceMetrics"
- }
- ]
- },
- "structuredOutputs": {
- "type": "object",
- "description": "These are the structured outputs that will be extracted from the call.\nTo enable, set `assistant.artifactPlan.structuredOutputIds` with the IDs of the structured outputs you want to extract."
- }
- }
- },
"FallbackTranscriberPlan": {
"type": "object",
"properties": {
@@ -6795,6 +6304,10 @@
{
"$ref": "#/components/schemas/FallbackCartesiaTranscriber",
"title": "Cartesia"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackSonioxTranscriber",
+ "title": "Soniox"
}
]
}
@@ -6818,6 +6331,7 @@
"type": "string",
"description": "This is the language that will be set for the transcription.",
"enum": [
+ "multi",
"en"
]
},
@@ -6835,29 +6349,42 @@
},
"endOfTurnConfidenceThreshold": {
"type": "number",
- "description": "This is the end of turn confidence threshold. The minimum confidence that the end of turn is detected.\n\n@min 0\n@max 1\n@default 0.7",
+ "description": "This is the end of turn confidence threshold. The minimum confidence that the end of turn is detected.\nNote: Only used if startSpeakingPlan.smartEndpointingPlan is not set.\n@min 0\n@max 1\n@default 0.7",
"minimum": 0,
"maximum": 1,
"example": 0.7
},
"minEndOfTurnSilenceWhenConfident": {
"type": "number",
- "description": "This is the minimum end of turn silence when confident in milliseconds.\n\n@default 160",
+ "description": "This is the minimum end of turn silence when confident in milliseconds.\nNote: Only used if startSpeakingPlan.smartEndpointingPlan is not set.\n@default 160",
"minimum": 0,
"example": 160
},
"wordFinalizationMaxWaitTime": {
"type": "number",
- "description": "This is the maximum wait time for word finalization in milliseconds.\n\n@default 160",
+ "deprecated": true,
"minimum": 0,
"example": 160
},
"maxTurnSilence": {
"type": "number",
- "description": "This is the maximum turn silence time in milliseconds.\n\n@default 400",
+ "description": "This is the maximum turn silence time in milliseconds.\nNote: Only used if startSpeakingPlan.smartEndpointingPlan is not set.\n@default 400",
"minimum": 0,
"example": 400
},
+ "vadAssistedEndpointingEnabled": {
+ "type": "boolean",
+ "description": "Use VAD to assist with endpointing decisions from the transcriber.\nWhen enabled, transcriber endpointing will be buffered if VAD detects the user is still speaking, preventing premature turn-taking.\nWhen disabled, transcriber endpointing will be used immediately regardless of VAD state, allowing for quicker but more aggressive turn-taking.\nNote: Only used if startSpeakingPlan.smartEndpointingPlan is not set.\n\n@default true",
+ "example": true
+ },
+ "speechModel": {
+ "type": "string",
+ "description": "This is the speech model used for the streaming session.\nNote: Keyterms prompting is not supported with multilingual streaming.\n@default 'universal-streaming-english'",
+ "enum": [
+ "universal-streaming-english",
+ "universal-streaming-multilingual"
+ ]
+ },
"realtimeUrl": {
"type": "string",
"description": "The WebSocket URL that the transcriber connects to."
@@ -6870,6 +6397,14 @@
"maxLength": 2500
}
},
+ "keytermsPrompt": {
+ "description": "Keyterms prompting improves recognition accuracy for specific words and phrases.\nCan include up to 100 keyterms, each up to 50 characters.\nCosts an additional $0.04/hour when enabled.",
+ "type": "array",
+ "items": {
+ "type": "string",
+ "maxLength": 50
+ }
+ },
"endUtteranceSilenceThreshold": {
"type": "number",
"description": "The duration of the end utterance silence threshold in milliseconds."
@@ -6879,7 +6414,7 @@
"description": "Disable partial transcripts.\nSet to `true` to not receive partial transcripts. Defaults to `false`."
},
"fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "description": "This is the plan for transcriber provider fallbacks in the event that the primary transcriber provider fails.",
"allOf": [
{
"$ref": "#/components/schemas/FallbackTranscriberPlan"
@@ -7072,7 +6607,7 @@
"maximum": 70000
},
"fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "description": "This is the plan for transcriber provider fallbacks in the event that the primary transcriber provider fails.",
"allOf": [
{
"$ref": "#/components/schemas/FallbackTranscriberPlan"
@@ -7290,7 +6825,7 @@
]
},
"fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "description": "This is the plan for transcriber provider fallbacks in the event that the primary transcriber provider fails.",
"allOf": [
{
"$ref": "#/components/schemas/FallbackTranscriberPlan"
@@ -7327,10 +6862,23 @@
"minimum": 0,
"maximum": 10,
"example": 1
- }
- },
- "required": [
- "type",
+ },
+ "excludedStatusCodes": {
+ "description": "This is the excluded status codes. If the response status code is in this list, the request will not be retried.\nBy default, the request will be retried for any non-2xx status code.",
+ "example": [
+ 400,
+ 401,
+ 403,
+ 404
+ ],
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
+ }
+ },
+ "required": [
+ "type",
"maxRetries",
"baseDelaySeconds"
]
@@ -7345,13 +6893,30 @@
"maximum": 300,
"example": 20
},
+ "credentialId": {
+ "type": "string",
+ "description": "The credential ID for server authentication",
+ "example": "550e8400-e29b-41d4-a716-446655440000"
+ },
+ "staticIpAddressesEnabled": {
+ "type": "boolean",
+ "description": "If enabled, requests will originate from a static set of IPs owned and managed by Vapi.\n\n@default false",
+ "example": false
+ },
+ "encryptedPaths": {
+ "type": "array",
+ "description": "This is the paths to encrypt in the request body if credentialId and encryptionPlan are defined.",
+ "items": {
+ "type": "string"
+ }
+ },
"url": {
"type": "string",
"description": "This is where the request will be sent."
},
"headers": {
"type": "object",
- "description": "These are the headers to include in the request.\n\nEach key-value pair represents a header name and its value."
+ "description": "These are the headers to include in the request.\n\nEach key-value pair represents a header name and its value.\n\nNote: Specifying an Authorization header here will override the authorization provided by the `credentialId` (if provided). This is an anti-pattern and should be avoided outside of edge case scenarios."
},
"backoffPlan": {
"description": "This is the backoff plan if the request fails. Defaults to undefined (the request will not be retried).\n\n@default undefined (the request will not be retried)",
@@ -7382,7 +6947,7 @@
]
},
"fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "description": "This is the plan for transcriber provider fallbacks in the event that the primary transcriber provider fails.",
"allOf": [
{
"$ref": "#/components/schemas/FallbackTranscriberPlan"
@@ -7442,7 +7007,8 @@
"base-conversationalai",
"base-voicemail",
"base-video",
- "whisper"
+ "whisper",
+ "flux-general-en"
]
},
{
@@ -7479,6 +7045,7 @@
"es-LATAM",
"et",
"eu",
+ "fa",
"fi",
"fr",
"fr-CA",
@@ -7508,6 +7075,7 @@
"pl",
"pt",
"pt-BR",
+ "pt-PT",
"ro",
"ru",
"sk",
@@ -7552,6 +7120,11 @@
"description": "If set to true, this will cause deepgram to convert spoken numbers to literal numerals. For example, \"my phone number is nine-seven-two...\" would become \"my phone number is 972...\"\n\n@default false",
"example": false
},
+ "profanityFilter": {
+ "type": "boolean",
+ "description": "If set to true, Deepgram will replace profanity in transcripts with surrounding asterisks, e.g. \"f***\".\n\n@default false",
+ "example": false
+ },
"confidenceThreshold": {
"type": "number",
"description": "Transcripts below this confidence threshold will be discarded.\n\n@default 0.4",
@@ -7559,6 +7132,27 @@
"maximum": 1,
"example": 0.4
},
+ "eagerEotThreshold": {
+ "type": "number",
+ "description": "Eager end-of-turn confidence required to fire an eager end-of-turn event. Setting a value here will enable EagerEndOfTurn and SpeechResumed events. It is disabled by default. Only used with Flux models.",
+ "minimum": 0,
+ "maximum": 1,
+ "example": 0.3
+ },
+ "eotThreshold": {
+ "type": "number",
+ "description": "End-of-turn confidence required to finish a turn. Only used with Flux models.\n\n@default 0.7",
+ "minimum": 0.5,
+ "maximum": 0.9,
+ "example": 0.7
+ },
+ "eotTimeoutMs": {
+ "type": "number",
+ "description": "A turn will be finished when this much time has passed after speech, regardless of EOT confidence. Only used with Flux models.\n\n@default 5000",
+ "minimum": 500,
+ "maximum": 10000,
+ "example": 5000
+ },
"keywords": {
"description": "These keywords are passed to the transcription model to help it pick up use-case specific words. Anything that may not be a common word, like your company name, should be added here.",
"type": "array",
@@ -7581,7 +7175,7 @@
"maximum": 500
},
"fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "description": "This is the plan for transcriber provider fallbacks in the event that the primary transcriber provider fails.",
"allOf": [
{
"$ref": "#/components/schemas/FallbackTranscriberPlan"
@@ -7604,14 +7198,20 @@
]
},
"model": {
- "type": "string",
"description": "This is the model that will be used for the transcription.",
- "enum": [
- "scribe_v1"
+ "oneOf": [
+ {
+ "enum": [
+ "scribe_v1",
+ "scribe_v2",
+ "scribe_v2_realtime"
+ ]
+ }
]
},
"language": {
"type": "string",
+ "description": "This is the language that will be used for the transcription.",
"enum": [
"aa",
"ab",
@@ -7800,8 +7400,36 @@
"zu"
]
},
+ "silenceThresholdSeconds": {
+ "type": "number",
+ "description": "This is the number of seconds of silence before VAD commits (0.3-3.0).",
+ "minimum": 0.3,
+ "maximum": 3,
+ "example": 1.5
+ },
+ "confidenceThreshold": {
+ "type": "number",
+ "description": "This is the VAD sensitivity (0.1-0.9, lower indicates more sensitive).",
+ "minimum": 0.1,
+ "maximum": 0.9,
+ "example": 0.4
+ },
+ "minSpeechDurationMs": {
+ "type": "number",
+ "description": "This is the minimum speech duration for VAD (50-2000ms).",
+ "minimum": 50,
+ "maximum": 2000,
+ "example": 100
+ },
+ "minSilenceDurationMs": {
+ "type": "number",
+ "description": "This is the minimum silence duration for VAD (50-2000ms).",
+ "minimum": 50,
+ "maximum": 2000,
+ "example": 100
+ },
"fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "description": "This is the plan for transcriber provider fallbacks in the event that the primary transcriber provider fails.",
"allOf": [
{
"$ref": "#/components/schemas/FallbackTranscriberPlan"
@@ -8137,8 +7765,259 @@
}
]
},
+ "region": {
+ "type": "string",
+ "enum": [
+ "us-west",
+ "eu-west"
+ ],
+ "description": "Region for processing audio (us-west or eu-west)",
+ "example": "us-west"
+ },
+ "receivePartialTranscripts": {
+ "type": "boolean",
+ "example": false,
+ "description": "Enable partial transcripts for low-latency streaming transcription"
+ },
"fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "description": "This is the plan for transcriber provider fallbacks in the event that the primary transcriber provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackTranscriberPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider"
+ ]
+ },
+ "SonioxTranscriber": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "soniox"
+ ]
+ },
+ "model": {
+ "type": "string",
+ "enum": [
+ "stt-rt-v4"
+ ],
+ "description": "The Soniox model to use for transcription."
+ },
+ "language": {
+ "type": "string",
+ "enum": [
+ "aa",
+ "ab",
+ "ae",
+ "af",
+ "ak",
+ "am",
+ "an",
+ "ar",
+ "as",
+ "av",
+ "ay",
+ "az",
+ "ba",
+ "be",
+ "bg",
+ "bh",
+ "bi",
+ "bm",
+ "bn",
+ "bo",
+ "br",
+ "bs",
+ "ca",
+ "ce",
+ "ch",
+ "co",
+ "cr",
+ "cs",
+ "cu",
+ "cv",
+ "cy",
+ "da",
+ "de",
+ "dv",
+ "dz",
+ "ee",
+ "el",
+ "en",
+ "eo",
+ "es",
+ "et",
+ "eu",
+ "fa",
+ "ff",
+ "fi",
+ "fj",
+ "fo",
+ "fr",
+ "fy",
+ "ga",
+ "gd",
+ "gl",
+ "gn",
+ "gu",
+ "gv",
+ "ha",
+ "he",
+ "hi",
+ "ho",
+ "hr",
+ "ht",
+ "hu",
+ "hy",
+ "hz",
+ "ia",
+ "id",
+ "ie",
+ "ig",
+ "ii",
+ "ik",
+ "io",
+ "is",
+ "it",
+ "iu",
+ "ja",
+ "jv",
+ "ka",
+ "kg",
+ "ki",
+ "kj",
+ "kk",
+ "kl",
+ "km",
+ "kn",
+ "ko",
+ "kr",
+ "ks",
+ "ku",
+ "kv",
+ "kw",
+ "ky",
+ "la",
+ "lb",
+ "lg",
+ "li",
+ "ln",
+ "lo",
+ "lt",
+ "lu",
+ "lv",
+ "mg",
+ "mh",
+ "mi",
+ "mk",
+ "ml",
+ "mn",
+ "mr",
+ "ms",
+ "mt",
+ "my",
+ "na",
+ "nb",
+ "nd",
+ "ne",
+ "ng",
+ "nl",
+ "nn",
+ "no",
+ "nr",
+ "nv",
+ "ny",
+ "oc",
+ "oj",
+ "om",
+ "or",
+ "os",
+ "pa",
+ "pi",
+ "pl",
+ "ps",
+ "pt",
+ "qu",
+ "rm",
+ "rn",
+ "ro",
+ "ru",
+ "rw",
+ "sa",
+ "sc",
+ "sd",
+ "se",
+ "sg",
+ "si",
+ "sk",
+ "sl",
+ "sm",
+ "sn",
+ "so",
+ "sq",
+ "sr",
+ "ss",
+ "st",
+ "su",
+ "sv",
+ "sw",
+ "ta",
+ "te",
+ "tg",
+ "th",
+ "ti",
+ "tk",
+ "tl",
+ "tn",
+ "to",
+ "tr",
+ "ts",
+ "tt",
+ "tw",
+ "ty",
+ "ug",
+ "uk",
+ "ur",
+ "uz",
+ "ve",
+ "vi",
+ "vo",
+ "wa",
+ "wo",
+ "xh",
+ "yi",
+ "yue",
+ "yo",
+ "za",
+ "zh",
+ "zu"
+ ],
+ "description": "The language for transcription. Uses ISO 639-1 codes. Soniox supports 60+ languages with a single universal model."
+ },
+ "languageHintsStrict": {
+ "type": "boolean",
+ "description": "When enabled, restricts transcription to the language specified in the language field. When disabled, the model can detect and transcribe any of 60+ supported languages. Defaults to true."
+ },
+ "maxEndpointDelayMs": {
+ "type": "number",
+ "minimum": 500,
+ "maximum": 3000,
+ "description": "Maximum delay in milliseconds between when the speaker stops and when the endpoint is detected. Lower values mean faster turn-taking but more false endpoints. Range: 500-3000. Default: 500."
+ },
+ "customVocabulary": {
+ "description": "Custom vocabulary terms to boost recognition accuracy. Useful for brand names, product names, and domain-specific terminology. Maps to Soniox context.terms.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "fallbackPlan": {
+ "description": "This is the plan for transcriber provider fallbacks in the event that the primary transcriber provider fails.",
"allOf": [
{
"$ref": "#/components/schemas/FallbackTranscriberPlan"
@@ -8150,6 +8029,30 @@
"provider"
]
},
+ "SpeechmaticsCustomVocabularyItem": {
+ "type": "object",
+ "properties": {
+ "content": {
+ "type": "string",
+ "description": "The word or phrase to add to the custom vocabulary.",
+ "minLength": 1,
+ "example": "Speechmatics"
+ },
+ "soundsLike": {
+ "description": "Alternative phonetic representations of how the word might sound. This helps recognition when the word might be pronounced differently.",
+ "example": [
+ "speech mattix"
+ ],
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ },
+ "required": [
+ "content"
+ ]
+ },
"SpeechmaticsTranscriber": {
"type": "object",
"properties": {
@@ -8172,6 +8075,7 @@
"enum": [
"auto",
"ar",
+ "ar_en",
"ba",
"eu",
"be",
@@ -8203,8 +8107,10 @@
"lv",
"lt",
"ms",
+ "en_ms",
"mt",
"cmn",
+ "cmn_en",
"mr",
"mn",
"no",
@@ -8216,9 +8122,12 @@
"sk",
"sl",
"es",
+ "en_es",
"sw",
"sv",
+ "tl",
"ta",
+ "en_ta",
"th",
"tr",
"uk",
@@ -8228,8 +8137,88 @@
"cy"
]
},
+ "operatingPoint": {
+ "type": "string",
+ "description": "This is the operating point for the transcription. Choose between `standard` for faster turnaround with strong accuracy or `enhanced` for highest accuracy when precision is critical.\n\n@default 'enhanced'",
+ "example": "enhanced",
+ "enum": [
+ "standard",
+ "enhanced"
+ ],
+ "default": "enhanced"
+ },
+ "region": {
+ "type": "string",
+ "description": "This is the region for the Speechmatics API. Choose between EU (Europe) and US (United States) regions for lower latency and data sovereignty compliance.\n\n@default 'eu'",
+ "example": "us",
+ "enum": [
+ "eu",
+ "us"
+ ],
+ "default": "eu"
+ },
+ "enableDiarization": {
+ "type": "boolean",
+ "description": "This enables speaker diarization, which identifies and separates speakers in the transcription. Essential for multi-speaker conversations and conference calls.\n\n@default false",
+ "example": true,
+ "default": false
+ },
+ "maxDelay": {
+ "type": "number",
+ "description": "This sets the maximum delay in milliseconds for partial transcripts. Balances latency and accuracy.\n\n@default 3000",
+ "example": 1500,
+ "minimum": 500,
+ "maximum": 10000,
+ "default": 3000
+ },
+ "customVocabulary": {
+ "example": [
+ {
+ "content": "Speechmatics",
+ "soundsLike": [
+ "speech mattix"
+ ]
+ }
+ ],
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/SpeechmaticsCustomVocabularyItem"
+ }
+ },
+ "numeralStyle": {
+ "type": "string",
+ "description": "This controls how numbers, dates, currencies, and other entities are formatted in the transcription output.\n\n@default 'written'",
+ "example": "spoken",
+ "enum": [
+ "written",
+ "spoken"
+ ],
+ "default": "written"
+ },
+ "endOfTurnSensitivity": {
+ "type": "number",
+ "description": "This is the sensitivity level for end-of-turn detection, which determines when a speaker has finished talking. Higher values are more sensitive.\n\n@default 0.5",
+ "example": 0.8,
+ "minimum": 0,
+ "maximum": 1,
+ "default": 0.5
+ },
+ "removeDisfluencies": {
+ "type": "boolean",
+ "description": "This enables removal of disfluencies (um, uh) from the transcript to create cleaner, more professional output.\n\nThis is only supported for the English language transcriber.\n\n@default false",
+ "example": true,
+ "default": false
+ },
+ "minimumSpeechDuration": {
+ "type": "number",
+ "description": "This is the minimum duration in seconds for speech segments. Shorter segments will be filtered out. Helps remove noise and improve accuracy.\n\n@default 0.0",
+ "example": 0.2,
+ "minimum": 0,
+ "maximum": 5,
+ "default": 0
+ },
"fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "description": "This is the plan for transcriber provider fallbacks in the event that the primary transcriber provider fails.",
"allOf": [
{
"$ref": "#/components/schemas/FallbackTranscriberPlan"
@@ -8238,7 +8227,8 @@
}
},
"required": [
- "provider"
+ "provider",
+ "customVocabulary"
]
},
"TalkscriberTranscriber": {
@@ -8365,7 +8355,7 @@
]
},
"fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "description": "This is the plan for transcriber provider fallbacks in the event that the primary transcriber provider fails.",
"allOf": [
{
"$ref": "#/components/schemas/FallbackTranscriberPlan"
@@ -8391,6 +8381,7 @@
"type": "string",
"description": "This is the model that will be used for the transcription.",
"enum": [
+ "gemini-3-flash-preview",
"gemini-2.5-pro",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
@@ -8453,7 +8444,7 @@
]
},
"fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "description": "This is the plan for transcriber provider fallbacks in the event that the primary transcriber provider fails.",
"allOf": [
{
"$ref": "#/components/schemas/FallbackTranscriberPlan"
@@ -8547,7 +8538,7 @@
]
},
"fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "description": "This is the plan for transcriber provider fallbacks in the event that the primary transcriber provider fails.",
"allOf": [
{
"$ref": "#/components/schemas/FallbackTranscriberPlan"
@@ -8574,6 +8565,7 @@
"type": "string",
"description": "This is the language that will be set for the transcription.",
"enum": [
+ "multi",
"en"
]
},
@@ -8591,29 +8583,42 @@
},
"endOfTurnConfidenceThreshold": {
"type": "number",
- "description": "This is the end of turn confidence threshold. The minimum confidence that the end of turn is detected.\n\n@min 0\n@max 1\n@default 0.7",
+ "description": "This is the end of turn confidence threshold. The minimum confidence that the end of turn is detected.\nNote: Only used if startSpeakingPlan.smartEndpointingPlan is not set.\n@min 0\n@max 1\n@default 0.7",
"minimum": 0,
"maximum": 1,
"example": 0.7
},
"minEndOfTurnSilenceWhenConfident": {
"type": "number",
- "description": "This is the minimum end of turn silence when confident in milliseconds.\n\n@default 160",
+ "description": "This is the minimum end of turn silence when confident in milliseconds.\nNote: Only used if startSpeakingPlan.smartEndpointingPlan is not set.\n@default 160",
"minimum": 0,
"example": 160
},
"wordFinalizationMaxWaitTime": {
"type": "number",
- "description": "This is the maximum wait time for word finalization in milliseconds.\n\n@default 160",
+ "deprecated": true,
"minimum": 0,
"example": 160
},
"maxTurnSilence": {
"type": "number",
- "description": "This is the maximum turn silence time in milliseconds.\n\n@default 400",
+ "description": "This is the maximum turn silence time in milliseconds.\nNote: Only used if startSpeakingPlan.smartEndpointingPlan is not set.\n@default 400",
"minimum": 0,
"example": 400
},
+ "vadAssistedEndpointingEnabled": {
+ "type": "boolean",
+ "description": "Use VAD to assist with endpointing decisions from the transcriber.\nWhen enabled, transcriber endpointing will be buffered if VAD detects the user is still speaking, preventing premature turn-taking.\nWhen disabled, transcriber endpointing will be used immediately regardless of VAD state, allowing for quicker but more aggressive turn-taking.\nNote: Only used if startSpeakingPlan.smartEndpointingPlan is not set.\n\n@default true",
+ "example": true
+ },
+ "speechModel": {
+ "type": "string",
+ "description": "This is the speech model used for the streaming session.\nNote: Keyterms prompting is not supported with multilingual streaming.\n@default 'universal-streaming-english'",
+ "enum": [
+ "universal-streaming-english",
+ "universal-streaming-multilingual"
+ ]
+ },
"realtimeUrl": {
"type": "string",
"description": "The WebSocket URL that the transcriber connects to."
@@ -8626,6 +8631,14 @@
"maxLength": 2500
}
},
+ "keytermsPrompt": {
+ "description": "Keyterms prompting improves recognition accuracy for specific words and phrases.\nCan include up to 100 keyterms, each up to 50 characters.\nCosts an additional $0.04/hour when enabled.",
+ "type": "array",
+ "items": {
+ "type": "string",
+ "maxLength": 50
+ }
+ },
"endUtteranceSilenceThreshold": {
"type": "number",
"description": "The duration of the end utterance silence threshold in milliseconds."
@@ -9105,7 +9118,8 @@
"base-conversationalai",
"base-voicemail",
"base-video",
- "whisper"
+ "whisper",
+ "flux-general-en"
]
},
{
@@ -9142,6 +9156,7 @@
"es-LATAM",
"et",
"eu",
+ "fa",
"fi",
"fr",
"fr-CA",
@@ -9171,6 +9186,7 @@
"pl",
"pt",
"pt-BR",
+ "pt-PT",
"ro",
"ru",
"sk",
@@ -9215,6 +9231,11 @@
"description": "If set to true, this will cause deepgram to convert spoken numbers to literal numerals. For example, \"my phone number is nine-seven-two...\" would become \"my phone number is 972...\"\n\n@default false",
"example": false
},
+ "profanityFilter": {
+ "type": "boolean",
+ "description": "If set to true, Deepgram will replace profanity in transcripts with surrounding asterisks, e.g. \"f***\".\n\n@default false",
+ "example": false
+ },
"confidenceThreshold": {
"type": "number",
"description": "Transcripts below this confidence threshold will be discarded.\n\n@default 0.4",
@@ -9222,6 +9243,27 @@
"maximum": 1,
"example": 0.4
},
+ "eagerEotThreshold": {
+ "type": "number",
+ "description": "Eager end-of-turn confidence required to fire an eager end-of-turn event. Setting a value here will enable EagerEndOfTurn and SpeechResumed events. It is disabled by default. Only used with Flux models.",
+ "minimum": 0,
+ "maximum": 1,
+ "example": 0.3
+ },
+ "eotThreshold": {
+ "type": "number",
+ "description": "End-of-turn confidence required to finish a turn. Only used with Flux models.\n\n@default 0.7",
+ "minimum": 0.5,
+ "maximum": 0.9,
+ "example": 0.7
+ },
+ "eotTimeoutMs": {
+ "type": "number",
+ "description": "A turn will be finished when this much time has passed after speech, regardless of EOT confidence. Only used with Flux models.\n\n@default 5000",
+ "minimum": 500,
+ "maximum": 10000,
+ "example": 5000
+ },
"keywords": {
"description": "These keywords are passed to the transcription model to help it pick up use-case specific words. Anything that may not be a common word, like your company name, should be added here.",
"type": "array",
@@ -9259,14 +9301,20 @@
]
},
"model": {
- "type": "string",
"description": "This is the model that will be used for the transcription.",
- "enum": [
- "scribe_v1"
+ "oneOf": [
+ {
+ "enum": [
+ "scribe_v1",
+ "scribe_v2",
+ "scribe_v2_realtime"
+ ]
+ }
]
},
"language": {
"type": "string",
+ "description": "This is the language that will be used for the transcription.",
"enum": [
"aa",
"ab",
@@ -9454,6 +9502,34 @@
"zh",
"zu"
]
+ },
+ "silenceThresholdSeconds": {
+ "type": "number",
+ "description": "This is the number of seconds of silence before VAD commits (0.3-3.0).",
+ "minimum": 0.3,
+ "maximum": 3,
+ "example": 1.5
+ },
+ "confidenceThreshold": {
+ "type": "number",
+ "description": "This is the VAD sensitivity (0.1-0.9, lower indicates more sensitive).",
+ "minimum": 0.1,
+ "maximum": 0.9,
+ "example": 0.4
+ },
+ "minSpeechDurationMs": {
+ "type": "number",
+ "description": "This is the minimum speech duration for VAD (50-2000ms).",
+ "minimum": 50,
+ "maximum": 2000,
+ "example": 100
+ },
+ "minSilenceDurationMs": {
+ "type": "number",
+ "description": "This is the minimum silence duration for VAD (50-2000ms).",
+ "minimum": 50,
+ "maximum": 2000,
+ "example": 100
}
},
"required": [
@@ -9783,6 +9859,249 @@
"$ref": "#/components/schemas/GladiaCustomVocabularyConfigDTO"
}
]
+ },
+ "region": {
+ "type": "string",
+ "enum": [
+ "us-west",
+ "eu-west"
+ ],
+ "description": "Region for processing audio (us-west or eu-west)",
+ "example": "us-west"
+ },
+ "receivePartialTranscripts": {
+ "type": "boolean",
+ "example": false,
+ "description": "Enable partial transcripts for low-latency streaming transcription"
+ }
+ },
+ "required": [
+ "provider"
+ ]
+ },
+ "FallbackSonioxTranscriber": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "soniox"
+ ]
+ },
+ "model": {
+ "type": "string",
+ "enum": [
+ "stt-rt-v4"
+ ],
+ "description": "The Soniox model to use for transcription."
+ },
+ "language": {
+ "type": "string",
+ "enum": [
+ "aa",
+ "ab",
+ "ae",
+ "af",
+ "ak",
+ "am",
+ "an",
+ "ar",
+ "as",
+ "av",
+ "ay",
+ "az",
+ "ba",
+ "be",
+ "bg",
+ "bh",
+ "bi",
+ "bm",
+ "bn",
+ "bo",
+ "br",
+ "bs",
+ "ca",
+ "ce",
+ "ch",
+ "co",
+ "cr",
+ "cs",
+ "cu",
+ "cv",
+ "cy",
+ "da",
+ "de",
+ "dv",
+ "dz",
+ "ee",
+ "el",
+ "en",
+ "eo",
+ "es",
+ "et",
+ "eu",
+ "fa",
+ "ff",
+ "fi",
+ "fj",
+ "fo",
+ "fr",
+ "fy",
+ "ga",
+ "gd",
+ "gl",
+ "gn",
+ "gu",
+ "gv",
+ "ha",
+ "he",
+ "hi",
+ "ho",
+ "hr",
+ "ht",
+ "hu",
+ "hy",
+ "hz",
+ "ia",
+ "id",
+ "ie",
+ "ig",
+ "ii",
+ "ik",
+ "io",
+ "is",
+ "it",
+ "iu",
+ "ja",
+ "jv",
+ "ka",
+ "kg",
+ "ki",
+ "kj",
+ "kk",
+ "kl",
+ "km",
+ "kn",
+ "ko",
+ "kr",
+ "ks",
+ "ku",
+ "kv",
+ "kw",
+ "ky",
+ "la",
+ "lb",
+ "lg",
+ "li",
+ "ln",
+ "lo",
+ "lt",
+ "lu",
+ "lv",
+ "mg",
+ "mh",
+ "mi",
+ "mk",
+ "ml",
+ "mn",
+ "mr",
+ "ms",
+ "mt",
+ "my",
+ "na",
+ "nb",
+ "nd",
+ "ne",
+ "ng",
+ "nl",
+ "nn",
+ "no",
+ "nr",
+ "nv",
+ "ny",
+ "oc",
+ "oj",
+ "om",
+ "or",
+ "os",
+ "pa",
+ "pi",
+ "pl",
+ "ps",
+ "pt",
+ "qu",
+ "rm",
+ "rn",
+ "ro",
+ "ru",
+ "rw",
+ "sa",
+ "sc",
+ "sd",
+ "se",
+ "sg",
+ "si",
+ "sk",
+ "sl",
+ "sm",
+ "sn",
+ "so",
+ "sq",
+ "sr",
+ "ss",
+ "st",
+ "su",
+ "sv",
+ "sw",
+ "ta",
+ "te",
+ "tg",
+ "th",
+ "ti",
+ "tk",
+ "tl",
+ "tn",
+ "to",
+ "tr",
+ "ts",
+ "tt",
+ "tw",
+ "ty",
+ "ug",
+ "uk",
+ "ur",
+ "uz",
+ "ve",
+ "vi",
+ "vo",
+ "wa",
+ "wo",
+ "xh",
+ "yi",
+ "yue",
+ "yo",
+ "za",
+ "zh",
+ "zu"
+ ],
+ "description": "The language for transcription. Uses ISO 639-1 codes. Soniox supports 60+ languages with a single universal model."
+ },
+ "languageHintsStrict": {
+ "type": "boolean",
+ "description": "When enabled, restricts transcription to the language specified in the language field. When disabled, the model can detect and transcribe any of 60+ supported languages. Defaults to true."
+ },
+ "maxEndpointDelayMs": {
+ "type": "number",
+ "minimum": 500,
+ "maximum": 3000,
+ "description": "Maximum delay in milliseconds between when the speaker stops and when the endpoint is detected. Lower values mean faster turn-taking but more false endpoints. Range: 500-3000. Default: 500."
+ },
+ "customVocabulary": {
+ "description": "Custom vocabulary terms to boost recognition accuracy. Useful for brand names, product names, and domain-specific terminology. Maps to Soniox context.terms.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
}
},
"required": [
@@ -9811,6 +10130,7 @@
"enum": [
"auto",
"ar",
+ "ar_en",
"ba",
"eu",
"be",
@@ -9842,8 +10162,10 @@
"lv",
"lt",
"ms",
+ "en_ms",
"mt",
"cmn",
+ "cmn_en",
"mr",
"mn",
"no",
@@ -9855,9 +10177,12 @@
"sk",
"sl",
"es",
+ "en_es",
"sw",
"sv",
+ "tl",
"ta",
+ "en_ta",
"th",
"tr",
"uk",
@@ -9866,10 +10191,91 @@
"vi",
"cy"
]
+ },
+ "operatingPoint": {
+ "type": "string",
+ "description": "This is the operating point for the transcription. Choose between `standard` for faster turnaround with strong accuracy or `enhanced` for highest accuracy when precision is critical.\n\n@default 'enhanced'",
+ "example": "enhanced",
+ "enum": [
+ "standard",
+ "enhanced"
+ ],
+ "default": "enhanced"
+ },
+ "region": {
+ "type": "string",
+ "description": "This is the region for the Speechmatics API. Choose between EU (Europe) and US (United States) regions for lower latency and data sovereignty compliance.\n\n@default 'eu'",
+ "example": "us",
+ "enum": [
+ "eu",
+ "us"
+ ],
+ "default": "eu"
+ },
+ "enableDiarization": {
+ "type": "boolean",
+ "description": "This enables speaker diarization, which identifies and separates speakers in the transcription. Essential for multi-speaker conversations and conference calls.\n\n@default false",
+ "example": true,
+ "default": false
+ },
+ "maxDelay": {
+ "type": "number",
+ "description": "This sets the maximum delay in milliseconds for partial transcripts. Balances latency and accuracy.\n\n@default 3000",
+ "example": 1500,
+ "minimum": 500,
+ "maximum": 10000,
+ "default": 3000
+ },
+ "customVocabulary": {
+ "example": [
+ {
+ "content": "Speechmatics",
+ "soundsLike": [
+ "speech mattix"
+ ]
+ }
+ ],
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/SpeechmaticsCustomVocabularyItem"
+ }
+ },
+ "numeralStyle": {
+ "type": "string",
+ "description": "This controls how numbers, dates, currencies, and other entities are formatted in the transcription output.\n\n@default 'written'",
+ "example": "spoken",
+ "enum": [
+ "written",
+ "spoken"
+ ],
+ "default": "written"
+ },
+ "endOfTurnSensitivity": {
+ "type": "number",
+ "description": "This is the sensitivity level for end-of-turn detection, which determines when a speaker has finished talking. Higher values are more sensitive.\n\n@default 0.5",
+ "example": 0.8,
+ "minimum": 0,
+ "maximum": 1,
+ "default": 0.5
+ },
+ "removeDisfluencies": {
+ "type": "boolean",
+ "description": "This enables removal of disfluencies (um, uh) from the transcript to create cleaner, more professional output.\n\nThis is only supported for the English language transcriber.\n\n@default false",
+ "example": true,
+ "default": false
+ },
+ "minimumSpeechDuration": {
+ "type": "number",
+ "description": "This is the minimum duration in seconds for speech segments. Shorter segments will be filtered out. Helps remove noise and improve accuracy.\n\n@default 0.0",
+ "example": 0.2,
+ "minimum": 0,
+ "maximum": 5,
+ "default": 0
}
},
"required": [
- "provider"
+ "provider",
+ "customVocabulary"
]
},
"FallbackTalkscriberTranscriber": {
@@ -10014,6 +10420,7 @@
"type": "string",
"description": "This is the model that will be used for the transcription.",
"enum": [
+ "gemini-3-flash-preview",
"gemini-2.5-pro",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
@@ -10176,6 +10583,19 @@
"langfuse"
]
},
+ "promptName": {
+ "type": "string",
+ "description": "The name of a Langfuse prompt to link generations to. This enables tracking which prompt version was used for each generation. https://langfuse.com/docs/prompt-management/features/link-to-traces"
+ },
+ "promptVersion": {
+ "type": "number",
+ "description": "The version number of the Langfuse prompt to link generations to. Used together with promptName to identify the exact prompt version. https://langfuse.com/docs/prompt-management/features/link-to-traces",
+ "minimum": 1
+ },
+ "traceName": {
+ "type": "string",
+ "description": "Custom name for the Langfuse trace. Supports Liquid templates.\n\nAvailable variables:\n- {{ call.id }} - Call UUID\n- {{ call.type }} - 'inboundPhoneCall', 'outboundPhoneCall', 'webCall'\n- {{ assistant.name }} - Assistant name\n- {{ assistant.id }} - Assistant ID\n\nExample: \"{{ assistant.name }} - {{ call.type }}\"\n\nDefaults to call ID if not provided."
+ },
"tags": {
"description": "This is an array of tags to be added to the Langfuse trace. Tags allow you to categorize and filter traces. https://langfuse.com/docs/tracing-features/tags",
"type": "array",
@@ -10823,299 +11243,11 @@
],
"description": "The type of tool. \"dtmf\" for DTMF tool."
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
- }
- },
- "required": [
- "type"
- ]
- },
- "CreateEndCallToolDTO": {
- "type": "object",
- "properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
- },
- "type": {
- "type": "string",
- "enum": [
- "endCall"
- ],
- "description": "The type of tool. \"endCall\" for End Call tool."
- },
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
- }
- },
- "required": [
- "type"
- ]
- },
- "CreateVoicemailToolDTO": {
- "type": "object",
- "properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
- },
- "type": {
- "type": "string",
- "description": "The type of tool. \"voicemail\" for Voicemail tool.",
- "enum": [
- "voicemail"
- ]
- },
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
- }
- },
- "required": [
- "type"
- ]
- },
- "JsonSchema": {
- "type": "object",
- "properties": {
- "type": {
- "type": "string",
- "description": "This is the type of output you'd like.\n\n`string`, `number`, `integer`, `boolean` are the primitive types and should be obvious.\n\n`array` and `object` are more interesting and quite powerful. They allow you to define nested structures.\n\nFor `array`, you can define the schema of the items in the array using the `items` property.\n\nFor `object`, you can define the properties of the object using the `properties` property.",
- "enum": [
- "string",
- "number",
- "integer",
- "boolean",
- "array",
- "object"
- ]
- },
- "items": {
- "type": "object",
- "description": "This is required if the type is \"array\". This is the schema of the items in the array.\n\nThis is of type JsonSchema. However, Swagger doesn't support circular references."
- },
- "properties": {
- "type": "object",
- "description": "This is required if the type is \"object\". This specifies the properties of the object.\n\nThis is a map of string to JsonSchema. However, Swagger doesn't support circular references."
- },
- "description": {
- "type": "string",
- "description": "This is the description to help the model understand what it needs to output."
- },
- "pattern": {
- "type": "string",
- "description": "This is the pattern of the string. This is a regex that will be used to validate the data in question. To use a common format, use the `format` property instead.\n\nOpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs#supported-properties"
- },
- "format": {
- "type": "string",
- "description": "This is the format of the string. To pass a regex, use the `pattern` property instead.\n\nOpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs?api-mode=chat&type-restrictions=string-restrictions",
- "enum": [
- "date-time",
- "time",
- "date",
- "duration",
- "email",
- "hostname",
- "ipv4",
- "ipv6",
- "uuid"
- ]
- },
- "required": {
- "description": "This is a list of properties that are required.\n\nThis only makes sense if the type is \"object\".",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "enum": {
- "description": "This array specifies the allowed values that can be used to restrict the output of the model.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "title": {
- "type": "string",
- "description": "This is the title of the schema."
- }
- },
- "required": [
- "type"
- ]
- },
- "OpenAIFunctionParameters": {
- "type": "object",
- "properties": {
- "type": {
- "type": "string",
- "description": "This must be set to 'object'. It instructs the model to return a JSON object containing the function call properties.",
- "enum": [
- "object"
- ]
- },
- "properties": {
- "type": "object",
- "description": "This provides a description of the properties required by the function.\nJSON Schema can be used to specify expectations for each property.\nRefer to [this doc](https://ajv.js.org/json-schema.html#json-data-type) for a comprehensive guide on JSON Schema.",
- "additionalProperties": {
- "$ref": "#/components/schemas/JsonSchema"
- }
- },
- "required": {
- "description": "This specifies the properties that are required by the function.",
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- },
- "required": [
- "type",
- "properties"
- ]
- },
- "OpenAIFunction": {
- "type": "object",
- "properties": {
- "strict": {
+ "sipInfoDtmfEnabled": {
"type": "boolean",
- "description": "This is a boolean that controls whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the [OpenAI guide](https://openai.com/index/introducing-structured-outputs-in-the-api/).\n\n@default false",
+ "description": "This enables sending DTMF tones via SIP INFO messages instead of RFC 2833 (RTP events). When enabled, DTMF digits will be sent using the SIP INFO method, which can be more reliable in some network configurations. Only relevant when using the `vapi.sip` transport.",
"default": false
},
- "name": {
- "type": "string",
- "description": "This is the the name of the function to be called.\n\nMust be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.",
- "maxLength": 64,
- "pattern": "/^[a-zA-Z0-9_-]{1,64}$/"
- },
- "description": {
- "type": "string",
- "description": "This is the description of what the function does, used by the AI to choose when and how to call the function.",
- "maxLength": 1000
- },
- "parameters": {
- "description": "These are the parameters the functions accepts, described as a JSON Schema object.\n\nSee the [OpenAI guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema) for documentation about the format.\n\nOmitting parameters defines a function with an empty parameter list.",
- "allOf": [
- {
- "$ref": "#/components/schemas/OpenAIFunctionParameters"
- }
- ]
- }
- },
- "required": [
- "name"
- ]
- },
- "CreateFunctionToolDTO": {
- "type": "object",
- "properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
- },
- "type": {
- "type": "string",
- "enum": [
- "function"
- ],
- "description": "The type of tool. \"function\" for Function tool."
- },
- "async": {
- "type": "boolean",
- "example": false,
- "description": "This determines if the tool is async.\n\n If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server.\n\n If sync, the assistant will wait for your server to respond. This is useful if want assistant to respond with the result from your server.\n\n Defaults to synchronous (`false`)."
- },
- "server": {
- "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
- ]
- },
- "function": {
- "description": "This is the function definition of the tool.",
- "allOf": [
- {
- "$ref": "#/components/schemas/OpenAIFunction"
- }
- ]
- },
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
@@ -11129,18 +11261,7 @@
"type"
]
},
- "GhlToolMetadata": {
- "type": "object",
- "properties": {
- "workflowId": {
- "type": "string"
- },
- "locationId": {
- "type": "string"
- }
- }
- },
- "CreateGhlToolDTO": {
+ "CreateEndCallToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -11170,12 +11291,9 @@
"type": {
"type": "string",
"enum": [
- "ghl"
+ "endCall"
],
- "description": "The type of tool. \"ghl\" for GHL tool."
- },
- "metadata": {
- "$ref": "#/components/schemas/GhlToolMetadata"
+ "description": "The type of tool. \"endCall\" for End Call tool."
},
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
@@ -11187,22 +11305,10 @@
}
},
"required": [
- "type",
- "metadata"
+ "type"
]
},
- "MakeToolMetadata": {
- "type": "object",
- "properties": {
- "scenarioId": {
- "type": "number"
- },
- "triggerHookId": {
- "type": "number"
- }
- }
- },
- "CreateMakeToolDTO": {
+ "CreateVoicemailToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -11231,13 +11337,16 @@
},
"type": {
"type": "string",
+ "description": "The type of tool. \"voicemail\" for Voicemail tool.",
"enum": [
- "make"
- ],
- "description": "The type of tool. \"make\" for Make tool."
+ "voicemail"
+ ]
},
- "metadata": {
- "$ref": "#/components/schemas/MakeToolMetadata"
+ "beepDetectionEnabled": {
+ "type": "boolean",
+ "description": "This is the flag that enables beep detection for voicemail detection and applies only for twilio based calls.\n\n@default false",
+ "default": false,
+ "example": false
},
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
@@ -11249,188 +11358,221 @@
}
},
"required": [
- "type",
- "metadata"
+ "type"
]
},
- "CustomMessage": {
+ "JsonSchema": {
"type": "object",
"properties": {
- "contents": {
- "type": "array",
- "description": "This is an alternative to the `content` property. It allows to specify variants of the same content, one per language.\n\nUsage:\n- If your assistants are multilingual, you can provide content for each language.\n- If you don't provide content for a language, the first item in the array will be automatically translated to the active language at that moment.\n\nThis will override the `content` property.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TextContent",
- "title": "Text"
- }
- ]
- }
- },
"type": {
"type": "string",
- "description": "This is a custom message.",
+ "description": "This is the type of output you'd like.\n\n`string`, `number`, `integer`, `boolean` are the primitive types and should be obvious.\n\n`array` and `object` are more interesting and quite powerful. They allow you to define nested structures.\n\nFor `array`, you can define the schema of the items in the array using the `items` property.\n\nFor `object`, you can define the properties of the object using the `properties` property.",
"enum": [
- "custom-message"
+ "string",
+ "number",
+ "integer",
+ "boolean",
+ "array",
+ "object"
]
},
- "content": {
- "type": "string",
- "description": "This is the content that the assistant will say when this message is triggered.",
- "maxLength": 1000
- }
- },
- "required": [
- "type"
- ]
- },
- "TransferDestinationAssistant": {
- "type": "object",
- "properties": {
- "message": {
- "description": "This is spoken to the customer before connecting them to the destination.\n\nUsage:\n- If this is not provided and transfer tool messages is not provided, default is \"Transferring the call now\".\n- If set to \"\", nothing is spoken. This is useful when you want to silently transfer. This is especially useful when transferring between assistants in a squad. In this scenario, you likely also want to set `assistant.firstMessageMode=assistant-speaks-first-with-model-generated-message` for the destination assistant.\n\nThis accepts a string or a ToolMessageStart class. Latter is useful if you want to specify multiple messages for different languages through the `contents` field.",
- "oneOf": [
- {
- "type": "string"
- },
+ "items": {
+ "description": "This is required if the type is \"array\". This is the schema of the items in the array. This is a recursive reference to JsonSchema.",
+ "allOf": [
{
- "$ref": "#/components/schemas/CustomMessage"
+ "$ref": "#/components/schemas/JsonSchema"
}
]
},
- "type": {
+ "properties": {
+ "type": "object",
+ "description": "This is required if the type is \"object\". This specifies the properties of the object. This is a map of property names to JsonSchema objects.",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/JsonSchema"
+ }
+ },
+ "description": {
"type": "string",
- "enum": [
- "assistant"
- ]
+ "description": "This is the description to help the model understand what it needs to output."
},
- "transferMode": {
+ "pattern": {
"type": "string",
- "description": "This is the mode to use for the transfer. Defaults to `rolling-history`.\n\n- `rolling-history`: This is the default mode. It keeps the entire conversation history and appends the new assistant's system message on transfer.\n\n Example:\n\n Pre-transfer:\n system: assistant1 system message\n assistant: assistant1 first message\n user: hey, good morning\n assistant: how can i help?\n user: i need help with my account\n assistant: (destination.message)\n\n Post-transfer:\n system: assistant1 system message\n assistant: assistant1 first message\n user: hey, good morning\n assistant: how can i help?\n user: i need help with my account\n assistant: (destination.message)\n system: assistant2 system message\n assistant: assistant2 first message (or model generated if firstMessageMode is set to `assistant-speaks-first-with-model-generated-message`)\n\n- `swap-system-message-in-history`: This replaces the original system message with the new assistant's system message on transfer.\n\n Example:\n\n Pre-transfer:\n system: assistant1 system message\n assistant: assistant1 first message\n user: hey, good morning\n assistant: how can i help?\n user: i need help with my account\n assistant: (destination.message)\n\n Post-transfer:\n system: assistant2 system message\n assistant: assistant1 first message\n user: hey, good morning\n assistant: how can i help?\n user: i need help with my account\n assistant: (destination.message)\n assistant: assistant2 first message (or model generated if firstMessageMode is set to `assistant-speaks-first-with-model-generated-message`)\n\n- `delete-history`: This deletes the entire conversation history on transfer.\n\n Example:\n\n Pre-transfer:\n system: assistant1 system message\n assistant: assistant1 first message\n user: hey, good morning\n assistant: how can i help?\n user: i need help with my account\n assistant: (destination.message)\n\n Post-transfer:\n system: assistant2 system message\n assistant: assistant2 first 
message\n user: Yes, please\n assistant: how can i help?\n user: i need help with my account\n\n- `swap-system-message-in-history-and-remove-transfer-tool-messages`: This replaces the original system message with the new assistant's system message on transfer and removes transfer tool messages from conversation history sent to the LLM.\n\n Example:\n\n Pre-transfer:\n system: assistant1 system message\n assistant: assistant1 first message\n user: hey, good morning\n assistant: how can i help?\n user: i need help with my account\n transfer-tool\n transfer-tool-result\n assistant: (destination.message)\n\n Post-transfer:\n system: assistant2 system message\n assistant: assistant1 first message\n user: hey, good morning\n assistant: how can i help?\n user: i need help with my account\n assistant: (destination.message)\n assistant: assistant2 first message (or model generated if firstMessageMode is set to `assistant-speaks-first-with-model-generated-message`)\n\n@default 'rolling-history'",
+ "description": "This is the pattern of the string. This is a regex that will be used to validate the data in question. To use a common format, use the `format` property instead.\n\nOpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs#supported-properties"
+ },
+ "format": {
+ "type": "string",
+ "description": "This is the format of the string. To pass a regex, use the `pattern` property instead.\n\nOpenAI documentation: https://platform.openai.com/docs/guides/structured-outputs?api-mode=chat&type-restrictions=string-restrictions",
"enum": [
- "rolling-history",
- "swap-system-message-in-history",
- "swap-system-message-in-history-and-remove-transfer-tool-messages",
- "delete-history"
+ "date-time",
+ "time",
+ "date",
+ "duration",
+ "email",
+ "hostname",
+ "ipv4",
+ "ipv6",
+ "uuid"
]
},
- "assistantName": {
+ "required": {
+ "description": "This is a list of properties that are required.\n\nThis only makes sense if the type is \"object\".",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "enum": {
+ "description": "This array specifies the allowed values that can be used to restrict the output of the model.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "title": {
"type": "string",
- "description": "This is the assistant to transfer the call to."
+ "description": "This is the title of the schema."
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "VariableExtractionAlias": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "This is the key of the variable.\n\nThis variable will be accessible during the call as `{{key}}` and stored in `call.artifact.variableValues` after the call.\n\nRules:\n- Must start with a letter (a-z, A-Z).\n- Subsequent characters can be letters, numbers, or underscores.\n- Minimum length of 1 and maximum length of 40.",
+ "minLength": 1,
+ "maxLength": 40,
+ "pattern": "/^[a-zA-Z][a-zA-Z0-9_]*$/"
},
- "description": {
+ "value": {
"type": "string",
- "description": "This is the description of the destination, used by the AI to choose when and how to transfer the call."
+ "description": "This is the value of the variable.\n\nThis can reference existing variables, use filters, and perform transformations.\n\nExamples: \"{{name}}\", \"{{customer.email}}\", \"Hello {{name | upcase}}\"",
+ "maxLength": 10000
}
},
"required": [
- "type",
- "assistantName"
+ "key",
+ "value"
]
},
- "TransferFallbackPlan": {
+ "VariableExtractionPlan": {
"type": "object",
"properties": {
- "message": {
- "description": "This is the message the assistant will deliver to the customer if the transfer fails.",
+ "schema": {
+ "description": "This is the schema to extract.\n\nExamples:\n1. To extract object properties, you can use the following schema:\n```json\n{\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n }\n }\n}\n```\n\nThese will be extracted as `{{ name }}` and `{{ age }}` respectively. To emphasize, object properties are extracted as direct global variables.\n\n2. To extract nested properties, you can use the following schema:\n```json\n{\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"object\",\n \"properties\": {\n \"first\": {\n \"type\": \"string\"\n },\n \"last\": {\n \"type\": \"string\"\n }\n }\n }\n }\n}\n```\n\nThese will be extracted as `{{ name }}`. And, `{{ name.first }}` and `{{ name.last }}` will be accessible.\n\n3. To extract array items, you can use the following schema:\n```json\n{\n \"type\": \"array\",\n \"title\": \"zipCodes\",\n \"items\": {\n \"type\": \"string\"\n }\n}\n```\n\nThis will be extracted as `{{ zipCodes }}`. To access the array items, you can use `{{ zipCodes[0] }}` and `{{ zipCodes[1] }}`.\n\n4. To extract array of objects, you can use the following schema:\n\n```json\n{\n \"type\": \"array\",\n \"name\": \"people\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n },\n \"zipCodes\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n }\n }\n}\n```\n\nThis will be extracted as `{{ people }}`. To access the array items, you can use `{{ people[n].name }}`, `{{ people[n].age }}`, `{{ people[n].zipCodes }}`, `{{ people[n].zipCodes[0] }}` and `{{ people[n].zipCodes[1] }}`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/JsonSchema"
+ }
+ ]
+ },
+ "aliases": {
+ "description": "These are additional variables to create.\n\nThese will be accessible during the call as `{{key}}` and stored in `call.artifact.variableValues` after the call.\n\nExample:\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{name}}\"\n },\n {\n \"key\": \"fullName\",\n \"value\": \"{{firstName}} {{lastName}}\"\n },\n {\n \"key\": \"greeting\",\n \"value\": \"Hello {{name}}, welcome to {{company}}!\"\n },\n {\n \"key\": \"customerCity\",\n \"value\": \"{{addresses[0].city}}\"\n },\n {\n \"key\": \"something\",\n \"value\": \"{{any liquid}}\"\n }\n ]\n}\n```\n\nThis will create variables `customerName`, `fullName`, `greeting`, `customerCity`, and `something`. To access these variables, you can reference them as `{{customerName}}`, `{{fullName}}`, `{{greeting}}`, `{{customerCity}}`, and `{{something}}`.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/VariableExtractionAlias"
+ }
+ }
+ }
+ },
+ "ToolParameter": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "This is the key of the parameter."
+ },
+ "value": {
+ "description": "The value of the parameter. Any JSON type. String values support Liquid templates.",
"oneOf": [
{
"type": "string"
},
{
- "$ref": "#/components/schemas/CustomMessage"
+ "type": "number"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "object"
+ },
+ {
+ "type": "array"
}
]
- },
- "endCallEnabled": {
- "type": "boolean",
- "description": "This controls what happens after delivering the failure message to the customer.\n- true: End the call after delivering the failure message (default)\n- false: Keep the assistant on the call to continue handling the customer's request\n\n@default true",
- "default": true
}
},
"required": [
- "message"
+ "key",
+ "value"
]
},
- "TransferAssistantModel": {
+ "OpenAIFunctionParameters": {
"type": "object",
"properties": {
- "provider": {
+ "type": {
"type": "string",
- "description": "The model provider for the transfer assistant",
+ "description": "This must be set to 'object'. It instructs the model to return a JSON object containing the function call properties.",
"enum": [
- "openai",
- "anthropic",
- "google",
- "custom-llm"
+ "object"
]
},
- "model": {
- "type": "string",
- "description": "The model name - must be compatible with the selected provider",
- "example": "gpt-4o"
- },
- "messages": {
- "type": "array",
- "description": "These are the messages used to configure the transfer assistant.\n\n@default: ```\n[\n {\n role: 'system',\n content: 'You are a transfer assistant designed to facilitate call transfers. Your core responsibility is to manage the transfer process efficiently.\\n\\n## Core Responsibility\\n- Facilitate the transfer process by using transferSuccessful or transferCancel tools appropriately\\n\\n## When to Respond\\n- Answer questions about the transfer process or provide summaries when specifically asked by the operator\\n- Respond to direct questions about the current transfer situation\\n\\n## What to Avoid\\n- Do not discuss topics unrelated to the transfer\\n- Do not engage in general conversation\\n- Keep all interactions focused on facilitating the transfer\\n\\n## Transfer Tools\\n- Use transferSuccessful when the transfer should proceed\\n- Use transferCancel when the transfer cannot be completed\\n\\nStay focused on your core responsibility of facilitating transfers.'\n }\n]```\n\n**Default Behavior:** If you don't provide any messages or don't include a system message as the first message, the default system message above will be automatically added.\n\n**Override Default:** To replace the default system message, provide your own system message as the first message in the array.\n\n**Add Context:** You can provide additional messages (user, assistant, etc.) to add context while keeping the default system message, or combine them with your custom system message."
+ "properties": {
+ "type": "object",
+ "description": "This provides a description of the properties required by the function.\nJSON Schema can be used to specify expectations for each property.\nRefer to [this doc](https://ajv.js.org/json-schema.html#json-data-type) for a comprehensive guide on JSON Schema.",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/JsonSchema"
+ }
},
- "tools": {
+ "required": {
+ "description": "This specifies the properties that are required by the function.",
"type": "array",
- "description": "Tools available to the transfer assistant during warm-transfer-experimental.\n\n**Default Behavior:** The transfer assistant will ALWAYS have both `transferSuccessful` and `transferCancel` tools automatically added, regardless of what you provide here.\n\n**Default Tools:**\n- `transferSuccessful`: \"Call this function to confirm the transfer is successful and connect the customer. Use this when you detect a human has answered and is ready to take the call.\"\n- `transferCancel`: \"Call this function to cancel the transfer when no human answers or transfer should not proceed. Use this when you detect voicemail, busy signal, or no answer.\"\n\n**Customization:** You can override the default tools by providing `transferSuccessful` and/or `transferCancel` tools with custom `function` or `messages` configurations.\n\n**Additional Tools:** You can also provide other tools, but the two transfer tools will always be present and available to the assistant."
+ "items": {
+ "type": "string"
+ }
}
},
"required": [
- "provider",
- "model"
+ "type",
+ "properties"
]
},
- "TransferAssistant": {
+ "OpenAIFunction": {
"type": "object",
"properties": {
+ "strict": {
+ "type": "boolean",
+ "description": "This is a boolean that controls whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the parameters field. Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the [OpenAI guide](https://openai.com/index/introducing-structured-outputs-in-the-api/).\n\n@default false",
+ "default": false
+ },
"name": {
"type": "string",
- "description": "Optional name for the transfer assistant",
- "maxLength": 100,
- "default": "transfer-assistant",
- "example": "Sales Transfer Assistant"
+ "description": "This is the name of the function to be called.\n\nMust be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.",
+ "maxLength": 64,
+ "pattern": "/^[a-zA-Z0-9_-]{1,64}$/"
},
- "model": {
- "description": "Model configuration for the transfer assistant",
+ "description": {
+ "type": "string",
+ "description": "This is the description of what the function does, used by the AI to choose when and how to call the function."
+ },
+ "parameters": {
+ "description": "These are the parameters the functions accepts, described as a JSON Schema object.\n\nSee the [OpenAI guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema) for documentation about the format.\n\nOmitting parameters defines a function with an empty parameter list.",
"allOf": [
{
- "$ref": "#/components/schemas/TransferAssistantModel"
+ "$ref": "#/components/schemas/OpenAIFunctionParameters"
}
]
- },
- "firstMessage": {
- "type": "string",
- "description": "This is the first message that the transfer assistant will say.\nThis can also be a URL to a custom audio file.\n\nIf unspecified, assistant will wait for user to speak and use the model to respond once they speak.",
- "example": "Hello! I understand you need to be transferred. Let me connect you."
- },
- "firstMessageMode": {
- "type": "string",
- "description": "This is the mode for the first message. Default is 'assistant-speaks-first'.\n\nUse:\n- 'assistant-speaks-first' to have the assistant speak first.\n- 'assistant-waits-for-user' to have the assistant wait for the user to speak first.\n- 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state.\n\n@default 'assistant-speaks-first'",
- "enum": [
- "assistant-speaks-first",
- "assistant-speaks-first-with-model-generated-message",
- "assistant-waits-for-user"
- ],
- "example": "assistant-speaks-first"
- },
- "maxDurationSeconds": {
- "type": "number",
- "description": "This is the maximum duration in seconds for the transfer assistant conversation.\nAfter this time, the transfer will be cancelled automatically.\n@default 120",
- "minimum": 10,
- "maximum": 43200,
- "example": 120
}
},
"required": [
- "model"
+ "name"
]
},
- "TransferCancelToolUserEditable": {
+ "CreateFunctionToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -11460,9 +11602,45 @@
"type": {
"type": "string",
"enum": [
- "transferCancel"
+ "function"
],
- "description": "The type of tool. \"transferCancel\" for Transfer Cancel tool. This tool can only be used during warm-transfer-experimental by the transfer assistant to cancel an ongoing transfer and return the call back to the original assistant when the transfer cannot be completed."
+ "description": "The type of tool. \"function\" for Function tool."
+ },
+ "async": {
+ "type": "boolean",
+ "example": false,
+ "description": "This determines if the tool is async.\n\n If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server.\n\n If sync, the assistant will wait for your server to respond. This is useful if you want the assistant to respond with the result from your server.\n\n Defaults to synchronous (`false`)."
+ },
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "variableExtractionPlan": {
+ "description": "Plan to extract variables from the tool response",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/VariableExtractionPlan"
+ }
+ ]
+ },
+ "parameters": {
+ "description": "Static key-value pairs merged into the request body. Values support Liquid templates.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ToolParameter"
+ }
+ },
+ "function": {
+ "description": "This is the function definition of the tool.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/OpenAIFunction"
+ }
+ ]
},
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
@@ -11477,7 +11655,18 @@
"type"
]
},
- "TransferSuccessfulToolUserEditable": {
+ "GhlToolMetadata": {
+ "type": "object",
+ "properties": {
+ "workflowId": {
+ "type": "string"
+ },
+ "locationId": {
+ "type": "string"
+ }
+ }
+ },
+ "CreateGhlToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -11507,9 +11696,12 @@
"type": {
"type": "string",
"enum": [
- "transferSuccessful"
+ "ghl"
],
- "description": "The type of tool. \"transferSuccessful\" for Transfer Successful tool. This tool can only be used during warm-transfer-experimental by the transfer assistant to confirm that the transfer should proceed and finalize the handoff to the destination."
+ "description": "The type of tool. \"ghl\" for GHL tool."
+ },
+ "metadata": {
+ "$ref": "#/components/schemas/GhlToolMetadata"
},
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
@@ -11521,115 +11713,105 @@
}
},
"required": [
- "type"
+ "type",
+ "metadata"
]
},
- "SummaryPlan": {
+ "MakeToolMetadata": {
"type": "object",
"properties": {
- "messages": {
- "description": "These are the messages used to generate the summary.\n\n@default: ```\n[\n {\n \"role\": \"system\",\n \"content\": \"You are an expert note-taker. You will be given a transcript of a call. Summarize the call in 2-3 sentences. DO NOT return anything except the summary.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Here is the transcript:\\n\\n{{transcript}}\\n\\n. Here is the ended reason of the call:\\n\\n{{endedReason}}\\n\\n\"\n }\n]```\n\nYou can customize by providing any messages you want.\n\nHere are the template variables available:\n- {{transcript}}: The transcript of the call from `call.artifact.transcript` \n- {{systemPrompt}}: The system prompt of the call from `assistant.model.messages[type=system].content` \n- {{messages}}: The messages of the call from `assistant.model.messages` \n- {{endedReason}}: The ended reason of the call from `call.endedReason`",
- "type": "array",
- "items": {
- "type": "object"
- }
- },
- "enabled": {
- "type": "boolean",
- "description": "This determines whether a summary is generated and stored in `call.analysis.summary`. Defaults to true.\n\nUsage:\n- If you want to disable the summary, set this to false.\n\n@default true"
+ "scenarioId": {
+ "type": "number"
},
- "timeoutSeconds": {
- "type": "number",
- "description": "This is how long the request is tried before giving up. When request times out, `call.analysis.summary` will be empty.\n\nUsage:\n- To guarantee the summary is generated, set this value high. Note, this will delay the end of call report in cases where model is slow to respond.\n\n@default 5 seconds",
- "minimum": 1,
- "maximum": 60
+ "triggerHookId": {
+ "type": "number"
}
}
},
- "TransferPlan": {
+ "CreateMakeToolDTO": {
"type": "object",
"properties": {
- "mode": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
"type": "string",
- "description": "This configures how transfer is executed and the experience of the destination party receiving the call.\n\nUsage:\n- `blind-transfer`: The assistant forwards the call to the destination without any message or summary.\n- `blind-transfer-add-summary-to-sip-header`: The assistant forwards the call to the destination and adds a SIP header X-Transfer-Summary to the call to include the summary.\n- `warm-transfer-say-message`: The assistant dials the destination, delivers the `message` to the destination party, connects the customer, and leaves the call.\n- `warm-transfer-say-summary`: The assistant dials the destination, provides a summary of the call to the destination party, connects the customer, and leaves the call.\n- `warm-transfer-wait-for-operator-to-speak-first-and-then-say-message`: The assistant dials the destination, waits for the operator to speak, delivers the `message` to the destination party, and then connects the customer.\n- `warm-transfer-wait-for-operator-to-speak-first-and-then-say-summary`: The assistant dials the destination, waits for the operator to speak, provides a summary of the call to the destination party, and then connects the customer.\n- `warm-transfer-twiml`: The assistant dials the destination, executes the twiml instructions on the destination call leg, connects the customer, and leaves the call.\n- `warm-transfer-experimental`: The assistant puts the customer on hold, dials the destination, and if the destination answers (and is human), delivers a message or summary before connecting the customer. If the destination is unreachable or not human (e.g., with voicemail detection), the assistant delivers the `fallbackMessage` to the customer and optionally ends the call.\n\n@default 'blind-transfer'",
"enum": [
- "blind-transfer",
- "blind-transfer-add-summary-to-sip-header",
- "warm-transfer-say-message",
- "warm-transfer-say-summary",
- "warm-transfer-twiml",
- "warm-transfer-wait-for-operator-to-speak-first-and-then-say-message",
- "warm-transfer-wait-for-operator-to-speak-first-and-then-say-summary",
- "warm-transfer-experimental"
- ]
+ "make"
+ ],
+ "description": "The type of tool. \"make\" for Make tool."
},
- "message": {
- "description": "This is the message the assistant will deliver to the destination party before connecting the customer.\n\nUsage:\n- Used only when `mode` is `blind-transfer-add-summary-to-sip-header`, `warm-transfer-say-message`, `warm-transfer-wait-for-operator-to-speak-first-and-then-say-message`, or `warm-transfer-experimental`.",
- "oneOf": [
- {
- "type": "string"
- },
+ "metadata": {
+ "$ref": "#/components/schemas/MakeToolMetadata"
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
{
- "$ref": "#/components/schemas/CustomMessage"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
+ }
+ },
+ "required": [
+ "type",
+ "metadata"
+ ]
+ },
+ "CustomMessage": {
+ "type": "object",
+ "properties": {
+ "contents": {
+ "type": "array",
+ "description": "This is an alternative to the `content` property. It allows to specify variants of the same content, one per language.\n\nUsage:\n- If your assistants are multilingual, you can provide content for each language.\n- If you don't provide content for a language, the first item in the array will be automatically translated to the active language at that moment.\n\nThis will override the `content` property.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TextContent",
+ "title": "Text"
+ }
+ ]
+ }
},
- "timeout": {
- "type": "number",
- "description": "This is the timeout in seconds for the warm-transfer-wait-for-operator-to-speak-first-and-then-say-message/summary\n\n@default 60",
- "minimum": 1,
- "maximum": 600,
- "default": 60
- },
- "sipVerb": {
- "type": "object",
- "description": "This specifies the SIP verb to use while transferring the call.\n- 'refer': Uses SIP REFER to transfer the call (default)\n- 'bye': Ends current call with SIP BYE\n- 'dial': Uses SIP DIAL to transfer the call",
- "default": "refer",
+ "type": {
+ "type": "string",
+ "description": "This is a custom message.",
"enum": [
- "refer",
- "bye",
- "dial"
+ "custom-message"
]
},
- "holdAudioUrl": {
- "type": "string",
- "description": "This is the URL to an audio file played while the customer is on hold during transfer.\n\nUsage:\n- Used only when `mode` is `warm-transfer-experimental`.\n- Used when transferring calls to play hold audio for the customer.\n- Must be a publicly accessible URL to an audio file.\n- Supported formats: MP3 and WAV.\n- If not provided, the default hold audio will be used."
- },
- "transferCompleteAudioUrl": {
- "type": "string",
- "description": "This is the URL to an audio file played after the warm transfer message or summary is delivered to the destination party.\nIt can be used to play a custom sound like 'beep' to notify that the transfer is complete.\n\nUsage:\n- Used only when `mode` is `warm-transfer-experimental`.\n- Used when transferring calls to play hold audio for the destination party.\n- Must be a publicly accessible URL to an audio file.\n- Supported formats: MP3 and WAV."
- },
- "twiml": {
+ "content": {
"type": "string",
- "description": "This is the TwiML instructions to execute on the destination call leg before connecting the customer.\n\nUsage:\n- Used only when `mode` is `warm-transfer-twiml`.\n- Supports only `Play`, `Say`, `Gather`, `Hangup` and `Pause` verbs.\n- Maximum length is 4096 characters.\n\nExample:\n```\nHello, transferring a customer to you.\n\nThey called about billing questions.\n```",
- "maxLength": 4096
- },
- "summaryPlan": {
- "description": "This is the plan for generating a summary of the call to present to the destination party.\n\nUsage:\n- Used only when `mode` is `blind-transfer-add-summary-to-sip-header` or `warm-transfer-say-summary` or `warm-transfer-wait-for-operator-to-speak-first-and-then-say-summary` or `warm-transfer-experimental`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/SummaryPlan"
- }
- ]
- },
- "sipHeadersInReferToEnabled": {
- "type": "boolean",
- "description": "This flag includes the sipHeaders from above in the refer to sip uri as url encoded query params.\n\n@default false"
- },
- "fallbackPlan": {
- "description": "This configures the fallback plan when the transfer fails (destination unreachable, busy, or not human).\n\nUsage:\n- Used only when `mode` is `warm-transfer-experimental`.\n- If not provided when using `warm-transfer-experimental`, a default message will be used.",
- "allOf": [
- {
- "$ref": "#/components/schemas/TransferFallbackPlan"
- }
- ]
+ "description": "This is the content that the assistant will say when this message is triggered.",
+ "maxLength": 1000
}
},
"required": [
- "mode"
+ "type"
]
},
- "TransferDestinationNumber": {
+ "TransferDestinationAssistant": {
"type": "object",
"properties": {
"message": {
@@ -11646,38 +11828,22 @@
"type": {
"type": "string",
"enum": [
- "number"
+ "assistant"
]
},
- "numberE164CheckEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.\n\nUse cases:\n- `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.\n- `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.\n\nIf `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\\+?[a-zA-Z0-9]+$/`).\n\n@default true (E164 check is enabled)",
- "default": true
- },
- "number": {
+ "transferMode": {
"type": "string",
- "description": "This is the phone number to transfer the call to.",
- "minLength": 3,
- "maxLength": 40
- },
- "extension": {
- "type": "string",
- "description": "This is the extension to dial after transferring the call to the `number`.",
- "minLength": 1,
- "maxLength": 10
+ "description": "This is the mode to use for the transfer. Defaults to `rolling-history`.\n\n- `rolling-history`: This is the default mode. It keeps the entire conversation history and appends the new assistant's system message on transfer.\n\n Example:\n\n Pre-transfer:\n system: assistant1 system message\n assistant: assistant1 first message\n user: hey, good morning\n assistant: how can i help?\n user: i need help with my account\n assistant: (destination.message)\n\n Post-transfer:\n system: assistant1 system message\n assistant: assistant1 first message\n user: hey, good morning\n assistant: how can i help?\n user: i need help with my account\n assistant: (destination.message)\n system: assistant2 system message\n assistant: assistant2 first message (or model generated if firstMessageMode is set to `assistant-speaks-first-with-model-generated-message`)\n\n- `swap-system-message-in-history`: This replaces the original system message with the new assistant's system message on transfer.\n\n Example:\n\n Pre-transfer:\n system: assistant1 system message\n assistant: assistant1 first message\n user: hey, good morning\n assistant: how can i help?\n user: i need help with my account\n assistant: (destination.message)\n\n Post-transfer:\n system: assistant2 system message\n assistant: assistant1 first message\n user: hey, good morning\n assistant: how can i help?\n user: i need help with my account\n assistant: (destination.message)\n assistant: assistant2 first message (or model generated if firstMessageMode is set to `assistant-speaks-first-with-model-generated-message`)\n\n- `delete-history`: This deletes the entire conversation history on transfer.\n\n Example:\n\n Pre-transfer:\n system: assistant1 system message\n assistant: assistant1 first message\n user: hey, good morning\n assistant: how can i help?\n user: i need help with my account\n assistant: (destination.message)\n\n Post-transfer:\n system: assistant2 system message\n assistant: assistant2 first 
message\n user: Yes, please\n assistant: how can i help?\n user: i need help with my account\n\n- `swap-system-message-in-history-and-remove-transfer-tool-messages`: This replaces the original system message with the new assistant's system message on transfer and removes transfer tool messages from conversation history sent to the LLM.\n\n Example:\n\n Pre-transfer:\n system: assistant1 system message\n assistant: assistant1 first message\n user: hey, good morning\n assistant: how can i help?\n user: i need help with my account\n transfer-tool\n transfer-tool-result\n assistant: (destination.message)\n\n Post-transfer:\n system: assistant2 system message\n assistant: assistant1 first message\n user: hey, good morning\n assistant: how can i help?\n user: i need help with my account\n assistant: (destination.message)\n assistant: assistant2 first message (or model generated if firstMessageMode is set to `assistant-speaks-first-with-model-generated-message`)\n\n@default 'rolling-history'",
+ "enum": [
+ "rolling-history",
+ "swap-system-message-in-history",
+ "swap-system-message-in-history-and-remove-transfer-tool-messages",
+ "delete-history"
+ ]
},
- "callerId": {
+ "assistantName": {
"type": "string",
- "description": "This is the caller ID to use when transferring the call to the `number`.\n\nUsage:\n- If not provided, the caller ID will be the number the call is coming from. Example, +14151111111 calls in to and the assistant transfers out to +16470000000. +16470000000 will see +14151111111 as the caller.\n- To change this behavior, provide a `callerId`.\n- Set to '{{customer.number}}' to always use the customer's number as the caller ID.\n- Set to '{{phoneNumber.number}}' to always use the phone number of the assistant as the caller ID.\n- Set to any E164 number to always use that number as the caller ID. This needs to be a number that is owned or verified by your Transport provider like Twilio.\n\nFor Twilio, you can read up more here: https://www.twilio.com/docs/voice/twiml/dial#callerid",
- "maxLength": 40
- },
- "transferPlan": {
- "description": "This configures how transfer is executed and the experience of the destination party receiving the call. Defaults to `blind-transfer`.\n\n@default `transferPlan.mode='blind-transfer'`",
- "allOf": [
- {
- "$ref": "#/components/schemas/TransferPlan"
- }
- ]
+ "description": "This is the assistant to transfer the call to."
},
"description": {
"type": "string",
@@ -11686,14 +11852,14 @@
},
"required": [
"type",
- "number"
+ "assistantName"
]
},
- "TransferDestinationSip": {
+ "TransferFallbackPlan": {
"type": "object",
"properties": {
"message": {
- "description": "This is spoken to the customer before connecting them to the destination.\n\nUsage:\n- If this is not provided and transfer tool messages is not provided, default is \"Transferring the call now\".\n- If set to \"\", nothing is spoken. This is useful when you want to silently transfer. This is especially useful when transferring between assistants in a squad. In this scenario, you likely also want to set `assistant.firstMessageMode=assistant-speaks-first-with-model-generated-message` for the destination assistant.\n\nThis accepts a string or a ToolMessageStart class. Latter is useful if you want to specify multiple messages for different languages through the `contents` field.",
+ "description": "This is the message the assistant will deliver to the customer if the transfer fails.",
"oneOf": [
{
"type": "string"
@@ -11703,616 +11869,647 @@
}
]
},
- "type": {
+ "endCallEnabled": {
+ "type": "boolean",
+ "description": "This controls what happens after delivering the failure message to the customer.\n- true: End the call after delivering the failure message (default)\n- false: Keep the assistant on the call to continue handling the customer's request\n\n@default true",
+ "default": true
+ }
+ },
+ "required": [
+ "message"
+ ]
+ },
+ "TransferAssistantModel": {
+ "type": "object",
+ "properties": {
+ "provider": {
"type": "string",
+ "description": "The model provider for the transfer assistant",
"enum": [
- "sip"
+ "openai",
+ "anthropic",
+ "google",
+ "custom-llm"
]
},
- "sipUri": {
+ "model": {
"type": "string",
- "description": "This is the SIP URI to transfer the call to."
- },
- "transferPlan": {
- "description": "This configures how transfer is executed and the experience of the destination party receiving the call. Defaults to `blind-transfer`.\n\n@default `transferPlan.mode='blind-transfer'`",
- "allOf": [
- {
- "$ref": "#/components/schemas/TransferPlan"
- }
- ]
+ "description": "The model name - must be compatible with the selected provider",
+ "example": "gpt-4o"
},
- "sipHeaders": {
- "type": "object",
- "description": "These are custom headers to be added to SIP refer during transfer call."
+ "messages": {
+ "type": "array",
+ "description": "These are the messages used to configure the transfer assistant.\n\n@default: ```\n[\n {\n role: 'system',\n content: 'You are a transfer assistant designed to facilitate call transfers. Your core responsibility is to manage the transfer process efficiently.\\n\\n## Core Responsibility\\n- Facilitate the transfer process by using transferSuccessful or transferCancel tools appropriately\\n\\n## When to Respond\\n- Answer questions about the transfer process or provide summaries when specifically asked by the operator\\n- Respond to direct questions about the current transfer situation\\n\\n## What to Avoid\\n- Do not discuss topics unrelated to the transfer\\n- Do not engage in general conversation\\n- Keep all interactions focused on facilitating the transfer\\n\\n## Transfer Tools\\n- Use transferSuccessful when the transfer should proceed\\n- Use transferCancel when the transfer cannot be completed\\n\\nStay focused on your core responsibility of facilitating transfers.'\n }\n]```\n\n**Default Behavior:** If you don't provide any messages or don't include a system message as the first message, the default system message above will be automatically added.\n\n**Override Default:** To replace the default system message, provide your own system message as the first message in the array.\n\n**Add Context:** You can provide additional messages (user, assistant, etc.) to add context while keeping the default system message, or combine them with your custom system message."
},
- "description": {
- "type": "string",
- "description": "This is the description of the destination, used by the AI to choose when and how to transfer the call."
+ "tools": {
+ "type": "array",
+ "description": "Tools available to the transfer assistant during warm-transfer-experimental.\n\n**Default Behavior:** The transfer assistant will ALWAYS have both `transferSuccessful` and `transferCancel` tools automatically added, regardless of what you provide here.\n\n**Default Tools:**\n- `transferSuccessful`: \"Call this function to confirm the transfer is successful and connect the customer. Use this when you detect a human has answered and is ready to take the call.\"\n- `transferCancel`: \"Call this function to cancel the transfer when no human answers or transfer should not proceed. Use this when you detect voicemail, busy signal, or no answer.\"\n\n**Customization:** You can override the default tools by providing `transferSuccessful` and/or `transferCancel` tools with custom `function` or `messages` configurations.\n\n**Additional Tools:** You can also provide other tools, but the two transfer tools will always be present and available to the assistant."
}
},
"required": [
- "type",
- "sipUri"
+ "provider",
+ "model"
]
},
- "CreateTransferCallToolDTO": {
+ "RegexOption": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
- },
"type": {
"type": "string",
+ "description": "This is the type of the regex option. Options are:\n- `ignore-case`: Ignores the case of the text being matched.\n- `whole-word`: Matches whole words only.\n- `multi-line`: Matches across multiple lines.",
"enum": [
- "transferCall"
+ "ignore-case",
+ "whole-word",
+ "multi-line"
]
},
- "destinations": {
- "type": "array",
- "description": "These are the destinations that the call can be transferred to. If no destinations are provided, server.url will be used to get the transfer destination once the tool is called.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationAssistant",
- "title": "Assistant"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "Number"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "Sip"
- }
- ]
- }
- },
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
+ "enabled": {
+ "type": "boolean",
+ "description": "This is whether to enable the option.\n\n@default false"
}
},
"required": [
- "type"
+ "type",
+ "enabled"
]
},
- "ContextEngineeringPlanLastNMessages": {
+ "AssistantCustomEndpointingRule": {
"type": "object",
"properties": {
"type": {
"type": "string",
+ "description": "This endpointing rule is based on the last assistant message before customer started speaking.\n\nFlow:\n- Assistant speaks\n- Customer starts speaking\n- Customer transcription comes in\n- This rule is evaluated on the last assistant message\n- If a match is found based on `regex`, the endpointing timeout is set to `timeoutSeconds`\n\nUsage:\n- If you have yes/no questions in your use case like \"are you interested in a loan?\", you can set a shorter timeout.\n- If you have questions where the customer may pause to look up information like \"what's my account number?\", you can set a longer timeout.",
"enum": [
- "lastNMessages"
+ "assistant"
]
},
- "maxMessages": {
+ "regex": {
+ "type": "string",
+ "description": "This is the regex pattern to match.\n\nNote:\n- This works by using the `RegExp.test` method in Node.JS. Eg. `/hello/.test(\"hello there\")` will return `true`.\n\nHot tip:\n- In JavaScript, escape `\\` when sending the regex pattern. Eg. `\"hello\\sthere\"` will be sent over the wire as `\"hellosthere\"`. Send `\"hello\\\\sthere\"` instead.\n- `RegExp.test` does substring matching, so `/cat/.test(\"I love cats\")` will return `true`. To do full string matching, send \"^cat$\"."
+ },
+ "regexOptions": {
+ "description": "These are the options for the regex match. Defaults to all disabled.\n\n@default []",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/RegexOption"
+ }
+ },
+ "timeoutSeconds": {
"type": "number",
- "description": "This is the maximum number of messages to include in the context engineering plan.",
- "minimum": 0
+ "description": "This is the endpointing timeout in seconds, if the rule is matched.",
+ "minimum": 0,
+ "maximum": 15
}
},
"required": [
"type",
- "maxMessages"
+ "regex",
+ "timeoutSeconds"
]
},
- "ContextEngineeringPlanNone": {
+ "CustomerCustomEndpointingRule": {
"type": "object",
"properties": {
"type": {
"type": "string",
+ "description": "This endpointing rule is based on current customer message as they are speaking.\n\nFlow:\n- Assistant speaks\n- Customer starts speaking\n- Customer transcription comes in\n- This rule is evaluated on the current customer transcription\n- If a match is found based on `regex`, the endpointing timeout is set to `timeoutSeconds`\n\nUsage:\n- If you want to wait longer while customer is speaking numbers, you can set a longer timeout.",
"enum": [
- "none"
+ "customer"
]
+ },
+ "regex": {
+ "type": "string",
+ "description": "This is the regex pattern to match.\n\nNote:\n- This works by using the `RegExp.test` method in Node.JS. Eg. `/hello/.test(\"hello there\")` will return `true`.\n\nHot tip:\n- In JavaScript, escape `\\` when sending the regex pattern. Eg. `\"hello\\sthere\"` will be sent over the wire as `\"hellosthere\"`. Send `\"hello\\\\sthere\"` instead.\n- `RegExp.test` does substring matching, so `/cat/.test(\"I love cats\")` will return `true`. To do full string matching, send \"^cat$\"."
+ },
+ "regexOptions": {
+ "description": "These are the options for the regex match. Defaults to all disabled.\n\n@default []",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/RegexOption"
+ }
+ },
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "This is the endpointing timeout in seconds, if the rule is matched.",
+ "minimum": 0,
+ "maximum": 15
}
},
"required": [
- "type"
+ "type",
+ "regex",
+ "timeoutSeconds"
]
},
- "ContextEngineeringPlanAll": {
+ "BothCustomEndpointingRule": {
"type": "object",
"properties": {
"type": {
"type": "string",
+ "description": "This endpointing rule is based on both the last assistant message and the current customer message as they are speaking.\n\nFlow:\n- Assistant speaks\n- Customer starts speaking\n- Customer transcription comes in\n- This rule is evaluated on the last assistant message and the current customer transcription\n- If assistant message matches `assistantRegex` AND customer message matches `customerRegex`, the endpointing timeout is set to `timeoutSeconds`\n\nUsage:\n- If you want to wait longer while customer is speaking numbers, you can set a longer timeout.",
"enum": [
- "all"
+ "both"
]
+ },
+ "assistantRegex": {
+ "type": "string",
+ "description": "This is the regex pattern to match the assistant's message.\n\nNote:\n- This works by using the `RegExp.test` method in Node.JS. Eg. `/hello/.test(\"hello there\")` will return `true`.\n\nHot tip:\n- In JavaScript, escape `\\` when sending the regex pattern. Eg. `\"hello\\sthere\"` will be sent over the wire as `\"hellosthere\"`. Send `\"hello\\\\sthere\"` instead.\n- `RegExp.test` does substring matching, so `/cat/.test(\"I love cats\")` will return `true`. To do full string matching, send \"^cat$\"."
+ },
+ "assistantRegexOptions": {
+ "description": "These are the options for the assistant's message regex match. Defaults to all disabled.\n\n@default []",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/RegexOption"
+ }
+ },
+ "customerRegex": {
+ "type": "string"
+ },
+ "customerRegexOptions": {
+ "description": "These are the options for the customer's message regex match. Defaults to all disabled.\n\n@default []",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/RegexOption"
+ }
+ },
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "This is the endpointing timeout in seconds, if the rule is matched.",
+ "minimum": 0,
+ "maximum": 15
}
},
"required": [
- "type"
+ "type",
+ "assistantRegex",
+ "customerRegex",
+ "timeoutSeconds"
]
},
- "VariableExtractionAlias": {
+ "VapiSmartEndpointingPlan": {
"type": "object",
"properties": {
- "key": {
- "type": "string",
- "description": "This is the key of the variable.\n\nThis variable will be accessible during the call as `{{key}}` and stored in `call.artifact.variableValues` after the call.\n\nRules:\n- Must start with a letter (a-z, A-Z).\n- Subsequent characters can be letters, numbers, or underscores.\n- Minimum length of 1 and maximum length of 40.",
- "minLength": 1,
- "maxLength": 40,
- "pattern": "/^[a-zA-Z][a-zA-Z0-9_]*$/"
- },
- "value": {
+ "provider": {
"type": "string",
- "description": "This is the value of the variable.\n\nThis can reference existing variables, use filters, and perform transformations.\n\nExamples: \"{{name}}\", \"{{customer.email}}\", \"Hello {{name | upcase}}\"",
- "maxLength": 10000
+ "description": "This is the provider for the smart endpointing plan.",
+ "enum": [
+ "vapi",
+ "livekit",
+ "custom-endpointing-model"
+ ],
+ "example": "vapi"
}
},
"required": [
- "key",
- "value"
+ "provider"
]
},
- "VariableExtractionPlan": {
- "type": "object",
- "properties": {
- "schema": {
- "description": "This is the schema to extract.\n\nExamples:\n1. To extract object properties, you can use the following schema:\n```json\n{\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n }\n }\n}\n```\n\nThese will be extracted as `{{ name }}` and `{{ age }}` respectively. To emphasize, object properties are extracted as direct global variables.\n\n2. To extract nested properties, you can use the following schema:\n```json\n{\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"object\",\n \"properties\": {\n \"first\": {\n \"type\": \"string\"\n },\n \"last\": {\n \"type\": \"string\"\n }\n }\n }\n }\n}\n```\n\nThese will be extracted as `{{ name }}`. And, `{{ name.first }}` and `{{ name.last }}` will be accessible.\n\n3. To extract array items, you can use the following schema:\n```json\n{\n \"type\": \"array\",\n \"title\": \"zipCodes\",\n \"items\": {\n \"type\": \"string\"\n }\n}\n```\n\nThis will be extracted as `{{ zipCodes }}`. To access the array items, you can use `{{ zipCodes[0] }}` and `{{ zipCodes[1] }}`.\n\n4. To extract array of objects, you can use the following schema:\n\n```json\n{\n \"type\": \"array\",\n \"name\": \"people\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n },\n \"zipCodes\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n }\n }\n}\n```\n\nThis will be extracted as `{{ people }}`. To access the array items, you can use `{{ people[n].name }}`, `{{ people[n].age }}`, `{{ people[n].zipCodes }}`, `{{ people[n].zipCodes[0] }}` and `{{ people[n].zipCodes[1] }}`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/JsonSchema"
- }
- ]
- },
- "aliases": {
- "description": "These are additional variables to create.\n\nThese will be accessible during the call as `{{key}}` and stored in `call.artifact.variableValues` after the call.\n\nExample:\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{name}}\"\n },\n {\n \"key\": \"fullName\",\n \"value\": \"{{firstName}} {{lastName}}\"\n },\n {\n \"key\": \"greeting\",\n \"value\": \"Hello {{name}}, welcome to {{company}}!\"\n },\n {\n \"key\": \"customerCity\",\n \"value\": \"{{addresses[0].city}}\"\n },\n {\n \"key\": \"something\",\n \"value\": \"{{any liquid}}\"\n }\n ]\n}\n```\n\nThis will create variables `customerName`, `fullName`, `greeting`, `customerCity`, and `something`. To access these variables, you can reference them as `{{customerName}}`, `{{fullName}}`, `{{greeting}}`, `{{customerCity}}`, and `{{something}}`.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/VariableExtractionAlias"
- }
- }
- }
- },
- "HandoffDestinationAssistant": {
+ "LivekitSmartEndpointingPlan": {
"type": "object",
"properties": {
- "type": {
+ "provider": {
"type": "string",
+ "description": "This is the provider for the smart endpointing plan.",
"enum": [
- "assistant"
- ]
- },
- "contextEngineeringPlan": {
- "description": "This is the plan for manipulating the message context before handing off the call to the next assistant.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/ContextEngineeringPlanLastNMessages",
- "title": "Last N Messages"
- },
- {
- "$ref": "#/components/schemas/ContextEngineeringPlanNone",
- "title": "None"
- },
- {
- "$ref": "#/components/schemas/ContextEngineeringPlanAll",
- "title": "All"
- }
- ]
- },
- "assistantName": {
- "type": "string",
- "description": "This is the assistant to transfer the call to. You must provide either assistantName or assistantId."
+ "vapi",
+ "livekit",
+ "custom-endpointing-model"
+ ],
+ "example": "livekit"
},
- "assistantId": {
+ "waitFunction": {
"type": "string",
- "description": "This is the assistant id to transfer the call to. You must provide either assistantName or assistantId."
- },
- "assistant": {
- "description": "This is a transient assistant to transfer the call to. You may provide a transient assistant in the response `handoff-destination-request` in a dynamic handoff.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateAssistantDTO"
- }
- ]
- },
- "variableExtractionPlan": {
- "description": "This is the variable extraction plan for the handoff tool.",
- "allOf": [
- {
- "$ref": "#/components/schemas/VariableExtractionPlan"
- }
+ "description": "This expression describes how long the bot will wait to start speaking based on the likelihood that the user has reached an endpoint.\n\nThis is a millisecond valued function. It maps probabilities (real numbers on [0,1]) to milliseconds that the bot should wait before speaking ([0, \\infty]). Any negative values that are returned are set to zero (the bot can't start talking in the past).\n\nA probability of zero represents very high confidence that the caller has stopped speaking, and would like the bot to speak to them. A probability of one represents very high confidence that the caller is still speaking.\n\nUnder the hood, this is parsed into a mathjs expression. Whatever you use to write your expression needs to be valid with respect to mathjs\n\n@default \"20 + 500 * sqrt(x) + 2500 * x^3\"",
+ "examples": [
+ "70 + 4000 * x",
+ "200 + 8000 * x",
+ "4000 * (1 - cos(pi * x))"
]
- },
- "description": {
- "type": "string",
- "description": "This is the description of the destination, used by the AI to choose when and how to transfer the call."
}
},
"required": [
- "type"
+ "provider"
]
},
- "HandoffDestinationDynamic": {
+ "CustomEndpointingModelSmartEndpointingPlan": {
"type": "object",
"properties": {
- "type": {
+ "provider": {
"type": "string",
+ "description": "This is the provider for the smart endpointing plan. Use `custom-endpointing-model` for custom endpointing providers that are not natively supported.",
"enum": [
- "dynamic"
- ]
+ "vapi",
+ "livekit",
+ "custom-endpointing-model"
+ ],
+ "example": "custom-endpointing-model"
},
"server": {
- "description": "This is where Vapi will send the handoff-destination-request webhook in a dynamic handoff.\n\nThe order of precedence is:\n\n1. tool.server.url\n2. assistant.server.url\n3. phoneNumber.server.url\n4. org.server.url",
+ "description": "This is where the endpointing request will be sent. If not provided, will be sent to `assistant.server`. If that does not exist either, will be sent to `org.server`.\n\nRequest Example:\n\nPOST https://{server.url}\nContent-Type: application/json\n\n{\n \"message\": {\n \"type\": \"call.endpointing.request\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"message\": \"Hello, how are you?\",\n \"time\": 1234567890,\n \"secondsFromStart\": 0\n }\n ],\n ...other metadata about the call...\n }\n}\n\nResponse Expected:\n{\n \"timeoutSeconds\": 0.5\n}\n\nThe timeout is the number of seconds to wait before considering the user's speech as finished. The endpointing timeout is automatically reset each time a new transcript is received (and another `call.endpointing.request` is sent).",
"allOf": [
{
"$ref": "#/components/schemas/Server"
}
]
- },
- "description": {
- "type": "string",
- "description": "This is the description of the destination, used by the AI to choose when and how to transfer the call."
}
},
"required": [
- "type"
+ "provider"
]
},
- "CreateHandoffToolDTO": {
+ "TranscriptionEndpointingPlan": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
+ "onPunctuationSeconds": {
+ "type": "number",
+ "description": "The minimum number of seconds to wait after transcription ending with punctuation before sending a request to the model. Defaults to 0.1.\n\nThis setting exists because the transcriber punctuates the transcription when it's more confident that customer has completed a thought.\n\n@default 0.1",
+ "minimum": 0,
+ "maximum": 3,
+ "example": 0.1
},
- "type": {
- "type": "string",
- "description": "This is the type of the tool.\nWhen you're using handoff tool, we recommend adding this to your system prompt\n---\n# System context\n\nYou are part of a multi-agent system designed to make agent coordination and execution easy. Agents uses two primary abstraction: **Agents** and **Handoffs**. An agent encompasses instructions and tools and can hand off a conversation to another agent when appropriate. Handoffs are achieved by calling a handoff function, generally named `handoff_to_`. Handoffs between agents are handled seamlessly in the background; do not mention or draw attention to these handoffs in your conversation with the user.\n\n# Agent context\n\n{put your agent system prompt here}\n---",
- "enum": [
- "handoff"
+ "onNoPunctuationSeconds": {
+ "type": "number",
+ "description": "The minimum number of seconds to wait after transcription ending without punctuation before sending a request to the model. Defaults to 1.5.\n\nThis setting exists to catch the cases where the transcriber was not confident enough to punctuate the transcription, but the customer is done and has been silent for a long time.\n\n@default 1.5",
+ "minimum": 0,
+ "maximum": 3,
+ "example": 1.5
+ },
+ "onNumberSeconds": {
+ "type": "number",
+ "description": "The minimum number of seconds to wait after transcription ending with a number before sending a request to the model. Defaults to 0.4.\n\nThis setting exists because the transcriber will sometimes punctuate the transcription ending with a number, even though the customer hasn't uttered the full number. This happens commonly for long numbers when the customer reads the number in chunks.\n\n@default 0.5",
+ "minimum": 0,
+ "maximum": 3,
+ "example": 0.5
+ }
+ }
+ },
+ "StartSpeakingPlan": {
+ "type": "object",
+ "properties": {
+ "waitSeconds": {
+ "type": "number",
+ "description": "This is how long assistant waits before speaking. Defaults to 0.4.\n\nThis is the minimum it will wait but if there is latency is the pipeline, this minimum will be exceeded. This is intended as a stopgap in case the pipeline is moving too fast.\n\nExample:\n- If model generates tokens and voice generates bytes within 100ms, the pipeline still waits 300ms before outputting speech.\n\nUsage:\n- If the customer is taking long pauses, set this to a higher value.\n- If the assistant is accidentally jumping in too much, set this to a higher value.\n\n@default 0.4",
+ "minimum": 0,
+ "maximum": 5,
+ "example": 0.4
+ },
+ "smartEndpointingEnabled": {
+ "example": false,
+ "deprecated": true,
+ "oneOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "string",
+ "enum": [
+ "livekit"
+ ]
+ }
]
},
- "destinations": {
+ "smartEndpointingPlan": {
+ "description": "This is the plan for smart endpointing. Pick between Vapi smart endpointing, LiveKit, or custom endpointing model (or nothing). We strongly recommend using livekit endpointing when working in English. LiveKit endpointing is not supported in other languages, yet.\n\nIf this is set, it will override and take precedence over `transcriptionEndpointingPlan`.\nThis plan will still be overridden by any matching `customEndpointingRules`.\n\nIf this is not set, the system will automatically use the transcriber's built-in endpointing capabilities if available.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/VapiSmartEndpointingPlan",
+ "title": "Vapi"
+ },
+ {
+ "$ref": "#/components/schemas/LivekitSmartEndpointingPlan",
+ "title": "Livekit"
+ },
+ {
+ "$ref": "#/components/schemas/CustomEndpointingModelSmartEndpointingPlan",
+ "title": "Custom Endpointing Model"
+ }
+ ]
+ },
+ "customEndpointingRules": {
"type": "array",
- "description": "These are the destinations that the call can be handed off to.\n\nUsage:\n1. Single destination\n\nUse `assistantId` to handoff the call to a saved assistant, or `assistantName` to handoff the call to an assistant in the same squad.\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\", // or \"assistantName\": \"Assistant123\"\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n2. Multiple destinations\n\n2.1. Multiple Tools, Each With One Destination (OpenAI recommended)\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\",\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n },\n ],\n },\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-456\",\n \"description\": \"customer wants to be handed off to assistant-456\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n2.2. One Tool, Multiple Destinations (Anthropic recommended)\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\",\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n },\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-456\",\n \"description\": \"customer wants to be handed off to assistant-456\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n3. 
Dynamic destination\n\n3.1 To determine the destination dynamically, supply a `dynamic` handoff destination type and a `server` object.\n VAPI will send a handoff-destination-request webhook to the `server.url`.\n The response from the server will be used as the destination (if valid).\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"dynamic\",\n \"server\": {\n \"url\": \"https://example.com\"\n }\n }\n ],\n }\n ]\n}\n```\n\n3.2. To pass custom parameters to the server, you can use the `function` object.\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"dynamic\",\n \"server\": {\n \"url\": \"https://example.com\"\n },\n }\n ],\n \"function\": {\n \"name\": \"handoff\",\n \"description\": \"Call this function when the customer is ready to be handed off to the next assistant\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"destination\": {\n \"type\": \"string\",\n \"description\": \"Use dynamic when customer is ready to be handed off to the next assistant\",\n \"enum\": [\"dynamic\"]\n },\n \"customerAreaCode\": {\n \"type\": \"number\",\n \"description\": \"Area code of the customer\"\n },\n \"customerIntent\": {\n \"type\": \"string\",\n \"enum\": [\"new-customer\", \"existing-customer\"],\n \"description\": \"Use new-customer when customer is a new customer, existing-customer when customer is an existing customer\"\n },\n \"customerSentiment\": {\n \"type\": \"string\",\n \"enum\": [\"positive\", \"negative\", \"neutral\"],\n \"description\": \"Use positive when customer is happy, negative when customer is unhappy, neutral when customer is neutral\"\n }\n }\n }\n }\n }\n ]\n}\n```\n\nThe properties `customerAreaCode`, `customerIntent`, and `customerSentiment` will be passed to the server in the webhook request body.",
+ "description": "These are the custom endpointing rules to set an endpointing timeout based on a regex on the customer's speech or the assistant's last message.\n\nUsage:\n- If you have yes/no questions like \"are you interested in a loan?\", you can set a shorter timeout.\n- If you have questions where the customer may pause to look up information like \"what's my account number?\", you can set a longer timeout.\n- If you want to wait longer while customer is enumerating a list of numbers, you can set a longer timeout.\n\nThese rules have the highest precedence and will override both `smartEndpointingPlan` and `transcriptionEndpointingPlan` when a rule is matched.\n\nThe rules are evaluated in order and the first one that matches will be used.\n\nOrder of precedence for endpointing:\n1. customEndpointingRules (if any match)\n2. smartEndpointingPlan (if set)\n3. transcriptionEndpointingPlan\n\n@default []",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/HandoffDestinationAssistant",
+ "$ref": "#/components/schemas/AssistantCustomEndpointingRule",
"title": "Assistant"
},
{
- "$ref": "#/components/schemas/HandoffDestinationDynamic",
- "title": "Dynamic"
+ "$ref": "#/components/schemas/CustomerCustomEndpointingRule",
+ "title": "Customer"
+ },
+ {
+ "$ref": "#/components/schemas/BothCustomEndpointingRule",
+ "title": "Both"
}
]
}
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "transcriptionEndpointingPlan": {
+ "description": "This determines how a customer speech is considered done (endpointing) using the transcription of customer's speech.\n\nOnce an endpoint is triggered, the request is sent to `assistant.model`.\n\nNote: This plan is only used if `smartEndpointingPlan` is not set and transcriber does not have built-in endpointing capabilities. If both are provided, `smartEndpointingPlan` takes precedence.\nThis plan will also be overridden by any matching `customEndpointingRules`.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/TranscriptionEndpointingPlan"
}
]
}
- },
- "required": [
- "type"
- ]
+ }
},
- "CreateCustomKnowledgeBaseDTO": {
+ "SmartDenoisingPlan": {
"type": "object",
"properties": {
- "provider": {
- "type": "string",
- "description": "This knowledge base is bring your own knowledge base implementation.",
- "enum": [
- "custom-knowledge-base"
- ]
- },
- "server": {
- "description": "This is where the knowledge base request will be sent.\n\nRequest Example:\n\nPOST https://{server.url}\nContent-Type: application/json\n\n{\n \"messsage\": {\n \"type\": \"knowledge-base-request\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"Why is ocean blue?\"\n }\n ],\n ...other metadata about the call...\n }\n}\n\nResponse Expected:\n```\n{\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"The ocean is blue because water absorbs everything but blue.\",\n }, // YOU CAN RETURN THE EXACT RESPONSE TO SPEAK\n \"documents\": [\n {\n \"content\": \"The ocean is blue primarily because water absorbs colors in the red part of the light spectrum and scatters the blue light, making it more visible to our eyes.\",\n \"similarity\": 1\n },\n {\n \"content\": \"Blue light is scattered more by the water molecules than other colors, enhancing the blue appearance of the ocean.\",\n \"similarity\": .5\n }\n ] // OR, YOU CAN RETURN AN ARRAY OF DOCUMENTS THAT WILL BE SENT TO THE MODEL\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
- ]
+ "enabled": {
+ "type": "boolean",
+ "description": "Whether smart denoising using Krisp is enabled.",
+ "default": true
}
- },
- "required": [
- "provider",
- "server"
- ]
+ }
},
- "KnowledgeBase": {
+ "FourierDenoisingPlan": {
"type": "object",
"properties": {
- "name": {
- "type": "string",
- "description": "The name of the knowledge base",
- "example": "My Knowledge Base"
+ "enabled": {
+ "type": "boolean",
+ "description": "Whether Fourier denoising is enabled. Note that this is experimental and may not work as expected.",
+ "default": false
},
- "provider": {
- "type": "string",
- "description": "The provider of the knowledge base",
- "enum": [
- "google"
- ],
- "example": "google"
+ "mediaDetectionEnabled": {
+ "type": "boolean",
+ "description": "Whether automatic media detection is enabled. When enabled, the filter will automatically\ndetect consistent background TV/music/radio and switch to more aggressive filtering settings.\nOnly applies when enabled is true.",
+ "example": true,
+ "default": true
},
- "model": {
- "type": "string",
- "description": "The model to use for the knowledge base",
- "enum": [
- "gemini-2.5-pro",
- "gemini-2.5-flash",
- "gemini-2.5-flash-lite",
- "gemini-2.0-flash-thinking-exp",
- "gemini-2.0-pro-exp-02-05",
- "gemini-2.0-flash",
- "gemini-2.0-flash-lite",
- "gemini-2.0-flash-exp",
- "gemini-2.0-flash-realtime-exp",
- "gemini-1.5-flash",
- "gemini-1.5-flash-002",
- "gemini-1.5-pro",
- "gemini-1.5-pro-002",
- "gemini-1.0-pro"
- ]
+ "staticThreshold": {
+ "type": "number",
+ "description": "Static threshold in dB used as fallback when no baseline is established.",
+ "example": -35,
+ "minimum": -80,
+ "maximum": 0,
+ "default": -35
},
- "description": {
- "type": "string",
- "description": "A description of the knowledge base"
+ "baselineOffsetDb": {
+ "type": "number",
+ "description": "How far below the rolling baseline to filter audio, in dB.\nLower values (e.g., -10) are more aggressive, higher values (e.g., -20) are more conservative.",
+ "example": -15,
+ "minimum": -30,
+ "maximum": -5,
+ "default": -15
},
- "fileIds": {
- "description": "The file IDs associated with this knowledge base",
- "type": "array",
- "items": {
- "type": "string"
- }
+ "windowSizeMs": {
+ "type": "number",
+ "description": "Rolling window size in milliseconds for calculating the audio baseline.\nLarger windows adapt more slowly but are more stable.",
+ "example": 3000,
+ "minimum": 1000,
+ "maximum": 30000,
+ "default": 3000
+ },
+ "baselinePercentile": {
+ "type": "number",
+ "description": "Percentile to use for baseline calculation (1-99).\nHigher percentiles (e.g., 85) focus on louder speech, lower percentiles (e.g., 50) include quieter speech.",
+ "example": 85,
+ "minimum": 1,
+ "maximum": 99,
+ "default": 85
}
- },
- "required": [
- "name",
- "provider",
- "description",
- "fileIds"
- ]
+ }
},
- "CreateQueryToolDTO": {
+ "BackgroundSpeechDenoisingPlan": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
- },
- "type": {
- "type": "string",
- "enum": [
- "query"
- ],
- "description": "The type of tool. \"query\" for Query tool."
- },
- "knowledgeBases": {
- "description": "The knowledge bases to query",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/KnowledgeBase"
- }
+ "smartDenoisingPlan": {
+ "description": "Whether smart denoising using Krisp is enabled.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SmartDenoisingPlan"
+ }
+ ]
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "fourierDenoisingPlan": {
+ "description": "Whether Fourier denoising is enabled. Note that this is experimental and may not work as expected.\n\nThis can be combined with smart denoising, and will be run afterwards.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/FourierDenoisingPlan"
}
]
}
- },
- "required": [
- "type"
- ]
+ }
},
- "CreateGoogleCalendarCreateEventToolDTO": {
+ "TransferAssistant": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
- },
- "type": {
+ "name": {
"type": "string",
- "enum": [
- "google.calendar.event.create"
- ],
- "description": "The type of tool. \"google.calendar.event.create\" for Google Calendar Create Event tool."
+ "description": "Optional name for the transfer assistant",
+ "maxLength": 100,
+ "default": "transfer-assistant",
+ "example": "Sales Transfer Assistant"
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "model": {
+ "description": "Model configuration for the transfer assistant",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/TransferAssistantModel"
}
]
- }
- },
- "required": [
- "type"
- ]
- },
- "CreateGoogleSheetsRowAppendToolDTO": {
- "type": "object",
- "properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
},
- "type": {
+ "voice": {
+ "description": "These are the options for the transfer assistant's voice.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AzureVoice",
+ "title": "AzureVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaVoice",
+ "title": "CartesiaVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CustomVoice",
+ "title": "CustomVoice"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramVoice",
+ "title": "DeepgramVoice"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsVoice",
+ "title": "ElevenLabsVoice"
+ },
+ {
+ "$ref": "#/components/schemas/HumeVoice",
+ "title": "HumeVoice"
+ },
+ {
+ "$ref": "#/components/schemas/LMNTVoice",
+ "title": "LMNTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/NeuphonicVoice",
+ "title": "NeuphonicVoice"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoice",
+ "title": "OpenAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/PlayHTVoice",
+ "title": "PlayHTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/WellSaidVoice",
+ "title": "WellSaidVoice"
+ },
+ {
+ "$ref": "#/components/schemas/RimeAIVoice",
+ "title": "RimeAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SmallestAIVoice",
+ "title": "SmallestAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/TavusVoice",
+ "title": "TavusVoice"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoice",
+ "title": "VapiVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SesameVoice",
+ "title": "SesameVoice"
+ },
+ {
+ "$ref": "#/components/schemas/InworldVoice",
+ "title": "InworldVoice"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxVoice",
+ "title": "MinimaxVoice"
+ }
+ ]
+ },
+ "transcriber": {
+ "description": "These are the options for the transfer assistant's transcriber.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AssemblyAITranscriber",
+ "title": "AssemblyAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/AzureSpeechTranscriber",
+ "title": "AzureSpeechTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CustomTranscriber",
+ "title": "CustomTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramTranscriber",
+ "title": "DeepgramTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsTranscriber",
+ "title": "ElevenLabsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GladiaTranscriber",
+ "title": "GladiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleTranscriber",
+ "title": "GoogleTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SpeechmaticsTranscriber",
+ "title": "SpeechmaticsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/TalkscriberTranscriber",
+ "title": "TalkscriberTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAITranscriber",
+ "title": "OpenAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaTranscriber",
+ "title": "CartesiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SonioxTranscriber",
+ "title": "SonioxTranscriber"
+ }
+ ]
+ },
+ "firstMessage": {
"type": "string",
- "enum": [
- "google.sheets.row.append"
- ],
- "description": "The type of tool. \"google.sheets.row.append\" for Google Sheets Row Append tool."
+ "description": "This is the first message that the transfer assistant will say.\nThis can also be a URL to a custom audio file.\n\nIf unspecified, assistant will wait for user to speak and use the model to respond once they speak.",
+ "example": "Hello! I understand you need to be transferred. Let me connect you."
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "backgroundSound": {
+ "description": "This is the background sound in the transfer assistant call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "off",
+ "office"
+ ],
+ "example": "office"
+ },
+ {
+ "type": "string",
+ "format": "uri",
+ "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
+ }
+ ]
+ },
+ "startSpeakingPlan": {
+ "description": "This is the plan for when the transfer assistant should start talking.\n\nYou should configure this if the transfer assistant needs different endpointing behavior than the base assistant.\n\nIf this is not set, the transfer assistant will inherit the start speaking plan from the base assistant.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/StartSpeakingPlan"
}
]
- }
- },
- "required": [
- "type"
- ]
- },
- "CreateGoogleCalendarCheckAvailabilityToolDTO": {
- "type": "object",
- "properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
},
- "type": {
+ "firstMessageMode": {
"type": "string",
+ "description": "This is the mode for the first message. Default is 'assistant-speaks-first'.\n\nUse:\n- 'assistant-speaks-first' to have the assistant speak first.\n- 'assistant-waits-for-user' to have the assistant wait for the user to speak first.\n- 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state.\n\n@default 'assistant-speaks-first'",
"enum": [
- "google.calendar.availability.check"
+ "assistant-speaks-first",
+ "assistant-speaks-first-with-model-generated-message",
+ "assistant-waits-for-user"
],
- "description": "The type of tool. \"google.calendar.availability.check\" for Google Calendar Check Availability tool."
+ "example": "assistant-speaks-first"
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "maxDurationSeconds": {
+ "type": "number",
+ "description": "This is the maximum duration in seconds for the transfer assistant conversation.\nAfter this time, the transfer will be cancelled automatically.\n@default 120",
+ "minimum": 10,
+ "maximum": 43200,
+ "example": 120
+ },
+ "backgroundSpeechDenoisingPlan": {
+ "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nSmart denoising can be combined with or used independently of Fourier denoising.\n\nOrder of precedence:\n- Smart denoising\n- Fourier denoising",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
}
]
+ },
+ "silenceTimeoutSeconds": {
+ "type": "number",
+ "description": "This is the number of seconds of silence to wait before ending the call. Defaults to 30.\n\n@default 30",
+ "minimum": 10,
+ "maximum": 3600
}
},
"required": [
- "type"
+ "model"
]
},
- "CreateSlackSendMessageToolDTO": {
+ "TransferCancelToolUserEditable": {
"type": "object",
"properties": {
"messages": {
@@ -12342,9 +12539,9 @@
"type": {
"type": "string",
"enum": [
- "slack.message.send"
+ "transferCancel"
],
- "description": "The type of tool. \"slack.message.send\" for Slack Send Message tool."
+ "description": "The type of tool. \"transferCancel\" for Transfer Cancel tool. This tool can only be used during warm-transfer-experimental by the transfer assistant to cancel an ongoing transfer and return the call back to the original assistant when the transfer cannot be completed."
},
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
@@ -12359,20 +12556,7 @@
"type"
]
},
- "McpToolMetadata": {
- "type": "object",
- "properties": {
- "protocol": {
- "type": "string",
- "enum": [
- "sse",
- "shttp"
- ],
- "description": "This is the protocol used for MCP communication. Defaults to Streamable HTTP."
- }
- }
- },
- "CreateMcpToolDTO": {
+ "TransferSuccessfulToolUserEditable": {
"type": "object",
"properties": {
"messages": {
@@ -12402,20 +12586,9 @@
"type": {
"type": "string",
"enum": [
- "mcp"
+ "transferSuccessful"
],
- "description": "The type of tool. \"mcp\" for MCP tool."
- },
- "server": {
- "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
- ]
- },
- "metadata": {
- "$ref": "#/components/schemas/McpToolMetadata"
+ "description": "The type of tool. \"transferSuccessful\" for Transfer Successful tool. This tool can only be used during warm-transfer-experimental by the transfer assistant to confirm that the transfer should proceed and finalize the handoff to the destination."
},
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
@@ -12430,148 +12603,247 @@
"type"
]
},
- "CreateGoHighLevelCalendarAvailabilityToolDTO": {
+ "SummaryPlan": {
"type": "object",
"properties": {
"messages": {
+ "description": "These are the messages used to generate the summary.\n\n@default: ```\n[\n {\n \"role\": \"system\",\n \"content\": \"You are an expert note-taker. You will be given a transcript of a call. Summarize the call in 2-3 sentences. DO NOT return anything except the summary.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Here is the transcript:\\n\\n{{transcript}}\\n\\n. Here is the ended reason of the call:\\n\\n{{endedReason}}\\n\\n\"\n }\n]```\n\nYou can customize by providing any messages you want.\n\nHere are the template variables available:\n- {{transcript}}: The transcript of the call from `call.artifact.transcript` \n- {{systemPrompt}}: The system prompt of the call from `assistant.model.messages[type=system].content` \n- {{messages}}: The messages of the call from `assistant.model.messages` \n- {{endedReason}}: The ended reason of the call from `call.endedReason`",
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
+ "type": "object"
}
},
- "type": {
- "type": "string",
- "enum": [
- "gohighlevel.calendar.availability.check"
- ],
- "description": "The type of tool. \"gohighlevel.calendar.availability.check\" for GoHighLevel Calendar Availability Check tool."
+ "enabled": {
+ "type": "boolean",
+ "description": "This determines whether a summary is generated and stored in `call.analysis.summary`. Defaults to true.\n\nUsage:\n- If you want to disable the summary, set this to false.\n\n@default true"
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "This is how long the request is tried before giving up. When request times out, `call.analysis.summary` will be empty.\n\nUsage:\n- To guarantee the summary is generated, set this value high. Note, this will delay the end of call report in cases where model is slow to respond.\n\n@default 5 seconds",
+ "minimum": 1,
+ "maximum": 60
}
- },
- "required": [
- "type"
- ]
+ }
},
- "CreateGoHighLevelCalendarEventCreateToolDTO": {
+ "TransferPlan": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
+ "mode": {
+ "type": "string",
+ "description": "This configures how transfer is executed and the experience of the destination party receiving the call.\n\nUsage:\n- `blind-transfer`: The assistant forwards the call to the destination without any message or summary.\n- `blind-transfer-add-summary-to-sip-header`: The assistant forwards the call to the destination and adds a SIP header X-Transfer-Summary to the call to include the summary.\n- `warm-transfer-say-message`: The assistant dials the destination, delivers the `message` to the destination party, connects the customer, and leaves the call.\n- `warm-transfer-say-summary`: The assistant dials the destination, provides a summary of the call to the destination party, connects the customer, and leaves the call.\n- `warm-transfer-wait-for-operator-to-speak-first-and-then-say-message`: The assistant dials the destination, waits for the operator to speak, delivers the `message` to the destination party, and then connects the customer.\n- `warm-transfer-wait-for-operator-to-speak-first-and-then-say-summary`: The assistant dials the destination, waits for the operator to speak, provides a summary of the call to the destination party, and then connects the customer.\n- `warm-transfer-twiml`: The assistant dials the destination, executes the twiml instructions on the destination call leg, connects the customer, and leaves the call.\n- `warm-transfer-experimental`: The assistant puts the customer on hold, dials the destination, and if the destination answers (and is human), delivers a message or summary before connecting the customer. If the destination is unreachable or not human (e.g., with voicemail detection), the assistant delivers the `fallbackMessage` to the customer and optionally ends the call.\n\n@default 'blind-transfer'",
+ "enum": [
+ "blind-transfer",
+ "blind-transfer-add-summary-to-sip-header",
+ "warm-transfer-say-message",
+ "warm-transfer-say-summary",
+ "warm-transfer-twiml",
+ "warm-transfer-wait-for-operator-to-speak-first-and-then-say-message",
+ "warm-transfer-wait-for-operator-to-speak-first-and-then-say-summary",
+ "warm-transfer-experimental"
+ ]
+ },
+ "message": {
+ "description": "This is the message the assistant will deliver to the destination party before connecting the customer.\n\nUsage:\n- Used only when `mode` is `blind-transfer-add-summary-to-sip-header`, `warm-transfer-say-message`, `warm-transfer-wait-for-operator-to-speak-first-and-then-say-message`, or `warm-transfer-experimental`.",
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "$ref": "#/components/schemas/CustomMessage"
+ }
+ ]
+ },
+ "timeout": {
+ "type": "number",
+ "description": "This is the timeout in seconds for the warm-transfer-wait-for-operator-to-speak-first-and-then-say-message/summary\n\n@default 60",
+ "minimum": 1,
+ "maximum": 600,
+ "default": 60
+ },
+ "sipVerb": {
+ "type": "object",
+ "description": "This specifies the SIP verb to use while transferring the call.\n- 'refer': Uses SIP REFER to transfer the call (default)\n- 'bye': Ends current call with SIP BYE\n- 'dial': Uses SIP DIAL to transfer the call",
+ "default": "refer",
+ "enum": [
+ "refer",
+ "bye",
+ "dial"
+ ]
+ },
+ "dialTimeout": {
+ "type": "number",
+ "description": "This sets the timeout for the dial operation in seconds. This is the duration the call will ring before timing out.\n\nOnly applicable when `sipVerb='dial'`. Not applicable for SIP REFER or BYE.\n\n@default 60",
+ "minimum": 1,
+ "maximum": 600,
+ "default": 60
+ },
+ "holdAudioUrl": {
+ "type": "string",
+ "description": "This is the URL to an audio file played while the customer is on hold during transfer.\n\nUsage:\n- Used only when `mode` is `warm-transfer-experimental`.\n- Used when transferring calls to play hold audio for the customer.\n- Must be a publicly accessible URL to an audio file.\n- Supported formats: MP3 and WAV.\n- If not provided, the default hold audio will be used."
+ },
+ "transferCompleteAudioUrl": {
+ "type": "string",
+ "description": "This is the URL to an audio file played after the warm transfer message or summary is delivered to the destination party.\nIt can be used to play a custom sound like 'beep' to notify that the transfer is complete.\n\nUsage:\n- Used only when `mode` is `warm-transfer-experimental`.\n- Used when transferring calls to play hold audio for the destination party.\n- Must be a publicly accessible URL to an audio file.\n- Supported formats: MP3 and WAV."
+ },
+ "contextEngineeringPlan": {
+ "description": "This is the plan for manipulating the message context before initiating the warm transfer.\nUsage:\n- Used only when `mode` is `warm-transfer-experimental`.\n- These messages will automatically be added to the transferAssistant's system message.\n- If 'none', we will not add any transcript to the transferAssistant's system message.\n- If you want to provide your own messages, use transferAssistant.model.messages instead.\n\n@default { type: 'all' }",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ContextEngineeringPlanLastNMessages",
+ "title": "Last N Messages"
+ },
+ {
+ "$ref": "#/components/schemas/ContextEngineeringPlanNone",
+ "title": "None"
+ },
+ {
+ "$ref": "#/components/schemas/ContextEngineeringPlanAll",
+ "title": "All"
+ }
+ ]
+ },
+ "twiml": {
+ "type": "string",
+ "description": "This is the TwiML instructions to execute on the destination call leg before connecting the customer.\n\nUsage:\n- Used only when `mode` is `warm-transfer-twiml`.\n- Supports only `Play`, `Say`, `Gather`, `Hangup` and `Pause` verbs.\n- Maximum length is 4096 characters.\n\nExample:\n```\nHello, transferring a customer to you.\n\nThey called about billing questions.\n```",
+ "maxLength": 4096
+ },
+ "summaryPlan": {
+ "description": "This is the plan for generating a summary of the call to present to the destination party.\n\nUsage:\n- Used only when `mode` is `blind-transfer-add-summary-to-sip-header` or `warm-transfer-say-summary` or `warm-transfer-wait-for-operator-to-speak-first-and-then-say-summary` or `warm-transfer-experimental`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SummaryPlan"
+ }
+ ]
+ },
+ "sipHeadersInReferToEnabled": {
+ "type": "boolean",
+ "description": "This flag includes the sipHeaders from above in the refer to sip uri as url encoded query params.\n\n@default false"
+ },
+ "fallbackPlan": {
+ "description": "This configures the fallback plan when the transfer fails (destination unreachable, busy, or not human).\n\nUsage:\n- Used only when `mode` is `warm-transfer-experimental`.\n- If not provided when using `warm-transfer-experimental`, a default message will be used.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/TransferFallbackPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "mode"
+ ]
+ },
+ "TransferDestinationNumber": {
+ "type": "object",
+ "properties": {
+ "message": {
+ "description": "This is spoken to the customer before connecting them to the destination.\n\nUsage:\n- If this is not provided and transfer tool messages is not provided, default is \"Transferring the call now\".\n- If set to \"\", nothing is spoken. This is useful when you want to silently transfer. This is especially useful when transferring between assistants in a squad. In this scenario, you likely also want to set `assistant.firstMessageMode=assistant-speaks-first-with-model-generated-message` for the destination assistant.\n\nThis accepts a string or a ToolMessageStart class. Latter is useful if you want to specify multiple messages for different languages through the `contents` field.",
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "$ref": "#/components/schemas/CustomMessage"
+ }
+ ]
},
"type": {
"type": "string",
"enum": [
- "gohighlevel.calendar.event.create"
- ],
- "description": "The type of tool. \"gohighlevel.calendar.event.create\" for GoHighLevel Calendar Event Create tool."
+ "number"
+ ]
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "numberE164CheckEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.\n\nUse cases:\n- `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.\n- `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.\n\nIf `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\\+?[a-zA-Z0-9]+$/`).\n\n@default true (E164 check is enabled)",
+ "default": true
+ },
+ "number": {
+ "type": "string",
+ "description": "This is the phone number to transfer the call to.",
+ "minLength": 3,
+ "maxLength": 40
+ },
+ "extension": {
+ "type": "string",
+ "description": "This is the extension to dial after transferring the call to the `number`.",
+ "minLength": 1,
+ "maxLength": 10
+ },
+ "callerId": {
+ "type": "string",
+ "description": "This is the caller ID to use when transferring the call to the `number`.\n\nUsage:\n- If not provided, the caller ID will be the number the call is coming **from**.\n Example: a customer with number +14151111111 calls in to and the assistant transfers out to +16470000000. +16470000000 will see +14151111111 as the caller.\n For inbound calls, the caller ID is the customer's number. For outbound calls, the caller ID is the phone number of the assistant.\n- To change this behavior, provide a `callerId`.\n- Set to '{{customer.number}}' to always use the customer's number as the caller ID.\n- Set to '{{phoneNumber.number}}' to always use the phone number of the assistant as the caller ID.\n- Set to any E164 number to always use that number as the caller ID. This needs to be a number that is owned or verified by your Transport provider like Twilio.\n\nFor Twilio, you can read up more here: https://www.twilio.com/docs/voice/twiml/dial#callerid",
+ "maxLength": 40
+ },
+ "transferPlan": {
+ "description": "This configures how transfer is executed and the experience of the destination party receiving the call. Defaults to `blind-transfer`.\n\n@default `transferPlan.mode='blind-transfer'`",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/TransferPlan"
}
]
+ },
+ "description": {
+ "type": "string",
+ "description": "This is the description of the destination, used by the AI to choose when and how to transfer the call."
}
},
"required": [
- "type"
+ "type",
+ "number"
]
},
- "CreateGoHighLevelContactCreateToolDTO": {
+ "TransferDestinationSip": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
+ "message": {
+ "description": "This is spoken to the customer before connecting them to the destination.\n\nUsage:\n- If this is not provided and transfer tool messages is not provided, default is \"Transferring the call now\".\n- If set to \"\", nothing is spoken. This is useful when you want to silently transfer. This is especially useful when transferring between assistants in a squad. In this scenario, you likely also want to set `assistant.firstMessageMode=assistant-speaks-first-with-model-generated-message` for the destination assistant.\n\nThis accepts a string or a ToolMessageStart class. Latter is useful if you want to specify multiple messages for different languages through the `contents` field.",
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "$ref": "#/components/schemas/CustomMessage"
+ }
+ ]
},
"type": {
"type": "string",
"enum": [
- "gohighlevel.contact.create"
- ],
- "description": "The type of tool. \"gohighlevel.contact.create\" for GoHighLevel Contact Create tool."
+ "sip"
+ ]
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "sipUri": {
+ "type": "string",
+ "description": "This is the SIP URI to transfer the call to."
+ },
+ "callerId": {
+ "type": "string",
+ "description": "This is the caller ID to use when transferring the call to the `sipUri`.\n\nUsage:\n- If not provided, the caller ID will be determined by the SIP infrastructure.\n- Set to '{{customer.number}}' to always use the customer's number as the caller ID.\n- Set to '{{phoneNumber.number}}' to always use the phone number of the assistant as the caller ID.\n- Set to any E164 number to always use that number as the caller ID.\n\nOnly applicable when `transferPlan.sipVerb='dial'`. Not applicable for SIP REFER.",
+ "maxLength": 40
+ },
+ "transferPlan": {
+ "description": "This configures how transfer is executed and the experience of the destination party receiving the call. Defaults to `blind-transfer`.\n\n@default `transferPlan.mode='blind-transfer'`",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/TransferPlan"
}
]
+ },
+ "sipHeaders": {
+ "type": "object",
+ "description": "These are custom headers to be added to SIP refer during transfer call."
+ },
+ "description": {
+ "type": "string",
+ "description": "This is the description of the destination, used by the AI to choose when and how to transfer the call."
}
},
"required": [
- "type"
+ "type",
+ "sipUri"
]
},
- "CreateGoHighLevelContactGetToolDTO": {
+ "CreateTransferCallToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -12601,1103 +12873,1068 @@
"type": {
"type": "string",
"enum": [
- "gohighlevel.contact.get"
- ],
- "description": "The type of tool. \"gohighlevel.contact.get\" for GoHighLevel Contact Get tool."
- },
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
+ "transferCall"
]
- }
- },
- "required": [
- "type"
- ]
- },
- "AnyscaleModel": {
- "type": "object",
- "properties": {
- "messages": {
- "description": "This is the starting state for the conversation.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIMessage"
- }
},
- "tools": {
+ "destinations": {
"type": "array",
- "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "description": "These are the destinations that the call can be transferred to. If no destinations are provided, server.url will be used to get the transfer destination once the tool is called.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/CreateApiRequestToolDTO",
- "title": "ApiRequestTool"
- },
- {
- "$ref": "#/components/schemas/CreateBashToolDTO",
- "title": "BashTool"
- },
- {
- "$ref": "#/components/schemas/CreateComputerToolDTO",
- "title": "ComputerTool"
- },
- {
- "$ref": "#/components/schemas/CreateDtmfToolDTO",
- "title": "DtmfTool"
- },
- {
- "$ref": "#/components/schemas/CreateEndCallToolDTO",
- "title": "EndCallTool"
- },
- {
- "$ref": "#/components/schemas/CreateFunctionToolDTO",
- "title": "FunctionTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
- "title": "GoHighLevelCalendarAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
- "title": "GoHighLevelCalendarEventCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
- "title": "GoHighLevelContactCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
- "title": "GoHighLevelContactGetTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
- "title": "GoogleCalendarCheckAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
- "title": "GoogleCalendarCreateEventTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
- "title": "GoogleSheetsRowAppendTool"
- },
- {
- "$ref": "#/components/schemas/CreateHandoffToolDTO",
- "title": "HandoffTool"
- },
- {
- "$ref": "#/components/schemas/CreateMcpToolDTO",
- "title": "McpTool"
- },
- {
- "$ref": "#/components/schemas/CreateQueryToolDTO",
- "title": "QueryTool"
- },
- {
- "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
- "title": "SlackSendMessageTool"
- },
- {
- "$ref": "#/components/schemas/CreateSmsToolDTO",
- "title": "SmsTool"
+ "$ref": "#/components/schemas/TransferDestinationAssistant",
+ "title": "Assistant"
},
{
- "$ref": "#/components/schemas/CreateTextEditorToolDTO",
- "title": "TextEditorTool"
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "Number"
},
{
- "$ref": "#/components/schemas/CreateTransferCallToolDTO",
- "title": "TransferCallTool"
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "Sip"
}
]
}
},
- "toolIds": {
- "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "knowledgeBase": {
- "description": "These are the options for the knowledge base.",
- "oneOf": [
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
{
- "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
- "title": "Custom"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
- },
- "knowledgeBaseId": {
- "type": "string",
- "description": "This is the ID of the knowledge base the model will use."
- },
- "provider": {
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "ContextEngineeringPlanLastNMessages": {
+ "type": "object",
+ "properties": {
+ "type": {
"type": "string",
"enum": [
- "anyscale"
+ "lastNMessages"
]
},
- "model": {
- "type": "string",
- "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b"
- },
- "temperature": {
- "type": "number",
- "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
- "minimum": 0,
- "maximum": 2
- },
- "maxTokens": {
- "type": "number",
- "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
- "minimum": 50,
- "maximum": 10000
- },
- "emotionRecognitionEnabled": {
- "type": "boolean",
- "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false"
- },
- "numFastTurns": {
+ "maxMessages": {
"type": "number",
- "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
+ "description": "This is the maximum number of messages to include in the context engineering plan.",
"minimum": 0
}
},
"required": [
- "provider",
- "model"
+ "type",
+ "maxMessages"
]
},
- "AnthropicThinkingConfig": {
+ "ContextEngineeringPlanNone": {
"type": "object",
"properties": {
"type": {
"type": "string",
"enum": [
- "enabled"
+ "none"
]
- },
- "budgetTokens": {
- "type": "number",
- "description": "The maximum number of tokens to allocate for thinking.\nMust be between 1024 and 100000 tokens.",
- "minimum": 1024,
- "maximum": 100000
}
},
"required": [
- "type",
- "budgetTokens"
+ "type"
]
},
- "AnthropicModel": {
+ "ContextEngineeringPlanAll": {
"type": "object",
"properties": {
- "messages": {
- "description": "This is the starting state for the conversation.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIMessage"
- }
+ "type": {
+ "type": "string",
+ "enum": [
+ "all"
+ ]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "ContextEngineeringPlanUserAndAssistantMessages": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "userAndAssistantMessages"
+ ]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "HandoffDestinationAssistant": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "assistant"
+ ]
},
- "tools": {
- "type": "array",
- "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/CreateApiRequestToolDTO",
- "title": "ApiRequestTool"
- },
- {
- "$ref": "#/components/schemas/CreateBashToolDTO",
- "title": "BashTool"
- },
- {
- "$ref": "#/components/schemas/CreateComputerToolDTO",
- "title": "ComputerTool"
- },
- {
- "$ref": "#/components/schemas/CreateDtmfToolDTO",
- "title": "DtmfTool"
- },
- {
- "$ref": "#/components/schemas/CreateEndCallToolDTO",
- "title": "EndCallTool"
- },
- {
- "$ref": "#/components/schemas/CreateFunctionToolDTO",
- "title": "FunctionTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
- "title": "GoHighLevelCalendarAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
- "title": "GoHighLevelCalendarEventCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
- "title": "GoHighLevelContactCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
- "title": "GoHighLevelContactGetTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
- "title": "GoogleCalendarCheckAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
- "title": "GoogleCalendarCreateEventTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
- "title": "GoogleSheetsRowAppendTool"
- },
- {
- "$ref": "#/components/schemas/CreateHandoffToolDTO",
- "title": "HandoffTool"
- },
- {
- "$ref": "#/components/schemas/CreateMcpToolDTO",
- "title": "McpTool"
- },
- {
- "$ref": "#/components/schemas/CreateQueryToolDTO",
- "title": "QueryTool"
- },
- {
- "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
- "title": "SlackSendMessageTool"
- },
- {
- "$ref": "#/components/schemas/CreateSmsToolDTO",
- "title": "SmsTool"
- },
- {
- "$ref": "#/components/schemas/CreateTextEditorToolDTO",
- "title": "TextEditorTool"
- },
- {
- "$ref": "#/components/schemas/CreateTransferCallToolDTO",
- "title": "TransferCallTool"
- }
- ]
- }
- },
- "toolIds": {
- "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "knowledgeBase": {
- "description": "These are the options for the knowledge base.",
+ "contextEngineeringPlan": {
+ "description": "This is the plan for manipulating the message context before handing off the call to the next assistant.",
"oneOf": [
{
- "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
- "title": "Custom"
+ "$ref": "#/components/schemas/ContextEngineeringPlanLastNMessages",
+ "title": "Last N Messages"
+ },
+ {
+ "$ref": "#/components/schemas/ContextEngineeringPlanNone",
+ "title": "None"
+ },
+ {
+ "$ref": "#/components/schemas/ContextEngineeringPlanAll",
+ "title": "All"
+ },
+ {
+ "$ref": "#/components/schemas/ContextEngineeringPlanUserAndAssistantMessages",
+ "title": "User And Assistant Messages"
}
]
},
- "knowledgeBaseId": {
+ "assistantName": {
"type": "string",
- "description": "This is the ID of the knowledge base the model will use."
+ "description": "This is the assistant to transfer the call to. You must provide either assistantName or assistantId."
},
- "model": {
+ "assistantId": {
"type": "string",
- "description": "The specific Anthropic/Claude model that will be used.",
- "enum": [
- "claude-3-opus-20240229",
- "claude-3-sonnet-20240229",
- "claude-3-haiku-20240307",
- "claude-3-5-sonnet-20240620",
- "claude-3-5-sonnet-20241022",
- "claude-3-5-haiku-20241022",
- "claude-3-7-sonnet-20250219",
- "claude-opus-4-20250514",
- "claude-sonnet-4-20250514"
- ]
+ "description": "This is the assistant id to transfer the call to. You must provide either assistantName or assistantId."
},
- "provider": {
- "type": "string",
- "description": "The provider identifier for Anthropic.",
- "enum": [
- "anthropic"
+ "assistant": {
+ "description": "This is a transient assistant to transfer the call to. You may provide a transient assistant in the response `handoff-destination-request` in a dynamic handoff.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
]
},
- "thinking": {
- "description": "Optional configuration for Anthropic's thinking feature.\nOnly applicable for claude-3-7-sonnet-20250219 model.\nIf provided, maxTokens must be greater than thinking.budgetTokens.",
+ "variableExtractionPlan": {
+ "description": "This is the variable extraction plan for the handoff tool.",
"allOf": [
{
- "$ref": "#/components/schemas/AnthropicThinkingConfig"
+ "$ref": "#/components/schemas/VariableExtractionPlan"
}
]
},
- "temperature": {
- "type": "number",
- "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
- "minimum": 0,
- "maximum": 2
+ "assistantOverrides": {
+ "description": "These are the assistant overrides to apply to the destination assistant.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides"
+ }
+ ]
},
- "maxTokens": {
- "type": "number",
- "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
- "minimum": 50,
- "maximum": 10000
+ "description": {
+ "type": "string",
+ "description": "This is the description of the destination, used by the AI to choose when and how to transfer the call."
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "HandoffDestinationDynamic": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "dynamic"
+ ]
},
- "emotionRecognitionEnabled": {
- "type": "boolean",
- "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false"
+ "server": {
+ "description": "This is where Vapi will send the handoff-destination-request webhook in a dynamic handoff.\n\nThe order of precedence is:\n\n1. tool.server.url\n2. assistant.server.url\n3. phoneNumber.server.url\n4. org.server.url",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
},
- "numFastTurns": {
- "type": "number",
- "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
- "minimum": 0
+ "description": {
+ "type": "string",
+ "description": "This is the description of the destination, used by the AI to choose when and how to transfer the call."
}
},
"required": [
- "model",
- "provider"
+ "type"
]
},
- "CerebrasModel": {
+ "SquadMemberDTO": {
"type": "object",
"properties": {
- "messages": {
- "description": "This is the starting state for the conversation.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIMessage"
- }
- },
- "tools": {
+ "assistantDestinations": {
"type": "array",
- "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/CreateApiRequestToolDTO",
- "title": "ApiRequestTool"
- },
- {
- "$ref": "#/components/schemas/CreateBashToolDTO",
- "title": "BashTool"
- },
- {
- "$ref": "#/components/schemas/CreateComputerToolDTO",
- "title": "ComputerTool"
- },
- {
- "$ref": "#/components/schemas/CreateDtmfToolDTO",
- "title": "DtmfTool"
- },
- {
- "$ref": "#/components/schemas/CreateEndCallToolDTO",
- "title": "EndCallTool"
- },
- {
- "$ref": "#/components/schemas/CreateFunctionToolDTO",
- "title": "FunctionTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
- "title": "GoHighLevelCalendarAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
- "title": "GoHighLevelCalendarEventCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
- "title": "GoHighLevelContactCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
- "title": "GoHighLevelContactGetTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
- "title": "GoogleCalendarCheckAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
- "title": "GoogleCalendarCreateEventTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
- "title": "GoogleSheetsRowAppendTool"
- },
- {
- "$ref": "#/components/schemas/CreateHandoffToolDTO",
- "title": "HandoffTool"
- },
- {
- "$ref": "#/components/schemas/CreateMcpToolDTO",
- "title": "McpTool"
- },
- {
- "$ref": "#/components/schemas/CreateQueryToolDTO",
- "title": "QueryTool"
- },
- {
- "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
- "title": "SlackSendMessageTool"
- },
- {
- "$ref": "#/components/schemas/CreateSmsToolDTO",
- "title": "SmsTool"
- },
- {
- "$ref": "#/components/schemas/CreateTextEditorToolDTO",
- "title": "TextEditorTool"
+ "$ref": "#/components/schemas/TransferDestinationAssistant",
+ "title": "Transfer Destination"
},
{
- "$ref": "#/components/schemas/CreateTransferCallToolDTO",
- "title": "TransferCallTool"
+ "$ref": "#/components/schemas/HandoffDestinationAssistant",
+ "title": "Handoff Destination"
}
]
}
},
- "toolIds": {
- "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
- "type": "array",
- "items": {
- "type": "string"
- }
+ "assistantId": {
+ "type": "string",
+ "nullable": true,
+ "description": "This is the assistant that will be used for the call. To use a transient assistant, use `assistant` instead."
},
- "knowledgeBase": {
- "description": "These are the options for the knowledge base.",
- "oneOf": [
+ "assistant": {
+ "description": "This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead.",
+ "allOf": [
{
- "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
- "title": "Custom"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
]
},
- "knowledgeBaseId": {
+ "assistantOverrides": {
+ "description": "This can be used to override the assistant's settings and provide values for it's template variables.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides"
+ }
+ ]
+ }
+ }
+ },
+ "CreateSquadDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
"type": "string",
- "description": "This is the ID of the knowledge base the model will use."
+ "description": "This is the name of the squad."
},
- "model": {
+ "members": {
+ "description": "This is the list of assistants that make up the squad.\n\nThe call will start with the first assistant in the list.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/SquadMemberDTO"
+ }
+ },
+ "membersOverrides": {
+ "description": "This can be used to override all the assistants' settings and provide values for their template variables.\n\nBoth `membersOverrides` and `members[n].assistantOverrides` can be used together. First, `members[n].assistantOverrides` is applied. Then, `membersOverrides` is applied as a global override.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides"
+ }
+ ]
+ }
+ },
+ "required": [
+ "members"
+ ]
+ },
+ "HandoffDestinationSquad": {
+ "type": "object",
+ "properties": {
+ "type": {
"type": "string",
- "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b",
"enum": [
- "llama3.1-8b",
- "llama-3.3-70b"
+ "squad"
]
},
- "provider": {
+ "contextEngineeringPlan": {
+ "description": "This is the plan for manipulating the message context before handing off the call to the squad.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ContextEngineeringPlanLastNMessages",
+ "title": "Last N Messages"
+ },
+ {
+ "$ref": "#/components/schemas/ContextEngineeringPlanNone",
+ "title": "None"
+ },
+ {
+ "$ref": "#/components/schemas/ContextEngineeringPlanAll",
+ "title": "All"
+ },
+ {
+ "$ref": "#/components/schemas/ContextEngineeringPlanUserAndAssistantMessages",
+ "title": "User And Assistant Messages"
+ }
+ ]
+ },
+ "squadId": {
"type": "string",
- "enum": [
- "cerebras"
+ "description": "This is the squad id to transfer the call to."
+ },
+ "squad": {
+ "description": "This is a transient squad to transfer the call to.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateSquadDTO"
+ }
]
},
- "temperature": {
- "type": "number",
- "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
- "minimum": 0,
- "maximum": 2
+ "entryAssistantName": {
+ "type": "string",
+ "description": "This is the name of the entry assistant to start with when handing off to the squad.\nIf not provided, the first member of the squad will be used."
},
- "maxTokens": {
- "type": "number",
- "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
- "minimum": 50,
- "maximum": 10000
+ "variableExtractionPlan": {
+ "description": "This is the variable extraction plan for the handoff tool.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/VariableExtractionPlan"
+ }
+ ]
},
- "emotionRecognitionEnabled": {
- "type": "boolean",
- "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false"
+ "squadOverrides": {
+ "description": "These are the overrides to apply to the squad configuration.\nMaps to squad-level membersOverrides.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides"
+ }
+ ]
},
- "numFastTurns": {
- "type": "number",
- "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
- "minimum": 0
+ "description": {
+ "type": "string",
+ "description": "This is the description of the destination, used by the AI to choose when and how to transfer the call."
}
},
"required": [
- "model",
- "provider"
+ "type"
]
},
- "CustomLLMModel": {
+ "CreateHandoffToolDTO": {
"type": "object",
"properties": {
"messages": {
- "description": "This is the starting state for the conversation.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIMessage"
- }
- },
- "tools": {
"type": "array",
- "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/CreateApiRequestToolDTO",
- "title": "ApiRequestTool"
- },
- {
- "$ref": "#/components/schemas/CreateBashToolDTO",
- "title": "BashTool"
- },
- {
- "$ref": "#/components/schemas/CreateComputerToolDTO",
- "title": "ComputerTool"
- },
- {
- "$ref": "#/components/schemas/CreateDtmfToolDTO",
- "title": "DtmfTool"
- },
- {
- "$ref": "#/components/schemas/CreateEndCallToolDTO",
- "title": "EndCallTool"
- },
- {
- "$ref": "#/components/schemas/CreateFunctionToolDTO",
- "title": "FunctionTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
- "title": "GoHighLevelCalendarAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
- "title": "GoHighLevelCalendarEventCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
- "title": "GoHighLevelContactCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
- "title": "GoHighLevelContactGetTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
- "title": "GoogleCalendarCheckAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
- "title": "GoogleCalendarCreateEventTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
- "title": "GoogleSheetsRowAppendTool"
- },
- {
- "$ref": "#/components/schemas/CreateHandoffToolDTO",
- "title": "HandoffTool"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/CreateMcpToolDTO",
- "title": "McpTool"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/CreateQueryToolDTO",
- "title": "QueryTool"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
- "title": "SlackSendMessageTool"
- },
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the tool.\nWhen you're using handoff tool, we recommend adding this to your system prompt\n---\n# System context\n\nYou are part of a multi-agent system designed to make agent coordination and execution easy. Agents uses two primary abstraction: **Agents** and **Handoffs**. An agent encompasses instructions and tools and can hand off a conversation to another agent when appropriate. Handoffs are achieved by calling a handoff function, generally named `handoff_to_`. Handoffs between agents are handled seamlessly in the background; do not mention or draw attention to these handoffs in your conversation with the user.\n\n# Agent context\n\n{put your agent system prompt here}\n---",
+ "enum": [
+ "handoff"
+ ]
+ },
+ "defaultResult": {
+ "type": "string",
+ "description": "This is the default local tool result message used when no runtime handoff result override is returned."
+ },
+ "destinations": {
+ "type": "array",
+ "description": "These are the destinations that the call can be handed off to.\n\nUsage:\n1. Single destination\n\nUse `assistantId` to handoff the call to a saved assistant, or `assistantName` to handoff the call to an assistant in the same squad.\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\", // or \"assistantName\": \"Assistant123\"\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n2. Multiple destinations\n\n2.1. Multiple Tools, Each With One Destination (OpenAI recommended)\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\",\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n },\n ],\n },\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-456\",\n \"description\": \"customer wants to be handed off to assistant-456\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n2.2. One Tool, Multiple Destinations (Anthropic recommended)\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\",\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n },\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-456\",\n \"description\": \"customer wants to be handed off to assistant-456\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n3. 
Dynamic destination\n\n3.1 To determine the destination dynamically, supply a `dynamic` handoff destination type and a `server` object.\n VAPI will send a handoff-destination-request webhook to the `server.url`.\n The response from the server will be used as the destination (if valid).\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"dynamic\",\n \"server\": {\n \"url\": \"https://example.com\"\n }\n }\n ],\n }\n ]\n}\n```\n\n3.2. To pass custom parameters to the server, you can use the `function` object.\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"dynamic\",\n \"server\": {\n \"url\": \"https://example.com\"\n },\n }\n ],\n \"function\": {\n \"name\": \"handoff\",\n \"description\": \"Call this function when the customer is ready to be handed off to the next assistant\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"destination\": {\n \"type\": \"string\",\n \"description\": \"Use dynamic when customer is ready to be handed off to the next assistant\",\n \"enum\": [\"dynamic\"]\n },\n \"customerAreaCode\": {\n \"type\": \"number\",\n \"description\": \"Area code of the customer\"\n },\n \"customerIntent\": {\n \"type\": \"string\",\n \"enum\": [\"new-customer\", \"existing-customer\"],\n \"description\": \"Use new-customer when customer is a new customer, existing-customer when customer is an existing customer\"\n },\n \"customerSentiment\": {\n \"type\": \"string\",\n \"enum\": [\"positive\", \"negative\", \"neutral\"],\n \"description\": \"Use positive when customer is happy, negative when customer is unhappy, neutral when customer is neutral\"\n }\n }\n }\n }\n }\n ]\n}\n```\n\nThe properties `customerAreaCode`, `customerIntent`, and `customerSentiment` will be passed to the server in the webhook request body.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateSmsToolDTO",
- "title": "SmsTool"
+ "$ref": "#/components/schemas/HandoffDestinationAssistant",
+ "title": "Assistant"
},
{
- "$ref": "#/components/schemas/CreateTextEditorToolDTO",
- "title": "TextEditorTool"
+ "$ref": "#/components/schemas/HandoffDestinationDynamic",
+ "title": "Dynamic"
},
{
- "$ref": "#/components/schemas/CreateTransferCallToolDTO",
- "title": "TransferCallTool"
+ "$ref": "#/components/schemas/HandoffDestinationSquad",
+ "title": "Squad"
}
]
}
},
- "toolIds": {
- "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "knowledgeBase": {
- "description": "These are the options for the knowledge base.",
- "oneOf": [
+ "function": {
+ "description": "This is the optional function definition that will be passed to the LLM.\nIf this is not defined, we will construct this based on the other properties.\n\nFor example, given the following tools definition:\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\",\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n },\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-456\",\n \"description\": \"customer wants to be handed off to assistant-456\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\nWe will construct the following function definition:\n```json\n{\n \"function\": {\n \"name\": \"handoff_to_assistant-123\",\n \"description\": \"\n Use this function to handoff the call to the next assistant.\n Only use it when instructions explicitly ask you to use the handoff_to_assistant function.\n DO NOT call this function unless you are instructed to do so.\n Here are the destinations you can handoff the call to:\n 1. assistant-123. When: customer wants to be handed off to assistant-123\n 2. assistant-456. When: customer wants to be handed off to assistant-456\n \",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"destination\": {\n \"type\": \"string\",\n \"description\": \"Options: assistant-123 (customer wants to be handed off to assistant-123), assistant-456 (customer wants to be handed off to assistant-456)\",\n \"enum\": [\"assistant-123\", \"assistant-456\"]\n },\n },\n \"required\": [\"destination\"]\n }\n }\n}\n```\n\nTo override this function, please provide an OpenAI function definition and refer to it in the system prompt.\nYou may override parts of the function definition (i.e. 
you may only want to change the function name for your prompt).\nIf you choose to override the function parameters, it must include `destination` as a required parameter, and it must evaluate to either an assistantId, assistantName, or a the string literal `dynamic`.\n\nTo pass custom parameters to the server in a dynamic handoff, you can use the function parameters, with `dynamic` as the destination.\n```json\n{\n \"function\": {\n \"name\": \"dynamic_handoff\",\n \"description\": \"\n Call this function when the customer is ready to be handed off to the next assistant\n \",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"destination\": {\n \"type\": \"string\",\n \"enum\": [\"dynamic\"]\n },\n \"customerAreaCode\": {\n \"type\": \"number\",\n \"description\": \"Area code of the customer\"\n },\n \"customerIntent\": {\n \"type\": \"string\",\n \"enum\": [\"new-customer\", \"existing-customer\"],\n \"description\": \"Use new-customer when customer is a new customer, existing-customer when customer is an existing customer\"\n },\n \"customerSentiment\": {\n \"type\": \"string\",\n \"enum\": [\"positive\", \"negative\", \"neutral\"],\n \"description\": \"Use positive when customer is happy, negative when customer is unhappy, neutral when customer is neutral\"\n }\n },\n \"required\": [\"destination\", \"customerAreaCode\", \"customerIntent\", \"customerSentiment\"]\n }\n }\n}\n```",
+ "allOf": [
{
- "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
- "title": "Custom"
+ "$ref": "#/components/schemas/OpenAIFunction"
}
]
},
- "knowledgeBaseId": {
- "type": "string",
- "description": "This is the ID of the knowledge base the model will use."
- },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "CreateCustomKnowledgeBaseDTO": {
+ "type": "object",
+ "properties": {
"provider": {
"type": "string",
- "description": "This is the provider that will be used for the model. Any service, including your own server, that is compatible with the OpenAI API can be used.",
+ "description": "This knowledge base is bring your own knowledge base implementation.",
"enum": [
- "custom-llm"
+ "custom-knowledge-base"
]
},
- "metadataSendMode": {
- "type": "string",
- "description": "This determines whether metadata is sent in requests to the custom provider.\n\n- `off` will not send any metadata. payload will look like `{ messages }`\n- `variable` will send `assistant.metadata` as a variable on the payload. payload will look like `{ messages, metadata }`\n- `destructured` will send `assistant.metadata` fields directly on the payload. payload will look like `{ messages, ...metadata }`\n\nFurther, `variable` and `destructured` will send `call`, `phoneNumber`, and `customer` objects in the payload.\n\nDefault is `variable`.",
- "enum": [
- "off",
- "variable",
- "destructured"
- ]
- },
- "headers": {
- "type": "object",
- "description": "Custom headers to send with requests. These headers can override default OpenAI headers except for Authorization (which should be specified using a custom-llm credential).",
- "additionalProperties": {
- "type": "string"
- },
- "example": {
- "X-Custom-Header": "value"
- }
- },
- "url": {
+ "server": {
+ "description": "This is where the knowledge base request will be sent.\n\nRequest Example:\n\nPOST https://{server.url}\nContent-Type: application/json\n\n{\n \"messsage\": {\n \"type\": \"knowledge-base-request\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"Why is ocean blue?\"\n }\n ],\n ...other metadata about the call...\n }\n}\n\nResponse Expected:\n```\n{\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"The ocean is blue because water absorbs everything but blue.\",\n }, // YOU CAN RETURN THE EXACT RESPONSE TO SPEAK\n \"documents\": [\n {\n \"content\": \"The ocean is blue primarily because water absorbs colors in the red part of the light spectrum and scatters the blue light, making it more visible to our eyes.\",\n \"similarity\": 1\n },\n {\n \"content\": \"Blue light is scattered more by the water molecules than other colors, enhancing the blue appearance of the ocean.\",\n \"similarity\": .5\n }\n ] // OR, YOU CAN RETURN AN ARRAY OF DOCUMENTS THAT WILL BE SENT TO THE MODEL\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "server"
+ ]
+ },
+ "KnowledgeBase": {
+ "type": "object",
+ "properties": {
+ "name": {
"type": "string",
- "description": "These is the URL we'll use for the OpenAI client's `baseURL`. Ex. https://openrouter.ai/api/v1"
+ "description": "The name of the knowledge base",
+ "example": "My Knowledge Base"
},
- "timeoutSeconds": {
- "type": "number",
- "description": "This sets the timeout for the connection to the custom provider without needing to stream any tokens back. Default is 20 seconds.",
- "minimum": 0,
- "maximum": 300
+ "provider": {
+ "type": "string",
+ "description": "The provider of the knowledge base",
+ "enum": [
+ "google"
+ ],
+ "example": "google"
},
"model": {
"type": "string",
- "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b"
- },
- "temperature": {
- "type": "number",
- "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
- "minimum": 0,
- "maximum": 2
- },
- "maxTokens": {
- "type": "number",
- "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
- "minimum": 50,
- "maximum": 10000
+ "description": "The model to use for the knowledge base",
+ "enum": [
+ "gemini-3-flash-preview",
+ "gemini-2.5-pro",
+ "gemini-2.5-flash",
+ "gemini-2.5-flash-lite",
+ "gemini-2.0-flash-thinking-exp",
+ "gemini-2.0-pro-exp-02-05",
+ "gemini-2.0-flash",
+ "gemini-2.0-flash-lite",
+ "gemini-2.0-flash-exp",
+ "gemini-2.0-flash-realtime-exp",
+ "gemini-1.5-flash",
+ "gemini-1.5-flash-002",
+ "gemini-1.5-pro",
+ "gemini-1.5-pro-002",
+ "gemini-1.0-pro"
+ ]
},
- "emotionRecognitionEnabled": {
- "type": "boolean",
- "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false"
+ "description": {
+ "type": "string",
+ "description": "A description of the knowledge base"
},
- "numFastTurns": {
- "type": "number",
- "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
- "minimum": 0
+ "fileIds": {
+ "description": "The file IDs associated with this knowledge base",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
}
},
"required": [
+ "name",
"provider",
- "url",
- "model"
+ "description",
+ "fileIds"
]
},
- "DeepInfraModel": {
+ "CreateQueryToolDTO": {
"type": "object",
"properties": {
"messages": {
- "description": "This is the starting state for the conversation.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIMessage"
- }
- },
- "tools": {
"type": "array",
- "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/CreateApiRequestToolDTO",
- "title": "ApiRequestTool"
- },
- {
- "$ref": "#/components/schemas/CreateBashToolDTO",
- "title": "BashTool"
- },
- {
- "$ref": "#/components/schemas/CreateComputerToolDTO",
- "title": "ComputerTool"
- },
- {
- "$ref": "#/components/schemas/CreateDtmfToolDTO",
- "title": "DtmfTool"
- },
- {
- "$ref": "#/components/schemas/CreateEndCallToolDTO",
- "title": "EndCallTool"
- },
- {
- "$ref": "#/components/schemas/CreateFunctionToolDTO",
- "title": "FunctionTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
- "title": "GoHighLevelCalendarAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
- "title": "GoHighLevelCalendarEventCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
- "title": "GoHighLevelContactCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
- "title": "GoHighLevelContactGetTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
- "title": "GoogleCalendarCheckAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
- "title": "GoogleCalendarCreateEventTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
- "title": "GoogleSheetsRowAppendTool"
- },
- {
- "$ref": "#/components/schemas/CreateHandoffToolDTO",
- "title": "HandoffTool"
- },
- {
- "$ref": "#/components/schemas/CreateMcpToolDTO",
- "title": "McpTool"
- },
- {
- "$ref": "#/components/schemas/CreateQueryToolDTO",
- "title": "QueryTool"
- },
- {
- "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
- "title": "SlackSendMessageTool"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/CreateSmsToolDTO",
- "title": "SmsTool"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/CreateTextEditorToolDTO",
- "title": "TextEditorTool"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/CreateTransferCallToolDTO",
- "title": "TransferCallTool"
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "toolIds": {
- "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "type": {
+ "type": "string",
+ "enum": [
+ "query"
+ ],
+ "description": "The type of tool. \"query\" for Query tool."
+ },
+ "knowledgeBases": {
+ "description": "The knowledge bases to query",
"type": "array",
"items": {
- "type": "string"
+ "$ref": "#/components/schemas/KnowledgeBase"
}
},
- "knowledgeBase": {
- "description": "These are the options for the knowledge base.",
- "oneOf": [
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
{
- "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
- "title": "Custom"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
- },
- "knowledgeBaseId": {
- "type": "string",
- "description": "This is the ID of the knowledge base the model will use."
- },
- "provider": {
- "type": "string",
- "enum": [
- "deepinfra"
- ]
- },
- "model": {
- "type": "string",
- "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b"
- },
- "temperature": {
- "type": "number",
- "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
- "minimum": 0,
- "maximum": 2
- },
- "maxTokens": {
- "type": "number",
- "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
- "minimum": 50,
- "maximum": 10000
- },
- "emotionRecognitionEnabled": {
- "type": "boolean",
- "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false"
- },
- "numFastTurns": {
- "type": "number",
- "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
- "minimum": 0
}
},
"required": [
- "provider",
- "model"
+ "type"
]
},
- "DeepSeekModel": {
+ "CreateGoogleCalendarCreateEventToolDTO": {
"type": "object",
"properties": {
"messages": {
- "description": "This is the starting state for the conversation.",
"type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIMessage"
- }
- },
- "tools": {
- "type": "array",
- "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/CreateApiRequestToolDTO",
- "title": "ApiRequestTool"
- },
- {
- "$ref": "#/components/schemas/CreateBashToolDTO",
- "title": "BashTool"
- },
- {
- "$ref": "#/components/schemas/CreateComputerToolDTO",
- "title": "ComputerTool"
- },
- {
- "$ref": "#/components/schemas/CreateDtmfToolDTO",
- "title": "DtmfTool"
- },
- {
- "$ref": "#/components/schemas/CreateEndCallToolDTO",
- "title": "EndCallTool"
- },
- {
- "$ref": "#/components/schemas/CreateFunctionToolDTO",
- "title": "FunctionTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
- "title": "GoHighLevelCalendarAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
- "title": "GoHighLevelCalendarEventCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
- "title": "GoHighLevelContactCreateTool"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
- "title": "GoHighLevelContactGetTool"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
- "title": "GoogleCalendarCheckAvailabilityTool"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
- "title": "GoogleCalendarCreateEventTool"
- },
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "google.calendar.event.create"
+ ],
+ "description": "The type of tool. \"google.calendar.event.create\" for Google Calendar Create Event tool."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "CreateGoogleSheetsRowAppendToolDTO": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
- "title": "GoogleSheetsRowAppendTool"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/CreateHandoffToolDTO",
- "title": "HandoffTool"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/CreateMcpToolDTO",
- "title": "McpTool"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/CreateQueryToolDTO",
- "title": "QueryTool"
- },
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "google.sheets.row.append"
+ ],
+ "description": "The type of tool. \"google.sheets.row.append\" for Google Sheets Row Append tool."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "CreateGoogleCalendarCheckAvailabilityToolDTO": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
- "title": "SlackSendMessageTool"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/CreateSmsToolDTO",
- "title": "SmsTool"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/CreateTextEditorToolDTO",
- "title": "TextEditorTool"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/CreateTransferCallToolDTO",
- "title": "TransferCallTool"
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "toolIds": {
- "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "knowledgeBase": {
- "description": "These are the options for the knowledge base.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
- "title": "Custom"
- }
- ]
- },
- "knowledgeBaseId": {
- "type": "string",
- "description": "This is the ID of the knowledge base the model will use."
- },
- "model": {
+ "type": {
"type": "string",
- "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b",
"enum": [
- "deepseek-chat",
- "deepseek-reasoner"
- ]
+ "google.calendar.availability.check"
+ ],
+ "description": "The type of tool. \"google.calendar.availability.check\" for Google Calendar Check Availability tool."
},
- "provider": {
- "type": "string",
- "enum": [
- "deep-seek"
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
]
- },
- "temperature": {
- "type": "number",
- "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
- "minimum": 0,
- "maximum": 2
- },
- "maxTokens": {
- "type": "number",
- "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
- "minimum": 50,
- "maximum": 10000
- },
- "emotionRecognitionEnabled": {
- "type": "boolean",
- "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false"
- },
- "numFastTurns": {
- "type": "number",
- "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
- "minimum": 0
}
},
"required": [
- "model",
- "provider"
+ "type"
]
},
- "GeminiMultimodalLivePrebuiltVoiceConfig": {
+ "CreateSlackSendMessageToolDTO": {
"type": "object",
"properties": {
- "voiceName": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
"type": "string",
"enum": [
- "Puck",
- "Charon",
- "Kore",
- "Fenrir",
- "Aoede"
+ "slack.message.send"
+ ],
+ "description": "The type of tool. \"slack.message.send\" for Slack Send Message tool."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
]
}
},
"required": [
- "voiceName"
+ "type"
]
},
- "GeminiMultimodalLiveVoiceConfig": {
+ "McpToolMessages": {
"type": "object",
"properties": {
- "prebuiltVoiceConfig": {
- "$ref": "#/components/schemas/GeminiMultimodalLivePrebuiltVoiceConfig"
- }
+ "name": {
+ "type": "string",
+ "description": "The name of the tool from the MCP server."
+ },
+ "messages": {
+ "type": "array",
+ "description": "Custom messages for this specific tool. Set to an empty array to suppress all messages for this tool. If not provided, the tool will use the default messages from the parent MCP tool configuration.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ }
},
"required": [
- "prebuiltVoiceConfig"
+ "name"
]
},
- "GeminiMultimodalLiveSpeechConfig": {
+ "McpToolMetadata": {
"type": "object",
"properties": {
- "voiceConfig": {
- "$ref": "#/components/schemas/GeminiMultimodalLiveVoiceConfig"
+ "protocol": {
+ "type": "string",
+ "enum": [
+ "sse",
+ "shttp"
+ ],
+ "description": "This is the protocol used for MCP communication. Defaults to Streamable HTTP."
+ }
+ }
+ },
+ "CreateMcpToolDTO": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "mcp"
+ ],
+ "description": "The type of tool. \"mcp\" for MCP tool."
+ },
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "toolMessages": {
+ "description": "Per-tool message overrides for individual tools loaded from the MCP server. Set messages to an empty array to suppress messages for a specific tool. Tools not listed here will use the default messages from the parent tool.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/McpToolMessages"
+ }
+ },
+ "metadata": {
+ "$ref": "#/components/schemas/McpToolMetadata"
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
}
},
"required": [
- "voiceConfig"
+ "type"
]
},
- "GoogleRealtimeConfig": {
+ "CreateGoHighLevelCalendarAvailabilityToolDTO": {
"type": "object",
"properties": {
- "topP": {
- "type": "number",
- "description": "This is the nucleus sampling parameter that controls the cumulative probability of tokens considered during text generation.\nOnly applicable with the Gemini Flash 2.0 Multimodal Live API."
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
},
- "topK": {
- "type": "number",
- "description": "This is the top-k sampling parameter that limits the number of highest probability tokens considered during text generation.\nOnly applicable with the Gemini Flash 2.0 Multimodal Live API."
+ "type": {
+ "type": "string",
+ "enum": [
+ "gohighlevel.calendar.availability.check"
+ ],
+ "description": "The type of tool. \"gohighlevel.calendar.availability.check\" for GoHighLevel Calendar Availability Check tool."
},
- "presencePenalty": {
- "type": "number",
- "description": "This is the presence penalty parameter that influences the model's likelihood to repeat information by penalizing tokens based on their presence in the text.\nOnly applicable with the Gemini Flash 2.0 Multimodal Live API."
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "CreateGoHighLevelCalendarEventCreateToolDTO": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
},
- "frequencyPenalty": {
- "type": "number",
- "description": "This is the frequency penalty parameter that influences the model's likelihood to repeat tokens by penalizing them based on their frequency in the text.\nOnly applicable with the Gemini Flash 2.0 Multimodal Live API."
+ "type": {
+ "type": "string",
+ "enum": [
+ "gohighlevel.calendar.event.create"
+ ],
+ "description": "The type of tool. \"gohighlevel.calendar.event.create\" for GoHighLevel Calendar Event Create tool."
},
- "speechConfig": {
- "description": "This is the speech configuration object that defines the voice settings to be used for the model's speech output.\nOnly applicable with the Gemini Flash 2.0 Multimodal Live API.",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/GeminiMultimodalLiveSpeechConfig"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
}
- }
+ },
+ "required": [
+ "type"
+ ]
},
- "GoogleModel": {
+ "CreateGoHighLevelContactCreateToolDTO": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "gohighlevel.contact.create"
+ ],
+ "description": "The type of tool. \"gohighlevel.contact.create\" for GoHighLevel Contact Create tool."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "CreateGoHighLevelContactGetToolDTO": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "gohighlevel.contact.get"
+ ],
+ "description": "The type of tool. \"gohighlevel.contact.get\" for GoHighLevel Contact Get tool."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "OpenAIMessage": {
+ "type": "object",
+ "properties": {
+ "content": {
+ "type": "string",
+ "nullable": true,
+ "maxLength": 100000000
+ },
+ "role": {
+ "type": "string",
+ "enum": [
+ "assistant",
+ "function",
+ "user",
+ "system",
+ "tool"
+ ]
+ }
+ },
+ "required": [
+ "content",
+ "role"
+ ]
+ },
+ "AnyscaleModel": {
"type": "object",
"properties": {
"messages": {
@@ -13720,6 +13957,10 @@
"$ref": "#/components/schemas/CreateBashToolDTO",
"title": "BashTool"
},
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
{
"$ref": "#/components/schemas/CreateComputerToolDTO",
"title": "ComputerTool"
@@ -13791,6 +14032,14 @@
{
"$ref": "#/components/schemas/CreateTransferCallToolDTO",
"title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
]
}
@@ -13811,43 +14060,15 @@
}
]
},
- "knowledgeBaseId": {
- "type": "string",
- "description": "This is the ID of the knowledge base the model will use."
- },
- "model": {
- "type": "string",
- "description": "This is the Google model that will be used.",
- "enum": [
- "gemini-2.5-pro",
- "gemini-2.5-flash",
- "gemini-2.5-flash-lite",
- "gemini-2.0-flash-thinking-exp",
- "gemini-2.0-pro-exp-02-05",
- "gemini-2.0-flash",
- "gemini-2.0-flash-lite",
- "gemini-2.0-flash-exp",
- "gemini-2.0-flash-realtime-exp",
- "gemini-1.5-flash",
- "gemini-1.5-flash-002",
- "gemini-1.5-pro",
- "gemini-1.5-pro-002",
- "gemini-1.0-pro"
- ]
- },
"provider": {
"type": "string",
"enum": [
- "google"
+ "anyscale"
]
},
- "realtimeConfig": {
- "description": "This is the session configuration for the Gemini Flash 2.0 Multimodal Live API.\nOnly applicable if the model `gemini-2.0-flash-realtime-exp` is selected.",
- "allOf": [
- {
- "$ref": "#/components/schemas/GoogleRealtimeConfig"
- }
- ]
+ "model": {
+ "type": "string",
+ "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b"
},
"temperature": {
"type": "number",
@@ -13872,11 +14093,32 @@
}
},
"required": [
- "model",
- "provider"
+ "provider",
+ "model"
]
},
- "GroqModel": {
+ "AnthropicThinkingConfig": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "enabled"
+ ]
+ },
+ "budgetTokens": {
+ "type": "number",
+ "description": "The maximum number of tokens to allocate for thinking.\nMust be between 1024 and 100000 tokens.",
+ "minimum": 1024,
+ "maximum": 100000
+ }
+ },
+ "required": [
+ "type",
+ "budgetTokens"
+ ]
+ },
+ "AnthropicModel": {
"type": "object",
"properties": {
"messages": {
@@ -13899,6 +14141,10 @@
"$ref": "#/components/schemas/CreateBashToolDTO",
"title": "BashTool"
},
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
{
"$ref": "#/components/schemas/CreateComputerToolDTO",
"title": "ComputerTool"
@@ -13970,6 +14216,14 @@
{
"$ref": "#/components/schemas/CreateTransferCallToolDTO",
"title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
]
}
@@ -13990,34 +14244,39 @@
}
]
},
- "knowledgeBaseId": {
- "type": "string",
- "description": "This is the ID of the knowledge base the model will use."
- },
"model": {
"type": "string",
- "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b",
+ "description": "The specific Anthropic/Claude model that will be used.",
"enum": [
- "openai/gpt-oss-20b",
- "openai/gpt-oss-120b",
- "deepseek-r1-distill-llama-70b",
- "llama-3.3-70b-versatile",
- "llama-3.1-405b-reasoning",
- "llama-3.1-8b-instant",
- "llama3-8b-8192",
- "llama3-70b-8192",
- "gemma2-9b-it",
- "meta-llama/llama-4-maverick-17b-128e-instruct",
- "meta-llama/llama-4-scout-17b-16e-instruct",
- "mistral-saba-24b",
- "compound-beta",
- "compound-beta-mini"
+ "claude-3-opus-20240229",
+ "claude-3-sonnet-20240229",
+ "claude-3-haiku-20240307",
+ "claude-3-5-sonnet-20240620",
+ "claude-3-5-sonnet-20241022",
+ "claude-3-5-haiku-20241022",
+ "claude-3-7-sonnet-20250219",
+ "claude-opus-4-20250514",
+ "claude-opus-4-5-20251101",
+ "claude-opus-4-6",
+ "claude-sonnet-4-20250514",
+ "claude-sonnet-4-5-20250929",
+ "claude-sonnet-4-6",
+ "claude-haiku-4-5-20251001"
]
},
"provider": {
"type": "string",
+ "description": "The provider identifier for Anthropic.",
"enum": [
- "groq"
+ "anthropic"
+ ]
+ },
+ "thinking": {
+ "description": "Optional configuration for Anthropic's thinking feature.\nOnly applicable for claude-3-7-sonnet-20250219 model.\nIf provided, maxTokens must be greater than thinking.budgetTokens.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AnthropicThinkingConfig"
+ }
]
},
"temperature": {
@@ -14047,7 +14306,7 @@
"provider"
]
},
- "InflectionAIModel": {
+ "AnthropicBedrockModel": {
"type": "object",
"properties": {
"messages": {
@@ -14070,6 +14329,10 @@
"$ref": "#/components/schemas/CreateBashToolDTO",
"title": "BashTool"
},
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
{
"$ref": "#/components/schemas/CreateComputerToolDTO",
"title": "ComputerTool"
@@ -14141,6 +14404,14 @@
{
"$ref": "#/components/schemas/CreateTransferCallToolDTO",
"title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
]
}
@@ -14161,21 +14432,39 @@
}
]
},
- "knowledgeBaseId": {
+ "provider": {
"type": "string",
- "description": "This is the ID of the knowledge base the model will use."
+ "description": "The provider identifier for Anthropic via AWS Bedrock.",
+ "enum": [
+ "anthropic-bedrock"
+ ]
},
"model": {
"type": "string",
- "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b",
+ "description": "The specific Anthropic/Claude model that will be used via Bedrock.",
"enum": [
- "inflection_3_pi"
+ "claude-3-opus-20240229",
+ "claude-3-sonnet-20240229",
+ "claude-3-haiku-20240307",
+ "claude-3-5-sonnet-20240620",
+ "claude-3-5-sonnet-20241022",
+ "claude-3-5-haiku-20241022",
+ "claude-3-7-sonnet-20250219",
+ "claude-opus-4-20250514",
+ "claude-opus-4-5-20251101",
+ "claude-opus-4-6",
+ "claude-sonnet-4-20250514",
+ "claude-sonnet-4-5-20250929",
+ "claude-sonnet-4-6",
+ "claude-haiku-4-5-20251001"
]
},
- "provider": {
- "type": "string",
- "enum": [
- "inflection-ai"
+ "thinking": {
+ "description": "Optional configuration for Anthropic's thinking feature.\nOnly applicable for claude-3-7-sonnet-20250219 model.\nIf provided, maxTokens must be greater than thinking.budgetTokens.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AnthropicThinkingConfig"
+ }
]
},
"temperature": {
@@ -14201,11 +14490,11 @@
}
},
"required": [
- "model",
- "provider"
+ "provider",
+ "model"
]
},
- "OpenAIModel": {
+ "CerebrasModel": {
"type": "object",
"properties": {
"messages": {
@@ -14228,6 +14517,10 @@
"$ref": "#/components/schemas/CreateBashToolDTO",
"title": "BashTool"
},
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
{
"$ref": "#/components/schemas/CreateComputerToolDTO",
"title": "ComputerTool"
@@ -14299,6 +14592,14 @@
{
"$ref": "#/components/schemas/CreateTransferCallToolDTO",
"title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
]
}
@@ -14319,332 +14620,18 @@
}
]
},
- "knowledgeBaseId": {
+ "model": {
"type": "string",
- "description": "This is the ID of the knowledge base the model will use."
+ "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b",
+ "enum": [
+ "llama3.1-8b",
+ "llama-3.3-70b"
+ ]
},
"provider": {
"type": "string",
- "description": "This is the provider that will be used for the model.",
"enum": [
- "openai"
- ]
- },
- "model": {
- "type": "string",
- "description": "This is the OpenAI model that will be used.\n\nWhen using Vapi OpenAI or your own Azure Credentials, you have the option to specify the region for the selected model. This shouldn't be specified unless you have a specific reason to do so. Vapi will automatically find the fastest region that make sense.\nThis is helpful when you are required to comply with Data Residency rules. Learn more about Azure regions here https://azure.microsoft.com/en-us/explore/global-infrastructure/data-residency/.\n\n@default undefined",
- "enum": [
- "gpt-5",
- "gpt-5-mini",
- "gpt-5-nano",
- "gpt-4.1-2025-04-14",
- "gpt-4.1-mini-2025-04-14",
- "gpt-4.1-nano-2025-04-14",
- "gpt-4.1",
- "gpt-4.1-mini",
- "gpt-4.1-nano",
- "chatgpt-4o-latest",
- "o3",
- "o3-mini",
- "o4-mini",
- "o1-mini",
- "o1-mini-2024-09-12",
- "gpt-4o-realtime-preview-2024-10-01",
- "gpt-4o-realtime-preview-2024-12-17",
- "gpt-4o-mini-realtime-preview-2024-12-17",
- "gpt-4o-mini-2024-07-18",
- "gpt-4o-mini",
- "gpt-4o",
- "gpt-4o-2024-05-13",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-11-20",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-turbo-preview",
- "gpt-4-0125-preview",
- "gpt-4-1106-preview",
- "gpt-4",
- "gpt-4-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0613",
- "gpt-4.1-2025-04-14:westus",
- "gpt-4.1-2025-04-14:eastus2",
- "gpt-4.1-2025-04-14:eastus",
- "gpt-4.1-2025-04-14:westus3",
- "gpt-4.1-2025-04-14:northcentralus",
- "gpt-4.1-2025-04-14:southcentralus",
- "gpt-4.1-mini-2025-04-14:westus",
- "gpt-4.1-mini-2025-04-14:eastus2",
- "gpt-4.1-mini-2025-04-14:eastus",
- "gpt-4.1-mini-2025-04-14:westus3",
- "gpt-4.1-mini-2025-04-14:northcentralus",
- "gpt-4.1-mini-2025-04-14:southcentralus",
- "gpt-4.1-nano-2025-04-14:westus",
- "gpt-4.1-nano-2025-04-14:eastus2",
- "gpt-4.1-nano-2025-04-14:westus3",
- "gpt-4.1-nano-2025-04-14:northcentralus",
- "gpt-4.1-nano-2025-04-14:southcentralus",
- "gpt-4o-2024-11-20:swedencentral",
- "gpt-4o-2024-11-20:westus",
- "gpt-4o-2024-11-20:eastus2",
- "gpt-4o-2024-11-20:eastus",
- "gpt-4o-2024-11-20:westus3",
- "gpt-4o-2024-11-20:southcentralus",
- "gpt-4o-2024-08-06:westus",
- "gpt-4o-2024-08-06:westus3",
- "gpt-4o-2024-08-06:eastus",
- "gpt-4o-2024-08-06:eastus2",
- "gpt-4o-2024-08-06:northcentralus",
- "gpt-4o-2024-08-06:southcentralus",
- "gpt-4o-mini-2024-07-18:westus",
- "gpt-4o-mini-2024-07-18:westus3",
- "gpt-4o-mini-2024-07-18:eastus",
- "gpt-4o-mini-2024-07-18:eastus2",
- "gpt-4o-mini-2024-07-18:northcentralus",
- "gpt-4o-mini-2024-07-18:southcentralus",
- "gpt-4o-2024-05-13:eastus2",
- "gpt-4o-2024-05-13:eastus",
- "gpt-4o-2024-05-13:northcentralus",
- "gpt-4o-2024-05-13:southcentralus",
- "gpt-4o-2024-05-13:westus3",
- "gpt-4o-2024-05-13:westus",
- "gpt-4-turbo-2024-04-09:eastus2",
- "gpt-4-0125-preview:eastus",
- "gpt-4-0125-preview:northcentralus",
- "gpt-4-0125-preview:southcentralus",
- "gpt-4-1106-preview:australia",
- "gpt-4-1106-preview:canadaeast",
- "gpt-4-1106-preview:france",
- "gpt-4-1106-preview:india",
- "gpt-4-1106-preview:norway",
- "gpt-4-1106-preview:swedencentral",
- "gpt-4-1106-preview:uk",
- "gpt-4-1106-preview:westus",
- "gpt-4-1106-preview:westus3",
- "gpt-4-0613:canadaeast",
- "gpt-3.5-turbo-0125:canadaeast",
- "gpt-3.5-turbo-0125:northcentralus",
- "gpt-3.5-turbo-0125:southcentralus",
- "gpt-3.5-turbo-1106:canadaeast",
- "gpt-3.5-turbo-1106:westus"
- ]
- },
- "fallbackModels": {
- "type": "array",
- "description": "These are the fallback models that will be used if the primary model fails. This shouldn't be specified unless you have a specific reason to do so. Vapi will automatically find the fastest fallbacks that make sense.",
- "enum": [
- "gpt-5",
- "gpt-5-mini",
- "gpt-5-nano",
- "gpt-4.1-2025-04-14",
- "gpt-4.1-mini-2025-04-14",
- "gpt-4.1-nano-2025-04-14",
- "gpt-4.1",
- "gpt-4.1-mini",
- "gpt-4.1-nano",
- "chatgpt-4o-latest",
- "o3",
- "o3-mini",
- "o4-mini",
- "o1-mini",
- "o1-mini-2024-09-12",
- "gpt-4o-realtime-preview-2024-10-01",
- "gpt-4o-realtime-preview-2024-12-17",
- "gpt-4o-mini-realtime-preview-2024-12-17",
- "gpt-4o-mini-2024-07-18",
- "gpt-4o-mini",
- "gpt-4o",
- "gpt-4o-2024-05-13",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-11-20",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-turbo-preview",
- "gpt-4-0125-preview",
- "gpt-4-1106-preview",
- "gpt-4",
- "gpt-4-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0613",
- "gpt-4.1-2025-04-14:westus",
- "gpt-4.1-2025-04-14:eastus2",
- "gpt-4.1-2025-04-14:eastus",
- "gpt-4.1-2025-04-14:westus3",
- "gpt-4.1-2025-04-14:northcentralus",
- "gpt-4.1-2025-04-14:southcentralus",
- "gpt-4.1-mini-2025-04-14:westus",
- "gpt-4.1-mini-2025-04-14:eastus2",
- "gpt-4.1-mini-2025-04-14:eastus",
- "gpt-4.1-mini-2025-04-14:westus3",
- "gpt-4.1-mini-2025-04-14:northcentralus",
- "gpt-4.1-mini-2025-04-14:southcentralus",
- "gpt-4.1-nano-2025-04-14:westus",
- "gpt-4.1-nano-2025-04-14:eastus2",
- "gpt-4.1-nano-2025-04-14:westus3",
- "gpt-4.1-nano-2025-04-14:northcentralus",
- "gpt-4.1-nano-2025-04-14:southcentralus",
- "gpt-4o-2024-11-20:swedencentral",
- "gpt-4o-2024-11-20:westus",
- "gpt-4o-2024-11-20:eastus2",
- "gpt-4o-2024-11-20:eastus",
- "gpt-4o-2024-11-20:westus3",
- "gpt-4o-2024-11-20:southcentralus",
- "gpt-4o-2024-08-06:westus",
- "gpt-4o-2024-08-06:westus3",
- "gpt-4o-2024-08-06:eastus",
- "gpt-4o-2024-08-06:eastus2",
- "gpt-4o-2024-08-06:northcentralus",
- "gpt-4o-2024-08-06:southcentralus",
- "gpt-4o-mini-2024-07-18:westus",
- "gpt-4o-mini-2024-07-18:westus3",
- "gpt-4o-mini-2024-07-18:eastus",
- "gpt-4o-mini-2024-07-18:eastus2",
- "gpt-4o-mini-2024-07-18:northcentralus",
- "gpt-4o-mini-2024-07-18:southcentralus",
- "gpt-4o-2024-05-13:eastus2",
- "gpt-4o-2024-05-13:eastus",
- "gpt-4o-2024-05-13:northcentralus",
- "gpt-4o-2024-05-13:southcentralus",
- "gpt-4o-2024-05-13:westus3",
- "gpt-4o-2024-05-13:westus",
- "gpt-4-turbo-2024-04-09:eastus2",
- "gpt-4-0125-preview:eastus",
- "gpt-4-0125-preview:northcentralus",
- "gpt-4-0125-preview:southcentralus",
- "gpt-4-1106-preview:australia",
- "gpt-4-1106-preview:canadaeast",
- "gpt-4-1106-preview:france",
- "gpt-4-1106-preview:india",
- "gpt-4-1106-preview:norway",
- "gpt-4-1106-preview:swedencentral",
- "gpt-4-1106-preview:uk",
- "gpt-4-1106-preview:westus",
- "gpt-4-1106-preview:westus3",
- "gpt-4-0613:canadaeast",
- "gpt-3.5-turbo-0125:canadaeast",
- "gpt-3.5-turbo-0125:northcentralus",
- "gpt-3.5-turbo-0125:southcentralus",
- "gpt-3.5-turbo-1106:canadaeast",
- "gpt-3.5-turbo-1106:westus"
- ],
- "example": [
- "gpt-4-0125-preview",
- "gpt-4-0613"
- ],
- "items": {
- "type": "string",
- "enum": [
- "gpt-5",
- "gpt-5-mini",
- "gpt-5-nano",
- "gpt-4.1-2025-04-14",
- "gpt-4.1-mini-2025-04-14",
- "gpt-4.1-nano-2025-04-14",
- "gpt-4.1",
- "gpt-4.1-mini",
- "gpt-4.1-nano",
- "chatgpt-4o-latest",
- "o3",
- "o3-mini",
- "o4-mini",
- "o1-mini",
- "o1-mini-2024-09-12",
- "gpt-4o-realtime-preview-2024-10-01",
- "gpt-4o-realtime-preview-2024-12-17",
- "gpt-4o-mini-realtime-preview-2024-12-17",
- "gpt-4o-mini-2024-07-18",
- "gpt-4o-mini",
- "gpt-4o",
- "gpt-4o-2024-05-13",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-11-20",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-turbo-preview",
- "gpt-4-0125-preview",
- "gpt-4-1106-preview",
- "gpt-4",
- "gpt-4-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0613",
- "gpt-4.1-2025-04-14:westus",
- "gpt-4.1-2025-04-14:eastus2",
- "gpt-4.1-2025-04-14:eastus",
- "gpt-4.1-2025-04-14:westus3",
- "gpt-4.1-2025-04-14:northcentralus",
- "gpt-4.1-2025-04-14:southcentralus",
- "gpt-4.1-mini-2025-04-14:westus",
- "gpt-4.1-mini-2025-04-14:eastus2",
- "gpt-4.1-mini-2025-04-14:eastus",
- "gpt-4.1-mini-2025-04-14:westus3",
- "gpt-4.1-mini-2025-04-14:northcentralus",
- "gpt-4.1-mini-2025-04-14:southcentralus",
- "gpt-4.1-nano-2025-04-14:westus",
- "gpt-4.1-nano-2025-04-14:eastus2",
- "gpt-4.1-nano-2025-04-14:westus3",
- "gpt-4.1-nano-2025-04-14:northcentralus",
- "gpt-4.1-nano-2025-04-14:southcentralus",
- "gpt-4o-2024-11-20:swedencentral",
- "gpt-4o-2024-11-20:westus",
- "gpt-4o-2024-11-20:eastus2",
- "gpt-4o-2024-11-20:eastus",
- "gpt-4o-2024-11-20:westus3",
- "gpt-4o-2024-11-20:southcentralus",
- "gpt-4o-2024-08-06:westus",
- "gpt-4o-2024-08-06:westus3",
- "gpt-4o-2024-08-06:eastus",
- "gpt-4o-2024-08-06:eastus2",
- "gpt-4o-2024-08-06:northcentralus",
- "gpt-4o-2024-08-06:southcentralus",
- "gpt-4o-mini-2024-07-18:westus",
- "gpt-4o-mini-2024-07-18:westus3",
- "gpt-4o-mini-2024-07-18:eastus",
- "gpt-4o-mini-2024-07-18:eastus2",
- "gpt-4o-mini-2024-07-18:northcentralus",
- "gpt-4o-mini-2024-07-18:southcentralus",
- "gpt-4o-2024-05-13:eastus2",
- "gpt-4o-2024-05-13:eastus",
- "gpt-4o-2024-05-13:northcentralus",
- "gpt-4o-2024-05-13:southcentralus",
- "gpt-4o-2024-05-13:westus3",
- "gpt-4o-2024-05-13:westus",
- "gpt-4-turbo-2024-04-09:eastus2",
- "gpt-4-0125-preview:eastus",
- "gpt-4-0125-preview:northcentralus",
- "gpt-4-0125-preview:southcentralus",
- "gpt-4-1106-preview:australia",
- "gpt-4-1106-preview:canadaeast",
- "gpt-4-1106-preview:france",
- "gpt-4-1106-preview:india",
- "gpt-4-1106-preview:norway",
- "gpt-4-1106-preview:swedencentral",
- "gpt-4-1106-preview:uk",
- "gpt-4-1106-preview:westus",
- "gpt-4-1106-preview:westus3",
- "gpt-4-0613:canadaeast",
- "gpt-3.5-turbo-0125:canadaeast",
- "gpt-3.5-turbo-0125:northcentralus",
- "gpt-3.5-turbo-0125:southcentralus",
- "gpt-3.5-turbo-1106:canadaeast",
- "gpt-3.5-turbo-1106:westus"
- ]
- }
- },
- "toolStrictCompatibilityMode": {
- "type": "string",
- "description": "Azure OpenAI doesn't support `maxLength` right now https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/structured-outputs?tabs=python-secure%2Cdotnet-entra-id&pivots=programming-language-csharp#unsupported-type-specific-keywords. Need to strip.\n\n- `strip-parameters-with-unsupported-validation` will strip parameters with unsupported validation.\n- `strip-unsupported-validation` will keep the parameters but strip unsupported validation.\n\n@default `strip-unsupported-validation`",
- "enum": [
- "strip-parameters-with-unsupported-validation",
- "strip-unsupported-validation"
+ "cerebras"
]
},
"temperature": {
@@ -14670,11 +14657,11 @@
}
},
"required": [
- "provider",
- "model"
+ "model",
+ "provider"
]
},
- "OpenRouterModel": {
+ "CustomLLMModel": {
"type": "object",
"properties": {
"messages": {
@@ -14697,6 +14684,10 @@
"$ref": "#/components/schemas/CreateBashToolDTO",
"title": "BashTool"
},
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
{
"$ref": "#/components/schemas/CreateComputerToolDTO",
"title": "ComputerTool"
@@ -14768,6 +14759,14 @@
{
"$ref": "#/components/schemas/CreateTransferCallToolDTO",
"title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
]
}
@@ -14788,16 +14787,46 @@
}
]
},
- "knowledgeBaseId": {
+ "provider": {
"type": "string",
- "description": "This is the ID of the knowledge base the model will use."
+ "description": "This is the provider that will be used for the model. Any service, including your own server, that is compatible with the OpenAI API can be used.",
+ "enum": [
+ "custom-llm"
+ ]
},
- "provider": {
+ "metadataSendMode": {
"type": "string",
+ "description": "This determines whether metadata is sent in requests to the custom provider.\n\n- `off` will not send any metadata. payload will look like `{ messages }`\n- `variable` will send `assistant.metadata` as a variable on the payload. payload will look like `{ messages, metadata }`\n- `destructured` will send `assistant.metadata` fields directly on the payload. payload will look like `{ messages, ...metadata }`\n\nFurther, `variable` and `destructured` will send `call`, `phoneNumber`, and `customer` objects in the payload.\n\nDefault is `variable`.",
"enum": [
- "openrouter"
+ "off",
+ "variable",
+ "destructured"
]
},
+ "headers": {
+ "type": "object",
+ "description": "Custom headers to send with requests. These headers can override default OpenAI headers except for Authorization (which should be specified using a custom-llm credential).",
+ "additionalProperties": {
+ "type": "string"
+ },
+ "example": {
+ "X-Custom-Header": "value"
+ }
+ },
+ "url": {
+ "type": "string",
+ "description": "These is the URL we'll use for the OpenAI client's `baseURL`. Ex. https://openrouter.ai/api/v1"
+ },
+ "wordLevelConfidenceEnabled": {
+ "type": "boolean",
+ "description": "This determines whether the transcriber's word level confidence is sent in requests to the custom provider. Default is false.\nThis only works for Deepgram transcribers."
+ },
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "This sets the timeout for the connection to the custom provider without needing to stream any tokens back. Default is 20 seconds.",
+ "minimum": 0,
+ "maximum": 300
+ },
"model": {
"type": "string",
"description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b"
@@ -14826,10 +14855,11 @@
},
"required": [
"provider",
+ "url",
"model"
]
},
- "PerplexityAIModel": {
+ "DeepInfraModel": {
"type": "object",
"properties": {
"messages": {
@@ -14852,6 +14882,10 @@
"$ref": "#/components/schemas/CreateBashToolDTO",
"title": "BashTool"
},
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
{
"$ref": "#/components/schemas/CreateComputerToolDTO",
"title": "ComputerTool"
@@ -14923,6 +14957,14 @@
{
"$ref": "#/components/schemas/CreateTransferCallToolDTO",
"title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
]
}
@@ -14943,14 +14985,10 @@
}
]
},
- "knowledgeBaseId": {
- "type": "string",
- "description": "This is the ID of the knowledge base the model will use."
- },
"provider": {
"type": "string",
"enum": [
- "perplexity-ai"
+ "deepinfra"
]
},
"model": {
@@ -14984,7 +15022,7 @@
"model"
]
},
- "TogetherAIModel": {
+ "DeepSeekModel": {
"type": "object",
"properties": {
"messages": {
@@ -15007,6 +15045,10 @@
"$ref": "#/components/schemas/CreateBashToolDTO",
"title": "BashTool"
},
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
{
"$ref": "#/components/schemas/CreateComputerToolDTO",
"title": "ComputerTool"
@@ -15078,6 +15120,14 @@
{
"$ref": "#/components/schemas/CreateTransferCallToolDTO",
"title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
]
}
@@ -15098,20 +15148,20 @@
}
]
},
- "knowledgeBaseId": {
+ "model": {
"type": "string",
- "description": "This is the ID of the knowledge base the model will use."
+ "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b",
+ "enum": [
+ "deepseek-chat",
+ "deepseek-reasoner"
+ ]
},
"provider": {
"type": "string",
"enum": [
- "together-ai"
+ "deep-seek"
]
},
- "model": {
- "type": "string",
- "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b"
- },
"temperature": {
"type": "number",
"description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
@@ -15135,232 +15185,210 @@
}
},
"required": [
- "provider",
- "model"
+ "model",
+ "provider"
]
},
- "HangupNode": {
+ "GeminiMultimodalLivePrebuiltVoiceConfig": {
"type": "object",
"properties": {
- "type": {
+ "voiceName": {
"type": "string",
"enum": [
- "hangup"
+ "Puck",
+ "Charon",
+ "Kore",
+ "Fenrir",
+ "Aoede"
]
- },
- "name": {
- "type": "string",
- "maxLength": 80
- },
- "isStart": {
- "type": "boolean",
- "description": "This is whether or not the node is the start of the workflow."
- },
- "metadata": {
- "type": "object",
- "description": "This is for metadata you want to store on the task."
}
},
"required": [
- "type",
- "name"
+ "voiceName"
]
},
- "WorkflowOpenAIModel": {
+ "GeminiMultimodalLiveVoiceConfig": {
"type": "object",
"properties": {
- "provider": {
- "type": "string",
- "description": "This is the provider of the model (`openai`).",
- "enum": [
- "openai"
- ]
- },
- "model": {
- "type": "string",
- "description": "This is the OpenAI model that will be used.\n\nWhen using Vapi OpenAI or your own Azure Credentials, you have the option to specify the region for the selected model. This shouldn't be specified unless you have a specific reason to do so. Vapi will automatically find the fastest region that make sense.\nThis is helpful when you are required to comply with Data Residency rules. Learn more about Azure regions here https://azure.microsoft.com/en-us/explore/global-infrastructure/data-residency/.",
- "maxLength": 100,
- "enum": [
- "gpt-5",
- "gpt-5-mini",
- "gpt-5-nano",
- "gpt-4.1-2025-04-14",
- "gpt-4.1-mini-2025-04-14",
- "gpt-4.1-nano-2025-04-14",
- "gpt-4.1",
- "gpt-4.1-mini",
- "gpt-4.1-nano",
- "chatgpt-4o-latest",
- "o3",
- "o3-mini",
- "o4-mini",
- "o1-mini",
- "o1-mini-2024-09-12",
- "gpt-4o-mini-2024-07-18",
- "gpt-4o-mini",
- "gpt-4o",
- "gpt-4o-2024-05-13",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-11-20",
- "gpt-4-turbo",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-turbo-preview",
- "gpt-4-0125-preview",
- "gpt-4-1106-preview",
- "gpt-4",
- "gpt-4-0613",
- "gpt-3.5-turbo",
- "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo-0613",
- "gpt-4.1-2025-04-14:westus",
- "gpt-4.1-2025-04-14:eastus2",
- "gpt-4.1-2025-04-14:eastus",
- "gpt-4.1-2025-04-14:westus3",
- "gpt-4.1-2025-04-14:northcentralus",
- "gpt-4.1-2025-04-14:southcentralus",
- "gpt-4.1-mini-2025-04-14:westus",
- "gpt-4.1-mini-2025-04-14:eastus2",
- "gpt-4.1-mini-2025-04-14:eastus",
- "gpt-4.1-mini-2025-04-14:westus3",
- "gpt-4.1-mini-2025-04-14:northcentralus",
- "gpt-4.1-mini-2025-04-14:southcentralus",
- "gpt-4.1-nano-2025-04-14:westus",
- "gpt-4.1-nano-2025-04-14:eastus2",
- "gpt-4.1-nano-2025-04-14:westus3",
- "gpt-4.1-nano-2025-04-14:northcentralus",
- "gpt-4.1-nano-2025-04-14:southcentralus",
- "gpt-4o-2024-11-20:swedencentral",
- "gpt-4o-2024-11-20:westus",
- "gpt-4o-2024-11-20:eastus2",
- "gpt-4o-2024-11-20:eastus",
- "gpt-4o-2024-11-20:westus3",
- "gpt-4o-2024-11-20:southcentralus",
- "gpt-4o-2024-08-06:westus",
- "gpt-4o-2024-08-06:westus3",
- "gpt-4o-2024-08-06:eastus",
- "gpt-4o-2024-08-06:eastus2",
- "gpt-4o-2024-08-06:northcentralus",
- "gpt-4o-2024-08-06:southcentralus",
- "gpt-4o-mini-2024-07-18:westus",
- "gpt-4o-mini-2024-07-18:westus3",
- "gpt-4o-mini-2024-07-18:eastus",
- "gpt-4o-mini-2024-07-18:eastus2",
- "gpt-4o-mini-2024-07-18:northcentralus",
- "gpt-4o-mini-2024-07-18:southcentralus",
- "gpt-4o-2024-05-13:eastus2",
- "gpt-4o-2024-05-13:eastus",
- "gpt-4o-2024-05-13:northcentralus",
- "gpt-4o-2024-05-13:southcentralus",
- "gpt-4o-2024-05-13:westus3",
- "gpt-4o-2024-05-13:westus",
- "gpt-4-turbo-2024-04-09:eastus2",
- "gpt-4-0125-preview:eastus",
- "gpt-4-0125-preview:northcentralus",
- "gpt-4-0125-preview:southcentralus",
- "gpt-4-1106-preview:australia",
- "gpt-4-1106-preview:canadaeast",
- "gpt-4-1106-preview:france",
- "gpt-4-1106-preview:india",
- "gpt-4-1106-preview:norway",
- "gpt-4-1106-preview:swedencentral",
- "gpt-4-1106-preview:uk",
- "gpt-4-1106-preview:westus",
- "gpt-4-1106-preview:westus3",
- "gpt-4-0613:canadaeast",
- "gpt-3.5-turbo-0125:canadaeast",
- "gpt-3.5-turbo-0125:northcentralus",
- "gpt-3.5-turbo-0125:southcentralus",
- "gpt-3.5-turbo-1106:canadaeast",
- "gpt-3.5-turbo-1106:westus"
- ]
- },
- "temperature": {
- "type": "number",
- "description": "This is the temperature of the model.",
- "minimum": 0,
- "maximum": 2
- },
- "maxTokens": {
- "type": "number",
- "description": "This is the max tokens of the model.",
- "minimum": 50,
- "maximum": 10000
+ "prebuiltVoiceConfig": {
+ "$ref": "#/components/schemas/GeminiMultimodalLivePrebuiltVoiceConfig"
}
},
"required": [
- "provider",
- "model"
+ "prebuiltVoiceConfig"
]
},
- "WorkflowAnthropicModel": {
+ "GeminiMultimodalLiveSpeechConfig": {
"type": "object",
"properties": {
- "provider": {
- "type": "string",
- "description": "This is the provider of the model (`anthropic`).",
- "enum": [
- "anthropic"
- ]
+ "voiceConfig": {
+ "$ref": "#/components/schemas/GeminiMultimodalLiveVoiceConfig"
+ }
+ },
+ "required": [
+ "voiceConfig"
+ ]
+ },
+ "GoogleRealtimeConfig": {
+ "type": "object",
+ "properties": {
+ "topP": {
+ "type": "number",
+ "description": "This is the nucleus sampling parameter that controls the cumulative probability of tokens considered during text generation.\nOnly applicable with the Gemini Flash 2.0 Multimodal Live API."
},
- "model": {
- "type": "string",
- "description": "This is the specific model that will be used.",
- "maxLength": 100,
- "enum": [
- "claude-3-opus-20240229",
- "claude-3-sonnet-20240229",
- "claude-3-haiku-20240307",
- "claude-3-5-sonnet-20240620",
- "claude-3-5-sonnet-20241022",
- "claude-3-5-haiku-20241022",
- "claude-3-7-sonnet-20250219",
- "claude-opus-4-20250514",
- "claude-sonnet-4-20250514"
- ]
+ "topK": {
+ "type": "number",
+ "description": "This is the top-k sampling parameter that limits the number of highest probability tokens considered during text generation.\nOnly applicable with the Gemini Flash 2.0 Multimodal Live API."
},
- "thinking": {
- "description": "This is the optional configuration for Anthropic's thinking feature.\n\n- Only applicable for `claude-3-7-sonnet-20250219` model.\n- If provided, `maxTokens` must be greater than `thinking.budgetTokens`.",
+ "presencePenalty": {
+ "type": "number",
+ "description": "This is the presence penalty parameter that influences the model's likelihood to repeat information by penalizing tokens based on their presence in the text.\nOnly applicable with the Gemini Flash 2.0 Multimodal Live API."
+ },
+ "frequencyPenalty": {
+ "type": "number",
+ "description": "This is the frequency penalty parameter that influences the model's likelihood to repeat tokens by penalizing them based on their frequency in the text.\nOnly applicable with the Gemini Flash 2.0 Multimodal Live API."
+ },
+ "speechConfig": {
+ "description": "This is the speech configuration object that defines the voice settings to be used for the model's speech output.\nOnly applicable with the Gemini Flash 2.0 Multimodal Live API.",
"allOf": [
{
- "$ref": "#/components/schemas/AnthropicThinkingConfig"
+ "$ref": "#/components/schemas/GeminiMultimodalLiveSpeechConfig"
}
]
- },
- "temperature": {
- "type": "number",
- "description": "This is the temperature of the model.",
- "minimum": 0,
- "maximum": 2
- },
- "maxTokens": {
- "type": "number",
- "description": "This is the max tokens of the model.",
- "minimum": 50,
- "maximum": 10000
}
- },
- "required": [
- "provider",
- "model"
- ]
+ }
},
- "WorkflowGoogleModel": {
+ "GoogleModel": {
"type": "object",
"properties": {
- "provider": {
- "type": "string",
- "description": "This is the provider of the model (`google`).",
- "enum": [
- "google"
+ "messages": {
+ "description": "This is the starting state for the conversation.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIMessage"
+ }
+ },
+ "tools": {
+ "type": "array",
+ "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateApiRequestToolDTO",
+ "title": "ApiRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateBashToolDTO",
+ "title": "BashTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateComputerToolDTO",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDtmfToolDTO",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEndCallToolDTO",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateFunctionToolDTO",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHandoffToolDTO",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMcpToolDTO",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateQueryToolDTO",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmsToolDTO",
+ "title": "SmsTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTextEditorToolDTO",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTransferCallToolDTO",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
+ }
+ ]
+ }
+ },
+ "toolIds": {
+ "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "knowledgeBase": {
+ "description": "These are the options for the knowledge base.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
+ "title": "Custom"
+ }
]
},
"model": {
"type": "string",
- "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b",
- "maxLength": 100,
+ "description": "This is the Google model that will be used.",
"enum": [
+ "gemini-3-flash-preview",
"gemini-2.5-pro",
"gemini-2.5-flash",
"gemini-2.5-flash-lite",
@@ -15377,253 +15405,60 @@
"gemini-1.0-pro"
]
},
- "temperature": {
- "type": "number",
- "description": "This is the temperature of the model.",
- "minimum": 0,
- "maximum": 2
- },
- "maxTokens": {
- "type": "number",
- "description": "This is the max tokens of the model.",
- "minimum": 50,
- "maximum": 10000
- }
- },
- "required": [
- "provider",
- "model"
- ]
- },
- "WorkflowCustomModel": {
- "type": "object",
- "properties": {
"provider": {
"type": "string",
- "description": "This is the provider of the model (`custom-llm`).",
"enum": [
- "custom-llm"
+ "google"
]
},
- "metadataSendMode": {
- "type": "string",
- "description": "This determines whether metadata is sent in requests to the custom provider.\n\n- `off` will not send any metadata. payload will look like `{ messages }`\n- `variable` will send `assistant.metadata` as a variable on the payload. payload will look like `{ messages, metadata }`\n- `destructured` will send `assistant.metadata` fields directly on the payload. payload will look like `{ messages, ...metadata }`\n\nFurther, `variable` and `destructured` will send `call`, `phoneNumber`, and `customer` objects in the payload.\n\nDefault is `variable`.",
- "enum": [
- "off",
- "variable",
- "destructured"
+ "realtimeConfig": {
+ "description": "This is the session configuration for the Gemini Flash 2.0 Multimodal Live API.\nOnly applicable if the model `gemini-2.0-flash-realtime-exp` is selected.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/GoogleRealtimeConfig"
+ }
]
},
- "url": {
- "type": "string",
- "description": "These is the URL we'll use for the OpenAI client's `baseURL`. Ex. https://openrouter.ai/api/v1"
- },
- "headers": {
- "type": "object",
- "description": "These are the headers we'll use for the OpenAI client's `headers`."
- },
- "timeoutSeconds": {
- "type": "number",
- "description": "This sets the timeout for the connection to the custom provider without needing to stream any tokens back. Default is 20 seconds.",
- "minimum": 20,
- "maximum": 600
- },
- "model": {
- "type": "string",
- "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b",
- "maxLength": 100
- },
"temperature": {
"type": "number",
- "description": "This is the temperature of the model.",
+ "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
"minimum": 0,
"maximum": 2
},
"maxTokens": {
"type": "number",
- "description": "This is the max tokens of the model.",
+ "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
"minimum": 50,
"maximum": 10000
+ },
+ "emotionRecognitionEnabled": {
+ "type": "boolean",
+ "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false"
+ },
+ "numFastTurns": {
+ "type": "number",
+ "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
+ "minimum": 0
}
},
"required": [
- "provider",
- "url",
- "model"
+ "model",
+ "provider"
]
},
- "GlobalNodePlan": {
- "type": "object",
- "properties": {
- "enabled": {
- "type": "boolean",
- "description": "This is the flag to determine if this node is a global node\n\n@default false",
- "default": false
- },
- "enterCondition": {
- "type": "string",
- "description": "This is the condition that will be checked to determine if the global node should be executed.\n\n@default ''",
- "maxLength": 1000,
- "default": ""
- }
- }
- },
- "ConversationNode": {
+ "GroqModel": {
"type": "object",
"properties": {
- "type": {
- "type": "string",
- "description": "This is the Conversation node. This can be used to start a conversation with the customer.\n\nThe flow is:\n- Workflow starts the conversation node\n- Model is active with the `prompt` and global context.\n- Model will call a tool to exit this node.\n- Workflow will extract variables from the conversation.\n- Workflow continues.",
- "enum": [
- "conversation"
- ]
- },
- "model": {
- "description": "This is the model for the node.\n\nThis overrides `workflow.model`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/WorkflowOpenAIModel",
- "title": "WorkflowOpenAIModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowAnthropicModel",
- "title": "WorkflowAnthropicModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowGoogleModel",
- "title": "WorkflowGoogleModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowCustomModel",
- "title": "WorkflowCustomModel"
- }
- ]
- },
- "transcriber": {
- "description": "This is the transcriber for the node.\n\nThis overrides `workflow.transcriber`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AssemblyAITranscriber",
- "title": "AssemblyAITranscriber"
- },
- {
- "$ref": "#/components/schemas/AzureSpeechTranscriber",
- "title": "AzureSpeechTranscriber"
- },
- {
- "$ref": "#/components/schemas/CustomTranscriber",
- "title": "CustomTranscriber"
- },
- {
- "$ref": "#/components/schemas/DeepgramTranscriber",
- "title": "DeepgramTranscriber"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsTranscriber",
- "title": "ElevenLabsTranscriber"
- },
- {
- "$ref": "#/components/schemas/GladiaTranscriber",
- "title": "GladiaTranscriber"
- },
- {
- "$ref": "#/components/schemas/GoogleTranscriber",
- "title": "GoogleTranscriber"
- },
- {
- "$ref": "#/components/schemas/SpeechmaticsTranscriber",
- "title": "SpeechmaticsTranscriber"
- },
- {
- "$ref": "#/components/schemas/TalkscriberTranscriber",
- "title": "TalkscriberTranscriber"
- },
- {
- "$ref": "#/components/schemas/OpenAITranscriber",
- "title": "OpenAITranscriber"
- },
- {
- "$ref": "#/components/schemas/CartesiaTranscriber",
- "title": "CartesiaTranscriber"
- }
- ]
- },
- "voice": {
- "description": "This is the voice for the node.\n\nThis overrides `workflow.voice`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AzureVoice",
- "title": "AzureVoice"
- },
- {
- "$ref": "#/components/schemas/CartesiaVoice",
- "title": "CartesiaVoice"
- },
- {
- "$ref": "#/components/schemas/CustomVoice",
- "title": "CustomVoice"
- },
- {
- "$ref": "#/components/schemas/DeepgramVoice",
- "title": "DeepgramVoice"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsVoice",
- "title": "ElevenLabsVoice"
- },
- {
- "$ref": "#/components/schemas/HumeVoice",
- "title": "HumeVoice"
- },
- {
- "$ref": "#/components/schemas/LMNTVoice",
- "title": "LMNTVoice"
- },
- {
- "$ref": "#/components/schemas/NeuphonicVoice",
- "title": "NeuphonicVoice"
- },
- {
- "$ref": "#/components/schemas/OpenAIVoice",
- "title": "OpenAIVoice"
- },
- {
- "$ref": "#/components/schemas/PlayHTVoice",
- "title": "PlayHTVoice"
- },
- {
- "$ref": "#/components/schemas/RimeAIVoice",
- "title": "RimeAIVoice"
- },
- {
- "$ref": "#/components/schemas/SmallestAIVoice",
- "title": "SmallestAIVoice"
- },
- {
- "$ref": "#/components/schemas/TavusVoice",
- "title": "TavusVoice"
- },
- {
- "$ref": "#/components/schemas/VapiVoice",
- "title": "VapiVoice"
- },
- {
- "$ref": "#/components/schemas/SesameVoice",
- "title": "SesameVoice"
- },
- {
- "$ref": "#/components/schemas/InworldVoice",
- "title": "InworldVoice"
- },
- {
- "$ref": "#/components/schemas/MinimaxVoice",
- "title": "MinimaxVoice"
- }
- ]
+ "messages": {
+ "description": "This is the starting state for the conversation.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIMessage"
+ }
},
"tools": {
"type": "array",
- "description": "These are the tools that the conversation node can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
"items": {
"oneOf": [
{
@@ -15634,6 +15469,10 @@
"$ref": "#/components/schemas/CreateBashToolDTO",
"title": "BashTool"
},
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
{
"$ref": "#/components/schemas/CreateComputerToolDTO",
"title": "ComputerTool"
@@ -15705,1656 +15544,1288 @@
{
"$ref": "#/components/schemas/CreateTransferCallToolDTO",
"title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
]
}
},
"toolIds": {
- "description": "These are the tools that the conversation node can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
"type": "array",
"items": {
"type": "string"
}
},
- "prompt": {
- "type": "string",
- "maxLength": 5000
- },
- "globalNodePlan": {
- "description": "This is the plan for the global node.",
- "allOf": [
+ "knowledgeBase": {
+ "description": "These are the options for the knowledge base.",
+ "oneOf": [
{
- "$ref": "#/components/schemas/GlobalNodePlan"
+ "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
+ "title": "Custom"
}
]
},
- "variableExtractionPlan": {
- "description": "This is the plan that controls the variable extraction from the user's responses.\n\nUsage:\nUse `schema` to specify what you want to extract from the user's responses.\n```json\n{\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"user\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n }\n }\n }\n }\n }\n}\n```\n\nThis will be extracted as `{{ user.name }}` and `{{ user.age }}` respectively.\n\n(Optional) Use `aliases` to create new variables.\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"userAge\",\n \"value\": \"{{user.age}}\"\n },\n {\n \"key\": \"userName\",\n \"value\": \"{{user.name}}\"\n }\n ]\n}\n```\n\nThis will be extracted as `{{ userAge }}` and `{{ userName }}` respectively.\n\nNote: The `schema` field is required for Conversation nodes if you want to extract variables from the user's responses. `aliases` is just a convenience.",
- "allOf": [
- {
- "$ref": "#/components/schemas/VariableExtractionPlan"
- }
+ "model": {
+ "type": "string",
+ "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b",
+ "enum": [
+ "openai/gpt-oss-20b",
+ "openai/gpt-oss-120b",
+ "deepseek-r1-distill-llama-70b",
+ "llama-3.3-70b-versatile",
+ "llama-3.1-405b-reasoning",
+ "llama-3.1-8b-instant",
+ "llama3-8b-8192",
+ "llama3-70b-8192",
+ "gemma2-9b-it",
+ "moonshotai/kimi-k2-instruct-0905",
+ "meta-llama/llama-4-maverick-17b-128e-instruct",
+ "meta-llama/llama-4-scout-17b-16e-instruct",
+ "mistral-saba-24b",
+ "compound-beta",
+ "compound-beta-mini"
]
},
- "name": {
+ "provider": {
"type": "string",
- "maxLength": 80
+ "enum": [
+ "groq"
+ ]
},
- "isStart": {
- "type": "boolean",
- "description": "This is whether or not the node is the start of the workflow."
+ "temperature": {
+ "type": "number",
+ "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
+ "minimum": 0,
+ "maximum": 2
},
- "metadata": {
- "type": "object",
- "description": "This is for metadata you want to store on the task."
- }
- },
- "required": [
- "type",
- "name"
- ]
- },
- "ToolNode": {
- "type": "object",
- "properties": {
- "type": {
- "type": "string",
- "description": "This is the Tool node. This can be used to call a tool in your workflow.\n\nThe flow is:\n- Workflow starts the tool node\n- Model is called to extract parameters needed by the tool from the conversation history\n- Tool is called with the parameters\n- Server returns a response\n- Workflow continues with the response",
- "enum": [
- "tool"
- ]
- },
- "tool": {
- "description": "This is the tool to call. To use an existing tool, send `toolId` instead.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/CreateApiRequestToolDTO",
- "title": "ApiRequestTool"
- },
- {
- "$ref": "#/components/schemas/CreateBashToolDTO",
- "title": "BashTool"
- },
- {
- "$ref": "#/components/schemas/CreateComputerToolDTO",
- "title": "ComputerTool"
- },
- {
- "$ref": "#/components/schemas/CreateDtmfToolDTO",
- "title": "DtmfTool"
- },
- {
- "$ref": "#/components/schemas/CreateEndCallToolDTO",
- "title": "EndCallTool"
- },
- {
- "$ref": "#/components/schemas/CreateFunctionToolDTO",
- "title": "FunctionTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
- "title": "GoHighLevelCalendarAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
- "title": "GoHighLevelCalendarEventCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
- "title": "GoHighLevelContactCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
- "title": "GoHighLevelContactGetTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
- "title": "GoogleCalendarCheckAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
- "title": "GoogleCalendarCreateEventTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
- "title": "GoogleSheetsRowAppendTool"
- },
- {
- "$ref": "#/components/schemas/CreateHandoffToolDTO",
- "title": "HandoffTool"
- },
- {
- "$ref": "#/components/schemas/CreateMcpToolDTO",
- "title": "McpTool"
- },
- {
- "$ref": "#/components/schemas/CreateQueryToolDTO",
- "title": "QueryTool"
- },
- {
- "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
- "title": "SlackSendMessageTool"
- },
- {
- "$ref": "#/components/schemas/CreateSmsToolDTO",
- "title": "SmsTool"
- },
- {
- "$ref": "#/components/schemas/CreateTextEditorToolDTO",
- "title": "TextEditorTool"
- },
- {
- "$ref": "#/components/schemas/CreateTransferCallToolDTO",
- "title": "TransferCallTool"
- }
- ]
- },
- "toolId": {
- "type": "string",
- "description": "This is the tool to call. To use a transient tool, send `tool` instead."
- },
- "name": {
- "type": "string",
- "maxLength": 80
+ "maxTokens": {
+ "type": "number",
+ "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
+ "minimum": 50,
+ "maximum": 10000
},
- "isStart": {
+ "emotionRecognitionEnabled": {
"type": "boolean",
- "description": "This is whether or not the node is the start of the workflow."
+ "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false"
},
- "metadata": {
- "type": "object",
- "description": "This is for metadata you want to store on the task."
+ "numFastTurns": {
+ "type": "number",
+ "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
+ "minimum": 0
}
},
"required": [
- "type",
- "name"
+ "model",
+ "provider"
]
},
- "AIEdgeCondition": {
+ "InflectionAIModel": {
"type": "object",
"properties": {
- "type": {
- "type": "string",
- "enum": [
- "ai"
- ]
+ "messages": {
+ "description": "This is the starting state for the conversation.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIMessage"
+ }
},
- "prompt": {
- "type": "string",
- "description": "This is the prompt for the AI edge condition. It should evaluate to a boolean.",
- "maxLength": 1000
- }
- },
- "required": [
- "type",
- "prompt"
- ]
- },
- "Edge": {
- "type": "object",
- "properties": {
- "condition": {
+ "tools": {
+ "type": "array",
+ "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateApiRequestToolDTO",
+ "title": "ApiRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateBashToolDTO",
+ "title": "BashTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateComputerToolDTO",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDtmfToolDTO",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEndCallToolDTO",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateFunctionToolDTO",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHandoffToolDTO",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMcpToolDTO",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateQueryToolDTO",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmsToolDTO",
+ "title": "SmsTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTextEditorToolDTO",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTransferCallToolDTO",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
+ }
+ ]
+ }
+ },
+ "toolIds": {
+ "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "knowledgeBase": {
+ "description": "These are the options for the knowledge base.",
"oneOf": [
{
- "$ref": "#/components/schemas/AIEdgeCondition",
- "title": "AIEdgeCondition"
+ "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
+ "title": "Custom"
}
]
},
- "from": {
+ "model": {
"type": "string",
- "maxLength": 80
+ "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b",
+ "enum": [
+ "inflection_3_pi"
+ ]
},
- "to": {
+ "provider": {
"type": "string",
- "maxLength": 80
+ "enum": [
+ "inflection-ai"
+ ]
},
- "metadata": {
- "type": "object",
- "description": "This is for metadata you want to store on the edge."
+ "temperature": {
+ "type": "number",
+ "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
+ "minimum": 0,
+ "maximum": 2
+ },
+ "maxTokens": {
+ "type": "number",
+ "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
+ "minimum": 50,
+ "maximum": 10000
+ },
+ "emotionRecognitionEnabled": {
+ "type": "boolean",
+ "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false"
+ },
+ "numFastTurns": {
+ "type": "number",
+ "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
+ "minimum": 0
}
},
"required": [
- "from",
- "to"
+ "model",
+ "provider"
]
},
- "SecurityFilterBase": {
- "type": "object",
- "properties": {}
- },
- "SecurityFilterPlan": {
+ "MinimaxLLMModel": {
"type": "object",
"properties": {
- "enabled": {
- "type": "boolean",
- "description": "Whether the security filter is enabled.\n@default false",
- "default": false
+ "messages": {
+ "description": "This is the starting state for the conversation.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIMessage"
+ }
},
- "filters": {
- "description": "Array of security filter types to apply.\nIf array is not empty, only those security filters are run.",
- "example": "[{ type: \"sql-injection\" }, { type: \"xss\" }]",
+ "tools": {
"type": "array",
+ "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
"items": {
- "$ref": "#/components/schemas/SecurityFilterBase"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateApiRequestToolDTO",
+ "title": "ApiRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateBashToolDTO",
+ "title": "BashTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateComputerToolDTO",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDtmfToolDTO",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEndCallToolDTO",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateFunctionToolDTO",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHandoffToolDTO",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMcpToolDTO",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateQueryToolDTO",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmsToolDTO",
+ "title": "SmsTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTextEditorToolDTO",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTransferCallToolDTO",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
+ }
+ ]
}
},
- "mode": {
+ "toolIds": {
+ "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "knowledgeBase": {
+ "description": "These are the options for the knowledge base.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
+ "title": "Custom"
+ }
+ ]
+ },
+ "provider": {
"type": "string",
- "description": "Mode of operation when a security threat is detected.\n- 'sanitize': Remove or replace the threatening content\n- 'reject': Replace the entire transcript with replacement text\n- 'replace': Replace threatening patterns with replacement text\n@default 'sanitize'",
"enum": [
- "sanitize",
- "reject",
- "replace"
- ],
- "default": "sanitize"
+ "minimax"
+ ]
},
- "replacementText": {
+ "model": {
"type": "string",
- "description": "Text to use when replacing filtered content.\n@default '[FILTERED]'",
- "default": "[FILTERED]"
- }
- }
- },
- "CompliancePlan": {
- "type": "object",
- "properties": {
- "hipaaEnabled": {
- "type": "boolean",
- "description": "When this is enabled, no logs, recordings, or transcriptions will be stored.\nAt the end of the call, you will still receive an end-of-call-report message to store on your server. Defaults to false.",
- "example": {
- "hipaaEnabled": false
- }
+ "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b",
+ "enum": [
+ "MiniMax-M2.7"
+ ]
},
- "pciEnabled": {
+ "temperature": {
+ "type": "number",
+ "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
+ "minimum": 0,
+ "maximum": 2
+ },
+ "maxTokens": {
+ "type": "number",
+ "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
+ "minimum": 50,
+ "maximum": 10000
+ },
+ "emotionRecognitionEnabled": {
"type": "boolean",
- "description": "When this is enabled, the user will be restricted to use PCI-compliant providers, and no logs or transcripts are stored.\nAt the end of the call, you will receive an end-of-call-report message to store on your server. Defaults to false.",
- "example": {
- "pciEnabled": false
- }
+ "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false"
},
- "securityFilterPlan": {
- "description": "This is the security filter plan for the assistant. It allows filtering of transcripts for security threats before sending to LLM.",
- "allOf": [
- {
- "$ref": "#/components/schemas/SecurityFilterPlan"
- }
- ]
+ "numFastTurns": {
+ "type": "number",
+ "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
+ "minimum": 0
}
- }
+ },
+ "required": [
+ "provider",
+ "model"
+ ]
},
- "StructuredDataPlan": {
+ "OpenAIModel": {
"type": "object",
"properties": {
"messages": {
- "description": "These are the messages used to generate the structured data.\n\n@default: ```\n[\n {\n \"role\": \"system\",\n \"content\": \"You are an expert data extractor. You will be given a transcript of a call. Extract structured data per the JSON Schema. DO NOT return anything except the structured data.\\n\\nJson Schema:\\\\n{{schema}}\\n\\nOnly respond with the JSON.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Here is the transcript:\\n\\n{{transcript}}\\n\\n. Here is the ended reason of the call:\\n\\n{{endedReason}}\\n\\n\"\n }\n]```\n\nYou can customize by providing any messages you want.\n\nHere are the template variables available:\n- {{transcript}}: the transcript of the call from `call.artifact.transcript`- {{systemPrompt}}: the system prompt of the call from `assistant.model.messages[type=system].content`- {{messages}}: the messages of the call from `assistant.model.messages`- {{schema}}: the schema of the structured data from `structuredDataPlan.schema`- {{endedReason}}: the ended reason of the call from `call.endedReason`",
+ "description": "This is the starting state for the conversation.",
"type": "array",
"items": {
- "type": "object"
+ "$ref": "#/components/schemas/OpenAIMessage"
}
},
- "enabled": {
- "type": "boolean",
- "description": "This determines whether structured data is generated and stored in `call.analysis.structuredData`. Defaults to false.\n\nUsage:\n- If you want to extract structured data, set this to true and provide a `schema`.\n\n@default false"
- },
- "schema": {
- "description": "This is the schema of the structured data. The output is stored in `call.analysis.structuredData`.\n\nComplete guide on JSON Schema can be found [here](https://ajv.js.org/json-schema.html#json-data-type).",
- "allOf": [
- {
- "$ref": "#/components/schemas/JsonSchema"
- }
- ]
- },
- "timeoutSeconds": {
- "type": "number",
- "description": "This is how long the request is tried before giving up. When request times out, `call.analysis.structuredData` will be empty.\n\nUsage:\n- To guarantee the structured data is generated, set this value high. Note, this will delay the end of call report in cases where model is slow to respond.\n\n@default 5 seconds",
- "minimum": 1,
- "maximum": 60
- }
- }
- },
- "StructuredDataMultiPlan": {
- "type": "object",
- "properties": {
- "key": {
- "type": "string",
- "description": "This is the key of the structured data plan in the catalog."
- },
- "plan": {
- "description": "This is an individual structured data plan in the catalog.",
- "allOf": [
- {
- "$ref": "#/components/schemas/StructuredDataPlan"
- }
- ]
- }
- },
- "required": [
- "key",
- "plan"
- ]
- },
- "SuccessEvaluationPlan": {
- "type": "object",
- "properties": {
- "rubric": {
- "type": "string",
- "enum": [
- "NumericScale",
- "DescriptiveScale",
- "Checklist",
- "Matrix",
- "PercentageScale",
- "LikertScale",
- "AutomaticRubric",
- "PassFail"
- ],
- "description": "This enforces the rubric of the evaluation. The output is stored in `call.analysis.successEvaluation`.\n\nOptions include:\n- 'NumericScale': A scale of 1 to 10.\n- 'DescriptiveScale': A scale of Excellent, Good, Fair, Poor.\n- 'Checklist': A checklist of criteria and their status.\n- 'Matrix': A grid that evaluates multiple criteria across different performance levels.\n- 'PercentageScale': A scale of 0% to 100%.\n- 'LikertScale': A scale of Strongly Agree, Agree, Neutral, Disagree, Strongly Disagree.\n- 'AutomaticRubric': Automatically break down evaluation into several criteria, each with its own score.\n- 'PassFail': A simple 'true' if call passed, 'false' if not.\n\nDefault is 'PassFail'."
- },
- "messages": {
- "description": "These are the messages used to generate the success evaluation.\n\n@default: ```\n[\n {\n \"role\": \"system\",\n \"content\": \"You are an expert call evaluator. You will be given a transcript of a call and the system prompt of the AI participant. Determine if the call was successful based on the objectives inferred from the system prompt. DO NOT return anything except the result.\\n\\nRubric:\\\\n{{rubric}}\\n\\nOnly respond with the result.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Here is the transcript:\\n\\n{{transcript}}\\n\\n\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Here was the system prompt of the call:\\n\\n{{systemPrompt}}\\n\\n. Here is the ended reason of the call:\\n\\n{{endedReason}}\\n\\n\"\n }\n]```\n\nYou can customize by providing any messages you want.\n\nHere are the template variables available:\n- {{transcript}}: the transcript of the call from `call.artifact.transcript`- {{systemPrompt}}: the system prompt of the call from `assistant.model.messages[type=system].content`- {{messages}}: the messages of the call from `assistant.model.messages`- {{rubric}}: the rubric of the success evaluation from `successEvaluationPlan.rubric`- {{endedReason}}: the ended reason of the call from `call.endedReason`",
+ "tools": {
"type": "array",
+ "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
"items": {
- "type": "object"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateApiRequestToolDTO",
+ "title": "ApiRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateBashToolDTO",
+ "title": "BashTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateComputerToolDTO",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDtmfToolDTO",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEndCallToolDTO",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateFunctionToolDTO",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHandoffToolDTO",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMcpToolDTO",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateQueryToolDTO",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmsToolDTO",
+ "title": "SmsTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTextEditorToolDTO",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTransferCallToolDTO",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
+ }
+ ]
}
},
- "enabled": {
- "type": "boolean",
- "description": "This determines whether a success evaluation is generated and stored in `call.analysis.successEvaluation`. Defaults to true.\n\nUsage:\n- If you want to disable the success evaluation, set this to false.\n\n@default true"
- },
- "timeoutSeconds": {
- "type": "number",
- "description": "This is how long the request is tried before giving up. When request times out, `call.analysis.successEvaluation` will be empty.\n\nUsage:\n- To guarantee the success evaluation is generated, set this value high. Note, this will delay the end of call report in cases where model is slow to respond.\n\n@default 5 seconds",
- "minimum": 1,
- "maximum": 60
- }
- }
- },
- "AnalysisPlan": {
- "type": "object",
- "properties": {
- "minMessagesThreshold": {
- "type": "number",
- "description": "The minimum number of messages required to run the analysis plan.\nIf the number of messages is less than this, analysis will be skipped.\n@default 2",
- "minimum": 0
- },
- "summaryPlan": {
- "description": "This is the plan for generating the summary of the call. This outputs to `call.analysis.summary`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/SummaryPlan"
- }
- ]
- },
- "structuredDataPlan": {
- "description": "This is the plan for generating the structured data from the call. This outputs to `call.analysis.structuredData`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/StructuredDataPlan"
- }
- ]
- },
- "structuredDataMultiPlan": {
- "description": "This is an array of structured data plan catalogs. Each entry includes a `key` and a `plan` for generating the structured data from the call. This outputs to `call.analysis.structuredDataMulti`.",
+ "toolIds": {
+ "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
"type": "array",
"items": {
- "$ref": "#/components/schemas/StructuredDataMultiPlan"
+ "type": "string"
}
},
- "successEvaluationPlan": {
- "description": "This is the plan for generating the success evaluation of the call. This outputs to `call.analysis.successEvaluation`.",
- "allOf": [
+ "knowledgeBase": {
+ "description": "These are the options for the knowledge base.",
+ "oneOf": [
{
- "$ref": "#/components/schemas/SuccessEvaluationPlan"
+ "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
+ "title": "Custom"
}
]
},
- "outcomeIds": {
- "description": "This is an array of outcome UUIDs to be calculated during analysis.\nThe outcomes will be calculated and stored in `call.analysis.outcomes`.",
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- },
- "RegexOption": {
- "type": "object",
- "properties": {
- "type": {
+ "provider": {
"type": "string",
- "description": "This is the type of the regex option. Options are:\n- `ignore-case`: Ignores the case of the text being matched. Add\n- `whole-word`: Matches whole words only.\n- `multi-line`: Matches across multiple lines.",
+ "description": "This is the provider that will be used for the model.",
"enum": [
- "ignore-case",
- "whole-word",
- "multi-line"
+ "openai"
]
},
- "enabled": {
- "type": "boolean",
- "description": "This is whether to enable the option.\n\n@default false"
- }
- },
- "required": [
- "type",
- "enabled"
- ]
- },
- "AssistantCustomEndpointingRule": {
- "type": "object",
- "properties": {
- "type": {
+ "model": {
"type": "string",
- "description": "This endpointing rule is based on the last assistant message before customer started speaking.\n\nFlow:\n- Assistant speaks\n- Customer starts speaking\n- Customer transcription comes in\n- This rule is evaluated on the last assistant message\n- If a match is found based on `regex`, the endpointing timeout is set to `timeoutSeconds`\n\nUsage:\n- If you have yes/no questions in your use case like \"are you interested in a loan?\", you can set a shorter timeout.\n- If you have questions where the customer may pause to look up information like \"what's my account number?\", you can set a longer timeout.",
+ "description": "This is the OpenAI model that will be used.\n\nWhen using Vapi OpenAI or your own Azure Credentials, you have the option to specify the region for the selected model. This shouldn't be specified unless you have a specific reason to do so. Vapi will automatically find the fastest region that make sense.\nThis is helpful when you are required to comply with Data Residency rules. Learn more about Azure regions here https://azure.microsoft.com/en-us/explore/global-infrastructure/data-residency/.\n\n@default undefined",
"enum": [
- "assistant"
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.2",
+ "gpt-5.2-chat-latest",
+ "gpt-5.1",
+ "gpt-5.1-chat-latest",
+ "gpt-5",
+ "gpt-5-chat-latest",
+ "gpt-5-mini",
+ "gpt-5-nano",
+ "gpt-4.1-2025-04-14",
+ "gpt-4.1-mini-2025-04-14",
+ "gpt-4.1-nano-2025-04-14",
+ "gpt-4.1",
+ "gpt-4.1-mini",
+ "gpt-4.1-nano",
+ "chatgpt-4o-latest",
+ "o3",
+ "o3-mini",
+ "o4-mini",
+ "o1-mini",
+ "o1-mini-2024-09-12",
+ "gpt-4o-realtime-preview-2024-10-01",
+ "gpt-4o-realtime-preview-2024-12-17",
+ "gpt-4o-mini-realtime-preview-2024-12-17",
+ "gpt-realtime-2025-08-28",
+ "gpt-realtime-mini-2025-12-15",
+ "gpt-4o-mini-2024-07-18",
+ "gpt-4o-mini",
+ "gpt-4o",
+ "gpt-4o-2024-05-13",
+ "gpt-4o-2024-08-06",
+ "gpt-4o-2024-11-20",
+ "gpt-4-turbo",
+ "gpt-4-turbo-2024-04-09",
+ "gpt-4-turbo-preview",
+ "gpt-4-0125-preview",
+ "gpt-4-1106-preview",
+ "gpt-4",
+ "gpt-4-0613",
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-0125",
+ "gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0613",
+ "gpt-4.1-2025-04-14:westus",
+ "gpt-4.1-2025-04-14:eastus2",
+ "gpt-4.1-2025-04-14:eastus",
+ "gpt-4.1-2025-04-14:westus3",
+ "gpt-4.1-2025-04-14:northcentralus",
+ "gpt-4.1-2025-04-14:southcentralus",
+ "gpt-4.1-2025-04-14:westeurope",
+ "gpt-4.1-2025-04-14:germanywestcentral",
+ "gpt-4.1-2025-04-14:polandcentral",
+ "gpt-4.1-2025-04-14:spaincentral",
+ "gpt-4.1-mini-2025-04-14:westus",
+ "gpt-4.1-mini-2025-04-14:eastus2",
+ "gpt-4.1-mini-2025-04-14:eastus",
+ "gpt-4.1-mini-2025-04-14:westus3",
+ "gpt-4.1-mini-2025-04-14:northcentralus",
+ "gpt-4.1-mini-2025-04-14:southcentralus",
+ "gpt-4.1-mini-2025-04-14:westeurope",
+ "gpt-4.1-mini-2025-04-14:germanywestcentral",
+ "gpt-4.1-mini-2025-04-14:polandcentral",
+ "gpt-4.1-mini-2025-04-14:spaincentral",
+ "gpt-4.1-nano-2025-04-14:westus",
+ "gpt-4.1-nano-2025-04-14:eastus2",
+ "gpt-4.1-nano-2025-04-14:westus3",
+ "gpt-4.1-nano-2025-04-14:northcentralus",
+ "gpt-4.1-nano-2025-04-14:southcentralus",
+ "gpt-4o-2024-11-20:swedencentral",
+ "gpt-4o-2024-11-20:westus",
+ "gpt-4o-2024-11-20:eastus2",
+ "gpt-4o-2024-11-20:eastus",
+ "gpt-4o-2024-11-20:westus3",
+ "gpt-4o-2024-11-20:southcentralus",
+ "gpt-4o-2024-11-20:westeurope",
+ "gpt-4o-2024-11-20:germanywestcentral",
+ "gpt-4o-2024-11-20:polandcentral",
+ "gpt-4o-2024-11-20:spaincentral",
+ "gpt-4o-2024-08-06:westus",
+ "gpt-4o-2024-08-06:westus3",
+ "gpt-4o-2024-08-06:eastus",
+ "gpt-4o-2024-08-06:eastus2",
+ "gpt-4o-2024-08-06:northcentralus",
+ "gpt-4o-2024-08-06:southcentralus",
+ "gpt-4o-mini-2024-07-18:westus",
+ "gpt-4o-mini-2024-07-18:westus3",
+ "gpt-4o-mini-2024-07-18:eastus",
+ "gpt-4o-mini-2024-07-18:eastus2",
+ "gpt-4o-mini-2024-07-18:northcentralus",
+ "gpt-4o-mini-2024-07-18:southcentralus",
+ "gpt-4o-2024-05-13:eastus2",
+ "gpt-4o-2024-05-13:eastus",
+ "gpt-4o-2024-05-13:northcentralus",
+ "gpt-4o-2024-05-13:southcentralus",
+ "gpt-4o-2024-05-13:westus3",
+ "gpt-4o-2024-05-13:westus",
+ "gpt-4-turbo-2024-04-09:eastus2",
+ "gpt-4-0125-preview:eastus",
+ "gpt-4-0125-preview:northcentralus",
+ "gpt-4-0125-preview:southcentralus",
+ "gpt-4-1106-preview:australiaeast",
+ "gpt-4-1106-preview:canadaeast",
+ "gpt-4-1106-preview:france",
+ "gpt-4-1106-preview:india",
+ "gpt-4-1106-preview:norway",
+ "gpt-4-1106-preview:swedencentral",
+ "gpt-4-1106-preview:uk",
+ "gpt-4-1106-preview:westus",
+ "gpt-4-1106-preview:westus3",
+ "gpt-4-0613:canadaeast",
+ "gpt-3.5-turbo-0125:canadaeast",
+ "gpt-3.5-turbo-0125:northcentralus",
+ "gpt-3.5-turbo-0125:southcentralus",
+ "gpt-3.5-turbo-1106:canadaeast",
+ "gpt-3.5-turbo-1106:westus"
]
},
- "regex": {
- "type": "string",
- "description": "This is the regex pattern to match.\n\nNote:\n- This works by using the `RegExp.test` method in Node.JS. Eg. `/hello/.test(\"hello there\")` will return `true`.\n\nHot tip:\n- In JavaScript, escape `\\` when sending the regex pattern. Eg. `\"hello\\sthere\"` will be sent over the wire as `\"hellosthere\"`. Send `\"hello\\\\sthere\"` instead.\n- `RegExp.test` does substring matching, so `/cat/.test(\"I love cats\")` will return `true`. To do full string matching, send \"^cat$\"."
- },
- "regexOptions": {
- "description": "These are the options for the regex match. Defaults to all disabled.\n\n@default []",
+ "fallbackModels": {
"type": "array",
+ "description": "These are the fallback models that will be used if the primary model fails. This shouldn't be specified unless you have a specific reason to do so. Vapi will automatically find the fastest fallbacks that make sense.",
+ "example": [
+ "gpt-4-0125-preview",
+ "gpt-4-0613"
+ ],
"items": {
- "$ref": "#/components/schemas/RegexOption"
+ "type": "string",
+ "enum": [
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.2",
+ "gpt-5.2-chat-latest",
+ "gpt-5.1",
+ "gpt-5.1-chat-latest",
+ "gpt-5",
+ "gpt-5-chat-latest",
+ "gpt-5-mini",
+ "gpt-5-nano",
+ "gpt-4.1-2025-04-14",
+ "gpt-4.1-mini-2025-04-14",
+ "gpt-4.1-nano-2025-04-14",
+ "gpt-4.1",
+ "gpt-4.1-mini",
+ "gpt-4.1-nano",
+ "chatgpt-4o-latest",
+ "o3",
+ "o3-mini",
+ "o4-mini",
+ "o1-mini",
+ "o1-mini-2024-09-12",
+ "gpt-4o-realtime-preview-2024-10-01",
+ "gpt-4o-realtime-preview-2024-12-17",
+ "gpt-4o-mini-realtime-preview-2024-12-17",
+ "gpt-realtime-2025-08-28",
+ "gpt-realtime-mini-2025-12-15",
+ "gpt-4o-mini-2024-07-18",
+ "gpt-4o-mini",
+ "gpt-4o",
+ "gpt-4o-2024-05-13",
+ "gpt-4o-2024-08-06",
+ "gpt-4o-2024-11-20",
+ "gpt-4-turbo",
+ "gpt-4-turbo-2024-04-09",
+ "gpt-4-turbo-preview",
+ "gpt-4-0125-preview",
+ "gpt-4-1106-preview",
+ "gpt-4",
+ "gpt-4-0613",
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-0125",
+ "gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0613",
+ "gpt-4.1-2025-04-14:westus",
+ "gpt-4.1-2025-04-14:eastus2",
+ "gpt-4.1-2025-04-14:eastus",
+ "gpt-4.1-2025-04-14:westus3",
+ "gpt-4.1-2025-04-14:northcentralus",
+ "gpt-4.1-2025-04-14:southcentralus",
+ "gpt-4.1-2025-04-14:westeurope",
+ "gpt-4.1-2025-04-14:germanywestcentral",
+ "gpt-4.1-2025-04-14:polandcentral",
+ "gpt-4.1-2025-04-14:spaincentral",
+ "gpt-4.1-mini-2025-04-14:westus",
+ "gpt-4.1-mini-2025-04-14:eastus2",
+ "gpt-4.1-mini-2025-04-14:eastus",
+ "gpt-4.1-mini-2025-04-14:westus3",
+ "gpt-4.1-mini-2025-04-14:northcentralus",
+ "gpt-4.1-mini-2025-04-14:southcentralus",
+ "gpt-4.1-mini-2025-04-14:westeurope",
+ "gpt-4.1-mini-2025-04-14:germanywestcentral",
+ "gpt-4.1-mini-2025-04-14:polandcentral",
+ "gpt-4.1-mini-2025-04-14:spaincentral",
+ "gpt-4.1-nano-2025-04-14:westus",
+ "gpt-4.1-nano-2025-04-14:eastus2",
+ "gpt-4.1-nano-2025-04-14:westus3",
+ "gpt-4.1-nano-2025-04-14:northcentralus",
+ "gpt-4.1-nano-2025-04-14:southcentralus",
+ "gpt-4o-2024-11-20:swedencentral",
+ "gpt-4o-2024-11-20:westus",
+ "gpt-4o-2024-11-20:eastus2",
+ "gpt-4o-2024-11-20:eastus",
+ "gpt-4o-2024-11-20:westus3",
+ "gpt-4o-2024-11-20:southcentralus",
+ "gpt-4o-2024-11-20:westeurope",
+ "gpt-4o-2024-11-20:germanywestcentral",
+ "gpt-4o-2024-11-20:polandcentral",
+ "gpt-4o-2024-11-20:spaincentral",
+ "gpt-4o-2024-08-06:westus",
+ "gpt-4o-2024-08-06:westus3",
+ "gpt-4o-2024-08-06:eastus",
+ "gpt-4o-2024-08-06:eastus2",
+ "gpt-4o-2024-08-06:northcentralus",
+ "gpt-4o-2024-08-06:southcentralus",
+ "gpt-4o-mini-2024-07-18:westus",
+ "gpt-4o-mini-2024-07-18:westus3",
+ "gpt-4o-mini-2024-07-18:eastus",
+ "gpt-4o-mini-2024-07-18:eastus2",
+ "gpt-4o-mini-2024-07-18:northcentralus",
+ "gpt-4o-mini-2024-07-18:southcentralus",
+ "gpt-4o-2024-05-13:eastus2",
+ "gpt-4o-2024-05-13:eastus",
+ "gpt-4o-2024-05-13:northcentralus",
+ "gpt-4o-2024-05-13:southcentralus",
+ "gpt-4o-2024-05-13:westus3",
+ "gpt-4o-2024-05-13:westus",
+ "gpt-4-turbo-2024-04-09:eastus2",
+ "gpt-4-0125-preview:eastus",
+ "gpt-4-0125-preview:northcentralus",
+ "gpt-4-0125-preview:southcentralus",
+ "gpt-4-1106-preview:australiaeast",
+ "gpt-4-1106-preview:canadaeast",
+ "gpt-4-1106-preview:france",
+ "gpt-4-1106-preview:india",
+ "gpt-4-1106-preview:norway",
+ "gpt-4-1106-preview:swedencentral",
+ "gpt-4-1106-preview:uk",
+ "gpt-4-1106-preview:westus",
+ "gpt-4-1106-preview:westus3",
+ "gpt-4-0613:canadaeast",
+ "gpt-3.5-turbo-0125:canadaeast",
+ "gpt-3.5-turbo-0125:northcentralus",
+ "gpt-3.5-turbo-0125:southcentralus",
+ "gpt-3.5-turbo-1106:canadaeast",
+ "gpt-3.5-turbo-1106:westus"
+ ]
}
},
- "timeoutSeconds": {
- "type": "number",
- "description": "This is the endpointing timeout in seconds, if the rule is matched.",
- "minimum": 0,
- "maximum": 15
- }
- },
- "required": [
- "type",
- "regex",
- "timeoutSeconds"
- ]
- },
- "CustomerCustomEndpointingRule": {
- "type": "object",
- "properties": {
- "type": {
+ "toolStrictCompatibilityMode": {
"type": "string",
- "description": "This endpointing rule is based on current customer message as they are speaking.\n\nFlow:\n- Assistant speaks\n- Customer starts speaking\n- Customer transcription comes in\n- This rule is evaluated on the current customer transcription\n- If a match is found based on `regex`, the endpointing timeout is set to `timeoutSeconds`\n\nUsage:\n- If you want to wait longer while customer is speaking numbers, you can set a longer timeout.",
+ "description": "Azure OpenAI doesn't support `maxLength` right now https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/structured-outputs?tabs=python-secure%2Cdotnet-entra-id&pivots=programming-language-csharp#unsupported-type-specific-keywords. Need to strip.\n\n- `strip-parameters-with-unsupported-validation` will strip parameters with unsupported validation.\n- `strip-unsupported-validation` will keep the parameters but strip unsupported validation.\n\n@default `strip-unsupported-validation`",
"enum": [
- "customer"
+ "strip-parameters-with-unsupported-validation",
+ "strip-unsupported-validation"
]
},
- "regex": {
+ "promptCacheRetention": {
"type": "string",
- "description": "This is the regex pattern to match.\n\nNote:\n- This works by using the `RegExp.test` method in Node.JS. Eg. `/hello/.test(\"hello there\")` will return `true`.\n\nHot tip:\n- In JavaScript, escape `\\` when sending the regex pattern. Eg. `\"hello\\sthere\"` will be sent over the wire as `\"hellosthere\"`. Send `\"hello\\\\sthere\"` instead.\n- `RegExp.test` does substring matching, so `/cat/.test(\"I love cats\")` will return `true`. To do full string matching, send \"^cat$\"."
+ "description": "This controls the prompt cache retention policy for models that support extended caching (GPT-4.1, GPT-5 series).\n\n- `in_memory`: Default behavior, cache retained in GPU memory only\n- `24h`: Extended caching, keeps cached prefixes active for up to 24 hours by offloading to GPU-local storage\n\nOnly applies to models: gpt-5.4, gpt-5.4-mini, gpt-5.4-nano, gpt-5.2, gpt-5.1, gpt-5.1-codex, gpt-5.1-codex-mini, gpt-5.1-chat-latest, gpt-5, gpt-5-codex, gpt-4.1\n\n@default undefined (uses API default which is 'in_memory')",
+ "enum": [
+ "in_memory",
+ "24h"
+ ]
},
- "regexOptions": {
- "description": "These are the options for the regex match. Defaults to all disabled.\n\n@default []",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/RegexOption"
- }
+ "promptCacheKey": {
+ "type": "string",
+ "description": "This is the prompt cache key for models that support extended caching (GPT-4.1, GPT-5 series).\n\nProviding a cache key allows you to share cached prefixes across requests.\n\n@default undefined",
+ "maxLength": 64
},
- "timeoutSeconds": {
+ "temperature": {
"type": "number",
- "description": "This is the endpointing timeout in seconds, if the rule is matched.",
+ "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
"minimum": 0,
- "maximum": 15
+ "maximum": 2
+ },
+ "maxTokens": {
+ "type": "number",
+ "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
+ "minimum": 50,
+ "maximum": 10000
+ },
+ "emotionRecognitionEnabled": {
+ "type": "boolean",
+ "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false"
+ },
+ "numFastTurns": {
+ "type": "number",
+ "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
+ "minimum": 0
}
},
"required": [
- "type",
- "regex",
- "timeoutSeconds"
+ "provider",
+ "model"
]
},
- "BothCustomEndpointingRule": {
+ "OpenRouterModel": {
"type": "object",
"properties": {
- "type": {
- "type": "string",
- "description": "This endpointing rule is based on both the last assistant message and the current customer message as they are speaking.\n\nFlow:\n- Assistant speaks\n- Customer starts speaking\n- Customer transcription comes in\n- This rule is evaluated on the last assistant message and the current customer transcription\n- If assistant message matches `assistantRegex` AND customer message matches `customerRegex`, the endpointing timeout is set to `timeoutSeconds`\n\nUsage:\n- If you want to wait longer while customer is speaking numbers, you can set a longer timeout.",
- "enum": [
- "both"
- ]
- },
- "assistantRegex": {
- "type": "string",
- "description": "This is the regex pattern to match the assistant's message.\n\nNote:\n- This works by using the `RegExp.test` method in Node.JS. Eg. `/hello/.test(\"hello there\")` will return `true`.\n\nHot tip:\n- In JavaScript, escape `\\` when sending the regex pattern. Eg. `\"hello\\sthere\"` will be sent over the wire as `\"hellosthere\"`. Send `\"hello\\\\sthere\"` instead.\n- `RegExp.test` does substring matching, so `/cat/.test(\"I love cats\")` will return `true`. To do full string matching, send \"^cat$\"."
- },
- "assistantRegexOptions": {
- "description": "These are the options for the assistant's message regex match. Defaults to all disabled.\n\n@default []",
+ "messages": {
+ "description": "This is the starting state for the conversation.",
"type": "array",
"items": {
- "$ref": "#/components/schemas/RegexOption"
+ "$ref": "#/components/schemas/OpenAIMessage"
}
},
- "customerRegex": {
- "type": "string"
- },
- "customerRegexOptions": {
- "description": "These are the options for the customer's message regex match. Defaults to all disabled.\n\n@default []",
+ "tools": {
"type": "array",
+ "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
"items": {
- "$ref": "#/components/schemas/RegexOption"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateApiRequestToolDTO",
+ "title": "ApiRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateBashToolDTO",
+ "title": "BashTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateComputerToolDTO",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDtmfToolDTO",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEndCallToolDTO",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateFunctionToolDTO",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHandoffToolDTO",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMcpToolDTO",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateQueryToolDTO",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmsToolDTO",
+ "title": "SmsTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTextEditorToolDTO",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTransferCallToolDTO",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
+ }
+ ]
}
},
- "timeoutSeconds": {
- "type": "number",
- "description": "This is the endpointing timeout in seconds, if the rule is matched.",
- "minimum": 0,
- "maximum": 15
- }
- },
- "required": [
- "type",
- "assistantRegex",
- "customerRegex",
- "timeoutSeconds"
- ]
- },
- "VapiSmartEndpointingPlan": {
- "type": "object",
- "properties": {
- "provider": {
- "type": "string",
- "description": "This is the provider for the smart endpointing plan.",
- "enum": [
- "vapi",
- "livekit",
- "custom-endpointing-model"
- ],
- "example": "vapi"
- }
- },
- "required": [
- "provider"
- ]
- },
- "LivekitSmartEndpointingPlan": {
- "type": "object",
- "properties": {
- "provider": {
- "type": "string",
- "description": "This is the provider for the smart endpointing plan.",
- "enum": [
- "vapi",
- "livekit",
- "custom-endpointing-model"
- ],
- "example": "livekit"
+ "toolIds": {
+ "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
},
- "waitFunction": {
- "type": "string",
- "description": "This expression describes how long the bot will wait to start speaking based on the likelihood that the user has reached an endpoint.\n\nThis is a millisecond valued function. It maps probabilities (real numbers on [0,1]) to milliseconds that the bot should wait before speaking ([0, \\infty]). Any negative values that are returned are set to zero (the bot can't start talking in the past).\n\nA probability of zero represents very high confidence that the caller has stopped speaking, and would like the bot to speak to them. A probability of one represents very high confidence that the caller is still speaking.\n\nUnder the hood, this is parsed into a mathjs expression. Whatever you use to write your expression needs to be valid with respect to mathjs\n\n@default \"20 + 500 * sqrt(x) + 2500 * x^3\"",
- "examples": [
- "70 + 4000 * x",
- "200 + 8000 * x",
- "4000 * (1 - cos(pi * x))"
+ "knowledgeBase": {
+ "description": "These are the options for the knowledge base.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
+ "title": "Custom"
+ }
]
- }
- },
- "required": [
- "provider"
- ]
- },
- "CustomEndpointingModelSmartEndpointingPlan": {
- "type": "object",
- "properties": {
+ },
"provider": {
"type": "string",
- "description": "This is the provider for the smart endpointing plan. Use `custom-endpointing-model` for custom endpointing providers that are not natively supported.",
"enum": [
- "vapi",
- "livekit",
- "custom-endpointing-model"
- ],
- "example": "custom-endpointing-model"
- },
- "server": {
- "description": "This is where the endpointing request will be sent. If not provided, will be sent to `assistant.server`. If that does not exist either, will be sent to `org.server`.\n\nRequest Example:\n\nPOST https://{server.url}\nContent-Type: application/json\n\n{\n \"message\": {\n \"type\": \"call.endpointing.request\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"message\": \"Hello, how are you?\",\n \"time\": 1234567890,\n \"secondsFromStart\": 0\n }\n ],\n ...other metadata about the call...\n }\n}\n\nResponse Expected:\n{\n \"timeoutSeconds\": 0.5\n}\n\nThe timeout is the number of seconds to wait before considering the user's speech as finished. The endpointing timeout is automatically reset each time a new transcript is received (and another `call.endpointing.request` is sent).",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
+ "openrouter"
]
- }
- },
- "required": [
- "provider"
- ]
- },
- "TranscriptionEndpointingPlan": {
- "type": "object",
- "properties": {
- "onPunctuationSeconds": {
+ },
+ "model": {
+ "type": "string",
+ "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b"
+ },
+ "temperature": {
"type": "number",
- "description": "The minimum number of seconds to wait after transcription ending with punctuation before sending a request to the model. Defaults to 0.1.\n\nThis setting exists because the transcriber punctuates the transcription when it's more confident that customer has completed a thought.\n\n@default 0.1",
+ "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
"minimum": 0,
- "maximum": 3,
- "example": 0.1
+ "maximum": 2
},
- "onNoPunctuationSeconds": {
+ "maxTokens": {
"type": "number",
- "description": "The minimum number of seconds to wait after transcription ending without punctuation before sending a request to the model. Defaults to 1.5.\n\nThis setting exists to catch the cases where the transcriber was not confident enough to punctuate the transcription, but the customer is done and has been silent for a long time.\n\n@default 1.5",
- "minimum": 0,
- "maximum": 3,
- "example": 1.5
+ "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
+ "minimum": 50,
+ "maximum": 10000
},
- "onNumberSeconds": {
+ "emotionRecognitionEnabled": {
+ "type": "boolean",
+ "description": "This determines whether we detect user's emotion while they speak and send it as additional info to the model.\n\nDefault `false` because the model is usually good at understanding the user's emotion from text.\n\n@default false"
+ },
+ "numFastTurns": {
"type": "number",
- "description": "The minimum number of seconds to wait after transcription ending with a number before sending a request to the model. Defaults to 0.4.\n\nThis setting exists because the transcriber will sometimes punctuate the transcription ending with a number, even though the customer hasn't uttered the full number. This happens commonly for long numbers when the customer reads the number in chunks.\n\n@default 0.5",
- "minimum": 0,
- "maximum": 3,
- "example": 0.5
+ "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. For example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
+ "minimum": 0
}
- }
+ },
+ "required": [
+ "provider",
+ "model"
+ ]
},
- "StartSpeakingPlan": {
+ "PerplexityAIModel": {
"type": "object",
"properties": {
- "waitSeconds": {
- "type": "number",
- "description": "This is how long assistant waits before speaking. Defaults to 0.4.\n\nThis is the minimum it will wait but if there is latency is the pipeline, this minimum will be exceeded. This is intended as a stopgap in case the pipeline is moving too fast.\n\nExample:\n- If model generates tokens and voice generates bytes within 100ms, the pipeline still waits 300ms before outputting speech.\n\nUsage:\n- If the customer is taking long pauses, set this to a higher value.\n- If the assistant is accidentally jumping in too much, set this to a higher value.\n\n@default 0.4",
- "minimum": 0,
- "maximum": 5,
- "example": 0.4
- },
- "smartEndpointingEnabled": {
- "example": false,
- "deprecated": true,
- "oneOf": [
- {
- "type": "boolean"
- },
- {
- "type": "string",
- "enum": [
- "livekit"
- ]
- }
- ]
- },
- "smartEndpointingPlan": {
- "description": "This is the plan for smart endpointing. Pick between Vapi smart endpointing or LiveKit smart endpointing (or nothing). We strongly recommend using livekit endpointing when working in English. LiveKit endpointing is not supported in other languages, yet.\n\nIf this is set, it will override and take precedence over `transcriptionEndpointingPlan`.\nThis plan will still be overridden by any matching `customEndpointingRules`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/VapiSmartEndpointingPlan",
- "title": "Vapi"
- },
- {
- "$ref": "#/components/schemas/LivekitSmartEndpointingPlan",
- "title": "Livekit"
- },
- {
- "$ref": "#/components/schemas/CustomEndpointingModelSmartEndpointingPlan",
- "title": "Custom Endpointing Model"
- }
- ]
+ "messages": {
+ "description": "This is the starting state for the conversation.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIMessage"
+ }
},
- "customEndpointingRules": {
+ "tools": {
"type": "array",
- "description": "These are the custom endpointing rules to set an endpointing timeout based on a regex on the customer's speech or the assistant's last message.\n\nUsage:\n- If you have yes/no questions like \"are you interested in a loan?\", you can set a shorter timeout.\n- If you have questions where the customer may pause to look up information like \"what's my account number?\", you can set a longer timeout.\n- If you want to wait longer while customer is enumerating a list of numbers, you can set a longer timeout.\n\nThese rules have the highest precedence and will override both `smartEndpointingPlan` and `transcriptionEndpointingPlan` when a rule is matched.\n\nThe rules are evaluated in order and the first one that matches will be used.\n\nOrder of precedence for endpointing:\n1. customEndpointingRules (if any match)\n2. smartEndpointingPlan (if set)\n3. transcriptionEndpointingPlan\n\n@default []",
+ "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/AssistantCustomEndpointingRule",
- "title": "Assistant"
+ "$ref": "#/components/schemas/CreateApiRequestToolDTO",
+ "title": "ApiRequestTool"
},
{
- "$ref": "#/components/schemas/CustomerCustomEndpointingRule",
- "title": "Customer"
+ "$ref": "#/components/schemas/CreateBashToolDTO",
+ "title": "BashTool"
},
{
- "$ref": "#/components/schemas/BothCustomEndpointingRule",
- "title": "Both"
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateComputerToolDTO",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDtmfToolDTO",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEndCallToolDTO",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateFunctionToolDTO",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHandoffToolDTO",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMcpToolDTO",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateQueryToolDTO",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmsToolDTO",
+ "title": "SmsTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTextEditorToolDTO",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTransferCallToolDTO",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
]
}
},
- "transcriptionEndpointingPlan": {
- "description": "This determines how a customer speech is considered done (endpointing) using the transcription of customer's speech.\n\nOnce an endpoint is triggered, the request is sent to `assistant.model`.\n\nNote: This plan is only used if `smartEndpointingPlan` is not set. If both are provided, `smartEndpointingPlan` takes precedence.\nThis plan will also be overridden by any matching `customEndpointingRules`.",
- "allOf": [
+ "toolIds": {
+ "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "knowledgeBase": {
+ "description": "These are the options for the knowledge base.",
+ "oneOf": [
{
- "$ref": "#/components/schemas/TranscriptionEndpointingPlan"
+ "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
+ "title": "Custom"
}
]
- }
- }
- },
- "StopSpeakingPlan": {
- "type": "object",
- "properties": {
- "numWords": {
- "type": "number",
- "description": "This is the number of words that the customer has to say before the assistant will stop talking.\n\nWords like \"stop\", \"actually\", \"no\", etc. will always interrupt immediately regardless of this value.\n\nWords like \"okay\", \"yeah\", \"right\" will never interrupt.\n\nWhen set to 0, `voiceSeconds` is used in addition to the transcriptions to determine the customer has started speaking.\n\nDefaults to 0.\n\n@default 0",
- "minimum": 0,
- "maximum": 10,
- "example": 0
},
- "voiceSeconds": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "perplexity-ai"
+ ]
+ },
+ "model": {
+ "type": "string",
+ "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b"
+ },
+ "temperature": {
"type": "number",
- "description": "This is the seconds customer has to speak before the assistant stops talking. This uses the VAD (Voice Activity Detection) spike to determine if the customer has started speaking.\n\nConsiderations:\n- A lower value might be more responsive but could potentially pick up non-speech sounds.\n- A higher value reduces false positives but might slightly delay the detection of speech onset.\n\nThis is only used if `numWords` is set to 0.\n\nDefaults to 0.2\n\n@default 0.2",
+ "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
"minimum": 0,
- "maximum": 0.5,
- "example": 0.2
+ "maximum": 2
},
- "backoffSeconds": {
+ "maxTokens": {
"type": "number",
- "description": "This is the seconds to wait before the assistant will start talking again after being interrupted.\n\nDefaults to 1.\n\n@default 1",
- "minimum": 0,
- "maximum": 10,
- "example": 1
+ "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
+ "minimum": 50,
+ "maximum": 10000
},
- "acknowledgementPhrases": {
- "description": "These are the phrases that will never interrupt the assistant, even if numWords threshold is met.\nThese are typically acknowledgement or backchanneling phrases.",
- "example": [
- "i understand",
- "i see",
- "i got it",
- "i hear you",
- "im listening",
- "im with you",
- "right",
- "okay",
- "ok",
- "sure",
- "alright",
- "got it",
- "understood",
- "yeah",
- "yes",
- "uh-huh",
- "mm-hmm",
- "gotcha",
- "mhmm",
- "ah",
- "yeah okay",
- "yeah sure"
- ],
- "default": [
- "i understand",
- "i see",
- "i got it",
- "i hear you",
- "im listening",
- "im with you",
- "right",
- "okay",
- "ok",
- "sure",
- "alright",
- "got it",
- "understood",
- "yeah",
- "yes",
- "uh-huh",
- "mm-hmm",
- "gotcha",
- "mhmm",
- "ah",
- "yeah okay",
- "yeah sure"
- ],
+ "emotionRecognitionEnabled": {
+ "type": "boolean",
+ "description": "This determines whether we detect user's emotion while they speak and send it as additional info to the model.\n\nDefault `false` because the model is usually good at understanding the user's emotion from text.\n\n@default false"
+ },
+ "numFastTurns": {
+ "type": "number",
+ "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. For example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
+ "minimum": 0
+ }
+ },
+ "required": [
+ "provider",
+ "model"
+ ]
+ },
+ "TogetherAIModel": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "description": "This is the starting state for the conversation.",
"type": "array",
"items": {
- "type": "string",
- "maxLength": 240
+ "$ref": "#/components/schemas/OpenAIMessage"
}
},
- "interruptionPhrases": {
- "description": "These are the phrases that will always interrupt the assistant immediately, regardless of numWords.\nThese are typically phrases indicating disagreement or desire to stop.",
- "example": [
- "stop",
- "shut",
- "up",
- "enough",
- "quiet",
- "silence",
- "but",
- "dont",
- "not",
- "no",
- "hold",
- "wait",
- "cut",
- "pause",
- "nope",
- "nah",
- "nevermind",
- "never",
- "bad",
- "actually"
- ],
- "default": [
- "stop",
- "shut",
- "up",
- "enough",
- "quiet",
- "silence",
- "but",
- "dont",
- "not",
- "no",
- "hold",
- "wait",
- "cut",
- "pause",
- "nope",
- "nah",
- "nevermind",
- "never",
- "bad",
- "actually"
- ],
- "type": "array",
- "items": {
- "type": "string",
- "maxLength": 240
- }
- }
- }
- },
- "MonitorPlan": {
- "type": "object",
- "properties": {
- "listenEnabled": {
- "type": "boolean",
- "description": "This determines whether the assistant's calls allow live listening. Defaults to true.\n\nFetch `call.monitor.listenUrl` to get the live listening URL.\n\n@default true",
- "example": false
- },
- "listenAuthenticationEnabled": {
- "type": "boolean",
- "description": "This enables authentication on the `call.monitor.listenUrl`.\n\nIf `listenAuthenticationEnabled` is `true`, the `call.monitor.listenUrl` will require an `Authorization: Bearer ` header.\n\n@default false",
- "example": false
- },
- "controlEnabled": {
- "type": "boolean",
- "description": "This determines whether the assistant's calls allow live control. Defaults to true.\n\nFetch `call.monitor.controlUrl` to get the live control URL.\n\nTo use, send any control message via a POST request to `call.monitor.controlUrl`. Here are the types of controls supported: https://docs.vapi.ai/api-reference/messages/client-inbound-message\n\n@default true",
- "example": false
- },
- "controlAuthenticationEnabled": {
- "type": "boolean",
- "description": "This enables authentication on the `call.monitor.controlUrl`.\n\nIf `controlAuthenticationEnabled` is `true`, the `call.monitor.controlUrl` will require an `Authorization: Bearer ` header.\n\n@default false",
- "example": false
- }
- }
- },
- "SmartDenoisingPlan": {
- "type": "object",
- "properties": {
- "enabled": {
- "type": "boolean",
- "description": "Whether smart denoising using Krisp is enabled.",
- "default": false
- }
- }
- },
- "FourierDenoisingPlan": {
- "type": "object",
- "properties": {
- "enabled": {
- "type": "boolean",
- "description": "Whether Fourier denoising is enabled. Note that this is experimental and may not work as expected.",
- "default": false
- },
- "mediaDetectionEnabled": {
- "type": "boolean",
- "description": "Whether automatic media detection is enabled. When enabled, the filter will automatically\ndetect consistent background TV/music/radio and switch to more aggressive filtering settings.\nOnly applies when enabled is true.",
- "example": true,
- "default": true
- },
- "staticThreshold": {
- "type": "number",
- "description": "Static threshold in dB used as fallback when no baseline is established.",
- "example": -35,
- "minimum": -80,
- "maximum": 0,
- "default": -35
- },
- "baselineOffsetDb": {
- "type": "number",
- "description": "How far below the rolling baseline to filter audio, in dB.\nLower values (e.g., -10) are more aggressive, higher values (e.g., -20) are more conservative.",
- "example": -15,
- "minimum": -30,
- "maximum": -5,
- "default": -15
- },
- "windowSizeMs": {
- "type": "number",
- "description": "Rolling window size in milliseconds for calculating the audio baseline.\nLarger windows adapt more slowly but are more stable.",
- "example": 3000,
- "minimum": 1000,
- "maximum": 30000,
- "default": 3000
- },
- "baselinePercentile": {
- "type": "number",
- "description": "Percentile to use for baseline calculation (1-99).\nHigher percentiles (e.g., 85) focus on louder speech, lower percentiles (e.g., 50) include quieter speech.",
- "example": 85,
- "minimum": 1,
- "maximum": 99,
- "default": 85
- }
- }
- },
- "BackgroundSpeechDenoisingPlan": {
- "type": "object",
- "properties": {
- "smartDenoisingPlan": {
- "description": "Whether smart denoising using Krisp is enabled.",
- "allOf": [
- {
- "$ref": "#/components/schemas/SmartDenoisingPlan"
- }
- ]
- },
- "fourierDenoisingPlan": {
- "description": "Whether Fourier denoising is enabled. Note that this is experimental and may not work as expected.\n\nThis can be combined with smart denoising, and will be run afterwards.",
- "allOf": [
- {
- "$ref": "#/components/schemas/FourierDenoisingPlan"
- }
- ]
- }
- }
- },
- "KeypadInputPlan": {
- "type": "object",
- "properties": {
- "enabled": {
- "type": "boolean",
- "description": "This keeps track of whether the user has enabled keypad input.\nBy default, it is off.\n\n@default false"
- },
- "timeoutSeconds": {
- "type": "number",
- "description": "This is the time in seconds to wait before processing the input.\nIf the input is not received within this time, the input will be ignored.\nIf set to \"off\", the input will be processed when the user enters a delimiter or immediately if no delimiter is used.\n\n@default 2",
- "minimum": 0,
- "maximum": 10
- },
- "delimiters": {
- "type": "string",
- "description": "This is the delimiter(s) that will be used to process the input.\nCan be '#', '*', or an empty array.",
- "enum": [
- "#",
- "*",
- ""
- ]
- }
- }
- },
- "WorkflowUserEditable": {
- "type": "object",
- "properties": {
- "nodes": {
- "type": "array",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ConversationNode",
- "title": "ConversationNode"
- },
- {
- "$ref": "#/components/schemas/ToolNode",
- "title": "ToolNode"
- }
- ]
- }
- },
- "model": {
- "description": "This is the model for the workflow.\n\nThis can be overridden at node level using `nodes[n].model`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/WorkflowOpenAIModel",
- "title": "WorkflowOpenAIModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowAnthropicModel",
- "title": "WorkflowAnthropicModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowGoogleModel",
- "title": "WorkflowGoogleModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowCustomModel",
- "title": "WorkflowCustomModel"
- }
- ]
- },
- "transcriber": {
- "description": "This is the transcriber for the workflow.\n\nThis can be overridden at node level using `nodes[n].transcriber`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AssemblyAITranscriber",
- "title": "AssemblyAITranscriber"
- },
- {
- "$ref": "#/components/schemas/AzureSpeechTranscriber",
- "title": "AzureSpeechTranscriber"
- },
- {
- "$ref": "#/components/schemas/CustomTranscriber",
- "title": "CustomTranscriber"
- },
- {
- "$ref": "#/components/schemas/DeepgramTranscriber",
- "title": "DeepgramTranscriber"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsTranscriber",
- "title": "ElevenLabsTranscriber"
- },
- {
- "$ref": "#/components/schemas/GladiaTranscriber",
- "title": "GladiaTranscriber"
- },
- {
- "$ref": "#/components/schemas/GoogleTranscriber",
- "title": "GoogleTranscriber"
- },
- {
- "$ref": "#/components/schemas/SpeechmaticsTranscriber",
- "title": "SpeechmaticsTranscriber"
- },
- {
- "$ref": "#/components/schemas/TalkscriberTranscriber",
- "title": "TalkscriberTranscriber"
- },
- {
- "$ref": "#/components/schemas/OpenAITranscriber",
- "title": "OpenAITranscriber"
- },
- {
- "$ref": "#/components/schemas/CartesiaTranscriber",
- "title": "CartesiaTranscriber"
- }
- ]
- },
- "voice": {
- "description": "This is the voice for the workflow.\n\nThis can be overridden at node level using `nodes[n].voice`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AzureVoice",
- "title": "AzureVoice"
- },
- {
- "$ref": "#/components/schemas/CartesiaVoice",
- "title": "CartesiaVoice"
- },
- {
- "$ref": "#/components/schemas/CustomVoice",
- "title": "CustomVoice"
- },
- {
- "$ref": "#/components/schemas/DeepgramVoice",
- "title": "DeepgramVoice"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsVoice",
- "title": "ElevenLabsVoice"
- },
- {
- "$ref": "#/components/schemas/HumeVoice",
- "title": "HumeVoice"
- },
- {
- "$ref": "#/components/schemas/LMNTVoice",
- "title": "LMNTVoice"
- },
- {
- "$ref": "#/components/schemas/NeuphonicVoice",
- "title": "NeuphonicVoice"
- },
- {
- "$ref": "#/components/schemas/OpenAIVoice",
- "title": "OpenAIVoice"
- },
- {
- "$ref": "#/components/schemas/PlayHTVoice",
- "title": "PlayHTVoice"
- },
- {
- "$ref": "#/components/schemas/RimeAIVoice",
- "title": "RimeAIVoice"
- },
- {
- "$ref": "#/components/schemas/SmallestAIVoice",
- "title": "SmallestAIVoice"
- },
- {
- "$ref": "#/components/schemas/TavusVoice",
- "title": "TavusVoice"
- },
- {
- "$ref": "#/components/schemas/VapiVoice",
- "title": "VapiVoice"
- },
- {
- "$ref": "#/components/schemas/SesameVoice",
- "title": "SesameVoice"
- },
- {
- "$ref": "#/components/schemas/InworldVoice",
- "title": "InworldVoice"
- },
- {
- "$ref": "#/components/schemas/MinimaxVoice",
- "title": "MinimaxVoice"
- }
- ]
- },
- "observabilityPlan": {
- "description": "This is the plan for observability of workflow's calls.\n\nCurrently, only Langfuse is supported.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/LangfuseObservabilityPlan",
- "title": "Langfuse"
- }
- ],
- "allOf": [
- {
- "$ref": "#/components/schemas/LangfuseObservabilityPlan"
- }
- ]
- },
- "backgroundSound": {
- "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
- "oneOf": [
- {
- "type": "enum",
- "enum": [
- "off",
- "office"
- ],
- "example": "office"
- },
- {
- "type": "string",
- "format": "uri",
- "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
- }
- ]
- },
- "hooks": {
- "type": "array",
- "description": "This is a set of actions that will be performed on certain events.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/CallHookCallEnding",
- "title": "CallHookCallEnding"
- },
- {
- "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
- "title": "CallHookAssistantSpeechInterrupted"
- },
- {
- "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
- "title": "CallHookCustomerSpeechInterrupted"
- },
- {
- "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
- "title": "CallHookCustomerSpeechTimeout"
- }
- ]
- }
- },
- "credentials": {
+ "tools": {
"type": "array",
- "description": "These are dynamic credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials.",
+ "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
- "title": "AnthropicCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "title": "AnyscaleCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "title": "AssemblyAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureCredentialDTO",
- "title": "AzureCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "title": "AzureOpenAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "title": "ByoSipTrunkCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
- "title": "CartesiaCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
- "title": "CerebrasCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
- "title": "CloudflareCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "title": "CustomLLMCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
- "title": "DeepgramCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "title": "DeepInfraCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "title": "DeepSeekCredential"
+ "$ref": "#/components/schemas/CreateApiRequestToolDTO",
+ "title": "ApiRequestTool"
},
{
- "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "title": "ElevenLabsCredential"
+ "$ref": "#/components/schemas/CreateBashToolDTO",
+ "title": "BashTool"
},
{
- "$ref": "#/components/schemas/CreateGcpCredentialDTO",
- "title": "GcpCredential"
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
},
{
- "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
- "title": "GladiaCredential"
+ "$ref": "#/components/schemas/CreateComputerToolDTO",
+ "title": "ComputerTool"
},
{
- "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "title": "GhlCredential"
+ "$ref": "#/components/schemas/CreateDtmfToolDTO",
+ "title": "DtmfTool"
},
{
- "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
- "title": "GoogleCredential"
+ "$ref": "#/components/schemas/CreateEndCallToolDTO",
+ "title": "EndCallTool"
},
{
- "$ref": "#/components/schemas/CreateGroqCredentialDTO",
- "title": "GroqCredential"
+ "$ref": "#/components/schemas/CreateFunctionToolDTO",
+ "title": "FunctionTool"
},
{
- "$ref": "#/components/schemas/CreateHumeCredentialDTO",
- "title": "HumeCredential"
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
+ "title": "GoHighLevelCalendarAvailabilityTool"
},
{
- "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
- "title": "InflectionAICredential"
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
+ "title": "GoHighLevelCalendarEventCreateTool"
},
{
- "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
- "title": "LangfuseCredential"
+ "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
+ "title": "GoHighLevelContactCreateTool"
},
{
- "$ref": "#/components/schemas/CreateLmntCredentialDTO",
- "title": "LmntCredential"
+ "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
+ "title": "GoHighLevelContactGetTool"
},
{
- "$ref": "#/components/schemas/CreateMakeCredentialDTO",
- "title": "MakeCredential"
+ "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
+ "title": "GoogleCalendarCheckAvailabilityTool"
},
{
- "$ref": "#/components/schemas/CreateMistralCredentialDTO",
- "title": "MistralCredential"
+ "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
+ "title": "GoogleCalendarCreateEventTool"
},
{
- "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "title": "NeuphonicCredential"
+ "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
+ "title": "GoogleSheetsRowAppendTool"
},
{
- "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
- "title": "OpenAICredential"
+ "$ref": "#/components/schemas/CreateHandoffToolDTO",
+ "title": "HandoffTool"
},
{
- "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "title": "OpenRouterCredential"
+ "$ref": "#/components/schemas/CreateMcpToolDTO",
+ "title": "McpTool"
},
{
- "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "title": "PerplexityAICredential"
+ "$ref": "#/components/schemas/CreateQueryToolDTO",
+ "title": "QueryTool"
},
{
- "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
- "title": "PlayHTCredential"
+ "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
+ "title": "SlackSendMessageTool"
},
{
- "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
- "title": "RimeAICredential"
+ "$ref": "#/components/schemas/CreateSmsToolDTO",
+ "title": "SmsTool"
},
{
- "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
- "title": "RunpodCredential"
+ "$ref": "#/components/schemas/CreateTextEditorToolDTO",
+ "title": "TextEditorTool"
},
{
- "$ref": "#/components/schemas/CreateS3CredentialDTO",
- "title": "S3Credential"
+ "$ref": "#/components/schemas/CreateTransferCallToolDTO",
+ "title": "TransferCallTool"
},
{
- "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
- "title": "SmallestAICredential"
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
},
{
- "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "title": "SpeechmaticsCredential"
- },
- {
- "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
- "title": "SupabaseCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTavusCredentialDTO",
- "title": "TavusCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
- "title": "TogetherAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
- "title": "TrieveCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
- "title": "TwilioCredential"
- },
- {
- "$ref": "#/components/schemas/CreateVonageCredentialDTO",
- "title": "VonageCredential"
- },
- {
- "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
- "title": "WebhookCredential"
- },
- {
- "$ref": "#/components/schemas/CreateXAiCredentialDTO",
- "title": "XAiCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "title": "GoogleCalendarOAuth2ClientCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "title": "GoogleCalendarOAuth2AuthorizationCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "title": "GoogleSheetsOAuth2AuthorizationCredential"
- },
- {
- "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "title": "SlackOAuth2AuthorizationCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "title": "GoHighLevelMCPCredential"
- },
- {
- "$ref": "#/components/schemas/CreateInworldCredentialDTO",
- "title": "InworldCredential"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
- "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "azure": "#/components/schemas/CreateAzureCredentialDTO",
- "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
- "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
- "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
- "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
- "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "gcp": "#/components/schemas/CreateGcpCredentialDTO",
- "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
- "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "google": "#/components/schemas/CreateGoogleCredentialDTO",
- "groq": "#/components/schemas/CreateGroqCredentialDTO",
- "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
- "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
- "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
- "make": "#/components/schemas/CreateMakeCredentialDTO",
- "openai": "#/components/schemas/CreateOpenAICredentialDTO",
- "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
- "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
- "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
- "s3": "#/components/schemas/CreateS3CredentialDTO",
- "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
- "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
- "tavus": "#/components/schemas/CreateTavusCredentialDTO",
- "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
- "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
- "vonage": "#/components/schemas/CreateVonageCredentialDTO",
- "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
- "xai": "#/components/schemas/CreateXAiCredentialDTO",
- "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "hume": "#/components/schemas/CreateHumeCredentialDTO",
- "mistral": "#/components/schemas/CreateMistralCredentialDTO",
- "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
- "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "inworld": "#/components/schemas/CreateInworldCredentialDTO",
- "minimax": "#/components/schemas/CreateMinimaxCredentialDTO"
- }
- }
- }
- },
- "name": {
- "type": "string",
- "maxLength": 80
- },
- "edges": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/Edge"
- }
- },
- "globalPrompt": {
- "type": "string",
- "maxLength": 5000
- },
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. tool.server\n2. workflow.server / assistant.server\n3. phoneNumber.server\n4. org.server",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
- ]
- },
- "compliancePlan": {
- "description": "This is the compliance plan for the workflow. It allows you to configure HIPAA and other compliance settings.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CompliancePlan"
- }
- ]
- },
- "analysisPlan": {
- "description": "This is the plan for analysis of workflow's calls. Stored in `call.analysis`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/AnalysisPlan"
- }
- ]
- },
- "artifactPlan": {
- "description": "This is the plan for artifacts generated during workflow's calls. Stored in `call.artifact`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ArtifactPlan"
- }
- ]
- },
- "startSpeakingPlan": {
- "description": "This is the plan for when the workflow nodes should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
- "allOf": [
- {
- "$ref": "#/components/schemas/StartSpeakingPlan"
- }
- ]
- },
- "stopSpeakingPlan": {
- "description": "This is the plan for when workflow nodes should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
- "allOf": [
- {
- "$ref": "#/components/schemas/StopSpeakingPlan"
- }
- ]
- },
- "monitorPlan": {
- "description": "This is the plan for real-time monitoring of the workflow's calls.\n\nUsage:\n- To enable live listening of the workflow's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the workflow's calls, set `monitorPlan.controlEnabled` to `true`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/MonitorPlan"
- }
- ]
- },
- "backgroundSpeechDenoisingPlan": {
- "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nBoth can be used together. Order of precedence:\n- Smart denoising\n- Fourier denoising",
- "allOf": [
- {
- "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
- }
- ]
- },
- "credentialIds": {
- "description": "These are the credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "keypadInputPlan": {
- "description": "This is the plan for keypad input handling during workflow calls.",
- "allOf": [
- {
- "$ref": "#/components/schemas/KeypadInputPlan"
- }
- ]
- }
- },
- "required": [
- "nodes",
- "name",
- "edges"
- ]
- },
- "VapiModel": {
- "type": "object",
- "properties": {
- "messages": {
- "description": "This is the starting state for the conversation.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIMessage"
- }
- },
- "tools": {
- "type": "array",
- "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/CreateApiRequestToolDTO",
- "title": "ApiRequestTool"
- },
- {
- "$ref": "#/components/schemas/CreateBashToolDTO",
- "title": "BashTool"
- },
- {
- "$ref": "#/components/schemas/CreateComputerToolDTO",
- "title": "ComputerTool"
- },
- {
- "$ref": "#/components/schemas/CreateDtmfToolDTO",
- "title": "DtmfTool"
- },
- {
- "$ref": "#/components/schemas/CreateEndCallToolDTO",
- "title": "EndCallTool"
- },
- {
- "$ref": "#/components/schemas/CreateFunctionToolDTO",
- "title": "FunctionTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
- "title": "GoHighLevelCalendarAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
- "title": "GoHighLevelCalendarEventCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
- "title": "GoHighLevelContactCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
- "title": "GoHighLevelContactGetTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
- "title": "GoogleCalendarCheckAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
- "title": "GoogleCalendarCreateEventTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
- "title": "GoogleSheetsRowAppendTool"
- },
- {
- "$ref": "#/components/schemas/CreateHandoffToolDTO",
- "title": "HandoffTool"
- },
- {
- "$ref": "#/components/schemas/CreateMcpToolDTO",
- "title": "McpTool"
- },
- {
- "$ref": "#/components/schemas/CreateQueryToolDTO",
- "title": "QueryTool"
- },
- {
- "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
- "title": "SlackSendMessageTool"
- },
- {
- "$ref": "#/components/schemas/CreateSmsToolDTO",
- "title": "SmsTool"
- },
- {
- "$ref": "#/components/schemas/CreateTextEditorToolDTO",
- "title": "TextEditorTool"
- },
- {
- "$ref": "#/components/schemas/CreateTransferCallToolDTO",
- "title": "TransferCallTool"
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
]
}
@@ -17375,26 +16846,10 @@
}
]
},
- "knowledgeBaseId": {
- "type": "string",
- "description": "This is the ID of the knowledge base the model will use."
- },
"provider": {
"type": "string",
"enum": [
- "vapi"
- ]
- },
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead."
- },
- "workflow": {
- "description": "This is the workflow that will be used for the call. To use an existing workflow, use `workflowId` instead.",
- "allOf": [
- {
- "$ref": "#/components/schemas/WorkflowUserEditable"
- }
+ "together-ai"
]
},
"model": {
@@ -17428,4375 +16883,3922 @@
"model"
]
},
- "XaiModel": {
+ "HangupNode": {
"type": "object",
"properties": {
- "messages": {
- "description": "This is the starting state for the conversation.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIMessage"
- }
- },
- "tools": {
- "type": "array",
- "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/CreateApiRequestToolDTO",
- "title": "ApiRequestTool"
- },
- {
- "$ref": "#/components/schemas/CreateBashToolDTO",
- "title": "BashTool"
- },
- {
- "$ref": "#/components/schemas/CreateComputerToolDTO",
- "title": "ComputerTool"
- },
- {
- "$ref": "#/components/schemas/CreateDtmfToolDTO",
- "title": "DtmfTool"
- },
- {
- "$ref": "#/components/schemas/CreateEndCallToolDTO",
- "title": "EndCallTool"
- },
- {
- "$ref": "#/components/schemas/CreateFunctionToolDTO",
- "title": "FunctionTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
- "title": "GoHighLevelCalendarAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
- "title": "GoHighLevelCalendarEventCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
- "title": "GoHighLevelContactCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
- "title": "GoHighLevelContactGetTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
- "title": "GoogleCalendarCheckAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
- "title": "GoogleCalendarCreateEventTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
- "title": "GoogleSheetsRowAppendTool"
- },
- {
- "$ref": "#/components/schemas/CreateHandoffToolDTO",
- "title": "HandoffTool"
- },
- {
- "$ref": "#/components/schemas/CreateMcpToolDTO",
- "title": "McpTool"
- },
- {
- "$ref": "#/components/schemas/CreateQueryToolDTO",
- "title": "QueryTool"
- },
- {
- "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
- "title": "SlackSendMessageTool"
- },
- {
- "$ref": "#/components/schemas/CreateSmsToolDTO",
- "title": "SmsTool"
- },
- {
- "$ref": "#/components/schemas/CreateTextEditorToolDTO",
- "title": "TextEditorTool"
- },
- {
- "$ref": "#/components/schemas/CreateTransferCallToolDTO",
- "title": "TransferCallTool"
- }
- ]
- }
- },
- "toolIds": {
- "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "knowledgeBase": {
- "description": "These are the options for the knowledge base.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
- "title": "Custom"
- }
- ]
- },
- "knowledgeBaseId": {
- "type": "string",
- "description": "This is the ID of the knowledge base the model will use."
- },
- "model": {
+ "type": {
"type": "string",
- "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b",
"enum": [
- "grok-beta",
- "grok-2",
- "grok-3"
+ "hangup"
]
},
- "provider": {
+ "name": {
"type": "string",
- "enum": [
- "xai"
- ]
- },
- "temperature": {
- "type": "number",
- "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
- "minimum": 0,
- "maximum": 2
- },
- "maxTokens": {
- "type": "number",
- "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
- "minimum": 50,
- "maximum": 10000
+ "maxLength": 80
},
- "emotionRecognitionEnabled": {
+ "isStart": {
"type": "boolean",
- "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false"
+ "description": "This is whether or not the node is the start of the workflow."
},
- "numFastTurns": {
- "type": "number",
- "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
- "minimum": 0
+ "metadata": {
+ "type": "object",
+ "description": "This is for metadata you want to store on the task."
}
},
"required": [
- "model",
- "provider"
+ "type",
+ "name"
]
},
- "ExactReplacement": {
+ "WorkflowOpenAIModel": {
"type": "object",
"properties": {
- "type": {
+ "provider": {
"type": "string",
- "description": "This is the exact replacement type. You can use this to replace a specific word or phrase with a different word or phrase.\n\nUsage:\n- Replace \"hello\" with \"hi\": { type: 'exact', key: 'hello', value: 'hi' }\n- Replace \"good morning\" with \"good day\": { type: 'exact', key: 'good morning', value: 'good day' }\n- Replace a specific name: { type: 'exact', key: 'John Doe', value: 'Jane Smith' }\n- Replace an acronym: { type: 'exact', key: 'AI', value: 'Artificial Intelligence' }\n- Replace a company name with its phonetic pronunciation: { type: 'exact', key: 'Vapi', value: 'Vappy' }",
+ "description": "This is the provider of the model (`openai`).",
"enum": [
- "exact"
+ "openai"
]
},
- "replaceAllEnabled": {
- "type": "boolean",
- "description": "This option let's you control whether to replace all instances of the key or only the first one. By default, it only replaces the first instance.\nExamples:\n- For { type: 'exact', key: 'hello', value: 'hi', replaceAllEnabled: false }. Before: \"hello world, hello universe\" | After: \"hi world, hello universe\"\n- For { type: 'exact', key: 'hello', value: 'hi', replaceAllEnabled: true }. Before: \"hello world, hello universe\" | After: \"hi world, hi universe\"\n@default false",
- "default": false
- },
- "key": {
- "type": "string",
- "description": "This is the key to replace."
- },
- "value": {
- "type": "string",
- "description": "This is the value that will replace the match.",
- "maxLength": 1000
- }
- },
- "required": [
- "type",
- "key",
- "value"
- ]
- },
- "RegexReplacement": {
- "type": "object",
- "properties": {
- "type": {
+ "model": {
"type": "string",
- "description": "This is the regex replacement type. You can use this to replace a word or phrase that matches a pattern.\n\nUsage:\n- Replace all numbers with \"some number\": { type: 'regex', regex: '\\\\d+', value: 'some number' }\n- Replace email addresses with \"[EMAIL]\": { type: 'regex', regex: '\\\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\\\.[A-Z|a-z]{2,}\\\\b', value: '[EMAIL]' }\n- Replace phone numbers with a formatted version: { type: 'regex', regex: '(\\\\d{3})(\\\\d{3})(\\\\d{4})', value: '($1) $2-$3' }\n- Replace all instances of \"color\" or \"colour\" with \"hue\": { type: 'regex', regex: 'colou?r', value: 'hue' }\n- Capitalize the first letter of every sentence: { type: 'regex', regex: '(?<=\\\\. |^)[a-z]', value: (match) => match.toUpperCase() }",
+ "description": "This is the OpenAI model that will be used.\n\nWhen using Vapi OpenAI or your own Azure Credentials, you have the option to specify the region for the selected model. This shouldn't be specified unless you have a specific reason to do so. Vapi will automatically find the fastest region that make sense.\nThis is helpful when you are required to comply with Data Residency rules. Learn more about Azure regions here https://azure.microsoft.com/en-us/explore/global-infrastructure/data-residency/.",
+ "maxLength": 100,
"enum": [
- "regex"
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.2",
+ "gpt-5.2-chat-latest",
+ "gpt-5.1",
+ "gpt-5.1-chat-latest",
+ "gpt-5",
+ "gpt-5-chat-latest",
+ "gpt-5-mini",
+ "gpt-5-nano",
+ "gpt-4.1-2025-04-14",
+ "gpt-4.1-mini-2025-04-14",
+ "gpt-4.1-nano-2025-04-14",
+ "gpt-4.1",
+ "gpt-4.1-mini",
+ "gpt-4.1-nano",
+ "chatgpt-4o-latest",
+ "o3",
+ "o3-mini",
+ "o4-mini",
+ "o1-mini",
+ "o1-mini-2024-09-12",
+ "gpt-4o-mini-2024-07-18",
+ "gpt-4o-mini",
+ "gpt-4o",
+ "gpt-4o-2024-05-13",
+ "gpt-4o-2024-08-06",
+ "gpt-4o-2024-11-20",
+ "gpt-4-turbo",
+ "gpt-4-turbo-2024-04-09",
+ "gpt-4-turbo-preview",
+ "gpt-4-0125-preview",
+ "gpt-4-1106-preview",
+ "gpt-4",
+ "gpt-4-0613",
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-0125",
+ "gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0613",
+ "gpt-4.1-2025-04-14:westus",
+ "gpt-4.1-2025-04-14:eastus2",
+ "gpt-4.1-2025-04-14:eastus",
+ "gpt-4.1-2025-04-14:westus3",
+ "gpt-4.1-2025-04-14:northcentralus",
+ "gpt-4.1-2025-04-14:southcentralus",
+ "gpt-4.1-2025-04-14:westeurope",
+ "gpt-4.1-2025-04-14:germanywestcentral",
+ "gpt-4.1-2025-04-14:polandcentral",
+ "gpt-4.1-2025-04-14:spaincentral",
+ "gpt-4.1-mini-2025-04-14:westus",
+ "gpt-4.1-mini-2025-04-14:eastus2",
+ "gpt-4.1-mini-2025-04-14:eastus",
+ "gpt-4.1-mini-2025-04-14:westus3",
+ "gpt-4.1-mini-2025-04-14:northcentralus",
+ "gpt-4.1-mini-2025-04-14:southcentralus",
+ "gpt-4.1-mini-2025-04-14:westeurope",
+ "gpt-4.1-mini-2025-04-14:germanywestcentral",
+ "gpt-4.1-mini-2025-04-14:polandcentral",
+ "gpt-4.1-mini-2025-04-14:spaincentral",
+ "gpt-4.1-nano-2025-04-14:westus",
+ "gpt-4.1-nano-2025-04-14:eastus2",
+ "gpt-4.1-nano-2025-04-14:westus3",
+ "gpt-4.1-nano-2025-04-14:northcentralus",
+ "gpt-4.1-nano-2025-04-14:southcentralus",
+ "gpt-4o-2024-11-20:swedencentral",
+ "gpt-4o-2024-11-20:westus",
+ "gpt-4o-2024-11-20:eastus2",
+ "gpt-4o-2024-11-20:eastus",
+ "gpt-4o-2024-11-20:westus3",
+ "gpt-4o-2024-11-20:southcentralus",
+ "gpt-4o-2024-11-20:westeurope",
+ "gpt-4o-2024-11-20:germanywestcentral",
+ "gpt-4o-2024-11-20:polandcentral",
+ "gpt-4o-2024-11-20:spaincentral",
+ "gpt-4o-2024-08-06:westus",
+ "gpt-4o-2024-08-06:westus3",
+ "gpt-4o-2024-08-06:eastus",
+ "gpt-4o-2024-08-06:eastus2",
+ "gpt-4o-2024-08-06:northcentralus",
+ "gpt-4o-2024-08-06:southcentralus",
+ "gpt-4o-mini-2024-07-18:westus",
+ "gpt-4o-mini-2024-07-18:westus3",
+ "gpt-4o-mini-2024-07-18:eastus",
+ "gpt-4o-mini-2024-07-18:eastus2",
+ "gpt-4o-mini-2024-07-18:northcentralus",
+ "gpt-4o-mini-2024-07-18:southcentralus",
+ "gpt-4o-2024-05-13:eastus2",
+ "gpt-4o-2024-05-13:eastus",
+ "gpt-4o-2024-05-13:northcentralus",
+ "gpt-4o-2024-05-13:southcentralus",
+ "gpt-4o-2024-05-13:westus3",
+ "gpt-4o-2024-05-13:westus",
+ "gpt-4-turbo-2024-04-09:eastus2",
+ "gpt-4-0125-preview:eastus",
+ "gpt-4-0125-preview:northcentralus",
+ "gpt-4-0125-preview:southcentralus",
+ "gpt-4-1106-preview:australiaeast",
+ "gpt-4-1106-preview:canadaeast",
+ "gpt-4-1106-preview:france",
+ "gpt-4-1106-preview:india",
+ "gpt-4-1106-preview:norway",
+ "gpt-4-1106-preview:swedencentral",
+ "gpt-4-1106-preview:uk",
+ "gpt-4-1106-preview:westus",
+ "gpt-4-1106-preview:westus3",
+ "gpt-4-0613:canadaeast",
+ "gpt-3.5-turbo-0125:canadaeast",
+ "gpt-3.5-turbo-0125:northcentralus",
+ "gpt-3.5-turbo-0125:southcentralus",
+ "gpt-3.5-turbo-1106:canadaeast",
+ "gpt-3.5-turbo-1106:westus"
]
},
- "regex": {
- "type": "string",
- "description": "This is the regex pattern to replace.\n\nNote:\n- This works by using the `string.replace` method in Node.JS. Eg. `\"hello there\".replace(/hello/g, \"hi\")` will return `\"hi there\"`.\n\nHot tip:\n- In JavaScript, escape `\\` when sending the regex pattern. Eg. `\"hello\\sthere\"` will be sent over the wire as `\"hellosthere\"`. Send `\"hello\\\\sthere\"` instead."
- },
- "options": {
- "description": "These are the options for the regex replacement. Defaults to all disabled.\n\n@default []",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/RegexOption"
- }
+ "temperature": {
+ "type": "number",
+ "description": "This is the temperature of the model.",
+ "minimum": 0,
+ "maximum": 2
},
- "value": {
- "type": "string",
- "description": "This is the value that will replace the match.",
- "maxLength": 1000
+ "maxTokens": {
+ "type": "number",
+ "description": "This is the max tokens of the model.",
+ "minimum": 50,
+ "maximum": 10000
}
},
"required": [
- "type",
- "regex",
- "value"
+ "provider",
+ "model"
]
},
- "FormatPlan": {
+ "WorkflowAnthropicModel": {
"type": "object",
"properties": {
- "enabled": {
- "type": "boolean",
- "description": "This determines whether the chunk is formatted before being sent to the voice provider. This helps with enunciation. This includes phone numbers, emails and addresses. Default `true`.\n\nUsage:\n- To rely on the voice provider's formatting logic, set this to `false`.\n\nIf `voice.chunkPlan.enabled` is `false`, this is automatically `false` since there's no chunk to format.\n\n@default true",
- "example": true
- },
- "numberToDigitsCutoff": {
- "type": "number",
- "description": "This is the cutoff after which a number is converted to individual digits instead of being spoken as words.\n\nExample:\n- If cutoff 2025, \"12345\" is converted to \"1 2 3 4 5\" while \"1200\" is converted to \"twelve hundred\".\n\nUsage:\n- If your use case doesn't involve IDs like zip codes, set this to a high value.\n- If your use case involves IDs that are shorter than 5 digits, set this to a lower value.\n\n@default 2025",
- "minimum": 0,
- "example": 2025
- },
- "replacements": {
- "type": "array",
- "description": "These are the custom replacements you can make to the chunk before it is sent to the voice provider.\n\nUsage:\n- To replace a specific word or phrase with a different word or phrase, use the `ExactReplacement` type. Eg. `{ type: 'exact', key: 'hello', value: 'hi' }`\n- To replace a word or phrase that matches a pattern, use the `RegexReplacement` type. Eg. `{ type: 'regex', regex: '\\\\b[a-zA-Z]{5}\\\\b', value: 'hi' }`\n\n@default []",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ExactReplacement",
- "title": "ExactReplacement"
- },
- {
- "$ref": "#/components/schemas/RegexReplacement",
- "title": "RegexReplacement"
- }
- ]
- }
- },
- "formattersEnabled": {
- "type": "array",
- "description": "List of formatters to apply. If not provided, all default formatters will be applied.\nIf provided, only the specified formatters will be applied.\nNote: Some essential formatters like angle bracket removal will always be applied.\n@default undefined",
+ "provider": {
+ "type": "string",
+ "description": "This is the provider of the model (`anthropic`).",
"enum": [
- "markdown",
- "asterisk",
- "quote",
- "dash",
- "newline",
- "colon",
- "acronym",
- "dollarAmount",
- "email",
- "date",
- "time",
- "distance",
- "unit",
- "percentage",
- "phoneNumber",
- "number",
- "stripAsterisk"
- ],
- "items": {
- "type": "string",
- "enum": [
- "markdown",
- "asterisk",
- "quote",
- "dash",
- "newline",
- "colon",
- "acronym",
- "dollarAmount",
- "email",
- "date",
- "time",
- "distance",
- "unit",
- "percentage",
- "phoneNumber",
- "number",
- "stripAsterisk"
- ]
- }
- }
- }
- },
- "ChunkPlan": {
- "type": "object",
- "properties": {
- "enabled": {
- "type": "boolean",
- "description": "This determines whether the model output is chunked before being sent to the voice provider. Default `true`.\n\nUsage:\n- To rely on the voice provider's audio generation logic, set this to `false`.\n- If seeing issues with quality, set this to `true`.\n\nIf disabled, Vapi-provided audio control tokens like will not work.\n\n@default true",
- "example": true
- },
- "minCharacters": {
- "type": "number",
- "description": "This is the minimum number of characters in a chunk.\n\nUsage:\n- To increase quality, set this to a higher value.\n- To decrease latency, set this to a lower value.\n\n@default 30",
- "minimum": 1,
- "maximum": 80,
- "example": 30
+ "anthropic"
+ ]
},
- "punctuationBoundaries": {
- "type": "array",
- "description": "These are the punctuations that are considered valid boundaries for a chunk to be created.\n\nUsage:\n- To increase quality, constrain to fewer boundaries.\n- To decrease latency, enable all.\n\nDefault is automatically set to balance the trade-off between quality and latency based on the provider.",
+ "model": {
+ "type": "string",
+ "description": "This is the specific model that will be used.",
+ "maxLength": 100,
"enum": [
- "。",
- ",",
- ".",
- "!",
- "?",
- ";",
- ")",
- "،",
- "۔",
- "।",
- "॥",
- "|",
- "||",
- ",",
- ":"
- ],
- "example": [
- "。",
- ",",
- ".",
- "!",
- "?",
- ";",
- "،",
- "۔",
- "।",
- "॥",
- "|",
- "||",
- ",",
- ":"
- ],
- "items": {
- "type": "string",
- "enum": [
- "。",
- ",",
- ".",
- "!",
- "?",
- ";",
- ")",
- "،",
- "۔",
- "।",
- "॥",
- "|",
- "||",
- ",",
- ":"
- ]
- }
+ "claude-3-opus-20240229",
+ "claude-3-sonnet-20240229",
+ "claude-3-haiku-20240307",
+ "claude-3-5-sonnet-20240620",
+ "claude-3-5-sonnet-20241022",
+ "claude-3-5-haiku-20241022",
+ "claude-3-7-sonnet-20250219",
+ "claude-opus-4-20250514",
+ "claude-opus-4-5-20251101",
+ "claude-opus-4-6",
+ "claude-sonnet-4-20250514",
+ "claude-sonnet-4-5-20250929",
+ "claude-sonnet-4-6",
+ "claude-haiku-4-5-20251001"
+ ]
},
- "formatPlan": {
- "description": "This is the plan for formatting the chunk before it is sent to the voice provider.",
+ "thinking": {
+ "description": "This is the optional configuration for Anthropic's thinking feature.\n\n- If provided, `maxTokens` must be greater than `thinking.budgetTokens`.",
"allOf": [
{
- "$ref": "#/components/schemas/FormatPlan"
+ "$ref": "#/components/schemas/AnthropicThinkingConfig"
}
]
- }
- }
- },
- "FallbackPlan": {
- "type": "object",
- "properties": {
- "voices": {
- "type": "array",
- "description": "This is the list of voices to fallback to in the event that the primary voice provider fails.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/FallbackAzureVoice",
- "title": "Azure"
- },
- {
- "$ref": "#/components/schemas/FallbackCartesiaVoice",
- "title": "Cartesia"
- },
- {
- "$ref": "#/components/schemas/FallbackHumeVoice",
- "title": "Hume"
- },
- {
- "$ref": "#/components/schemas/FallbackCustomVoice",
- "title": "CustomVoice"
- },
- {
- "$ref": "#/components/schemas/FallbackDeepgramVoice",
- "title": "Deepgram"
- },
- {
- "$ref": "#/components/schemas/FallbackElevenLabsVoice",
- "title": "ElevenLabs"
- },
- {
- "$ref": "#/components/schemas/FallbackVapiVoice",
- "title": "Vapi"
- },
- {
- "$ref": "#/components/schemas/FallbackLMNTVoice",
- "title": "LMNT"
- },
- {
- "$ref": "#/components/schemas/FallbackOpenAIVoice",
- "title": "OpenAI"
- },
- {
- "$ref": "#/components/schemas/FallbackPlayHTVoice",
- "title": "PlayHT"
- },
- {
- "$ref": "#/components/schemas/FallbackRimeAIVoice",
- "title": "RimeAI"
- },
- {
- "$ref": "#/components/schemas/FallbackSmallestAIVoice",
- "title": "Smallest AI"
- },
- {
- "$ref": "#/components/schemas/FallbackTavusVoice",
- "title": "TavusVoice"
- },
- {
- "$ref": "#/components/schemas/FallbackNeuphonicVoice",
- "title": "Neuphonic"
- },
- {
- "$ref": "#/components/schemas/FallbackSesameVoice",
- "title": "Sesame"
- },
- {
- "$ref": "#/components/schemas/FallbackInworldVoice",
- "title": "Inworld"
- }
- ]
- }
+ },
+ "temperature": {
+ "type": "number",
+ "description": "This is the temperature of the model.",
+ "minimum": 0,
+ "maximum": 2
+ },
+ "maxTokens": {
+ "type": "number",
+ "description": "This is the max tokens of the model.",
+ "minimum": 50,
+ "maximum": 10000
}
},
"required": [
- "voices"
+ "provider",
+ "model"
]
},
- "AzureVoice": {
+ "WorkflowAnthropicBedrockModel": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
"provider": {
"type": "string",
- "description": "This is the voice provider that will be used.",
+ "description": "This is the provider of the model (`anthropic-bedrock`).",
"enum": [
- "azure"
+ "anthropic-bedrock"
]
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.",
- "oneOf": [
- {
- "type": "string",
- "enum": [
- "andrew",
- "brian",
- "emma"
- ],
- "title": "Preset Voice Options"
- },
- {
- "type": "string",
- "title": "Azure Voice ID"
- }
+ "model": {
+ "type": "string",
+ "description": "This is the specific model that will be used.",
+ "maxLength": 100,
+ "enum": [
+ "claude-3-opus-20240229",
+ "claude-3-sonnet-20240229",
+ "claude-3-haiku-20240307",
+ "claude-3-5-sonnet-20240620",
+ "claude-3-5-sonnet-20241022",
+ "claude-3-5-haiku-20241022",
+ "claude-3-7-sonnet-20250219",
+ "claude-opus-4-20250514",
+ "claude-opus-4-5-20251101",
+ "claude-opus-4-6",
+ "claude-sonnet-4-20250514",
+ "claude-sonnet-4-5-20250929",
+ "claude-sonnet-4-6",
+ "claude-haiku-4-5-20251001"
]
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "thinking": {
+ "description": "This is the optional configuration for Anthropic's thinking feature.\n\n- If provided, `maxTokens` must be greater than `thinking.budgetTokens`.",
"allOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/AnthropicThinkingConfig"
}
]
},
- "speed": {
+ "temperature": {
"type": "number",
- "description": "This is the speed multiplier that will be used.",
- "minimum": 0.5,
+ "description": "This is the temperature of the model.",
+ "minimum": 0,
"maximum": 2
},
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
- {
- "$ref": "#/components/schemas/FallbackPlan"
- }
- ]
+ "maxTokens": {
+ "type": "number",
+ "description": "This is the max tokens of the model.",
+ "minimum": 50,
+ "maximum": 10000
}
},
"required": [
"provider",
- "voiceId"
+ "model"
]
},
- "CartesiaExperimentalControls": {
+ "WorkflowGoogleModel": {
"type": "object",
"properties": {
- "speed": {
- "oneOf": [
- {
- "type": "string",
- "enum": [
- "slowest",
- "slow",
- "normal",
- "fast",
- "fastest"
- ],
- "example": "normal"
- },
- {
- "type": "number",
- "minimum": -1,
- "maximum": 1,
- "example": 0.5
- }
- ]
- },
- "emotion": {
+ "provider": {
"type": "string",
+ "description": "This is the provider of the model (`google`).",
"enum": [
- "anger:lowest",
- "anger:low",
- "anger:high",
- "anger:highest",
- "positivity:lowest",
- "positivity:low",
- "positivity:high",
- "positivity:highest",
- "surprise:lowest",
- "surprise:low",
- "surprise:high",
- "surprise:highest",
- "sadness:lowest",
- "sadness:low",
- "sadness:high",
- "sadness:highest",
- "curiosity:lowest",
- "curiosity:low",
- "curiosity:high",
- "curiosity:highest"
- ],
- "example": [
- "happiness:high"
+ "google"
]
- }
- }
- },
- "CartesiaVoice": {
- "type": "object",
- "properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
},
- "provider": {
+ "model": {
"type": "string",
- "description": "This is the voice provider that will be used.",
- "enum": [
- "cartesia"
- ]
- },
- "voiceId": {
- "type": "string",
- "description": "The ID of the particular voice you want to use."
- },
- "model": {
- "type": "string",
- "description": "This is the model that will be used. This is optional and will default to the correct model for the voiceId.",
- "enum": [
- "sonic-2",
- "sonic-english",
- "sonic-multilingual",
- "sonic-preview",
- "sonic"
- ],
- "example": "sonic-english"
- },
- "language": {
- "type": "string",
- "description": "This is the language that will be used. This is optional and will default to the correct language for the voiceId.",
+ "description": "This is the specific model that will be used.",
+ "maxLength": 100,
"enum": [
- "en",
- "de",
- "es",
- "fr",
- "ja",
- "pt",
- "zh",
- "hi",
- "it",
- "ko",
- "nl",
- "pl",
- "ru",
- "sv",
- "tr"
- ],
- "example": "en"
- },
- "experimentalControls": {
- "description": "Experimental controls for Cartesia voice generation",
- "allOf": [
- {
- "$ref": "#/components/schemas/CartesiaExperimentalControls"
- }
+ "gemini-3-flash-preview",
+ "gemini-2.5-pro",
+ "gemini-2.5-flash",
+ "gemini-2.5-flash-lite",
+ "gemini-2.0-flash-thinking-exp",
+ "gemini-2.0-pro-exp-02-05",
+ "gemini-2.0-flash",
+ "gemini-2.0-flash-lite",
+ "gemini-2.0-flash-exp",
+ "gemini-2.0-flash-realtime-exp",
+ "gemini-1.5-flash",
+ "gemini-1.5-flash-002",
+ "gemini-1.5-pro",
+ "gemini-1.5-pro-002",
+ "gemini-1.0-pro"
]
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChunkPlan"
- }
- ]
+ "temperature": {
+ "type": "number",
+ "description": "This is the temperature of the model.",
+ "minimum": 0,
+ "maximum": 2
},
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
- {
- "$ref": "#/components/schemas/FallbackPlan"
- }
- ]
+ "maxTokens": {
+ "type": "number",
+ "description": "This is the max tokens of the model.",
+ "minimum": 50,
+ "maximum": 10000
}
},
"required": [
"provider",
- "voiceId"
+ "model"
]
},
- "CustomVoice": {
+ "WorkflowCustomModel": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
"provider": {
"type": "string",
- "description": "This is the voice provider that will be used. Use `custom-voice` for providers that are not natively supported.",
+ "description": "This is the provider of the model (`custom-llm`).",
"enum": [
- "custom-voice"
- ]
- },
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChunkPlan"
- }
- ]
- },
- "server": {
- "description": "This is where the voice request will be sent.\n\nRequest Example:\n\nPOST https://{server.url}\nContent-Type: application/json\n\n{\n \"message\": {\n \"type\": \"voice-request\",\n \"text\": \"Hello, world!\",\n \"sampleRate\": 24000,\n ...other metadata about the call...\n }\n}\n\nResponse Expected: 1-channel 16-bit raw PCM audio at the sample rate specified in the request. Here is how the response will be piped to the transport:\n```\nresponse.on('data', (chunk: Buffer) => {\n outputStream.write(chunk);\n});\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
- ]
- },
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
- {
- "$ref": "#/components/schemas/FallbackPlan"
- }
+ "custom-llm"
]
- }
- },
- "required": [
- "provider",
- "server"
- ]
- },
- "DeepgramVoice": {
- "type": "object",
- "properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
},
- "provider": {
+ "metadataSendMode": {
"type": "string",
- "description": "This is the voice provider that will be used.",
+ "description": "This determines whether metadata is sent in requests to the custom provider.\n\n- `off` will not send any metadata. payload will look like `{ messages }`\n- `variable` will send `assistant.metadata` as a variable on the payload. payload will look like `{ messages, metadata }`\n- `destructured` will send `assistant.metadata` fields directly on the payload. payload will look like `{ messages, ...metadata }`\n\nFurther, `variable` and `destructured` will send `call`, `phoneNumber`, and `customer` objects in the payload.\n\nDefault is `variable`.",
"enum": [
- "deepgram"
+ "off",
+ "variable",
+ "destructured"
]
},
- "voiceId": {
+ "url": {
"type": "string",
- "description": "This is the provider-specific ID that will be used.",
- "enum": [
- "asteria",
- "luna",
- "stella",
- "athena",
- "hera",
- "orion",
- "arcas",
- "perseus",
- "angus",
- "orpheus",
- "helios",
- "zeus",
- "thalia",
- "andromeda",
- "helena",
- "apollo",
- "arcas",
- "aries",
- "amalthea",
- "asteria",
- "athena",
- "atlas",
- "aurora",
- "callista",
- "cora",
- "cordelia",
- "delia",
- "draco",
- "electra",
- "harmonia",
- "hera",
- "hermes",
- "hyperion",
- "iris",
- "janus",
- "juno",
- "jupiter",
- "luna",
- "mars",
- "minerva",
- "neptune",
- "odysseus",
- "ophelia",
- "orion",
- "orpheus",
- "pandora",
- "phoebe",
- "pluto",
- "saturn",
- "selene",
- "theia",
- "vesta",
- "zeus"
- ],
- "title": "This is the Deepgram Voice ID"
+ "description": "This is the URL we'll use for the OpenAI client's `baseURL`. Ex. https://openrouter.ai/api/v1"
+ },
+ "headers": {
+ "type": "object",
+ "description": "These are the headers we'll use for the OpenAI client's `headers`."
+ },
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "This sets the timeout for the connection to the custom provider without needing to stream any tokens back. Default is 20 seconds.",
+ "minimum": 20,
+ "maximum": 600
},
"model": {
"type": "string",
- "description": "This is the model that will be used. Defaults to 'aura-2' when not specified.",
- "enum": [
- "aura",
- "aura-2"
- ],
- "example": "aura-2"
- },
- "mipOptOut": {
- "type": "boolean",
- "description": "If set to true, this will add mip_opt_out=true as a query parameter of all API requests. See https://developers.deepgram.com/docs/the-deepgram-model-improvement-partnership-program#want-to-opt-out\n\nThis will only be used if you are using your own Deepgram API key.\n\n@default false",
- "example": false,
- "default": false
+ "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b",
+ "maxLength": 100
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChunkPlan"
- }
- ]
+ "temperature": {
+ "type": "number",
+ "description": "This is the temperature of the model.",
+ "minimum": 0,
+ "maximum": 2
},
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
- {
- "$ref": "#/components/schemas/FallbackPlan"
- }
- ]
+ "maxTokens": {
+ "type": "number",
+ "description": "This is the max tokens of the model.",
+ "minimum": 50,
+ "maximum": 10000
}
},
"required": [
"provider",
- "voiceId"
+ "url",
+ "model"
]
},
- "ElevenLabsPronunciationDictionaryLocator": {
+ "GlobalNodePlan": {
"type": "object",
"properties": {
- "pronunciationDictionaryId": {
- "type": "string",
- "description": "This is the ID of the pronunciation dictionary to use.",
- "title": "This is the ElevenLabs Pronunciation Dictionary ID"
+ "enabled": {
+ "type": "boolean",
+ "description": "This is the flag to determine if this node is a global node\n\n@default false",
+ "default": false
},
- "versionId": {
+ "enterCondition": {
"type": "string",
- "description": "This is the version ID of the pronunciation dictionary to use.",
- "title": "This is the ElevenLabs Pronunciation Dictionary Version ID"
+ "description": "This is the condition that will be checked to determine if the global node should be executed.\n\n@default ''",
+ "maxLength": 1000,
+ "default": ""
}
- },
- "required": [
- "pronunciationDictionaryId",
- "versionId"
- ]
+ }
},
- "ElevenLabsVoice": {
+ "ConversationNode": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
+ "type": {
"type": "string",
- "description": "This is the voice provider that will be used.",
+ "description": "This is the Conversation node. This can be used to start a conversation with the customer.\n\nThe flow is:\n- Workflow starts the conversation node\n- Model is active with the `prompt` and global context.\n- Model will call a tool to exit this node.\n- Workflow will extract variables from the conversation.\n- Workflow continues.",
"enum": [
- "11labs"
+ "conversation"
]
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used. Ensure the Voice is present in your 11Labs Voice Library.",
+ "model": {
+ "description": "This is the model for the node.\n\nThis overrides `workflow.model`.",
"oneOf": [
{
- "type": "string",
- "enum": [
- "burt",
- "marissa",
- "andrea",
- "sarah",
- "phillip",
- "steve",
- "joseph",
- "myra",
- "paula",
- "ryan",
- "drew",
- "paul",
- "mrb",
- "matilda",
- "mark"
- ],
- "title": "Preset Voice Options"
+ "$ref": "#/components/schemas/WorkflowOpenAIModel",
+ "title": "WorkflowOpenAIModel"
},
{
- "type": "string",
- "title": "11Labs Voice ID"
- }
- ]
- },
- "stability": {
- "type": "number",
- "description": "Defines the stability for voice settings.",
- "minimum": 0,
- "maximum": 1,
- "example": 0.5
- },
- "similarityBoost": {
- "type": "number",
- "description": "Defines the similarity boost for voice settings.",
- "minimum": 0,
- "maximum": 1,
- "example": 0.75
- },
- "style": {
- "type": "number",
- "description": "Defines the style for voice settings.",
- "minimum": 0,
- "maximum": 1,
- "example": 0
- },
- "useSpeakerBoost": {
- "type": "boolean",
- "description": "Defines the use speaker boost for voice settings.",
- "example": false
- },
- "speed": {
- "type": "number",
- "description": "Defines the speed for voice settings.",
- "minimum": 0.7,
- "maximum": 1.2,
- "example": 0.9
- },
- "optimizeStreamingLatency": {
- "type": "number",
- "description": "Defines the optimize streaming latency for voice settings. Defaults to 3.",
- "minimum": 0,
- "maximum": 4,
- "example": 3
- },
- "enableSsmlParsing": {
- "type": "boolean",
- "description": "This enables the use of https://elevenlabs.io/docs/speech-synthesis/prompting#pronunciation. Defaults to false to save latency.\n\n@default false",
- "example": false
- },
- "autoMode": {
- "type": "boolean",
- "description": "Defines the auto mode for voice settings. Defaults to false.",
- "example": false
- },
- "model": {
- "type": "string",
- "description": "This is the model that will be used. Defaults to 'eleven_turbo_v2' if not specified.",
- "enum": [
- "eleven_multilingual_v2",
- "eleven_turbo_v2",
- "eleven_turbo_v2_5",
- "eleven_flash_v2",
- "eleven_flash_v2_5",
- "eleven_monolingual_v1"
- ],
- "example": "eleven_turbo_v2_5"
- },
- "language": {
- "type": "string",
- "description": "This is the language (ISO 639-1) that is enforced for the model. Currently only Turbo v2.5 supports language enforcement. For other models, an error will be returned if language code is provided."
- },
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
+ "$ref": "#/components/schemas/WorkflowAnthropicModel",
+ "title": "WorkflowAnthropicModel"
+ },
{
- "$ref": "#/components/schemas/ChunkPlan"
- }
- ]
- },
- "pronunciationDictionaryLocators": {
- "description": "This is the pronunciation dictionary locators to use.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/ElevenLabsPronunciationDictionaryLocator"
- }
- },
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
+ "$ref": "#/components/schemas/WorkflowAnthropicBedrockModel",
+ "title": "WorkflowAnthropicBedrockModel"
+ },
{
- "$ref": "#/components/schemas/FallbackPlan"
+ "$ref": "#/components/schemas/WorkflowGoogleModel",
+ "title": "WorkflowGoogleModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowCustomModel",
+ "title": "WorkflowCustomModel"
}
]
- }
- },
- "required": [
- "provider",
- "voiceId"
- ]
- },
- "HumeVoice": {
- "type": "object",
- "properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
- "type": "string",
- "description": "This is the voice provider that will be used.",
- "enum": [
- "hume"
- ]
- },
- "model": {
- "type": "string",
- "description": "This is the model that will be used.",
- "enum": [
- "octave"
- ],
- "example": "octave"
- },
- "voiceId": {
- "type": "string",
- "description": "The ID of the particular voice you want to use."
- },
- "isCustomHumeVoice": {
- "type": "boolean",
- "description": "Indicates whether the chosen voice is a preset Hume AI voice or a custom voice.",
- "example": false
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
+ "transcriber": {
+ "description": "This is the transcriber for the node.\n\nThis overrides `workflow.transcriber`.",
+ "oneOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/AssemblyAITranscriber",
+ "title": "AssemblyAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/AzureSpeechTranscriber",
+ "title": "AzureSpeechTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CustomTranscriber",
+ "title": "CustomTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramTranscriber",
+ "title": "DeepgramTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsTranscriber",
+ "title": "ElevenLabsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GladiaTranscriber",
+ "title": "GladiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleTranscriber",
+ "title": "GoogleTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SpeechmaticsTranscriber",
+ "title": "SpeechmaticsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/TalkscriberTranscriber",
+ "title": "TalkscriberTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAITranscriber",
+ "title": "OpenAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaTranscriber",
+ "title": "CartesiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SonioxTranscriber",
+ "title": "SonioxTranscriber"
}
]
},
- "description": {
- "type": "string",
- "description": "Natural language instructions describing how the synthesized speech should sound, including but not limited to tone, intonation, pacing, and accent (e.g., 'a soft, gentle voice with a strong British accent').\n\nIf a Voice is specified in the request, this description serves as acting instructions.\nIf no Voice is specified, a new voice is generated based on this description."
- },
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
+ "voice": {
+ "description": "This is the voice for the node.\n\nThis overrides `workflow.voice`.",
+ "oneOf": [
{
- "$ref": "#/components/schemas/FallbackPlan"
+ "$ref": "#/components/schemas/AzureVoice",
+ "title": "AzureVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaVoice",
+ "title": "CartesiaVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CustomVoice",
+ "title": "CustomVoice"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramVoice",
+ "title": "DeepgramVoice"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsVoice",
+ "title": "ElevenLabsVoice"
+ },
+ {
+ "$ref": "#/components/schemas/HumeVoice",
+ "title": "HumeVoice"
+ },
+ {
+ "$ref": "#/components/schemas/LMNTVoice",
+ "title": "LMNTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/NeuphonicVoice",
+ "title": "NeuphonicVoice"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoice",
+ "title": "OpenAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/PlayHTVoice",
+ "title": "PlayHTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/WellSaidVoice",
+ "title": "WellSaidVoice"
+ },
+ {
+ "$ref": "#/components/schemas/RimeAIVoice",
+ "title": "RimeAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SmallestAIVoice",
+ "title": "SmallestAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/TavusVoice",
+ "title": "TavusVoice"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoice",
+ "title": "VapiVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SesameVoice",
+ "title": "SesameVoice"
+ },
+ {
+ "$ref": "#/components/schemas/InworldVoice",
+ "title": "InworldVoice"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxVoice",
+ "title": "MinimaxVoice"
+ }
+ ]
+ },
+ "tools": {
+ "type": "array",
+ "description": "These are the tools that the conversation node can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateApiRequestToolDTO",
+ "title": "ApiRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateBashToolDTO",
+ "title": "BashTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateComputerToolDTO",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDtmfToolDTO",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEndCallToolDTO",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateFunctionToolDTO",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHandoffToolDTO",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMcpToolDTO",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateQueryToolDTO",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmsToolDTO",
+ "title": "SmsTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTextEditorToolDTO",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTransferCallToolDTO",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
+ }
+ ]
+ }
+ },
+ "toolIds": {
+ "description": "These are the tools that the conversation node can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "prompt": {
+ "type": "string",
+ "maxLength": 5000
+ },
+ "globalNodePlan": {
+ "description": "This is the plan for the global node.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/GlobalNodePlan"
+ }
+ ]
+ },
+ "variableExtractionPlan": {
+ "description": "This is the plan that controls the variable extraction from the user's responses.\n\nUsage:\nUse `schema` to specify what you want to extract from the user's responses.\n```json\n{\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"user\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n }\n }\n }\n }\n }\n}\n```\n\nThis will be extracted as `{{ user.name }}` and `{{ user.age }}` respectively.\n\n(Optional) Use `aliases` to create new variables.\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"userAge\",\n \"value\": \"{{user.age}}\"\n },\n {\n \"key\": \"userName\",\n \"value\": \"{{user.name}}\"\n }\n ]\n}\n```\n\nThis will be extracted as `{{ userAge }}` and `{{ userName }}` respectively.\n\nNote: The `schema` field is required for Conversation nodes if you want to extract variables from the user's responses. `aliases` is just a convenience.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/VariableExtractionPlan"
}
]
+ },
+ "name": {
+ "type": "string",
+ "maxLength": 80
+ },
+ "isStart": {
+ "type": "boolean",
+ "description": "This is whether or not the node is the start of the workflow."
+ },
+ "metadata": {
+ "type": "object",
+ "description": "This is for metadata you want to store on the task."
}
},
"required": [
- "provider",
- "voiceId"
+ "type",
+ "name"
]
},
- "LMNTVoice": {
+ "ToolNode": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
+ "type": {
"type": "string",
- "description": "This is the voice provider that will be used.",
+ "description": "This is the Tool node. This can be used to call a tool in your workflow.\n\nThe flow is:\n- Workflow starts the tool node\n- Model is called to extract parameters needed by the tool from the conversation history\n- Tool is called with the parameters\n- Server returns a response\n- Workflow continues with the response",
"enum": [
- "lmnt"
+ "tool"
]
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.",
+ "tool": {
+ "description": "This is the tool to call. To use an existing tool, send `toolId` instead.",
"oneOf": [
{
- "type": "string",
- "enum": [
- "amy",
- "ansel",
- "autumn",
- "ava",
- "brandon",
- "caleb",
- "cassian",
- "chloe",
- "dalton",
- "daniel",
- "dustin",
- "elowen",
- "evander",
- "huxley",
- "james",
- "juniper",
- "kennedy",
- "lauren",
- "leah",
- "lily",
- "lucas",
- "magnus",
- "miles",
- "morgan",
- "natalie",
- "nathan",
- "noah",
- "nyssa",
- "oliver",
- "paige",
- "ryan",
- "sadie",
- "sophie",
- "stella",
- "terrence",
- "tyler",
- "vesper",
- "violet",
- "warrick",
- "zain",
- "zeke",
- "zoe"
- ],
- "title": "Preset Voice Options"
+ "$ref": "#/components/schemas/CreateApiRequestToolDTO",
+ "title": "ApiRequestTool"
},
{
- "type": "string",
- "title": "LMNT Voice ID"
+ "$ref": "#/components/schemas/CreateBashToolDTO",
+ "title": "BashTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateComputerToolDTO",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDtmfToolDTO",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEndCallToolDTO",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateFunctionToolDTO",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHandoffToolDTO",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMcpToolDTO",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateQueryToolDTO",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmsToolDTO",
+ "title": "SmsTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTextEditorToolDTO",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTransferCallToolDTO",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
]
},
- "speed": {
+ "toolId": {
+ "type": "string",
+ "description": "This is the tool to call. To use a transient tool, send `tool` instead."
+ },
+ "name": {
+ "type": "string",
+ "maxLength": 80
+ },
+ "isStart": {
+ "type": "boolean",
+ "description": "This is whether or not the node is the start of the workflow."
+ },
+ "metadata": {
+ "type": "object",
+ "description": "This is for metadata you want to store on the task."
+ }
+ },
+ "required": [
+ "type",
+ "name"
+ ]
+ },
+ "VoicemailDetectionBackoffPlan": {
+ "type": "object",
+ "properties": {
+ "startAtSeconds": {
"type": "number",
- "description": "This is the speed multiplier that will be used.",
- "minimum": 0.25,
- "maximum": 2,
- "example": null
+ "description": "This is the number of seconds to wait before starting the first retry attempt.",
+ "minimum": 0,
+ "default": 5
},
- "language": {
- "description": "Two letter ISO 639-1 language code. Use \"auto\" for auto-detection.",
+ "frequencySeconds": {
+ "type": "number",
+ "description": "This is the interval in seconds between retry attempts.",
+ "minimum": 2.5,
+ "default": 5
+ },
+ "maxRetries": {
+ "type": "number",
+ "description": "This is the maximum number of retry attempts before giving up.",
+ "minimum": 1,
+ "maximum": 10,
+ "default": 6
+ }
+ }
+ },
+ "GoogleVoicemailDetectionPlan": {
+ "type": "object",
+ "properties": {
+ "beepMaxAwaitSeconds": {
+ "type": "number",
+ "description": "This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message\n\n- If we detect a voicemail beep before this, we will speak the message at that point.\n\n- Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. You should definitely tune this to your use case.\n\n@default 30\n@min 0\n@max 60",
+ "minimum": 0,
+ "maximum": 30,
+ "default": 30
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is the provider to use for voicemail detection.",
"enum": [
- "aa",
- "ab",
- "ae",
- "af",
- "ak",
- "am",
- "an",
- "ar",
- "as",
- "av",
- "ay",
- "az",
- "ba",
- "be",
- "bg",
- "bh",
- "bi",
- "bm",
- "bn",
- "bo",
- "br",
- "bs",
- "ca",
- "ce",
- "ch",
- "co",
- "cr",
- "cs",
- "cu",
- "cv",
- "cy",
- "da",
- "de",
- "dv",
- "dz",
- "ee",
- "el",
- "en",
- "eo",
- "es",
- "et",
- "eu",
- "fa",
- "ff",
- "fi",
- "fj",
- "fo",
- "fr",
- "fy",
- "ga",
- "gd",
- "gl",
- "gn",
- "gu",
- "gv",
- "ha",
- "he",
- "hi",
- "ho",
- "hr",
- "ht",
- "hu",
- "hy",
- "hz",
- "ia",
- "id",
- "ie",
- "ig",
- "ii",
- "ik",
- "io",
- "is",
- "it",
- "iu",
- "ja",
- "jv",
- "ka",
- "kg",
- "ki",
- "kj",
- "kk",
- "kl",
- "km",
- "kn",
- "ko",
- "kr",
- "ks",
- "ku",
- "kv",
- "kw",
- "ky",
- "la",
- "lb",
- "lg",
- "li",
- "ln",
- "lo",
- "lt",
- "lu",
- "lv",
- "mg",
- "mh",
- "mi",
- "mk",
- "ml",
- "mn",
- "mr",
- "ms",
- "mt",
- "my",
- "na",
- "nb",
- "nd",
- "ne",
- "ng",
- "nl",
- "nn",
- "no",
- "nr",
- "nv",
- "ny",
- "oc",
- "oj",
- "om",
- "or",
- "os",
- "pa",
- "pi",
- "pl",
- "ps",
- "pt",
- "qu",
- "rm",
- "rn",
- "ro",
- "ru",
- "rw",
- "sa",
- "sc",
- "sd",
- "se",
- "sg",
- "si",
- "sk",
- "sl",
- "sm",
- "sn",
- "so",
- "sq",
- "sr",
- "ss",
- "st",
- "su",
- "sv",
- "sw",
- "ta",
- "te",
- "tg",
- "th",
- "ti",
- "tk",
- "tl",
- "tn",
- "to",
- "tr",
- "ts",
- "tt",
- "tw",
- "ty",
- "ug",
- "uk",
- "ur",
- "uz",
- "ve",
- "vi",
- "vo",
- "wa",
- "wo",
- "xh",
- "yi",
- "yue",
- "yo",
- "za",
- "zh",
- "zu",
- "auto"
- ],
- "example": "en",
- "oneOf": [
- {
- "type": "string",
- "enum": [
- "aa",
- "ab",
- "ae",
- "af",
- "ak",
- "am",
- "an",
- "ar",
- "as",
- "av",
- "ay",
- "az",
- "ba",
- "be",
- "bg",
- "bh",
- "bi",
- "bm",
- "bn",
- "bo",
- "br",
- "bs",
- "ca",
- "ce",
- "ch",
- "co",
- "cr",
- "cs",
- "cu",
- "cv",
- "cy",
- "da",
- "de",
- "dv",
- "dz",
- "ee",
- "el",
- "en",
- "eo",
- "es",
- "et",
- "eu",
- "fa",
- "ff",
- "fi",
- "fj",
- "fo",
- "fr",
- "fy",
- "ga",
- "gd",
- "gl",
- "gn",
- "gu",
- "gv",
- "ha",
- "he",
- "hi",
- "ho",
- "hr",
- "ht",
- "hu",
- "hy",
- "hz",
- "ia",
- "id",
- "ie",
- "ig",
- "ii",
- "ik",
- "io",
- "is",
- "it",
- "iu",
- "ja",
- "jv",
- "ka",
- "kg",
- "ki",
- "kj",
- "kk",
- "kl",
- "km",
- "kn",
- "ko",
- "kr",
- "ks",
- "ku",
- "kv",
- "kw",
- "ky",
- "la",
- "lb",
- "lg",
- "li",
- "ln",
- "lo",
- "lt",
- "lu",
- "lv",
- "mg",
- "mh",
- "mi",
- "mk",
- "ml",
- "mn",
- "mr",
- "ms",
- "mt",
- "my",
- "na",
- "nb",
- "nd",
- "ne",
- "ng",
- "nl",
- "nn",
- "no",
- "nr",
- "nv",
- "ny",
- "oc",
- "oj",
- "om",
- "or",
- "os",
- "pa",
- "pi",
- "pl",
- "ps",
- "pt",
- "qu",
- "rm",
- "rn",
- "ro",
- "ru",
- "rw",
- "sa",
- "sc",
- "sd",
- "se",
- "sg",
- "si",
- "sk",
- "sl",
- "sm",
- "sn",
- "so",
- "sq",
- "sr",
- "ss",
- "st",
- "su",
- "sv",
- "sw",
- "ta",
- "te",
- "tg",
- "th",
- "ti",
- "tk",
- "tl",
- "tn",
- "to",
- "tr",
- "ts",
- "tt",
- "tw",
- "ty",
- "ug",
- "uk",
- "ur",
- "uz",
- "ve",
- "vi",
- "vo",
- "wa",
- "wo",
- "xh",
- "yi",
- "yue",
- "yo",
- "za",
- "zh",
- "zu"
- ],
- "title": "ISO 639-1 Language Code"
- },
- {
- "type": "string",
- "enum": [
- "auto"
- ],
- "title": "Auto-detect"
- }
+ "google"
]
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "backoffPlan": {
+ "description": "This is the backoff plan for the voicemail detection.",
"allOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/VoicemailDetectionBackoffPlan"
}
]
},
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
- {
- "$ref": "#/components/schemas/FallbackPlan"
- }
+ "type": {
+ "type": "string",
+ "description": "This is the detection type to use for voicemail detection.\n- 'audio': Uses native audio models (default)\n- 'transcript': Uses ASR/transcript-based detection\n@default 'audio' (audio detection)",
+ "enum": [
+ "audio",
+ "transcript"
]
}
},
"required": [
- "provider",
- "voiceId"
+ "provider"
]
},
- "NeuphonicVoice": {
+ "OpenAIVoicemailDetectionPlan": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
+ "beepMaxAwaitSeconds": {
+ "type": "number",
+ "description": "This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message\n\n- If we detect a voicemail beep before this, we will speak the message at that point.\n\n- Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. You should definitely tune this to your use case.\n\n@default 30\n@min 0\n@max 60",
+ "minimum": 0,
+ "maximum": 30,
+ "default": 30
},
"provider": {
"type": "string",
- "description": "This is the voice provider that will be used.",
+ "description": "This is the provider to use for voicemail detection.",
"enum": [
- "neuphonic"
+ "openai"
]
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.",
- "oneOf": [
- {
- "type": "string",
- "enum": [],
- "title": "Preset Voice Options"
- },
+ "backoffPlan": {
+ "description": "This is the backoff plan for the voicemail detection.",
+ "allOf": [
{
- "type": "string",
- "title": "Neuphonic Voice ID"
+ "$ref": "#/components/schemas/VoicemailDetectionBackoffPlan"
}
]
},
- "model": {
+ "type": {
"type": "string",
- "description": "This is the model that will be used. Defaults to 'neu_fast' if not specified.",
+ "description": "This is the detection type to use for voicemail detection.\n- 'audio': Uses native audio models (default)\n- 'transcript': Uses ASR/transcript-based detection\n@default 'audio' (audio detection)",
"enum": [
- "neu_hq",
- "neu_fast"
+ "audio",
+ "transcript"
+ ]
+ }
+ },
+ "required": [
+ "provider"
+ ]
+ },
+ "TwilioVoicemailDetectionPlan": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "description": "This is the provider to use for voicemail detection.",
+ "enum": [
+ "twilio"
+ ]
+ },
+ "voicemailDetectionTypes": {
+ "type": "array",
+ "description": "These are the AMD messages from Twilio that are considered as voicemail. Default is ['machine_end_beep', 'machine_end_silence'].\n\n@default {Array} ['machine_end_beep', 'machine_end_silence']",
+ "enum": [
+ "machine_start",
+ "human",
+ "fax",
+ "unknown",
+ "machine_end_beep",
+ "machine_end_silence",
+ "machine_end_other"
],
- "example": "neu_fast"
+ "example": [
+ "machine_end_beep",
+ "machine_end_silence"
+ ],
+ "items": {
+ "type": "string",
+ "enum": [
+ "machine_start",
+ "human",
+ "fax",
+ "unknown",
+ "machine_end_beep",
+ "machine_end_silence",
+ "machine_end_other"
+ ]
+ }
},
- "language": {
- "type": "object",
- "description": "This is the language (ISO 639-1) that is enforced for the model.",
- "example": "en"
+ "enabled": {
+ "type": "boolean",
+ "description": "This sets whether the assistant should detect voicemail. Defaults to true.\n\n@default true"
},
- "speed": {
+ "machineDetectionTimeout": {
"type": "number",
- "description": "This is the speed multiplier that will be used.",
- "minimum": 0.25,
- "maximum": 2,
- "example": null
+ "description": "The number of seconds that Twilio should attempt to perform answering machine detection before timing out and returning AnsweredBy as unknown. Default is 30 seconds.\n\nIncreasing this value will provide the engine more time to make a determination. This can be useful when DetectMessageEnd is provided in the MachineDetection parameter and there is an expectation of long answering machine greetings that can exceed 30 seconds.\n\nDecreasing this value will reduce the amount of time the engine has to make a determination. This can be particularly useful when the Enable option is provided in the MachineDetection parameter and you want to limit the time for initial detection.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 30",
+ "minimum": 3,
+ "maximum": 59
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChunkPlan"
- }
- ]
+ "machineDetectionSpeechThreshold": {
+ "type": "number",
+ "description": "The number of milliseconds that is used as the measuring stick for the length of the speech activity. Durations lower than this value will be interpreted as a human, longer as a machine. Default is 2400 milliseconds.\n\nIncreasing this value will reduce the chance of a False Machine (detected machine, actually human) for a long human greeting (e.g., a business greeting) but increase the time it takes to detect a machine.\n\nDecreasing this value will reduce the chances of a False Human (detected human, actually machine) for short voicemail greetings. The value of this parameter may need to be reduced by more than 1000ms to detect very short voicemail greetings. A reduction of that significance can result in increased False Machine detections. Adjusting the MachineDetectionSpeechEndThreshold is likely the better approach for short voicemails. Decreasing MachineDetectionSpeechThreshold will also reduce the time it takes to detect a machine.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 2400",
+ "minimum": 1000,
+ "maximum": 6000
},
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
- {
- "$ref": "#/components/schemas/FallbackPlan"
- }
- ]
+ "machineDetectionSpeechEndThreshold": {
+ "type": "number",
+ "description": "The number of milliseconds of silence after speech activity at which point the speech activity is considered complete. Default is 1200 milliseconds.\n\nIncreasing this value will typically be used to better address the short voicemail greeting scenarios. For short voicemails, there is typically 1000-2000ms of audio followed by 1200-2400ms of silence and then additional audio before the beep. Increasing the MachineDetectionSpeechEndThreshold to ~2500ms will treat the 1200-2400ms of silence as a gap in the greeting but not the end of the greeting and will result in a machine detection. The downsides of such a change include:\n- Increasing the delay for human detection by the amount you increase this parameter, e.g., a change of 1200ms to 2500ms increases human detection delay by 1300ms.\n- Cases where a human has two utterances separated by a period of silence (e.g. a \"Hello\", then 2000ms of silence, and another \"Hello\") may be interpreted as a machine.\n\nDecreasing this value will result in faster human detection. The consequence is that it can lead to increased False Human (detected human, actually machine) detections because a silence gap in a voicemail greeting (not necessarily just in short voicemail scenarios) can be incorrectly interpreted as the end of speech.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 1200",
+ "minimum": 500,
+ "maximum": 5000
+ },
+ "machineDetectionSilenceTimeout": {
+ "type": "number",
+ "description": "The number of milliseconds of initial silence after which an unknown AnsweredBy result will be returned. Default is 5000 milliseconds.\n\nIncreasing this value will result in waiting for a longer period of initial silence before returning an 'unknown' AMD result.\n\nDecreasing this value will result in waiting for a shorter period of initial silence before returning an 'unknown' AMD result.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 5000",
+ "minimum": 2000,
+ "maximum": 10000
}
},
"required": [
- "provider",
- "voiceId",
- "language"
+ "provider"
]
},
- "OpenAIVoice": {
+ "VapiVoicemailDetectionPlan": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
+ "beepMaxAwaitSeconds": {
+ "type": "number",
+ "description": "This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message\n\n- If we detect a voicemail beep before this, we will speak the message at that point.\n\n- Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. You should definitely tune this to your use case.\n\n@default 30\n@min 0\n@max 60",
+ "minimum": 0,
+ "maximum": 30,
+ "default": 30
},
"provider": {
"type": "string",
- "description": "This is the voice provider that will be used.",
+ "description": "This is the provider to use for voicemail detection.",
"enum": [
- "openai"
+ "vapi"
]
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.\nPlease note that ash, ballad, coral, sage, and verse may only be used with realtime models.",
- "oneOf": [
- {
- "type": "string",
- "enum": [
- "alloy",
- "echo",
- "fable",
- "onyx",
- "nova",
- "shimmer"
- ],
- "title": "Preset Voice Options"
- },
+ "backoffPlan": {
+ "description": "This is the backoff plan for the voicemail detection.",
+ "allOf": [
{
- "type": "string",
- "title": "OpenAI Voice ID"
+ "$ref": "#/components/schemas/VoicemailDetectionBackoffPlan"
}
]
},
- "model": {
+ "type": {
"type": "string",
- "description": "This is the model that will be used for text-to-speech.",
+ "description": "This is the detection type to use for voicemail detection.\n- 'audio': Uses native audio models (default)\n- 'transcript': Uses ASR/transcript-based detection\n@default 'audio' (audio detection)",
"enum": [
- "tts-1",
- "tts-1-hd",
- "gpt-4o-mini-tts"
+ "audio",
+ "transcript"
]
- },
- "instructions": {
+ }
+ },
+ "required": [
+ "provider"
+ ]
+ },
+ "TransferHookAction": {
+ "type": "object",
+ "properties": {
+ "type": {
"type": "string",
- "description": "This is a prompt that allows you to control the voice of your generated audio.\nDoes not work with 'tts-1' or 'tts-1-hd' models.",
- "maxLength": 10000
- },
- "speed": {
- "type": "number",
- "description": "This is the speed multiplier that will be used.",
- "minimum": 0.25,
- "maximum": 4,
- "example": null
- },
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChunkPlan"
- }
+ "description": "This is the type of action - must be \"transfer\"",
+ "enum": [
+ "transfer"
]
},
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
+ "destination": {
+ "description": "This is the destination details for the transfer - can be a phone number or SIP URI",
+ "oneOf": [
{
- "$ref": "#/components/schemas/FallbackPlan"
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
}
]
}
},
"required": [
- "provider",
- "voiceId"
+ "type"
]
},
- "PlayHTVoice": {
+ "FunctionCallHookAction": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
},
- "provider": {
+ "type": {
"type": "string",
- "description": "This is the voice provider that will be used.",
"enum": [
- "playht"
- ]
+ "function"
+ ],
+ "description": "The type of tool. \"function\" for Function tool."
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.",
- "oneOf": [
- {
- "type": "string",
- "enum": [
- "jennifer",
- "melissa",
- "will",
- "chris",
- "matt",
- "jack",
- "ruby",
- "davis",
- "donna",
- "michael"
- ],
- "title": "Preset Voice Options"
- },
+ "async": {
+ "type": "boolean",
+ "example": false,
+ "description": "This determines if the tool is async.\n\n If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server.\n\n If sync, the assistant will wait for your server to respond. This is useful if want assistant to respond with the result from your server.\n\n Defaults to synchronous (`false`)."
+ },
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "allOf": [
{
- "type": "string",
- "title": "PlayHT Voice ID"
+ "$ref": "#/components/schemas/Server"
}
]
},
- "speed": {
- "type": "number",
- "description": "This is the speed multiplier that will be used.",
- "minimum": 0.1,
- "maximum": 5,
- "example": null
- },
- "temperature": {
- "type": "number",
- "description": "A floating point number between 0, exclusive, and 2, inclusive. If equal to null or not provided, the model's default temperature will be used. The temperature parameter controls variance. Lower temperatures result in more predictable results, higher temperatures allow each run to vary more, so the voice may sound less like the baseline voice.",
- "minimum": 0.1,
- "maximum": 2,
- "example": null
- },
- "emotion": {
- "type": "string",
- "description": "An emotion to be applied to the speech.",
- "enum": [
- "female_happy",
- "female_sad",
- "female_angry",
- "female_fearful",
- "female_disgust",
- "female_surprised",
- "male_happy",
- "male_sad",
- "male_angry",
- "male_fearful",
- "male_disgust",
- "male_surprised"
- ],
- "example": null
- },
- "voiceGuidance": {
- "type": "number",
- "description": "A number between 1 and 6. Use lower numbers to reduce how unique your chosen voice will be compared to other voices.",
- "minimum": 1,
- "maximum": 6,
- "example": null
- },
- "styleGuidance": {
- "type": "number",
- "description": "A number between 1 and 30. Use lower numbers to to reduce how strong your chosen emotion will be. Higher numbers will create a very emotional performance.",
- "minimum": 1,
- "maximum": 30,
- "example": null
- },
- "textGuidance": {
- "type": "number",
- "description": "A number between 1 and 2. This number influences how closely the generated speech adheres to the input text. Use lower values to create more fluid speech, but with a higher chance of deviating from the input text. Higher numbers will make the generated speech more accurate to the input text, ensuring that the words spoken align closely with the provided text.",
- "minimum": 1,
- "maximum": 2,
- "example": null
- },
- "model": {
- "type": "string",
- "description": "Playht voice model/engine to use.",
- "enum": [
- "PlayHT2.0",
- "PlayHT2.0-turbo",
- "Play3.0-mini",
- "PlayDialog"
+ "variableExtractionPlan": {
+ "description": "Plan to extract variables from the tool response",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/VariableExtractionPlan"
+ }
]
},
- "language": {
- "type": "string",
- "description": "The language to use for the speech.",
- "enum": [
- "afrikaans",
- "albanian",
- "amharic",
- "arabic",
- "bengali",
- "bulgarian",
- "catalan",
- "croatian",
- "czech",
- "danish",
- "dutch",
- "english",
- "french",
- "galician",
- "german",
- "greek",
- "hebrew",
- "hindi",
- "hungarian",
- "indonesian",
- "italian",
- "japanese",
- "korean",
- "malay",
- "mandarin",
- "polish",
- "portuguese",
- "russian",
- "serbian",
- "spanish",
- "swedish",
- "tagalog",
- "thai",
- "turkish",
- "ukrainian",
- "urdu",
- "xhosa"
- ]
+ "parameters": {
+ "description": "Static key-value pairs merged into the request body. Values support Liquid templates.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ToolParameter"
+ }
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
},
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "function": {
+ "description": "This is the function definition of the tool.",
"allOf": [
{
- "$ref": "#/components/schemas/FallbackPlan"
+ "$ref": "#/components/schemas/OpenAIFunction"
}
]
}
},
"required": [
- "provider",
- "voiceId"
+ "type"
]
},
- "RimeAIVoice": {
+ "SayHookAction": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
+ "type": {
"type": "string",
- "description": "This is the voice provider that will be used.",
+ "description": "This is the type of action - must be \"say\"",
"enum": [
- "rime-ai"
+ "say"
]
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.",
+ "prompt": {
+ "description": "This is the prompt for the assistant to generate a response based on existing conversation.\nCan be a string or an array of chat messages.",
"oneOf": [
{
"type": "string",
- "enum": [
- "abbie",
- "allison",
- "ally",
- "alona",
- "amber",
- "ana",
- "antoine",
- "armon",
- "brenda",
- "brittany",
- "carol",
- "colin",
- "courtney",
- "elena",
- "elliot",
- "eva",
- "geoff",
- "gerald",
- "hank",
- "helen",
- "hera",
- "jen",
- "joe",
- "joy",
- "juan",
- "kendra",
- "kendrick",
- "kenneth",
- "kevin",
- "kris",
- "linda",
- "madison",
- "marge",
- "marina",
- "marissa",
- "marta",
- "maya",
- "nicholas",
- "nyles",
- "phil",
- "reba",
- "rex",
- "rick",
- "ritu",
- "rob",
- "rodney",
- "rohan",
- "rosco",
- "samantha",
- "sandy",
- "selena",
- "seth",
- "sharon",
- "stan",
- "tamra",
- "tanya",
- "tibur",
- "tj",
- "tyler",
- "viv",
- "yadira",
- "marsh",
- "bayou",
- "creek",
- "brook",
- "flower",
- "spore",
- "glacier",
- "gulch",
- "alpine",
- "cove",
- "lagoon",
- "tundra",
- "steppe",
- "mesa",
- "grove",
- "rainforest",
- "moraine",
- "wildflower",
- "peak",
- "boulder",
- "gypsum",
- "zest",
- "luna",
- "celeste",
- "orion",
- "ursa",
- "astra",
- "esther",
- "estelle",
- "andromeda"
- ],
- "title": "Preset Voice Options"
+ "title": "String"
},
{
- "type": "string",
- "title": "RimeAI Voice ID"
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
+ },
+ {
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
+ },
+ {
+ "$ref": "#/components/schemas/AssistantMessage",
+ "title": "AssistantMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessage",
+ "title": "ToolMessage"
+ },
+ {
+ "$ref": "#/components/schemas/DeveloperMessage",
+ "title": "DeveloperMessage"
+ }
+ ]
+ },
+ "title": "MessageArray"
}
- ]
- },
- "model": {
- "type": "string",
- "description": "This is the model that will be used. Defaults to 'arcana' when not specified.",
- "enum": [
- "arcana",
- "mistv2",
- "mist"
],
- "example": "arcana"
- },
- "speed": {
- "type": "number",
- "description": "This is the speed multiplier that will be used.",
- "minimum": 0.1,
- "example": null
- },
- "pauseBetweenBrackets": {
- "type": "boolean",
- "description": "This is a flag that controls whether to add slight pauses using angle brackets. Example: \"Hi. <200> I'd love to have a conversation with you.\" adds a 200ms pause between the first and second sentences.",
- "example": false
- },
- "phonemizeBetweenBrackets": {
- "type": "boolean",
- "description": "This is a flag that controls whether text inside brackets should be phonemized (converted to phonetic pronunciation) - Example: \"{h'El.o} World\" will pronounce \"Hello\" as expected.",
- "example": false
- },
- "reduceLatency": {
- "type": "boolean",
- "description": "This is a flag that controls whether to optimize for reduced latency in streaming. https://docs.rime.ai/api-reference/endpoint/websockets#param-reduce-latency",
- "example": false
- },
- "inlineSpeedAlpha": {
- "type": "string",
- "description": "This is a string that allows inline speed control using alpha notation. https://docs.rime.ai/api-reference/endpoint/websockets#param-inline-speed-alpha",
- "example": null
- },
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChunkPlan"
- }
+ "examples": [
+ "Ask the user if they're still in the call",
+ [
+ {
+ "role": "system",
+ "content": "You are a helpful assistant, and would like to know if the user is still in the call based on the conversation history in {{transcript}}"
+ }
+ ]
]
},
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
- {
- "$ref": "#/components/schemas/FallbackPlan"
- }
- ]
+ "exact": {
+ "type": "object",
+ "description": "This is the message to say"
}
},
"required": [
- "provider",
- "voiceId"
+ "type"
]
},
- "SesameVoice": {
+ "MessageAddHookAction": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
- "type": "string",
- "description": "This is the voice provider that will be used.",
- "enum": [
- "sesame"
- ]
- },
- "voiceId": {
- "type": "string",
- "description": "This is the provider-specific ID that will be used.",
- "title": "Sesame Voice ID. This should be either a name (a built-in voice) or a UUID (a custom voice)."
- },
- "model": {
+ "type": {
"type": "string",
- "description": "This is the model that will be used.",
+ "description": "This is the type of action - must be \"message.add\"",
"enum": [
- "csm-1b"
+ "message.add"
]
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "message": {
+ "description": "The message to add to the conversation in OpenAI format",
+ "example": {
+ "role": "system",
+ "content": "Context update from hook"
+ },
"allOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/OpenAIMessage"
}
]
},
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
- {
- "$ref": "#/components/schemas/FallbackPlan"
- }
- ]
+ "triggerResponseEnabled": {
+ "type": "boolean",
+ "description": "Whether to trigger an assistant response after adding the message",
+ "default": true
}
},
"required": [
- "provider",
- "voiceId",
- "model"
+ "type",
+ "message"
]
},
- "SmallestAIVoice": {
+ "CallHookFilter": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
+ "type": {
"type": "string",
- "description": "This is the voice provider that will be used.",
+ "description": "This is the type of filter - currently only \"oneOf\" is supported",
"enum": [
- "smallest-ai"
- ]
- },
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.",
- "oneOf": [
- {
- "type": "string",
- "enum": [
- "emily",
- "jasmine",
- "arman",
- "james",
- "mithali",
- "aravind",
- "raj",
- "diya",
- "raman",
- "ananya",
- "isha",
- "william",
- "aarav",
- "monika",
- "niharika",
- "deepika",
- "raghav",
- "kajal",
- "radhika",
- "mansi",
- "nisha",
- "saurabh",
- "pooja",
- "saina",
- "sanya"
- ],
- "title": "Preset Voice Options"
- },
- {
- "type": "string",
- "title": "Smallest AI Voice ID"
- }
- ]
+ "oneOf"
+ ],
+ "maxLength": 1000
},
- "model": {
+ "key": {
"type": "string",
- "description": "Smallest AI voice model to use. Defaults to 'lightning' when not specified.",
- "enum": [
- "lightning"
- ]
- },
- "speed": {
- "type": "number",
- "description": "This is the speed multiplier that will be used.",
- "example": null
- },
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChunkPlan"
- }
- ]
+ "description": "This is the key to filter on (e.g. \"call.endedReason\")",
+ "maxLength": 1000
},
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
- {
- "$ref": "#/components/schemas/FallbackPlan"
- }
- ]
+ "oneOf": {
+ "description": "This is the array of possible values to match against",
+ "type": "array",
+ "items": {
+ "type": "string",
+ "maxLength": 1000
+ }
}
},
"required": [
- "provider",
- "voiceId"
+ "type",
+ "key",
+ "oneOf"
]
},
- "TavusConversationProperties": {
+ "CallHookCallEnding": {
"type": "object",
"properties": {
- "maxCallDuration": {
- "type": "number",
- "description": "The maximum duration of the call in seconds. The default `maxCallDuration` is 3600 seconds (1 hour).\nOnce the time limit specified by this parameter has been reached, the conversation will automatically shut down."
- },
- "participantLeftTimeout": {
- "type": "number",
- "description": "The duration in seconds after which the call will be automatically shut down once the last participant leaves."
- },
- "participantAbsentTimeout": {
- "type": "number",
- "description": "Starting from conversation creation, the duration in seconds after which the call will be automatically shut down if no participant joins the call.\nDefault is 300 seconds (5 minutes)."
- },
- "enableRecording": {
- "type": "boolean",
- "description": "If true, the user will be able to record the conversation."
- },
- "enableTranscription": {
- "type": "boolean",
- "description": "If true, the user will be able to transcribe the conversation.\nYou can find more instructions on displaying transcriptions if you are using your custom DailyJS components here.\nYou need to have an event listener on Daily that listens for `app-messages`."
- },
- "applyGreenscreen": {
- "type": "boolean",
- "description": "If true, the background will be replaced with a greenscreen (RGB values: `[0, 255, 155]`).\nYou can use WebGL on the frontend to make the greenscreen transparent or change its color."
- },
- "language": {
+ "on": {
"type": "string",
- "description": "The language of the conversation. Please provide the **full language name**, not the two-letter code.\nIf you are using your own TTS voice, please ensure it supports the language you provide.\nIf you are using a stock replica or default persona, please note that only ElevenLabs and Cartesia supported languages are available.\nYou can find a full list of supported languages for Cartesia here, for ElevenLabs here, and for PlayHT here."
+ "description": "This is the event that triggers this hook",
+ "enum": [
+ "call.ending"
+ ],
+ "maxLength": 1000
},
- "recordingS3BucketName": {
- "type": "string",
- "description": "The name of the S3 bucket where the recording will be stored."
+ "do": {
+ "type": "array",
+ "description": "This is the set of actions to perform when the hook triggers",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolCallHookAction",
+ "title": "ToolCallHookAction"
+ },
+ {
+ "$ref": "#/components/schemas/MessageAddHookAction",
+ "title": "MessageAddHookAction"
+ }
+ ]
+ }
},
- "recordingS3BucketRegion": {
+ "filters": {
+ "description": "This is the set of filters that must match for the hook to trigger",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/CallHookFilter"
+ }
+ }
+ },
+ "required": [
+ "on",
+ "do"
+ ]
+ },
+ "CallHookAssistantSpeechInterrupted": {
+ "type": "object",
+ "properties": {
+ "on": {
"type": "string",
- "description": "The region of the S3 bucket where the recording will be stored."
+ "description": "This is the event that triggers this hook",
+ "enum": [
+ "assistant.speech.interrupted"
+ ],
+ "maxLength": 1000
},
- "awsAssumeRoleArn": {
- "type": "string",
- "description": "The ARN of the role that will be assumed to access the S3 bucket."
+ "do": {
+ "type": "array",
+ "description": "This is the set of actions to perform when the hook triggers",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SayHookAction",
+ "title": "SayHookAction"
+ },
+ {
+ "$ref": "#/components/schemas/ToolCallHookAction",
+ "title": "ToolCallHookAction"
+ },
+ {
+ "$ref": "#/components/schemas/MessageAddHookAction",
+ "title": "MessageAddHookAction"
+ }
+ ]
+ }
}
- }
+ },
+ "required": [
+ "on",
+ "do"
+ ]
},
- "TavusVoice": {
+ "CallHookCustomerSpeechInterrupted": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
+ "on": {
+ "type": "string",
+ "description": "This is the event that triggers this hook",
+ "enum": [
+ "customer.speech.interrupted"
+ ],
+ "maxLength": 1000
},
- "provider": {
+ "do": {
+ "type": "array",
+ "description": "This is the set of actions to perform when the hook triggers",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SayHookAction",
+ "title": "SayHookAction"
+ },
+ {
+ "$ref": "#/components/schemas/ToolCallHookAction",
+ "title": "ToolCallHookAction"
+ },
+ {
+ "$ref": "#/components/schemas/MessageAddHookAction",
+ "title": "MessageAddHookAction"
+ }
+ ]
+ }
+ }
+ },
+ "required": [
+ "on",
+ "do"
+ ]
+ },
+ "ToolCallHookAction": {
+ "type": "object",
+ "properties": {
+ "type": {
"type": "string",
- "description": "This is the voice provider that will be used.",
+ "description": "This is the type of action - must be \"tool\"",
"enum": [
- "tavus"
+ "tool"
]
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.",
+ "tool": {
+ "description": "This is the tool to call. To use an existing tool, send `toolId` instead.",
"oneOf": [
{
- "type": "string",
- "enum": [
- "r52da2535a"
- ],
- "title": "Preset Voice Options"
+ "$ref": "#/components/schemas/CreateApiRequestToolDTO",
+ "title": "ApiRequestTool"
},
{
- "type": "string",
- "title": "Tavus Voice ID"
- }
- ]
- },
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
+ "$ref": "#/components/schemas/CreateBashToolDTO",
+ "title": "BashTool"
+ },
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateComputerToolDTO",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDtmfToolDTO",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEndCallToolDTO",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateFunctionToolDTO",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHandoffToolDTO",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMcpToolDTO",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateQueryToolDTO",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmsToolDTO",
+ "title": "SmsTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTextEditorToolDTO",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTransferCallToolDTO",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
]
},
- "personaId": {
- "type": "string",
- "description": "This is the unique identifier for the persona that the replica will use in the conversation."
- },
- "callbackUrl": {
+ "toolId": {
"type": "string",
- "description": "This is the url that will receive webhooks with updates regarding the conversation state."
+ "description": "This is the tool to call. To use a transient tool, send `tool` instead."
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "CustomerSpeechTimeoutOptions": {
+ "type": "object",
+ "properties": {
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "This is the timeout in seconds before action is triggered.\nThe clock starts when the assistant finishes speaking and remains active until the user speaks.\n\n@default 7.5",
+ "minimum": 1,
+ "maximum": 1000
},
- "conversationName": {
- "type": "string",
- "description": "This is the name for the conversation."
+ "triggerMaxCount": {
+ "type": "number",
+ "description": "This is the maximum number of times the hook will trigger in a call.\n\n@default 3",
+ "minimum": 1,
+ "maximum": 10
},
- "conversationalContext": {
+ "triggerResetMode": {
+ "type": "object",
+ "description": "This is whether the counter for hook trigger resets the user speaks.\n\n@default never"
+ }
+ },
+ "required": [
+ "timeoutSeconds"
+ ]
+ },
+ "CallHookCustomerSpeechTimeout": {
+ "type": "object",
+ "properties": {
+ "on": {
"type": "string",
- "description": "This is the context that will be appended to any context provided in the persona, if one is provided."
+ "description": "Must be either \"customer.speech.timeout\" or match the pattern \"customer.speech.timeout[property=value]\"",
+ "maxLength": 1000
},
- "customGreeting": {
- "type": "string",
- "description": "This is the custom greeting that the replica will give once a participant joines the conversation."
+ "do": {
+ "type": "array",
+ "description": "This is the set of actions to perform when the hook triggers",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SayHookAction",
+ "title": "SayHookAction"
+ },
+ {
+ "$ref": "#/components/schemas/ToolCallHookAction",
+ "title": "ToolCallHookAction"
+ },
+ {
+ "$ref": "#/components/schemas/MessageAddHookAction",
+ "title": "MessageAddHookAction"
+ }
+ ]
+ }
},
- "properties": {
- "description": "These are optional properties used to customize the conversation.",
+ "options": {
+ "description": "This is the set of filters that must match for the hook to trigger",
"allOf": [
{
- "$ref": "#/components/schemas/TavusConversationProperties"
+ "$ref": "#/components/schemas/CustomerSpeechTimeoutOptions"
}
]
},
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
- {
- "$ref": "#/components/schemas/FallbackPlan"
- }
- ]
+ "name": {
+ "type": "string",
+ "description": "This is the name of the hook, it can be set by the user to identify the hook.\nIf no name is provided, the hook will be auto generated as UUID.\n\n@default UUID",
+ "maxLength": 1000
}
},
"required": [
- "provider",
- "voiceId"
+ "on",
+ "do"
]
},
- "VapiVoice": {
+ "CallHookModelResponseTimeout": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
+ "on": {
"type": "string",
- "description": "This is the voice provider that will be used.",
+ "description": "This is the event that triggers this hook",
"enum": [
- "vapi"
- ]
+ "model.response.timeout"
+ ],
+ "maxLength": 1000
},
- "voiceId": {
+ "do": {
+ "type": "array",
+ "description": "This is the set of actions to perform when the hook triggers",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SayHookAction",
+ "title": "SayHookAction"
+ },
+ {
+ "$ref": "#/components/schemas/ToolCallHookAction",
+ "title": "ToolCallHookAction"
+ },
+ {
+ "$ref": "#/components/schemas/MessageAddHookAction",
+ "title": "MessageAddHookAction"
+ }
+ ]
+ }
+ }
+ },
+ "required": [
+ "on",
+ "do"
+ ]
+ },
+ "AIEdgeCondition": {
+ "type": "object",
+ "properties": {
+ "type": {
"type": "string",
- "description": "The voices provided by Vapi",
"enum": [
- "Elliot",
- "Kylie",
- "Rohan",
- "Lily",
- "Savannah",
- "Hana",
- "Neha",
- "Cole",
- "Harry",
- "Paige",
- "Spencer"
+ "ai"
]
},
- "speed": {
- "type": "number",
- "description": "This is the speed multiplier that will be used.\n\n@default 1",
- "minimum": 0.25,
- "maximum": 2,
- "default": 1
- },
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
+ "prompt": {
+ "type": "string",
+ "description": "This is the prompt for the AI edge condition. It should evaluate to a boolean.",
+ "maxLength": 1000
+ }
+ },
+ "required": [
+ "type",
+ "prompt"
+ ]
+ },
+ "Edge": {
+ "type": "object",
+ "properties": {
+ "condition": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/AIEdgeCondition",
+ "title": "AIEdgeCondition"
}
]
},
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
- {
- "$ref": "#/components/schemas/FallbackPlan"
- }
- ]
+ "from": {
+ "type": "string",
+ "maxLength": 80
+ },
+ "to": {
+ "type": "string",
+ "maxLength": 80
+ },
+ "metadata": {
+ "type": "object",
+ "description": "This is for metadata you want to store on the edge."
}
},
"required": [
- "provider",
- "voiceId"
+ "from",
+ "to"
]
},
- "InworldVoice": {
+ "RecordingConsentPlanStayOnLine": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
+ "message": {
"type": "string",
- "description": "This is the voice provider that will be used.",
- "enum": [
- "inworld"
+ "description": "This is the message asking for consent to record the call.\nIf the type is `stay-on-line`, the message should ask the user to hang up if they do not consent.\nIf the type is `verbal`, the message should ask the user to verbally consent or decline.",
+ "maxLength": 1000,
+ "examples": [
+ "For quality purposes, this call may be recorded. Please stay on the line if you agree or end the call if you do not consent.",
+ "This call may be recorded for quality and training purposes. Say \"I agree\" if you consent to being recorded, or \"I disagree\" if you do not consent."
]
},
- "voiceId": {
- "type": "string",
- "description": "Available voices by language:\n• en: Alex, Ashley, Craig, Deborah, Dennis, Edward, Elizabeth, Hades, Julia, Pixie, Mark, Olivia, Priya, Ronald, Sarah, Shaun, Theodore, Timothy, Wendy, Dominus\n• zh: Yichen, Xiaoyin, Xinyi, Jing\n• nl: Erik, Katrien, Lennart, Lore\n• fr: Alain, Hélène, Mathieu, Étienne\n• de: Johanna, Josef\n• it: Gianni, Orietta\n• ja: Asuka, Satoshi\n• ko: Hyunwoo, Minji, Seojun, Yoona\n• pl: Szymon, Wojciech\n• pt: Heitor, Maitê\n• es: Diego, Lupita, Miguel, Rafael",
- "maxLength": 120,
- "title": "Inworld Voice ID",
- "enum": [
- "Alex",
- "Ashley",
- "Craig",
- "Deborah",
- "Dennis",
- "Edward",
- "Elizabeth",
- "Hades",
- "Julia",
- "Pixie",
- "Mark",
- "Olivia",
- "Priya",
- "Ronald",
- "Sarah",
- "Shaun",
- "Theodore",
- "Timothy",
- "Wendy",
- "Dominus",
- "Yichen",
- "Xiaoyin",
- "Xinyi",
- "Jing",
- "Erik",
- "Katrien",
- "Lennart",
- "Lore",
- "Alain",
- "Hélène",
- "Mathieu",
- "Étienne",
- "Johanna",
- "Josef",
- "Gianni",
- "Orietta",
- "Asuka",
- "Satoshi",
- "Hyunwoo",
- "Minji",
- "Seojun",
- "Yoona",
- "Szymon",
- "Wojciech",
- "Heitor",
- "Maitê",
- "Diego",
- "Lupita",
- "Miguel",
- "Rafael"
- ],
- "example": "Alex"
- },
- "model": {
- "type": "string",
- "description": "This is the model that will be used.",
- "enum": [
- "inworld-tts-1"
- ],
- "default": "inworld-tts-1"
- },
- "languageCode": {
- "type": "string",
- "description": "Language code for Inworld TTS synthesis",
- "default": "en",
- "enum": [
- "en",
- "zh",
- "ko",
- "nl",
- "fr",
- "es",
- "ja",
- "de",
- "it",
- "pl",
- "pt"
- ]
- },
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
+ "voice": {
+ "description": "This is the voice to use for the consent message. If not specified, inherits from the assistant's voice.\nUse a different voice for the consent message for a better user experience.",
+ "oneOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
- }
- ]
- },
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
+ "$ref": "#/components/schemas/AzureVoice",
+ "title": "AzureVoice"
+ },
{
- "$ref": "#/components/schemas/FallbackPlan"
+ "$ref": "#/components/schemas/CartesiaVoice",
+ "title": "CartesiaVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CustomVoice",
+ "title": "CustomVoice"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramVoice",
+ "title": "DeepgramVoice"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsVoice",
+ "title": "ElevenLabsVoice"
+ },
+ {
+ "$ref": "#/components/schemas/HumeVoice",
+ "title": "HumeVoice"
+ },
+ {
+ "$ref": "#/components/schemas/LMNTVoice",
+ "title": "LMNTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/NeuphonicVoice",
+ "title": "NeuphonicVoice"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoice",
+ "title": "OpenAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/PlayHTVoice",
+ "title": "PlayHTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/WellSaidVoice",
+ "title": "WellSaidVoice"
+ },
+ {
+ "$ref": "#/components/schemas/RimeAIVoice",
+ "title": "RimeAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SmallestAIVoice",
+ "title": "SmallestAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/TavusVoice",
+ "title": "TavusVoice"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoice",
+ "title": "VapiVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SesameVoice",
+ "title": "SesameVoice"
+ },
+ {
+ "$ref": "#/components/schemas/InworldVoice",
+ "title": "InworldVoice"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxVoice",
+ "title": "MinimaxVoice"
}
]
- }
- },
- "required": [
- "provider",
- "voiceId"
- ]
- },
- "MinimaxVoice": {
- "type": "object",
- "properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
- "type": "string",
- "description": "This is the voice provider that will be used.",
- "enum": [
- "minimax"
- ]
- },
- "voiceId": {
- "type": "string",
- "description": "This is the provider-specific ID that will be used. Use a voice from MINIMAX_PREDEFINED_VOICES or a custom cloned voice ID.",
- "title": "This is the Minimax Voice ID"
},
- "model": {
+ "type": {
"type": "string",
- "description": "This is the model that will be used. Options are 'speech-02-hd' and 'speech-02-turbo'.\nspeech-02-hd is optimized for high-fidelity applications like voiceovers and audiobooks.\nspeech-02-turbo is designed for real-time applications with low latency.\n\n@default \"speech-02-turbo\"",
+ "description": "This is the type of recording consent plan. This type assumes consent is granted if the user stays on the line.",
"enum": [
- "speech-02-hd",
- "speech-02-turbo"
+ "stay-on-line"
],
- "example": "speech-02-turbo",
- "default": "speech-02-turbo"
- },
- "emotion": {
- "type": "string",
- "description": "The emotion to use for the voice. If not provided, will use auto-detect mode.\nOptions include: 'happy', 'sad', 'angry', 'fearful', 'surprised', 'disgusted', 'neutral'",
- "example": "happy"
- },
- "pitch": {
- "type": "number",
- "description": "Voice pitch adjustment. Range from -12 to 12 semitones.\n@default 0",
- "minimum": -12,
- "maximum": 12,
- "example": 0,
- "default": 0
- },
- "speed": {
- "type": "number",
- "description": "Voice speed adjustment. Range from 0.5 to 2.0.\n@default 1.0",
- "minimum": 0.5,
- "maximum": 2,
- "example": 1,
- "default": 1
+ "example": "stay-on-line"
},
- "volume": {
+ "waitSeconds": {
"type": "number",
- "description": "Voice volume adjustment. Range from 0.5 to 2.0.\n@default 1.0",
- "minimum": 0.5,
- "maximum": 2,
- "example": 1,
- "default": 1
- },
- "region": {
- "type": "string",
- "description": "The region for Minimax API. Defaults to \"worldwide\".",
- "enum": [
- "worldwide",
- "china"
- ],
- "default": "worldwide"
- },
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChunkPlan"
- }
- ]
- },
- "fallbackPlan": {
- "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
- "allOf": [
- {
- "$ref": "#/components/schemas/FallbackPlan"
- }
- ]
+ "description": "Number of seconds to wait before transferring to the assistant if user stays on the call",
+ "minimum": 1,
+ "maximum": 6,
+ "default": 3,
+ "example": 3
}
},
"required": [
- "provider",
- "voiceId"
+ "message",
+ "type"
]
},
- "FallbackAzureVoice": {
+ "RecordingConsentPlanVerbal": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
+ "message": {
"type": "string",
- "description": "This is the voice provider that will be used.",
- "enum": [
- "azure"
+ "description": "This is the message asking for consent to record the call.\nIf the type is `stay-on-line`, the message should ask the user to hang up if they do not consent.\nIf the type is `verbal`, the message should ask the user to verbally consent or decline.",
+ "maxLength": 1000,
+ "examples": [
+ "For quality purposes, this call may be recorded. Please stay on the line if you agree or end the call if you do not consent.",
+ "This call may be recorded for quality and training purposes. Say \"I agree\" if you consent to being recorded, or \"I disagree\" if you do not consent."
]
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.",
+ "voice": {
+ "description": "This is the voice to use for the consent message. If not specified, inherits from the assistant's voice.\nUse a different voice for the consent message for a better user experience.",
"oneOf": [
{
- "type": "string",
- "enum": [
- "andrew",
- "brian",
- "emma"
- ],
- "title": "Preset Voice Options"
+ "$ref": "#/components/schemas/AzureVoice",
+ "title": "AzureVoice"
},
{
- "type": "string",
- "title": "Azure Voice ID"
+ "$ref": "#/components/schemas/CartesiaVoice",
+ "title": "CartesiaVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CustomVoice",
+ "title": "CustomVoice"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramVoice",
+ "title": "DeepgramVoice"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsVoice",
+ "title": "ElevenLabsVoice"
+ },
+ {
+ "$ref": "#/components/schemas/HumeVoice",
+ "title": "HumeVoice"
+ },
+ {
+ "$ref": "#/components/schemas/LMNTVoice",
+ "title": "LMNTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/NeuphonicVoice",
+ "title": "NeuphonicVoice"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoice",
+ "title": "OpenAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/PlayHTVoice",
+ "title": "PlayHTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/WellSaidVoice",
+ "title": "WellSaidVoice"
+ },
+ {
+ "$ref": "#/components/schemas/RimeAIVoice",
+ "title": "RimeAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SmallestAIVoice",
+ "title": "SmallestAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/TavusVoice",
+ "title": "TavusVoice"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoice",
+ "title": "VapiVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SesameVoice",
+ "title": "SesameVoice"
+ },
+ {
+ "$ref": "#/components/schemas/InworldVoice",
+ "title": "InworldVoice"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxVoice",
+ "title": "MinimaxVoice"
}
]
},
- "speed": {
- "type": "number",
- "description": "This is the speed multiplier that will be used.",
- "minimum": 0.5,
- "maximum": 2
+ "type": {
+ "type": "string",
+ "description": "This is the type of recording consent plan. This type assumes consent is granted if the user verbally consents or declines.",
+ "enum": [
+ "verbal"
+ ],
+ "example": "verbal"
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChunkPlan"
- }
- ]
+ "declineTool": {
+ "type": "object",
+ "description": "Tool to execute if user verbally declines recording consent"
+ },
+ "declineToolId": {
+ "type": "string",
+ "description": "ID of existing tool to execute if user verbally declines recording consent"
}
},
"required": [
- "provider",
- "voiceId"
+ "message",
+ "type"
]
},
- "FallbackCartesiaVoice": {
+ "SecurityFilterBase": {
+ "type": "object",
+ "properties": {}
+ },
+ "SecurityFilterPlan": {
"type": "object",
"properties": {
- "cachingEnabled": {
+ "enabled": {
"type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
- "type": "string",
- "description": "This is the voice provider that will be used.",
- "enum": [
- "cartesia"
- ]
+ "description": "Whether the security filter is enabled.\n@default false",
+ "default": false
},
- "voiceId": {
- "type": "string",
- "description": "The ID of the particular voice you want to use."
+ "filters": {
+ "description": "Array of security filter types to apply.\nIf array is not empty, only those security filters are run.",
+ "example": "[{ type: \"sql-injection\" }, { type: \"xss\" }]",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/SecurityFilterBase"
+ }
},
- "model": {
+ "mode": {
"type": "string",
- "description": "This is the model that will be used. This is optional and will default to the correct model for the voiceId.",
+ "description": "Mode of operation when a security threat is detected.\n- 'sanitize': Remove or replace the threatening content\n- 'reject': Replace the entire transcript with replacement text\n- 'replace': Replace threatening patterns with replacement text\n@default 'sanitize'",
"enum": [
- "sonic-2",
- "sonic-english",
- "sonic-multilingual",
- "sonic-preview",
- "sonic"
+ "sanitize",
+ "reject",
+ "replace"
],
- "example": "sonic-english"
+ "default": "sanitize"
},
- "language": {
+ "replacementText": {
"type": "string",
- "description": "This is the language that will be used. This is optional and will default to the correct language for the voiceId.",
- "enum": [
- "en",
- "de",
- "es",
- "fr",
- "ja",
- "pt",
- "zh",
- "hi",
- "it",
- "ko",
- "nl",
- "pl",
- "ru",
- "sv",
- "tr"
- ],
- "example": "en"
+ "description": "Text to use when replacing filtered content.\n@default '[FILTERED]'",
+ "default": "[FILTERED]"
+ }
+ }
+ },
+ "CompliancePlan": {
+ "type": "object",
+ "properties": {
+ "hipaaEnabled": {
+ "type": "boolean",
+ "description": "When this is enabled, logs, recordings, and transcriptions will be stored in HIPAA-compliant storage. Defaults to false. Only HIPAA-compliant providers will be available for LLM, Voice, and Transcriber respectively. This setting is only honored if the organization is on an Enterprise subscription or has purchased the HIPAA add-on."
},
- "experimentalControls": {
- "description": "Experimental controls for Cartesia voice generation",
+ "pciEnabled": {
+ "type": "boolean",
+ "description": "When this is enabled, the user will be restricted to use PCI-compliant providers, and no logs or transcripts are stored.\nAt the end of the call, you will receive an end-of-call-report message to store on your server. Defaults to false.",
+ "example": {
+ "pciEnabled": false
+ }
+ },
+ "securityFilterPlan": {
+ "description": "This is the security filter plan for the assistant. It allows filtering of transcripts for security threats before sending to LLM.",
"allOf": [
{
- "$ref": "#/components/schemas/CartesiaExperimentalControls"
+ "$ref": "#/components/schemas/SecurityFilterPlan"
}
]
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
+ "recordingConsentPlan": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/RecordingConsentPlanStayOnLine",
+ "title": "RecordingConsentStayOnLinePlan"
+ },
+ {
+ "$ref": "#/components/schemas/RecordingConsentPlanVerbal",
+ "title": "RecordingConsentPlanVerbal"
}
- ]
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "stay-on-line": "#/components/schemas/RecordingConsentPlanStayOnLine",
+ "verbal": "#/components/schemas/RecordingConsentPlanVerbal"
+ }
+ }
}
- },
- "required": [
- "provider",
- "voiceId"
- ]
+ }
},
- "FallbackCustomVoice": {
+ "StructuredDataPlan": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
+ "messages": {
+ "description": "These are the messages used to generate the structured data.\n\n@default: ```\n[\n {\n \"role\": \"system\",\n \"content\": \"You are an expert data extractor. You will be given a transcript of a call. Extract structured data per the JSON Schema. DO NOT return anything except the structured data.\\n\\nJson Schema:\\\\n{{schema}}\\n\\nOnly respond with the JSON.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Here is the transcript:\\n\\n{{transcript}}\\n\\n. Here is the ended reason of the call:\\n\\n{{endedReason}}\\n\\n\"\n }\n]```\n\nYou can customize by providing any messages you want.\n\nHere are the template variables available:\n- {{transcript}}: the transcript of the call from `call.artifact.transcript`- {{systemPrompt}}: the system prompt of the call from `assistant.model.messages[type=system].content`- {{messages}}: the messages of the call from `assistant.model.messages`- {{schema}}: the schema of the structured data from `structuredDataPlan.schema`- {{endedReason}}: the ended reason of the call from `call.endedReason`",
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
},
- "provider": {
- "type": "string",
- "description": "This is the voice provider that will be used. Use `custom-voice` for providers that are not natively supported.",
- "enum": [
- "custom-voice"
- ]
+ "enabled": {
+ "type": "boolean",
+ "description": "This determines whether structured data is generated and stored in `call.analysis.structuredData`. Defaults to false.\n\nUsage:\n- If you want to extract structured data, set this to true and provide a `schema`.\n\n@default false"
},
- "server": {
- "description": "This is where the voice request will be sent.\n\nRequest Example:\n\nPOST https://{server.url}\nContent-Type: application/json\n\n{\n \"message\": {\n \"type\": \"voice-request\",\n \"text\": \"Hello, world!\",\n \"sampleRate\": 24000,\n ...other metadata about the call...\n }\n}\n\nResponse Expected: 1-channel 16-bit raw PCM audio at the sample rate specified in the request. Here is how the response will be piped to the transport:\n```\nresponse.on('data', (chunk: Buffer) => {\n outputStream.write(chunk);\n});\n```",
+ "schema": {
+ "description": "This is the schema of the structured data. The output is stored in `call.analysis.structuredData`.\n\nComplete guide on JSON Schema can be found [here](https://ajv.js.org/json-schema.html#json-data-type).",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/JsonSchema"
}
]
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "This is how long the request is tried before giving up. When request times out, `call.analysis.structuredData` will be empty.\n\nUsage:\n- To guarantee the structured data is generated, set this value high. Note, this will delay the end of call report in cases where model is slow to respond.\n\n@default 5 seconds",
+ "minimum": 1,
+ "maximum": 60
+ }
+ }
+ },
+ "StructuredDataMultiPlan": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "This is the key of the structured data plan in the catalog."
+ },
+ "plan": {
+ "description": "This is an individual structured data plan in the catalog.",
"allOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/StructuredDataPlan"
}
]
}
},
"required": [
- "provider",
- "server"
+ "key",
+ "plan"
]
},
- "FallbackDeepgramVoice": {
+ "SuccessEvaluationPlan": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
+ "rubric": {
"type": "string",
- "description": "This is the voice provider that will be used.",
"enum": [
- "deepgram"
- ]
+ "NumericScale",
+ "DescriptiveScale",
+ "Checklist",
+ "Matrix",
+ "PercentageScale",
+ "LikertScale",
+ "AutomaticRubric",
+ "PassFail"
+ ],
+ "description": "This enforces the rubric of the evaluation. The output is stored in `call.analysis.successEvaluation`.\n\nOptions include:\n- 'NumericScale': A scale of 1 to 10.\n- 'DescriptiveScale': A scale of Excellent, Good, Fair, Poor.\n- 'Checklist': A checklist of criteria and their status.\n- 'Matrix': A grid that evaluates multiple criteria across different performance levels.\n- 'PercentageScale': A scale of 0% to 100%.\n- 'LikertScale': A scale of Strongly Agree, Agree, Neutral, Disagree, Strongly Disagree.\n- 'AutomaticRubric': Automatically break down evaluation into several criteria, each with its own score.\n- 'PassFail': A simple 'true' if call passed, 'false' if not.\n\nDefault is 'PassFail'."
},
- "voiceId": {
- "type": "string",
- "description": "This is the provider-specific ID that will be used.",
- "enum": [
- "asteria",
- "luna",
- "stella",
- "athena",
- "hera",
- "orion",
- "arcas",
- "perseus",
- "angus",
- "orpheus",
- "helios",
- "zeus",
- "thalia",
- "andromeda",
- "helena",
- "apollo",
- "arcas",
- "aries",
- "amalthea",
- "asteria",
- "athena",
- "atlas",
- "aurora",
- "callista",
- "cora",
- "cordelia",
- "delia",
- "draco",
- "electra",
- "harmonia",
- "hera",
- "hermes",
- "hyperion",
- "iris",
- "janus",
- "juno",
- "jupiter",
- "luna",
- "mars",
- "minerva",
- "neptune",
- "odysseus",
- "ophelia",
- "orion",
- "orpheus",
- "pandora",
- "phoebe",
- "pluto",
- "saturn",
- "selene",
- "theia",
- "vesta",
- "zeus"
- ],
- "title": "This is the Deepgram Voice ID"
- },
- "model": {
- "type": "string",
- "description": "This is the model that will be used. Defaults to 'aura-2' when not specified.",
- "enum": [
- "aura",
- "aura-2"
- ],
- "example": "aura-2"
+ "messages": {
+ "description": "These are the messages used to generate the success evaluation.\n\n@default: ```\n[\n {\n \"role\": \"system\",\n \"content\": \"You are an expert call evaluator. You will be given a transcript of a call and the system prompt of the AI participant. Determine if the call was successful based on the objectives inferred from the system prompt. DO NOT return anything except the result.\\n\\nRubric:\\\\n{{rubric}}\\n\\nOnly respond with the result.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Here is the transcript:\\n\\n{{transcript}}\\n\\n\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Here was the system prompt of the call:\\n\\n{{systemPrompt}}\\n\\n. Here is the ended reason of the call:\\n\\n{{endedReason}}\\n\\n\"\n }\n]```\n\nYou can customize by providing any messages you want.\n\nHere are the template variables available:\n- {{transcript}}: the transcript of the call from `call.artifact.transcript`- {{systemPrompt}}: the system prompt of the call from `assistant.model.messages[type=system].content`- {{messages}}: the messages of the call from `assistant.model.messages`- {{rubric}}: the rubric of the success evaluation from `successEvaluationPlan.rubric`- {{endedReason}}: the ended reason of the call from `call.endedReason`",
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
},
- "mipOptOut": {
+ "enabled": {
"type": "boolean",
- "description": "If set to true, this will add mip_opt_out=true as a query parameter of all API requests. See https://developers.deepgram.com/docs/the-deepgram-model-improvement-partnership-program#want-to-opt-out\n\nThis will only be used if you are using your own Deepgram API key.\n\n@default false",
- "example": false,
- "default": false
+ "description": "This determines whether a success evaluation is generated and stored in `call.analysis.successEvaluation`. Defaults to true.\n\nUsage:\n- If you want to disable the success evaluation, set this to false.\n\n@default true"
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "This is how long the request is tried before giving up. When request times out, `call.analysis.successEvaluation` will be empty.\n\nUsage:\n- To guarantee the success evaluation is generated, set this value high. Note, this will delay the end of call report in cases where model is slow to respond.\n\n@default 5 seconds",
+ "minimum": 1,
+ "maximum": 60
+ }
+ }
+ },
+ "AnalysisPlan": {
+ "type": "object",
+ "properties": {
+ "minMessagesThreshold": {
+ "type": "number",
+ "description": "The minimum number of messages required to run the analysis plan.\nIf the number of messages is less than this, analysis will be skipped.\n\n@default 2",
+ "deprecated": true,
+ "minimum": 0
+ },
+ "summaryPlan": {
+ "description": "This is the plan for generating the summary of the call. This outputs to `call.analysis.summary`.",
+ "deprecated": true,
"allOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/SummaryPlan"
+ }
+ ]
+ },
+ "structuredDataPlan": {
+ "description": "This is the plan for generating the structured data from the call. This outputs to `call.analysis.structuredData`.",
+ "deprecated": true,
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StructuredDataPlan"
+ }
+ ]
+ },
+ "structuredDataMultiPlan": {
+ "description": "This is an array of structured data plan catalogs. Each entry includes a `key` and a `plan` for generating the structured data from the call. This outputs to `call.analysis.structuredDataMulti`.",
+ "deprecated": true,
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/StructuredDataMultiPlan"
+ }
+ },
+ "successEvaluationPlan": {
+ "description": "This is the plan for generating the success evaluation of the call. This outputs to `call.analysis.successEvaluation`.",
+ "deprecated": true,
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SuccessEvaluationPlan"
}
]
+ },
+ "outcomeIds": {
+ "description": "This is an array of outcome UUIDs to be calculated during analysis.\nThe outcomes will be calculated and stored in `call.analysis.outcomes`.",
+ "deprecated": true,
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
}
- },
- "required": [
- "provider",
- "voiceId"
- ]
+ }
},
- "FallbackElevenLabsVoice": {
+ "TranscriptPlan": {
"type": "object",
"properties": {
- "cachingEnabled": {
+ "enabled": {
"type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
+ "description": "This determines whether the transcript is stored in `call.artifact.transcript`. Defaults to true.\n\n@default true",
+ "example": true
},
- "provider": {
+ "assistantName": {
"type": "string",
- "description": "This is the voice provider that will be used.",
+ "description": "This is the name of the assistant in the transcript. Defaults to 'AI'.\n\nUsage:\n- If you want to change the name of the assistant in the transcript, set this. Example, here is what the transcript would look like with `assistantName` set to 'Buyer':\n```\nUser: Hello, how are you?\nBuyer: I'm fine.\nUser: Do you want to buy a car?\nBuyer: No.\n```\n\n@default 'AI'"
+ },
+ "userName": {
+ "type": "string",
+ "description": "This is the name of the user in the transcript. Defaults to 'User'.\n\nUsage:\n- If you want to change the name of the user in the transcript, set this. Example, here is what the transcript would look like with `userName` set to 'Seller':\n```\nSeller: Hello, how are you?\nAI: I'm fine.\nSeller: Do you want to buy a car?\nAI: No.\n```\n\n@default 'User'"
+ }
+ }
+ },
+ "ComplianceOverride": {
+ "type": "object",
+ "properties": {
+ "forceStoreOnHipaaEnabled": {
+ "type": "boolean",
+ "description": "Force storage for this output under HIPAA. Only enable if output contains no sensitive data.",
+ "example": false
+ }
+ }
+ },
+ "CreateStructuredOutputDTO": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of structured output.\n\n- 'ai': Uses an LLM to extract structured data from the conversation (default).\n- 'regex': Uses a regex pattern to extract data from the transcript without an LLM.\n\nDefaults to 'ai' if not specified.",
"enum": [
- "11labs"
- ]
+ "ai",
+ "regex"
+ ],
+ "default": "ai"
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used. Ensure the Voice is present in your 11Labs Voice Library.",
+ "regex": {
+ "type": "string",
+ "description": "This is the regex pattern to match against the transcript.\n\nOnly used when type is 'regex'. Supports both raw patterns (e.g. '\\d+') and\nregex literal format (e.g. '/\\d+/gi'). Uses RE2 syntax for safety.\n\nThe result depends on the schema type:\n- boolean: true if the pattern matches, false otherwise\n- string: the first match or first capture group\n- number/integer: the first match parsed as a number\n- array: all matches",
+ "minLength": 1,
+ "maxLength": 1000
+ },
+ "model": {
+ "description": "This is the model that will be used to extract the structured output.\n\nTo provide your own custom system and user prompts for structured output extraction, populate the messages array with your system and user messages. You can specify liquid templating in your system and user messages.\nBetween the system or user messages, you must reference either 'transcript' or 'messages' with the `{{}}` syntax to access the conversation history.\nBetween the system or user messages, you must reference a variation of the structured output with the `{{}}` syntax to access the structured output definition.\ni.e.:\n`{{structuredOutput}}`\n`{{structuredOutput.name}}`\n`{{structuredOutput.description}}`\n`{{structuredOutput.schema}}`\n\nIf model is not specified, GPT-4.1 will be used by default for extraction, utilizing default system and user prompts.\nIf messages or required fields are not specified, the default system and user prompts will be used.",
"oneOf": [
{
- "type": "string",
- "enum": [
- "burt",
- "marissa",
- "andrea",
- "sarah",
- "phillip",
- "steve",
- "joseph",
- "myra",
- "paula",
- "ryan",
- "drew",
- "paul",
- "mrb",
- "matilda",
- "mark"
- ],
- "title": "Preset Voice Options"
+ "$ref": "#/components/schemas/WorkflowOpenAIModel",
+ "title": "WorkflowOpenAIModel"
},
{
- "type": "string",
- "title": "11Labs Voice ID"
+ "$ref": "#/components/schemas/WorkflowAnthropicModel",
+ "title": "WorkflowAnthropicModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowAnthropicBedrockModel",
+ "title": "WorkflowAnthropicBedrockModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowGoogleModel",
+ "title": "WorkflowGoogleModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowCustomModel",
+ "title": "WorkflowCustomModel"
}
]
},
- "stability": {
- "type": "number",
- "description": "Defines the stability for voice settings.",
- "minimum": 0,
- "maximum": 1,
- "example": 0.5
- },
- "similarityBoost": {
- "type": "number",
- "description": "Defines the similarity boost for voice settings.",
- "minimum": 0,
- "maximum": 1,
- "example": 0.75
- },
- "style": {
- "type": "number",
- "description": "Defines the style for voice settings.",
- "minimum": 0,
- "maximum": 1,
- "example": 0
+ "compliancePlan": {
+ "description": "Compliance configuration for this output. Only enable overrides if no sensitive data will be stored.",
+ "example": {
+ "forceStoreOnHipaaEnabled": false
+ },
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ComplianceOverride"
+ }
+ ]
},
- "useSpeakerBoost": {
- "type": "boolean",
- "description": "Defines the use speaker boost for voice settings.",
- "example": false
+ "name": {
+ "type": "string",
+ "description": "This is the name of the structured output.",
+ "minLength": 1,
+ "maxLength": 40
},
- "speed": {
- "type": "number",
- "description": "Defines the speed for voice settings.",
- "minimum": 0.7,
- "maximum": 1.2,
- "example": 0.9
+ "schema": {
+ "description": "This is the JSON Schema definition for the structured output.\n\nThis is required when creating a structured output. Defines the structure and validation rules for the data that will be extracted. Supports all JSON Schema features including:\n- Objects and nested properties\n- Arrays and array validation\n- String, number, boolean, and null types\n- Enums and const values\n- Validation constraints (min/max, patterns, etc.)\n- Composition with allOf, anyOf, oneOf",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/JsonSchema"
+ }
+ ]
},
- "optimizeStreamingLatency": {
- "type": "number",
- "description": "Defines the optimize streaming latency for voice settings. Defaults to 3.",
- "minimum": 0,
- "maximum": 4,
- "example": 3
+ "description": {
+ "type": "string",
+ "description": "This is the description of what the structured output extracts.\n\nUse this to provide context about what data will be extracted and how it will be used."
},
- "enableSsmlParsing": {
- "type": "boolean",
- "description": "This enables the use of https://elevenlabs.io/docs/speech-synthesis/prompting#pronunciation. Defaults to false to save latency.\n\n@default false",
- "example": false
+ "assistantIds": {
+ "description": "These are the assistant IDs that this structured output is linked to.\n\nWhen linked to assistants, this structured output will be available for extraction during those assistant's calls.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
},
- "autoMode": {
- "type": "boolean",
- "description": "Defines the auto mode for voice settings. Defaults to false.",
- "example": false
+ "workflowIds": {
+ "description": "These are the workflow IDs that this structured output is linked to.\n\nWhen linked to workflows, this structured output will be available for extraction during those workflow's execution.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ },
+ "required": [
+ "name",
+ "schema"
+ ]
+ },
+ "ScorecardMetric": {
+ "type": "object",
+ "properties": {
+ "structuredOutputId": {
+ "type": "string",
+ "description": "This is the unique identifier for the structured output that will be used to evaluate the scorecard.\nThe structured output must be of type number or boolean only for now."
},
- "model": {
+ "conditions": {
+ "description": "These are the conditions that will be used to evaluate the scorecard.\nEach condition will have a comparator, value, and points that will be used to calculate the final score.\nThe points will be added to the overall score if the condition is met.\nThe overall score will be normalized to a 100 point scale to ensure uniformity across different scorecards.",
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
+ }
+ },
+ "required": [
+ "structuredOutputId",
+ "conditions"
+ ]
+ },
+ "CreateScorecardDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
"type": "string",
- "description": "This is the model that will be used. Defaults to 'eleven_turbo_v2' if not specified.",
- "enum": [
- "eleven_multilingual_v2",
- "eleven_turbo_v2",
- "eleven_turbo_v2_5",
- "eleven_flash_v2",
- "eleven_flash_v2_5",
- "eleven_monolingual_v1"
- ],
- "example": "eleven_turbo_v2_5"
+ "description": "This is the name of the scorecard. It is only for user reference and will not be used for any evaluation.",
+ "maxLength": 80
},
- "language": {
+ "description": {
"type": "string",
- "description": "This is the language (ISO 639-1) that is enforced for the model. Currently only Turbo v2.5 supports language enforcement. For other models, an error will be returned if language code is provided."
+ "description": "This is the description of the scorecard. It is only for user reference and will not be used for any evaluation.",
+ "maxLength": 500
},
- "pronunciationDictionaryLocators": {
- "description": "This is the pronunciation dictionary locators to use.",
+ "metrics": {
+ "description": "These are the metrics that will be used to evaluate the scorecard.\nEach metric will have a set of conditions and points that will be used to generate the score.",
"type": "array",
"items": {
- "$ref": "#/components/schemas/ElevenLabsPronunciationDictionaryLocator"
+ "$ref": "#/components/schemas/ScorecardMetric"
}
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChunkPlan"
- }
- ]
+ "assistantIds": {
+ "description": "These are the assistant IDs that this scorecard is linked to.\nWhen linked to assistants, this scorecard will be available for evaluation during those assistants' calls.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
}
},
"required": [
- "provider",
- "voiceId"
+ "metrics"
]
},
- "FallbackHumeVoice": {
+ "ArtifactPlan": {
"type": "object",
"properties": {
- "cachingEnabled": {
+ "recordingEnabled": {
"type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
+ "description": "This determines whether assistant's calls are recorded. Defaults to true.\n\nUsage:\n- If you don't want to record the calls, set this to false.\n- If you want to record the calls when `assistant.hipaaEnabled` (deprecated) or `assistant.compliancePlan.hipaaEnabled` is enabled, explicitly set this to true and make sure to provide S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nYou can find the recording at `call.artifact.recordingUrl` and `call.artifact.stereoRecordingUrl` after the call is ended.\n\n@default true",
+ "example": true
},
- "provider": {
+ "recordingFormat": {
"type": "string",
- "description": "This is the voice provider that will be used.",
+ "description": "This determines the format of the recording. Defaults to `wav;l16`.\n\n@default 'wav;l16'",
"enum": [
- "hume"
+ "wav;l16",
+ "mp3"
]
},
- "model": {
- "type": "string",
- "description": "This is the model that will be used.",
- "enum": [
- "octave"
- ],
- "example": "octave"
+ "recordingUseCustomStorageEnabled": {
+ "type": "boolean",
+ "description": "This determines whether to use custom storage (S3 or GCP) for call recordings when storage credentials are configured.\n\nWhen set to false, recordings will be stored on Vapi's storage instead of your custom storage, even if you have custom storage credentials configured.\n\nUsage:\n- Set to false if you have custom storage configured but want to store recordings on Vapi's storage for this assistant.\n- Set to true (or leave unset) to use your custom storage for recordings when available.\n\n@default true",
+ "example": true
},
- "voiceId": {
- "type": "string",
- "description": "The ID of the particular voice you want to use."
+ "videoRecordingEnabled": {
+ "type": "boolean",
+ "description": "This determines whether the video is recorded during the call. Defaults to false. Only relevant for `webCall` type.\n\nYou can find the video recording at `call.artifact.videoRecordingUrl` after the call is ended.\n\n@default false",
+ "example": false
},
- "isCustomHumeVoice": {
+ "fullMessageHistoryEnabled": {
"type": "boolean",
- "description": "Indicates whether the chosen voice is a preset Hume AI voice or a custom voice.",
+ "description": "This determines whether the artifact contains the full message history, even after handoff context engineering. Defaults to false.",
"example": false
},
- "description": {
+ "pcapEnabled": {
+ "type": "boolean",
+ "description": "This determines whether the SIP packet capture is enabled. Defaults to true. Only relevant for `phone` type calls where phone number's provider is `vapi` or `byo-phone-number`.\n\nYou can find the packet capture at `call.artifact.pcapUrl` after the call is ended.\n\n@default true",
+ "example": true
+ },
+ "pcapS3PathPrefix": {
"type": "string",
- "description": "Natural language instructions describing how the synthesized speech should sound, including but not limited to tone, intonation, pacing, and accent (e.g., 'a soft, gentle voice with a strong British accent').\n\nIf a Voice is specified in the request, this description serves as acting instructions.\nIf no Voice is specified, a new voice is generated based on this description."
+ "description": "This is the path where the SIP packet capture will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nIf credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it.\n\nUsage:\n- If you want to upload the packet capture to a specific path, set this to the path. Example: `/my-assistant-captures`.\n- If you want to upload the packet capture to the root of the bucket, set this to `/`.\n\n@default '/'",
+ "example": "/pcaps"
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "pcapUseCustomStorageEnabled": {
+ "type": "boolean",
+ "description": "This determines whether to use custom storage (S3 or GCP) for SIP packet captures when storage credentials are configured.\n\nWhen set to false, packet captures will be stored on Vapi's storage instead of your custom storage, even if you have custom storage credentials configured.\n\nUsage:\n- Set to false if you have custom storage configured but want to store packet captures on Vapi's storage for this assistant.\n- Set to true (or leave unset) to use your custom storage for packet captures when available.\n\n@default true",
+ "example": true
+ },
+ "loggingEnabled": {
+ "type": "boolean",
+ "description": "This determines whether the call logs are enabled. Defaults to true.\n\n@default true",
+ "example": true
+ },
+ "loggingUseCustomStorageEnabled": {
+ "type": "boolean",
+ "description": "This determines whether to use custom storage (S3 or GCP) for call logs when storage credentials are configured.\n\nWhen set to false, logs will be stored on Vapi's storage instead of your custom storage, even if you have custom storage credentials configured.\n\nUsage:\n- Set to false if you have custom storage configured but want to store logs on Vapi's storage for this assistant.\n- Set to true (or leave unset) to use your custom storage for logs when available.\n\n@default true",
+ "example": true
+ },
+ "transcriptPlan": {
+ "description": "This is the plan for `call.artifact.transcript`. To disable, set `transcriptPlan.enabled` to false.",
"allOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/TranscriptPlan"
}
]
- }
- },
- "required": [
- "provider",
- "voiceId"
- ]
- },
- "FallbackLMNTVoice": {
- "type": "object",
- "properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
},
- "provider": {
+ "recordingPath": {
"type": "string",
- "description": "This is the voice provider that will be used.",
- "enum": [
- "lmnt"
- ]
+ "description": "This is the path where the recording will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nIf credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it.\n\nUsage:\n- If you want to upload the recording to a specific path, set this to the path. Example: `/my-assistant-recordings`.\n- If you want to upload the recording to the root of the bucket, set this to `/`.\n\n@default '/'"
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.",
- "oneOf": [
- {
- "type": "string",
- "enum": [
- "amy",
- "ansel",
- "autumn",
- "ava",
- "brandon",
- "caleb",
- "cassian",
- "chloe",
- "dalton",
- "daniel",
- "dustin",
- "elowen",
- "evander",
- "huxley",
- "james",
- "juniper",
- "kennedy",
- "lauren",
- "leah",
- "lily",
- "lucas",
- "magnus",
- "miles",
- "morgan",
- "natalie",
- "nathan",
- "noah",
- "nyssa",
- "oliver",
- "paige",
- "ryan",
- "sadie",
- "sophie",
- "stella",
- "terrence",
- "tyler",
- "vesper",
- "violet",
- "warrick",
- "zain",
- "zeke",
- "zoe"
- ],
- "title": "Preset Voice Options"
- },
- {
- "type": "string",
- "title": "LMNT Voice ID"
- }
- ]
+ "structuredOutputIds": {
+ "description": "This is an array of structured output IDs to be calculated during the call.\nThe outputs will be extracted and stored in `call.artifact.structuredOutputs` after the call is ended.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
},
- "speed": {
+ "structuredOutputs": {
+ "description": "This is an array of transient structured outputs to be calculated during the call.\nThe outputs will be extracted and stored in `call.artifact.structuredOutputs` after the call is ended.\nUse this to provide inline structured output configurations instead of referencing existing ones via structuredOutputIds.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/CreateStructuredOutputDTO"
+ }
+ },
+ "scorecardIds": {
+ "description": "This is an array of scorecard IDs that will be evaluated based on the structured outputs extracted during the call.\nThe scorecards will be evaluated and the results will be stored in `call.artifact.scorecards` after the call has ended.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "scorecards": {
+ "description": "This is the array of scorecards that will be evaluated based on the structured outputs extracted during the call.\nThe scorecards will be evaluated and the results will be stored in `call.artifact.scorecards` after the call has ended.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/CreateScorecardDTO"
+ }
+ },
+ "loggingPath": {
+ "type": "string",
+ "description": "This is the path where the call logs will be uploaded. This is only used if you have provided S3 or GCP credentials on the Provider Credentials page in the Dashboard.\n\nIf credential.s3PathPrefix or credential.bucketPlan.path is set, this will append to it.\n\nUsage:\n- If you want to upload the call logs to a specific path, set this to the path. Example: `/my-assistant-logs`.\n- If you want to upload the call logs to the root of the bucket, set this to `/`.\n\n@default '/'"
+ }
+ }
+ },
+ "StopSpeakingPlan": {
+ "type": "object",
+ "properties": {
+ "numWords": {
"type": "number",
- "description": "This is the speed multiplier that will be used.",
- "minimum": 0.25,
- "maximum": 2,
- "example": null
+ "description": "This is the number of words that the customer has to say before the assistant will stop talking.\n\nWords like \"stop\", \"actually\", \"no\", etc. will always interrupt immediately regardless of this value.\n\nWords like \"okay\", \"yeah\", \"right\" will never interrupt.\n\nWhen set to 0, `voiceSeconds` is used in addition to the transcriptions to determine the customer has started speaking.\n\nDefaults to 0.\n\n@default 0",
+ "minimum": 0,
+ "maximum": 10,
+ "example": 0
},
- "language": {
- "description": "Two letter ISO 639-1 language code. Use \"auto\" for auto-detection.",
- "enum": [
- "aa",
- "ab",
- "ae",
- "af",
- "ak",
- "am",
- "an",
- "ar",
- "as",
- "av",
- "ay",
- "az",
- "ba",
- "be",
- "bg",
- "bh",
- "bi",
- "bm",
- "bn",
- "bo",
- "br",
- "bs",
- "ca",
- "ce",
- "ch",
- "co",
- "cr",
- "cs",
- "cu",
- "cv",
- "cy",
- "da",
- "de",
- "dv",
- "dz",
- "ee",
- "el",
- "en",
- "eo",
- "es",
- "et",
- "eu",
- "fa",
- "ff",
- "fi",
- "fj",
- "fo",
- "fr",
- "fy",
- "ga",
- "gd",
- "gl",
- "gn",
- "gu",
- "gv",
- "ha",
- "he",
- "hi",
- "ho",
- "hr",
- "ht",
- "hu",
- "hy",
- "hz",
- "ia",
- "id",
- "ie",
- "ig",
- "ii",
- "ik",
- "io",
- "is",
- "it",
- "iu",
- "ja",
- "jv",
- "ka",
- "kg",
- "ki",
- "kj",
- "kk",
- "kl",
- "km",
- "kn",
- "ko",
- "kr",
- "ks",
- "ku",
- "kv",
- "kw",
- "ky",
- "la",
- "lb",
- "lg",
- "li",
- "ln",
- "lo",
- "lt",
- "lu",
- "lv",
- "mg",
- "mh",
- "mi",
- "mk",
- "ml",
- "mn",
- "mr",
- "ms",
- "mt",
- "my",
- "na",
- "nb",
- "nd",
- "ne",
- "ng",
- "nl",
- "nn",
- "no",
- "nr",
- "nv",
- "ny",
- "oc",
- "oj",
- "om",
- "or",
- "os",
- "pa",
- "pi",
- "pl",
- "ps",
- "pt",
- "qu",
- "rm",
- "rn",
- "ro",
- "ru",
- "rw",
- "sa",
- "sc",
- "sd",
- "se",
- "sg",
- "si",
- "sk",
- "sl",
- "sm",
- "sn",
- "so",
- "sq",
- "sr",
- "ss",
- "st",
- "su",
- "sv",
- "sw",
- "ta",
- "te",
- "tg",
- "th",
- "ti",
- "tk",
- "tl",
- "tn",
- "to",
- "tr",
- "ts",
- "tt",
- "tw",
- "ty",
- "ug",
- "uk",
- "ur",
- "uz",
- "ve",
- "vi",
- "vo",
- "wa",
- "wo",
- "xh",
- "yi",
- "yue",
- "yo",
- "za",
- "zh",
- "zu",
- "auto"
- ],
- "example": "en",
- "oneOf": [
- {
- "type": "string",
- "enum": [
- "aa",
- "ab",
- "ae",
- "af",
- "ak",
- "am",
- "an",
- "ar",
- "as",
- "av",
- "ay",
- "az",
- "ba",
- "be",
- "bg",
- "bh",
- "bi",
- "bm",
- "bn",
- "bo",
- "br",
- "bs",
- "ca",
- "ce",
- "ch",
- "co",
- "cr",
- "cs",
- "cu",
- "cv",
- "cy",
- "da",
- "de",
- "dv",
- "dz",
- "ee",
- "el",
- "en",
- "eo",
- "es",
- "et",
- "eu",
- "fa",
- "ff",
- "fi",
- "fj",
- "fo",
- "fr",
- "fy",
- "ga",
- "gd",
- "gl",
- "gn",
- "gu",
- "gv",
- "ha",
- "he",
- "hi",
- "ho",
- "hr",
- "ht",
- "hu",
- "hy",
- "hz",
- "ia",
- "id",
- "ie",
- "ig",
- "ii",
- "ik",
- "io",
- "is",
- "it",
- "iu",
- "ja",
- "jv",
- "ka",
- "kg",
- "ki",
- "kj",
- "kk",
- "kl",
- "km",
- "kn",
- "ko",
- "kr",
- "ks",
- "ku",
- "kv",
- "kw",
- "ky",
- "la",
- "lb",
- "lg",
- "li",
- "ln",
- "lo",
- "lt",
- "lu",
- "lv",
- "mg",
- "mh",
- "mi",
- "mk",
- "ml",
- "mn",
- "mr",
- "ms",
- "mt",
- "my",
- "na",
- "nb",
- "nd",
- "ne",
- "ng",
- "nl",
- "nn",
- "no",
- "nr",
- "nv",
- "ny",
- "oc",
- "oj",
- "om",
- "or",
- "os",
- "pa",
- "pi",
- "pl",
- "ps",
- "pt",
- "qu",
- "rm",
- "rn",
- "ro",
- "ru",
- "rw",
- "sa",
- "sc",
- "sd",
- "se",
- "sg",
- "si",
- "sk",
- "sl",
- "sm",
- "sn",
- "so",
- "sq",
- "sr",
- "ss",
- "st",
- "su",
- "sv",
- "sw",
- "ta",
- "te",
- "tg",
- "th",
- "ti",
- "tk",
- "tl",
- "tn",
- "to",
- "tr",
- "ts",
- "tt",
- "tw",
- "ty",
- "ug",
- "uk",
- "ur",
- "uz",
- "ve",
- "vi",
- "vo",
- "wa",
- "wo",
- "xh",
- "yi",
- "yue",
- "yo",
- "za",
- "zh",
- "zu"
- ],
- "title": "ISO 639-1 Language Code"
- },
- {
- "type": "string",
- "enum": [
- "auto"
- ],
- "title": "Auto-detect"
- }
- ]
- },
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChunkPlan"
- }
- ]
- }
- },
- "required": [
- "provider",
- "voiceId"
- ]
- },
- "FallbackNeuphonicVoice": {
- "type": "object",
- "properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
- "type": "string",
- "description": "This is the voice provider that will be used.",
- "enum": [
- "neuphonic"
- ]
+ "voiceSeconds": {
+ "type": "number",
+ "description": "This is the seconds customer has to speak before the assistant stops talking. This uses the VAD (Voice Activity Detection) spike to determine if the customer has started speaking.\n\nConsiderations:\n- A lower value might be more responsive but could potentially pick up non-speech sounds.\n- A higher value reduces false positives but might slightly delay the detection of speech onset.\n\nThis is only used if `numWords` is set to 0.\n\nDefaults to 0.2\n\n@default 0.2",
+ "minimum": 0,
+ "maximum": 0.5,
+ "example": 0.2
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.",
- "oneOf": [
- {
- "type": "string",
- "enum": [],
- "title": "Preset Voice Options"
- },
- {
- "type": "string",
- "title": "Neuphonic Voice ID"
- }
- ]
+ "backoffSeconds": {
+ "type": "number",
+ "description": "This is the seconds to wait before the assistant will start talking again after being interrupted.\n\nDefaults to 1.\n\n@default 1",
+ "minimum": 0,
+ "maximum": 10,
+ "example": 1
},
- "model": {
- "type": "string",
- "description": "This is the model that will be used. Defaults to 'neu_fast' if not specified.",
- "enum": [
- "neu_hq",
- "neu_fast"
+ "acknowledgementPhrases": {
+ "description": "These are the phrases that will never interrupt the assistant, even if numWords threshold is met.\nThese are typically acknowledgement or backchanneling phrases.",
+ "example": [
+ "i understand",
+ "i see",
+ "i got it",
+ "i hear you",
+ "im listening",
+ "im with you",
+ "right",
+ "okay",
+ "ok",
+ "sure",
+ "alright",
+ "got it",
+ "understood",
+ "yeah",
+ "yes",
+ "uh-huh",
+ "mm-hmm",
+ "gotcha",
+ "mhmm",
+ "ah",
+ "yeah okay",
+ "yeah sure"
],
- "example": "neu_fast"
- },
- "language": {
- "type": "object",
- "description": "This is the language (ISO 639-1) that is enforced for the model.",
- "example": "en"
+ "default": [
+ "i understand",
+ "i see",
+ "i got it",
+ "i hear you",
+ "im listening",
+ "im with you",
+ "right",
+ "okay",
+ "ok",
+ "sure",
+ "alright",
+ "got it",
+ "understood",
+ "yeah",
+ "yes",
+ "uh-huh",
+ "mm-hmm",
+ "gotcha",
+ "mhmm",
+ "ah",
+ "yeah okay",
+ "yeah sure"
+ ],
+ "type": "array",
+ "items": {
+ "type": "string",
+ "maxLength": 240
+ }
},
- "speed": {
- "type": "number",
- "description": "This is the speed multiplier that will be used.",
- "minimum": 0.25,
- "maximum": 2,
- "example": null
- },
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChunkPlan"
- }
- ]
+ "interruptionPhrases": {
+ "description": "These are the phrases that will always interrupt the assistant immediately, regardless of numWords.\nThese are typically phrases indicating disagreement or desire to stop.",
+ "example": [
+ "stop",
+ "shut",
+ "up",
+ "enough",
+ "quiet",
+ "silence",
+ "but",
+ "dont",
+ "not",
+ "no",
+ "hold",
+ "wait",
+ "cut",
+ "pause",
+ "nope",
+ "nah",
+ "nevermind",
+ "never",
+ "bad",
+ "actually"
+ ],
+ "default": [
+ "stop",
+ "shut",
+ "up",
+ "enough",
+ "quiet",
+ "silence",
+ "but",
+ "dont",
+ "not",
+ "no",
+ "hold",
+ "wait",
+ "cut",
+ "pause",
+ "nope",
+ "nah",
+ "nevermind",
+ "never",
+ "bad",
+ "actually"
+ ],
+ "type": "array",
+ "items": {
+ "type": "string",
+ "maxLength": 240
+ }
}
- },
- "required": [
- "provider",
- "voiceId",
- "language"
- ]
+ }
},
- "FallbackOpenAIVoice": {
+ "MonitorPlan": {
"type": "object",
"properties": {
- "cachingEnabled": {
+ "listenEnabled": {
"type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
- "type": "string",
- "description": "This is the voice provider that will be used.",
- "enum": [
- "openai"
- ]
- },
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.\nPlease note that ash, ballad, coral, sage, and verse may only be used with realtime models.",
- "oneOf": [
- {
- "type": "string",
- "enum": [
- "alloy",
- "echo",
- "fable",
- "onyx",
- "nova",
- "shimmer"
- ],
- "title": "Preset Voice Options"
- },
- {
- "type": "string",
- "title": "OpenAI Voice ID"
- }
- ]
+ "description": "This determines whether the assistant's calls allow live listening. Defaults to true.\n\nFetch `call.monitor.listenUrl` to get the live listening URL.\n\n@default true",
+ "example": false
},
- "model": {
- "type": "string",
- "description": "This is the model that will be used for text-to-speech.",
- "enum": [
- "tts-1",
- "tts-1-hd",
- "gpt-4o-mini-tts"
- ]
+ "listenAuthenticationEnabled": {
+ "type": "boolean",
+ "description": "This enables authentication on the `call.monitor.listenUrl`.\n\nIf `listenAuthenticationEnabled` is `true`, the `call.monitor.listenUrl` will require an `Authorization: Bearer ` header.\n\n@default false",
+ "example": false
},
- "instructions": {
- "type": "string",
- "description": "This is a prompt that allows you to control the voice of your generated audio.\nDoes not work with 'tts-1' or 'tts-1-hd' models.",
- "maxLength": 10000
+ "controlEnabled": {
+ "type": "boolean",
+ "description": "This determines whether the assistant's calls allow live control. Defaults to true.\n\nFetch `call.monitor.controlUrl` to get the live control URL.\n\nTo use, send any control message via a POST request to `call.monitor.controlUrl`. Here are the types of controls supported: https://docs.vapi.ai/api-reference/messages/client-inbound-message\n\n@default true",
+ "example": false
},
- "speed": {
- "type": "number",
- "description": "This is the speed multiplier that will be used.",
- "minimum": 0.25,
- "maximum": 4,
- "example": null
+ "controlAuthenticationEnabled": {
+ "type": "boolean",
+ "description": "This enables authentication on the `call.monitor.controlUrl`.\n\nIf `controlAuthenticationEnabled` is `true`, the `call.monitor.controlUrl` will require an `Authorization: Bearer ` header.\n\n@default false",
+ "example": false
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChunkPlan"
- }
- ]
+ "monitorIds": {
+ "description": "This the set of monitor ids that are attached to the assistant.\nThe source of truth for the monitor ids is the assistant_monitor join table.\nThis field can be used for transient assistants and to update assistants with new monitor ids.\n\n@default []",
+ "example": [
+ "123e4567-e89b-12d3-a456-426614174000"
+ ],
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
}
- },
- "required": [
- "provider",
- "voiceId"
- ]
+ }
},
- "FallbackPlayHTVoice": {
+ "KeypadInputPlan": {
"type": "object",
"properties": {
- "cachingEnabled": {
+ "enabled": {
"type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
+ "description": "This keeps track of whether the user has enabled keypad input.\nBy default, it is off.\n\n@default false"
},
- "provider": {
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "This is the time in seconds to wait before processing the input.\nIf the input is not received within this time, the input will be ignored.\nIf set to \"off\", the input will be processed when the user enters a delimiter or immediately if no delimiter is used.\n\n@default 2",
+ "minimum": 0,
+ "maximum": 10
+ },
+ "delimiters": {
"type": "string",
- "description": "This is the voice provider that will be used.",
+ "description": "This is the delimiter(s) that will be used to process the input.\nCan be '#', '*', or an empty array.",
"enum": [
- "playht"
+ "#",
+ "*",
+ ""
]
+ }
+ }
+ },
+ "WorkflowUserEditable": {
+ "type": "object",
+ "properties": {
+ "nodes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ConversationNode",
+ "title": "ConversationNode"
+ },
+ {
+ "$ref": "#/components/schemas/ToolNode",
+ "title": "ToolNode"
+ }
+ ]
+ }
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.",
+ "model": {
+ "description": "This is the model for the workflow.\n\nThis can be overridden at node level using `nodes[n].model`.",
"oneOf": [
{
- "type": "string",
- "enum": [
- "jennifer",
- "melissa",
- "will",
- "chris",
- "matt",
- "jack",
- "ruby",
- "davis",
- "donna",
- "michael"
- ],
- "title": "Preset Voice Options"
+ "$ref": "#/components/schemas/WorkflowOpenAIModel",
+ "title": "WorkflowOpenAIModel"
},
{
- "type": "string",
- "title": "PlayHT Voice ID"
+ "$ref": "#/components/schemas/WorkflowAnthropicModel",
+ "title": "WorkflowAnthropicModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowAnthropicBedrockModel",
+ "title": "WorkflowAnthropicBedrockModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowGoogleModel",
+ "title": "WorkflowGoogleModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowCustomModel",
+ "title": "WorkflowCustomModel"
}
]
},
- "speed": {
- "type": "number",
- "description": "This is the speed multiplier that will be used.",
- "minimum": 0.1,
- "maximum": 5,
- "example": null
- },
- "temperature": {
- "type": "number",
- "description": "A floating point number between 0, exclusive, and 2, inclusive. If equal to null or not provided, the model's default temperature will be used. The temperature parameter controls variance. Lower temperatures result in more predictable results, higher temperatures allow each run to vary more, so the voice may sound less like the baseline voice.",
- "minimum": 0.1,
- "maximum": 2,
- "example": null
- },
- "emotion": {
- "type": "string",
- "description": "An emotion to be applied to the speech.",
- "enum": [
- "female_happy",
- "female_sad",
- "female_angry",
- "female_fearful",
- "female_disgust",
- "female_surprised",
- "male_happy",
- "male_sad",
- "male_angry",
- "male_fearful",
- "male_disgust",
- "male_surprised"
- ],
- "example": null
- },
- "voiceGuidance": {
- "type": "number",
- "description": "A number between 1 and 6. Use lower numbers to reduce how unique your chosen voice will be compared to other voices.",
- "minimum": 1,
- "maximum": 6,
- "example": null
- },
- "styleGuidance": {
- "type": "number",
- "description": "A number between 1 and 30. Use lower numbers to to reduce how strong your chosen emotion will be. Higher numbers will create a very emotional performance.",
- "minimum": 1,
- "maximum": 30,
- "example": null
- },
- "textGuidance": {
- "type": "number",
- "description": "A number between 1 and 2. This number influences how closely the generated speech adheres to the input text. Use lower values to create more fluid speech, but with a higher chance of deviating from the input text. Higher numbers will make the generated speech more accurate to the input text, ensuring that the words spoken align closely with the provided text.",
- "minimum": 1,
- "maximum": 2,
- "example": null
- },
- "model": {
- "type": "string",
- "description": "Playht voice model/engine to use.",
- "enum": [
- "PlayHT2.0",
- "PlayHT2.0-turbo",
- "Play3.0-mini",
- "PlayDialog"
+ "transcriber": {
+ "description": "This is the transcriber for the workflow.\n\nThis can be overridden at node level using `nodes[n].transcriber`.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AssemblyAITranscriber",
+ "title": "AssemblyAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/AzureSpeechTranscriber",
+ "title": "AzureSpeechTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CustomTranscriber",
+ "title": "CustomTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramTranscriber",
+ "title": "DeepgramTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsTranscriber",
+ "title": "ElevenLabsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GladiaTranscriber",
+ "title": "GladiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleTranscriber",
+ "title": "GoogleTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SpeechmaticsTranscriber",
+ "title": "SpeechmaticsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/TalkscriberTranscriber",
+ "title": "TalkscriberTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAITranscriber",
+ "title": "OpenAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaTranscriber",
+ "title": "CartesiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SonioxTranscriber",
+ "title": "SonioxTranscriber"
+ }
]
},
- "language": {
- "type": "string",
- "description": "The language to use for the speech.",
- "enum": [
- "afrikaans",
- "albanian",
- "amharic",
- "arabic",
- "bengali",
- "bulgarian",
- "catalan",
- "croatian",
- "czech",
- "danish",
- "dutch",
- "english",
- "french",
- "galician",
- "german",
- "greek",
- "hebrew",
- "hindi",
- "hungarian",
- "indonesian",
- "italian",
- "japanese",
- "korean",
- "malay",
- "mandarin",
- "polish",
- "portuguese",
- "russian",
- "serbian",
- "spanish",
- "swedish",
- "tagalog",
- "thai",
- "turkish",
- "ukrainian",
- "urdu",
- "xhosa"
+ "voice": {
+ "description": "This is the voice for the workflow.\n\nThis can be overridden at node level using `nodes[n].voice`.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AzureVoice",
+ "title": "AzureVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaVoice",
+ "title": "CartesiaVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CustomVoice",
+ "title": "CustomVoice"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramVoice",
+ "title": "DeepgramVoice"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsVoice",
+ "title": "ElevenLabsVoice"
+ },
+ {
+ "$ref": "#/components/schemas/HumeVoice",
+ "title": "HumeVoice"
+ },
+ {
+ "$ref": "#/components/schemas/LMNTVoice",
+ "title": "LMNTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/NeuphonicVoice",
+ "title": "NeuphonicVoice"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoice",
+ "title": "OpenAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/PlayHTVoice",
+ "title": "PlayHTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/WellSaidVoice",
+ "title": "WellSaidVoice"
+ },
+ {
+ "$ref": "#/components/schemas/RimeAIVoice",
+ "title": "RimeAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SmallestAIVoice",
+ "title": "SmallestAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/TavusVoice",
+ "title": "TavusVoice"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoice",
+ "title": "VapiVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SesameVoice",
+ "title": "SesameVoice"
+ },
+ {
+ "$ref": "#/components/schemas/InworldVoice",
+ "title": "InworldVoice"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxVoice",
+ "title": "MinimaxVoice"
+ }
]
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "observabilityPlan": {
+ "description": "This is the plan for observability of workflow's calls.\n\nCurrently, only Langfuse is supported.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan",
+ "title": "Langfuse"
+ }
+ ],
"allOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan"
}
]
- }
- },
- "required": [
- "provider",
- "voiceId"
- ]
- },
- "FallbackRimeAIVoice": {
- "type": "object",
- "properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
- "type": "string",
- "description": "This is the voice provider that will be used.",
- "enum": [
- "rime-ai"
- ]
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.",
+ "backgroundSound": {
+ "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
"oneOf": [
{
- "type": "string",
+ "type": "enum",
"enum": [
- "abbie",
- "allison",
- "ally",
- "alona",
- "amber",
- "ana",
- "antoine",
- "armon",
- "brenda",
- "brittany",
- "carol",
- "colin",
- "courtney",
- "elena",
- "elliot",
- "eva",
- "geoff",
- "gerald",
- "hank",
- "helen",
- "hera",
- "jen",
- "joe",
- "joy",
- "juan",
- "kendra",
- "kendrick",
- "kenneth",
- "kevin",
- "kris",
- "linda",
- "madison",
- "marge",
- "marina",
- "marissa",
- "marta",
- "maya",
- "nicholas",
- "nyles",
- "phil",
- "reba",
- "rex",
- "rick",
- "ritu",
- "rob",
- "rodney",
- "rohan",
- "rosco",
- "samantha",
- "sandy",
- "selena",
- "seth",
- "sharon",
- "stan",
- "tamra",
- "tanya",
- "tibur",
- "tj",
- "tyler",
- "viv",
- "yadira",
- "marsh",
- "bayou",
- "creek",
- "brook",
- "flower",
- "spore",
- "glacier",
- "gulch",
- "alpine",
- "cove",
- "lagoon",
- "tundra",
- "steppe",
- "mesa",
- "grove",
- "rainforest",
- "moraine",
- "wildflower",
- "peak",
- "boulder",
- "gypsum",
- "zest",
- "luna",
- "celeste",
- "orion",
- "ursa",
- "astra",
- "esther",
- "estelle",
- "andromeda"
+ "off",
+ "office"
],
- "title": "Preset Voice Options"
+ "example": "office"
},
{
"type": "string",
- "title": "RimeAI Voice ID"
+ "format": "uri",
+ "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
}
]
},
- "model": {
- "type": "string",
- "description": "This is the model that will be used. Defaults to 'arcana' when not specified.",
- "enum": [
- "arcana",
- "mistv2",
- "mist"
- ],
- "example": "arcana"
+ "hooks": {
+ "type": "array",
+ "description": "This is a set of actions that will be performed on certain events.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CallHookCallEnding",
+ "title": "CallHookCallEnding"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
+ "title": "CallHookAssistantSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
+ "title": "CallHookCustomerSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
+ "title": "CallHookCustomerSpeechTimeout"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookModelResponseTimeout",
+ "title": "CallHookModelResponseTimeout"
+ }
+ ]
+ }
},
- "speed": {
- "type": "number",
- "description": "This is the speed multiplier that will be used.",
- "minimum": 0.1,
- "example": null
+ "credentials": {
+ "type": "array",
+ "description": "These are dynamic credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "title": "AnthropicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "title": "AnthropicBedrockCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "title": "AnyscaleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "title": "AssemblyAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureCredentialDTO",
+ "title": "AzureCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "title": "AzureOpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "title": "ByoSipTrunkCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "title": "CartesiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "title": "CerebrasCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "title": "CloudflareCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "title": "CustomLLMCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "title": "DeepgramCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "title": "DeepInfraCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "title": "DeepSeekCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "title": "ElevenLabsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGcpCredentialDTO",
+ "title": "GcpCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
+ "title": "GladiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "title": "GhlCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
+ "title": "GoogleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGroqCredentialDTO",
+ "title": "GroqCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHumeCredentialDTO",
+ "title": "HumeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "title": "InflectionAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "title": "LangfuseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLmntCredentialDTO",
+ "title": "LmntCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMakeCredentialDTO",
+ "title": "MakeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMistralCredentialDTO",
+ "title": "MistralCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "title": "NeuphonicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
+ "title": "OpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "title": "OpenRouterCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "title": "PerplexityAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "title": "PlayHTCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
+ "title": "RimeAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
+ "title": "RunpodCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateS3CredentialDTO",
+ "title": "S3Credential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "title": "SmallestAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "title": "SpeechmaticsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSonioxCredentialDTO",
+ "title": "SonioxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "title": "SupabaseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTavusCredentialDTO",
+ "title": "TavusCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "title": "TogetherAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
+ "title": "TrieveCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
+ "title": "TwilioCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonageCredentialDTO",
+ "title": "VonageCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
+ "title": "WebhookCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomCredentialDTO",
+ "title": "CustomCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateXAiCredentialDTO",
+ "title": "XAiCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "title": "GoogleCalendarOAuth2ClientCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleCalendarOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleSheetsOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "title": "SlackOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "title": "GoHighLevelMCPCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInworldCredentialDTO",
+ "title": "InworldCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "title": "MinimaxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "title": "WellSaidCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEmailCredentialDTO",
+ "title": "EmailCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackWebhookCredentialDTO",
+ "title": "SlackWebhookCredential"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "provider",
+ "mapping": {
+ "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "anthropic-bedrock": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "azure": "#/components/schemas/CreateAzureCredentialDTO",
+ "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "gcp": "#/components/schemas/CreateGcpCredentialDTO",
+ "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
+ "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "google": "#/components/schemas/CreateGoogleCredentialDTO",
+ "groq": "#/components/schemas/CreateGroqCredentialDTO",
+ "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
+ "make": "#/components/schemas/CreateMakeCredentialDTO",
+ "openai": "#/components/schemas/CreateOpenAICredentialDTO",
+ "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
+ "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
+ "s3": "#/components/schemas/CreateS3CredentialDTO",
+ "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "tavus": "#/components/schemas/CreateTavusCredentialDTO",
+ "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
+ "vonage": "#/components/schemas/CreateVonageCredentialDTO",
+ "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
+ "custom-credential": "#/components/schemas/CreateCustomCredentialDTO",
+ "xai": "#/components/schemas/CreateXAiCredentialDTO",
+ "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "hume": "#/components/schemas/CreateHumeCredentialDTO",
+ "mistral": "#/components/schemas/CreateMistralCredentialDTO",
+ "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "soniox": "#/components/schemas/CreateSonioxCredentialDTO",
+ "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
+ "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "inworld": "#/components/schemas/CreateInworldCredentialDTO",
+ "minimax": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "wellsaid": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "email": "#/components/schemas/CreateEmailCredentialDTO",
+ "slack-webhook": "#/components/schemas/CreateSlackWebhookCredentialDTO"
+ }
+ }
+ }
},
- "pauseBetweenBrackets": {
- "type": "boolean",
- "description": "This is a flag that controls whether to add slight pauses using angle brackets. Example: \"Hi. <200> I'd love to have a conversation with you.\" adds a 200ms pause between the first and second sentences.",
- "example": false
+ "voicemailDetection": {
+ "description": "This is the voicemail detection plan for the workflow.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "off"
+ ]
+ },
+ {
+ "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan",
+ "title": "Google"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoicemailDetectionPlan",
+ "title": "OpenAI"
+ },
+ {
+ "$ref": "#/components/schemas/TwilioVoicemailDetectionPlan",
+ "title": "Twilio"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoicemailDetectionPlan",
+ "title": "Vapi"
+ }
+ ]
},
- "phonemizeBetweenBrackets": {
- "type": "boolean",
- "description": "This is a flag that controls whether text inside brackets should be phonemized (converted to phonetic pronunciation) - Example: \"{h'El.o} World\" will pronounce \"Hello\" as expected.",
- "example": false
+ "maxDurationSeconds": {
+ "type": "number",
+ "description": "This is the maximum duration of the call in seconds.\n\nAfter this duration, the call will automatically end.\n\nDefault is 1800 (30 minutes), max is 43200 (12 hours), and min is 10 seconds.",
+ "minimum": 10,
+ "maximum": 43200,
+ "example": 600
},
- "reduceLatency": {
- "type": "boolean",
- "description": "This is a flag that controls whether to optimize for reduced latency in streaming. https://docs.rime.ai/api-reference/endpoint/websockets#param-reduce-latency",
- "example": false
+ "name": {
+ "type": "string",
+ "maxLength": 80
},
- "inlineSpeedAlpha": {
+ "edges": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/Edge"
+ }
+ },
+ "globalPrompt": {
"type": "string",
- "description": "This is a string that allows inline speed control using alpha notation. https://docs.rime.ai/api-reference/endpoint/websockets#param-inline-speed-alpha",
- "example": null
+ "maxLength": 5000
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. tool.server\n2. workflow.server / assistant.server\n3. phoneNumber.server\n4. org.server",
"allOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/Server"
}
]
- }
- },
- "required": [
- "provider",
- "voiceId"
- ]
- },
- "FallbackSesameVoice": {
- "type": "object",
- "properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
},
- "provider": {
- "type": "string",
- "description": "This is the voice provider that will be used.",
- "enum": [
- "sesame"
+ "compliancePlan": {
+ "description": "This is the compliance plan for the workflow. It allows you to configure HIPAA and other compliance settings.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CompliancePlan"
+ }
]
},
- "voiceId": {
- "type": "string",
- "description": "This is the provider-specific ID that will be used.",
- "title": "Sesame Voice ID. This should be either a name (a built-in voice) or a UUID (a custom voice)."
+ "analysisPlan": {
+ "description": "This is the plan for analysis of workflow's calls. Stored in `call.analysis`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AnalysisPlan"
+ }
+ ]
},
- "model": {
- "type": "string",
- "description": "This is the model that will be used.",
- "enum": [
- "csm-1b"
+ "artifactPlan": {
+ "description": "This is the plan for artifacts generated during workflow's calls. Stored in `call.artifact`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ArtifactPlan"
+ }
]
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "startSpeakingPlan": {
+ "description": "This is the plan for when the workflow nodes should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
"allOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/StartSpeakingPlan"
+ }
+ ]
+ },
+ "stopSpeakingPlan": {
+ "description": "This is the plan for when workflow nodes should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StopSpeakingPlan"
+ }
+ ]
+ },
+ "monitorPlan": {
+ "description": "This is the plan for real-time monitoring of the workflow's calls.\n\nUsage:\n- To enable live listening of the workflow's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the workflow's calls, set `monitorPlan.controlEnabled` to `true`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/MonitorPlan"
+ }
+ ]
+ },
+ "backgroundSpeechDenoisingPlan": {
+ "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nBoth can be used together. Order of precedence:\n- Smart denoising\n- Fourier denoising",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
+ }
+ ]
+ },
+ "credentialIds": {
+ "description": "These are the credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "keypadInputPlan": {
+ "description": "This is the plan for keypad input handling during workflow calls.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/KeypadInputPlan"
}
]
+ },
+ "voicemailMessage": {
+ "type": "string",
+ "description": "This is the message that the assistant will say if the call is forwarded to voicemail.\n\nIf unspecified, it will hang up.",
+ "maxLength": 1000
}
},
"required": [
- "provider",
- "voiceId",
- "model"
+ "nodes",
+ "name",
+ "edges"
]
},
- "FallbackSmallestAIVoice": {
+ "VapiModel": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
+ "messages": {
+ "description": "This is the starting state for the conversation.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIMessage"
+ }
},
- "provider": {
- "type": "string",
- "description": "This is the voice provider that will be used.",
- "enum": [
- "smallest-ai"
- ]
+ "tools": {
+ "type": "array",
+ "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateApiRequestToolDTO",
+ "title": "ApiRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateBashToolDTO",
+ "title": "BashTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateComputerToolDTO",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDtmfToolDTO",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEndCallToolDTO",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateFunctionToolDTO",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHandoffToolDTO",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMcpToolDTO",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateQueryToolDTO",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmsToolDTO",
+ "title": "SmsTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTextEditorToolDTO",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTransferCallToolDTO",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
+ }
+ ]
+ }
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.",
+ "toolIds": {
+ "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "knowledgeBase": {
+ "description": "These are the options for the knowledge base.",
"oneOf": [
{
- "type": "string",
- "enum": [
- "emily",
- "jasmine",
- "arman",
- "james",
- "mithali",
- "aravind",
- "raj",
- "diya",
- "raman",
- "ananya",
- "isha",
- "william",
- "aarav",
- "monika",
- "niharika",
- "deepika",
- "raghav",
- "kajal",
- "radhika",
- "mansi",
- "nisha",
- "saurabh",
- "pooja",
- "saina",
- "sanya"
- ],
- "title": "Preset Voice Options"
- },
- {
- "type": "string",
- "title": "Smallest AI Voice ID"
+ "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
+ "title": "Custom"
}
]
},
- "model": {
+ "provider": {
"type": "string",
- "description": "Smallest AI voice model to use. Defaults to 'lightning' when not specified.",
"enum": [
- "lightning"
+ "vapi"
]
},
- "speed": {
- "type": "number",
- "description": "This is the speed multiplier that will be used.",
- "example": null
+ "workflowId": {
+ "type": "string",
+ "description": "This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead."
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "workflow": {
+ "description": "This is the workflow that will be used for the call. To use an existing workflow, use `workflowId` instead.",
"allOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/WorkflowUserEditable"
}
]
+ },
+ "model": {
+ "type": "string",
+ "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b"
+ },
+ "temperature": {
+ "type": "number",
+ "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
+ "minimum": 0,
+ "maximum": 2
+ },
+ "maxTokens": {
+ "type": "number",
+ "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
+ "minimum": 50,
+ "maximum": 10000
+ },
+ "emotionRecognitionEnabled": {
+ "type": "boolean",
+ "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false"
+ },
+ "numFastTurns": {
+ "type": "number",
+ "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
+ "minimum": 0
}
},
"required": [
"provider",
- "voiceId"
+ "model"
]
},
- "FallbackTavusVoice": {
+ "XaiModel": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
+ "messages": {
+ "description": "This is the starting state for the conversation.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIMessage"
+ }
},
- "provider": {
- "type": "string",
- "description": "This is the voice provider that will be used.",
- "enum": [
- "tavus"
- ]
+ "tools": {
+ "type": "array",
+ "description": "These are the tools that the assistant can use during the call. To use existing tools, use `toolIds`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateApiRequestToolDTO",
+ "title": "ApiRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateBashToolDTO",
+ "title": "BashTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateComputerToolDTO",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDtmfToolDTO",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEndCallToolDTO",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateFunctionToolDTO",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHandoffToolDTO",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMcpToolDTO",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateQueryToolDTO",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmsToolDTO",
+ "title": "SmsTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTextEditorToolDTO",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTransferCallToolDTO",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
+ }
+ ]
+ }
},
- "voiceId": {
- "description": "This is the provider-specific ID that will be used.",
+ "toolIds": {
+ "description": "These are the tools that the assistant can use during the call. To use transient tools, use `tools`.\n\nBoth `tools` and `toolIds` can be used together.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "knowledgeBase": {
+ "description": "These are the options for the knowledge base.",
"oneOf": [
{
- "type": "string",
- "enum": [
- "r52da2535a"
- ],
- "title": "Preset Voice Options"
- },
- {
- "type": "string",
- "title": "Tavus Voice ID"
+ "$ref": "#/components/schemas/CreateCustomKnowledgeBaseDTO",
+ "title": "Custom"
}
]
},
- "personaId": {
- "type": "string",
- "description": "This is the unique identifier for the persona that the replica will use in the conversation."
- },
- "callbackUrl": {
+ "model": {
"type": "string",
- "description": "This is the url that will receive webhooks with updates regarding the conversation state."
+ "description": "This is the name of the model. Ex. cognitivecomputations/dolphin-mixtral-8x7b",
+ "enum": [
+ "grok-beta",
+ "grok-2",
+ "grok-3",
+ "grok-4-fast-reasoning",
+ "grok-4-fast-non-reasoning"
+ ]
},
- "conversationName": {
+ "provider": {
"type": "string",
- "description": "This is the name for the conversation."
+ "enum": [
+ "xai"
+ ]
},
- "conversationalContext": {
- "type": "string",
- "description": "This is the context that will be appended to any context provided in the persona, if one is provided."
+ "temperature": {
+ "type": "number",
+ "description": "This is the temperature that will be used for calls. Default is 0 to leverage caching for lower latency.",
+ "minimum": 0,
+ "maximum": 2
},
- "customGreeting": {
- "type": "string",
- "description": "This is the custom greeting that the replica will give once a participant joines the conversation."
+ "maxTokens": {
+ "type": "number",
+ "description": "This is the max number of tokens that the assistant will be allowed to generate in each turn of the conversation. Default is 250.",
+ "minimum": 50,
+ "maximum": 10000
},
- "properties": {
- "description": "These are optional properties used to customize the conversation.",
- "allOf": [
- {
- "$ref": "#/components/schemas/TavusConversationProperties"
- }
- ]
+ "emotionRecognitionEnabled": {
+ "type": "boolean",
+ "description": "This determines whether we detect user's emotion while they speak and send it as an additional info to model.\n\nDefault `false` because the model is usually are good at understanding the user's emotion from text.\n\n@default false"
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChunkPlan"
- }
- ]
+ "numFastTurns": {
+ "type": "number",
+ "description": "This sets how many turns at the start of the conversation to use a smaller, faster model from the same provider before switching to the primary model. Example, gpt-3.5-turbo if provider is openai.\n\nDefault is 0.\n\n@default 0",
+ "minimum": 0
}
},
"required": [
- "provider",
- "voiceId"
+ "model",
+ "provider"
]
},
- "FallbackVapiVoice": {
+ "ExactReplacement": {
"type": "object",
"properties": {
- "cachingEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
- },
- "provider": {
+ "type": {
"type": "string",
- "description": "This is the voice provider that will be used.",
+ "description": "This is the exact replacement type. You can use this to replace a specific word or phrase with a different word or phrase.\n\nUsage:\n- Replace \"hello\" with \"hi\": { type: 'exact', key: 'hello', value: 'hi' }\n- Replace \"good morning\" with \"good day\": { type: 'exact', key: 'good morning', value: 'good day' }\n- Replace a specific name: { type: 'exact', key: 'John Doe', value: 'Jane Smith' }\n- Replace an acronym: { type: 'exact', key: 'AI', value: 'Artificial Intelligence' }\n- Replace a company name with its phonetic pronunciation: { type: 'exact', key: 'Vapi', value: 'Vappy' }",
"enum": [
- "vapi"
+ "exact"
]
},
- "voiceId": {
+ "replaceAllEnabled": {
+ "type": "boolean",
+ "description": "This option let's you control whether to replace all instances of the key or only the first one. By default, it only replaces the first instance.\nExamples:\n- For { type: 'exact', key: 'hello', value: 'hi', replaceAllEnabled: false }. Before: \"hello world, hello universe\" | After: \"hi world, hello universe\"\n- For { type: 'exact', key: 'hello', value: 'hi', replaceAllEnabled: true }. Before: \"hello world, hello universe\" | After: \"hi world, hi universe\"\n@default false",
+ "default": false
+ },
+ "key": {
"type": "string",
- "description": "The voices provided by Vapi",
+ "description": "This is the key to replace."
+ },
+ "value": {
+ "type": "string",
+ "description": "This is the value that will replace the match.",
+ "maxLength": 1000
+ }
+ },
+ "required": [
+ "type",
+ "key",
+ "value"
+ ]
+ },
+ "RegexReplacement": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the regex replacement type. You can use this to replace a word or phrase that matches a pattern.\n\nUsage:\n- Replace all numbers with \"some number\": { type: 'regex', regex: '\\\\d+', value: 'some number' }\n- Replace email addresses with \"[EMAIL]\": { type: 'regex', regex: '\\\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\\\.[A-Z|a-z]{2,}\\\\b', value: '[EMAIL]' }\n- Replace phone numbers with a formatted version: { type: 'regex', regex: '(\\\\d{3})(\\\\d{3})(\\\\d{4})', value: '($1) $2-$3' }\n- Replace all instances of \"color\" or \"colour\" with \"hue\": { type: 'regex', regex: 'colou?r', value: 'hue' }\n- Capitalize the first letter of every sentence: { type: 'regex', regex: '(?<=\\\\. |^)[a-z]', value: (match) => match.toUpperCase() }",
"enum": [
- "Elliot",
- "Kylie",
- "Rohan",
- "Lily",
- "Savannah",
- "Hana",
- "Neha",
- "Cole",
- "Harry",
- "Paige",
- "Spencer"
+ "regex"
]
},
- "speed": {
- "type": "number",
- "description": "This is the speed multiplier that will be used.\n\n@default 1",
- "minimum": 0.25,
- "maximum": 2,
- "default": 1
+ "regex": {
+ "type": "string",
+ "description": "This is the regex pattern to replace.\n\nNote:\n- This works by using the `string.replace` method in Node.JS. Eg. `\"hello there\".replace(/hello/g, \"hi\")` will return `\"hi there\"`.\n\nHot tip:\n- In JavaScript, escape `\\` when sending the regex pattern. Eg. `\"hello\\sthere\"` will be sent over the wire as `\"hellosthere\"`. Send `\"hello\\\\sthere\"` instead."
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChunkPlan"
- }
- ]
+ "options": {
+ "description": "These are the options for the regex replacement. Defaults to all disabled.\n\n@default []",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/RegexOption"
+ }
+ },
+ "value": {
+ "type": "string",
+ "description": "This is the value that will replace the match.",
+ "maxLength": 1000
}
},
"required": [
- "provider",
- "voiceId"
+ "type",
+ "regex",
+ "value"
]
},
- "FallbackInworldVoice": {
+ "FormatPlan": {
"type": "object",
"properties": {
- "cachingEnabled": {
+ "enabled": {
"type": "boolean",
- "description": "This is the flag to toggle voice caching for the assistant.",
- "example": true,
- "default": true
+ "description": "This determines whether the chunk is formatted before being sent to the voice provider. This helps with enunciation. This includes phone numbers, emails and addresses. Default `true`.\n\nUsage:\n- To rely on the voice provider's formatting logic, set this to `false`.\n\nIf `voice.chunkPlan.enabled` is `false`, this is automatically `false` since there's no chunk to format.\n\n@default true",
+ "example": true
},
- "provider": {
- "type": "string",
- "description": "This is the voice provider that will be used.",
- "enum": [
- "inworld"
- ]
+ "numberToDigitsCutoff": {
+ "type": "number",
+ "description": "This is the cutoff after which a number is converted to individual digits instead of being spoken as words.\n\nExample:\n- If cutoff 2025, \"12345\" is converted to \"1 2 3 4 5\" while \"1200\" is converted to \"twelve hundred\".\n\nUsage:\n- If your use case doesn't involve IDs like zip codes, set this to a high value.\n- If your use case involves IDs that are shorter than 5 digits, set this to a lower value.\n\n@default 2025",
+ "minimum": 0,
+ "example": 2025
},
- "voiceId": {
- "type": "string",
- "description": "Available voices by language:\n• en: Alex, Ashley, Craig, Deborah, Dennis, Edward, Elizabeth, Hades, Julia, Pixie, Mark, Olivia, Priya, Ronald, Sarah, Shaun, Theodore, Timothy, Wendy, Dominus\n• zh: Yichen, Xiaoyin, Xinyi, Jing\n• nl: Erik, Katrien, Lennart, Lore\n• fr: Alain, Hélène, Mathieu, Étienne\n• de: Johanna, Josef\n• it: Gianni, Orietta\n• ja: Asuka, Satoshi\n• ko: Hyunwoo, Minji, Seojun, Yoona\n• pl: Szymon, Wojciech\n• pt: Heitor, Maitê\n• es: Diego, Lupita, Miguel, Rafael",
- "maxLength": 120,
- "title": "Inworld Voice ID",
+ "replacements": {
+ "type": "array",
+ "description": "These are the custom replacements you can make to the chunk before it is sent to the voice provider.\n\nUsage:\n- To replace a specific word or phrase with a different word or phrase, use the `ExactReplacement` type. Eg. `{ type: 'exact', key: 'hello', value: 'hi' }`\n- To replace a word or phrase that matches a pattern, use the `RegexReplacement` type. Eg. `{ type: 'regex', regex: '\\\\b[a-zA-Z]{5}\\\\b', value: 'hi' }`\n\n@default []",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ExactReplacement",
+ "title": "ExactReplacement"
+ },
+ {
+ "$ref": "#/components/schemas/RegexReplacement",
+ "title": "RegexReplacement"
+ }
+ ]
+ }
+ },
+ "formattersEnabled": {
+ "type": "array",
+ "description": "List of formatters to apply. If not provided, all default formatters will be applied.\nIf provided, only the specified formatters will be applied.\nNote: Some essential formatters like angle bracket removal will always be applied.\n@default undefined",
"enum": [
- "Alex",
- "Ashley",
- "Craig",
- "Deborah",
- "Dennis",
- "Edward",
- "Elizabeth",
- "Hades",
- "Julia",
- "Pixie",
- "Mark",
- "Olivia",
- "Priya",
- "Ronald",
- "Sarah",
- "Shaun",
- "Theodore",
- "Timothy",
- "Wendy",
- "Dominus",
- "Yichen",
- "Xiaoyin",
- "Xinyi",
- "Jing",
- "Erik",
- "Katrien",
- "Lennart",
- "Lore",
- "Alain",
- "Hélène",
- "Mathieu",
- "Étienne",
- "Johanna",
- "Josef",
- "Gianni",
- "Orietta",
- "Asuka",
- "Satoshi",
- "Hyunwoo",
- "Minji",
- "Seojun",
- "Yoona",
- "Szymon",
- "Wojciech",
- "Heitor",
- "Maitê",
- "Diego",
- "Lupita",
- "Miguel",
- "Rafael"
+ "markdown",
+ "asterisk",
+ "quote",
+ "dash",
+ "newline",
+ "colon",
+ "acronym",
+ "dollarAmount",
+ "email",
+ "date",
+ "time",
+ "distance",
+ "unit",
+ "percentage",
+ "phoneNumber",
+ "number",
+ "stripAsterisk"
],
- "example": "Alex"
+ "items": {
+ "type": "string",
+ "enum": [
+ "markdown",
+ "asterisk",
+ "quote",
+ "dash",
+ "newline",
+ "colon",
+ "acronym",
+ "dollarAmount",
+ "email",
+ "date",
+ "time",
+ "distance",
+ "unit",
+ "percentage",
+ "phoneNumber",
+ "number",
+ "stripAsterisk"
+ ]
+ }
+ }
+ }
+ },
+ "ChunkPlan": {
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "This determines whether the model output is chunked before being sent to the voice provider. Default `true`.\n\nUsage:\n- To rely on the voice provider's audio generation logic, set this to `false`.\n- If seeing issues with quality, set this to `true`.\n\nIf disabled, Vapi-provided audio control tokens like will not work.\n\n@default true",
+ "example": true
},
- "model": {
- "type": "string",
- "description": "This is the model that will be used.",
- "enum": [
- "inworld-tts-1"
- ],
- "default": "inworld-tts-1"
+ "minCharacters": {
+ "type": "number",
+ "description": "This is the minimum number of characters in a chunk.\n\nUsage:\n- To increase quality, set this to a higher value.\n- To decrease latency, set this to a lower value.\n\n@default 30",
+ "minimum": 1,
+ "maximum": 80,
+ "example": 30
},
- "languageCode": {
- "type": "string",
- "description": "Language code for Inworld TTS synthesis",
- "default": "en",
+ "punctuationBoundaries": {
+ "type": "array",
+ "description": "These are the punctuations that are considered valid boundaries for a chunk to be created.\n\nUsage:\n- To increase quality, constrain to fewer boundaries.\n- To decrease latency, enable all.\n\nDefault is automatically set to balance the trade-off between quality and latency based on the provider.",
"enum": [
- "en",
- "zh",
- "ko",
- "nl",
- "fr",
- "es",
- "ja",
- "de",
- "it",
- "pl",
- "pt"
- ]
+ "。",
+ ",",
+ ".",
+ "!",
+ "?",
+ ";",
+ ")",
+ "،",
+ "۔",
+ "।",
+ "॥",
+ "|",
+ "||",
+ ",",
+ ":"
+ ],
+ "example": [
+ "。",
+ ",",
+ ".",
+ "!",
+ "?",
+ ";",
+ "،",
+ "۔",
+ "।",
+ "॥",
+ "|",
+ "||",
+ ",",
+ ":"
+ ],
+ "items": {
+ "type": "string",
+ "enum": [
+ "。",
+ ",",
+ ".",
+ "!",
+ "?",
+ ";",
+ ")",
+ "،",
+ "۔",
+ "।",
+ "॥",
+ "|",
+ "||",
+ ",",
+ ":"
+ ]
+ }
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "formatPlan": {
+ "description": "This is the plan for formatting the chunk before it is sent to the voice provider.",
"allOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/FormatPlan"
}
]
}
+ }
+ },
+ "FallbackPlan": {
+ "type": "object",
+ "properties": {
+ "voices": {
+ "type": "array",
+ "description": "This is the list of voices to fallback to in the event that the primary voice provider fails.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/FallbackAzureVoice",
+ "title": "Azure"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackCartesiaVoice",
+ "title": "Cartesia"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackHumeVoice",
+ "title": "Hume"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackCustomVoice",
+ "title": "CustomVoice"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackDeepgramVoice",
+ "title": "Deepgram"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackElevenLabsVoice",
+ "title": "ElevenLabs"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackVapiVoice",
+ "title": "Vapi"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackLMNTVoice",
+ "title": "LMNT"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackOpenAIVoice",
+ "title": "OpenAI"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackPlayHTVoice",
+ "title": "PlayHT"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackWellSaidVoice",
+ "title": "WellSaid"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackRimeAIVoice",
+ "title": "RimeAI"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackSmallestAIVoice",
+ "title": "Smallest AI"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackTavusVoice",
+ "title": "TavusVoice"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackNeuphonicVoice",
+ "title": "Neuphonic"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackSesameVoice",
+ "title": "Sesame"
+ },
+ {
+ "$ref": "#/components/schemas/FallbackInworldVoice",
+ "title": "Inworld"
+ }
+ ]
+ }
+ }
},
"required": [
- "provider",
- "voiceId"
+ "voices"
]
},
- "FallbackMinimaxVoice": {
+ "AzureVoice": {
"type": "object",
"properties": {
"cachingEnabled": {
@@ -21809,67 +20811,46 @@
"type": "string",
"description": "This is the voice provider that will be used.",
"enum": [
- "minimax"
+ "azure"
]
},
"voiceId": {
- "type": "string",
- "description": "This is the provider-specific ID that will be used. Use a voice from MINIMAX_PREDEFINED_VOICES or a custom cloned voice ID.",
- "title": "This is the Minimax Voice ID"
- },
- "model": {
- "type": "string",
- "description": "This is the model that will be used. Options are 'speech-02-hd' and 'speech-02-turbo'.\nspeech-02-hd is optimized for high-fidelity applications like voiceovers and audiobooks.\nspeech-02-turbo is designed for real-time applications with low latency.\n\n@default \"speech-02-turbo\"",
- "enum": [
- "speech-02-hd",
- "speech-02-turbo"
- ],
- "example": "speech-02-turbo",
- "default": "speech-02-turbo"
- },
- "emotion": {
- "type": "string",
- "description": "The emotion to use for the voice. If not provided, will use auto-detect mode.\nOptions include: 'happy', 'sad', 'angry', 'fearful', 'surprised', 'disgusted', 'neutral'",
- "example": "happy"
+ "description": "This is the provider-specific ID that will be used.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "andrew",
+ "brian",
+ "emma"
+ ],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "Azure Voice ID"
+ }
+ ]
},
- "pitch": {
- "type": "number",
- "description": "Voice pitch adjustment. Range from -12 to 12 semitones.\n@default 0",
- "minimum": -12,
- "maximum": 12,
- "example": 0,
- "default": 0
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
},
"speed": {
"type": "number",
- "description": "Voice speed adjustment. Range from 0.5 to 2.0.\n@default 1.0",
- "minimum": 0.5,
- "maximum": 2,
- "example": 1,
- "default": 1
- },
- "volume": {
- "type": "number",
- "description": "Voice volume adjustment. Range from 0.5 to 2.0.\n@default 1.0",
+ "description": "This is the speed multiplier that will be used.",
"minimum": 0.5,
- "maximum": 2,
- "example": 1,
- "default": 1
- },
- "region": {
- "type": "string",
- "description": "The region for Minimax API. Defaults to \"worldwide\".",
- "enum": [
- "worldwide",
- "china"
- ],
- "default": "worldwide"
+ "maximum": 2
},
- "chunkPlan": {
- "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
"allOf": [
{
- "$ref": "#/components/schemas/ChunkPlan"
+ "$ref": "#/components/schemas/FallbackPlan"
}
]
}
@@ -21879,1664 +20860,13712 @@
"voiceId"
]
},
- "TransportConfigurationTwilio": {
+ "CartesiaExperimentalControls": {
"type": "object",
"properties": {
- "provider": {
- "type": "string",
- "enum": [
- "twilio"
+ "speed": {
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "slowest",
+ "slow",
+ "normal",
+ "fast",
+ "fastest"
+ ],
+ "example": "normal"
+ },
+ {
+ "type": "number",
+ "minimum": -1,
+ "maximum": 1,
+ "example": 0.5
+ }
]
},
- "timeout": {
- "type": "number",
- "description": "The integer number of seconds that we should allow the phone to ring before assuming there is no answer.\nThe default is `60` seconds and the maximum is `600` seconds.\nFor some call flows, we will add a 5-second buffer to the timeout value you provide.\nFor this reason, a timeout value of 10 seconds could result in an actual timeout closer to 15 seconds.\nYou can set this to a short time, such as `15` seconds, to hang up before reaching an answering machine or voicemail.\n\n@default 60",
- "minimum": 1,
- "maximum": 600,
- "example": 60
- },
- "record": {
- "type": "boolean",
- "description": "Whether to record the call.\nCan be `true` to record the phone call, or `false` to not.\nThe default is `false`.\n\n@default false",
- "example": false
- },
- "recordingChannels": {
+ "emotion": {
"type": "string",
- "description": "The number of channels in the final recording.\nCan be: `mono` or `dual`.\nThe default is `mono`.\n`mono` records both legs of the call in a single channel of the recording file.\n`dual` records each leg to a separate channel of the recording file.\nThe first channel of a dual-channel recording contains the parent call and the second channel contains the child call.\n\n@default 'mono'",
"enum": [
- "mono",
- "dual"
+ "anger:lowest",
+ "anger:low",
+ "anger:high",
+ "anger:highest",
+ "positivity:lowest",
+ "positivity:low",
+ "positivity:high",
+ "positivity:highest",
+ "surprise:lowest",
+ "surprise:low",
+ "surprise:high",
+ "surprise:highest",
+ "sadness:lowest",
+ "sadness:low",
+ "sadness:high",
+ "sadness:highest",
+ "curiosity:lowest",
+ "curiosity:low",
+ "curiosity:high",
+ "curiosity:highest"
],
- "example": "mono"
+ "example": [
+                  "positivity:high"
+ ]
}
- },
- "required": [
- "provider"
- ]
+ }
},
- "CreateAnthropicCredentialDTO": {
+ "CartesiaGenerationConfigExperimental": {
"type": "object",
"properties": {
- "provider": {
- "type": "string",
- "enum": [
- "anthropic"
- ]
- },
- "apiKey": {
- "type": "string",
- "maxLength": 10000,
- "description": "This is not returned in the API."
- },
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "accentLocalization": {
+ "type": "integer",
+ "description": "Toggle accent localization for sonic-3: 0 (disabled, default) or 1 (enabled). When enabled, the voice adapts to match the transcript language accent while preserving vocal characteristics.",
+ "example": 0,
+ "minimum": 0,
+ "maximum": 1,
+ "default": 0
}
- },
- "required": [
- "provider",
- "apiKey"
- ]
+ }
},
- "CreateAnyscaleCredentialDTO": {
+ "CartesiaGenerationConfig": {
"type": "object",
"properties": {
- "provider": {
- "type": "string",
- "enum": [
- "anyscale"
- ]
+ "speed": {
+ "type": "number",
+ "description": "Fine-grained speed control for sonic-3. Only available for sonic-3 model.",
+ "example": 1,
+ "minimum": 0.6,
+ "maximum": 1.5,
+ "default": 1
},
- "apiKey": {
- "type": "string",
- "maxLength": 10000,
- "description": "This is not returned in the API."
+ "volume": {
+ "type": "number",
+ "description": "Fine-grained volume control for sonic-3. Only available for sonic-3 model.",
+ "example": 1,
+ "minimum": 0.5,
+ "maximum": 2,
+ "default": 1
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "experimental": {
+ "description": "Experimental model controls for sonic-3. These are subject to breaking changes.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CartesiaGenerationConfigExperimental"
+ }
+ ]
}
- },
- "required": [
- "provider",
- "apiKey"
- ]
+ }
},
- "CreateAssemblyAICredentialDTO": {
+ "CartesiaVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "assembly-ai"
+ "cartesia"
]
},
- "apiKey": {
- "type": "string",
- "description": "This is not returned in the API."
- },
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- },
- "required": [
- "provider",
- "apiKey"
- ]
- },
- "AzureBlobStorageBucketPlan": {
- "type": "object",
- "properties": {
- "connectionString": {
+ "voiceId": {
"type": "string",
- "description": "This is the blob storage connection string for the Azure resource."
+ "description": "The ID of the particular voice you want to use."
},
- "containerName": {
+ "model": {
"type": "string",
- "description": "This is the container name for the Azure blob storage."
- },
- "path": {
- "type": "string",
- "description": "This is the path where call artifacts will be stored.\n\nUsage:\n- To store call artifacts in a specific folder, set this to the full path. Eg. \"/folder-name1/folder-name2\".\n- To store call artifacts in the root of the bucket, leave this blank.\n\n@default \"/\""
- }
- },
- "required": [
- "connectionString",
- "containerName"
- ]
- },
- "CreateAzureCredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
- "type": "string",
- "enum": [
- "azure"
- ]
- },
- "service": {
- "type": "string",
- "description": "This is the service being used in Azure.",
+ "description": "This is the model that will be used. This is optional and will default to the correct model for the voiceId.",
"enum": [
- "speech",
- "blob_storage"
+ "sonic-3",
+ "sonic-3-2026-01-12",
+ "sonic-3-2025-10-27",
+ "sonic-2",
+ "sonic-2-2025-06-11",
+ "sonic-english",
+ "sonic-multilingual",
+ "sonic-preview",
+ "sonic"
],
- "default": "speech"
+ "example": "sonic-english"
},
- "region": {
+ "language": {
"type": "string",
- "description": "This is the region of the Azure resource.",
+ "description": "This is the language that will be used. This is optional and will default to the correct language for the voiceId.",
"enum": [
- "australia",
- "canadaeast",
- "canadacentral",
- "eastus2",
- "eastus",
- "france",
- "india",
- "japaneast",
- "japanwest",
- "uaenorth",
- "northcentralus",
- "norway",
- "southcentralus",
- "swedencentral",
- "switzerland",
+ "ar",
+ "bg",
+ "bn",
+ "cs",
+ "da",
+ "de",
+ "el",
+ "en",
+ "es",
+ "fi",
+ "fr",
+ "gu",
+ "he",
+ "hi",
+ "hr",
+ "hu",
+ "id",
+ "it",
+ "ja",
+ "ka",
+ "kn",
+ "ko",
+ "ml",
+ "mr",
+ "ms",
+ "nl",
+ "no",
+ "pa",
+ "pl",
+ "pt",
+ "ro",
+ "ru",
+ "sk",
+ "sv",
+ "ta",
+ "te",
+ "th",
+ "tl",
+ "tr",
"uk",
- "westus",
- "westus3"
- ]
- },
- "apiKey": {
- "type": "string",
- "description": "This is not returned in the API.",
- "maxLength": 10000
- },
- "fallbackIndex": {
- "type": "number",
- "minimum": 1,
- "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order."
+ "vi",
+ "zh"
+ ],
+ "example": "en"
},
- "bucketPlan": {
- "description": "This is the bucket plan that can be provided to store call artifacts in Azure Blob Storage.",
+ "experimentalControls": {
+ "description": "Experimental controls for Cartesia voice generation",
"allOf": [
{
- "$ref": "#/components/schemas/AzureBlobStorageBucketPlan"
+ "$ref": "#/components/schemas/CartesiaExperimentalControls"
}
]
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- },
- "required": [
- "provider",
- "service"
- ]
- },
- "CreateAzureOpenAICredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
- "type": "string",
- "enum": [
- "azure-openai"
- ]
- },
- "region": {
- "type": "string",
- "enum": [
- "australia",
- "canadaeast",
- "canadacentral",
- "eastus2",
- "eastus",
- "france",
- "india",
- "japaneast",
- "japanwest",
- "uaenorth",
- "northcentralus",
- "norway",
- "southcentralus",
- "swedencentral",
- "switzerland",
- "uk",
- "westus",
- "westus3"
+ "generationConfig": {
+ "description": "Generation config for fine-grained control of sonic-3 voice output (speed, volume, and experimental controls). Only available for sonic-3 model.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CartesiaGenerationConfig"
+ }
]
},
- "models": {
- "type": "array",
- "enum": [
- "gpt-5",
- "gpt-5-mini",
- "gpt-5-nano",
- "gpt-4.1-2025-04-14",
- "gpt-4.1-mini-2025-04-14",
- "gpt-4.1-nano-2025-04-14",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-mini-2024-07-18",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-1106-preview",
- "gpt-4-0613",
- "gpt-35-turbo-0125",
- "gpt-35-turbo-1106"
- ],
- "example": [
- "gpt-4-0125-preview",
- "gpt-4-0613"
- ],
- "items": {
- "type": "string",
- "enum": [
- "gpt-5",
- "gpt-5-mini",
- "gpt-5-nano",
- "gpt-4.1-2025-04-14",
- "gpt-4.1-mini-2025-04-14",
- "gpt-4.1-nano-2025-04-14",
- "gpt-4o-2024-11-20",
- "gpt-4o-2024-08-06",
- "gpt-4o-2024-05-13",
- "gpt-4o-mini-2024-07-18",
- "gpt-4-turbo-2024-04-09",
- "gpt-4-0125-preview",
- "gpt-4-1106-preview",
- "gpt-4-0613",
- "gpt-35-turbo-0125",
- "gpt-35-turbo-1106"
- ]
- }
- },
- "openAIKey": {
- "type": "string",
- "maxLength": 10000,
- "description": "This is not returned in the API."
- },
- "ocpApimSubscriptionKey": {
+ "pronunciationDictId": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "Pronunciation dictionary ID for sonic-3. Allows custom pronunciations for specific words. Only available for sonic-3 model.",
+ "example": "dict_abc123"
},
- "openAIEndpoint": {
- "type": "string",
- "maxLength": 10000
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "region",
- "models",
- "openAIKey",
- "openAIEndpoint"
+ "voiceId"
]
},
- "SipTrunkGateway": {
+ "CustomVoice": {
"type": "object",
"properties": {
- "ip": {
- "type": "string",
- "description": "This is the address of the gateway. It can be an IPv4 address like 1.1.1.1 or a fully qualified domain name like my-sip-trunk.pstn.twilio.com."
- },
- "port": {
- "type": "number",
- "description": "This is the port number of the gateway. Default is 5060.\n\n@default 5060",
- "minimum": 1,
- "maximum": 65535
- },
- "netmask": {
- "type": "number",
- "description": "This is the netmask of the gateway. Defaults to 32.\n\n@default 32",
- "minimum": 24,
- "maximum": 32
- },
- "inboundEnabled": {
- "type": "boolean",
- "description": "This is whether inbound calls are allowed from this gateway. Default is true.\n\n@default true"
- },
- "outboundEnabled": {
+ "cachingEnabled": {
"type": "boolean",
- "description": "This is whether outbound calls should be sent to this gateway. Default is true.\n\nNote, if netmask is less than 32, it doesn't affect the outbound IPs that are tried. 1 attempt is made to `ip:port`.\n\n@default true"
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
},
- "outboundProtocol": {
+ "provider": {
"type": "string",
- "description": "This is the protocol to use for SIP signaling outbound calls. Default is udp.\n\n@default udp",
+ "description": "This is the voice provider that will be used. Use `custom-voice` for providers that are not natively supported.",
"enum": [
- "tls/srtp",
- "tcp",
- "tls",
- "udp"
+ "custom-voice"
]
},
- "optionsPingEnabled": {
- "type": "boolean",
- "description": "This is whether to send options ping to the gateway. This can be used to check if the gateway is reachable. Default is false.\n\nThis is useful for high availability setups where you want to check if the gateway is reachable before routing calls to it. Note, if no gateway for a trunk is reachable, outbound calls will be rejected.\n\n@default false"
- }
- },
- "required": [
- "ip"
- ]
- },
- "SipTrunkOutboundSipRegisterPlan": {
- "type": "object",
- "properties": {
- "domain": {
- "type": "string"
- },
- "username": {
- "type": "string"
- },
- "realm": {
- "type": "string"
- }
- }
- },
- "SipTrunkOutboundAuthenticationPlan": {
- "type": "object",
- "properties": {
- "authPassword": {
+ "voiceId": {
"type": "string",
- "description": "This is not returned in the API."
- },
- "authUsername": {
- "type": "string"
+ "description": "This is the provider-specific ID that will be used. This is passed in the voice request payload to identify the voice to use."
},
- "sipRegisterPlan": {
- "description": "This can be used to configure if SIP register is required by the SIP trunk. If not provided, no SIP registration will be attempted.",
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
"allOf": [
{
- "$ref": "#/components/schemas/SipTrunkOutboundSipRegisterPlan"
+ "$ref": "#/components/schemas/ChunkPlan"
}
]
- }
- }
- },
- "SbcConfiguration": {
- "type": "object",
- "properties": {}
- },
- "CreateByoSipTrunkCredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
- "type": "string",
- "description": "This can be used to bring your own SIP trunks or to connect to a Carrier.",
- "enum": [
- "byo-sip-trunk"
- ]
},
- "gateways": {
- "description": "This is the list of SIP trunk's gateways.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/SipTrunkGateway"
- }
- },
- "outboundAuthenticationPlan": {
- "description": "This can be used to configure the outbound authentication if required by the SIP trunk.",
+ "server": {
+ "description": "This is where the voice request will be sent.\n\nRequest Example:\n\nPOST https://{server.url}\nContent-Type: application/json\n\n{\n \"message\": {\n \"type\": \"voice-request\",\n \"text\": \"Hello, world!\",\n \"sampleRate\": 24000,\n ...other metadata about the call...\n }\n}\n\nResponse Expected: 1-channel 16-bit raw PCM audio at the sample rate specified in the request. Here is how the response will be piped to the transport:\n```\nresponse.on('data', (chunk: Buffer) => {\n outputStream.write(chunk);\n});\n```",
"allOf": [
{
- "$ref": "#/components/schemas/SipTrunkOutboundAuthenticationPlan"
+ "$ref": "#/components/schemas/Server"
}
]
},
- "outboundLeadingPlusEnabled": {
- "type": "boolean",
- "description": "This ensures the outbound origination attempts have a leading plus. Defaults to false to match conventional telecom behavior.\n\nUsage:\n- Vonage/Twilio requires leading plus for all outbound calls. Set this to true.\n\n@default false"
- },
- "techPrefix": {
- "type": "string",
- "description": "This can be used to configure the tech prefix on outbound calls. This is an advanced property.",
- "maxLength": 10000
- },
- "sipDiversionHeader": {
- "type": "string",
- "description": "This can be used to enable the SIP diversion header for authenticating the calling number if the SIP trunk supports it. This is an advanced property.",
- "maxLength": 10000
- },
- "sbcConfiguration": {
- "description": "This is an advanced configuration for enterprise deployments. This uses the onprem SBC to trunk into the SIP trunk's `gateways`, rather than the managed SBC provided by Vapi.",
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
"allOf": [
{
- "$ref": "#/components/schemas/SbcConfiguration"
+ "$ref": "#/components/schemas/FallbackPlan"
}
]
- },
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
}
},
"required": [
- "gateways"
+ "provider",
+ "server"
]
},
- "CreateCartesiaCredentialDTO": {
+ "DeepgramVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "cartesia"
+ "deepgram"
]
},
- "apiKey": {
+ "voiceId": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "This is the provider-specific ID that will be used.",
+ "enum": [
+ "asteria",
+ "luna",
+ "stella",
+ "athena",
+ "hera",
+ "orion",
+ "arcas",
+ "perseus",
+ "angus",
+ "orpheus",
+ "helios",
+ "zeus",
+ "thalia",
+ "andromeda",
+ "helena",
+ "apollo",
+ "arcas",
+ "aries",
+ "amalthea",
+ "asteria",
+ "athena",
+ "atlas",
+ "aurora",
+ "callista",
+ "cora",
+ "cordelia",
+ "delia",
+ "draco",
+ "electra",
+ "harmonia",
+ "hera",
+ "hermes",
+ "hyperion",
+ "iris",
+ "janus",
+ "juno",
+ "jupiter",
+ "luna",
+ "mars",
+ "minerva",
+ "neptune",
+ "odysseus",
+ "ophelia",
+ "orion",
+ "orpheus",
+ "pandora",
+ "phoebe",
+ "pluto",
+ "saturn",
+ "selene",
+ "theia",
+ "vesta",
+ "zeus",
+ "celeste",
+ "estrella",
+ "nestor",
+ "sirio",
+ "carina",
+ "alvaro",
+ "diana",
+ "aquila",
+ "selena",
+ "javier"
+ ],
+ "title": "This is the Deepgram Voice ID"
},
- "name": {
+ "model": {
"type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "description": "This is the model that will be used. Defaults to 'aura-2' when not specified.",
+ "enum": [
+ "aura",
+ "aura-2"
+ ],
+ "example": "aura-2"
+ },
+ "mipOptOut": {
+ "type": "boolean",
+ "description": "If set to true, this will add mip_opt_out=true as a query parameter of all API requests. See https://developers.deepgram.com/docs/the-deepgram-model-improvement-partnership-program#want-to-opt-out\n\nThis will only be used if you are using your own Deepgram API key.\n\n@default false",
+ "example": false,
+ "default": false
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ },
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "apiKey"
+ "voiceId"
]
},
- "CloudflareR2BucketPlan": {
+ "ElevenLabsPronunciationDictionaryLocator": {
"type": "object",
"properties": {
- "accessKeyId": {
- "type": "string",
- "description": "Cloudflare R2 Access key ID."
- },
- "secretAccessKey": {
- "type": "string",
- "description": "Cloudflare R2 access key secret. This is not returned in the API."
- },
- "url": {
- "type": "string",
- "description": "Cloudflare R2 base url."
- },
- "name": {
+ "pronunciationDictionaryId": {
"type": "string",
- "description": "This is the name of the bucket."
+ "description": "This is the ID of the pronunciation dictionary to use.",
+ "title": "This is the ElevenLabs Pronunciation Dictionary ID"
},
- "path": {
+ "versionId": {
"type": "string",
- "description": "This is the path where call artifacts will be stored.\n\nUsage:\n- To store call artifacts in a specific folder, set this to the full path. Eg. \"/folder-name1/folder-name2\".\n- To store call artifacts in the root of the bucket, leave this blank.\n\n@default \"/\""
+ "description": "This is the version ID of the pronunciation dictionary to use.",
+ "title": "This is the ElevenLabs Pronunciation Dictionary Version ID"
}
},
"required": [
- "name"
+ "pronunciationDictionaryId",
+ "versionId"
]
},
- "CreateCloudflareCredentialDTO": {
+ "ElevenLabsVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "cloudflare"
- ],
- "description": "Credential provider. Only allowed value is cloudflare"
+ "11labs"
+ ]
},
- "accountId": {
- "type": "string",
- "description": "Cloudflare Account Id."
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used. Ensure the Voice is present in your 11Labs Voice Library.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "burt",
+ "marissa",
+ "andrea",
+ "sarah",
+ "phillip",
+ "steve",
+ "joseph",
+ "myra",
+ "paula",
+ "ryan",
+ "drew",
+ "paul",
+ "mrb",
+ "matilda",
+ "mark"
+ ],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "11Labs Voice ID"
+ }
+ ]
},
- "apiKey": {
- "type": "string",
- "description": "Cloudflare API Key / Token."
+ "stability": {
+ "type": "number",
+ "description": "Defines the stability for voice settings.",
+ "minimum": 0,
+ "maximum": 1,
+ "example": 0.5
},
- "accountEmail": {
- "type": "string",
- "description": "Cloudflare Account Email."
+ "similarityBoost": {
+ "type": "number",
+ "description": "Defines the similarity boost for voice settings.",
+ "minimum": 0,
+ "maximum": 1,
+ "example": 0.75
},
- "fallbackIndex": {
+ "style": {
"type": "number",
- "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order.",
- "minimum": 1
+ "description": "Defines the style for voice settings.",
+ "minimum": 0,
+ "maximum": 1,
+ "example": 0
},
- "bucketPlan": {
- "description": "This is the bucket plan that can be provided to store call artifacts in R2",
- "allOf": [
- {
- "$ref": "#/components/schemas/CloudflareR2BucketPlan"
- }
- ]
+ "useSpeakerBoost": {
+ "type": "boolean",
+ "description": "Defines the use speaker boost for voice settings.",
+ "example": false
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- },
- "required": [
- "provider"
- ]
- },
- "OAuth2AuthenticationPlan": {
- "type": "object",
- "properties": {
- "type": {
- "type": "string",
- "enum": [
- "oauth2"
- ]
+ "speed": {
+ "type": "number",
+ "description": "Defines the speed for voice settings.",
+ "minimum": 0.7,
+ "maximum": 1.2,
+ "example": 0.9
},
- "url": {
- "type": "string",
- "description": "This is the OAuth2 URL."
+ "optimizeStreamingLatency": {
+ "type": "number",
+ "description": "Defines the optimize streaming latency for voice settings. Defaults to 3.",
+ "minimum": 0,
+ "maximum": 4,
+ "example": 3
},
- "clientId": {
- "type": "string",
- "description": "This is the OAuth2 client ID."
+ "enableSsmlParsing": {
+ "type": "boolean",
+ "description": "This enables the use of https://elevenlabs.io/docs/speech-synthesis/prompting#pronunciation. Defaults to false to save latency.\n\n@default false",
+ "example": false
},
- "clientSecret": {
- "type": "string",
- "description": "This is the OAuth2 client secret."
+ "autoMode": {
+ "type": "boolean",
+ "description": "Defines the auto mode for voice settings. Defaults to false.",
+ "example": false
},
- "scope": {
- "type": "string",
- "description": "This is the scope of the OAuth2 token.",
- "maxLength": 1000
- }
- },
- "required": [
- "type",
- "url",
- "clientId",
- "clientSecret"
- ]
- },
- "CreateCustomLLMCredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
+ "model": {
"type": "string",
+ "description": "This is the model that will be used. Defaults to 'eleven_turbo_v2' if not specified.",
"enum": [
- "custom-llm"
- ]
+ "eleven_multilingual_v2",
+ "eleven_turbo_v2",
+ "eleven_turbo_v2_5",
+ "eleven_flash_v2",
+ "eleven_flash_v2_5",
+ "eleven_monolingual_v1",
+ "eleven_v3"
+ ],
+ "example": "eleven_turbo_v2_5"
},
- "apiKey": {
+ "language": {
"type": "string",
- "maxLength": 10000,
- "description": "This is not returned in the API."
+ "description": "This is the language (ISO 639-1) that is enforced for the model. Currently only Turbo v2.5 supports language enforcement. For other models, an error will be returned if language code is provided."
},
- "authenticationPlan": {
- "description": "This is the authentication plan. Currently supports OAuth2 RFC 6749. To use Bearer authentication, use apiKey",
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
"allOf": [
{
- "$ref": "#/components/schemas/OAuth2AuthenticationPlan"
+ "$ref": "#/components/schemas/ChunkPlan"
}
]
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- },
- "required": [
- "provider",
- "apiKey"
- ]
- },
- "CreateDeepgramCredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
- "type": "string",
- "enum": [
- "deepgram"
- ]
- },
- "apiKey": {
- "type": "string",
- "description": "This is not returned in the API."
- },
- "apiUrl": {
- "type": "string",
- "description": "This can be used to point to an onprem Deepgram instance. Defaults to api.deepgram.com."
+ "pronunciationDictionaryLocators": {
+ "description": "This is the pronunciation dictionary locators to use.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ElevenLabsPronunciationDictionaryLocator"
+ }
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- },
- "required": [
- "provider",
- "apiKey"
- ]
- },
- "CreateDeepInfraCredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
- "type": "string",
- "enum": [
- "deepinfra"
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackPlan"
+ }
]
- },
- "apiKey": {
- "type": "string",
- "description": "This is not returned in the API."
- },
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
}
},
"required": [
"provider",
- "apiKey"
+ "voiceId"
]
},
- "CreateDeepSeekCredentialDTO": {
+ "WellSaidVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "deep-seek"
+ "wellsaid"
]
},
- "apiKey": {
+ "voiceId": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "The WellSaid speaker ID to synthesize."
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- },
- "required": [
- "provider",
- "apiKey"
- ]
- },
- "CreateElevenLabsCredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
+ "model": {
"type": "string",
+ "description": "This is the model that will be used.",
"enum": [
- "11labs"
+ "caruso",
+ "legacy"
]
},
- "apiKey": {
- "type": "string",
- "maxLength": 10000,
- "description": "This is not returned in the API."
- },
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- },
- "required": [
- "provider",
- "apiKey"
- ]
- },
- "GcpKey": {
- "type": "object",
- "properties": {
- "type": {
- "type": "string",
- "description": "This is the type of the key. Most likely, this is \"service_account\"."
- },
- "projectId": {
- "type": "string",
- "description": "This is the ID of the Google Cloud project associated with this key."
- },
- "privateKeyId": {
- "type": "string",
- "description": "This is the unique identifier for the private key."
- },
- "privateKey": {
- "type": "string",
- "description": "This is the private key in PEM format.\n\nNote: This is not returned in the API."
- },
- "clientEmail": {
- "type": "string",
- "description": "This is the email address associated with the service account."
- },
- "clientId": {
- "type": "string",
- "description": "This is the unique identifier for the client."
- },
- "authUri": {
- "type": "string",
- "description": "This is the URI for the auth provider's authorization endpoint."
- },
- "tokenUri": {
- "type": "string",
- "description": "This is the URI for the auth provider's token endpoint."
- },
- "authProviderX509CertUrl": {
- "type": "string",
- "description": "This is the URL of the public x509 certificate for the auth provider."
- },
- "clientX509CertUrl": {
- "type": "string",
- "description": "This is the URL of the public x509 certificate for the client."
- },
- "universeDomain": {
- "type": "string",
- "description": "This is the domain associated with the universe this service account belongs to."
- }
- },
- "required": [
- "type",
- "projectId",
- "privateKeyId",
- "privateKey",
- "clientEmail",
- "clientId",
- "authUri",
- "tokenUri",
- "authProviderX509CertUrl",
- "clientX509CertUrl",
- "universeDomain"
- ]
- },
- "BucketPlan": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "description": "This is the name of the bucket."
- },
- "region": {
- "type": "string",
- "description": "This is the region of the bucket.\n\nUsage:\n- If `credential.type` is `aws`, then this is required.\n- If `credential.type` is `gcp`, then this is optional since GCP allows buckets to be accessed without a region but region is required for data residency requirements. Read here: https://cloud.google.com/storage/docs/request-endpoints\n\nThis overrides the `credential.region` field if it is provided."
- },
- "path": {
- "type": "string",
- "description": "This is the path where call artifacts will be stored.\n\nUsage:\n- To store call artifacts in a specific folder, set this to the full path. Eg. \"/folder-name1/folder-name2\".\n- To store call artifacts in the root of the bucket, leave this blank.\n\n@default \"/\""
+ "enableSsml": {
+ "type": "boolean",
+ "description": "Enables limited SSML translation for input text."
},
- "hmacAccessKey": {
- "type": "string",
- "description": "This is the HMAC access key offered by GCP for interoperability with S3 clients. Here is the guide on how to create: https://cloud.google.com/storage/docs/authentication/managing-hmackeys#console\n\nUsage:\n- If `credential.type` is `gcp`, then this is required.\n- If `credential.type` is `aws`, then this is not required since credential.awsAccessKeyId is used instead."
+ "libraryIds": {
+ "description": "Array of library IDs to use for voice synthesis.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
},
- "hmacSecret": {
- "type": "string",
- "description": "This is the secret for the HMAC access key. Here is the guide on how to create: https://cloud.google.com/storage/docs/authentication/managing-hmackeys#console\n\nUsage:\n- If `credential.type` is `gcp`, then this is required.\n- If `credential.type` is `aws`, then this is not required since credential.awsSecretAccessKey is used instead.\n\nNote: This is not returned in the API."
- }
- },
- "required": [
- "name"
- ]
- },
- "CreateGcpCredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
- "type": "string",
- "enum": [
- "gcp"
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
]
},
- "fallbackIndex": {
- "type": "number",
- "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order.",
- "minimum": 1
- },
- "gcpKey": {
- "description": "This is the GCP key. This is the JSON that can be generated in the Google Cloud Console at https://console.cloud.google.com/iam-admin/serviceaccounts/details//keys.\n\nThe schema is identical to the JSON that GCP outputs.",
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
"allOf": [
{
- "$ref": "#/components/schemas/GcpKey"
+ "$ref": "#/components/schemas/FallbackPlan"
}
]
- },
- "region": {
- "type": "string",
- "description": "This is the region of the GCP resource.",
- "maxLength": 40
- },
- "bucketPlan": {
- "$ref": "#/components/schemas/BucketPlan"
- },
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
}
},
"required": [
"provider",
- "gcpKey"
+ "voiceId"
]
},
- "CreateGladiaCredentialDTO": {
+ "HumeVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "gladia"
+ "hume"
]
},
- "apiKey": {
- "type": "string",
- "description": "This is not returned in the API."
- },
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- },
- "required": [
- "provider",
- "apiKey"
- ]
- },
- "CreateGoHighLevelCredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
+ "model": {
"type": "string",
+ "description": "This is the model that will be used.",
"enum": [
- "gohighlevel"
- ]
+ "octave",
+ "octave2"
+ ],
+ "example": "octave2"
},
- "apiKey": {
+ "voiceId": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "The ID of the particular voice you want to use."
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- },
- "required": [
- "provider",
- "apiKey"
- ]
- },
- "CreateGroqCredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
- "type": "string",
- "enum": [
- "groq"
+ "isCustomHumeVoice": {
+ "type": "boolean",
+ "description": "Indicates whether the chosen voice is a preset Hume AI voice or a custom voice.",
+ "example": false
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
]
},
- "apiKey": {
+ "description": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "Natural language instructions describing how the synthesized speech should sound, including but not limited to tone, intonation, pacing, and accent (e.g., 'a soft, gentle voice with a strong British accent').\n\nIf a Voice is specified in the request, this description serves as acting instructions.\nIf no Voice is specified, a new voice is generated based on this description."
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "apiKey"
+ "voiceId"
]
},
- "CreateLangfuseCredentialDTO": {
+ "LMNTVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "langfuse"
+ "lmnt"
]
},
- "publicKey": {
- "type": "string",
- "description": "The public key for Langfuse project. Eg: pk-lf-..."
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "amy",
+ "ansel",
+ "autumn",
+ "ava",
+ "brandon",
+ "caleb",
+ "cassian",
+ "chloe",
+ "dalton",
+ "daniel",
+ "dustin",
+ "elowen",
+ "evander",
+ "huxley",
+ "james",
+ "juniper",
+ "kennedy",
+ "lauren",
+ "leah",
+ "lily",
+ "lucas",
+ "magnus",
+ "miles",
+ "morgan",
+ "natalie",
+ "nathan",
+ "noah",
+ "nyssa",
+ "oliver",
+ "paige",
+ "ryan",
+ "sadie",
+ "sophie",
+ "stella",
+ "terrence",
+ "tyler",
+ "vesper",
+ "violet",
+ "warrick",
+ "zain",
+ "zeke",
+ "zoe"
+ ],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "LMNT Voice ID"
+ }
+ ]
},
- "apiKey": {
- "type": "string",
- "description": "The secret key for Langfuse project. Eg: sk-lf-... .This is not returned in the API."
+ "speed": {
+ "type": "number",
+ "description": "This is the speed multiplier that will be used.",
+ "minimum": 0.25,
+ "maximum": 2,
+ "example": null
},
- "apiUrl": {
- "type": "string",
- "description": "The host URL for Langfuse project. Eg: https://cloud.langfuse.com"
+ "language": {
+ "description": "Two letter ISO 639-1 language code. Use \"auto\" for auto-detection.",
+ "enum": [
+ "aa",
+ "ab",
+ "ae",
+ "af",
+ "ak",
+ "am",
+ "an",
+ "ar",
+ "as",
+ "av",
+ "ay",
+ "az",
+ "ba",
+ "be",
+ "bg",
+ "bh",
+ "bi",
+ "bm",
+ "bn",
+ "bo",
+ "br",
+ "bs",
+ "ca",
+ "ce",
+ "ch",
+ "co",
+ "cr",
+ "cs",
+ "cu",
+ "cv",
+ "cy",
+ "da",
+ "de",
+ "dv",
+ "dz",
+ "ee",
+ "el",
+ "en",
+ "eo",
+ "es",
+ "et",
+ "eu",
+ "fa",
+ "ff",
+ "fi",
+ "fj",
+ "fo",
+ "fr",
+ "fy",
+ "ga",
+ "gd",
+ "gl",
+ "gn",
+ "gu",
+ "gv",
+ "ha",
+ "he",
+ "hi",
+ "ho",
+ "hr",
+ "ht",
+ "hu",
+ "hy",
+ "hz",
+ "ia",
+ "id",
+ "ie",
+ "ig",
+ "ii",
+ "ik",
+ "io",
+ "is",
+ "it",
+ "iu",
+ "ja",
+ "jv",
+ "ka",
+ "kg",
+ "ki",
+ "kj",
+ "kk",
+ "kl",
+ "km",
+ "kn",
+ "ko",
+ "kr",
+ "ks",
+ "ku",
+ "kv",
+ "kw",
+ "ky",
+ "la",
+ "lb",
+ "lg",
+ "li",
+ "ln",
+ "lo",
+ "lt",
+ "lu",
+ "lv",
+ "mg",
+ "mh",
+ "mi",
+ "mk",
+ "ml",
+ "mn",
+ "mr",
+ "ms",
+ "mt",
+ "my",
+ "na",
+ "nb",
+ "nd",
+ "ne",
+ "ng",
+ "nl",
+ "nn",
+ "no",
+ "nr",
+ "nv",
+ "ny",
+ "oc",
+ "oj",
+ "om",
+ "or",
+ "os",
+ "pa",
+ "pi",
+ "pl",
+ "ps",
+ "pt",
+ "qu",
+ "rm",
+ "rn",
+ "ro",
+ "ru",
+ "rw",
+ "sa",
+ "sc",
+ "sd",
+ "se",
+ "sg",
+ "si",
+ "sk",
+ "sl",
+ "sm",
+ "sn",
+ "so",
+ "sq",
+ "sr",
+ "ss",
+ "st",
+ "su",
+ "sv",
+ "sw",
+ "ta",
+ "te",
+ "tg",
+ "th",
+ "ti",
+ "tk",
+ "tl",
+ "tn",
+ "to",
+ "tr",
+ "ts",
+ "tt",
+ "tw",
+ "ty",
+ "ug",
+ "uk",
+ "ur",
+ "uz",
+ "ve",
+ "vi",
+ "vo",
+ "wa",
+ "wo",
+ "xh",
+ "yi",
+ "yue",
+ "yo",
+ "za",
+ "zh",
+ "zu",
+ "auto"
+ ],
+ "example": "en",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "aa",
+ "ab",
+ "ae",
+ "af",
+ "ak",
+ "am",
+ "an",
+ "ar",
+ "as",
+ "av",
+ "ay",
+ "az",
+ "ba",
+ "be",
+ "bg",
+ "bh",
+ "bi",
+ "bm",
+ "bn",
+ "bo",
+ "br",
+ "bs",
+ "ca",
+ "ce",
+ "ch",
+ "co",
+ "cr",
+ "cs",
+ "cu",
+ "cv",
+ "cy",
+ "da",
+ "de",
+ "dv",
+ "dz",
+ "ee",
+ "el",
+ "en",
+ "eo",
+ "es",
+ "et",
+ "eu",
+ "fa",
+ "ff",
+ "fi",
+ "fj",
+ "fo",
+ "fr",
+ "fy",
+ "ga",
+ "gd",
+ "gl",
+ "gn",
+ "gu",
+ "gv",
+ "ha",
+ "he",
+ "hi",
+ "ho",
+ "hr",
+ "ht",
+ "hu",
+ "hy",
+ "hz",
+ "ia",
+ "id",
+ "ie",
+ "ig",
+ "ii",
+ "ik",
+ "io",
+ "is",
+ "it",
+ "iu",
+ "ja",
+ "jv",
+ "ka",
+ "kg",
+ "ki",
+ "kj",
+ "kk",
+ "kl",
+ "km",
+ "kn",
+ "ko",
+ "kr",
+ "ks",
+ "ku",
+ "kv",
+ "kw",
+ "ky",
+ "la",
+ "lb",
+ "lg",
+ "li",
+ "ln",
+ "lo",
+ "lt",
+ "lu",
+ "lv",
+ "mg",
+ "mh",
+ "mi",
+ "mk",
+ "ml",
+ "mn",
+ "mr",
+ "ms",
+ "mt",
+ "my",
+ "na",
+ "nb",
+ "nd",
+ "ne",
+ "ng",
+ "nl",
+ "nn",
+ "no",
+ "nr",
+ "nv",
+ "ny",
+ "oc",
+ "oj",
+ "om",
+ "or",
+ "os",
+ "pa",
+ "pi",
+ "pl",
+ "ps",
+ "pt",
+ "qu",
+ "rm",
+ "rn",
+ "ro",
+ "ru",
+ "rw",
+ "sa",
+ "sc",
+ "sd",
+ "se",
+ "sg",
+ "si",
+ "sk",
+ "sl",
+ "sm",
+ "sn",
+ "so",
+ "sq",
+ "sr",
+ "ss",
+ "st",
+ "su",
+ "sv",
+ "sw",
+ "ta",
+ "te",
+ "tg",
+ "th",
+ "ti",
+ "tk",
+ "tl",
+ "tn",
+ "to",
+ "tr",
+ "ts",
+ "tt",
+ "tw",
+ "ty",
+ "ug",
+ "uk",
+ "ur",
+ "uz",
+ "ve",
+ "vi",
+ "vo",
+ "wa",
+ "wo",
+ "xh",
+ "yi",
+ "yue",
+ "yo",
+ "za",
+ "zh",
+ "zu"
+ ],
+ "title": "ISO 639-1 Language Code"
+ },
+ {
+ "type": "string",
+ "enum": [
+ "auto"
+ ],
+ "title": "Auto-detect"
+ }
+ ]
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ },
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "publicKey",
- "apiKey",
- "apiUrl"
+ "voiceId"
]
},
- "CreateLmntCredentialDTO": {
+ "NeuphonicVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "lmnt"
+ "neuphonic"
]
},
- "apiKey": {
- "type": "string",
- "description": "This is not returned in the API."
- },
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- },
- "required": [
- "provider",
- "apiKey"
- ]
- },
- "CreateMakeCredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
- "type": "string",
- "enum": [
- "make"
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "Neuphonic Voice ID"
+ }
]
},
- "teamId": {
+ "model": {
"type": "string",
- "description": "Team ID"
+ "description": "This is the model that will be used. Defaults to 'neu_fast' if not specified.",
+ "enum": [
+ "neu_hq",
+ "neu_fast"
+ ],
+ "example": "neu_fast"
},
- "region": {
- "type": "string",
- "description": "Region of your application. For example: eu1, eu2, us1, us2"
+ "language": {
+ "type": "object",
+ "description": "This is the language (ISO 639-1) that is enforced for the model.",
+ "example": "en"
},
- "apiKey": {
- "type": "string",
- "description": "This is not returned in the API."
+ "speed": {
+ "type": "number",
+ "description": "This is the speed multiplier that will be used.",
+ "minimum": 0.25,
+ "maximum": 2,
+ "example": null
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ },
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "teamId",
- "region",
- "apiKey"
+ "voiceId",
+ "language"
]
},
- "CreateOpenAICredentialDTO": {
+ "OpenAIVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
"openai"
]
},
- "apiKey": {
- "type": "string",
- "description": "This is not returned in the API."
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used.\nPlease note that ash, ballad, coral, sage, and verse may only be used with realtime models.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "alloy",
+ "echo",
+ "fable",
+ "onyx",
+ "nova",
+ "shimmer",
+ "marin",
+ "cedar"
+ ],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "OpenAI Voice ID"
+ }
+ ]
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- },
- "required": [
- "provider",
- "apiKey"
- ]
- },
- "CreateOpenRouterCredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
+ "model": {
"type": "string",
+ "description": "This is the model that will be used for text-to-speech.",
"enum": [
- "openrouter"
+ "tts-1",
+ "tts-1-hd",
+ "gpt-4o-mini-tts"
]
},
- "apiKey": {
+ "instructions": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "This is a prompt that allows you to control the voice of your generated audio.\nDoes not work with 'tts-1' or 'tts-1-hd' models.",
+ "maxLength": 10000
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "speed": {
+ "type": "number",
+ "description": "This is the speed multiplier that will be used.",
+ "minimum": 0.25,
+ "maximum": 4,
+ "example": null
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ },
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "apiKey"
+ "voiceId"
]
},
- "CreatePerplexityAICredentialDTO": {
+ "PlayHTVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "perplexity-ai"
+ "playht"
]
},
- "apiKey": {
- "type": "string",
- "description": "This is not returned in the API."
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "jennifer",
+ "melissa",
+ "will",
+ "chris",
+ "matt",
+ "jack",
+ "ruby",
+ "davis",
+ "donna",
+ "michael"
+ ],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "PlayHT Voice ID"
+ }
+ ]
},
- "name": {
+ "speed": {
+ "type": "number",
+ "description": "This is the speed multiplier that will be used.",
+ "minimum": 0.1,
+ "maximum": 5,
+ "example": null
+ },
+ "temperature": {
+ "type": "number",
+ "description": "A floating point number between 0, exclusive, and 2, inclusive. If equal to null or not provided, the model's default temperature will be used. The temperature parameter controls variance. Lower temperatures result in more predictable results, higher temperatures allow each run to vary more, so the voice may sound less like the baseline voice.",
+ "minimum": 0.1,
+ "maximum": 2,
+ "example": null
+ },
+ "emotion": {
"type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- },
- "required": [
- "provider",
- "apiKey"
- ]
- },
- "CreatePlayHTCredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
+ "description": "An emotion to be applied to the speech.",
+ "enum": [
+ "female_happy",
+ "female_sad",
+ "female_angry",
+ "female_fearful",
+ "female_disgust",
+ "female_surprised",
+ "male_happy",
+ "male_sad",
+ "male_angry",
+ "male_fearful",
+ "male_disgust",
+ "male_surprised"
+ ],
+ "example": null
+ },
+ "voiceGuidance": {
+ "type": "number",
+ "description": "A number between 1 and 6. Use lower numbers to reduce how unique your chosen voice will be compared to other voices.",
+ "minimum": 1,
+ "maximum": 6,
+ "example": null
+ },
+ "styleGuidance": {
+ "type": "number",
+ "description": "A number between 1 and 30. Use lower numbers to to reduce how strong your chosen emotion will be. Higher numbers will create a very emotional performance.",
+ "minimum": 1,
+ "maximum": 30,
+ "example": null
+ },
+ "textGuidance": {
+ "type": "number",
+ "description": "A number between 1 and 2. This number influences how closely the generated speech adheres to the input text. Use lower values to create more fluid speech, but with a higher chance of deviating from the input text. Higher numbers will make the generated speech more accurate to the input text, ensuring that the words spoken align closely with the provided text.",
+ "minimum": 1,
+ "maximum": 2,
+ "example": null
+ },
+ "model": {
"type": "string",
+ "description": "Playht voice model/engine to use.",
"enum": [
- "playht"
+ "PlayHT2.0",
+ "PlayHT2.0-turbo",
+ "Play3.0-mini",
+ "PlayDialog"
]
},
- "apiKey": {
+ "language": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "The language to use for the speech.",
+ "enum": [
+ "afrikaans",
+ "albanian",
+ "amharic",
+ "arabic",
+ "bengali",
+ "bulgarian",
+ "catalan",
+ "croatian",
+ "czech",
+ "danish",
+ "dutch",
+ "english",
+ "french",
+ "galician",
+ "german",
+ "greek",
+ "hebrew",
+ "hindi",
+ "hungarian",
+ "indonesian",
+ "italian",
+ "japanese",
+ "korean",
+ "malay",
+ "mandarin",
+ "polish",
+ "portuguese",
+ "russian",
+ "serbian",
+ "spanish",
+ "swedish",
+ "tagalog",
+ "thai",
+ "turkish",
+ "ukrainian",
+ "urdu",
+ "xhosa"
+ ]
},
- "userId": {
- "type": "string"
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "apiKey",
- "userId"
+ "voiceId"
]
},
- "CreateRimeAICredentialDTO": {
+ "RimeAIVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
"rime-ai"
]
},
- "apiKey": {
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "cove",
+ "moon",
+ "wildflower",
+ "eva",
+ "amber",
+ "maya",
+ "lagoon",
+ "breeze",
+ "helen",
+ "joy",
+ "marsh",
+ "creek",
+ "cedar",
+ "alpine",
+ "summit",
+ "nicholas",
+ "tyler",
+ "colin",
+ "hank",
+ "thunder",
+ "astra",
+ "eucalyptus",
+ "moraine",
+ "peak",
+ "tundra",
+ "mesa_extra",
+ "talon",
+ "marlu",
+ "glacier",
+ "falcon",
+ "luna",
+ "celeste",
+ "estelle",
+ "andromeda",
+ "esther",
+ "lyra",
+ "lintel",
+ "oculus",
+ "vespera",
+ "transom",
+ "bond",
+ "arcade",
+ "atrium",
+ "cupola",
+ "fern",
+ "sirius",
+ "orion",
+ "masonry",
+ "albion",
+ "parapet"
+ ],
+ "title": "Suggested Voice Options",
+ "description": "Popular Rime AI voices across mist, mistv2, and arcana models. Any valid Rime AI voice ID is accepted, not just these suggestions."
+ },
+ {
+ "type": "string",
+ "title": "Any Rime AI Voice ID",
+ "description": "Any valid Rime AI voice ID. See https://docs.rime.ai/docs/voices for the full catalog."
+ }
+ ]
+ },
+ "model": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "This is the model that will be used. Defaults to 'arcana' when not specified.",
+ "enum": [
+ "arcana",
+ "mistv2",
+ "mist"
+ ],
+ "example": "arcana"
},
- "name": {
+ "speed": {
+ "type": "number",
+ "description": "This is the speed multiplier that will be used.",
+ "minimum": 0.1,
+ "example": null
+ },
+ "pauseBetweenBrackets": {
+ "type": "boolean",
+ "description": "This is a flag that controls whether to add slight pauses using angle brackets. Example: \"Hi. <200> I'd love to have a conversation with you.\" adds a 200ms pause between the first and second sentences.",
+ "example": false
+ },
+ "phonemizeBetweenBrackets": {
+ "type": "boolean",
+ "description": "This is a flag that controls whether text inside brackets should be phonemized (converted to phonetic pronunciation) - Example: \"{h'El.o} World\" will pronounce \"Hello\" as expected.",
+ "example": false
+ },
+ "reduceLatency": {
+ "type": "boolean",
+ "description": "This is a flag that controls whether to optimize for reduced latency in streaming. https://docs.rime.ai/api-reference/endpoint/websockets#param-reduce-latency",
+ "example": false
+ },
+ "inlineSpeedAlpha": {
"type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "description": "This is a string that allows inline speed control using alpha notation. https://docs.rime.ai/api-reference/endpoint/websockets#param-inline-speed-alpha",
+ "example": null
+ },
+ "language": {
+ "type": "string",
+ "description": "Language for speech synthesis. Uses ISO 639 codes. Supported: en, es, de, fr, ar, hi, ja, he, pt, ta, si.",
+ "enum": [
+ "en",
+ "es",
+ "de",
+ "fr",
+ "ar",
+ "hi",
+ "ja",
+ "he",
+ "pt",
+ "ta",
+ "si"
+ ],
+ "example": "en"
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ },
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "apiKey"
+ "voiceId"
]
},
- "CreateRunpodCredentialDTO": {
+ "SesameVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "runpod"
+ "sesame"
]
},
- "apiKey": {
+ "voiceId": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "This is the provider-specific ID that will be used.",
+ "title": "Sesame Voice ID. This should be either a name (a built-in voice) or a UUID (a custom voice)."
},
- "name": {
+ "model": {
"type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "description": "This is the model that will be used.",
+ "enum": [
+ "csm-1b"
+ ]
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ },
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "apiKey"
+ "voiceId",
+ "model"
]
},
- "CreateS3CredentialDTO": {
+ "SmallestAIVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "s3"
- ],
- "description": "Credential provider. Only allowed value is s3"
- },
- "awsAccessKeyId": {
- "type": "string",
- "description": "AWS access key ID."
- },
- "awsSecretAccessKey": {
- "type": "string",
- "description": "AWS access key secret. This is not returned in the API."
- },
- "region": {
- "type": "string",
- "description": "AWS region in which the S3 bucket is located."
+ "smallest-ai"
+ ]
},
- "s3BucketName": {
- "type": "string",
- "description": "AWS S3 bucket name."
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "emily",
+ "jasmine",
+ "arman",
+ "james",
+ "mithali",
+ "aravind",
+ "raj",
+ "diya",
+ "raman",
+ "ananya",
+ "isha",
+ "william",
+ "aarav",
+ "monika",
+ "niharika",
+ "deepika",
+ "raghav",
+ "kajal",
+ "radhika",
+ "mansi",
+ "nisha",
+ "saurabh",
+ "pooja",
+ "saina",
+ "sanya"
+ ],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "Smallest AI Voice ID"
+ }
+ ]
},
- "s3PathPrefix": {
+ "model": {
"type": "string",
- "description": "The path prefix for the uploaded recording. Ex. \"recordings/\""
+ "description": "Smallest AI voice model to use. Defaults to 'lightning' when not specified.",
+ "enum": [
+ "lightning"
+ ]
},
- "fallbackIndex": {
+ "speed": {
"type": "number",
- "minimum": 1,
- "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order."
+ "description": "This is the speed multiplier that will be used.",
+ "example": null
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ },
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "awsAccessKeyId",
- "awsSecretAccessKey",
- "region",
- "s3BucketName",
- "s3PathPrefix"
+ "voiceId"
]
},
- "SupabaseBucketPlan": {
+ "TavusConversationProperties": {
"type": "object",
"properties": {
- "region": {
- "type": "string",
- "description": "This is the S3 Region. It should look like us-east-1\nIt should be one of the supabase regions defined in the SUPABASE_REGION enum\nCheck https://supabase.com/docs/guides/platform/regions for up to date regions",
- "enum": [
- "us-west-1",
- "us-east-1",
- "us-east-2",
- "ca-central-1",
- "eu-west-1",
- "eu-west-2",
- "eu-west-3",
- "eu-central-1",
- "eu-central-2",
- "eu-north-1",
- "ap-south-1",
- "ap-southeast-1",
- "ap-northeast-1",
- "ap-northeast-2",
- "ap-southeast-2",
- "sa-east-1"
- ]
+ "maxCallDuration": {
+ "type": "number",
+ "description": "The maximum duration of the call in seconds. The default `maxCallDuration` is 3600 seconds (1 hour).\nOnce the time limit specified by this parameter has been reached, the conversation will automatically shut down."
},
- "url": {
- "type": "string",
- "description": "This is the S3 compatible URL for Supabase S3\nThis should look like https://.supabase.co/storage/v1/s3"
+ "participantLeftTimeout": {
+ "type": "number",
+ "description": "The duration in seconds after which the call will be automatically shut down once the last participant leaves."
},
- "accessKeyId": {
+ "participantAbsentTimeout": {
+ "type": "number",
+ "description": "Starting from conversation creation, the duration in seconds after which the call will be automatically shut down if no participant joins the call.\nDefault is 300 seconds (5 minutes)."
+ },
+ "enableRecording": {
+ "type": "boolean",
+ "description": "If true, the user will be able to record the conversation."
+ },
+ "enableTranscription": {
+ "type": "boolean",
+ "description": "If true, the user will be able to transcribe the conversation.\nYou can find more instructions on displaying transcriptions if you are using your custom DailyJS components here.\nYou need to have an event listener on Daily that listens for `app-messages`."
+ },
+ "applyGreenscreen": {
+ "type": "boolean",
+ "description": "If true, the background will be replaced with a greenscreen (RGB values: `[0, 255, 155]`).\nYou can use WebGL on the frontend to make the greenscreen transparent or change its color."
+ },
+ "language": {
"type": "string",
- "description": "This is the Supabase S3 Access Key ID.\nThe user creates this in the Supabase project Storage settings"
+ "description": "The language of the conversation. Please provide the **full language name**, not the two-letter code.\nIf you are using your own TTS voice, please ensure it supports the language you provide.\nIf you are using a stock replica or default persona, please note that only ElevenLabs and Cartesia supported languages are available.\nYou can find a full list of supported languages for Cartesia here, for ElevenLabs here, and for PlayHT here."
},
- "secretAccessKey": {
+ "recordingS3BucketName": {
"type": "string",
- "description": "This is the Supabase S3 Secret Access Key.\nThe user creates this in the Supabase project Storage settings along with the access key id"
+ "description": "The name of the S3 bucket where the recording will be stored."
},
- "name": {
+ "recordingS3BucketRegion": {
"type": "string",
- "description": "This is the Supabase S3 Bucket Name.\nThe user must create this in Supabase under Storage > Buckets\nA bucket that does not exist will not be checked now, but file uploads will fail"
+ "description": "The region of the S3 bucket where the recording will be stored."
},
- "path": {
+ "awsAssumeRoleArn": {
"type": "string",
- "description": "This is the Supabase S3 Bucket Folder Path.\nThe user can create this in Supabase under Storage > Buckets\nA path that does not exist will not be checked now, but file uploads will fail\nA Path is like a folder in the bucket\nEg. If the bucket is called \"my-bucket\" and the path is \"my-folder\", the full path is \"my-bucket/my-folder\""
+ "description": "The ARN of the role that will be assumed to access the S3 bucket."
}
- },
- "required": [
- "region",
- "url",
- "accessKeyId",
- "secretAccessKey",
- "name"
- ]
+ }
},
- "CreateSupabaseCredentialDTO": {
+ "TavusVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "supabase"
- ],
- "description": "This is for supabase storage."
+ "tavus"
+ ]
},
- "fallbackIndex": {
- "type": "number",
- "minimum": 1,
- "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order."
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "r52da2535a"
+ ],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "Tavus Voice ID"
+ }
+ ]
},
- "bucketPlan": {
- "$ref": "#/components/schemas/SupabaseBucketPlan"
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
},
- "name": {
+ "personaId": {
"type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- },
- "required": [
- "provider"
- ]
- },
- "CreateSmallestAICredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
+ "description": "This is the unique identifier for the persona that the replica will use in the conversation."
+ },
+ "callbackUrl": {
"type": "string",
- "enum": [
- "smallest-ai"
- ]
+ "description": "This is the url that will receive webhooks with updates regarding the conversation state."
},
- "apiKey": {
+ "conversationName": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "This is the name for the conversation."
},
- "name": {
+ "conversationalContext": {
"type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "description": "This is the context that will be appended to any context provided in the persona, if one is provided."
+ },
+ "customGreeting": {
+ "type": "string",
+ "description": "This is the custom greeting that the replica will give once a participant joines the conversation."
+ },
+ "properties": {
+ "description": "These are optional properties used to customize the conversation.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/TavusConversationProperties"
+ }
+ ]
+ },
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "apiKey"
+ "voiceId"
]
},
- "CreateTavusCredentialDTO": {
+ "VapiPronunciationDictionaryLocator": {
"type": "object",
"properties": {
- "provider": {
- "type": "string",
- "enum": [
- "tavus"
- ]
- },
- "apiKey": {
+ "pronunciationDictId": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "The pronunciation dictionary ID",
+ "example": "pdict_abc123"
},
- "name": {
+ "versionId": {
"type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "description": "Version ID (only required for ElevenLabs, ignored for Cartesia)",
+ "example": "ver_abc123"
}
},
"required": [
- "provider",
- "apiKey"
+ "pronunciationDictId"
]
},
- "CreateTogetherAICredentialDTO": {
+ "VapiVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "together-ai"
+ "vapi"
]
},
- "apiKey": {
+ "voiceId": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "The voices provided by Vapi",
+ "enum": [
+ "Clara",
+ "Godfrey",
+ "Layla",
+ "Sid",
+ "Gustavo",
+ "Elliot",
+ "Kylie",
+ "Rohan",
+ "Lily",
+ "Savannah",
+ "Hana",
+ "Neha",
+ "Cole",
+ "Harry",
+ "Paige",
+ "Spencer",
+ "Nico",
+ "Kai",
+ "Emma",
+ "Sagar",
+ "Neil",
+ "Naina",
+ "Leah",
+ "Tara",
+ "Jess",
+ "Leo",
+ "Dan",
+ "Mia",
+ "Zac",
+ "Zoe"
+ ]
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "speed": {
+ "type": "number",
+ "description": "This is the speed multiplier that will be used.\n\n@default 1",
+ "minimum": 0.25,
+ "maximum": 2,
+ "default": 1
+ },
+ "pronunciationDictionary": {
+ "description": "List of pronunciation dictionary locators for custom word pronunciations.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/VapiPronunciationDictionaryLocator"
+ }
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ },
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "apiKey"
+ "voiceId"
]
},
- "CreateTwilioCredentialDTO": {
+ "InworldVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "twilio"
+ "inworld"
]
},
- "authToken": {
+ "voiceId": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "Available voices by language:\n• en: Alex, Ashley, Craig, Deborah, Dennis, Edward, Elizabeth, Hades, Julia, Pixie, Mark, Olivia, Priya, Ronald, Sarah, Shaun, Theodore, Timothy, Wendy, Dominus, Hana, Clive, Carter, Blake, Luna\n• zh: Yichen, Xiaoyin, Xinyi, Jing\n• nl: Erik, Katrien, Lennart, Lore\n• fr: Alain, Hélène, Mathieu, Étienne\n• de: Johanna, Josef\n• it: Gianni, Orietta\n• ja: Asuka, Satoshi\n• ko: Hyunwoo, Minji, Seojun, Yoona\n• pl: Szymon, Wojciech\n• pt: Heitor, Maitê\n• es: Diego, Lupita, Miguel, Rafael\n• ru: Svetlana, Elena, Dmitry, Nikolai\n• hi: Riya, Manoj\n• he: Yael, Oren\n• ar: Nour, Omar",
+ "maxLength": 120,
+ "title": "Inworld Voice ID",
+ "enum": [
+ "Alex",
+ "Ashley",
+ "Craig",
+ "Deborah",
+ "Dennis",
+ "Edward",
+ "Elizabeth",
+ "Hades",
+ "Julia",
+ "Pixie",
+ "Mark",
+ "Olivia",
+ "Priya",
+ "Ronald",
+ "Sarah",
+ "Shaun",
+ "Theodore",
+ "Timothy",
+ "Wendy",
+ "Dominus",
+ "Hana",
+ "Clive",
+ "Carter",
+ "Blake",
+ "Luna",
+ "Yichen",
+ "Xiaoyin",
+ "Xinyi",
+ "Jing",
+ "Erik",
+ "Katrien",
+ "Lennart",
+ "Lore",
+ "Alain",
+ "Hélène",
+ "Mathieu",
+ "Étienne",
+ "Johanna",
+ "Josef",
+ "Gianni",
+ "Orietta",
+ "Asuka",
+ "Satoshi",
+ "Hyunwoo",
+ "Minji",
+ "Seojun",
+ "Yoona",
+ "Szymon",
+ "Wojciech",
+ "Heitor",
+ "Maitê",
+ "Diego",
+ "Lupita",
+ "Miguel",
+ "Rafael",
+ "Svetlana",
+ "Elena",
+ "Dmitry",
+ "Nikolai",
+ "Riya",
+ "Manoj",
+ "Yael",
+ "Oren",
+ "Nour",
+ "Omar"
+ ],
+ "example": "Alex"
},
- "apiKey": {
+ "model": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "This is the model that will be used.",
+ "enum": [
+ "inworld-tts-1"
+ ],
+ "default": "inworld-tts-1"
},
- "apiSecret": {
+ "languageCode": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "Language code for Inworld TTS synthesis",
+ "default": "en",
+ "enum": [
+ "en",
+ "zh",
+ "ko",
+ "nl",
+ "fr",
+ "es",
+ "ja",
+ "de",
+ "it",
+ "pl",
+ "pt",
+ "ru",
+ "hi",
+ "he",
+ "ar"
+ ]
},
- "accountSid": {
- "type": "string"
+ "temperature": {
+ "type": "number",
+ "description": "A floating point number between 0, exclusive, and 2, inclusive. If equal to null or not provided, the model's default temperature of 1.1 will be used. The temperature parameter controls variance.\nHigher values will make the output more random and can lead to more expressive results. Lower values will make it more deterministic.\nSee https://docs.inworld.ai/docs/tts/capabilities/generating-audio#additional-configurations for more details.",
+ "minimum": 0.1,
+ "maximum": 2,
+ "default": 1.1,
+ "example": null
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "speakingRate": {
+ "type": "number",
+ "description": "A floating point number between 0.5, inclusive, and 1.5, inclusive. If equal to null or not provided, the model's default speaking speed of 1.0 will be used.\nValues above 0.8 are recommended for higher quality.\nSee https://docs.inworld.ai/docs/tts/capabilities/generating-audio#additional-configurations for more details.",
+ "minimum": 0.5,
+ "maximum": 1.5,
+ "default": 1,
+ "example": null
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ },
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "accountSid"
+ "voiceId"
]
},
- "CreateVonageCredentialDTO": {
+ "MinimaxVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "vonage"
+ "minimax"
]
},
- "apiSecret": {
+ "voiceId": {
"type": "string",
- "description": "This is not returned in the API."
- },
- "apiKey": {
- "type": "string"
+ "description": "This is the provider-specific ID that will be used. Use a voice from MINIMAX_PREDEFINED_VOICES or a custom cloned voice ID.",
+ "title": "This is the Minimax Voice ID"
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- },
- "required": [
- "provider",
- "apiSecret",
- "apiKey"
- ]
- },
- "CreateWebhookCredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
+ "model": {
"type": "string",
+ "description": "This is the model that will be used. Options are 'speech-02-hd' and 'speech-02-turbo'.\nspeech-02-hd is optimized for high-fidelity applications like voiceovers and audiobooks.\nspeech-02-turbo is designed for real-time applications with low latency.\n\n@default \"speech-02-turbo\"",
"enum": [
- "webhook"
- ]
- },
- "authenticationPlan": {
- "description": "This is the authentication plan. Supports OAuth2 RFC 6749 and HMAC signing.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/OAuth2AuthenticationPlan"
- },
- {
- "$ref": "#/components/schemas/HMACAuthenticationPlan"
- }
+ "speech-02-hd",
+ "speech-02-turbo",
+ "speech-2.5-turbo-preview"
],
- "discriminator": {
- "propertyName": "type",
- "mapping": {
- "oauth2": "#/components/schemas/OAuth2AuthenticationPlan",
- "hmac": "#/components/schemas/HMACAuthenticationPlan"
- }
- }
+ "example": "speech-02-turbo",
+ "default": "speech-02-turbo"
},
- "name": {
+ "emotion": {
"type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- },
- "required": [
- "provider",
- "authenticationPlan"
- ]
- },
- "CreateXAiCredentialDTO": {
- "type": "object",
- "properties": {
- "provider": {
+ "description": "The emotion to use for the voice. If not provided, will use auto-detect mode.\nOptions include: 'happy', 'sad', 'angry', 'fearful', 'surprised', 'disgusted', 'neutral'",
+ "example": "happy"
+ },
+ "subtitleType": {
"type": "string",
- "description": "This is the api key for Grok in XAi's console. Get it from here: https://console.x.ai",
+ "description": "Controls the granularity of subtitle/timing data returned by Minimax\nduring synthesis. Set to 'word' to receive per-word timestamps in\nassistant.speechStarted events for karaoke-style caption rendering.\n\n@default \"sentence\"",
"enum": [
- "xai"
- ]
+ "word",
+ "sentence"
+ ],
+ "default": "sentence"
},
- "apiKey": {
+ "pitch": {
+ "type": "number",
+ "description": "Voice pitch adjustment. Range from -12 to 12 semitones.\n@default 0",
+ "minimum": -12,
+ "maximum": 12,
+ "example": 0,
+ "default": 0
+ },
+ "speed": {
+ "type": "number",
+ "description": "Voice speed adjustment. Range from 0.5 to 2.0.\n@default 1.0",
+ "minimum": 0.5,
+ "maximum": 2,
+ "example": 1,
+ "default": 1
+ },
+ "volume": {
+ "type": "number",
+ "description": "Voice volume adjustment. Range from 0.5 to 2.0.\n@default 1.0",
+ "minimum": 0.5,
+ "maximum": 2,
+ "example": 1,
+ "default": 1
+ },
+ "region": {
"type": "string",
- "maxLength": 10000,
- "description": "This is not returned in the API."
+ "description": "The region for Minimax API. Defaults to \"worldwide\".",
+ "enum": [
+ "worldwide",
+ "china"
+ ],
+ "default": "worldwide"
},
- "name": {
+ "languageBoost": {
"type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "description": "Language hint for MiniMax T2A. Example: yue (Cantonese), zh (Chinese), en (English).",
+ "enum": [
+ "Chinese",
+ "Chinese,Yue",
+ "English",
+ "Arabic",
+ "Russian",
+ "Spanish",
+ "French",
+ "Portuguese",
+ "German",
+ "Turkish",
+ "Dutch",
+ "Ukrainian",
+ "Vietnamese",
+ "Indonesian",
+ "Japanese",
+ "Italian",
+ "Korean",
+ "Thai",
+ "Polish",
+ "Romanian",
+ "Greek",
+ "Czech",
+ "Finnish",
+ "Hindi",
+ "Bulgarian",
+ "Danish",
+ "Hebrew",
+ "Malay",
+ "Persian",
+ "Slovak",
+ "Swedish",
+ "Croatian",
+ "Filipino",
+ "Hungarian",
+ "Norwegian",
+ "Slovenian",
+ "Catalan",
+ "Nynorsk",
+ "Tamil",
+ "Afrikaans",
+ "auto"
+ ]
+ },
+ "textNormalizationEnabled": {
+ "type": "boolean",
+ "description": "Enable MiniMax text normalization to improve number reading and formatting.",
+ "default": true
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ },
+ "fallbackPlan": {
+ "description": "This is the plan for voice provider fallbacks in the event that the primary voice provider fails.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/FallbackPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "apiKey"
+ "voiceId"
]
},
- "CreateGoogleCalendarOAuth2ClientCredentialDTO": {
+ "FallbackMinimaxVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "google.calendar.oauth2-client"
+ "minimax"
]
},
- "name": {
+ "voiceId": {
"type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "description": "This is the provider-specific ID that will be used. Use a voice from MINIMAX_PREDEFINED_VOICES or a custom cloned voice ID.",
+ "title": "This is the Minimax Voice ID"
+ },
+ "model": {
+ "type": "string",
+ "description": "This is the model that will be used. Options are 'speech-02-hd' and 'speech-02-turbo'.\nspeech-02-hd is optimized for high-fidelity applications like voiceovers and audiobooks.\nspeech-02-turbo is designed for real-time applications with low latency.\n\n@default \"speech-02-turbo\"",
+ "enum": [
+ "speech-02-hd",
+ "speech-02-turbo",
+ "speech-2.5-turbo-preview"
+ ],
+ "example": "speech-02-turbo",
+ "default": "speech-02-turbo"
+ },
+ "emotion": {
+ "type": "string",
+ "description": "The emotion to use for the voice. If not provided, will use auto-detect mode.\nOptions include: 'happy', 'sad', 'angry', 'fearful', 'surprised', 'disgusted', 'neutral'",
+ "example": "happy"
+ },
+ "subtitleType": {
+ "type": "string",
+ "description": "Controls the granularity of subtitle/timing data returned by Minimax\nduring synthesis. Set to 'word' to receive per-word timestamps in\nassistant.speechStarted events for karaoke-style caption rendering.\n\n@default \"sentence\"",
+ "enum": [
+ "word",
+ "sentence"
+ ],
+ "default": "sentence"
+ },
+ "pitch": {
+ "type": "number",
+ "description": "Voice pitch adjustment. Range from -12 to 12 semitones.\n@default 0",
+ "minimum": -12,
+ "maximum": 12,
+ "example": 0,
+ "default": 0
+ },
+ "speed": {
+ "type": "number",
+ "description": "Voice speed adjustment. Range from 0.5 to 2.0.\n@default 1.0",
+ "minimum": 0.5,
+ "maximum": 2,
+ "example": 1,
+ "default": 1
+ },
+ "volume": {
+ "type": "number",
+ "description": "Voice volume adjustment. Range from 0.5 to 2.0.\n@default 1.0",
+ "minimum": 0.5,
+ "maximum": 2,
+ "example": 1,
+ "default": 1
+ },
+ "region": {
+ "type": "string",
+ "description": "The region for Minimax API. Defaults to \"worldwide\".",
+ "enum": [
+ "worldwide",
+ "china"
+ ],
+ "default": "worldwide"
+ },
+ "languageBoost": {
+ "type": "string",
+ "description": "Language hint for MiniMax T2A. Example: yue (Cantonese), zh (Chinese), en (English).",
+ "enum": [
+ "Chinese",
+ "Chinese,Yue",
+ "English",
+ "Arabic",
+ "Russian",
+ "Spanish",
+ "French",
+ "Portuguese",
+ "German",
+ "Turkish",
+ "Dutch",
+ "Ukrainian",
+ "Vietnamese",
+ "Indonesian",
+ "Japanese",
+ "Italian",
+ "Korean",
+ "Thai",
+ "Polish",
+ "Romanian",
+ "Greek",
+ "Czech",
+ "Finnish",
+ "Hindi",
+ "Bulgarian",
+ "Danish",
+ "Hebrew",
+ "Malay",
+ "Persian",
+ "Slovak",
+ "Swedish",
+ "Croatian",
+ "Filipino",
+ "Hungarian",
+ "Norwegian",
+ "Slovenian",
+ "Catalan",
+ "Nynorsk",
+ "Tamil",
+ "Afrikaans",
+ "auto"
+ ]
+ },
+ "textNormalizationEnabled": {
+ "type": "boolean",
+ "description": "Enable MiniMax text normalization to improve number reading and formatting.",
+ "default": true
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
}
},
"required": [
- "provider"
+ "provider",
+ "voiceId"
]
},
- "CreateGoogleCalendarOAuth2AuthorizationCredentialDTO": {
+ "FallbackWellSaidVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "google.calendar.oauth2-authorization"
+ "wellsaid"
]
},
- "authorizationId": {
+ "voiceId": {
"type": "string",
- "description": "The authorization ID for the OAuth2 authorization"
+ "description": "The WellSaid speaker ID to synthesize."
},
- "name": {
+ "model": {
"type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "description": "This is the model that will be used.",
+ "enum": [
+ "caruso",
+ "legacy"
+ ]
+ },
+ "enableSsml": {
+ "type": "boolean",
+ "description": "Enables limited SSML translation for input text."
+ },
+ "libraryIds": {
+ "description": "Array of library IDs to use for voice synthesis.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "authorizationId"
+ "voiceId"
]
},
- "CreateGoogleSheetsOAuth2AuthorizationCredentialDTO": {
+ "FallbackAzureVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "google.sheets.oauth2-authorization"
+ "azure"
]
},
- "authorizationId": {
- "type": "string",
- "description": "The authorization ID for the OAuth2 authorization"
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "andrew",
+ "brian",
+ "emma"
+ ],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "Azure Voice ID"
+ }
+ ]
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "speed": {
+ "type": "number",
+ "description": "This is the speed multiplier that will be used.",
+ "minimum": 0.5,
+ "maximum": 2
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "authorizationId"
+ "voiceId"
]
},
- "CreateSlackOAuth2AuthorizationCredentialDTO": {
+ "FallbackCartesiaVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "slack.oauth2-authorization"
+ "cartesia"
]
},
- "authorizationId": {
+ "voiceId": {
"type": "string",
- "description": "The authorization ID for the OAuth2 authorization"
+ "description": "The ID of the particular voice you want to use."
},
- "name": {
+ "model": {
"type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "description": "This is the model that will be used. This is optional and will default to the correct model for the voiceId.",
+ "enum": [
+ "sonic-3",
+ "sonic-3-2026-01-12",
+ "sonic-3-2025-10-27",
+ "sonic-2",
+ "sonic-2-2025-06-11",
+ "sonic-english",
+ "sonic-multilingual",
+ "sonic-preview",
+ "sonic"
+ ],
+ "example": "sonic-english"
+ },
+ "language": {
+ "type": "string",
+ "description": "This is the language that will be used. This is optional and will default to the correct language for the voiceId.",
+ "enum": [
+ "ar",
+ "bg",
+ "bn",
+ "cs",
+ "da",
+ "de",
+ "el",
+ "en",
+ "es",
+ "fi",
+ "fr",
+ "gu",
+ "he",
+ "hi",
+ "hr",
+ "hu",
+ "id",
+ "it",
+ "ja",
+ "ka",
+ "kn",
+ "ko",
+ "ml",
+ "mr",
+ "ms",
+ "nl",
+ "no",
+ "pa",
+ "pl",
+ "pt",
+ "ro",
+ "ru",
+ "sk",
+ "sv",
+ "ta",
+ "te",
+ "th",
+ "tl",
+ "tr",
+ "uk",
+ "vi",
+ "zh"
+ ],
+ "example": "en"
+ },
+ "experimentalControls": {
+ "description": "Experimental controls for Cartesia voice generation",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CartesiaExperimentalControls"
+ }
+ ]
+ },
+ "generationConfig": {
+ "description": "Generation config for fine-grained control of sonic-3 voice output (speed, volume, and experimental controls). Only available for sonic-3 model.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CartesiaGenerationConfig"
+ }
+ ]
+ },
+ "pronunciationDictId": {
+ "type": "string",
+ "description": "Pronunciation dictionary ID for sonic-3. Allows custom pronunciations for specific words. Only available for sonic-3 model.",
+ "example": "dict_abc123"
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "authorizationId"
+ "voiceId"
]
},
- "CreateMinimaxCredentialDTO": {
+ "FallbackCustomVoice": {
"type": "object",
"properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
"provider": {
"type": "string",
+ "description": "This is the voice provider that will be used. Use `custom-voice` for providers that are not natively supported.",
"enum": [
- "minimax"
+ "custom-voice"
]
},
- "apiKey": {
+ "voiceId": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "This is the provider-specific ID that will be used. This is passed in the voice request payload to identify the voice to use."
},
- "groupId": {
- "type": "string",
- "description": "This is the Minimax Group ID."
+ "server": {
+ "description": "This is where the voice request will be sent.\n\nRequest Example:\n\nPOST https://{server.url}\nContent-Type: application/json\n\n{\n \"message\": {\n \"type\": \"voice-request\",\n \"text\": \"Hello, world!\",\n \"sampleRate\": 24000,\n ...other metadata about the call...\n }\n}\n\nResponse Expected: 1-channel 16-bit raw PCM audio at the sample rate specified in the request. Here is how the response will be piped to the transport:\n```\nresponse.on('data', (chunk: Buffer) => {\n outputStream.write(chunk);\n});\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
},
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
}
},
"required": [
"provider",
- "apiKey",
- "groupId"
+ "server"
]
},
- "TransferHookAction": {
+ "FallbackDeepgramVoice": {
"type": "object",
"properties": {
- "type": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
+ "provider": {
"type": "string",
- "description": "This is the type of action - must be \"transfer\"",
+ "description": "This is the voice provider that will be used.",
"enum": [
- "transfer"
+ "deepgram"
+ ]
+ },
+ "voiceId": {
+ "type": "string",
+ "description": "This is the provider-specific ID that will be used.",
+ "enum": [
+ "asteria",
+ "luna",
+ "stella",
+ "athena",
+ "hera",
+ "orion",
+ "arcas",
+ "perseus",
+ "angus",
+ "orpheus",
+ "helios",
+ "zeus",
+ "thalia",
+ "andromeda",
+ "helena",
+ "apollo",
+ "arcas",
+ "aries",
+ "amalthea",
+ "asteria",
+ "athena",
+ "atlas",
+ "aurora",
+ "callista",
+ "cora",
+ "cordelia",
+ "delia",
+ "draco",
+ "electra",
+ "harmonia",
+ "hera",
+ "hermes",
+ "hyperion",
+ "iris",
+ "janus",
+ "juno",
+ "jupiter",
+ "luna",
+ "mars",
+ "minerva",
+ "neptune",
+ "odysseus",
+ "ophelia",
+ "orion",
+ "orpheus",
+ "pandora",
+ "phoebe",
+ "pluto",
+ "saturn",
+ "selene",
+ "theia",
+ "vesta",
+ "zeus",
+ "celeste",
+ "estrella",
+ "nestor",
+ "sirio",
+ "carina",
+ "alvaro",
+ "diana",
+ "aquila",
+ "selena",
+ "javier"
+ ],
+ "title": "This is the Deepgram Voice ID"
+ },
+ "model": {
+ "type": "string",
+ "description": "This is the model that will be used. Defaults to 'aura-2' when not specified.",
+ "enum": [
+ "aura",
+ "aura-2"
+ ],
+ "example": "aura-2"
+ },
+ "mipOptOut": {
+ "type": "boolean",
+ "description": "If set to true, this will add mip_opt_out=true as a query parameter of all API requests. See https://developers.deepgram.com/docs/the-deepgram-model-improvement-partnership-program#want-to-opt-out\n\nThis will only be used if you are using your own Deepgram API key.\n\n@default false",
+ "example": false,
+ "default": false
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "voiceId"
+ ]
+ },
+ "FallbackElevenLabsVoice": {
+ "type": "object",
+ "properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is the voice provider that will be used.",
+ "enum": [
+ "11labs"
+ ]
+ },
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used. Ensure the Voice is present in your 11Labs Voice Library.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "burt",
+ "marissa",
+ "andrea",
+ "sarah",
+ "phillip",
+ "steve",
+ "joseph",
+ "myra",
+ "paula",
+ "ryan",
+ "drew",
+ "paul",
+ "mrb",
+ "matilda",
+ "mark"
+ ],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "11Labs Voice ID"
+ }
+ ]
+ },
+ "stability": {
+ "type": "number",
+ "description": "Defines the stability for voice settings.",
+ "minimum": 0,
+ "maximum": 1,
+ "example": 0.5
+ },
+ "similarityBoost": {
+ "type": "number",
+ "description": "Defines the similarity boost for voice settings.",
+ "minimum": 0,
+ "maximum": 1,
+ "example": 0.75
+ },
+ "style": {
+ "type": "number",
+ "description": "Defines the style for voice settings.",
+ "minimum": 0,
+ "maximum": 1,
+ "example": 0
+ },
+ "useSpeakerBoost": {
+ "type": "boolean",
+ "description": "Defines the use speaker boost for voice settings.",
+ "example": false
+ },
+ "speed": {
+ "type": "number",
+ "description": "Defines the speed for voice settings.",
+ "minimum": 0.7,
+ "maximum": 1.2,
+ "example": 0.9
+ },
+ "optimizeStreamingLatency": {
+ "type": "number",
+ "description": "Defines the optimize streaming latency for voice settings. Defaults to 3.",
+ "minimum": 0,
+ "maximum": 4,
+ "example": 3
+ },
+ "enableSsmlParsing": {
+ "type": "boolean",
+ "description": "This enables the use of https://elevenlabs.io/docs/speech-synthesis/prompting#pronunciation. Defaults to false to save latency.\n\n@default false",
+ "example": false
+ },
+ "autoMode": {
+ "type": "boolean",
+ "description": "Defines the auto mode for voice settings. Defaults to false.",
+ "example": false
+ },
+ "model": {
+ "type": "string",
+ "description": "This is the model that will be used. Defaults to 'eleven_turbo_v2' if not specified.",
+ "enum": [
+ "eleven_multilingual_v2",
+ "eleven_turbo_v2",
+ "eleven_turbo_v2_5",
+ "eleven_flash_v2",
+ "eleven_flash_v2_5",
+ "eleven_monolingual_v1",
+ "eleven_v3"
+ ],
+ "example": "eleven_turbo_v2_5"
+ },
+ "language": {
+ "type": "string",
+ "description": "This is the language (ISO 639-1) that is enforced for the model. Currently only Turbo v2.5 supports language enforcement. For other models, an error will be returned if language code is provided."
+ },
+ "pronunciationDictionaryLocators": {
+ "description": "This is the pronunciation dictionary locators to use.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ElevenLabsPronunciationDictionaryLocator"
+ }
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "voiceId"
+ ]
+ },
+ "FallbackHumeVoice": {
+ "type": "object",
+ "properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is the voice provider that will be used.",
+ "enum": [
+ "hume"
+ ]
+ },
+ "model": {
+ "type": "string",
+ "description": "This is the model that will be used.",
+ "enum": [
+ "octave",
+ "octave2"
+ ],
+ "example": "octave2"
+ },
+ "voiceId": {
+ "type": "string",
+ "description": "The ID of the particular voice you want to use."
+ },
+ "isCustomHumeVoice": {
+ "type": "boolean",
+ "description": "Indicates whether the chosen voice is a preset Hume AI voice or a custom voice.",
+ "example": false
+ },
+ "description": {
+ "type": "string",
+ "description": "Natural language instructions describing how the synthesized speech should sound, including but not limited to tone, intonation, pacing, and accent (e.g., 'a soft, gentle voice with a strong British accent').\n\nIf a Voice is specified in the request, this description serves as acting instructions.\nIf no Voice is specified, a new voice is generated based on this description."
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "voiceId"
+ ]
+ },
+ "FallbackLMNTVoice": {
+ "type": "object",
+ "properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is the voice provider that will be used.",
+ "enum": [
+ "lmnt"
+ ]
+ },
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "amy",
+ "ansel",
+ "autumn",
+ "ava",
+ "brandon",
+ "caleb",
+ "cassian",
+ "chloe",
+ "dalton",
+ "daniel",
+ "dustin",
+ "elowen",
+ "evander",
+ "huxley",
+ "james",
+ "juniper",
+ "kennedy",
+ "lauren",
+ "leah",
+ "lily",
+ "lucas",
+ "magnus",
+ "miles",
+ "morgan",
+ "natalie",
+ "nathan",
+ "noah",
+ "nyssa",
+ "oliver",
+ "paige",
+ "ryan",
+ "sadie",
+ "sophie",
+ "stella",
+ "terrence",
+ "tyler",
+ "vesper",
+ "violet",
+ "warrick",
+ "zain",
+ "zeke",
+ "zoe"
+ ],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "LMNT Voice ID"
+ }
+ ]
+ },
+ "speed": {
+ "type": "number",
+ "description": "This is the speed multiplier that will be used.",
+ "minimum": 0.25,
+ "maximum": 2,
+ "example": null
+ },
+ "language": {
+ "description": "Two letter ISO 639-1 language code. Use \"auto\" for auto-detection.",
+ "enum": [
+ "aa",
+ "ab",
+ "ae",
+ "af",
+ "ak",
+ "am",
+ "an",
+ "ar",
+ "as",
+ "av",
+ "ay",
+ "az",
+ "ba",
+ "be",
+ "bg",
+ "bh",
+ "bi",
+ "bm",
+ "bn",
+ "bo",
+ "br",
+ "bs",
+ "ca",
+ "ce",
+ "ch",
+ "co",
+ "cr",
+ "cs",
+ "cu",
+ "cv",
+ "cy",
+ "da",
+ "de",
+ "dv",
+ "dz",
+ "ee",
+ "el",
+ "en",
+ "eo",
+ "es",
+ "et",
+ "eu",
+ "fa",
+ "ff",
+ "fi",
+ "fj",
+ "fo",
+ "fr",
+ "fy",
+ "ga",
+ "gd",
+ "gl",
+ "gn",
+ "gu",
+ "gv",
+ "ha",
+ "he",
+ "hi",
+ "ho",
+ "hr",
+ "ht",
+ "hu",
+ "hy",
+ "hz",
+ "ia",
+ "id",
+ "ie",
+ "ig",
+ "ii",
+ "ik",
+ "io",
+ "is",
+ "it",
+ "iu",
+ "ja",
+ "jv",
+ "ka",
+ "kg",
+ "ki",
+ "kj",
+ "kk",
+ "kl",
+ "km",
+ "kn",
+ "ko",
+ "kr",
+ "ks",
+ "ku",
+ "kv",
+ "kw",
+ "ky",
+ "la",
+ "lb",
+ "lg",
+ "li",
+ "ln",
+ "lo",
+ "lt",
+ "lu",
+ "lv",
+ "mg",
+ "mh",
+ "mi",
+ "mk",
+ "ml",
+ "mn",
+ "mr",
+ "ms",
+ "mt",
+ "my",
+ "na",
+ "nb",
+ "nd",
+ "ne",
+ "ng",
+ "nl",
+ "nn",
+ "no",
+ "nr",
+ "nv",
+ "ny",
+ "oc",
+ "oj",
+ "om",
+ "or",
+ "os",
+ "pa",
+ "pi",
+ "pl",
+ "ps",
+ "pt",
+ "qu",
+ "rm",
+ "rn",
+ "ro",
+ "ru",
+ "rw",
+ "sa",
+ "sc",
+ "sd",
+ "se",
+ "sg",
+ "si",
+ "sk",
+ "sl",
+ "sm",
+ "sn",
+ "so",
+ "sq",
+ "sr",
+ "ss",
+ "st",
+ "su",
+ "sv",
+ "sw",
+ "ta",
+ "te",
+ "tg",
+ "th",
+ "ti",
+ "tk",
+ "tl",
+ "tn",
+ "to",
+ "tr",
+ "ts",
+ "tt",
+ "tw",
+ "ty",
+ "ug",
+ "uk",
+ "ur",
+ "uz",
+ "ve",
+ "vi",
+ "vo",
+ "wa",
+ "wo",
+ "xh",
+ "yi",
+ "yue",
+ "yo",
+ "za",
+ "zh",
+ "zu",
+ "auto"
+ ],
+ "example": "en",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "aa",
+ "ab",
+ "ae",
+ "af",
+ "ak",
+ "am",
+ "an",
+ "ar",
+ "as",
+ "av",
+ "ay",
+ "az",
+ "ba",
+ "be",
+ "bg",
+ "bh",
+ "bi",
+ "bm",
+ "bn",
+ "bo",
+ "br",
+ "bs",
+ "ca",
+ "ce",
+ "ch",
+ "co",
+ "cr",
+ "cs",
+ "cu",
+ "cv",
+ "cy",
+ "da",
+ "de",
+ "dv",
+ "dz",
+ "ee",
+ "el",
+ "en",
+ "eo",
+ "es",
+ "et",
+ "eu",
+ "fa",
+ "ff",
+ "fi",
+ "fj",
+ "fo",
+ "fr",
+ "fy",
+ "ga",
+ "gd",
+ "gl",
+ "gn",
+ "gu",
+ "gv",
+ "ha",
+ "he",
+ "hi",
+ "ho",
+ "hr",
+ "ht",
+ "hu",
+ "hy",
+ "hz",
+ "ia",
+ "id",
+ "ie",
+ "ig",
+ "ii",
+ "ik",
+ "io",
+ "is",
+ "it",
+ "iu",
+ "ja",
+ "jv",
+ "ka",
+ "kg",
+ "ki",
+ "kj",
+ "kk",
+ "kl",
+ "km",
+ "kn",
+ "ko",
+ "kr",
+ "ks",
+ "ku",
+ "kv",
+ "kw",
+ "ky",
+ "la",
+ "lb",
+ "lg",
+ "li",
+ "ln",
+ "lo",
+ "lt",
+ "lu",
+ "lv",
+ "mg",
+ "mh",
+ "mi",
+ "mk",
+ "ml",
+ "mn",
+ "mr",
+ "ms",
+ "mt",
+ "my",
+ "na",
+ "nb",
+ "nd",
+ "ne",
+ "ng",
+ "nl",
+ "nn",
+ "no",
+ "nr",
+ "nv",
+ "ny",
+ "oc",
+ "oj",
+ "om",
+ "or",
+ "os",
+ "pa",
+ "pi",
+ "pl",
+ "ps",
+ "pt",
+ "qu",
+ "rm",
+ "rn",
+ "ro",
+ "ru",
+ "rw",
+ "sa",
+ "sc",
+ "sd",
+ "se",
+ "sg",
+ "si",
+ "sk",
+ "sl",
+ "sm",
+ "sn",
+ "so",
+ "sq",
+ "sr",
+ "ss",
+ "st",
+ "su",
+ "sv",
+ "sw",
+ "ta",
+ "te",
+ "tg",
+ "th",
+ "ti",
+ "tk",
+ "tl",
+ "tn",
+ "to",
+ "tr",
+ "ts",
+ "tt",
+ "tw",
+ "ty",
+ "ug",
+ "uk",
+ "ur",
+ "uz",
+ "ve",
+ "vi",
+ "vo",
+ "wa",
+ "wo",
+ "xh",
+ "yi",
+ "yue",
+ "yo",
+ "za",
+ "zh",
+ "zu"
+ ],
+ "title": "ISO 639-1 Language Code"
+ },
+ {
+ "type": "string",
+ "enum": [
+ "auto"
+ ],
+ "title": "Auto-detect"
+ }
+ ]
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "voiceId"
+ ]
+ },
+ "FallbackNeuphonicVoice": {
+ "type": "object",
+ "properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is the voice provider that will be used.",
+ "enum": [
+ "neuphonic"
+ ]
+ },
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "Neuphonic Voice ID"
+ }
+ ]
+ },
+ "model": {
+ "type": "string",
+ "description": "This is the model that will be used. Defaults to 'neu_fast' if not specified.",
+ "enum": [
+ "neu_hq",
+ "neu_fast"
+ ],
+ "example": "neu_fast"
+ },
+ "language": {
+ "type": "object",
+ "description": "This is the language (ISO 639-1) that is enforced for the model.",
+ "example": "en"
+ },
+ "speed": {
+ "type": "number",
+ "description": "This is the speed multiplier that will be used.",
+ "minimum": 0.25,
+ "maximum": 2,
+ "example": null
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "voiceId",
+ "language"
+ ]
+ },
+ "FallbackOpenAIVoice": {
+ "type": "object",
+ "properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is the voice provider that will be used.",
+ "enum": [
+ "openai"
+ ]
+ },
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used.\nPlease note that ash, ballad, coral, sage, and verse may only be used with realtime models.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "alloy",
+ "echo",
+ "fable",
+ "onyx",
+ "nova",
+ "shimmer",
+ "marin",
+ "cedar"
+ ],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "OpenAI Voice ID"
+ }
+ ]
+ },
+ "model": {
+ "type": "string",
+ "description": "This is the model that will be used for text-to-speech.",
+ "enum": [
+ "tts-1",
+ "tts-1-hd",
+ "gpt-4o-mini-tts"
+ ]
+ },
+ "instructions": {
+ "type": "string",
+ "description": "This is a prompt that allows you to control the voice of your generated audio.\nDoes not work with 'tts-1' or 'tts-1-hd' models.",
+ "maxLength": 10000
+ },
+ "speed": {
+ "type": "number",
+ "description": "This is the speed multiplier that will be used.",
+ "minimum": 0.25,
+ "maximum": 4,
+ "example": null
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "voiceId"
+ ]
+ },
+ "FallbackPlayHTVoice": {
+ "type": "object",
+ "properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is the voice provider that will be used.",
+ "enum": [
+ "playht"
+ ]
+ },
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "jennifer",
+ "melissa",
+ "will",
+ "chris",
+ "matt",
+ "jack",
+ "ruby",
+ "davis",
+ "donna",
+ "michael"
+ ],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "PlayHT Voice ID"
+ }
+ ]
+ },
+ "speed": {
+ "type": "number",
+ "description": "This is the speed multiplier that will be used.",
+ "minimum": 0.1,
+ "maximum": 5,
+ "example": null
+ },
+ "temperature": {
+ "type": "number",
+ "description": "A floating point number between 0, exclusive, and 2, inclusive. If equal to null or not provided, the model's default temperature will be used. The temperature parameter controls variance. Lower temperatures result in more predictable results, higher temperatures allow each run to vary more, so the voice may sound less like the baseline voice.",
+ "minimum": 0.1,
+ "maximum": 2,
+ "example": null
+ },
+ "emotion": {
+ "type": "string",
+ "description": "An emotion to be applied to the speech.",
+ "enum": [
+ "female_happy",
+ "female_sad",
+ "female_angry",
+ "female_fearful",
+ "female_disgust",
+ "female_surprised",
+ "male_happy",
+ "male_sad",
+ "male_angry",
+ "male_fearful",
+ "male_disgust",
+ "male_surprised"
+ ],
+ "example": null
+ },
+ "voiceGuidance": {
+ "type": "number",
+ "description": "A number between 1 and 6. Use lower numbers to reduce how unique your chosen voice will be compared to other voices.",
+ "minimum": 1,
+ "maximum": 6,
+ "example": null
+ },
+ "styleGuidance": {
+ "type": "number",
+ "description": "A number between 1 and 30. Use lower numbers to to reduce how strong your chosen emotion will be. Higher numbers will create a very emotional performance.",
+ "minimum": 1,
+ "maximum": 30,
+ "example": null
+ },
+ "textGuidance": {
+ "type": "number",
+ "description": "A number between 1 and 2. This number influences how closely the generated speech adheres to the input text. Use lower values to create more fluid speech, but with a higher chance of deviating from the input text. Higher numbers will make the generated speech more accurate to the input text, ensuring that the words spoken align closely with the provided text.",
+ "minimum": 1,
+ "maximum": 2,
+ "example": null
+ },
+ "model": {
+ "type": "string",
+ "description": "Playht voice model/engine to use.",
+ "enum": [
+ "PlayHT2.0",
+ "PlayHT2.0-turbo",
+ "Play3.0-mini",
+ "PlayDialog"
+ ]
+ },
+ "language": {
+ "type": "string",
+ "description": "The language to use for the speech.",
+ "enum": [
+ "afrikaans",
+ "albanian",
+ "amharic",
+ "arabic",
+ "bengali",
+ "bulgarian",
+ "catalan",
+ "croatian",
+ "czech",
+ "danish",
+ "dutch",
+ "english",
+ "french",
+ "galician",
+ "german",
+ "greek",
+ "hebrew",
+ "hindi",
+ "hungarian",
+ "indonesian",
+ "italian",
+ "japanese",
+ "korean",
+ "malay",
+ "mandarin",
+ "polish",
+ "portuguese",
+ "russian",
+ "serbian",
+ "spanish",
+ "swedish",
+ "tagalog",
+ "thai",
+ "turkish",
+ "ukrainian",
+ "urdu",
+ "xhosa"
+ ]
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "voiceId"
+ ]
+ },
+ "FallbackRimeAIVoice": {
+ "type": "object",
+ "properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is the voice provider that will be used.",
+ "enum": [
+ "rime-ai"
+ ]
+ },
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "cove",
+ "moon",
+ "wildflower",
+ "eva",
+ "amber",
+ "maya",
+ "lagoon",
+ "breeze",
+ "helen",
+ "joy",
+ "marsh",
+ "creek",
+ "cedar",
+ "alpine",
+ "summit",
+ "nicholas",
+ "tyler",
+ "colin",
+ "hank",
+ "thunder",
+ "astra",
+ "eucalyptus",
+ "moraine",
+ "peak",
+ "tundra",
+ "mesa_extra",
+ "talon",
+ "marlu",
+ "glacier",
+ "falcon",
+ "luna",
+ "celeste",
+ "estelle",
+ "andromeda",
+ "esther",
+ "lyra",
+ "lintel",
+ "oculus",
+ "vespera",
+ "transom",
+ "bond",
+ "arcade",
+ "atrium",
+ "cupola",
+ "fern",
+ "sirius",
+ "orion",
+ "masonry",
+ "albion",
+ "parapet"
+ ],
+ "title": "Suggested Voice Options",
+ "description": "Popular Rime AI voices across mist, mistv2, and arcana models. Any valid Rime AI voice ID is accepted, not just these suggestions."
+ },
+ {
+ "type": "string",
+ "title": "Any Rime AI Voice ID",
+ "description": "Any valid Rime AI voice ID. See https://docs.rime.ai/docs/voices for the full catalog."
+ }
+ ]
+ },
+ "model": {
+ "type": "string",
+ "description": "This is the model that will be used. Defaults to 'arcana' when not specified.",
+ "enum": [
+ "arcana",
+ "mistv2",
+ "mist"
+ ],
+ "example": "arcana"
+ },
+ "speed": {
+ "type": "number",
+ "description": "This is the speed multiplier that will be used.",
+ "minimum": 0.1,
+ "example": null
+ },
+ "pauseBetweenBrackets": {
+ "type": "boolean",
+ "description": "This is a flag that controls whether to add slight pauses using angle brackets. Example: \"Hi. <200> I'd love to have a conversation with you.\" adds a 200ms pause between the first and second sentences.",
+ "example": false
+ },
+ "phonemizeBetweenBrackets": {
+ "type": "boolean",
+ "description": "This is a flag that controls whether text inside brackets should be phonemized (converted to phonetic pronunciation) - Example: \"{h'El.o} World\" will pronounce \"Hello\" as expected.",
+ "example": false
+ },
+ "reduceLatency": {
+ "type": "boolean",
+ "description": "This is a flag that controls whether to optimize for reduced latency in streaming. https://docs.rime.ai/api-reference/endpoint/websockets#param-reduce-latency",
+ "example": false
+ },
+ "inlineSpeedAlpha": {
+ "type": "string",
+ "description": "This is a string that allows inline speed control using alpha notation. https://docs.rime.ai/api-reference/endpoint/websockets#param-inline-speed-alpha",
+ "example": null
+ },
+ "language": {
+ "type": "string",
+ "description": "Language for speech synthesis. Uses ISO 639 codes. Supported: en, es, de, fr, ar, hi, ja, he, pt, ta, si.",
+ "enum": [
+ "en",
+ "es",
+ "de",
+ "fr",
+ "ar",
+ "hi",
+ "ja",
+ "he",
+ "pt",
+ "ta",
+ "si"
+ ],
+ "example": "en"
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "voiceId"
+ ]
+ },
+ "FallbackSesameVoice": {
+ "type": "object",
+ "properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is the voice provider that will be used.",
+ "enum": [
+ "sesame"
+ ]
+ },
+ "voiceId": {
+ "type": "string",
+ "description": "This is the provider-specific ID that will be used.",
+ "title": "Sesame Voice ID. This should be either a name (a built-in voice) or a UUID (a custom voice)."
+ },
+ "model": {
+ "type": "string",
+ "description": "This is the model that will be used.",
+ "enum": [
+ "csm-1b"
+ ]
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "voiceId",
+ "model"
+ ]
+ },
+ "FallbackSmallestAIVoice": {
+ "type": "object",
+ "properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is the voice provider that will be used.",
+ "enum": [
+ "smallest-ai"
+ ]
+ },
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "emily",
+ "jasmine",
+ "arman",
+ "james",
+ "mithali",
+ "aravind",
+ "raj",
+ "diya",
+ "raman",
+ "ananya",
+ "isha",
+ "william",
+ "aarav",
+ "monika",
+ "niharika",
+ "deepika",
+ "raghav",
+ "kajal",
+ "radhika",
+ "mansi",
+ "nisha",
+ "saurabh",
+ "pooja",
+ "saina",
+ "sanya"
+ ],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "Smallest AI Voice ID"
+ }
+ ]
+ },
+ "model": {
+ "type": "string",
+ "description": "Smallest AI voice model to use. Defaults to 'lightning' when not specified.",
+ "enum": [
+ "lightning"
+ ]
+ },
+ "speed": {
+ "type": "number",
+ "description": "This is the speed multiplier that will be used.",
+ "example": null
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "voiceId"
+ ]
+ },
+ "FallbackTavusVoice": {
+ "type": "object",
+ "properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is the voice provider that will be used.",
+ "enum": [
+ "tavus"
+ ]
+ },
+ "voiceId": {
+ "description": "This is the provider-specific ID that will be used.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "r52da2535a"
+ ],
+ "title": "Preset Voice Options"
+ },
+ {
+ "type": "string",
+ "title": "Tavus Voice ID"
+ }
+ ]
+ },
+ "personaId": {
+ "type": "string",
+ "description": "This is the unique identifier for the persona that the replica will use in the conversation."
+ },
+ "callbackUrl": {
+ "type": "string",
+ "description": "This is the url that will receive webhooks with updates regarding the conversation state."
+ },
+ "conversationName": {
+ "type": "string",
+ "description": "This is the name for the conversation."
+ },
+ "conversationalContext": {
+ "type": "string",
+ "description": "This is the context that will be appended to any context provided in the persona, if one is provided."
+ },
+ "customGreeting": {
+ "type": "string",
+ "description": "This is the custom greeting that the replica will give once a participant joines the conversation."
+ },
+ "properties": {
+ "description": "These are optional properties used to customize the conversation.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/TavusConversationProperties"
+ }
+ ]
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "voiceId"
+ ]
+ },
+ "FallbackVapiVoice": {
+ "type": "object",
+ "properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is the voice provider that will be used.",
+ "enum": [
+ "vapi"
+ ]
+ },
+ "voiceId": {
+ "type": "string",
+ "description": "The voices provided by Vapi",
+ "enum": [
+ "Clara",
+ "Godfrey",
+ "Layla",
+ "Sid",
+ "Gustavo",
+ "Elliot",
+ "Kylie",
+ "Rohan",
+ "Lily",
+ "Savannah",
+ "Hana",
+ "Neha",
+ "Cole",
+ "Harry",
+ "Paige",
+ "Spencer",
+ "Nico",
+ "Kai",
+ "Emma",
+ "Sagar",
+ "Neil",
+ "Naina",
+ "Leah",
+ "Tara",
+ "Jess",
+ "Leo",
+ "Dan",
+ "Mia",
+ "Zac",
+ "Zoe"
+ ]
+ },
+ "speed": {
+ "type": "number",
+ "description": "This is the speed multiplier that will be used.\n\n@default 1",
+ "minimum": 0.25,
+ "maximum": 2,
+ "default": 1
+ },
+ "pronunciationDictionary": {
+ "description": "List of pronunciation dictionary locators for custom word pronunciations.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/VapiPronunciationDictionaryLocator"
+ }
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "voiceId"
+ ]
+ },
+ "FallbackInworldVoice": {
+ "type": "object",
+ "properties": {
+ "cachingEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle voice caching for the assistant.",
+ "example": true,
+ "default": true
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is the voice provider that will be used.",
+ "enum": [
+ "inworld"
+ ]
+ },
+ "voiceId": {
+ "type": "string",
+ "description": "Available voices by language:\n• en: Alex, Ashley, Craig, Deborah, Dennis, Edward, Elizabeth, Hades, Julia, Pixie, Mark, Olivia, Priya, Ronald, Sarah, Shaun, Theodore, Timothy, Wendy, Dominus, Hana, Clive, Carter, Blake, Luna\n• zh: Yichen, Xiaoyin, Xinyi, Jing\n• nl: Erik, Katrien, Lennart, Lore\n• fr: Alain, Hélène, Mathieu, Étienne\n• de: Johanna, Josef\n• it: Gianni, Orietta\n• ja: Asuka, Satoshi\n• ko: Hyunwoo, Minji, Seojun, Yoona\n• pl: Szymon, Wojciech\n• pt: Heitor, Maitê\n• es: Diego, Lupita, Miguel, Rafael\n• ru: Svetlana, Elena, Dmitry, Nikolai\n• hi: Riya, Manoj\n• he: Yael, Oren\n• ar: Nour, Omar",
+ "maxLength": 120,
+ "title": "Inworld Voice ID",
+ "enum": [
+ "Alex",
+ "Ashley",
+ "Craig",
+ "Deborah",
+ "Dennis",
+ "Edward",
+ "Elizabeth",
+ "Hades",
+ "Julia",
+ "Pixie",
+ "Mark",
+ "Olivia",
+ "Priya",
+ "Ronald",
+ "Sarah",
+ "Shaun",
+ "Theodore",
+ "Timothy",
+ "Wendy",
+ "Dominus",
+ "Hana",
+ "Clive",
+ "Carter",
+ "Blake",
+ "Luna",
+ "Yichen",
+ "Xiaoyin",
+ "Xinyi",
+ "Jing",
+ "Erik",
+ "Katrien",
+ "Lennart",
+ "Lore",
+ "Alain",
+ "Hélène",
+ "Mathieu",
+ "Étienne",
+ "Johanna",
+ "Josef",
+ "Gianni",
+ "Orietta",
+ "Asuka",
+ "Satoshi",
+ "Hyunwoo",
+ "Minji",
+ "Seojun",
+ "Yoona",
+ "Szymon",
+ "Wojciech",
+ "Heitor",
+ "Maitê",
+ "Diego",
+ "Lupita",
+ "Miguel",
+ "Rafael",
+ "Svetlana",
+ "Elena",
+ "Dmitry",
+ "Nikolai",
+ "Riya",
+ "Manoj",
+ "Yael",
+ "Oren",
+ "Nour",
+ "Omar"
+ ],
+ "example": "Alex"
+ },
+ "model": {
+ "type": "string",
+ "description": "This is the model that will be used.",
+ "enum": [
+ "inworld-tts-1"
+ ],
+ "default": "inworld-tts-1"
+ },
+ "languageCode": {
+ "type": "string",
+ "description": "Language code for Inworld TTS synthesis",
+ "default": "en",
+ "enum": [
+ "en",
+ "zh",
+ "ko",
+ "nl",
+ "fr",
+ "es",
+ "ja",
+ "de",
+ "it",
+ "pl",
+ "pt",
+ "ru",
+ "hi",
+ "he",
+ "ar"
+ ]
+ },
+ "temperature": {
+ "type": "number",
+ "description": "A floating point number between 0, exclusive, and 2, inclusive. If equal to null or not provided, the model's default temperature of 1.1 will be used. The temperature parameter controls variance.\nHigher values will make the output more random and can lead to more expressive results. Lower values will make it more deterministic.\nSee https://docs.inworld.ai/docs/tts/capabilities/generating-audio#additional-configurations for more details.",
+ "minimum": 0.1,
+ "maximum": 2,
+ "default": 1.1,
+ "example": null
+ },
+ "speakingRate": {
+ "type": "number",
+ "description": "A floating point number between 0.5, inclusive, and 1.5, inclusive. If equal to null or not provided, the model's default speaking speed of 1.0 will be used.\nValues above 0.8 are recommended for higher quality.\nSee https://docs.inworld.ai/docs/tts/capabilities/generating-audio#additional-configurations for more details.",
+ "minimum": 0.5,
+ "maximum": 1.5,
+ "default": 1,
+ "example": null
+ },
+ "chunkPlan": {
+ "description": "This is the plan for chunking the model output before it is sent to the voice provider.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ChunkPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "voiceId"
+ ]
+ },
+ "TransportConfigurationTwilio": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "twilio"
+ ]
+ },
+ "timeout": {
+ "type": "number",
+ "description": "The integer number of seconds that we should allow the phone to ring before assuming there is no answer.\nThe default is `60` seconds and the maximum is `600` seconds.\nFor some call flows, we will add a 5-second buffer to the timeout value you provide.\nFor this reason, a timeout value of 10 seconds could result in an actual timeout closer to 15 seconds.\nYou can set this to a short time, such as `15` seconds, to hang up before reaching an answering machine or voicemail.\n\n@default 60",
+ "minimum": 1,
+ "maximum": 600,
+ "example": 60
+ },
+ "record": {
+ "type": "boolean",
+ "description": "Whether to record the call.\nCan be `true` to record the phone call, or `false` to not.\nThe default is `false`.\n\n@default false",
+ "example": false
+ },
+ "recordingChannels": {
+ "type": "string",
+ "description": "The number of channels in the final recording.\nCan be: `mono` or `dual`.\nThe default is `mono`.\n`mono` records both legs of the call in a single channel of the recording file.\n`dual` records each leg to a separate channel of the recording file.\nThe first channel of a dual-channel recording contains the parent call and the second channel contains the child call.\n\n@default 'mono'",
+ "enum": [
+ "mono",
+ "dual"
+ ],
+ "example": "mono"
+ }
+ },
+ "required": [
+ "provider"
+ ]
+ },
+ "CreateAnthropicCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "anthropic"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "maxLength": 10000,
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateAnthropicBedrockCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "anthropic-bedrock"
+ ]
+ },
+ "region": {
+ "type": "string",
+ "description": "AWS region where Bedrock is configured.",
+ "enum": [
+ "us-east-1",
+ "us-west-2",
+ "eu-west-1",
+ "eu-west-3",
+ "ap-northeast-1",
+ "ap-southeast-2"
+ ]
+ },
+ "authenticationPlan": {
+ "description": "Authentication method - either direct IAM credentials or cross-account role assumption.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AWSIAMCredentialsAuthenticationPlan"
+ },
+ {
+ "$ref": "#/components/schemas/AWSStsAuthenticationPlan"
+ }
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "region",
+ "authenticationPlan"
+ ]
+ },
+ "CreateAnyscaleCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "anyscale"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "maxLength": 10000,
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateAssemblyAICredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "assembly-ai"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "AzureBlobStorageBucketPlan": {
+ "type": "object",
+ "properties": {
+ "connectionString": {
+ "type": "string",
+ "description": "This is the blob storage connection string for the Azure resource."
+ },
+ "containerName": {
+ "type": "string",
+ "description": "This is the container name for the Azure blob storage."
+ },
+ "path": {
+ "type": "string",
+ "description": "This is the path where call artifacts will be stored.\n\nUsage:\n- To store call artifacts in a specific folder, set this to the full path. Eg. \"/folder-name1/folder-name2\".\n- To store call artifacts in the root of the bucket, leave this blank.\n\n@default \"/\""
+ }
+ },
+ "required": [
+ "connectionString",
+ "containerName"
+ ]
+ },
+ "CreateAzureCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "azure"
+ ]
+ },
+ "service": {
+ "type": "string",
+ "description": "This is the service being used in Azure.",
+ "enum": [
+ "speech",
+ "blob_storage"
+ ],
+ "default": "speech"
+ },
+ "region": {
+ "type": "string",
+ "description": "This is the region of the Azure resource.",
+ "enum": [
+ "australiaeast",
+ "canadaeast",
+ "canadacentral",
+ "centralus",
+ "eastus2",
+ "eastus",
+ "france",
+ "germanywestcentral",
+ "india",
+ "japaneast",
+ "japanwest",
+ "northcentralus",
+ "norway",
+ "polandcentral",
+ "southcentralus",
+ "spaincentral",
+ "swedencentral",
+ "switzerland",
+ "uaenorth",
+ "uk",
+ "westeurope",
+ "westus",
+ "westus3"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API.",
+ "maxLength": 10000
+ },
+ "fallbackIndex": {
+ "type": "number",
+ "minimum": 1,
+ "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order."
+ },
+ "bucketPlan": {
+ "description": "This is the bucket plan that can be provided to store call artifacts in Azure Blob Storage.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AzureBlobStorageBucketPlan"
+ }
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "service"
+ ]
+ },
+ "CreateAzureOpenAICredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "azure-openai"
+ ]
+ },
+ "region": {
+ "type": "string",
+ "enum": [
+ "australiaeast",
+ "canadaeast",
+ "canadacentral",
+ "centralus",
+ "eastus2",
+ "eastus",
+ "france",
+ "germanywestcentral",
+ "india",
+ "japaneast",
+ "japanwest",
+ "northcentralus",
+ "norway",
+ "polandcentral",
+ "southcentralus",
+ "spaincentral",
+ "swedencentral",
+ "switzerland",
+ "uaenorth",
+ "uk",
+ "westeurope",
+ "westus",
+ "westus3"
+ ]
+ },
+ "models": {
+ "type": "array",
+ "enum": [
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.2",
+ "gpt-5.2-chat",
+ "gpt-5.1",
+ "gpt-5.1-chat",
+ "gpt-5",
+ "gpt-5-mini",
+ "gpt-5-nano",
+ "gpt-4.1-2025-04-14",
+ "gpt-4.1-mini-2025-04-14",
+ "gpt-4.1-nano-2025-04-14",
+ "gpt-4o-2024-11-20",
+ "gpt-4o-2024-08-06",
+ "gpt-4o-2024-05-13",
+ "gpt-4o-mini-2024-07-18",
+ "gpt-4-turbo-2024-04-09",
+ "gpt-4-0125-preview",
+ "gpt-4-1106-preview",
+ "gpt-4-0613",
+ "gpt-35-turbo-0125",
+ "gpt-35-turbo-1106"
+ ],
+ "example": [
+ "gpt-4-0125-preview",
+ "gpt-4-0613"
+ ],
+ "items": {
+ "type": "string",
+ "enum": [
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.2",
+ "gpt-5.2-chat",
+ "gpt-5.1",
+ "gpt-5.1-chat",
+ "gpt-5",
+ "gpt-5-mini",
+ "gpt-5-nano",
+ "gpt-4.1-2025-04-14",
+ "gpt-4.1-mini-2025-04-14",
+ "gpt-4.1-nano-2025-04-14",
+ "gpt-4o-2024-11-20",
+ "gpt-4o-2024-08-06",
+ "gpt-4o-2024-05-13",
+ "gpt-4o-mini-2024-07-18",
+ "gpt-4-turbo-2024-04-09",
+ "gpt-4-0125-preview",
+ "gpt-4-1106-preview",
+ "gpt-4-0613",
+ "gpt-35-turbo-0125",
+ "gpt-35-turbo-1106"
+ ]
+ }
+ },
+ "openAIKey": {
+ "type": "string",
+ "maxLength": 10000,
+ "description": "This is not returned in the API."
+ },
+ "ocpApimSubscriptionKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "openAIEndpoint": {
+ "type": "string",
+ "maxLength": 10000
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "region",
+ "models",
+ "openAIKey",
+ "openAIEndpoint"
+ ]
+ },
+ "SipTrunkGateway": {
+ "type": "object",
+ "properties": {
+ "ip": {
+ "type": "string",
+ "description": "This is the address of the gateway. It can be an IPv4 address like 1.1.1.1 or a fully qualified domain name like my-sip-trunk.pstn.twilio.com."
+ },
+ "port": {
+ "type": "number",
+ "description": "This is the port number of the gateway. Default is 5060.\n\n@default 5060",
+ "minimum": 1,
+ "maximum": 65535
+ },
+ "netmask": {
+ "type": "number",
+ "description": "This is the netmask of the gateway. Defaults to 32.\n\n@default 32",
+ "minimum": 24,
+ "maximum": 32
+ },
+ "inboundEnabled": {
+ "type": "boolean",
+ "description": "This is whether inbound calls are allowed from this gateway. Default is true.\n\n@default true"
+ },
+ "outboundEnabled": {
+ "type": "boolean",
+ "description": "This is whether outbound calls should be sent to this gateway. Default is true.\n\nNote, if netmask is less than 32, it doesn't affect the outbound IPs that are tried. 1 attempt is made to `ip:port`.\n\n@default true"
+ },
+ "outboundProtocol": {
+ "type": "string",
+ "description": "This is the protocol to use for SIP signaling outbound calls. Default is udp.\n\n@default udp",
+ "enum": [
+ "tls/srtp",
+ "tcp",
+ "tls",
+ "udp"
+ ]
+ },
+ "optionsPingEnabled": {
+ "type": "boolean",
+ "description": "This is whether to send options ping to the gateway. This can be used to check if the gateway is reachable. Default is false.\n\nThis is useful for high availability setups where you want to check if the gateway is reachable before routing calls to it. Note, if no gateway for a trunk is reachable, outbound calls will be rejected.\n\n@default false"
+ }
+ },
+ "required": [
+ "ip"
+ ]
+ },
+ "SipTrunkOutboundSipRegisterPlan": {
+ "type": "object",
+ "properties": {
+ "domain": {
+ "type": "string"
+ },
+ "username": {
+ "type": "string"
+ },
+ "realm": {
+ "type": "string"
+ }
+ }
+ },
+ "SipTrunkOutboundAuthenticationPlan": {
+ "type": "object",
+ "properties": {
+ "authPassword": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "authUsername": {
+ "type": "string"
+ },
+ "sipRegisterPlan": {
+ "description": "This can be used to configure if SIP register is required by the SIP trunk. If not provided, no SIP registration will be attempted.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SipTrunkOutboundSipRegisterPlan"
+ }
+ ]
+ }
+ }
+ },
+ "SbcConfiguration": {
+ "type": "object",
+ "properties": {}
+ },
+ "CreateByoSipTrunkCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "description": "This can be used to bring your own SIP trunks or to connect to a Carrier.",
+ "enum": [
+ "byo-sip-trunk"
+ ]
+ },
+ "gateways": {
+ "description": "This is the list of SIP trunk's gateways.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/SipTrunkGateway"
+ }
+ },
+ "outboundAuthenticationPlan": {
+ "description": "This can be used to configure the outbound authentication if required by the SIP trunk.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SipTrunkOutboundAuthenticationPlan"
+ }
+ ]
+ },
+ "outboundLeadingPlusEnabled": {
+ "type": "boolean",
+ "description": "This ensures the outbound origination attempts have a leading plus. Defaults to false to match conventional telecom behavior.\n\nUsage:\n- Vonage/Twilio requires leading plus for all outbound calls. Set this to true.\n\n@default false"
+ },
+ "techPrefix": {
+ "type": "string",
+ "description": "This can be used to configure the tech prefix on outbound calls. This is an advanced property.",
+ "maxLength": 10000
+ },
+ "sipDiversionHeader": {
+ "type": "string",
+ "description": "This can be used to enable the SIP diversion header for authenticating the calling number if the SIP trunk supports it. This is an advanced property.",
+ "maxLength": 10000
+ },
+ "sbcConfiguration": {
+ "description": "This is an advanced configuration for enterprise deployments. This uses the onprem SBC to trunk into the SIP trunk's `gateways`, rather than the managed SBC provided by Vapi.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SbcConfiguration"
+ }
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "gateways"
+ ]
+ },
+ "CreateCartesiaCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "cartesia"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CloudflareR2BucketPlan": {
+ "type": "object",
+ "properties": {
+ "accessKeyId": {
+ "type": "string",
+ "description": "Cloudflare R2 Access key ID."
+ },
+ "secretAccessKey": {
+ "type": "string",
+ "description": "Cloudflare R2 access key secret. This is not returned in the API."
+ },
+ "url": {
+ "type": "string",
+ "description": "Cloudflare R2 base url."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the bucket."
+ },
+ "path": {
+ "type": "string",
+ "description": "This is the path where call artifacts will be stored.\n\nUsage:\n- To store call artifacts in a specific folder, set this to the full path. Eg. \"/folder-name1/folder-name2\".\n- To store call artifacts in the root of the bucket, leave this blank.\n\n@default \"/\""
+ }
+ },
+ "required": [
+ "name"
+ ]
+ },
+ "CreateCloudflareCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "cloudflare"
+ ],
+ "description": "Credential provider. Only allowed value is cloudflare"
+ },
+ "accountId": {
+ "type": "string",
+ "description": "Cloudflare Account Id."
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "Cloudflare API Key / Token."
+ },
+ "accountEmail": {
+ "type": "string",
+ "description": "Cloudflare Account Email."
+ },
+ "fallbackIndex": {
+ "type": "number",
+ "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order.",
+ "minimum": 1
+ },
+ "bucketPlan": {
+ "description": "This is the bucket plan that can be provided to store call artifacts in R2",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CloudflareR2BucketPlan"
+ }
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider"
+ ]
+ },
+ "OAuth2AuthenticationPlan": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "url": {
+ "type": "string",
+ "description": "This is the OAuth2 URL."
+ },
+ "clientId": {
+ "type": "string",
+ "description": "This is the OAuth2 client ID."
+ },
+ "clientSecret": {
+ "type": "string",
+ "description": "This is the OAuth2 client secret."
+ },
+ "scope": {
+ "type": "string",
+ "description": "This is the scope of the OAuth2 token.",
+ "maxLength": 1000
+ }
+ },
+ "required": [
+ "type",
+ "url",
+ "clientId",
+ "clientSecret"
+ ]
+ },
+ "CreateCustomLLMCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "custom-llm"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "maxLength": 10000,
+ "description": "This is not returned in the API."
+ },
+ "authenticationPlan": {
+ "description": "This is the authentication plan. Currently supports OAuth2 RFC 6749. To use Bearer authentication, use apiKey",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/OAuth2AuthenticationPlan"
+ }
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateDeepgramCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "deepgram"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "apiUrl": {
+ "type": "string",
+ "description": "This can be used to point to an onprem Deepgram instance. Defaults to api.deepgram.com."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateDeepInfraCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "deepinfra"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateDeepSeekCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "deep-seek"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateElevenLabsCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "11labs"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "maxLength": 10000,
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "GcpKey": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of the key. Most likely, this is \"service_account\"."
+ },
+ "projectId": {
+ "type": "string",
+ "description": "This is the ID of the Google Cloud project associated with this key."
+ },
+ "privateKeyId": {
+ "type": "string",
+ "description": "This is the unique identifier for the private key."
+ },
+ "privateKey": {
+ "type": "string",
+ "description": "This is the private key in PEM format.\n\nNote: This is not returned in the API."
+ },
+ "clientEmail": {
+ "type": "string",
+ "description": "This is the email address associated with the service account."
+ },
+ "clientId": {
+ "type": "string",
+ "description": "This is the unique identifier for the client."
+ },
+ "authUri": {
+ "type": "string",
+ "description": "This is the URI for the auth provider's authorization endpoint."
+ },
+ "tokenUri": {
+ "type": "string",
+ "description": "This is the URI for the auth provider's token endpoint."
+ },
+ "authProviderX509CertUrl": {
+ "type": "string",
+ "description": "This is the URL of the public x509 certificate for the auth provider."
+ },
+ "clientX509CertUrl": {
+ "type": "string",
+ "description": "This is the URL of the public x509 certificate for the client."
+ },
+ "universeDomain": {
+ "type": "string",
+ "description": "This is the domain associated with the universe this service account belongs to."
+ }
+ },
+ "required": [
+ "type",
+ "projectId",
+ "privateKeyId",
+ "privateKey",
+ "clientEmail",
+ "clientId",
+ "authUri",
+ "tokenUri",
+ "authProviderX509CertUrl",
+ "clientX509CertUrl",
+ "universeDomain"
+ ]
+ },
+ "BucketPlan": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the bucket."
+ },
+ "region": {
+ "type": "string",
+ "description": "This is the region of the bucket.\n\nUsage:\n- If `credential.type` is `aws`, then this is required.\n- If `credential.type` is `gcp`, then this is optional since GCP allows buckets to be accessed without a region but region is required for data residency requirements. Read here: https://cloud.google.com/storage/docs/request-endpoints\n\nThis overrides the `credential.region` field if it is provided."
+ },
+ "path": {
+ "type": "string",
+ "description": "This is the path where call artifacts will be stored.\n\nUsage:\n- To store call artifacts in a specific folder, set this to the full path. Eg. \"/folder-name1/folder-name2\".\n- To store call artifacts in the root of the bucket, leave this blank.\n\n@default \"/\""
+ },
+ "hmacAccessKey": {
+ "type": "string",
+ "description": "This is the HMAC access key offered by GCP for interoperability with S3 clients. Here is the guide on how to create: https://cloud.google.com/storage/docs/authentication/managing-hmackeys#console\n\nUsage:\n- If `credential.type` is `gcp`, then this is required.\n- If `credential.type` is `aws`, then this is not required since credential.awsAccessKeyId is used instead."
+ },
+ "hmacSecret": {
+ "type": "string",
+ "description": "This is the secret for the HMAC access key. Here is the guide on how to create: https://cloud.google.com/storage/docs/authentication/managing-hmackeys#console\n\nUsage:\n- If `credential.type` is `gcp`, then this is required.\n- If `credential.type` is `aws`, then this is not required since credential.awsSecretAccessKey is used instead.\n\nNote: This is not returned in the API."
+ }
+ },
+ "required": [
+ "name"
+ ]
+ },
+ "CreateGcpCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "gcp"
+ ]
+ },
+ "fallbackIndex": {
+ "type": "number",
+ "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order.",
+ "minimum": 1
+ },
+          "gcpKey": {
+            "description": "This is the GCP key. This is the JSON that can be generated in the Google Cloud Console at https://console.cloud.google.com/iam-admin/serviceaccounts/details/<service-account-id>/keys.\n\nThe schema is identical to the JSON that GCP outputs.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/GcpKey"
+ }
+ ]
+ },
+ "region": {
+ "type": "string",
+ "description": "This is the region of the GCP resource.",
+ "maxLength": 40
+ },
+ "bucketPlan": {
+ "$ref": "#/components/schemas/BucketPlan"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "gcpKey"
+ ]
+ },
+ "CreateGladiaCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "gladia"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateGoHighLevelCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "gohighlevel"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateGroqCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "groq"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateLangfuseCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "langfuse"
+ ]
+ },
+ "publicKey": {
+ "type": "string",
+ "description": "The public key for Langfuse project. Eg: pk-lf-..."
+ },
+          "apiKey": {
+            "type": "string",
+            "description": "The secret key for Langfuse project. Eg: sk-lf-... This is not returned in the API."
+          },
+ "apiUrl": {
+ "type": "string",
+ "description": "The host URL for Langfuse project. Eg: https://cloud.langfuse.com"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "publicKey",
+ "apiKey",
+ "apiUrl"
+ ]
+ },
+ "CreateLmntCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "lmnt"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateMakeCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "make"
+ ]
+ },
+ "teamId": {
+ "type": "string",
+ "description": "Team ID"
+ },
+ "region": {
+ "type": "string",
+ "description": "Region of your application. For example: eu1, eu2, us1, us2"
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "teamId",
+ "region",
+ "apiKey"
+ ]
+ },
+ "CreateOpenAICredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "openai"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateOpenRouterCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "openrouter"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreatePerplexityAICredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "perplexity-ai"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreatePlayHTCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "playht"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "userId": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey",
+ "userId"
+ ]
+ },
+ "CreateRimeAICredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "rime-ai"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateRunpodCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "runpod"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateS3CredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "s3"
+ ],
+ "description": "Credential provider. Only allowed value is s3"
+ },
+ "awsAccessKeyId": {
+ "type": "string",
+ "description": "AWS access key ID."
+ },
+ "awsSecretAccessKey": {
+ "type": "string",
+ "description": "AWS access key secret. This is not returned in the API."
+ },
+ "region": {
+ "type": "string",
+ "description": "AWS region in which the S3 bucket is located."
+ },
+ "s3BucketName": {
+ "type": "string",
+ "description": "AWS S3 bucket name."
+ },
+ "s3PathPrefix": {
+ "type": "string",
+ "description": "The path prefix for the uploaded recording. Ex. \"recordings/\""
+ },
+ "fallbackIndex": {
+ "type": "number",
+ "minimum": 1,
+ "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "awsAccessKeyId",
+ "awsSecretAccessKey",
+ "region",
+ "s3BucketName",
+ "s3PathPrefix"
+ ]
+ },
+ "SupabaseBucketPlan": {
+ "type": "object",
+ "properties": {
+ "region": {
+ "type": "string",
+ "description": "This is the S3 Region. It should look like us-east-1\nIt should be one of the supabase regions defined in the SUPABASE_REGION enum\nCheck https://supabase.com/docs/guides/platform/regions for up to date regions",
+ "enum": [
+ "us-west-1",
+ "us-east-1",
+ "us-east-2",
+ "ca-central-1",
+ "eu-west-1",
+ "eu-west-2",
+ "eu-west-3",
+ "eu-central-1",
+ "eu-central-2",
+ "eu-north-1",
+ "ap-south-1",
+ "ap-southeast-1",
+ "ap-northeast-1",
+ "ap-northeast-2",
+ "ap-southeast-2",
+ "sa-east-1"
+ ]
+ },
+          "url": {
+            "type": "string",
+            "description": "This is the S3 compatible URL for Supabase S3\nThis should look like https://<project-ref>.supabase.co/storage/v1/s3"
+          },
+ "accessKeyId": {
+ "type": "string",
+ "description": "This is the Supabase S3 Access Key ID.\nThe user creates this in the Supabase project Storage settings"
+ },
+ "secretAccessKey": {
+ "type": "string",
+ "description": "This is the Supabase S3 Secret Access Key.\nThe user creates this in the Supabase project Storage settings along with the access key id"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the Supabase S3 Bucket Name.\nThe user must create this in Supabase under Storage > Buckets\nA bucket that does not exist will not be checked now, but file uploads will fail"
+ },
+ "path": {
+ "type": "string",
+ "description": "This is the Supabase S3 Bucket Folder Path.\nThe user can create this in Supabase under Storage > Buckets\nA path that does not exist will not be checked now, but file uploads will fail\nA Path is like a folder in the bucket\nEg. If the bucket is called \"my-bucket\" and the path is \"my-folder\", the full path is \"my-bucket/my-folder\""
+ }
+ },
+ "required": [
+ "region",
+ "url",
+ "accessKeyId",
+ "secretAccessKey",
+ "name"
+ ]
+ },
+ "CreateSupabaseCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "supabase"
+ ],
+ "description": "This is for supabase storage."
+ },
+ "fallbackIndex": {
+ "type": "number",
+ "minimum": 1,
+ "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order."
+ },
+ "bucketPlan": {
+ "$ref": "#/components/schemas/SupabaseBucketPlan"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider"
+ ]
+ },
+ "CreateSmallestAICredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "smallest-ai"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateTavusCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "tavus"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateTogetherAICredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "together-ai"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateTwilioCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "twilio"
+ ]
+ },
+ "authToken": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "apiSecret": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "accountSid": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "accountSid"
+ ]
+ },
+ "CreateVonageCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "vonage"
+ ]
+ },
+ "apiSecret": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "apiKey": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiSecret",
+ "apiKey"
+ ]
+ },
+ "CreateWebhookCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "webhook"
+ ]
+ },
+ "authenticationPlan": {
+ "description": "This is the authentication plan. Supports OAuth2 RFC 6749, HMAC signing, and Bearer authentication.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/OAuth2AuthenticationPlan"
+ },
+ {
+ "$ref": "#/components/schemas/HMACAuthenticationPlan"
+ },
+ {
+ "$ref": "#/components/schemas/BearerAuthenticationPlan"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "oauth2": "#/components/schemas/OAuth2AuthenticationPlan",
+ "hmac": "#/components/schemas/HMACAuthenticationPlan",
+ "bearer": "#/components/schemas/BearerAuthenticationPlan"
+ }
+ }
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "authenticationPlan"
+ ]
+ },
+ "CreateXAiCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "description": "This is the api key for Grok in XAi's console. Get it from here: https://console.x.ai",
+ "enum": [
+ "xai"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "maxLength": 10000,
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateGoogleCalendarOAuth2ClientCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "google.calendar.oauth2-client"
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider"
+ ]
+ },
+ "CreateGoogleCalendarOAuth2AuthorizationCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "google.calendar.oauth2-authorization"
+ ]
+ },
+ "authorizationId": {
+ "type": "string",
+ "description": "The authorization ID for the OAuth2 authorization"
+ },
+ "name": {
+ "type": "string",
+          "description": "This is the name of the credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "authorizationId"
+ ]
+ },
+ "CreateGoogleSheetsOAuth2AuthorizationCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "google.sheets.oauth2-authorization"
+ ]
+ },
+ "authorizationId": {
+ "type": "string",
+ "description": "The authorization ID for the OAuth2 authorization"
+ },
+ "name": {
+ "type": "string",
+          "description": "This is the name of the credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "authorizationId"
+ ]
+ },
+ "CreateSlackOAuth2AuthorizationCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "slack.oauth2-authorization"
+ ]
+ },
+ "authorizationId": {
+ "type": "string",
+ "description": "The authorization ID for the OAuth2 authorization"
+ },
+ "name": {
+ "type": "string",
+          "description": "This is the name of the credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "authorizationId"
+ ]
+ },
+ "CreateMinimaxCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "minimax"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "groupId": {
+ "type": "string",
+ "description": "This is the Minimax Group ID."
+ },
+ "name": {
+ "type": "string",
+          "description": "This is the name of the credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey",
+ "groupId"
+ ]
+ },
+ "EndpointedSpeechLowConfidenceOptions": {
+ "type": "object",
+ "properties": {
+ "confidenceMin": {
+ "type": "number",
+ "description": "This is the minimum confidence threshold.\nTranscripts with confidence below this value will be discarded.\n\n@default confidenceMax - 0.2",
+ "minimum": 0,
+ "maximum": 1
+ },
+ "confidenceMax": {
+ "type": "number",
+ "description": "This is the maximum confidence threshold.\nTranscripts with confidence at or above this value will be processed normally.\n\n@default transcriber's confidenceThreshold",
+ "minimum": 0,
+ "maximum": 1
+ }
+ }
+ },
+ "CallHookTranscriberEndpointedSpeechLowConfidence": {
+ "type": "object",
+ "properties": {
+ "do": {
+ "type": "array",
+ "description": "This is the set of actions to perform when the hook triggers",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SayHookAction",
+ "title": "SayHookAction"
+ },
+ {
+ "$ref": "#/components/schemas/ToolCallHookAction",
+ "title": "ToolCallHookAction"
+ },
+ {
+ "$ref": "#/components/schemas/MessageAddHookAction",
+ "title": "MessageAddHookAction"
+ }
+ ]
+ }
+ },
+ "on": {
+ "type": "string",
+ "description": "This is the event that triggers this hook",
+ "maxLength": 1000
+ },
+ "options": {
+ "description": "This is the options for the hook including confidence thresholds",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/EndpointedSpeechLowConfidenceOptions"
+ }
+ ]
+ }
+ },
+ "required": [
+ "do",
+ "on"
+ ]
+ },
+ "SessionCreatedHook": {
+ "type": "object",
+ "properties": {
+ "on": {
+ "type": "string",
+ "description": "This is the event that triggers this hook",
+ "enum": [
+ "session.created"
+ ],
+ "maxLength": 1000
+ },
+ "do": {
+ "type": "array",
+ "description": "This is the set of actions to perform when the hook triggers.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolCallHookAction",
+ "title": "ToolCallHookAction"
+ }
+ ]
+ }
+ },
+ "name": {
+ "type": "string",
+ "description": "Optional name for this hook instance.\nIf no name is provided, the hook will be auto generated as UUID.\n\n@default UUID",
+ "maxLength": 1000
+ }
+ },
+ "required": [
+ "on",
+ "do"
+ ]
+ },
+ "SQLInjectionSecurityFilter": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "The type of security threat to filter.",
+ "enum": [
+ "sql-injection"
+ ]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "XSSSecurityFilter": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "The type of security threat to filter.",
+ "enum": [
+ "xss"
+ ]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "SSRFSecurityFilter": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "The type of security threat to filter.",
+ "enum": [
+ "ssrf"
+ ]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "RCESecurityFilter": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "The type of security threat to filter.",
+ "enum": [
+ "rce"
+ ]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "PromptInjectionSecurityFilter": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "The type of security threat to filter.",
+ "enum": [
+ "prompt-injection"
+ ]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "RegexSecurityFilter": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "The type of security threat to filter.",
+ "enum": [
+ "regex"
+ ]
+ },
+ "regex": {
+ "type": "string",
+ "description": "The regex pattern to filter.",
+ "example": "badword1|badword2"
+ }
+ },
+ "required": [
+ "type",
+ "regex"
+ ]
+ },
+ "AssistantOverrides": {
+ "type": "object",
+ "properties": {
+ "transcriber": {
+ "description": "These are the options for the assistant's transcriber.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AssemblyAITranscriber",
+ "title": "AssemblyAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/AzureSpeechTranscriber",
+ "title": "AzureSpeechTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CustomTranscriber",
+ "title": "CustomTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramTranscriber",
+ "title": "DeepgramTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsTranscriber",
+ "title": "ElevenLabsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GladiaTranscriber",
+ "title": "GladiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleTranscriber",
+ "title": "GoogleTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SpeechmaticsTranscriber",
+ "title": "SpeechmaticsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/TalkscriberTranscriber",
+ "title": "TalkscriberTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAITranscriber",
+ "title": "OpenAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaTranscriber",
+ "title": "CartesiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SonioxTranscriber",
+ "title": "SonioxTranscriber"
+ }
+ ]
+ },
+ "model": {
+ "description": "These are the options for the assistant's LLM.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AnthropicModel",
+ "title": "Anthropic"
+ },
+ {
+ "$ref": "#/components/schemas/AnthropicBedrockModel",
+ "title": "AnthropicBedrock"
+ },
+ {
+ "$ref": "#/components/schemas/AnyscaleModel",
+ "title": "Anyscale"
+ },
+ {
+ "$ref": "#/components/schemas/CerebrasModel",
+ "title": "Cerebras"
+ },
+ {
+ "$ref": "#/components/schemas/CustomLLMModel",
+ "title": "CustomLLM"
+ },
+ {
+ "$ref": "#/components/schemas/DeepInfraModel",
+ "title": "DeepInfra"
+ },
+ {
+ "$ref": "#/components/schemas/DeepSeekModel",
+ "title": "DeepSeek"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleModel",
+ "title": "Google"
+ },
+ {
+ "$ref": "#/components/schemas/GroqModel",
+ "title": "Groq"
+ },
+ {
+ "$ref": "#/components/schemas/InflectionAIModel",
+ "title": "InflectionAI"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxLLMModel",
+ "title": "MiniMaxLLM"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIModel",
+ "title": "OpenAI"
+ },
+ {
+ "$ref": "#/components/schemas/OpenRouterModel",
+ "title": "OpenRouter"
+ },
+ {
+ "$ref": "#/components/schemas/PerplexityAIModel",
+ "title": "PerplexityAI"
+ },
+ {
+ "$ref": "#/components/schemas/TogetherAIModel",
+ "title": "Together"
+ },
+ {
+ "$ref": "#/components/schemas/XaiModel",
+ "title": "XAI"
+ }
+ ]
+ },
+ "voice": {
+ "description": "These are the options for the assistant's voice.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AzureVoice",
+ "title": "AzureVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaVoice",
+ "title": "CartesiaVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CustomVoice",
+ "title": "CustomVoice"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramVoice",
+ "title": "DeepgramVoice"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsVoice",
+ "title": "ElevenLabsVoice"
+ },
+ {
+ "$ref": "#/components/schemas/HumeVoice",
+ "title": "HumeVoice"
+ },
+ {
+ "$ref": "#/components/schemas/LMNTVoice",
+ "title": "LMNTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/NeuphonicVoice",
+ "title": "NeuphonicVoice"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoice",
+ "title": "OpenAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/PlayHTVoice",
+ "title": "PlayHTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/WellSaidVoice",
+ "title": "WellSaidVoice"
+ },
+ {
+ "$ref": "#/components/schemas/RimeAIVoice",
+ "title": "RimeAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SmallestAIVoice",
+ "title": "SmallestAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/TavusVoice",
+ "title": "TavusVoice"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoice",
+ "title": "VapiVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SesameVoice",
+ "title": "SesameVoice"
+ },
+ {
+ "$ref": "#/components/schemas/InworldVoice",
+ "title": "InworldVoice"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxVoice",
+ "title": "MinimaxVoice"
+ }
+ ]
+ },
+ "firstMessage": {
+ "type": "string",
+ "description": "This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.).\n\nIf unspecified, assistant will wait for user to speak and use the model to respond once they speak.",
+ "example": "Hello! How can I help you today?"
+ },
+ "firstMessageInterruptionsEnabled": {
+ "type": "boolean",
+ "default": false
+ },
+ "firstMessageMode": {
+ "type": "string",
+ "description": "This is the mode for the first message. Default is 'assistant-speaks-first'.\n\nUse:\n- 'assistant-speaks-first' to have the assistant speak first.\n- 'assistant-waits-for-user' to have the assistant wait for the user to speak first.\n- 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points).\n\n@default 'assistant-speaks-first'",
+ "enum": [
+ "assistant-speaks-first",
+ "assistant-speaks-first-with-model-generated-message",
+ "assistant-waits-for-user"
+ ],
+ "example": "assistant-speaks-first"
+ },
+ "voicemailDetection": {
+ "description": "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nBy default, voicemail detection is disabled.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "off"
+ ]
+ },
+ {
+ "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan",
+ "title": "Google"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoicemailDetectionPlan",
+ "title": "OpenAI"
+ },
+ {
+ "$ref": "#/components/schemas/TwilioVoicemailDetectionPlan",
+ "title": "Twilio"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoicemailDetectionPlan",
+ "title": "Vapi"
+ }
+ ]
+ },
+ "clientMessages": {
+ "type": "array",
+ "enum": [
+ "conversation-update",
+ "assistant.speechStarted",
+ "function-call",
+ "function-call-result",
+ "hang",
+ "language-changed",
+ "metadata",
+ "model-output",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "tool-calls",
+ "tool-calls-result",
+ "tool.completed",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "workflow.node.started",
+ "assistant.started"
+ ],
+ "example": [
+ "conversation-update",
+ "function-call",
+ "hang",
+ "model-output",
+ "speech-update",
+ "status-update",
+ "transfer-update",
+ "transcript",
+ "tool-calls",
+ "user-interrupted",
+ "voice-input",
+ "workflow.node.started",
+ "assistant.started"
+ ],
+ "description": "These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input,workflow.node.started,assistant.started. You can check the shape of the messages in ClientMessage schema.",
+ "items": {
+ "type": "string",
+ "enum": [
+ "conversation-update",
+ "assistant.speechStarted",
+ "function-call",
+ "function-call-result",
+ "hang",
+ "language-changed",
+ "metadata",
+ "model-output",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "tool-calls",
+ "tool-calls-result",
+ "tool.completed",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "workflow.node.started",
+ "assistant.started"
+ ]
+ }
+ },
+ "serverMessages": {
+ "type": "array",
+ "enum": [
+ "assistant.started",
+ "assistant.speechStarted",
+ "conversation-update",
+ "end-of-call-report",
+ "function-call",
+ "hang",
+ "language-changed",
+ "language-change-detected",
+ "model-output",
+ "phone-call-control",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "transcript[transcriptType=\"final\"]",
+ "tool-calls",
+ "transfer-destination-request",
+ "handoff-destination-request",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "chat.created",
+ "chat.deleted",
+ "session.created",
+ "session.updated",
+ "session.deleted",
+ "call.deleted",
+ "call.delete.failed"
+ ],
+ "example": [
+ "conversation-update",
+ "end-of-call-report",
+ "function-call",
+ "hang",
+ "speech-update",
+ "status-update",
+ "tool-calls",
+ "transfer-destination-request",
+ "handoff-destination-request",
+ "user-interrupted",
+ "assistant.started"
+ ],
+ "description": "These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,handoff-destination-request,user-interrupted,assistant.started. You can check the shape of the messages in ServerMessage schema.",
+ "items": {
+ "type": "string",
+ "enum": [
+ "assistant.started",
+ "assistant.speechStarted",
+ "conversation-update",
+ "end-of-call-report",
+ "function-call",
+ "hang",
+ "language-changed",
+ "language-change-detected",
+ "model-output",
+ "phone-call-control",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "transcript[transcriptType=\"final\"]",
+ "tool-calls",
+ "transfer-destination-request",
+ "handoff-destination-request",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "chat.created",
+ "chat.deleted",
+ "session.created",
+ "session.updated",
+ "session.deleted",
+ "call.deleted",
+ "call.delete.failed"
+ ]
+ }
+ },
+ "maxDurationSeconds": {
+ "type": "number",
+ "description": "This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended.\n\n@default 600 (10 minutes)",
+ "minimum": 10,
+ "maximum": 43200,
+ "example": 600
+ },
+ "backgroundSound": {
+ "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "off",
+ "office"
+ ],
+ "example": "office"
+ },
+ {
+ "type": "string",
+ "format": "uri",
+ "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
+ }
+ ]
+ },
+ "modelOutputInMessagesEnabled": {
+ "type": "boolean",
+ "description": "This determines whether the model's output is used in conversation history rather than the transcription of assistant's speech.\n\n@default false",
+ "example": false
+ },
+ "transportConfigurations": {
+ "type": "array",
+ "description": "These are the configurations to be passed to the transport providers of assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransportConfigurationTwilio",
+ "title": "Twilio"
+ }
+ ]
+ }
+ },
+ "observabilityPlan": {
+ "description": "This is the plan for observability of assistant's calls.\n\nCurrently, only Langfuse is supported.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan",
+ "title": "Langfuse"
+ }
+ ],
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan"
+ }
+ ]
+ },
+ "credentials": {
+ "type": "array",
+          "description": "These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can supplement additional credentials using this. Dynamic credentials override existing credentials.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "title": "AnthropicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "title": "AnthropicBedrockCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "title": "AnyscaleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "title": "AssemblyAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureCredentialDTO",
+ "title": "AzureCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "title": "AzureOpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "title": "ByoSipTrunkCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "title": "CartesiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "title": "CerebrasCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "title": "CloudflareCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "title": "CustomLLMCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "title": "DeepgramCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "title": "DeepInfraCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "title": "DeepSeekCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "title": "ElevenLabsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGcpCredentialDTO",
+ "title": "GcpCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
+ "title": "GladiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "title": "GhlCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
+ "title": "GoogleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGroqCredentialDTO",
+ "title": "GroqCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHumeCredentialDTO",
+ "title": "HumeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "title": "InflectionAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "title": "LangfuseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLmntCredentialDTO",
+ "title": "LmntCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMakeCredentialDTO",
+ "title": "MakeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMistralCredentialDTO",
+ "title": "MistralCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "title": "NeuphonicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
+ "title": "OpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "title": "OpenRouterCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "title": "PerplexityAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "title": "PlayHTCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
+ "title": "RimeAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
+ "title": "RunpodCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateS3CredentialDTO",
+ "title": "S3Credential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "title": "SmallestAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "title": "SpeechmaticsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSonioxCredentialDTO",
+ "title": "SonioxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "title": "SupabaseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTavusCredentialDTO",
+ "title": "TavusCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "title": "TogetherAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
+ "title": "TrieveCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
+ "title": "TwilioCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonageCredentialDTO",
+ "title": "VonageCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
+ "title": "WebhookCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomCredentialDTO",
+ "title": "CustomCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateXAiCredentialDTO",
+ "title": "XAiCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "title": "GoogleCalendarOAuth2ClientCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleCalendarOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleSheetsOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "title": "SlackOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "title": "GoHighLevelMCPCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInworldCredentialDTO",
+ "title": "InworldCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "title": "MinimaxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "title": "WellSaidCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEmailCredentialDTO",
+ "title": "EmailCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackWebhookCredentialDTO",
+ "title": "SlackWebhookCredential"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "provider",
+ "mapping": {
+ "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "anthropic-bedrock": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "azure": "#/components/schemas/CreateAzureCredentialDTO",
+ "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "gcp": "#/components/schemas/CreateGcpCredentialDTO",
+ "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
+ "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "google": "#/components/schemas/CreateGoogleCredentialDTO",
+ "groq": "#/components/schemas/CreateGroqCredentialDTO",
+ "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
+ "make": "#/components/schemas/CreateMakeCredentialDTO",
+ "openai": "#/components/schemas/CreateOpenAICredentialDTO",
+ "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
+ "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
+ "s3": "#/components/schemas/CreateS3CredentialDTO",
+ "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "tavus": "#/components/schemas/CreateTavusCredentialDTO",
+ "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
+ "vonage": "#/components/schemas/CreateVonageCredentialDTO",
+ "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
+ "custom-credential": "#/components/schemas/CreateCustomCredentialDTO",
+ "xai": "#/components/schemas/CreateXAiCredentialDTO",
+ "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "hume": "#/components/schemas/CreateHumeCredentialDTO",
+ "mistral": "#/components/schemas/CreateMistralCredentialDTO",
+ "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "soniox": "#/components/schemas/CreateSonioxCredentialDTO",
+ "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
+ "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "inworld": "#/components/schemas/CreateInworldCredentialDTO",
+ "minimax": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "wellsaid": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "email": "#/components/schemas/CreateEmailCredentialDTO",
+ "slack-webhook": "#/components/schemas/CreateSlackWebhookCredentialDTO"
+ }
+ }
+ }
+ },
+ "hooks": {
+ "type": "array",
+ "description": "This is a set of actions that will be performed on certain events.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CallHookCallEnding",
+ "title": "CallHookCallEnding"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
+ "title": "CallHookAssistantSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
+ "title": "CallHookCustomerSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
+ "title": "CallHookCustomerSpeechTimeout"
+ },
+ {
+ "$ref": "#/components/schemas/SessionCreatedHook",
+ "title": "SessionCreatedHook"
+ }
+ ]
+ }
+ },
+ "tools:append": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateApiRequestToolDTO",
+ "title": "ApiRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateBashToolDTO",
+ "title": "BashTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateComputerToolDTO",
+ "title": "ComputerTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDtmfToolDTO",
+ "title": "DtmfTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEndCallToolDTO",
+ "title": "EndCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateFunctionToolDTO",
+ "title": "FunctionTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
+ "title": "GoHighLevelCalendarAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
+ "title": "GoHighLevelCalendarEventCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
+ "title": "GoHighLevelContactCreateTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
+ "title": "GoHighLevelContactGetTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
+ "title": "GoogleCalendarCheckAvailabilityTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
+ "title": "GoogleCalendarCreateEventTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
+ "title": "GoogleSheetsRowAppendTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHandoffToolDTO",
+ "title": "HandoffTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMcpToolDTO",
+ "title": "McpTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateQueryToolDTO",
+ "title": "QueryTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
+ "title": "SlackSendMessageTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmsToolDTO",
+ "title": "SmsTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTextEditorToolDTO",
+ "title": "TextEditorTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTransferCallToolDTO",
+ "title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
+ }
+ ]
+ }
+ },
+ "variableValues": {
+ "type": "object",
+ "description": "These are values that will be used to replace the template variables in the assistant messages and other text-based fields.\nThis uses LiquidJS syntax. https://liquidjs.com/tutorials/intro-to-liquid.html\n\nSo for example, `{{ name }}` will be replaced with the value of `name` in `variableValues`.\n`{{\"now\" | date: \"%b %d, %Y, %I:%M %p\", \"America/New_York\"}}` will be replaced with the current date and time in New York.\n Some VAPI reserved defaults:\n - *customer* - the customer object"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the assistant.\n\nThis is required when you want to transfer between assistants in a call.",
+ "maxLength": 40
+ },
+ "voicemailMessage": {
+ "type": "string",
+ "description": "This is the message that the assistant will say if the call is forwarded to voicemail.\n\nIf unspecified, it will hang up.",
+ "maxLength": 1000
+ },
+ "endCallMessage": {
+ "type": "string",
+ "description": "This is the message that the assistant will say if it ends the call.\n\nIf unspecified, it will hang up without saying anything.",
+ "maxLength": 1000
+ },
+ "endCallPhrases": {
+ "description": "This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive.",
+ "type": "array",
+ "items": {
+ "type": "string",
+ "maxLength": 140,
+ "minLength": 2
+ }
+ },
+ "compliancePlan": {
+ "$ref": "#/components/schemas/CompliancePlan"
+ },
+ "metadata": {
+ "type": "object",
+ "description": "This is for metadata you want to store on the assistant."
+ },
+ "backgroundSpeechDenoisingPlan": {
+ "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nSmart denoising can be combined with or used independently of Fourier denoising.\n\nOrder of precedence:\n- Smart denoising\n- Fourier denoising",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
+ }
+ ]
+ },
+ "analysisPlan": {
+ "description": "This is the plan for analysis of assistant's calls. Stored in `call.analysis`.",
+ "deprecated": true,
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AnalysisPlan"
+ }
+ ]
+ },
+ "artifactPlan": {
+ "description": "This is the plan for artifacts generated during assistant's calls. Stored in `call.artifact`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ArtifactPlan"
+ }
+ ]
+ },
+ "startSpeakingPlan": {
+ "description": "This is the plan for when the assistant should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StartSpeakingPlan"
+ }
+ ]
+ },
+ "stopSpeakingPlan": {
+ "description": "This is the plan for when assistant should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StopSpeakingPlan"
+ }
+ ]
+ },
+ "monitorPlan": {
+ "description": "This is the plan for real-time monitoring of the assistant's calls.\n\nUsage:\n- To enable live listening of the assistant's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the assistant's calls, set `monitorPlan.controlEnabled` to `true`.\n- To attach monitors to the assistant, set `monitorPlan.monitorIds` to the set of monitor ids.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/MonitorPlan"
+ }
+ ]
+ },
+ "credentialIds": {
+ "description": "These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server.url\n2. phoneNumber.serverUrl\n3. org.serverUrl",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "keypadInputPlan": {
+ "$ref": "#/components/schemas/KeypadInputPlan"
+ }
+ }
+ },
+ "CreateAssistantDTO": {
+ "type": "object",
+ "properties": {
+ "transcriber": {
+ "description": "These are the options for the assistant's transcriber.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AssemblyAITranscriber",
+ "title": "AssemblyAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/AzureSpeechTranscriber",
+ "title": "AzureSpeechTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CustomTranscriber",
+ "title": "CustomTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramTranscriber",
+ "title": "DeepgramTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsTranscriber",
+ "title": "ElevenLabsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GladiaTranscriber",
+ "title": "GladiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleTranscriber",
+ "title": "GoogleTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SpeechmaticsTranscriber",
+ "title": "SpeechmaticsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/TalkscriberTranscriber",
+ "title": "TalkscriberTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAITranscriber",
+ "title": "OpenAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaTranscriber",
+ "title": "CartesiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SonioxTranscriber",
+ "title": "SonioxTranscriber"
+ }
+ ]
+ },
+ "model": {
+ "description": "These are the options for the assistant's LLM.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AnthropicModel",
+ "title": "Anthropic"
+ },
+ {
+ "$ref": "#/components/schemas/AnthropicBedrockModel",
+ "title": "AnthropicBedrock"
+ },
+ {
+ "$ref": "#/components/schemas/AnyscaleModel",
+ "title": "Anyscale"
+ },
+ {
+ "$ref": "#/components/schemas/CerebrasModel",
+ "title": "Cerebras"
+ },
+ {
+ "$ref": "#/components/schemas/CustomLLMModel",
+ "title": "CustomLLM"
+ },
+ {
+ "$ref": "#/components/schemas/DeepInfraModel",
+ "title": "DeepInfra"
+ },
+ {
+ "$ref": "#/components/schemas/DeepSeekModel",
+ "title": "DeepSeek"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleModel",
+ "title": "Google"
+ },
+ {
+ "$ref": "#/components/schemas/GroqModel",
+ "title": "Groq"
+ },
+ {
+ "$ref": "#/components/schemas/InflectionAIModel",
+ "title": "InflectionAI"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxLLMModel",
+ "title": "MiniMaxLLM"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIModel",
+ "title": "OpenAI"
+ },
+ {
+ "$ref": "#/components/schemas/OpenRouterModel",
+ "title": "OpenRouter"
+ },
+ {
+ "$ref": "#/components/schemas/PerplexityAIModel",
+ "title": "PerplexityAI"
+ },
+ {
+ "$ref": "#/components/schemas/TogetherAIModel",
+ "title": "Together"
+ },
+ {
+ "$ref": "#/components/schemas/XaiModel",
+ "title": "XAI"
+ }
+ ]
+ },
+ "voice": {
+ "description": "These are the options for the assistant's voice.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AzureVoice",
+ "title": "AzureVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaVoice",
+ "title": "CartesiaVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CustomVoice",
+ "title": "CustomVoice"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramVoice",
+ "title": "DeepgramVoice"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsVoice",
+ "title": "ElevenLabsVoice"
+ },
+ {
+ "$ref": "#/components/schemas/HumeVoice",
+ "title": "HumeVoice"
+ },
+ {
+ "$ref": "#/components/schemas/LMNTVoice",
+ "title": "LMNTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/NeuphonicVoice",
+ "title": "NeuphonicVoice"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoice",
+ "title": "OpenAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/PlayHTVoice",
+ "title": "PlayHTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/WellSaidVoice",
+ "title": "WellSaidVoice"
+ },
+ {
+ "$ref": "#/components/schemas/RimeAIVoice",
+ "title": "RimeAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SmallestAIVoice",
+ "title": "SmallestAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/TavusVoice",
+ "title": "TavusVoice"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoice",
+ "title": "VapiVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SesameVoice",
+ "title": "SesameVoice"
+ },
+ {
+ "$ref": "#/components/schemas/InworldVoice",
+ "title": "InworldVoice"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxVoice",
+ "title": "MinimaxVoice"
+ }
+ ]
+ },
+ "firstMessage": {
+ "type": "string",
+ "description": "This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.).\n\nIf unspecified, assistant will wait for user to speak and use the model to respond once they speak.",
+ "example": "Hello! How can I help you today?"
+ },
+ "firstMessageInterruptionsEnabled": {
+ "type": "boolean",
+ "default": false
+ },
+ "firstMessageMode": {
+ "type": "string",
+ "description": "This is the mode for the first message. Default is 'assistant-speaks-first'.\n\nUse:\n- 'assistant-speaks-first' to have the assistant speak first.\n- 'assistant-waits-for-user' to have the assistant wait for the user to speak first.\n- 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points).\n\n@default 'assistant-speaks-first'",
+ "enum": [
+ "assistant-speaks-first",
+ "assistant-speaks-first-with-model-generated-message",
+ "assistant-waits-for-user"
+ ],
+ "example": "assistant-speaks-first"
+ },
+ "voicemailDetection": {
+ "description": "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nBy default, voicemail detection is disabled.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "off"
+ ]
+ },
+ {
+ "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan",
+ "title": "Google"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoicemailDetectionPlan",
+ "title": "OpenAI"
+ },
+ {
+ "$ref": "#/components/schemas/TwilioVoicemailDetectionPlan",
+ "title": "Twilio"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoicemailDetectionPlan",
+ "title": "Vapi"
+ }
+ ]
+ },
+ "clientMessages": {
+ "type": "array",
+ "enum": [
+ "conversation-update",
+ "assistant.speechStarted",
+ "function-call",
+ "function-call-result",
+ "hang",
+ "language-changed",
+ "metadata",
+ "model-output",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "tool-calls",
+ "tool-calls-result",
+ "tool.completed",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "workflow.node.started",
+ "assistant.started"
+ ],
+ "example": [
+ "conversation-update",
+ "function-call",
+ "hang",
+ "model-output",
+ "speech-update",
+ "status-update",
+ "transfer-update",
+ "transcript",
+ "tool-calls",
+ "user-interrupted",
+ "voice-input",
+ "workflow.node.started",
+ "assistant.started"
+ ],
+ "description": "These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input,workflow.node.started,assistant.started. You can check the shape of the messages in ClientMessage schema.",
+ "items": {
+ "type": "string",
+ "enum": [
+ "conversation-update",
+ "assistant.speechStarted",
+ "function-call",
+ "function-call-result",
+ "hang",
+ "language-changed",
+ "metadata",
+ "model-output",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "tool-calls",
+ "tool-calls-result",
+ "tool.completed",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "workflow.node.started",
+ "assistant.started"
+ ]
+ }
+ },
+ "serverMessages": {
+ "type": "array",
+ "enum": [
+ "assistant.started",
+ "assistant.speechStarted",
+ "conversation-update",
+ "end-of-call-report",
+ "function-call",
+ "hang",
+ "language-changed",
+ "language-change-detected",
+ "model-output",
+ "phone-call-control",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "transcript[transcriptType=\"final\"]",
+ "tool-calls",
+ "transfer-destination-request",
+ "handoff-destination-request",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "chat.created",
+ "chat.deleted",
+ "session.created",
+ "session.updated",
+ "session.deleted",
+ "call.deleted",
+ "call.delete.failed"
+ ],
+ "example": [
+ "conversation-update",
+ "end-of-call-report",
+ "function-call",
+ "hang",
+ "speech-update",
+ "status-update",
+ "tool-calls",
+ "transfer-destination-request",
+ "handoff-destination-request",
+ "user-interrupted",
+ "assistant.started"
+ ],
+ "description": "These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,handoff-destination-request,user-interrupted,assistant.started. You can check the shape of the messages in ServerMessage schema.",
+ "items": {
+ "type": "string",
+ "enum": [
+ "assistant.started",
+ "assistant.speechStarted",
+ "conversation-update",
+ "end-of-call-report",
+ "function-call",
+ "hang",
+ "language-changed",
+ "language-change-detected",
+ "model-output",
+ "phone-call-control",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "transcript[transcriptType=\"final\"]",
+ "tool-calls",
+ "transfer-destination-request",
+ "handoff-destination-request",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "chat.created",
+ "chat.deleted",
+ "session.created",
+ "session.updated",
+ "session.deleted",
+ "call.deleted",
+ "call.delete.failed"
+ ]
+ }
+ },
+ "maxDurationSeconds": {
+ "type": "number",
+ "description": "This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended.\n\n@default 600 (10 minutes)",
+ "minimum": 10,
+ "maximum": 43200,
+ "example": 600
+ },
+ "backgroundSound": {
+ "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "off",
+ "office"
+ ],
+ "example": "office"
+ },
+ {
+ "type": "string",
+ "format": "uri",
+ "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
+ }
+ ]
+ },
+ "modelOutputInMessagesEnabled": {
+ "type": "boolean",
+ "description": "This determines whether the model's output is used in conversation history rather than the transcription of assistant's speech.\n\n@default false",
+ "example": false
+ },
+ "transportConfigurations": {
+ "type": "array",
+ "description": "These are the configurations to be passed to the transport providers of assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransportConfigurationTwilio",
+ "title": "Twilio"
+ }
+ ]
+ }
+ },
+ "observabilityPlan": {
+ "description": "This is the plan for observability of assistant's calls.\n\nCurrently, only Langfuse is supported.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan",
+ "title": "Langfuse"
+ }
+ ],
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan"
+ }
+ ]
+ },
+ "credentials": {
+ "type": "array",
+ "description": "These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can supplement them with additional credentials using this. Dynamic credentials override existing credentials.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "title": "AnthropicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "title": "AnthropicBedrockCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "title": "AnyscaleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "title": "AssemblyAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureCredentialDTO",
+ "title": "AzureCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "title": "AzureOpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "title": "ByoSipTrunkCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "title": "CartesiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "title": "CerebrasCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "title": "CloudflareCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "title": "CustomLLMCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "title": "DeepgramCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "title": "DeepInfraCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "title": "DeepSeekCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "title": "ElevenLabsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGcpCredentialDTO",
+ "title": "GcpCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
+ "title": "GladiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "title": "GhlCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
+ "title": "GoogleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGroqCredentialDTO",
+ "title": "GroqCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHumeCredentialDTO",
+ "title": "HumeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "title": "InflectionAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "title": "LangfuseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLmntCredentialDTO",
+ "title": "LmntCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMakeCredentialDTO",
+ "title": "MakeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMistralCredentialDTO",
+ "title": "MistralCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "title": "NeuphonicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
+ "title": "OpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "title": "OpenRouterCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "title": "PerplexityAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "title": "PlayHTCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
+ "title": "RimeAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
+ "title": "RunpodCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateS3CredentialDTO",
+ "title": "S3Credential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "title": "SmallestAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "title": "SpeechmaticsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSonioxCredentialDTO",
+ "title": "SonioxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "title": "SupabaseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTavusCredentialDTO",
+ "title": "TavusCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "title": "TogetherAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
+ "title": "TrieveCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
+ "title": "TwilioCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonageCredentialDTO",
+ "title": "VonageCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
+ "title": "WebhookCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomCredentialDTO",
+ "title": "CustomCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateXAiCredentialDTO",
+ "title": "XAiCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "title": "GoogleCalendarOAuth2ClientCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleCalendarOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleSheetsOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "title": "SlackOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "title": "GoHighLevelMCPCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInworldCredentialDTO",
+ "title": "InworldCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "title": "MinimaxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "title": "WellSaidCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEmailCredentialDTO",
+ "title": "EmailCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackWebhookCredentialDTO",
+ "title": "SlackWebhookCredential"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "provider",
+ "mapping": {
+ "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "anthropic-bedrock": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "azure": "#/components/schemas/CreateAzureCredentialDTO",
+ "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "gcp": "#/components/schemas/CreateGcpCredentialDTO",
+ "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
+ "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "google": "#/components/schemas/CreateGoogleCredentialDTO",
+ "groq": "#/components/schemas/CreateGroqCredentialDTO",
+ "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
+ "make": "#/components/schemas/CreateMakeCredentialDTO",
+ "openai": "#/components/schemas/CreateOpenAICredentialDTO",
+ "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
+ "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
+ "s3": "#/components/schemas/CreateS3CredentialDTO",
+ "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "tavus": "#/components/schemas/CreateTavusCredentialDTO",
+ "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
+ "vonage": "#/components/schemas/CreateVonageCredentialDTO",
+ "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
+ "custom-credential": "#/components/schemas/CreateCustomCredentialDTO",
+ "xai": "#/components/schemas/CreateXAiCredentialDTO",
+ "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "hume": "#/components/schemas/CreateHumeCredentialDTO",
+ "mistral": "#/components/schemas/CreateMistralCredentialDTO",
+ "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "soniox": "#/components/schemas/CreateSonioxCredentialDTO",
+ "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
+ "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "inworld": "#/components/schemas/CreateInworldCredentialDTO",
+ "minimax": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "wellsaid": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "email": "#/components/schemas/CreateEmailCredentialDTO",
+ "slack-webhook": "#/components/schemas/CreateSlackWebhookCredentialDTO"
+ }
+ }
+ }
+ },
+ "hooks": {
+ "type": "array",
+ "description": "This is a set of actions that will be performed on certain events.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CallHookCallEnding",
+ "title": "CallHookCallEnding"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
+ "title": "CallHookAssistantSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
+ "title": "CallHookCustomerSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
+ "title": "CallHookCustomerSpeechTimeout"
+ },
+ {
+ "$ref": "#/components/schemas/SessionCreatedHook",
+ "title": "SessionCreatedHook"
+ }
+ ]
+ }
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the assistant.\n\nThis is required when you want to transfer between assistants in a call.",
+ "maxLength": 40
+ },
+ "voicemailMessage": {
+ "type": "string",
+ "description": "This is the message that the assistant will say if the call is forwarded to voicemail.\n\nIf unspecified, it will hang up.",
+ "maxLength": 1000
+ },
+ "endCallMessage": {
+ "type": "string",
+ "description": "This is the message that the assistant will say if it ends the call.\n\nIf unspecified, it will hang up without saying anything.",
+ "maxLength": 1000
+ },
+ "endCallPhrases": {
+ "description": "This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive.",
+ "type": "array",
+ "items": {
+ "type": "string",
+ "maxLength": 140,
+ "minLength": 2
+ }
+ },
+ "compliancePlan": {
+ "$ref": "#/components/schemas/CompliancePlan"
+ },
+ "metadata": {
+ "type": "object",
+ "description": "This is for metadata you want to store on the assistant."
+ },
+ "backgroundSpeechDenoisingPlan": {
+ "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nSmart denoising can be combined with or used independently of Fourier denoising.\n\nOrder of precedence:\n- Smart denoising\n- Fourier denoising",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
+ }
+ ]
+ },
+ "analysisPlan": {
+ "description": "This is the plan for analysis of assistant's calls. Stored in `call.analysis`.",
+ "deprecated": true,
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AnalysisPlan"
+ }
+ ]
+ },
+ "artifactPlan": {
+ "description": "This is the plan for artifacts generated during assistant's calls. Stored in `call.artifact`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ArtifactPlan"
+ }
+ ]
+ },
+ "startSpeakingPlan": {
+ "description": "This is the plan for when the assistant should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StartSpeakingPlan"
+ }
+ ]
+ },
+ "stopSpeakingPlan": {
+ "description": "This is the plan for when assistant should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StopSpeakingPlan"
+ }
+ ]
+ },
+ "monitorPlan": {
+ "description": "This is the plan for real-time monitoring of the assistant's calls.\n\nUsage:\n- To enable live listening of the assistant's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the assistant's calls, set `monitorPlan.controlEnabled` to `true`.\n- To attach monitors to the assistant, set `monitorPlan.monitorIds` to the set of monitor ids.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/MonitorPlan"
+ }
+ ]
+ },
+ "credentialIds": {
+ "description": "These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server.url\n2. phoneNumber.serverUrl\n3. org.serverUrl",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "keypadInputPlan": {
+ "$ref": "#/components/schemas/KeypadInputPlan"
+ }
+ }
+ },
+ "Assistant": {
+ "type": "object",
+ "properties": {
+ "transcriber": {
+ "description": "These are the options for the assistant's transcriber.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AssemblyAITranscriber",
+ "title": "AssemblyAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/AzureSpeechTranscriber",
+ "title": "AzureSpeechTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CustomTranscriber",
+ "title": "CustomTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramTranscriber",
+ "title": "DeepgramTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsTranscriber",
+ "title": "ElevenLabsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GladiaTranscriber",
+ "title": "GladiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleTranscriber",
+ "title": "GoogleTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SpeechmaticsTranscriber",
+ "title": "SpeechmaticsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/TalkscriberTranscriber",
+ "title": "TalkscriberTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAITranscriber",
+ "title": "OpenAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaTranscriber",
+ "title": "CartesiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SonioxTranscriber",
+ "title": "SonioxTranscriber"
+ }
+ ]
+ },
+ "model": {
+ "description": "These are the options for the assistant's LLM.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AnthropicModel",
+ "title": "Anthropic"
+ },
+ {
+ "$ref": "#/components/schemas/AnthropicBedrockModel",
+ "title": "AnthropicBedrock"
+ },
+ {
+ "$ref": "#/components/schemas/AnyscaleModel",
+ "title": "Anyscale"
+ },
+ {
+ "$ref": "#/components/schemas/CerebrasModel",
+ "title": "Cerebras"
+ },
+ {
+ "$ref": "#/components/schemas/CustomLLMModel",
+ "title": "CustomLLM"
+ },
+ {
+ "$ref": "#/components/schemas/DeepInfraModel",
+ "title": "DeepInfra"
+ },
+ {
+ "$ref": "#/components/schemas/DeepSeekModel",
+ "title": "DeepSeek"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleModel",
+ "title": "Google"
+ },
+ {
+ "$ref": "#/components/schemas/GroqModel",
+ "title": "Groq"
+ },
+ {
+ "$ref": "#/components/schemas/InflectionAIModel",
+ "title": "InflectionAI"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxLLMModel",
+ "title": "MiniMaxLLM"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIModel",
+ "title": "OpenAI"
+ },
+ {
+ "$ref": "#/components/schemas/OpenRouterModel",
+ "title": "OpenRouter"
+ },
+ {
+ "$ref": "#/components/schemas/PerplexityAIModel",
+ "title": "PerplexityAI"
+ },
+ {
+ "$ref": "#/components/schemas/TogetherAIModel",
+ "title": "Together"
+ },
+ {
+ "$ref": "#/components/schemas/XaiModel",
+ "title": "XAI"
+ }
+ ]
+ },
+ "voice": {
+ "description": "These are the options for the assistant's voice.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AzureVoice",
+ "title": "AzureVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaVoice",
+ "title": "CartesiaVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CustomVoice",
+ "title": "CustomVoice"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramVoice",
+ "title": "DeepgramVoice"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsVoice",
+ "title": "ElevenLabsVoice"
+ },
+ {
+ "$ref": "#/components/schemas/HumeVoice",
+ "title": "HumeVoice"
+ },
+ {
+ "$ref": "#/components/schemas/LMNTVoice",
+ "title": "LMNTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/NeuphonicVoice",
+ "title": "NeuphonicVoice"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoice",
+ "title": "OpenAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/PlayHTVoice",
+ "title": "PlayHTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/WellSaidVoice",
+ "title": "WellSaidVoice"
+ },
+ {
+ "$ref": "#/components/schemas/RimeAIVoice",
+ "title": "RimeAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SmallestAIVoice",
+ "title": "SmallestAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/TavusVoice",
+ "title": "TavusVoice"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoice",
+ "title": "VapiVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SesameVoice",
+ "title": "SesameVoice"
+ },
+ {
+ "$ref": "#/components/schemas/InworldVoice",
+ "title": "InworldVoice"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxVoice",
+ "title": "MinimaxVoice"
+ }
+ ]
+ },
+ "firstMessage": {
+ "type": "string",
+ "description": "This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.).\n\nIf unspecified, assistant will wait for user to speak and use the model to respond once they speak.",
+ "example": "Hello! How can I help you today?"
+ },
+ "firstMessageInterruptionsEnabled": {
+ "type": "boolean",
+ "default": false
+ },
+ "firstMessageMode": {
+ "type": "string",
+ "description": "This is the mode for the first message. Default is 'assistant-speaks-first'.\n\nUse:\n- 'assistant-speaks-first' to have the assistant speak first.\n- 'assistant-waits-for-user' to have the assistant wait for the user to speak first.\n- 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points).\n\n@default 'assistant-speaks-first'",
+ "enum": [
+ "assistant-speaks-first",
+ "assistant-speaks-first-with-model-generated-message",
+ "assistant-waits-for-user"
+ ],
+ "example": "assistant-speaks-first"
+ },
+ "voicemailDetection": {
+ "description": "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nBy default, voicemail detection is disabled.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "off"
+ ]
+ },
+ {
+ "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan",
+ "title": "Google"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoicemailDetectionPlan",
+ "title": "OpenAI"
+ },
+ {
+ "$ref": "#/components/schemas/TwilioVoicemailDetectionPlan",
+ "title": "Twilio"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoicemailDetectionPlan",
+ "title": "Vapi"
+ }
+ ]
+ },
+ "clientMessages": {
+ "type": "array",
+ "enum": [
+ "conversation-update",
+ "assistant.speechStarted",
+ "function-call",
+ "function-call-result",
+ "hang",
+ "language-changed",
+ "metadata",
+ "model-output",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "tool-calls",
+ "tool-calls-result",
+ "tool.completed",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "workflow.node.started",
+ "assistant.started"
+ ],
+ "example": [
+ "conversation-update",
+ "function-call",
+ "hang",
+ "model-output",
+ "speech-update",
+ "status-update",
+ "transfer-update",
+ "transcript",
+ "tool-calls",
+ "user-interrupted",
+ "voice-input",
+ "workflow.node.started",
+ "assistant.started"
+ ],
+ "description": "These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input,workflow.node.started,assistant.started. You can check the shape of the messages in ClientMessage schema.",
+ "items": {
+ "type": "string",
+ "enum": [
+ "conversation-update",
+ "assistant.speechStarted",
+ "function-call",
+ "function-call-result",
+ "hang",
+ "language-changed",
+ "metadata",
+ "model-output",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "tool-calls",
+ "tool-calls-result",
+ "tool.completed",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "workflow.node.started",
+ "assistant.started"
+ ]
+ }
+ },
+ "serverMessages": {
+ "type": "array",
+ "enum": [
+ "assistant.started",
+ "assistant.speechStarted",
+ "conversation-update",
+ "end-of-call-report",
+ "function-call",
+ "hang",
+ "language-changed",
+ "language-change-detected",
+ "model-output",
+ "phone-call-control",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "transcript[transcriptType=\"final\"]",
+ "tool-calls",
+ "transfer-destination-request",
+ "handoff-destination-request",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "chat.created",
+ "chat.deleted",
+ "session.created",
+ "session.updated",
+ "session.deleted",
+ "call.deleted",
+ "call.delete.failed"
+ ],
+ "example": [
+ "conversation-update",
+ "end-of-call-report",
+ "function-call",
+ "hang",
+ "speech-update",
+ "status-update",
+ "tool-calls",
+ "transfer-destination-request",
+ "handoff-destination-request",
+ "user-interrupted",
+ "assistant.started"
+ ],
+ "description": "These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,handoff-destination-request,user-interrupted,assistant.started. You can check the shape of the messages in ServerMessage schema.",
+ "items": {
+ "type": "string",
+ "enum": [
+ "assistant.started",
+ "assistant.speechStarted",
+ "conversation-update",
+ "end-of-call-report",
+ "function-call",
+ "hang",
+ "language-changed",
+ "language-change-detected",
+ "model-output",
+ "phone-call-control",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "transcript[transcriptType=\"final\"]",
+ "tool-calls",
+ "transfer-destination-request",
+ "handoff-destination-request",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "chat.created",
+ "chat.deleted",
+ "session.created",
+ "session.updated",
+ "session.deleted",
+ "call.deleted",
+ "call.delete.failed"
+ ]
+ }
+ },
+ "maxDurationSeconds": {
+ "type": "number",
+ "description": "This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended.\n\n@default 600 (10 minutes)",
+ "minimum": 10,
+ "maximum": 43200,
+ "example": 600
+ },
+ "backgroundSound": {
+ "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "off",
+ "office"
+ ],
+ "example": "office"
+ },
+ {
+ "type": "string",
+ "format": "uri",
+ "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
+ }
+ ]
+ },
+ "modelOutputInMessagesEnabled": {
+ "type": "boolean",
+ "description": "This determines whether the model's output is used in conversation history rather than the transcription of assistant's speech.\n\n@default false",
+ "example": false
+ },
+ "transportConfigurations": {
+ "type": "array",
+ "description": "These are the configurations to be passed to the transport providers of assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransportConfigurationTwilio",
+ "title": "Twilio"
+ }
+ ]
+ }
+ },
+ "observabilityPlan": {
+ "description": "This is the plan for observability of assistant's calls.\n\nCurrently, only Langfuse is supported.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan",
+ "title": "Langfuse"
+ }
+ ],
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan"
+ }
+ ]
+ },
+ "credentials": {
+ "type": "array",
+ "description": "These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "title": "AnthropicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "title": "AnthropicBedrockCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "title": "AnyscaleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "title": "AssemblyAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureCredentialDTO",
+ "title": "AzureCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "title": "AzureOpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "title": "ByoSipTrunkCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "title": "CartesiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "title": "CerebrasCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "title": "CloudflareCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "title": "CustomLLMCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "title": "DeepgramCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "title": "DeepInfraCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "title": "DeepSeekCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "title": "ElevenLabsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGcpCredentialDTO",
+ "title": "GcpCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
+ "title": "GladiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "title": "GhlCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
+ "title": "GoogleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGroqCredentialDTO",
+ "title": "GroqCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHumeCredentialDTO",
+ "title": "HumeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "title": "InflectionAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "title": "LangfuseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLmntCredentialDTO",
+ "title": "LmntCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMakeCredentialDTO",
+ "title": "MakeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMistralCredentialDTO",
+ "title": "MistralCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "title": "NeuphonicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
+ "title": "OpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "title": "OpenRouterCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "title": "PerplexityAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "title": "PlayHTCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
+ "title": "RimeAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
+ "title": "RunpodCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateS3CredentialDTO",
+ "title": "S3Credential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "title": "SmallestAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "title": "SpeechmaticsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSonioxCredentialDTO",
+ "title": "SonioxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "title": "SupabaseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTavusCredentialDTO",
+ "title": "TavusCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "title": "TogetherAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
+ "title": "TrieveCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
+ "title": "TwilioCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonageCredentialDTO",
+ "title": "VonageCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
+ "title": "WebhookCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomCredentialDTO",
+ "title": "CustomCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateXAiCredentialDTO",
+ "title": "XAiCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "title": "GoogleCalendarOAuth2ClientCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleCalendarOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleSheetsOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "title": "SlackOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "title": "GoHighLevelMCPCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInworldCredentialDTO",
+ "title": "InworldCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "title": "MinimaxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "title": "WellSaidCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEmailCredentialDTO",
+ "title": "EmailCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackWebhookCredentialDTO",
+ "title": "SlackWebhookCredential"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "provider",
+ "mapping": {
+ "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "anthropic-bedrock": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "azure": "#/components/schemas/CreateAzureCredentialDTO",
+ "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "gcp": "#/components/schemas/CreateGcpCredentialDTO",
+ "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
+ "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "google": "#/components/schemas/CreateGoogleCredentialDTO",
+ "groq": "#/components/schemas/CreateGroqCredentialDTO",
+ "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
+ "make": "#/components/schemas/CreateMakeCredentialDTO",
+ "openai": "#/components/schemas/CreateOpenAICredentialDTO",
+ "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
+ "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
+ "s3": "#/components/schemas/CreateS3CredentialDTO",
+ "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "tavus": "#/components/schemas/CreateTavusCredentialDTO",
+ "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
+ "vonage": "#/components/schemas/CreateVonageCredentialDTO",
+ "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
+ "custom-credential": "#/components/schemas/CreateCustomCredentialDTO",
+ "xai": "#/components/schemas/CreateXAiCredentialDTO",
+ "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "hume": "#/components/schemas/CreateHumeCredentialDTO",
+ "mistral": "#/components/schemas/CreateMistralCredentialDTO",
+ "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "soniox": "#/components/schemas/CreateSonioxCredentialDTO",
+ "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
+ "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "inworld": "#/components/schemas/CreateInworldCredentialDTO",
+ "minimax": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "wellsaid": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "email": "#/components/schemas/CreateEmailCredentialDTO",
+ "slack-webhook": "#/components/schemas/CreateSlackWebhookCredentialDTO"
+ }
+ }
+ }
+ },
+ "hooks": {
+ "type": "array",
+ "description": "This is a set of actions that will be performed on certain events.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CallHookCallEnding",
+ "title": "CallHookCallEnding"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
+ "title": "CallHookAssistantSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
+ "title": "CallHookCustomerSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
+ "title": "CallHookCustomerSpeechTimeout"
+ },
+ {
+ "$ref": "#/components/schemas/SessionCreatedHook",
+ "title": "SessionCreatedHook"
+ }
+ ]
+ }
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the assistant.\n\nThis is required when you want to transfer between assistants in a call.",
+ "maxLength": 40
+ },
+ "voicemailMessage": {
+ "type": "string",
+ "description": "This is the message that the assistant will say if the call is forwarded to voicemail.\n\nIf unspecified, it will hang up.",
+ "maxLength": 1000
+ },
+ "endCallMessage": {
+ "type": "string",
+ "description": "This is the message that the assistant will say if it ends the call.\n\nIf unspecified, it will hang up without saying anything.",
+ "maxLength": 1000
+ },
+ "endCallPhrases": {
+ "description": "This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive.",
+ "type": "array",
+ "items": {
+ "type": "string",
+ "maxLength": 140,
+ "minLength": 2
+ }
+ },
+ "compliancePlan": {
+ "$ref": "#/components/schemas/CompliancePlan"
+ },
+ "metadata": {
+ "type": "object",
+ "description": "This is for metadata you want to store on the assistant."
+ },
+ "backgroundSpeechDenoisingPlan": {
+ "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nSmart denoising can be combined with or used independently of Fourier denoising.\n\nOrder of precedence:\n- Smart denoising\n- Fourier denoising",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
+ }
+ ]
+ },
+ "analysisPlan": {
+ "description": "This is the plan for analysis of assistant's calls. Stored in `call.analysis`.",
+ "deprecated": true,
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AnalysisPlan"
+ }
+ ]
+ },
+ "artifactPlan": {
+ "description": "This is the plan for artifacts generated during assistant's calls. Stored in `call.artifact`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ArtifactPlan"
+ }
+ ]
+ },
+ "startSpeakingPlan": {
+ "description": "This is the plan for when the assistant should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StartSpeakingPlan"
+ }
+ ]
+ },
+ "stopSpeakingPlan": {
+ "description": "This is the plan for when assistant should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StopSpeakingPlan"
+ }
+ ]
+ },
+ "monitorPlan": {
+ "description": "This is the plan for real-time monitoring of the assistant's calls.\n\nUsage:\n- To enable live listening of the assistant's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the assistant's calls, set `monitorPlan.controlEnabled` to `true`.\n- To attach monitors to the assistant, set `monitorPlan.monitorIds` to the set of monitor ids.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/MonitorPlan"
+ }
+ ]
+ },
+ "credentialIds": {
+ "description": "These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server.url\n2. phoneNumber.serverUrl\n3. org.serverUrl",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "keypadInputPlan": {
+ "$ref": "#/components/schemas/KeypadInputPlan"
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the assistant."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this assistant belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the assistant was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the assistant was last updated."
+ }
+ },
+ "required": [
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "PaginationMeta": {
+ "type": "object",
+ "properties": {
+ "itemsPerPage": {
+ "type": "number"
+ },
+ "totalItems": {
+ "type": "number"
+ },
+ "currentPage": {
+ "type": "number"
+ },
+ "itemsBeyondRetention": {
+ "type": "boolean"
+ },
+ "createdAtLe": {
+ "format": "date-time",
+ "type": "string"
+ },
+ "createdAtGe": {
+ "format": "date-time",
+ "type": "string"
+ }
+ },
+ "required": [
+ "itemsPerPage",
+ "totalItems",
+ "currentPage"
+ ]
+ },
+ "AssistantPaginatedResponse": {
+ "type": "object",
+ "properties": {
+ "results": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/Assistant"
+ }
+ },
+ "metadata": {
+ "$ref": "#/components/schemas/PaginationMeta"
+ }
+ },
+ "required": [
+ "results",
+ "metadata"
+ ]
+ },
+ "AssistantVersionPaginatedResponse": {
+ "type": "object",
+ "properties": {
+ "results": {
+ "type": "array"
+ },
+ "metadata": {
+ "$ref": "#/components/schemas/PaginationMeta"
+ },
+ "nextPageState": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "results",
+ "metadata"
+ ]
+ },
+ "UpdateAssistantDTO": {
+ "type": "object",
+ "properties": {
+ "transcriber": {
+ "description": "These are the options for the assistant's transcriber.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AssemblyAITranscriber",
+ "title": "AssemblyAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/AzureSpeechTranscriber",
+ "title": "AzureSpeechTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CustomTranscriber",
+ "title": "CustomTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramTranscriber",
+ "title": "DeepgramTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsTranscriber",
+ "title": "ElevenLabsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GladiaTranscriber",
+ "title": "GladiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleTranscriber",
+ "title": "GoogleTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SpeechmaticsTranscriber",
+ "title": "SpeechmaticsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/TalkscriberTranscriber",
+ "title": "TalkscriberTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAITranscriber",
+ "title": "OpenAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaTranscriber",
+ "title": "CartesiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SonioxTranscriber",
+ "title": "SonioxTranscriber"
+ }
+ ]
+ },
+ "model": {
+ "description": "These are the options for the assistant's LLM.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AnthropicModel",
+ "title": "Anthropic"
+ },
+ {
+ "$ref": "#/components/schemas/AnthropicBedrockModel",
+ "title": "AnthropicBedrock"
+ },
+ {
+ "$ref": "#/components/schemas/AnyscaleModel",
+ "title": "Anyscale"
+ },
+ {
+ "$ref": "#/components/schemas/CerebrasModel",
+ "title": "Cerebras"
+ },
+ {
+ "$ref": "#/components/schemas/CustomLLMModel",
+ "title": "CustomLLM"
+ },
+ {
+ "$ref": "#/components/schemas/DeepInfraModel",
+ "title": "DeepInfra"
+ },
+ {
+ "$ref": "#/components/schemas/DeepSeekModel",
+ "title": "DeepSeek"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleModel",
+ "title": "Google"
+ },
+ {
+ "$ref": "#/components/schemas/GroqModel",
+ "title": "Groq"
+ },
+ {
+ "$ref": "#/components/schemas/InflectionAIModel",
+ "title": "InflectionAI"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxLLMModel",
+ "title": "MiniMaxLLM"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIModel",
+ "title": "OpenAI"
+ },
+ {
+ "$ref": "#/components/schemas/OpenRouterModel",
+ "title": "OpenRouter"
+ },
+ {
+ "$ref": "#/components/schemas/PerplexityAIModel",
+ "title": "PerplexityAI"
+ },
+ {
+ "$ref": "#/components/schemas/TogetherAIModel",
+ "title": "Together"
+ },
+ {
+ "$ref": "#/components/schemas/XaiModel",
+ "title": "XAI"
+ }
+ ]
+ },
+ "voice": {
+ "description": "These are the options for the assistant's voice.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AzureVoice",
+ "title": "AzureVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaVoice",
+ "title": "CartesiaVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CustomVoice",
+ "title": "CustomVoice"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramVoice",
+ "title": "DeepgramVoice"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsVoice",
+ "title": "ElevenLabsVoice"
+ },
+ {
+ "$ref": "#/components/schemas/HumeVoice",
+ "title": "HumeVoice"
+ },
+ {
+ "$ref": "#/components/schemas/LMNTVoice",
+ "title": "LMNTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/NeuphonicVoice",
+ "title": "NeuphonicVoice"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoice",
+ "title": "OpenAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/PlayHTVoice",
+ "title": "PlayHTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/WellSaidVoice",
+ "title": "WellSaidVoice"
+ },
+ {
+ "$ref": "#/components/schemas/RimeAIVoice",
+ "title": "RimeAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SmallestAIVoice",
+ "title": "SmallestAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/TavusVoice",
+ "title": "TavusVoice"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoice",
+ "title": "VapiVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SesameVoice",
+ "title": "SesameVoice"
+ },
+ {
+ "$ref": "#/components/schemas/InworldVoice",
+ "title": "InworldVoice"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxVoice",
+ "title": "MinimaxVoice"
+ }
+ ]
+ },
+ "firstMessage": {
+ "type": "string",
+ "description": "This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.).\n\nIf unspecified, assistant will wait for user to speak and use the model to respond once they speak.",
+ "example": "Hello! How can I help you today?"
+ },
+ "firstMessageInterruptionsEnabled": {
+ "type": "boolean",
+ "default": false
+ },
+ "firstMessageMode": {
+ "type": "string",
+ "description": "This is the mode for the first message. Default is 'assistant-speaks-first'.\n\nUse:\n- 'assistant-speaks-first' to have the assistant speak first.\n- 'assistant-waits-for-user' to have the assistant wait for the user to speak first.\n- 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points).\n\n@default 'assistant-speaks-first'",
+ "enum": [
+ "assistant-speaks-first",
+ "assistant-speaks-first-with-model-generated-message",
+ "assistant-waits-for-user"
+ ],
+ "example": "assistant-speaks-first"
+ },
+ "voicemailDetection": {
+ "description": "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nBy default, voicemail detection is disabled.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "off"
+ ]
+ },
+ {
+ "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan",
+ "title": "Google"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoicemailDetectionPlan",
+ "title": "OpenAI"
+ },
+ {
+ "$ref": "#/components/schemas/TwilioVoicemailDetectionPlan",
+ "title": "Twilio"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoicemailDetectionPlan",
+ "title": "Vapi"
+ }
+ ]
+ },
+ "clientMessages": {
+ "type": "array",
+ "enum": [
+ "conversation-update",
+ "assistant.speechStarted",
+ "function-call",
+ "function-call-result",
+ "hang",
+ "language-changed",
+ "metadata",
+ "model-output",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "tool-calls",
+ "tool-calls-result",
+ "tool.completed",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "workflow.node.started",
+ "assistant.started"
+ ],
+ "example": [
+ "conversation-update",
+ "function-call",
+ "hang",
+ "model-output",
+ "speech-update",
+ "status-update",
+ "transfer-update",
+ "transcript",
+ "tool-calls",
+ "user-interrupted",
+ "voice-input",
+ "workflow.node.started",
+ "assistant.started"
+ ],
+ "description": "These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input,workflow.node.started,assistant.started. You can check the shape of the messages in ClientMessage schema.",
+ "items": {
+ "type": "string",
+ "enum": [
+ "conversation-update",
+ "assistant.speechStarted",
+ "function-call",
+ "function-call-result",
+ "hang",
+ "language-changed",
+ "metadata",
+ "model-output",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "tool-calls",
+ "tool-calls-result",
+ "tool.completed",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "workflow.node.started",
+ "assistant.started"
+ ]
+ }
+ },
+ "serverMessages": {
+ "type": "array",
+ "enum": [
+ "assistant.started",
+ "assistant.speechStarted",
+ "conversation-update",
+ "end-of-call-report",
+ "function-call",
+ "hang",
+ "language-changed",
+ "language-change-detected",
+ "model-output",
+ "phone-call-control",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "transcript[transcriptType=\"final\"]",
+ "tool-calls",
+ "transfer-destination-request",
+ "handoff-destination-request",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "chat.created",
+ "chat.deleted",
+ "session.created",
+ "session.updated",
+ "session.deleted",
+ "call.deleted",
+ "call.delete.failed"
+ ],
+ "example": [
+ "conversation-update",
+ "end-of-call-report",
+ "function-call",
+ "hang",
+ "speech-update",
+ "status-update",
+ "tool-calls",
+ "transfer-destination-request",
+ "handoff-destination-request",
+ "user-interrupted",
+ "assistant.started"
+ ],
+ "description": "These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,handoff-destination-request,user-interrupted,assistant.started. You can check the shape of the messages in ServerMessage schema.",
+ "items": {
+ "type": "string",
+ "enum": [
+ "assistant.started",
+ "assistant.speechStarted",
+ "conversation-update",
+ "end-of-call-report",
+ "function-call",
+ "hang",
+ "language-changed",
+ "language-change-detected",
+ "model-output",
+ "phone-call-control",
+ "speech-update",
+ "status-update",
+ "transcript",
+ "transcript[transcriptType=\"final\"]",
+ "tool-calls",
+ "transfer-destination-request",
+ "handoff-destination-request",
+ "transfer-update",
+ "user-interrupted",
+ "voice-input",
+ "chat.created",
+ "chat.deleted",
+ "session.created",
+ "session.updated",
+ "session.deleted",
+ "call.deleted",
+ "call.delete.failed"
+ ]
+ }
+ },
+ "maxDurationSeconds": {
+ "type": "number",
+ "description": "This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended.\n\n@default 600 (10 minutes)",
+ "minimum": 10,
+ "maximum": 43200,
+ "example": 600
+ },
+ "backgroundSound": {
+ "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "off",
+ "office"
+ ],
+ "example": "office"
+ },
+ {
+ "type": "string",
+ "format": "uri",
+ "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
+ }
+ ]
+ },
+ "modelOutputInMessagesEnabled": {
+ "type": "boolean",
+ "description": "This determines whether the model's output is used in conversation history rather than the transcription of assistant's speech.\n\n@default false",
+ "example": false
+ },
+ "transportConfigurations": {
+ "type": "array",
+ "description": "These are the configurations to be passed to the transport providers of assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransportConfigurationTwilio",
+ "title": "Twilio"
+ }
+ ]
+ }
+ },
+ "observabilityPlan": {
+ "description": "This is the plan for observability of assistant's calls.\n\nCurrently, only Langfuse is supported.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan",
+ "title": "Langfuse"
+ }
+ ],
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan"
+ }
+ ]
+ },
+ "credentials": {
+ "type": "array",
+                "description": "These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can supplement additional credentials using this. Dynamic credentials override existing credentials.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "title": "AnthropicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "title": "AnthropicBedrockCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "title": "AnyscaleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "title": "AssemblyAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureCredentialDTO",
+ "title": "AzureCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "title": "AzureOpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "title": "ByoSipTrunkCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "title": "CartesiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "title": "CerebrasCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "title": "CloudflareCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "title": "CustomLLMCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "title": "DeepgramCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "title": "DeepInfraCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "title": "DeepSeekCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "title": "ElevenLabsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGcpCredentialDTO",
+ "title": "GcpCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
+ "title": "GladiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "title": "GhlCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
+ "title": "GoogleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGroqCredentialDTO",
+ "title": "GroqCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHumeCredentialDTO",
+ "title": "HumeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "title": "InflectionAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "title": "LangfuseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLmntCredentialDTO",
+ "title": "LmntCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMakeCredentialDTO",
+ "title": "MakeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMistralCredentialDTO",
+ "title": "MistralCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "title": "NeuphonicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
+ "title": "OpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "title": "OpenRouterCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "title": "PerplexityAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "title": "PlayHTCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
+ "title": "RimeAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
+ "title": "RunpodCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateS3CredentialDTO",
+ "title": "S3Credential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "title": "SmallestAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "title": "SpeechmaticsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSonioxCredentialDTO",
+ "title": "SonioxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "title": "SupabaseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTavusCredentialDTO",
+ "title": "TavusCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "title": "TogetherAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
+ "title": "TrieveCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
+ "title": "TwilioCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonageCredentialDTO",
+ "title": "VonageCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
+ "title": "WebhookCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomCredentialDTO",
+ "title": "CustomCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateXAiCredentialDTO",
+ "title": "XAiCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "title": "GoogleCalendarOAuth2ClientCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleCalendarOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleSheetsOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "title": "SlackOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "title": "GoHighLevelMCPCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInworldCredentialDTO",
+ "title": "InworldCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "title": "MinimaxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "title": "WellSaidCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEmailCredentialDTO",
+ "title": "EmailCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackWebhookCredentialDTO",
+ "title": "SlackWebhookCredential"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "provider",
+ "mapping": {
+ "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "anthropic-bedrock": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "azure": "#/components/schemas/CreateAzureCredentialDTO",
+ "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "gcp": "#/components/schemas/CreateGcpCredentialDTO",
+ "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
+ "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "google": "#/components/schemas/CreateGoogleCredentialDTO",
+ "groq": "#/components/schemas/CreateGroqCredentialDTO",
+ "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
+ "make": "#/components/schemas/CreateMakeCredentialDTO",
+ "openai": "#/components/schemas/CreateOpenAICredentialDTO",
+ "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
+ "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
+ "s3": "#/components/schemas/CreateS3CredentialDTO",
+ "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "tavus": "#/components/schemas/CreateTavusCredentialDTO",
+ "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
+ "vonage": "#/components/schemas/CreateVonageCredentialDTO",
+ "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
+ "custom-credential": "#/components/schemas/CreateCustomCredentialDTO",
+ "xai": "#/components/schemas/CreateXAiCredentialDTO",
+ "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "hume": "#/components/schemas/CreateHumeCredentialDTO",
+ "mistral": "#/components/schemas/CreateMistralCredentialDTO",
+ "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "soniox": "#/components/schemas/CreateSonioxCredentialDTO",
+ "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
+ "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "inworld": "#/components/schemas/CreateInworldCredentialDTO",
+ "minimax": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "wellsaid": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "email": "#/components/schemas/CreateEmailCredentialDTO",
+ "slack-webhook": "#/components/schemas/CreateSlackWebhookCredentialDTO"
+ }
+ }
+ }
+ },
+ "hooks": {
+ "type": "array",
+ "description": "This is a set of actions that will be performed on certain events.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CallHookCallEnding",
+ "title": "CallHookCallEnding"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
+ "title": "CallHookAssistantSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
+ "title": "CallHookCustomerSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
+ "title": "CallHookCustomerSpeechTimeout"
+ },
+ {
+ "$ref": "#/components/schemas/SessionCreatedHook",
+ "title": "SessionCreatedHook"
+ }
+ ]
+ }
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the assistant.\n\nThis is required when you want to transfer between assistants in a call.",
+ "maxLength": 40
+ },
+ "voicemailMessage": {
+ "type": "string",
+ "description": "This is the message that the assistant will say if the call is forwarded to voicemail.\n\nIf unspecified, it will hang up.",
+ "maxLength": 1000
+ },
+ "endCallMessage": {
+ "type": "string",
+ "description": "This is the message that the assistant will say if it ends the call.\n\nIf unspecified, it will hang up without saying anything.",
+ "maxLength": 1000
+ },
+ "endCallPhrases": {
+ "description": "This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive.",
+ "type": "array",
+ "items": {
+ "type": "string",
+ "maxLength": 140,
+ "minLength": 2
+ }
+ },
+ "compliancePlan": {
+ "$ref": "#/components/schemas/CompliancePlan"
+ },
+ "metadata": {
+ "type": "object",
+ "description": "This is for metadata you want to store on the assistant."
+ },
+ "backgroundSpeechDenoisingPlan": {
+ "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nSmart denoising can be combined with or used independently of Fourier denoising.\n\nOrder of precedence:\n- Smart denoising\n- Fourier denoising",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
+ }
+ ]
+ },
+ "analysisPlan": {
+ "description": "This is the plan for analysis of assistant's calls. Stored in `call.analysis`.",
+ "deprecated": true,
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AnalysisPlan"
+ }
+ ]
+ },
+ "artifactPlan": {
+ "description": "This is the plan for artifacts generated during assistant's calls. Stored in `call.artifact`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ArtifactPlan"
+ }
+ ]
+ },
+ "startSpeakingPlan": {
+ "description": "This is the plan for when the assistant should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StartSpeakingPlan"
+ }
+ ]
+ },
+ "stopSpeakingPlan": {
+ "description": "This is the plan for when assistant should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StopSpeakingPlan"
+ }
+ ]
+ },
+ "monitorPlan": {
+ "description": "This is the plan for real-time monitoring of the assistant's calls.\n\nUsage:\n- To enable live listening of the assistant's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the assistant's calls, set `monitorPlan.controlEnabled` to `true`.\n- To attach monitors to the assistant, set `monitorPlan.monitorIds` to the set of monitor ids.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/MonitorPlan"
+ }
+ ]
+ },
+ "credentialIds": {
+ "description": "These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server.url\n2. phoneNumber.serverUrl\n3. org.serverUrl",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "keypadInputPlan": {
+ "$ref": "#/components/schemas/KeypadInputPlan"
+ }
+ }
+ },
+ "Squad": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the squad."
+ },
+ "members": {
+ "description": "This is the list of assistants that make up the squad.\n\nThe call will start with the first assistant in the list.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/SquadMemberDTO"
+ }
+ },
+ "membersOverrides": {
+ "description": "This can be used to override all the assistants' settings and provide values for their template variables.\n\nBoth `membersOverrides` and `members[n].assistantOverrides` can be used together. First, `members[n].assistantOverrides` is applied. Then, `membersOverrides` is applied as a global override.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides"
+ }
+ ]
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the squad."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this squad belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the squad was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the squad was last updated."
+ }
+ },
+ "required": [
+ "members",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "UpdateSquadDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the squad."
+ },
+ "members": {
+ "description": "This is the list of assistants that make up the squad.\n\nThe call will start with the first assistant in the list.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/SquadMemberDTO"
+ }
+ },
+ "membersOverrides": {
+ "description": "This can be used to override all the assistants' settings and provide values for their template variables.\n\nBoth `membersOverrides` and `members[n].assistantOverrides` can be used together. First, `members[n].assistantOverrides` is applied. Then, `membersOverrides` is applied as a global override.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides"
+ }
+ ]
+ }
+ },
+ "required": [
+ "members"
+ ]
+ },
+ "Workflow": {
+ "type": "object",
+ "properties": {
+ "nodes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ConversationNode",
+ "title": "ConversationNode"
+ },
+ {
+ "$ref": "#/components/schemas/ToolNode",
+ "title": "ToolNode"
+ }
+ ]
+ }
+ },
+ "model": {
+ "description": "This is the model for the workflow.\n\nThis can be overridden at node level using `nodes[n].model`.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/WorkflowOpenAIModel",
+ "title": "WorkflowOpenAIModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowAnthropicModel",
+ "title": "WorkflowAnthropicModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowAnthropicBedrockModel",
+ "title": "WorkflowAnthropicBedrockModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowGoogleModel",
+ "title": "WorkflowGoogleModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowCustomModel",
+ "title": "WorkflowCustomModel"
+ }
+ ]
+ },
+ "transcriber": {
+ "description": "This is the transcriber for the workflow.\n\nThis can be overridden at node level using `nodes[n].transcriber`.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AssemblyAITranscriber",
+ "title": "AssemblyAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/AzureSpeechTranscriber",
+ "title": "AzureSpeechTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CustomTranscriber",
+ "title": "CustomTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramTranscriber",
+ "title": "DeepgramTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsTranscriber",
+ "title": "ElevenLabsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GladiaTranscriber",
+ "title": "GladiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleTranscriber",
+ "title": "GoogleTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SpeechmaticsTranscriber",
+ "title": "SpeechmaticsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/TalkscriberTranscriber",
+ "title": "TalkscriberTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAITranscriber",
+ "title": "OpenAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaTranscriber",
+ "title": "CartesiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SonioxTranscriber",
+ "title": "SonioxTranscriber"
+ }
+ ]
+ },
+ "voice": {
+ "description": "This is the voice for the workflow.\n\nThis can be overridden at node level using `nodes[n].voice`.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AzureVoice",
+ "title": "AzureVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaVoice",
+ "title": "CartesiaVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CustomVoice",
+ "title": "CustomVoice"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramVoice",
+ "title": "DeepgramVoice"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsVoice",
+ "title": "ElevenLabsVoice"
+ },
+ {
+ "$ref": "#/components/schemas/HumeVoice",
+ "title": "HumeVoice"
+ },
+ {
+ "$ref": "#/components/schemas/LMNTVoice",
+ "title": "LMNTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/NeuphonicVoice",
+ "title": "NeuphonicVoice"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoice",
+ "title": "OpenAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/PlayHTVoice",
+ "title": "PlayHTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/WellSaidVoice",
+ "title": "WellSaidVoice"
+ },
+ {
+ "$ref": "#/components/schemas/RimeAIVoice",
+ "title": "RimeAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SmallestAIVoice",
+ "title": "SmallestAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/TavusVoice",
+ "title": "TavusVoice"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoice",
+ "title": "VapiVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SesameVoice",
+ "title": "SesameVoice"
+ },
+ {
+ "$ref": "#/components/schemas/InworldVoice",
+ "title": "InworldVoice"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxVoice",
+ "title": "MinimaxVoice"
+ }
+ ]
+ },
+ "observabilityPlan": {
+ "description": "This is the plan for observability of workflow's calls.\n\nCurrently, only Langfuse is supported.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan",
+ "title": "Langfuse"
+ }
+ ],
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan"
+ }
+ ]
+ },
+ "backgroundSound": {
+ "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
+ "oneOf": [
+ {
+                "type": "string",
+ "enum": [
+ "off",
+ "office"
+ ],
+ "example": "office"
+ },
+ {
+ "type": "string",
+ "format": "uri",
+ "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
+ }
+ ]
+ },
+ "hooks": {
+ "type": "array",
+ "description": "This is a set of actions that will be performed on certain events.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CallHookCallEnding",
+ "title": "CallHookCallEnding"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
+ "title": "CallHookAssistantSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
+ "title": "CallHookCustomerSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
+ "title": "CallHookCustomerSpeechTimeout"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookModelResponseTimeout",
+ "title": "CallHookModelResponseTimeout"
+ }
+ ]
+ }
+ },
+ "credentials": {
+ "type": "array",
+                "description": "These are dynamic credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can supplement additional credentials using this. Dynamic credentials override existing credentials.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "title": "AnthropicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "title": "AnthropicBedrockCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "title": "AnyscaleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "title": "AssemblyAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureCredentialDTO",
+ "title": "AzureCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "title": "AzureOpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "title": "ByoSipTrunkCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "title": "CartesiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "title": "CerebrasCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "title": "CloudflareCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "title": "CustomLLMCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "title": "DeepgramCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "title": "DeepInfraCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "title": "DeepSeekCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "title": "ElevenLabsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGcpCredentialDTO",
+ "title": "GcpCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
+ "title": "GladiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "title": "GhlCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
+ "title": "GoogleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGroqCredentialDTO",
+ "title": "GroqCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHumeCredentialDTO",
+ "title": "HumeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "title": "InflectionAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "title": "LangfuseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLmntCredentialDTO",
+ "title": "LmntCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMakeCredentialDTO",
+ "title": "MakeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMistralCredentialDTO",
+ "title": "MistralCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "title": "NeuphonicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
+ "title": "OpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "title": "OpenRouterCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "title": "PerplexityAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "title": "PlayHTCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
+ "title": "RimeAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
+ "title": "RunpodCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateS3CredentialDTO",
+ "title": "S3Credential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "title": "SmallestAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "title": "SpeechmaticsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSonioxCredentialDTO",
+ "title": "SonioxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "title": "SupabaseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTavusCredentialDTO",
+ "title": "TavusCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "title": "TogetherAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
+ "title": "TrieveCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
+ "title": "TwilioCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonageCredentialDTO",
+ "title": "VonageCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
+ "title": "WebhookCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomCredentialDTO",
+ "title": "CustomCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateXAiCredentialDTO",
+ "title": "XAiCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "title": "GoogleCalendarOAuth2ClientCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleCalendarOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleSheetsOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "title": "SlackOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "title": "GoHighLevelMCPCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInworldCredentialDTO",
+ "title": "InworldCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "title": "MinimaxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "title": "WellSaidCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEmailCredentialDTO",
+ "title": "EmailCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackWebhookCredentialDTO",
+ "title": "SlackWebhookCredential"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "provider",
+ "mapping": {
+ "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "anthropic-bedrock": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "azure": "#/components/schemas/CreateAzureCredentialDTO",
+ "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "gcp": "#/components/schemas/CreateGcpCredentialDTO",
+ "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
+ "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "google": "#/components/schemas/CreateGoogleCredentialDTO",
+ "groq": "#/components/schemas/CreateGroqCredentialDTO",
+ "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
+ "make": "#/components/schemas/CreateMakeCredentialDTO",
+ "openai": "#/components/schemas/CreateOpenAICredentialDTO",
+ "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
+ "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
+ "s3": "#/components/schemas/CreateS3CredentialDTO",
+ "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "tavus": "#/components/schemas/CreateTavusCredentialDTO",
+ "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
+ "vonage": "#/components/schemas/CreateVonageCredentialDTO",
+ "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
+ "custom-credential": "#/components/schemas/CreateCustomCredentialDTO",
+ "xai": "#/components/schemas/CreateXAiCredentialDTO",
+ "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "hume": "#/components/schemas/CreateHumeCredentialDTO",
+ "mistral": "#/components/schemas/CreateMistralCredentialDTO",
+ "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "soniox": "#/components/schemas/CreateSonioxCredentialDTO",
+ "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
+ "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "inworld": "#/components/schemas/CreateInworldCredentialDTO",
+ "minimax": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "wellsaid": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "email": "#/components/schemas/CreateEmailCredentialDTO",
+ "slack-webhook": "#/components/schemas/CreateSlackWebhookCredentialDTO"
+ }
+ }
+ }
+ },
+ "voicemailDetection": {
+ "description": "This is the voicemail detection plan for the workflow.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "off"
+ ]
+ },
+ {
+ "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan",
+ "title": "Google"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoicemailDetectionPlan",
+ "title": "OpenAI"
+ },
+ {
+ "$ref": "#/components/schemas/TwilioVoicemailDetectionPlan",
+ "title": "Twilio"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoicemailDetectionPlan",
+ "title": "Vapi"
+ }
+ ]
+ },
+ "maxDurationSeconds": {
+ "type": "number",
+ "description": "This is the maximum duration of the call in seconds.\n\nAfter this duration, the call will automatically end.\n\nDefault is 1800 (30 minutes), max is 43200 (12 hours), and min is 10 seconds.",
+ "minimum": 10,
+ "maximum": 43200,
+ "example": 600
+ },
+ "id": {
+ "type": "string"
+ },
+ "orgId": {
+ "type": "string"
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string"
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string"
+ },
+ "name": {
+ "type": "string",
+ "maxLength": 80
+ },
+ "edges": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/Edge"
+ }
+ },
+ "globalPrompt": {
+ "type": "string",
+ "maxLength": 5000
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. tool.server\n2. workflow.server / assistant.server\n3. phoneNumber.server\n4. org.server",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "compliancePlan": {
+ "description": "This is the compliance plan for the workflow. It allows you to configure HIPAA and other compliance settings.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CompliancePlan"
+ }
+ ]
+ },
+ "analysisPlan": {
+ "description": "This is the plan for analysis of workflow's calls. Stored in `call.analysis`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AnalysisPlan"
+ }
+ ]
+ },
+ "artifactPlan": {
+ "description": "This is the plan for artifacts generated during workflow's calls. Stored in `call.artifact`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ArtifactPlan"
+ }
+ ]
+ },
+ "startSpeakingPlan": {
+ "description": "This is the plan for when the workflow nodes should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StartSpeakingPlan"
+ }
+ ]
+ },
+ "stopSpeakingPlan": {
+ "description": "This is the plan for when workflow nodes should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StopSpeakingPlan"
+ }
+ ]
+ },
+ "monitorPlan": {
+ "description": "This is the plan for real-time monitoring of the workflow's calls.\n\nUsage:\n- To enable live listening of the workflow's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the workflow's calls, set `monitorPlan.controlEnabled` to `true`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/MonitorPlan"
+ }
+ ]
+ },
+ "backgroundSpeechDenoisingPlan": {
+ "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nBoth can be used together. Order of precedence:\n- Smart denoising\n- Fourier denoising",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
+ }
+ ]
+ },
+ "credentialIds": {
+ "description": "These are the credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "keypadInputPlan": {
+ "description": "This is the plan for keypad input handling during workflow calls.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/KeypadInputPlan"
+ }
+ ]
+ },
+ "voicemailMessage": {
+ "type": "string",
+ "description": "This is the message that the assistant will say if the call is forwarded to voicemail.\n\nIf unspecified, it will hang up.",
+ "maxLength": 1000
+ }
+ },
+ "required": [
+ "nodes",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt",
+ "name",
+ "edges"
+ ]
+ },
+ "CreateWorkflowDTO": {
+ "type": "object",
+ "properties": {
+ "nodes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ConversationNode",
+ "title": "ConversationNode"
+ },
+ {
+ "$ref": "#/components/schemas/ToolNode",
+ "title": "ToolNode"
+ }
+ ]
+ }
+ },
+ "model": {
+ "description": "This is the model for the workflow.\n\nThis can be overridden at node level using `nodes[n].model`.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/WorkflowOpenAIModel",
+ "title": "WorkflowOpenAIModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowAnthropicModel",
+ "title": "WorkflowAnthropicModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowAnthropicBedrockModel",
+ "title": "WorkflowAnthropicBedrockModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowGoogleModel",
+ "title": "WorkflowGoogleModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowCustomModel",
+ "title": "WorkflowCustomModel"
+ }
+ ]
+ },
+ "transcriber": {
+ "description": "This is the transcriber for the workflow.\n\nThis can be overridden at node level using `nodes[n].transcriber`.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AssemblyAITranscriber",
+ "title": "AssemblyAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/AzureSpeechTranscriber",
+ "title": "AzureSpeechTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CustomTranscriber",
+ "title": "CustomTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramTranscriber",
+ "title": "DeepgramTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsTranscriber",
+ "title": "ElevenLabsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GladiaTranscriber",
+ "title": "GladiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleTranscriber",
+ "title": "GoogleTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SpeechmaticsTranscriber",
+ "title": "SpeechmaticsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/TalkscriberTranscriber",
+ "title": "TalkscriberTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAITranscriber",
+ "title": "OpenAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaTranscriber",
+ "title": "CartesiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SonioxTranscriber",
+ "title": "SonioxTranscriber"
+ }
+ ]
+ },
+ "voice": {
+ "description": "This is the voice for the workflow.\n\nThis can be overridden at node level using `nodes[n].voice`.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AzureVoice",
+ "title": "AzureVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaVoice",
+ "title": "CartesiaVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CustomVoice",
+ "title": "CustomVoice"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramVoice",
+ "title": "DeepgramVoice"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsVoice",
+ "title": "ElevenLabsVoice"
+ },
+ {
+ "$ref": "#/components/schemas/HumeVoice",
+ "title": "HumeVoice"
+ },
+ {
+ "$ref": "#/components/schemas/LMNTVoice",
+ "title": "LMNTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/NeuphonicVoice",
+ "title": "NeuphonicVoice"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoice",
+ "title": "OpenAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/PlayHTVoice",
+ "title": "PlayHTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/WellSaidVoice",
+ "title": "WellSaidVoice"
+ },
+ {
+ "$ref": "#/components/schemas/RimeAIVoice",
+ "title": "RimeAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SmallestAIVoice",
+ "title": "SmallestAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/TavusVoice",
+ "title": "TavusVoice"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoice",
+ "title": "VapiVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SesameVoice",
+ "title": "SesameVoice"
+ },
+ {
+ "$ref": "#/components/schemas/InworldVoice",
+ "title": "InworldVoice"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxVoice",
+ "title": "MinimaxVoice"
+ }
+ ]
+ },
+ "observabilityPlan": {
+ "description": "This is the plan for observability of workflow's calls.\n\nCurrently, only Langfuse is supported.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan",
+ "title": "Langfuse"
+ }
+ ],
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan"
+ }
+ ]
+ },
+ "backgroundSound": {
+ "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "off",
+ "office"
+ ],
+ "example": "office"
+ },
+ {
+ "type": "string",
+ "format": "uri",
+ "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
+ }
+ ]
+ },
+ "hooks": {
+ "type": "array",
+ "description": "This is a set of actions that will be performed on certain events.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CallHookCallEnding",
+ "title": "CallHookCallEnding"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
+ "title": "CallHookAssistantSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
+ "title": "CallHookCustomerSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
+ "title": "CallHookCustomerSpeechTimeout"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookModelResponseTimeout",
+ "title": "CallHookModelResponseTimeout"
+ }
+ ]
+ }
+ },
+ "credentials": {
+ "type": "array",
+ "description": "These are dynamic credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "title": "AnthropicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "title": "AnthropicBedrockCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "title": "AnyscaleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "title": "AssemblyAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureCredentialDTO",
+ "title": "AzureCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "title": "AzureOpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "title": "ByoSipTrunkCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "title": "CartesiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "title": "CerebrasCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "title": "CloudflareCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "title": "CustomLLMCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "title": "DeepgramCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "title": "DeepInfraCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "title": "DeepSeekCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "title": "ElevenLabsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGcpCredentialDTO",
+ "title": "GcpCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
+ "title": "GladiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "title": "GhlCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
+ "title": "GoogleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGroqCredentialDTO",
+ "title": "GroqCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHumeCredentialDTO",
+ "title": "HumeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "title": "InflectionAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "title": "LangfuseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLmntCredentialDTO",
+ "title": "LmntCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMakeCredentialDTO",
+ "title": "MakeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMistralCredentialDTO",
+ "title": "MistralCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "title": "NeuphonicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
+ "title": "OpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "title": "OpenRouterCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "title": "PerplexityAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "title": "PlayHTCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
+ "title": "RimeAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
+ "title": "RunpodCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateS3CredentialDTO",
+ "title": "S3Credential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "title": "SmallestAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "title": "SpeechmaticsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSonioxCredentialDTO",
+ "title": "SonioxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "title": "SupabaseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTavusCredentialDTO",
+ "title": "TavusCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "title": "TogetherAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
+ "title": "TrieveCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
+ "title": "TwilioCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonageCredentialDTO",
+ "title": "VonageCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
+ "title": "WebhookCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomCredentialDTO",
+ "title": "CustomCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateXAiCredentialDTO",
+ "title": "XAiCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "title": "GoogleCalendarOAuth2ClientCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleCalendarOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleSheetsOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "title": "SlackOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "title": "GoHighLevelMCPCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInworldCredentialDTO",
+ "title": "InworldCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "title": "MinimaxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "title": "WellSaidCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEmailCredentialDTO",
+ "title": "EmailCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackWebhookCredentialDTO",
+ "title": "SlackWebhookCredential"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "provider",
+ "mapping": {
+ "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "anthropic-bedrock": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "azure": "#/components/schemas/CreateAzureCredentialDTO",
+ "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "gcp": "#/components/schemas/CreateGcpCredentialDTO",
+ "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
+ "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "google": "#/components/schemas/CreateGoogleCredentialDTO",
+ "groq": "#/components/schemas/CreateGroqCredentialDTO",
+ "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
+ "make": "#/components/schemas/CreateMakeCredentialDTO",
+ "openai": "#/components/schemas/CreateOpenAICredentialDTO",
+ "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
+ "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
+ "s3": "#/components/schemas/CreateS3CredentialDTO",
+ "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "tavus": "#/components/schemas/CreateTavusCredentialDTO",
+ "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
+ "vonage": "#/components/schemas/CreateVonageCredentialDTO",
+ "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
+ "custom-credential": "#/components/schemas/CreateCustomCredentialDTO",
+ "xai": "#/components/schemas/CreateXAiCredentialDTO",
+ "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "hume": "#/components/schemas/CreateHumeCredentialDTO",
+ "mistral": "#/components/schemas/CreateMistralCredentialDTO",
+ "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "soniox": "#/components/schemas/CreateSonioxCredentialDTO",
+ "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
+ "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "inworld": "#/components/schemas/CreateInworldCredentialDTO",
+ "minimax": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "wellsaid": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "email": "#/components/schemas/CreateEmailCredentialDTO",
+ "slack-webhook": "#/components/schemas/CreateSlackWebhookCredentialDTO"
+ }
+ }
+ }
+ },
+ "voicemailDetection": {
+ "description": "This is the voicemail detection plan for the workflow.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "off"
+ ]
+ },
+ {
+ "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan",
+ "title": "Google"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoicemailDetectionPlan",
+ "title": "OpenAI"
+ },
+ {
+ "$ref": "#/components/schemas/TwilioVoicemailDetectionPlan",
+ "title": "Twilio"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoicemailDetectionPlan",
+ "title": "Vapi"
+ }
+ ]
+ },
+ "maxDurationSeconds": {
+ "type": "number",
+ "description": "This is the maximum duration of the call in seconds.\n\nAfter this duration, the call will automatically end.\n\nDefault is 1800 (30 minutes), max is 43200 (12 hours), and min is 10 seconds.",
+ "minimum": 10,
+ "maximum": 43200,
+ "example": 600
+ },
+ "name": {
+ "type": "string",
+ "maxLength": 80
+ },
+ "edges": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/Edge"
+ }
+ },
+ "globalPrompt": {
+ "type": "string",
+ "maxLength": 5000
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. tool.server\n2. workflow.server / assistant.server\n3. phoneNumber.server\n4. org.server",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "compliancePlan": {
+ "description": "This is the compliance plan for the workflow. It allows you to configure HIPAA and other compliance settings.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CompliancePlan"
+ }
+ ]
+ },
+ "analysisPlan": {
+ "description": "This is the plan for analysis of workflow's calls. Stored in `call.analysis`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AnalysisPlan"
+ }
+ ]
+ },
+ "artifactPlan": {
+ "description": "This is the plan for artifacts generated during workflow's calls. Stored in `call.artifact`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ArtifactPlan"
+ }
+ ]
+ },
+ "startSpeakingPlan": {
+ "description": "This is the plan for when the workflow nodes should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StartSpeakingPlan"
+ }
+ ]
+ },
+ "stopSpeakingPlan": {
+ "description": "This is the plan for when workflow nodes should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StopSpeakingPlan"
+ }
+ ]
+ },
+ "monitorPlan": {
+ "description": "This is the plan for real-time monitoring of the workflow's calls.\n\nUsage:\n- To enable live listening of the workflow's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the workflow's calls, set `monitorPlan.controlEnabled` to `true`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/MonitorPlan"
+ }
+ ]
+ },
+ "backgroundSpeechDenoisingPlan": {
+ "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nBoth can be used together. Order of precedence:\n- Smart denoising\n- Fourier denoising",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
+ }
+ ]
+ },
+ "credentialIds": {
+ "description": "These are the credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "keypadInputPlan": {
+ "description": "This is the plan for keypad input handling during workflow calls.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/KeypadInputPlan"
+ }
+ ]
+ },
+ "voicemailMessage": {
+ "type": "string",
+ "description": "This is the message that the assistant will say if the call is forwarded to voicemail.\n\nIf unspecified, it will hang up.",
+ "maxLength": 1000
+ }
+ },
+ "required": [
+ "nodes",
+ "name",
+ "edges"
+ ]
+ },
+ "UpdateWorkflowDTO": {
+ "type": "object",
+ "properties": {
+ "nodes": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ConversationNode",
+ "title": "ConversationNode"
+ },
+ {
+ "$ref": "#/components/schemas/ToolNode",
+ "title": "ToolNode"
+ }
+ ]
+ }
+ },
+ "model": {
+ "description": "This is the model for the workflow.\n\nThis can be overridden at node level using `nodes[n].model`.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/WorkflowOpenAIModel",
+ "title": "WorkflowOpenAIModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowAnthropicModel",
+ "title": "WorkflowAnthropicModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowAnthropicBedrockModel",
+ "title": "WorkflowAnthropicBedrockModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowGoogleModel",
+ "title": "WorkflowGoogleModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowCustomModel",
+ "title": "WorkflowCustomModel"
+ }
+ ]
+ },
+ "transcriber": {
+ "description": "This is the transcriber for the workflow.\n\nThis can be overridden at node level using `nodes[n].transcriber`.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AssemblyAITranscriber",
+ "title": "AssemblyAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/AzureSpeechTranscriber",
+ "title": "AzureSpeechTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CustomTranscriber",
+ "title": "CustomTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramTranscriber",
+ "title": "DeepgramTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsTranscriber",
+ "title": "ElevenLabsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GladiaTranscriber",
+ "title": "GladiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleTranscriber",
+ "title": "GoogleTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SpeechmaticsTranscriber",
+ "title": "SpeechmaticsTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/TalkscriberTranscriber",
+ "title": "TalkscriberTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAITranscriber",
+ "title": "OpenAITranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaTranscriber",
+ "title": "CartesiaTranscriber"
+ },
+ {
+ "$ref": "#/components/schemas/SonioxTranscriber",
+ "title": "SonioxTranscriber"
+ }
+ ]
+ },
+ "voice": {
+ "description": "This is the voice for the workflow.\n\nThis can be overridden at node level using `nodes[n].voice`.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AzureVoice",
+ "title": "AzureVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CartesiaVoice",
+ "title": "CartesiaVoice"
+ },
+ {
+ "$ref": "#/components/schemas/CustomVoice",
+ "title": "CustomVoice"
+ },
+ {
+ "$ref": "#/components/schemas/DeepgramVoice",
+ "title": "DeepgramVoice"
+ },
+ {
+ "$ref": "#/components/schemas/ElevenLabsVoice",
+ "title": "ElevenLabsVoice"
+ },
+ {
+ "$ref": "#/components/schemas/HumeVoice",
+ "title": "HumeVoice"
+ },
+ {
+ "$ref": "#/components/schemas/LMNTVoice",
+ "title": "LMNTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/NeuphonicVoice",
+ "title": "NeuphonicVoice"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoice",
+ "title": "OpenAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/PlayHTVoice",
+ "title": "PlayHTVoice"
+ },
+ {
+ "$ref": "#/components/schemas/WellSaidVoice",
+ "title": "WellSaidVoice"
+ },
+ {
+ "$ref": "#/components/schemas/RimeAIVoice",
+ "title": "RimeAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SmallestAIVoice",
+ "title": "SmallestAIVoice"
+ },
+ {
+ "$ref": "#/components/schemas/TavusVoice",
+ "title": "TavusVoice"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoice",
+ "title": "VapiVoice"
+ },
+ {
+ "$ref": "#/components/schemas/SesameVoice",
+ "title": "SesameVoice"
+ },
+ {
+ "$ref": "#/components/schemas/InworldVoice",
+ "title": "InworldVoice"
+ },
+ {
+ "$ref": "#/components/schemas/MinimaxVoice",
+ "title": "MinimaxVoice"
+ }
+ ]
+ },
+ "observabilityPlan": {
+ "description": "This is the plan for observability of workflow's calls.\n\nCurrently, only Langfuse is supported.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan",
+ "title": "Langfuse"
+ }
+ ],
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/LangfuseObservabilityPlan"
+ }
+ ]
+ },
+ "backgroundSound": {
+ "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
+ "oneOf": [
+ {
+                "type": "string",
+ "enum": [
+ "off",
+ "office"
+ ],
+ "example": "office"
+ },
+ {
+ "type": "string",
+ "format": "uri",
+ "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
+ }
+ ]
+ },
+ "hooks": {
+ "type": "array",
+ "description": "This is a set of actions that will be performed on certain events.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CallHookCallEnding",
+ "title": "CallHookCallEnding"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
+ "title": "CallHookAssistantSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
+ "title": "CallHookCustomerSpeechInterrupted"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
+ "title": "CallHookCustomerSpeechTimeout"
+ },
+ {
+ "$ref": "#/components/schemas/CallHookModelResponseTimeout",
+ "title": "CallHookModelResponseTimeout"
+ }
+ ]
+ }
+ },
+ "credentials": {
+ "type": "array",
+                "description": "These are dynamic credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can supplement additional credentials using this. Dynamic credentials override existing credentials.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "title": "AnthropicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "title": "AnthropicBedrockCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "title": "AnyscaleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "title": "AssemblyAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureCredentialDTO",
+ "title": "AzureCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "title": "AzureOpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "title": "ByoSipTrunkCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "title": "CartesiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "title": "CerebrasCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "title": "CloudflareCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "title": "CustomLLMCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "title": "DeepgramCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "title": "DeepInfraCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "title": "DeepSeekCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "title": "ElevenLabsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGcpCredentialDTO",
+ "title": "GcpCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
+ "title": "GladiaCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "title": "GhlCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
+ "title": "GoogleCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGroqCredentialDTO",
+ "title": "GroqCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateHumeCredentialDTO",
+ "title": "HumeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "title": "InflectionAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "title": "LangfuseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateLmntCredentialDTO",
+ "title": "LmntCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMakeCredentialDTO",
+ "title": "MakeCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMistralCredentialDTO",
+ "title": "MistralCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "title": "NeuphonicCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
+ "title": "OpenAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "title": "OpenRouterCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "title": "PerplexityAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "title": "PlayHTCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
+ "title": "RimeAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
+ "title": "RunpodCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateS3CredentialDTO",
+ "title": "S3Credential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "title": "SmallestAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "title": "SpeechmaticsCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSonioxCredentialDTO",
+ "title": "SonioxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "title": "SupabaseCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTavusCredentialDTO",
+ "title": "TavusCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "title": "TogetherAICredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
+ "title": "TrieveCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
+ "title": "TwilioCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonageCredentialDTO",
+ "title": "VonageCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
+ "title": "WebhookCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateCustomCredentialDTO",
+ "title": "CustomCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateXAiCredentialDTO",
+ "title": "XAiCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "title": "GoogleCalendarOAuth2ClientCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleCalendarOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "title": "GoogleSheetsOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "title": "SlackOAuth2AuthorizationCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "title": "GoHighLevelMCPCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateInworldCredentialDTO",
+ "title": "InworldCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "title": "MinimaxCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "title": "WellSaidCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateEmailCredentialDTO",
+ "title": "EmailCredential"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSlackWebhookCredentialDTO",
+ "title": "SlackWebhookCredential"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "provider",
+ "mapping": {
+ "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
+ "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
+ "anthropic-bedrock": "#/components/schemas/CreateAnthropicBedrockCredentialDTO",
+ "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
+ "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
+ "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
+ "azure": "#/components/schemas/CreateAzureCredentialDTO",
+ "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
+ "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
+ "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
+ "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
+ "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
+ "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
+ "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
+ "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
+ "gcp": "#/components/schemas/CreateGcpCredentialDTO",
+ "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
+ "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
+ "google": "#/components/schemas/CreateGoogleCredentialDTO",
+ "groq": "#/components/schemas/CreateGroqCredentialDTO",
+ "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
+ "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
+ "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
+ "make": "#/components/schemas/CreateMakeCredentialDTO",
+ "openai": "#/components/schemas/CreateOpenAICredentialDTO",
+ "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
+ "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
+ "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
+ "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
+ "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
+ "s3": "#/components/schemas/CreateS3CredentialDTO",
+ "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
+ "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
+ "tavus": "#/components/schemas/CreateTavusCredentialDTO",
+ "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
+ "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
+ "vonage": "#/components/schemas/CreateVonageCredentialDTO",
+ "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
+ "custom-credential": "#/components/schemas/CreateCustomCredentialDTO",
+ "xai": "#/components/schemas/CreateXAiCredentialDTO",
+ "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
+ "hume": "#/components/schemas/CreateHumeCredentialDTO",
+ "mistral": "#/components/schemas/CreateMistralCredentialDTO",
+ "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
+ "soniox": "#/components/schemas/CreateSonioxCredentialDTO",
+ "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
+ "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
+ "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
+ "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
+ "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
+ "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
+ "inworld": "#/components/schemas/CreateInworldCredentialDTO",
+ "minimax": "#/components/schemas/CreateMinimaxCredentialDTO",
+ "wellsaid": "#/components/schemas/CreateWellSaidCredentialDTO",
+ "email": "#/components/schemas/CreateEmailCredentialDTO",
+ "slack-webhook": "#/components/schemas/CreateSlackWebhookCredentialDTO"
+ }
+ }
+ }
+ },
+ "voicemailDetection": {
+ "description": "This is the voicemail detection plan for the workflow.",
+ "oneOf": [
+ {
+ "type": "string",
+ "enum": [
+ "off"
+ ]
+ },
+ {
+ "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan",
+ "title": "Google"
+ },
+ {
+ "$ref": "#/components/schemas/OpenAIVoicemailDetectionPlan",
+ "title": "OpenAI"
+ },
+ {
+ "$ref": "#/components/schemas/TwilioVoicemailDetectionPlan",
+ "title": "Twilio"
+ },
+ {
+ "$ref": "#/components/schemas/VapiVoicemailDetectionPlan",
+ "title": "Vapi"
+ }
+ ]
+ },
+ "maxDurationSeconds": {
+ "type": "number",
+ "description": "This is the maximum duration of the call in seconds.\n\nAfter this duration, the call will automatically end.\n\nDefault is 1800 (30 minutes), max is 43200 (12 hours), and min is 10 seconds.",
+ "minimum": 10,
+ "maximum": 43200,
+ "example": 600
+ },
+ "name": {
+ "type": "string",
+ "maxLength": 80
+ },
+ "edges": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/Edge"
+ }
+ },
+ "globalPrompt": {
+ "type": "string",
+ "maxLength": 5000
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. tool.server\n2. workflow.server / assistant.server\n3. phoneNumber.server\n4. org.server",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "compliancePlan": {
+ "description": "This is the compliance plan for the workflow. It allows you to configure HIPAA and other compliance settings.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CompliancePlan"
+ }
+ ]
+ },
+ "analysisPlan": {
+ "description": "This is the plan for analysis of workflow's calls. Stored in `call.analysis`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AnalysisPlan"
+ }
+ ]
+ },
+ "artifactPlan": {
+ "description": "This is the plan for artifacts generated during workflow's calls. Stored in `call.artifact`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ArtifactPlan"
+ }
+ ]
+ },
+ "startSpeakingPlan": {
+ "description": "This is the plan for when the workflow nodes should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StartSpeakingPlan"
+ }
+ ]
+ },
+ "stopSpeakingPlan": {
+ "description": "This is the plan for when workflow nodes should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/StopSpeakingPlan"
+ }
+ ]
+ },
+ "monitorPlan": {
+ "description": "This is the plan for real-time monitoring of the workflow's calls.\n\nUsage:\n- To enable live listening of the workflow's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the workflow's calls, set `monitorPlan.controlEnabled` to `true`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/MonitorPlan"
+ }
+ ]
+ },
+ "backgroundSpeechDenoisingPlan": {
+ "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nBoth can be used together. Order of precedence:\n- Smart denoising\n- Fourier denoising",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
+ }
+ ]
+ },
+ "credentialIds": {
+ "description": "These are the credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "keypadInputPlan": {
+ "description": "This is the plan for keypad input handling during workflow calls.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/KeypadInputPlan"
+ }
+ ]
+ },
+ "voicemailMessage": {
+ "type": "string",
+ "description": "This is the message that the assistant will say if the call is forwarded to voicemail.\n\nIf unspecified, it will hang up.",
+ "maxLength": 1000
+ }
+ }
+ },
+ "SubscriptionLimits": {
+ "type": "object",
+ "properties": {
+ "concurrencyBlocked": {
+ "type": "boolean",
+ "description": "True if this call was blocked by the Call Concurrency limit",
+ "default": false
+ },
+ "concurrencyLimit": {
+ "type": "number",
+ "description": "Account Call Concurrency limit"
+ },
+ "remainingConcurrentCalls": {
+ "type": "number",
+ "description": "Incremental number of concurrent calls that will be allowed, including this call"
+ }
+ }
+ },
+ "AnalysisCostBreakdown": {
+ "type": "object",
+ "properties": {
+ "summary": {
+ "type": "number",
+ "description": "This is the cost to summarize the call."
+ },
+ "summaryPromptTokens": {
+ "type": "number",
+ "description": "This is the number of prompt tokens used to summarize the call."
+ },
+ "summaryCompletionTokens": {
+ "type": "number",
+ "description": "This is the number of completion tokens used to summarize the call."
+ },
+ "summaryCachedPromptTokens": {
+ "type": "number",
+ "description": "This is the number of cached prompt tokens used to summarize the call."
+ },
+ "structuredData": {
+ "type": "number",
+ "description": "This is the cost to extract structured data from the call."
+ },
+ "structuredDataPromptTokens": {
+ "type": "number",
+ "description": "This is the number of prompt tokens used to extract structured data from the call."
+ },
+ "structuredDataCompletionTokens": {
+ "type": "number",
+ "description": "This is the number of completion tokens used to extract structured data from the call."
+ },
+ "structuredDataCachedPromptTokens": {
+ "type": "number",
+ "description": "This is the number of cached prompt tokens used to extract structured data from the call."
+ },
+ "successEvaluation": {
+ "type": "number",
+ "description": "This is the cost to evaluate if the call was successful."
+ },
+ "successEvaluationPromptTokens": {
+ "type": "number",
+ "description": "This is the number of prompt tokens used to evaluate if the call was successful."
+ },
+ "successEvaluationCompletionTokens": {
+ "type": "number",
+ "description": "This is the number of completion tokens used to evaluate if the call was successful."
+ },
+ "successEvaluationCachedPromptTokens": {
+ "type": "number",
+ "description": "This is the number of cached prompt tokens used to evaluate if the call was successful."
+ },
+ "structuredOutput": {
+ "type": "number",
+ "description": "This is the cost to evaluate structuredOutputs from the call."
+ },
+ "structuredOutputPromptTokens": {
+ "type": "number",
+ "description": "This is the number of prompt tokens used to evaluate structuredOutputs from the call."
+ },
+ "structuredOutputCompletionTokens": {
+ "type": "number",
+ "description": "This is the number of completion tokens used to evaluate structuredOutputs from the call."
+ },
+ "structuredOutputCachedPromptTokens": {
+ "type": "number",
+ "description": "This is the number of cached prompt tokens used to evaluate structuredOutputs from the call."
+ }
+ }
+ },
+ "CostBreakdown": {
+ "type": "object",
+ "properties": {
+ "transport": {
+ "type": "number",
+ "description": "This is the cost of the transport provider, like Twilio or Vonage."
+ },
+ "stt": {
+ "type": "number",
+ "description": "This is the cost of the speech-to-text service."
+ },
+ "llm": {
+ "type": "number",
+ "description": "This is the cost of the language model."
+ },
+ "tts": {
+ "type": "number",
+ "description": "This is the cost of the text-to-speech service."
+ },
+ "vapi": {
+ "type": "number",
+ "description": "This is the cost of Vapi."
+ },
+ "chat": {
+ "type": "number",
+ "description": "This is the cost of chat interactions."
+ },
+ "total": {
+ "type": "number",
+ "description": "This is the total cost of the call."
+ },
+ "llmPromptTokens": {
+ "type": "number",
+ "description": "This is the LLM prompt tokens used for the call."
+ },
+ "llmCompletionTokens": {
+ "type": "number",
+ "description": "This is the LLM completion tokens used for the call."
+ },
+ "llmCachedPromptTokens": {
+ "type": "number",
+ "description": "This is the LLM cached prompt tokens used for the call."
+ },
+ "ttsCharacters": {
+ "type": "number",
+ "description": "This is the TTS characters used for the call."
+ },
+ "analysisCostBreakdown": {
+ "description": "This is the cost of the analysis.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AnalysisCostBreakdown"
+ }
+ ]
+ }
+ }
+ },
+ "Analysis": {
+ "type": "object",
+ "properties": {
+ "summary": {
+ "type": "string",
+ "description": "This is the summary of the call. Customize by setting `assistant.analysisPlan.summaryPrompt`."
+ },
+ "structuredData": {
+ "type": "object",
+ "description": "This is the structured data extracted from the call. Customize by setting `assistant.analysisPlan.structuredDataPrompt` and/or `assistant.analysisPlan.structuredDataSchema`."
+ },
+ "structuredDataMulti": {
+ "description": "This is the structured data catalog of the call. Customize by setting `assistant.analysisPlan.structuredDataMultiPlan`.",
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
+ },
+ "successEvaluation": {
+ "type": "string",
+ "description": "This is the evaluation of the call. Customize by setting `assistant.analysisPlan.successEvaluationPrompt` and/or `assistant.analysisPlan.successEvaluationRubric`."
+ }
+ }
+ },
+ "MonitorResult": {
+ "type": "object",
+ "properties": {
+ "monitorId": {
+ "type": "string"
+ },
+ "filterPassed": {
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "monitorId",
+ "filterPassed"
+ ]
+ },
+ "Monitor": {
+ "type": "object",
+ "properties": {
+ "monitors": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/MonitorResult"
+ }
+ },
+ "listenUrl": {
+ "type": "string",
+ "description": "This is the URL where the assistant's calls can be listened to in real-time. To enable, set `assistant.monitorPlan.listenEnabled` to `true`."
+ },
+ "controlUrl": {
+ "type": "string",
+ "description": "This is the URL where the assistant's calls can be controlled in real-time. To enable, set `assistant.monitorPlan.controlEnabled` to `true`."
+ }
+ }
+ },
+ "Mono": {
+ "type": "object",
+ "properties": {
+ "combinedUrl": {
+ "type": "string",
+ "description": "This is the combined recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`."
+ },
+ "assistantUrl": {
+ "type": "string",
+ "description": "This is the mono recording url for the assistant. To enable, set `assistant.artifactPlan.recordingEnabled`."
+ },
+ "customerUrl": {
+ "type": "string",
+ "description": "This is the mono recording url for the customer. To enable, set `assistant.artifactPlan.recordingEnabled`."
+ }
+ }
+ },
+ "Recording": {
+ "type": "object",
+ "properties": {
+ "stereoUrl": {
+ "type": "string",
+ "description": "This is the stereo recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`."
+ },
+ "videoUrl": {
+ "type": "string",
+ "description": "This is the video recording url for the call. To enable, set `assistant.artifactPlan.videoRecordingEnabled`."
+ },
+ "videoRecordingStartDelaySeconds": {
+ "type": "number",
+ "description": "This is video recording start delay in ms. To enable, set `assistant.artifactPlan.videoRecordingEnabled`. This can be used to align the playback of the recording with artifact.messages timestamps."
+ },
+ "mono": {
+ "description": "This is the mono recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Mono"
+ }
+ ]
+ }
+ }
+ },
+ "NodeArtifact": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that were spoken during the node.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
+ },
+ {
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
+ },
+ {
+ "$ref": "#/components/schemas/BotMessage",
+ "title": "BotMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolCallMessage",
+ "title": "ToolCallMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolCallResultMessage",
+ "title": "ToolCallResultMessage"
+ }
+ ]
+ }
+ },
+ "nodeName": {
+ "type": "string",
+ "description": "This is the node name."
+ },
+ "variableValues": {
+ "type": "object",
+ "description": "These are the variable values that were extracted from the node."
+ }
+ }
+ },
+ "AssistantActivation": {
+ "type": "object",
+ "properties": {
+ "assistantName": {
+ "type": "string",
+ "description": "This is the name of the assistant that was active during the call."
+ },
+ "assistantId": {
+ "type": "string",
+ "description": "This is the ID of the assistant that was active during the call."
+ }
+ },
+ "required": [
+ "assistantName"
+ ]
+ },
+ "TurnLatency": {
+ "type": "object",
+ "properties": {
+ "modelLatency": {
+ "type": "number",
+ "description": "This is the model latency for the first token."
+ },
+ "voiceLatency": {
+ "type": "number",
+ "description": "This is the voice latency from the model output."
+ },
+ "transcriberLatency": {
+ "type": "number",
+ "description": "This is the transcriber latency from the user speech."
+ },
+ "endpointingLatency": {
+ "type": "number",
+ "description": "This is the endpointing latency."
+ },
+ "turnLatency": {
+ "type": "number",
+ "description": "This is the latency for the whole turn."
+ }
+ }
+ },
+ "PerformanceMetrics": {
+ "type": "object",
+ "properties": {
+ "turnLatencies": {
+ "description": "These are the individual latencies for each turn.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/TurnLatency"
+ }
+ },
+ "modelLatencyAverage": {
+ "type": "number",
+ "description": "This is the average latency for the model to output the first token."
+ },
+ "voiceLatencyAverage": {
+ "type": "number",
+ "description": "This is the average latency for the text to speech."
+ },
+ "transcriberLatencyAverage": {
+ "type": "number",
+ "description": "This is the average latency for the transcriber."
+ },
+ "endpointingLatencyAverage": {
+ "type": "number",
+ "description": "This is the average latency for the endpointing."
+ },
+ "turnLatencyAverage": {
+ "type": "number",
+ "description": "This is the average latency for complete turns."
+ },
+ "fromTransportLatencyAverage": {
+ "type": "number",
+ "description": "This is the average latency for packets received from the transport provider in milliseconds."
+ },
+ "toTransportLatencyAverage": {
+ "type": "number",
+ "description": "This is the average latency for packets sent to the transport provider in milliseconds."
+ },
+ "numUserInterrupted": {
+ "type": "number",
+ "description": "This is the number of times the user was interrupted by the assistant during the call."
+ },
+ "numAssistantInterrupted": {
+ "type": "number",
+ "description": "This is the number of times the assistant was interrupted by the user during the call."
+ }
+ }
+ },
+ "Artifact": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that were spoken during the call.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
+ },
+ {
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
+ },
+ {
+ "$ref": "#/components/schemas/BotMessage",
+ "title": "BotMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolCallMessage",
+ "title": "ToolCallMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolCallResultMessage",
+ "title": "ToolCallResultMessage"
+ }
+ ]
+ }
+ },
+ "messagesOpenAIFormatted": {
+ "description": "These are the messages that were spoken during the call, formatted for OpenAI.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIMessage"
+ }
+ },
+ "recordingUrl": {
+ "type": "string",
+ "description": "This is the recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`.",
+ "deprecated": true
+ },
+ "stereoRecordingUrl": {
+ "type": "string",
+ "description": "This is the stereo recording url for the call. To enable, set `assistant.artifactPlan.recordingEnabled`.",
+ "deprecated": true
+ },
+ "videoRecordingUrl": {
+ "type": "string",
+ "description": "This is the video recording url for the call. To enable, set `assistant.artifactPlan.videoRecordingEnabled`.",
+ "deprecated": true
+ },
+ "videoRecordingStartDelaySeconds": {
+ "type": "number",
+ "description": "This is the video recording start delay in ms. To enable, set `assistant.artifactPlan.videoRecordingEnabled`. This can be used to align the playback of the recording with artifact.messages timestamps.",
+ "deprecated": true
+ },
+ "recording": {
+ "description": "This is the recording for the call. To enable, set `assistant.artifactPlan.recordingEnabled`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Recording"
+ }
+ ]
+ },
+ "transcript": {
+ "type": "string",
+ "description": "This is the transcript of the call. This is derived from `artifact.messages` but provided for convenience."
+ },
+ "pcapUrl": {
+ "type": "string",
+ "description": "This is the packet capture url for the call. This is only available for `phone` type calls where phone number's provider is `vapi` or `byo-phone-number`."
+ },
+ "logUrl": {
+ "type": "string",
+ "description": "This is the url for the call logs. This includes all logging output during the call for debugging purposes."
+ },
+ "nodes": {
+ "description": "This is the history of workflow nodes that were executed during the call.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/NodeArtifact"
+ }
+ },
+ "assistantActivations": {
+ "description": "Ordered list of assistants that were active during the call, including after transfers and handoffs.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/AssistantActivation"
+ }
+ },
+ "variableValues": {
+ "type": "object",
+ "description": "These are the variable values at the end of the workflow execution."
+ },
+ "performanceMetrics": {
+ "description": "This is the performance metrics for the call. It contains the turn latency, broken down by component.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/PerformanceMetrics"
+ }
+ ]
+ },
+ "structuredOutputs": {
+ "type": "object",
+ "description": "These are the structured outputs that will be extracted from the call.\nTo enable, set `assistant.artifactPlan.structuredOutputIds` with the IDs of the structured outputs you want to extract."
+ },
+ "scorecards": {
+ "type": "object",
+ "description": "These are the scorecards that have been evaluated based on the structured outputs extracted during the call.\nTo enable, set `assistant.artifactPlan.scorecardIds` or `assistant.artifactPlan.scorecards` with the IDs or objects of the scorecards you want to evaluate."
+ },
+ "transfers": {
+ "description": "These are the transfer records from warm transfers, including destinations, transcripts, and status.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "structuredOutputsLastUpdatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is when the structured outputs were last updated."
+ }
+ }
+ },
+ "RecordingConsent": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "object",
+ "description": "This is the type of recording consent."
+ },
+ "grantedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the date and time the recording consent was granted.\nIf not specified, it means the recording consent was not granted."
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "Compliance": {
+ "type": "object",
+ "properties": {
+ "recordingConsent": {
+ "description": "This is the recording consent of the call. Configure in `assistant.compliancePlan.recordingConsentPlan`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/RecordingConsent"
+ }
+ ]
+ }
+ }
+ },
+ "WorkflowOverrides": {
+ "type": "object",
+ "properties": {
+ "variableValues": {
+ "type": "object",
+ "description": "These are values that will be used to replace the template variables in the workflow messages and other text-based fields.\nThis uses LiquidJS syntax. https://liquidjs.com/tutorials/intro-to-liquid.html\n\nSo for example, `{{ name }}` will be replaced with the value of `name` in `variableValues`.\n`{{\"now\" | date: \"%b %d, %Y, %I:%M %p\", \"America/New_York\"}}` will be replaced with the current date and time in New York.\n Some VAPI reserved defaults:\n - *customer* - the customer object"
+ }
+ }
+ },
+ "TransferPhoneNumberHookAction": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of action - must be \"transfer\"",
+ "enum": [
+ "transfer"
+ ]
+ },
+ "destination": {
+ "description": "This is the destination details for the transfer - can be a phone number or SIP URI",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "SayPhoneNumberHookAction": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of action - must be \"say\"",
+ "enum": [
+ "say"
+ ]
+ },
+ "exact": {
+ "type": "string",
+ "description": "This is the message to say",
+ "maxLength": 4000
+ }
+ },
+ "required": [
+ "type",
+ "exact"
+ ]
+ },
+ "PhoneNumberCallRingingHookFilter": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of filter - matches when the specified field starts with any of the given prefixes",
+ "enum": [
+ "startsWith"
+ ],
+ "maxLength": 1000
+ },
+ "key": {
+ "type": "string",
+ "description": "The field to check. Currently only \"number\" (the caller's phone number) is supported.",
+ "enum": [
+ "number"
+ ],
+ "maxLength": 1000
+ },
+ "startsWith": {
+ "type": "array",
+ "description": "Array of prefixes to match. Do not include the + prefix. Inbound calls from numbers starting with any of these prefixes will trigger the hook actions.",
+ "maxLength": 20,
+ "items": {
+ "type": "string",
+ "maxLength": 20
+ },
+ "example": [
+ "91",
+ "86",
+ "7"
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "key",
+ "startsWith"
+ ]
+ },
+ "PhoneNumberHookCallRinging": {
+ "type": "object",
+ "properties": {
+ "on": {
+ "type": "string",
+ "description": "This is the event to trigger the hook on",
+ "enum": [
+ "call.ringing"
+ ],
+ "maxLength": 1000
+ },
+ "filters": {
+ "type": "array",
+ "description": "Optional filters to decide when to trigger the hook. Currently supports filtering by caller country code.",
+ "items": {
+ "$ref": "#/components/schemas/PhoneNumberCallRingingHookFilter"
+ }
+ },
+ "do": {
+ "type": "array",
+ "description": "Only the first action will be executed. Additional actions will be ignored.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferPhoneNumberHookAction",
+ "title": "TransferPhoneNumberHookAction"
+ },
+ {
+ "$ref": "#/components/schemas/SayPhoneNumberHookAction",
+ "title": "SayPhoneNumberHookAction"
+ }
+ ]
+ }
+ }
+ },
+ "required": [
+ "on",
+ "do"
+ ]
+ },
+ "PhoneNumberCallEndingHookFilter": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of filter - currently only \"oneOf\" is supported",
+ "enum": [
+ "oneOf"
+ ],
+ "maxLength": 1000
+ },
+ "key": {
+ "type": "string",
+ "description": "This is the key to filter on - only \"call.endedReason\" is allowed for phone number call ending hooks",
+ "enum": [
+ "call.endedReason"
+ ],
+ "maxLength": 1000
+ },
+ "oneOf": {
+ "type": "array",
+ "description": "This is the array of assistant-request related ended reasons to match against",
+ "enum": [
+ "assistant-request-failed",
+ "assistant-request-returned-error",
+ "assistant-request-returned-unspeakable-error",
+ "assistant-request-returned-invalid-assistant",
+ "assistant-request-returned-no-assistant",
+ "assistant-request-returned-forwarding-phone-number"
+ ],
+ "items": {
+ "type": "string",
+ "enum": [
+ "assistant-request-failed",
+ "assistant-request-returned-error",
+ "assistant-request-returned-unspeakable-error",
+ "assistant-request-returned-invalid-assistant",
+ "assistant-request-returned-no-assistant",
+ "assistant-request-returned-forwarding-phone-number"
+ ]
+ }
+ }
+ },
+ "required": [
+ "type",
+ "key",
+ "oneOf"
+ ]
+ },
+ "PhoneNumberHookCallEnding": {
+ "type": "object",
+ "properties": {
+ "on": {
+ "type": "string",
+ "description": "This is the event to trigger the hook on",
+ "enum": [
+ "call.ending"
+ ],
+ "maxLength": 1000
+ },
+ "filters": {
+ "type": "array",
+ "description": "Optional filters to decide when to trigger - restricted to assistant-request related ended reasons",
+ "items": {
+ "$ref": "#/components/schemas/PhoneNumberCallEndingHookFilter"
+ }
+ },
+ "do": {
+ "description": "This is the action to perform when the hook triggers",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferPhoneNumberHookAction",
+ "title": "TransferPhoneNumberHookAction"
+ },
+ {
+ "$ref": "#/components/schemas/SayPhoneNumberHookAction",
+ "title": "SayPhoneNumberHookAction"
+ }
+ ]
+ }
+ },
+ "required": [
+ "on"
+ ]
+ },
+ "ImportTwilioPhoneNumberDTO": {
+ "type": "object",
+ "properties": {
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
+ }
+ ]
+ },
+ "hooks": {
+ "type": "array",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
+ },
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
+ }
+ ]
+ }
+ },
+ "smsEnabled": {
+ "type": "boolean",
+ "description": "Controls whether Vapi sets the messaging webhook URL on the Twilio number during import.\n\nIf set to `false`, Vapi will not update the Twilio messaging URL, leaving it as is.\nIf `true` or omitted (default), Vapi will configure both the voice and messaging URLs.\n\n@default true",
+ "default": true
+ },
+ "twilioPhoneNumber": {
+ "type": "string",
+ "description": "These are the digits of the phone number you own on your Twilio.",
+ "deprecated": true
+ },
+ "twilioAccountSid": {
+ "type": "string",
+ "description": "This is your Twilio Account SID that will be used to handle this phone number."
+ },
+ "twilioAuthToken": {
+ "type": "string",
+ "description": "This is the Twilio Auth Token that will be used to handle this phone number."
+ },
+ "twilioApiKey": {
+ "type": "string",
+ "description": "This is the Twilio API Key that will be used to handle this phone number. If AuthToken is provided, this will be ignored."
+ },
+ "twilioApiSecret": {
+ "type": "string",
+ "description": "This is the Twilio API Secret that will be used to handle this phone number. If AuthToken is provided, this will be ignored."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the phone number. This is just for your own reference.",
+ "maxLength": 40
+ },
+ "assistantId": {
+ "type": "string",
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "workflowId": {
+ "type": "string",
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ }
+ },
+ "required": [
+ "twilioPhoneNumber",
+ "twilioAccountSid"
+ ]
+ },
+ "CreateCustomerDTO": {
+ "type": "object",
+ "properties": {
+ "numberE164CheckEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.\n\nUse cases:\n- `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.\n- `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.\n\nIf `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\\+?[a-zA-Z0-9]+$/`).\n\n@default true (E164 check is enabled)",
+ "default": true
+ },
+ "extension": {
+ "type": "string",
+ "description": "This is the extension that will be dialed after the call is answered.",
+ "maxLength": 10,
+ "example": null
+ },
+ "assistantOverrides": {
+ "description": "These are the overrides for the assistant's settings and template variables specific to this customer.\nThis allows customization of the assistant's behavior for individual customers in batch calls.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides"
+ }
+ ]
+ },
+ "number": {
+ "type": "string",
+ "description": "This is the number of the customer.",
+ "minLength": 3,
+ "maxLength": 40
+ },
+ "sipUri": {
+ "type": "string",
+ "description": "This is the SIP URI of the customer."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the customer. This is just for your own reference.\n\nFor SIP inbound calls, this is extracted from the `From` SIP header with format `\"Display Name\" `.",
+ "maxLength": 40
+ },
+ "email": {
+ "type": "string",
+ "description": "This is the email of the customer.",
+ "maxLength": 40
+ },
+ "externalId": {
+ "type": "string",
+ "description": "This is the external ID of the customer.",
+ "maxLength": 40
+ }
+ }
+ },
+ "SchedulePlan": {
+ "type": "object",
+ "properties": {
+ "earliestAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of the earliest time the call can be scheduled."
+ },
+ "latestAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of the latest time the call can be scheduled."
+ }
+ },
+ "required": [
+ "earliestAt"
+ ]
+ },
+ "Call": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of call.",
+ "enum": [
+ "inboundPhoneCall",
+ "outboundPhoneCall",
+ "webCall",
+ "vapi.websocketCall"
+ ]
+ },
+ "costs": {
+ "type": "array",
+ "description": "These are the costs of individual components of the call in USD.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransportCost",
+ "title": "TransportCost"
+ },
+ {
+ "$ref": "#/components/schemas/TranscriberCost",
+ "title": "TranscriberCost"
+ },
+ {
+ "$ref": "#/components/schemas/ModelCost",
+ "title": "ModelCost"
+ },
+ {
+ "$ref": "#/components/schemas/VoiceCost",
+ "title": "VoiceCost"
+ },
+ {
+ "$ref": "#/components/schemas/VapiCost",
+ "title": "VapiCost"
+ },
+ {
+ "$ref": "#/components/schemas/VoicemailDetectionCost",
+ "title": "VoicemailDetectionCost"
+ },
+ {
+ "$ref": "#/components/schemas/AnalysisCost",
+ "title": "AnalysisCost"
+ },
+ {
+ "$ref": "#/components/schemas/KnowledgeBaseCost",
+ "title": "KnowledgeBaseCost"
+ }
+ ]
+ }
+ },
+ "messages": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
+ },
+ {
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
+ },
+ {
+ "$ref": "#/components/schemas/BotMessage",
+ "title": "BotMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolCallMessage",
+ "title": "ToolCallMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolCallResultMessage",
+ "title": "ToolCallResultMessage"
+ }
+ ]
+ }
+ },
+ "phoneCallProvider": {
+ "type": "string",
+ "description": "This is the provider of the call.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
+ "deprecated": true,
+ "enum": [
+ "twilio",
+ "vonage",
+ "vapi",
+ "telnyx"
+ ]
+ },
+ "phoneCallTransport": {
+ "type": "string",
+ "description": "This is the transport of the phone call.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
+ "enum": [
+ "sip",
+ "pstn"
+ ]
+ },
+ "status": {
+ "type": "string",
+ "description": "This is the status of the call.",
+ "enum": [
+ "scheduled",
+ "queued",
+ "ringing",
+ "in-progress",
+ "forwarding",
+ "ended",
+ "not-found",
+ "deletion-failed"
+ ]
+ },
+ "endedReason": {
+ "type": "string",
+ "description": "This is the explanation for how the call ended.",
+ "enum": [
+ "call-start-error-neither-assistant-nor-server-set",
+ "assistant-request-failed",
+ "assistant-request-returned-error",
+ "assistant-request-returned-unspeakable-error",
+ "assistant-request-returned-invalid-assistant",
+ "assistant-request-returned-no-assistant",
+ "assistant-request-returned-forwarding-phone-number",
+ "scheduled-call-deleted",
+ "call.start.error-vapifault-get-org",
+ "call.start.error-vapifault-get-subscription",
+ "call.start.error-get-assistant",
+ "call.start.error-get-phone-number",
+ "call.start.error-get-customer",
+ "call.start.error-get-resources-validation",
+ "call.start.error-vapi-number-international",
+ "call.start.error-vapi-number-outbound-daily-limit",
+ "call.start.error-get-transport",
+ "call.start.error-subscription-wallet-does-not-exist",
+ "call.start.error-fraud-check-failed",
+ "call.start.error-subscription-frozen",
+ "call.start.error-subscription-insufficient-credits",
+ "call.start.error-subscription-upgrade-failed",
+ "call.start.error-subscription-concurrency-limit-reached",
+ "call.start.error-enterprise-feature-not-available-recording-consent",
+ "assistant-not-valid",
+ "call.start.error-vapifault-database-error",
+ "assistant-not-found",
+ "pipeline-error-openai-voice-failed",
+ "pipeline-error-cartesia-voice-failed",
+ "pipeline-error-deepgram-voice-failed",
+ "pipeline-error-eleven-labs-voice-failed",
+ "pipeline-error-playht-voice-failed",
+ "pipeline-error-lmnt-voice-failed",
+ "pipeline-error-azure-voice-failed",
+ "pipeline-error-rime-ai-voice-failed",
+ "pipeline-error-smallest-ai-voice-failed",
+ "pipeline-error-vapi-voice-failed",
+ "pipeline-error-neuphonic-voice-failed",
+ "pipeline-error-hume-voice-failed",
+ "pipeline-error-sesame-voice-failed",
+ "pipeline-error-inworld-voice-failed",
+ "pipeline-error-minimax-voice-failed",
+ "pipeline-error-wellsaid-voice-failed",
+ "pipeline-error-tavus-video-failed",
+ "call.in-progress.error-vapifault-openai-voice-failed",
+ "call.in-progress.error-vapifault-cartesia-voice-failed",
+ "call.in-progress.error-vapifault-deepgram-voice-failed",
+ "call.in-progress.error-vapifault-eleven-labs-voice-failed",
+ "call.in-progress.error-vapifault-playht-voice-failed",
+ "call.in-progress.error-vapifault-lmnt-voice-failed",
+ "call.in-progress.error-vapifault-azure-voice-failed",
+ "call.in-progress.error-vapifault-rime-ai-voice-failed",
+ "call.in-progress.error-vapifault-smallest-ai-voice-failed",
+ "call.in-progress.error-vapifault-vapi-voice-failed",
+ "call.in-progress.error-vapifault-neuphonic-voice-failed",
+ "call.in-progress.error-vapifault-hume-voice-failed",
+ "call.in-progress.error-vapifault-sesame-voice-failed",
+ "call.in-progress.error-vapifault-inworld-voice-failed",
+ "call.in-progress.error-vapifault-minimax-voice-failed",
+ "call.in-progress.error-vapifault-wellsaid-voice-failed",
+ "call.in-progress.error-vapifault-tavus-video-failed",
+ "pipeline-error-vapi-llm-failed",
+ "pipeline-error-vapi-400-bad-request-validation-failed",
+ "pipeline-error-vapi-401-unauthorized",
+ "pipeline-error-vapi-403-model-access-denied",
+ "pipeline-error-vapi-429-exceeded-quota",
+ "pipeline-error-vapi-500-server-error",
+ "pipeline-error-vapi-503-server-overloaded-error",
+ "call.in-progress.error-providerfault-vapi-llm-failed",
+ "call.in-progress.error-vapifault-vapi-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-vapi-401-unauthorized",
+ "call.in-progress.error-vapifault-vapi-403-model-access-denied",
+ "call.in-progress.error-vapifault-vapi-429-exceeded-quota",
+ "call.in-progress.error-providerfault-vapi-500-server-error",
+ "call.in-progress.error-providerfault-vapi-503-server-overloaded-error",
+ "pipeline-error-deepgram-transcriber-failed",
+ "pipeline-error-deepgram-transcriber-api-key-missing",
+ "call.in-progress.error-vapifault-deepgram-transcriber-failed",
+ "pipeline-error-gladia-transcriber-failed",
+ "call.in-progress.error-vapifault-gladia-transcriber-failed",
+ "pipeline-error-speechmatics-transcriber-failed",
+ "call.in-progress.error-vapifault-speechmatics-transcriber-failed",
+ "pipeline-error-assembly-ai-transcriber-failed",
+ "pipeline-error-assembly-ai-returning-400-insufficent-funds",
+ "pipeline-error-assembly-ai-returning-400-paid-only-feature",
+ "pipeline-error-assembly-ai-returning-401-invalid-credentials",
+ "pipeline-error-assembly-ai-returning-500-invalid-schema",
+ "pipeline-error-assembly-ai-returning-500-word-boost-parsing-failed",
+ "call.in-progress.error-vapifault-assembly-ai-transcriber-failed",
+ "call.in-progress.error-vapifault-assembly-ai-returning-400-insufficent-funds",
+ "call.in-progress.error-vapifault-assembly-ai-returning-400-paid-only-feature",
+ "call.in-progress.error-vapifault-assembly-ai-returning-401-invalid-credentials",
+ "call.in-progress.error-vapifault-assembly-ai-returning-500-invalid-schema",
+ "call.in-progress.error-vapifault-assembly-ai-returning-500-word-boost-parsing-failed",
+ "pipeline-error-talkscriber-transcriber-failed",
+ "call.in-progress.error-vapifault-talkscriber-transcriber-failed",
+ "pipeline-error-azure-speech-transcriber-failed",
+ "call.in-progress.error-vapifault-azure-speech-transcriber-failed",
+ "pipeline-error-eleven-labs-transcriber-failed",
+ "call.in-progress.error-vapifault-eleven-labs-transcriber-failed",
+ "pipeline-error-google-transcriber-failed",
+ "call.in-progress.error-vapifault-google-transcriber-failed",
+ "pipeline-error-openai-transcriber-failed",
+ "call.in-progress.error-vapifault-openai-transcriber-failed",
+ "pipeline-error-soniox-transcriber-auth-failed",
+ "pipeline-error-soniox-transcriber-rate-limited",
+ "pipeline-error-soniox-transcriber-invalid-config",
+ "pipeline-error-soniox-transcriber-server-error",
+ "pipeline-error-soniox-transcriber-failed",
+ "call.in-progress.error-vapifault-soniox-transcriber-auth-failed",
+ "call.in-progress.error-vapifault-soniox-transcriber-rate-limited",
+ "call.in-progress.error-vapifault-soniox-transcriber-invalid-config",
+ "call.in-progress.error-vapifault-soniox-transcriber-server-error",
+ "call.in-progress.error-vapifault-soniox-transcriber-failed",
+ "call.in-progress.error-pipeline-no-available-llm-model",
+ "worker-shutdown",
+ "vonage-disconnected",
+ "vonage-failed-to-connect-call",
+ "vonage-completed",
+ "phone-call-provider-bypass-enabled-but-no-call-received",
+ "call.in-progress.error-providerfault-transport-never-connected",
+ "call.in-progress.error-vapifault-worker-not-available",
+ "call.in-progress.error-vapifault-transport-never-connected",
+ "call.in-progress.error-vapifault-transport-connected-but-call-not-active",
+ "call.in-progress.error-vapifault-call-started-but-connection-to-transport-missing",
+ "call.in-progress.error-vapifault-worker-died",
+ "call.in-progress.twilio-completed-call",
+ "call.in-progress.sip-completed-call",
+ "call.in-progress.error-sip-inbound-call-failed-to-connect",
+ "call.in-progress.error-providerfault-outbound-sip-503-service-unavailable",
+ "call.in-progress.error-sip-outbound-call-failed-to-connect",
+ "call.ringing.error-sip-inbound-call-failed-to-connect",
+ "call.in-progress.error-providerfault-openai-llm-failed",
+ "call.in-progress.error-providerfault-azure-openai-llm-failed",
+ "call.in-progress.error-providerfault-groq-llm-failed",
+ "call.in-progress.error-providerfault-google-llm-failed",
+ "call.in-progress.error-providerfault-xai-llm-failed",
+ "call.in-progress.error-providerfault-mistral-llm-failed",
+ "call.in-progress.error-providerfault-minimax-llm-failed",
+ "call.in-progress.error-providerfault-inflection-ai-llm-failed",
+ "call.in-progress.error-providerfault-cerebras-llm-failed",
+ "call.in-progress.error-providerfault-deep-seek-llm-failed",
+ "call.in-progress.error-providerfault-baseten-llm-failed",
+ "call.in-progress.error-vapifault-chat-pipeline-failed-to-start",
+ "pipeline-error-openai-400-bad-request-validation-failed",
+ "pipeline-error-openai-401-unauthorized",
+ "pipeline-error-openai-401-incorrect-api-key",
+ "pipeline-error-openai-401-account-not-in-organization",
+ "pipeline-error-openai-403-model-access-denied",
+ "pipeline-error-openai-429-exceeded-quota",
+ "pipeline-error-openai-429-rate-limit-reached",
+ "pipeline-error-openai-500-server-error",
+ "pipeline-error-openai-503-server-overloaded-error",
+ "pipeline-error-openai-llm-failed",
+ "call.in-progress.error-vapifault-openai-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-openai-401-unauthorized",
+ "call.in-progress.error-vapifault-openai-401-incorrect-api-key",
+ "call.in-progress.error-vapifault-openai-401-account-not-in-organization",
+ "call.in-progress.error-vapifault-openai-403-model-access-denied",
+ "call.in-progress.error-vapifault-openai-429-exceeded-quota",
+ "call.in-progress.error-vapifault-openai-429-rate-limit-reached",
+ "call.in-progress.error-providerfault-openai-500-server-error",
+ "call.in-progress.error-providerfault-openai-503-server-overloaded-error",
+ "pipeline-error-azure-openai-400-bad-request-validation-failed",
+ "pipeline-error-azure-openai-401-unauthorized",
+ "pipeline-error-azure-openai-403-model-access-denied",
+ "pipeline-error-azure-openai-429-exceeded-quota",
+ "pipeline-error-azure-openai-500-server-error",
+ "pipeline-error-azure-openai-503-server-overloaded-error",
+ "pipeline-error-azure-openai-llm-failed",
+ "call.in-progress.error-vapifault-azure-openai-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-azure-openai-401-unauthorized",
+ "call.in-progress.error-vapifault-azure-openai-403-model-access-denied",
+ "call.in-progress.error-vapifault-azure-openai-429-exceeded-quota",
+ "call.in-progress.error-providerfault-azure-openai-500-server-error",
+ "call.in-progress.error-providerfault-azure-openai-503-server-overloaded-error",
+ "pipeline-error-google-400-bad-request-validation-failed",
+ "pipeline-error-google-401-unauthorized",
+ "pipeline-error-google-403-model-access-denied",
+ "pipeline-error-google-429-exceeded-quota",
+ "pipeline-error-google-500-server-error",
+ "pipeline-error-google-503-server-overloaded-error",
+ "pipeline-error-google-llm-failed",
+ "call.in-progress.error-vapifault-google-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-google-401-unauthorized",
+ "call.in-progress.error-vapifault-google-403-model-access-denied",
+ "call.in-progress.error-vapifault-google-429-exceeded-quota",
+ "call.in-progress.error-providerfault-google-500-server-error",
+ "call.in-progress.error-providerfault-google-503-server-overloaded-error",
+ "pipeline-error-xai-400-bad-request-validation-failed",
+ "pipeline-error-xai-401-unauthorized",
+ "pipeline-error-xai-403-model-access-denied",
+ "pipeline-error-xai-429-exceeded-quota",
+ "pipeline-error-xai-500-server-error",
+ "pipeline-error-xai-503-server-overloaded-error",
+ "pipeline-error-xai-llm-failed",
+ "call.in-progress.error-vapifault-xai-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-xai-401-unauthorized",
+ "call.in-progress.error-vapifault-xai-403-model-access-denied",
+ "call.in-progress.error-vapifault-xai-429-exceeded-quota",
+ "call.in-progress.error-providerfault-xai-500-server-error",
+ "call.in-progress.error-providerfault-xai-503-server-overloaded-error",
+ "pipeline-error-baseten-400-bad-request-validation-failed",
+ "pipeline-error-baseten-401-unauthorized",
+ "pipeline-error-baseten-403-model-access-denied",
+ "pipeline-error-baseten-429-exceeded-quota",
+ "pipeline-error-baseten-500-server-error",
+ "pipeline-error-baseten-503-server-overloaded-error",
+ "pipeline-error-baseten-llm-failed",
+ "call.in-progress.error-vapifault-baseten-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-baseten-401-unauthorized",
+ "call.in-progress.error-vapifault-baseten-403-model-access-denied",
+ "call.in-progress.error-vapifault-baseten-429-exceeded-quota",
+ "call.in-progress.error-providerfault-baseten-500-server-error",
+ "call.in-progress.error-providerfault-baseten-503-server-overloaded-error",
+ "pipeline-error-mistral-400-bad-request-validation-failed",
+ "pipeline-error-mistral-401-unauthorized",
+ "pipeline-error-mistral-403-model-access-denied",
+ "pipeline-error-mistral-429-exceeded-quota",
+ "pipeline-error-mistral-500-server-error",
+ "pipeline-error-mistral-503-server-overloaded-error",
+ "pipeline-error-mistral-llm-failed",
+ "call.in-progress.error-vapifault-mistral-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-mistral-401-unauthorized",
+ "call.in-progress.error-vapifault-mistral-403-model-access-denied",
+ "call.in-progress.error-vapifault-mistral-429-exceeded-quota",
+ "call.in-progress.error-providerfault-mistral-500-server-error",
+ "call.in-progress.error-providerfault-mistral-503-server-overloaded-error",
+ "pipeline-error-minimax-400-bad-request-validation-failed",
+ "pipeline-error-minimax-401-unauthorized",
+ "pipeline-error-minimax-403-model-access-denied",
+ "pipeline-error-minimax-429-exceeded-quota",
+ "pipeline-error-minimax-500-server-error",
+ "pipeline-error-minimax-503-server-overloaded-error",
+ "pipeline-error-minimax-llm-failed",
+ "call.in-progress.error-vapifault-minimax-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-minimax-401-unauthorized",
+ "call.in-progress.error-vapifault-minimax-403-model-access-denied",
+ "call.in-progress.error-vapifault-minimax-429-exceeded-quota",
+ "call.in-progress.error-providerfault-minimax-500-server-error",
+ "call.in-progress.error-providerfault-minimax-503-server-overloaded-error",
+ "pipeline-error-inflection-ai-400-bad-request-validation-failed",
+ "pipeline-error-inflection-ai-401-unauthorized",
+ "pipeline-error-inflection-ai-403-model-access-denied",
+ "pipeline-error-inflection-ai-429-exceeded-quota",
+ "pipeline-error-inflection-ai-500-server-error",
+ "pipeline-error-inflection-ai-503-server-overloaded-error",
+ "pipeline-error-inflection-ai-llm-failed",
+ "call.in-progress.error-vapifault-inflection-ai-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-inflection-ai-401-unauthorized",
+ "call.in-progress.error-vapifault-inflection-ai-403-model-access-denied",
+ "call.in-progress.error-vapifault-inflection-ai-429-exceeded-quota",
+ "call.in-progress.error-providerfault-inflection-ai-500-server-error",
+ "call.in-progress.error-providerfault-inflection-ai-503-server-overloaded-error",
+ "pipeline-error-deep-seek-400-bad-request-validation-failed",
+ "pipeline-error-deep-seek-401-unauthorized",
+ "pipeline-error-deep-seek-403-model-access-denied",
+ "pipeline-error-deep-seek-429-exceeded-quota",
+ "pipeline-error-deep-seek-500-server-error",
+ "pipeline-error-deep-seek-503-server-overloaded-error",
+ "pipeline-error-deep-seek-llm-failed",
+ "call.in-progress.error-vapifault-deep-seek-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-deep-seek-401-unauthorized",
+ "call.in-progress.error-vapifault-deep-seek-403-model-access-denied",
+ "call.in-progress.error-vapifault-deep-seek-429-exceeded-quota",
+ "call.in-progress.error-providerfault-deep-seek-500-server-error",
+ "call.in-progress.error-providerfault-deep-seek-503-server-overloaded-error",
+ "pipeline-error-groq-400-bad-request-validation-failed",
+ "pipeline-error-groq-401-unauthorized",
+ "pipeline-error-groq-403-model-access-denied",
+ "pipeline-error-groq-429-exceeded-quota",
+ "pipeline-error-groq-500-server-error",
+ "pipeline-error-groq-503-server-overloaded-error",
+ "pipeline-error-groq-llm-failed",
+ "call.in-progress.error-vapifault-groq-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-groq-401-unauthorized",
+ "call.in-progress.error-vapifault-groq-403-model-access-denied",
+ "call.in-progress.error-vapifault-groq-429-exceeded-quota",
+ "call.in-progress.error-providerfault-groq-500-server-error",
+ "call.in-progress.error-providerfault-groq-503-server-overloaded-error",
+ "pipeline-error-cerebras-400-bad-request-validation-failed",
+ "pipeline-error-cerebras-401-unauthorized",
+ "pipeline-error-cerebras-403-model-access-denied",
+ "pipeline-error-cerebras-429-exceeded-quota",
+ "pipeline-error-cerebras-500-server-error",
+ "pipeline-error-cerebras-503-server-overloaded-error",
+ "pipeline-error-cerebras-llm-failed",
+ "call.in-progress.error-vapifault-cerebras-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-cerebras-401-unauthorized",
+ "call.in-progress.error-vapifault-cerebras-403-model-access-denied",
+ "call.in-progress.error-vapifault-cerebras-429-exceeded-quota",
+ "call.in-progress.error-providerfault-cerebras-500-server-error",
+ "call.in-progress.error-providerfault-cerebras-503-server-overloaded-error",
+ "pipeline-error-anthropic-400-bad-request-validation-failed",
+ "pipeline-error-anthropic-401-unauthorized",
+ "pipeline-error-anthropic-403-model-access-denied",
+ "pipeline-error-anthropic-429-exceeded-quota",
+ "pipeline-error-anthropic-500-server-error",
+ "pipeline-error-anthropic-503-server-overloaded-error",
+ "pipeline-error-anthropic-llm-failed",
+ "call.in-progress.error-providerfault-anthropic-llm-failed",
+ "call.in-progress.error-vapifault-anthropic-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-anthropic-401-unauthorized",
+ "call.in-progress.error-vapifault-anthropic-403-model-access-denied",
+ "call.in-progress.error-vapifault-anthropic-429-exceeded-quota",
+ "call.in-progress.error-providerfault-anthropic-500-server-error",
+ "call.in-progress.error-providerfault-anthropic-503-server-overloaded-error",
+ "pipeline-error-anthropic-bedrock-400-bad-request-validation-failed",
+ "pipeline-error-anthropic-bedrock-401-unauthorized",
+ "pipeline-error-anthropic-bedrock-403-model-access-denied",
+ "pipeline-error-anthropic-bedrock-429-exceeded-quota",
+ "pipeline-error-anthropic-bedrock-500-server-error",
+ "pipeline-error-anthropic-bedrock-503-server-overloaded-error",
+ "pipeline-error-anthropic-bedrock-llm-failed",
+ "call.in-progress.error-providerfault-anthropic-bedrock-llm-failed",
+ "call.in-progress.error-vapifault-anthropic-bedrock-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-anthropic-bedrock-401-unauthorized",
+ "call.in-progress.error-vapifault-anthropic-bedrock-403-model-access-denied",
+ "call.in-progress.error-vapifault-anthropic-bedrock-429-exceeded-quota",
+ "call.in-progress.error-providerfault-anthropic-bedrock-500-server-error",
+ "call.in-progress.error-providerfault-anthropic-bedrock-503-server-overloaded-error",
+ "pipeline-error-anthropic-vertex-400-bad-request-validation-failed",
+ "pipeline-error-anthropic-vertex-401-unauthorized",
+ "pipeline-error-anthropic-vertex-403-model-access-denied",
+ "pipeline-error-anthropic-vertex-429-exceeded-quota",
+ "pipeline-error-anthropic-vertex-500-server-error",
+ "pipeline-error-anthropic-vertex-503-server-overloaded-error",
+ "pipeline-error-anthropic-vertex-llm-failed",
+ "call.in-progress.error-providerfault-anthropic-vertex-llm-failed",
+ "call.in-progress.error-vapifault-anthropic-vertex-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-anthropic-vertex-401-unauthorized",
+ "call.in-progress.error-vapifault-anthropic-vertex-403-model-access-denied",
+ "call.in-progress.error-vapifault-anthropic-vertex-429-exceeded-quota",
+ "call.in-progress.error-providerfault-anthropic-vertex-500-server-error",
+ "call.in-progress.error-providerfault-anthropic-vertex-503-server-overloaded-error",
+ "pipeline-error-together-ai-400-bad-request-validation-failed",
+ "pipeline-error-together-ai-401-unauthorized",
+ "pipeline-error-together-ai-403-model-access-denied",
+ "pipeline-error-together-ai-429-exceeded-quota",
+ "pipeline-error-together-ai-500-server-error",
+ "pipeline-error-together-ai-503-server-overloaded-error",
+ "pipeline-error-together-ai-llm-failed",
+ "call.in-progress.error-providerfault-together-ai-llm-failed",
+ "call.in-progress.error-vapifault-together-ai-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-together-ai-401-unauthorized",
+ "call.in-progress.error-vapifault-together-ai-403-model-access-denied",
+ "call.in-progress.error-vapifault-together-ai-429-exceeded-quota",
+ "call.in-progress.error-providerfault-together-ai-500-server-error",
+ "call.in-progress.error-providerfault-together-ai-503-server-overloaded-error",
+ "pipeline-error-anyscale-400-bad-request-validation-failed",
+ "pipeline-error-anyscale-401-unauthorized",
+ "pipeline-error-anyscale-403-model-access-denied",
+ "pipeline-error-anyscale-429-exceeded-quota",
+ "pipeline-error-anyscale-500-server-error",
+ "pipeline-error-anyscale-503-server-overloaded-error",
+ "pipeline-error-anyscale-llm-failed",
+ "call.in-progress.error-providerfault-anyscale-llm-failed",
+ "call.in-progress.error-vapifault-anyscale-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-anyscale-401-unauthorized",
+ "call.in-progress.error-vapifault-anyscale-403-model-access-denied",
+ "call.in-progress.error-vapifault-anyscale-429-exceeded-quota",
+ "call.in-progress.error-providerfault-anyscale-500-server-error",
+ "call.in-progress.error-providerfault-anyscale-503-server-overloaded-error",
+ "pipeline-error-openrouter-400-bad-request-validation-failed",
+ "pipeline-error-openrouter-401-unauthorized",
+ "pipeline-error-openrouter-403-model-access-denied",
+ "pipeline-error-openrouter-429-exceeded-quota",
+ "pipeline-error-openrouter-500-server-error",
+ "pipeline-error-openrouter-503-server-overloaded-error",
+ "pipeline-error-openrouter-llm-failed",
+ "call.in-progress.error-providerfault-openrouter-llm-failed",
+ "call.in-progress.error-vapifault-openrouter-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-openrouter-401-unauthorized",
+ "call.in-progress.error-vapifault-openrouter-403-model-access-denied",
+ "call.in-progress.error-vapifault-openrouter-429-exceeded-quota",
+ "call.in-progress.error-providerfault-openrouter-500-server-error",
+ "call.in-progress.error-providerfault-openrouter-503-server-overloaded-error",
+ "pipeline-error-perplexity-ai-400-bad-request-validation-failed",
+ "pipeline-error-perplexity-ai-401-unauthorized",
+ "pipeline-error-perplexity-ai-403-model-access-denied",
+ "pipeline-error-perplexity-ai-429-exceeded-quota",
+ "pipeline-error-perplexity-ai-500-server-error",
+ "pipeline-error-perplexity-ai-503-server-overloaded-error",
+ "pipeline-error-perplexity-ai-llm-failed",
+ "call.in-progress.error-providerfault-perplexity-ai-llm-failed",
+ "call.in-progress.error-vapifault-perplexity-ai-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-perplexity-ai-401-unauthorized",
+ "call.in-progress.error-vapifault-perplexity-ai-403-model-access-denied",
+ "call.in-progress.error-vapifault-perplexity-ai-429-exceeded-quota",
+ "call.in-progress.error-providerfault-perplexity-ai-500-server-error",
+ "call.in-progress.error-providerfault-perplexity-ai-503-server-overloaded-error",
+ "pipeline-error-deepinfra-400-bad-request-validation-failed",
+ "pipeline-error-deepinfra-401-unauthorized",
+ "pipeline-error-deepinfra-403-model-access-denied",
+ "pipeline-error-deepinfra-429-exceeded-quota",
+ "pipeline-error-deepinfra-500-server-error",
+ "pipeline-error-deepinfra-503-server-overloaded-error",
+ "pipeline-error-deepinfra-llm-failed",
+ "call.in-progress.error-providerfault-deepinfra-llm-failed",
+ "call.in-progress.error-vapifault-deepinfra-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-deepinfra-401-unauthorized",
+ "call.in-progress.error-vapifault-deepinfra-403-model-access-denied",
+ "call.in-progress.error-vapifault-deepinfra-429-exceeded-quota",
+ "call.in-progress.error-providerfault-deepinfra-500-server-error",
+ "call.in-progress.error-providerfault-deepinfra-503-server-overloaded-error",
+ "pipeline-error-runpod-400-bad-request-validation-failed",
+ "pipeline-error-runpod-401-unauthorized",
+ "pipeline-error-runpod-403-model-access-denied",
+ "pipeline-error-runpod-429-exceeded-quota",
+ "pipeline-error-runpod-500-server-error",
+ "pipeline-error-runpod-503-server-overloaded-error",
+ "pipeline-error-runpod-llm-failed",
+ "call.in-progress.error-providerfault-runpod-llm-failed",
+ "call.in-progress.error-vapifault-runpod-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-runpod-401-unauthorized",
+ "call.in-progress.error-vapifault-runpod-403-model-access-denied",
+ "call.in-progress.error-vapifault-runpod-429-exceeded-quota",
+ "call.in-progress.error-providerfault-runpod-500-server-error",
+ "call.in-progress.error-providerfault-runpod-503-server-overloaded-error",
+ "pipeline-error-custom-llm-400-bad-request-validation-failed",
+ "pipeline-error-custom-llm-401-unauthorized",
+ "pipeline-error-custom-llm-403-model-access-denied",
+ "pipeline-error-custom-llm-429-exceeded-quota",
+ "pipeline-error-custom-llm-500-server-error",
+ "pipeline-error-custom-llm-503-server-overloaded-error",
+ "pipeline-error-custom-llm-llm-failed",
+ "call.in-progress.error-providerfault-custom-llm-llm-failed",
+ "call.in-progress.error-vapifault-custom-llm-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-custom-llm-401-unauthorized",
+ "call.in-progress.error-vapifault-custom-llm-403-model-access-denied",
+ "call.in-progress.error-vapifault-custom-llm-429-exceeded-quota",
+ "call.in-progress.error-providerfault-custom-llm-500-server-error",
+ "call.in-progress.error-providerfault-custom-llm-503-server-overloaded-error",
+ "call.in-progress.error-pipeline-ws-model-connection-failed",
+ "pipeline-error-custom-voice-failed",
+ "pipeline-error-cartesia-socket-hang-up",
+ "pipeline-error-cartesia-requested-payment",
+ "pipeline-error-cartesia-500-server-error",
+ "pipeline-error-cartesia-502-server-error",
+ "pipeline-error-cartesia-503-server-error",
+ "pipeline-error-cartesia-522-server-error",
+ "call.in-progress.error-vapifault-cartesia-socket-hang-up",
+ "call.in-progress.error-vapifault-cartesia-requested-payment",
+ "call.in-progress.error-providerfault-cartesia-500-server-error",
+ "call.in-progress.error-providerfault-cartesia-503-server-error",
+ "call.in-progress.error-providerfault-cartesia-522-server-error",
+ "pipeline-error-eleven-labs-voice-not-found",
+ "pipeline-error-eleven-labs-quota-exceeded",
+ "pipeline-error-eleven-labs-unauthorized-access",
+ "pipeline-error-eleven-labs-unauthorized-to-access-model",
+ "pipeline-error-eleven-labs-professional-voices-only-for-creator-plus",
+ "pipeline-error-eleven-labs-blocked-free-plan-and-requested-upgrade",
+ "pipeline-error-eleven-labs-blocked-concurrent-requests-and-requested-upgrade",
+ "pipeline-error-eleven-labs-blocked-using-instant-voice-clone-and-requested-upgrade",
+ "pipeline-error-eleven-labs-system-busy-and-requested-upgrade",
+ "pipeline-error-eleven-labs-voice-not-fine-tuned",
+ "pipeline-error-eleven-labs-invalid-api-key",
+ "pipeline-error-eleven-labs-invalid-voice-samples",
+ "pipeline-error-eleven-labs-voice-disabled-by-owner",
+ "pipeline-error-eleven-labs-vapi-voice-disabled-by-owner",
+ "pipeline-error-eleven-labs-blocked-account-in-probation",
+ "pipeline-error-eleven-labs-blocked-content-against-their-policy",
+ "pipeline-error-eleven-labs-missing-samples-for-voice-clone",
+ "pipeline-error-eleven-labs-voice-not-fine-tuned-and-cannot-be-used",
+ "pipeline-error-eleven-labs-voice-not-allowed-for-free-users",
+ "pipeline-error-eleven-labs-max-character-limit-exceeded",
+ "pipeline-error-eleven-labs-blocked-voice-potentially-against-terms-of-service-and-awaiting-verification",
+ "pipeline-error-eleven-labs-500-server-error",
+ "pipeline-error-eleven-labs-503-server-error",
+ "call.in-progress.error-vapifault-eleven-labs-voice-not-found",
+ "call.in-progress.error-vapifault-eleven-labs-quota-exceeded",
+ "call.in-progress.error-vapifault-eleven-labs-unauthorized-access",
+ "call.in-progress.error-vapifault-eleven-labs-unauthorized-to-access-model",
+ "call.in-progress.error-vapifault-eleven-labs-professional-voices-only-for-creator-plus",
+ "call.in-progress.error-vapifault-eleven-labs-blocked-free-plan-and-requested-upgrade",
+ "call.in-progress.error-vapifault-eleven-labs-blocked-concurrent-requests-and-requested-upgrade",
+ "call.in-progress.error-vapifault-eleven-labs-blocked-using-instant-voice-clone-and-requested-upgrade",
+ "call.in-progress.error-vapifault-eleven-labs-system-busy-and-requested-upgrade",
+ "call.in-progress.error-vapifault-eleven-labs-voice-not-fine-tuned",
+ "call.in-progress.error-vapifault-eleven-labs-invalid-api-key",
+ "call.in-progress.error-vapifault-eleven-labs-invalid-voice-samples",
+ "call.in-progress.error-vapifault-eleven-labs-voice-disabled-by-owner",
+ "call.in-progress.error-vapifault-eleven-labs-blocked-account-in-probation",
+ "call.in-progress.error-vapifault-eleven-labs-blocked-content-against-their-policy",
+ "call.in-progress.error-vapifault-eleven-labs-missing-samples-for-voice-clone",
+ "call.in-progress.error-vapifault-eleven-labs-voice-not-fine-tuned-and-cannot-be-used",
+ "call.in-progress.error-vapifault-eleven-labs-voice-not-allowed-for-free-users",
+ "call.in-progress.error-vapifault-eleven-labs-max-character-limit-exceeded",
+ "call.in-progress.error-vapifault-eleven-labs-blocked-voice-potentially-against-terms-of-service-and-awaiting-verification",
+ "call.in-progress.error-providerfault-eleven-labs-system-busy-and-requested-upgrade",
+ "call.in-progress.error-providerfault-eleven-labs-500-server-error",
+ "call.in-progress.error-providerfault-eleven-labs-503-server-error",
+ "pipeline-error-playht-request-timed-out",
+ "pipeline-error-playht-invalid-voice",
+ "pipeline-error-playht-unexpected-error",
+ "pipeline-error-playht-out-of-credits",
+ "pipeline-error-playht-invalid-emotion",
+ "pipeline-error-playht-voice-must-be-a-valid-voice-manifest-uri",
+ "pipeline-error-playht-401-unauthorized",
+ "pipeline-error-playht-403-forbidden-out-of-characters",
+ "pipeline-error-playht-403-forbidden-api-access-not-available",
+ "pipeline-error-playht-429-exceeded-quota",
+ "pipeline-error-playht-502-gateway-error",
+ "pipeline-error-playht-504-gateway-error",
+ "call.in-progress.error-vapifault-playht-request-timed-out",
+ "call.in-progress.error-vapifault-playht-invalid-voice",
+ "call.in-progress.error-vapifault-playht-unexpected-error",
+ "call.in-progress.error-vapifault-playht-out-of-credits",
+ "call.in-progress.error-vapifault-playht-invalid-emotion",
+ "call.in-progress.error-vapifault-playht-voice-must-be-a-valid-voice-manifest-uri",
+ "call.in-progress.error-vapifault-playht-401-unauthorized",
+ "call.in-progress.error-vapifault-playht-403-forbidden-out-of-characters",
+ "call.in-progress.error-vapifault-playht-403-forbidden-api-access-not-available",
+ "call.in-progress.error-vapifault-playht-429-exceeded-quota",
+ "call.in-progress.error-providerfault-playht-502-gateway-error",
+ "call.in-progress.error-providerfault-playht-504-gateway-error",
+ "pipeline-error-custom-transcriber-failed",
+ "call.in-progress.error-vapifault-custom-transcriber-failed",
+ "pipeline-error-eleven-labs-transcriber-failed",
+ "call.in-progress.error-vapifault-eleven-labs-transcriber-failed",
+ "pipeline-error-deepgram-returning-400-no-such-model-language-tier-combination",
+ "pipeline-error-deepgram-returning-401-invalid-credentials",
+ "pipeline-error-deepgram-returning-403-model-access-denied",
+ "pipeline-error-deepgram-returning-404-not-found",
+ "pipeline-error-deepgram-returning-500-invalid-json",
+ "pipeline-error-deepgram-returning-502-network-error",
+ "pipeline-error-deepgram-returning-502-bad-gateway-ehostunreach",
+ "pipeline-error-deepgram-returning-econnreset",
+ "call.in-progress.error-vapifault-deepgram-returning-400-no-such-model-language-tier-combination",
+ "call.in-progress.error-vapifault-deepgram-returning-401-invalid-credentials",
+ "call.in-progress.error-vapifault-deepgram-returning-404-not-found",
+ "call.in-progress.error-vapifault-deepgram-returning-403-model-access-denied",
+ "call.in-progress.error-providerfault-deepgram-returning-500-invalid-json",
+ "call.in-progress.error-providerfault-deepgram-returning-502-network-error",
+ "call.in-progress.error-providerfault-deepgram-returning-502-bad-gateway-ehostunreach",
+ "pipeline-error-google-transcriber-failed",
+ "call.in-progress.error-vapifault-google-transcriber-failed",
+ "pipeline-error-openai-transcriber-failed",
+ "call.in-progress.error-vapifault-openai-transcriber-failed",
+ "pipeline-error-soniox-transcriber-auth-failed",
+ "pipeline-error-soniox-transcriber-rate-limited",
+ "pipeline-error-soniox-transcriber-invalid-config",
+ "pipeline-error-soniox-transcriber-server-error",
+ "pipeline-error-soniox-transcriber-failed",
+ "call.in-progress.error-vapifault-soniox-transcriber-auth-failed",
+ "call.in-progress.error-vapifault-soniox-transcriber-rate-limited",
+ "call.in-progress.error-vapifault-soniox-transcriber-invalid-config",
+ "call.in-progress.error-vapifault-soniox-transcriber-server-error",
+ "call.in-progress.error-vapifault-soniox-transcriber-failed",
+ "call.in-progress.error-warm-transfer-max-duration",
+ "call.in-progress.error-warm-transfer-assistant-cancelled",
+ "call.in-progress.error-warm-transfer-silence-timeout",
+ "call.in-progress.error-warm-transfer-microphone-timeout",
+ "assistant-ended-call",
+ "assistant-said-end-call-phrase",
+ "assistant-ended-call-with-hangup-task",
+ "assistant-ended-call-after-message-spoken",
+ "assistant-forwarded-call",
+ "assistant-join-timed-out",
+ "call.in-progress.error-assistant-did-not-receive-customer-audio",
+ "call.in-progress.error-transfer-failed",
+ "customer-busy",
+ "customer-ended-call",
+ "customer-ended-call-before-warm-transfer",
+ "customer-ended-call-after-warm-transfer-attempt",
+ "customer-ended-call-during-transfer",
+ "customer-did-not-answer",
+ "customer-did-not-give-microphone-permission",
+ "exceeded-max-duration",
+ "manually-canceled",
+ "phone-call-provider-closed-websocket",
+ "call.forwarding.operator-busy",
+ "silence-timed-out",
+ "call.in-progress.error-sip-inbound-call-failed-to-connect",
+ "call.in-progress.error-providerfault-outbound-sip-403-forbidden",
+ "call.in-progress.error-providerfault-outbound-sip-407-proxy-authentication-required",
+ "call.in-progress.error-providerfault-outbound-sip-408-request-timeout",
+ "call.in-progress.error-providerfault-outbound-sip-503-service-unavailable",
+ "call.in-progress.error-providerfault-outbound-sip-480-temporarily-unavailable",
+ "call.in-progress.error-sip-outbound-call-failed-to-connect",
+ "call.ringing.hook-executed-say",
+ "call.ringing.hook-executed-transfer",
+ "call.ending.hook-executed-say",
+ "call.ending.hook-executed-transfer",
+ "call.ringing.sip-inbound-caller-hungup-before-call-connect",
+ "call.ringing.error-sip-inbound-call-failed-to-connect",
+ "twilio-failed-to-connect-call",
+ "twilio-reported-customer-misdialed",
+ "vonage-rejected",
+ "voicemail",
+ "call-deleted"
]
},
+ "endedMessage": {
+ "type": "string",
+ "description": "This is the message that adds more context to the ended reason. It can be used to provide potential error messages or warnings."
+ },
"destination": {
- "description": "This is the destination details for the transfer - can be a phone number or SIP URI",
+ "description": "This is the destination where the call ended up being transferred to. If the call was not transferred, this will be empty.",
"oneOf": [
{
"$ref": "#/components/schemas/TransferDestinationNumber",
@@ -23547,4050 +34576,2807 @@
"title": "SipTransferDestination"
}
]
- }
- },
- "required": [
- "type"
- ]
- },
- "FunctionCallHookAction": {
- "type": "object",
- "properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
},
- "type": {
+ "id": {
"type": "string",
- "enum": [
- "function"
- ],
- "description": "The type of tool. \"function\" for Function tool."
- },
- "async": {
- "type": "boolean",
- "example": false,
- "description": "This determines if the tool is async.\n\n If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server.\n\n If sync, the assistant will wait for your server to respond. This is useful if want assistant to respond with the result from your server.\n\n Defaults to synchronous (`false`)."
- },
- "server": {
- "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
- ]
- },
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
+ "description": "This is the unique identifier for the call."
},
- "function": {
- "description": "This is the function definition of the tool.",
- "allOf": [
- {
- "$ref": "#/components/schemas/OpenAIFunction"
- }
- ]
- }
- },
- "required": [
- "type"
- ]
- },
- "SayHookAction": {
- "type": "object",
- "properties": {
- "type": {
+ "orgId": {
"type": "string",
- "description": "This is the type of action - must be \"say\"",
- "enum": [
- "say"
- ]
- },
- "prompt": {
- "description": "This is the prompt for the assistant to generate a response based on existing conversation.\nCan be a string or an array of chat messages.",
- "oneOf": [
- {
- "type": "string",
- "title": "String"
- },
- {
- "type": "array",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
- },
- {
- "$ref": "#/components/schemas/AssistantMessage",
- "title": "AssistantMessage"
- },
- {
- "$ref": "#/components/schemas/ToolMessage",
- "title": "ToolMessage"
- },
- {
- "$ref": "#/components/schemas/DeveloperMessage",
- "title": "DeveloperMessage"
- }
- ]
- },
- "title": "MessageArray"
- }
- ],
- "examples": [
- "Ask the user if they're still in the call",
- [
- {
- "role": "system",
- "content": "You are a helpful assistant, and would like to know if the user is still in the call based on the conversation history in {{transcript}}"
- }
- ]
- ]
+ "description": "This is the unique identifier for the org that this call belongs to."
},
- "exact": {
- "type": "object",
- "description": "This is the message to say"
- }
- },
- "required": [
- "type"
- ]
- },
- "CallHookFilter": {
- "type": "object",
- "properties": {
- "type": {
+ "createdAt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the type of filter - currently only \"oneOf\" is supported",
- "enum": [
- "oneOf"
- ],
- "maxLength": 1000
+ "description": "This is the ISO 8601 date-time string of when the call was created."
},
- "key": {
+ "updatedAt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the key to filter on (e.g. \"call.endedReason\")",
- "maxLength": 1000
+ "description": "This is the ISO 8601 date-time string of when the call was last updated."
},
- "oneOf": {
- "description": "This is the array of possible values to match against",
- "type": "array",
- "items": {
- "type": "string",
- "maxLength": 1000
- }
- }
- },
- "required": [
- "type",
- "key",
- "oneOf"
- ]
- },
- "CallHookCallEnding": {
- "type": "object",
- "properties": {
- "on": {
+ "startedAt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the event that triggers this hook",
- "enum": [
- "call.ending"
- ],
- "maxLength": 1000
- },
- "do": {
- "type": "array",
- "description": "This is the set of actions to perform when the hook triggers",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolCallHookAction",
- "title": "ToolCallHookAction"
- }
- ]
- }
+ "description": "This is the ISO 8601 date-time string of when the call was started."
},
- "filters": {
- "description": "This is the set of filters that must match for the hook to trigger",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/CallHookFilter"
- }
- }
- },
- "required": [
- "on",
- "do"
- ]
- },
- "CallHookAssistantSpeechInterrupted": {
- "type": "object",
- "properties": {
- "on": {
+ "endedAt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the event that triggers this hook",
- "enum": [
- "assistant.speech.interrupted"
- ],
- "maxLength": 1000
+ "description": "This is the ISO 8601 date-time string of when the call was ended."
},
- "do": {
- "type": "array",
- "description": "This is the set of actions to perform when the hook triggers",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/SayHookAction",
- "title": "SayHookAction"
- },
- {
- "$ref": "#/components/schemas/ToolCallHookAction",
- "title": "ToolCallHookAction"
- }
- ]
- }
- }
- },
- "required": [
- "on",
- "do"
- ]
- },
- "CallHookCustomerSpeechInterrupted": {
- "type": "object",
- "properties": {
- "on": {
- "type": "string",
- "description": "This is the event that triggers this hook",
- "enum": [
- "customer.speech.interrupted"
- ],
- "maxLength": 1000
+ "cost": {
+ "type": "number",
+ "description": "This is the cost of the call in USD."
},
- "do": {
- "type": "array",
- "description": "This is the set of actions to perform when the hook triggers",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/SayHookAction",
- "title": "SayHookAction"
- },
- {
- "$ref": "#/components/schemas/ToolCallHookAction",
- "title": "ToolCallHookAction"
- }
- ]
- }
- }
- },
- "required": [
- "on",
- "do"
- ]
- },
- "ToolCallHookAction": {
- "type": "object",
- "properties": {
- "type": {
- "type": "string",
- "description": "This is the type of action - must be \"tool\"",
- "enum": [
- "tool"
+ "costBreakdown": {
+ "description": "This is the cost of the call in USD.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CostBreakdown"
+ }
]
},
- "tool": {
- "description": "This is the tool to call. To use an existing tool, send `toolId` instead.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/CreateApiRequestToolDTO",
- "title": "ApiRequestTool"
- },
- {
- "$ref": "#/components/schemas/CreateBashToolDTO",
- "title": "BashTool"
- },
- {
- "$ref": "#/components/schemas/CreateComputerToolDTO",
- "title": "ComputerTool"
- },
- {
- "$ref": "#/components/schemas/CreateDtmfToolDTO",
- "title": "DtmfTool"
- },
- {
- "$ref": "#/components/schemas/CreateEndCallToolDTO",
- "title": "EndCallTool"
- },
- {
- "$ref": "#/components/schemas/CreateFunctionToolDTO",
- "title": "FunctionTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarAvailabilityToolDTO",
- "title": "GoHighLevelCalendarAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCalendarEventCreateToolDTO",
- "title": "GoHighLevelCalendarEventCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactCreateToolDTO",
- "title": "GoHighLevelContactCreateTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelContactGetToolDTO",
- "title": "GoHighLevelContactGetTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCheckAvailabilityToolDTO",
- "title": "GoogleCalendarCheckAvailabilityTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarCreateEventToolDTO",
- "title": "GoogleCalendarCreateEventTool"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleSheetsRowAppendToolDTO",
- "title": "GoogleSheetsRowAppendTool"
- },
- {
- "$ref": "#/components/schemas/CreateHandoffToolDTO",
- "title": "HandoffTool"
- },
- {
- "$ref": "#/components/schemas/CreateMcpToolDTO",
- "title": "McpTool"
- },
- {
- "$ref": "#/components/schemas/CreateQueryToolDTO",
- "title": "QueryTool"
- },
- {
- "$ref": "#/components/schemas/CreateSlackSendMessageToolDTO",
- "title": "SlackSendMessageTool"
- },
- {
- "$ref": "#/components/schemas/CreateSmsToolDTO",
- "title": "SmsTool"
- },
- {
- "$ref": "#/components/schemas/CreateTextEditorToolDTO",
- "title": "TextEditorTool"
- },
+ "artifactPlan": {
+ "description": "This is a copy of assistant artifact plan. This isn't actually stored on the call but rather just returned in POST /call/web to enable artifact creation client side.",
+ "allOf": [
{
- "$ref": "#/components/schemas/CreateTransferCallToolDTO",
- "title": "TransferCallTool"
+ "$ref": "#/components/schemas/ArtifactPlan"
}
]
},
- "toolId": {
- "type": "string",
- "description": "This is the tool to call. To use a transient tool, send `tool` instead."
- }
- },
- "required": [
- "type"
- ]
- },
- "CustomerSpeechTimeoutOptions": {
- "type": "object",
- "properties": {
- "timeoutSeconds": {
- "type": "number",
- "description": "This is the timeout in seconds before action is triggered.\nThe clock starts when the assistant finishes speaking and remains active until the user speaks.\n\n@default 7.5",
- "minimum": 1,
- "maximum": 1000
- },
- "triggerMaxCount": {
- "type": "number",
- "description": "This is the maximum number of times the hook will trigger in a call.\n\n@default 3",
- "minimum": 1,
- "maximum": 10
- },
- "triggerResetMode": {
- "type": "object",
- "description": "This is whether the counter for hook trigger resets the user speaks.\n\n@default never"
- }
- },
- "required": [
- "timeoutSeconds"
- ]
- },
- "CallHookCustomerSpeechTimeout": {
- "type": "object",
- "properties": {
- "on": {
- "type": "string",
- "description": "Must be either \"customer.speech.timeout\" or match the pattern \"customer.speech.timeout[property=value]\"",
- "maxLength": 1000
- },
- "do": {
- "type": "array",
- "description": "This is the set of actions to perform when the hook triggers",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/SayHookAction",
- "title": "SayHookAction"
- },
- {
- "$ref": "#/components/schemas/ToolCallHookAction",
- "title": "ToolCallHookAction"
- }
- ]
- }
- },
- "options": {
- "description": "This is the set of filters that must match for the hook to trigger",
+ "analysis": {
+ "description": "This is the analysis of the call. Configure in `assistant.analysisPlan`.",
"allOf": [
{
- "$ref": "#/components/schemas/CustomerSpeechTimeoutOptions"
+ "$ref": "#/components/schemas/Analysis"
}
]
},
- "name": {
- "type": "string",
- "description": "This is the name of the hook, it can be set by the user to identify the hook.\nIf no name is provided, the hook will be auto generated as UUID.\n\n@default UUID",
- "maxLength": 1000
- }
- },
- "required": [
- "on",
- "do"
- ]
- },
- "VoicemailDetectionBackoffPlan": {
- "type": "object",
- "properties": {
- "startAtSeconds": {
- "type": "number",
- "description": "This is the number of seconds to wait before starting the first retry attempt.",
- "minimum": 0,
- "default": 5
- },
- "frequencySeconds": {
- "type": "number",
- "description": "This is the interval in seconds between retry attempts.",
- "minimum": 2.5,
- "default": 5
- },
- "maxRetries": {
- "type": "number",
- "description": "This is the maximum number of retry attempts before giving up.",
- "minimum": 1,
- "maximum": 10,
- "default": 6
- }
- }
- },
- "GoogleVoicemailDetectionPlan": {
- "type": "object",
- "properties": {
- "beepMaxAwaitSeconds": {
- "type": "number",
- "description": "This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message\n\n- If we detect a voicemail beep before this, we will speak the message at that point.\n\n- Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. You should definitely tune this to your use case.\n\n@default 30\n@min 0\n@max 60",
- "minimum": 0,
- "maximum": 30,
- "default": 30
+ "monitor": {
+ "description": "This is to real-time monitor the call. Configure in `assistant.monitorPlan`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Monitor"
+ }
+ ]
},
- "provider": {
- "type": "string",
- "description": "This is the provider to use for voicemail detection.",
- "enum": [
- "google"
+ "artifact": {
+ "description": "These are the artifacts created from the call. Configure in `assistant.artifactPlan`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Artifact"
+ }
]
},
- "backoffPlan": {
- "description": "This is the backoff plan for the voicemail detection.",
+ "compliance": {
+ "description": "This is the compliance of the call. Configure in `assistant.compliancePlan`.",
"allOf": [
{
- "$ref": "#/components/schemas/VoicemailDetectionBackoffPlan"
+ "$ref": "#/components/schemas/Compliance"
}
]
},
- "type": {
+ "phoneCallProviderId": {
"type": "string",
- "description": "This is the detection type to use for voicemail detection.\n- 'audio': Uses native audio models (default)\n- 'transcript': Uses ASR/transcript-based detection\n@default 'audio' (audio detection)",
- "enum": [
- "audio",
- "transcript"
- ]
- }
- },
- "required": [
- "provider"
- ]
- },
- "OpenAIVoicemailDetectionPlan": {
- "type": "object",
- "properties": {
- "beepMaxAwaitSeconds": {
- "type": "number",
- "description": "This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message\n\n- If we detect a voicemail beep before this, we will speak the message at that point.\n\n- Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. You should definitely tune this to your use case.\n\n@default 30\n@min 0\n@max 60",
- "minimum": 0,
- "maximum": 30,
- "default": 30
+ "description": "The ID of the call as provided by the phone number service. callSid in Twilio. conversationUuid in Vonage. callControlId in Telnyx.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
+ "deprecated": true
},
- "provider": {
+ "campaignId": {
"type": "string",
- "description": "This is the provider to use for voicemail detection.",
- "enum": [
- "openai"
- ]
+ "description": "This is the campaign ID that the call belongs to."
},
- "backoffPlan": {
- "description": "This is the backoff plan for the voicemail detection.",
+ "assistantId": {
+ "type": "string",
+ "description": "This is the assistant ID that will be used for the call. To use a transient assistant, use `assistant` instead.\n\nTo start a call with:\n- Assistant, use `assistantId` or `assistant`\n- Squad, use `squadId` or `squad`\n- Workflow, use `workflowId` or `workflow`"
+ },
+ "assistant": {
+ "description": "This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead.\n\nTo start a call with:\n- Assistant, use `assistant`\n- Squad, use `squad`\n- Workflow, use `workflow`",
"allOf": [
{
- "$ref": "#/components/schemas/VoicemailDetectionBackoffPlan"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
]
},
- "type": {
- "type": "string",
- "description": "This is the detection type to use for voicemail detection.\n- 'audio': Uses native audio models (default)\n- 'transcript': Uses ASR/transcript-based detection\n@default 'audio' (audio detection)",
- "enum": [
- "audio",
- "transcript"
- ]
- }
- },
- "required": [
- "provider"
- ]
- },
- "TwilioVoicemailDetectionPlan": {
- "type": "object",
- "properties": {
- "provider": {
- "type": "string",
- "description": "This is the provider to use for voicemail detection.",
- "enum": [
- "twilio"
+ "assistantOverrides": {
+ "description": "These are the overrides for the `assistant` or `assistantId`'s settings and template variables.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides"
+ }
]
},
- "voicemailDetectionTypes": {
- "type": "array",
- "description": "These are the AMD messages from Twilio that are considered as voicemail. Default is ['machine_end_beep', 'machine_end_silence'].\n\n@default {Array} ['machine_end_beep', 'machine_end_silence']",
- "enum": [
- "machine_start",
- "human",
- "fax",
- "unknown",
- "machine_end_beep",
- "machine_end_silence",
- "machine_end_other"
- ],
- "example": [
- "machine_end_beep",
- "machine_end_silence"
- ],
- "items": {
- "type": "string",
- "enum": [
- "machine_start",
- "human",
- "fax",
- "unknown",
- "machine_end_beep",
- "machine_end_silence",
- "machine_end_other"
- ]
- }
- },
- "enabled": {
- "type": "boolean",
- "description": "This sets whether the assistant should detect voicemail. Defaults to true.\n\n@default true"
- },
- "machineDetectionTimeout": {
- "type": "number",
- "description": "The number of seconds that Twilio should attempt to perform answering machine detection before timing out and returning AnsweredBy as unknown. Default is 30 seconds.\n\nIncreasing this value will provide the engine more time to make a determination. This can be useful when DetectMessageEnd is provided in the MachineDetection parameter and there is an expectation of long answering machine greetings that can exceed 30 seconds.\n\nDecreasing this value will reduce the amount of time the engine has to make a determination. This can be particularly useful when the Enable option is provided in the MachineDetection parameter and you want to limit the time for initial detection.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 30",
- "minimum": 3,
- "maximum": 59
- },
- "machineDetectionSpeechThreshold": {
- "type": "number",
- "description": "The number of milliseconds that is used as the measuring stick for the length of the speech activity. Durations lower than this value will be interpreted as a human, longer as a machine. Default is 2400 milliseconds.\n\nIncreasing this value will reduce the chance of a False Machine (detected machine, actually human) for a long human greeting (e.g., a business greeting) but increase the time it takes to detect a machine.\n\nDecreasing this value will reduce the chances of a False Human (detected human, actually machine) for short voicemail greetings. The value of this parameter may need to be reduced by more than 1000ms to detect very short voicemail greetings. A reduction of that significance can result in increased False Machine detections. Adjusting the MachineDetectionSpeechEndThreshold is likely the better approach for short voicemails. Decreasing MachineDetectionSpeechThreshold will also reduce the time it takes to detect a machine.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 2400",
- "minimum": 1000,
- "maximum": 6000
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for the call. To use a transient squad, use `squad` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
},
- "machineDetectionSpeechEndThreshold": {
- "type": "number",
- "description": "The number of milliseconds of silence after speech activity at which point the speech activity is considered complete. Default is 1200 milliseconds.\n\nIncreasing this value will typically be used to better address the short voicemail greeting scenarios. For short voicemails, there is typically 1000-2000ms of audio followed by 1200-2400ms of silence and then additional audio before the beep. Increasing the MachineDetectionSpeechEndThreshold to ~2500ms will treat the 1200-2400ms of silence as a gap in the greeting but not the end of the greeting and will result in a machine detection. The downsides of such a change include:\n- Increasing the delay for human detection by the amount you increase this parameter, e.g., a change of 1200ms to 2500ms increases human detection delay by 1300ms.\n- Cases where a human has two utterances separated by a period of silence (e.g. a \"Hello\", then 2000ms of silence, and another \"Hello\") may be interpreted as a machine.\n\nDecreasing this value will result in faster human detection. The consequence is that it can lead to increased False Human (detected human, actually machine) detections because a silence gap in a voicemail greeting (not necessarily just in short voicemail scenarios) can be incorrectly interpreted as the end of speech.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 1200",
- "minimum": 500,
- "maximum": 5000
+ "squad": {
+ "description": "This is a squad that will be used for the call. To use an existing squad, use `squadId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateSquadDTO"
+ }
+ ]
},
- "machineDetectionSilenceTimeout": {
- "type": "number",
- "description": "The number of milliseconds of initial silence after which an unknown AnsweredBy result will be returned. Default is 5000 milliseconds.\n\nIncreasing this value will result in waiting for a longer period of initial silence before returning an 'unknown' AMD result.\n\nDecreasing this value will result in waiting for a shorter period of initial silence before returning an 'unknown' AMD result.\n\nCheck the [Twilio docs](https://www.twilio.com/docs/voice/answering-machine-detection#optional-api-tuning-parameters) for more info.\n\n@default 5000",
- "minimum": 2000,
- "maximum": 10000
- }
- },
- "required": [
- "provider"
- ]
- },
- "VapiVoicemailDetectionPlan": {
- "type": "object",
- "properties": {
- "beepMaxAwaitSeconds": {
- "type": "number",
- "description": "This is the maximum duration from the start of the call that we will wait for a voicemail beep, before speaking our message\n\n- If we detect a voicemail beep before this, we will speak the message at that point.\n\n- Setting too low a value means that the bot will start speaking its voicemail message too early. If it does so before the actual beep, it will get cut off. You should definitely tune this to your use case.\n\n@default 30\n@min 0\n@max 60",
- "minimum": 0,
- "maximum": 30,
- "default": 30
+ "squadOverrides": {
+ "description": "These are the overrides for the `squad` or `squadId`'s member settings and template variables.\nThis will apply to all members of the squad.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides"
+ }
+ ]
},
- "provider": {
+ "workflowId": {
"type": "string",
- "description": "This is the provider to use for voicemail detection.",
- "enum": [
- "vapi"
- ]
+ "description": "This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
},
- "backoffPlan": {
- "description": "This is the backoff plan for the voicemail detection.",
+ "workflow": {
+ "description": "This is a workflow that will be used for the call. To use an existing workflow, use `workflowId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
"allOf": [
{
- "$ref": "#/components/schemas/VoicemailDetectionBackoffPlan"
+ "$ref": "#/components/schemas/CreateWorkflowDTO"
}
]
},
- "type": {
- "type": "string",
- "description": "This is the detection type to use for voicemail detection.\n- 'audio': Uses native audio models (default)\n- 'transcript': Uses ASR/transcript-based detection\n@default 'audio' (audio detection)",
- "enum": [
- "audio",
- "transcript"
+ "workflowOverrides": {
+ "description": "These are the overrides for the `workflow` or `workflowId`'s settings and template variables.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/WorkflowOverrides"
+ }
]
- }
- },
- "required": [
- "provider"
- ]
- },
- "SQLInjectionSecurityFilter": {
- "type": "object",
- "properties": {
- "type": {
+ },
+ "phoneNumberId": {
"type": "string",
- "description": "The type of security threat to filter.",
- "enum": [
- "sql-injection"
+ "description": "This is the phone number that will be used for the call. To use a transient number, use `phoneNumber` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type."
+ },
+ "phoneNumber": {
+ "description": "This is the phone number that will be used for the call. To use an existing number, use `phoneNumberId` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ImportTwilioPhoneNumberDTO"
+ }
]
- }
- },
- "required": [
- "type"
- ]
- },
- "XSSSecurityFilter": {
- "type": "object",
- "properties": {
- "type": {
+ },
+ "customerId": {
"type": "string",
- "description": "The type of security threat to filter.",
- "enum": [
- "xss"
+ "description": "This is the customer that will be called. To call a transient customer, use `customer` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type."
+ },
+ "customer": {
+ "description": "This is the customer that will be called. To call an existing customer, use `customerId` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
]
- }
- },
- "required": [
- "type"
- ]
- },
- "SSRFSecurityFilter": {
- "type": "object",
- "properties": {
- "type": {
+ },
+ "name": {
"type": "string",
- "description": "The type of security threat to filter.",
- "enum": [
- "ssrf"
+ "description": "This is the name of the call. This is just for your own reference.",
+ "maxLength": 40
+ },
+ "schedulePlan": {
+ "description": "This is the schedule plan of the call.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SchedulePlan"
+ }
]
+ },
+ "transport": {
+ "type": "object",
+ "description": "This is the transport of the call."
}
},
"required": [
- "type"
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
]
},
- "RCESecurityFilter": {
+ "CallBatchError": {
"type": "object",
"properties": {
- "type": {
- "type": "string",
- "description": "The type of security threat to filter.",
- "enum": [
- "rce"
- ]
+ "customer": {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ },
+ "error": {
+ "type": "string"
}
},
"required": [
- "type"
+ "customer",
+ "error"
]
},
- "PromptInjectionSecurityFilter": {
+ "CallBatchResponse": {
"type": "object",
"properties": {
- "type": {
- "type": "string",
- "description": "The type of security threat to filter.",
- "enum": [
- "prompt-injection"
+ "subscriptionLimits": {
+ "description": "Subscription limits at the end of this batch",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SubscriptionLimits"
+ }
]
+ },
+ "results": {
+ "description": "This is the list of calls that were created.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/Call"
+ }
+ },
+ "errors": {
+ "description": "This is the list of calls that failed to be created.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/CallBatchError"
+ }
}
},
"required": [
- "type"
+ "results",
+ "errors"
]
},
- "RegexSecurityFilter": {
+ "AssistantSpeechWordAlignmentTiming": {
"type": "object",
"properties": {
"type": {
"type": "string",
- "description": "The type of security threat to filter.",
+ "description": "Discriminator for exact per-word timing (e.g. ElevenLabs alignment).",
"enum": [
- "regex"
+ "word-alignment"
]
},
- "regex": {
- "type": "string",
- "description": "The regex pattern to filter.",
- "example": "badword1|badword2"
+ "words": {
+ "description": "The individual words in this audio segment.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "wordsStartTimesMs": {
+ "description": "Start time in milliseconds for each word (parallel to `words`).",
+ "type": "array",
+ "items": {
+ "type": "number"
+ }
+ },
+ "wordsEndTimesMs": {
+ "description": "End time in milliseconds for each word (parallel to `words`).",
+ "type": "array",
+ "items": {
+ "type": "number"
+ }
}
},
"required": [
"type",
- "regex"
+ "words",
+ "wordsStartTimesMs",
+ "wordsEndTimesMs"
]
},
- "CreateAssistantDTO": {
+ "AssistantSpeechWordTimestamp": {
"type": "object",
"properties": {
- "transcriber": {
- "description": "These are the options for the assistant's transcriber.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AssemblyAITranscriber",
- "title": "AssemblyAITranscriber"
- },
- {
- "$ref": "#/components/schemas/AzureSpeechTranscriber",
- "title": "AzureSpeechTranscriber"
- },
- {
- "$ref": "#/components/schemas/CustomTranscriber",
- "title": "CustomTranscriber"
- },
- {
- "$ref": "#/components/schemas/DeepgramTranscriber",
- "title": "DeepgramTranscriber"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsTranscriber",
- "title": "ElevenLabsTranscriber"
- },
- {
- "$ref": "#/components/schemas/GladiaTranscriber",
- "title": "GladiaTranscriber"
- },
- {
- "$ref": "#/components/schemas/GoogleTranscriber",
- "title": "GoogleTranscriber"
- },
- {
- "$ref": "#/components/schemas/SpeechmaticsTranscriber",
- "title": "SpeechmaticsTranscriber"
- },
- {
- "$ref": "#/components/schemas/TalkscriberTranscriber",
- "title": "TalkscriberTranscriber"
- },
- {
- "$ref": "#/components/schemas/OpenAITranscriber",
- "title": "OpenAITranscriber"
- },
- {
- "$ref": "#/components/schemas/CartesiaTranscriber",
- "title": "CartesiaTranscriber"
- }
- ]
- },
- "model": {
- "description": "These are the options for the assistant's LLM.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AnthropicModel",
- "title": "Anthropic"
- },
- {
- "$ref": "#/components/schemas/AnyscaleModel",
- "title": "Anyscale"
- },
- {
- "$ref": "#/components/schemas/CerebrasModel",
- "title": "Cerebras"
- },
- {
- "$ref": "#/components/schemas/CustomLLMModel",
- "title": "CustomLLM"
- },
- {
- "$ref": "#/components/schemas/DeepInfraModel",
- "title": "DeepInfra"
- },
- {
- "$ref": "#/components/schemas/DeepSeekModel",
- "title": "DeepSeek"
- },
- {
- "$ref": "#/components/schemas/GoogleModel",
- "title": "Google"
- },
- {
- "$ref": "#/components/schemas/GroqModel",
- "title": "Groq"
- },
- {
- "$ref": "#/components/schemas/InflectionAIModel",
- "title": "InflectionAI"
- },
- {
- "$ref": "#/components/schemas/OpenAIModel",
- "title": "OpenAI"
- },
- {
- "$ref": "#/components/schemas/OpenRouterModel",
- "title": "OpenRouter"
- },
- {
- "$ref": "#/components/schemas/PerplexityAIModel",
- "title": "PerplexityAI"
- },
- {
- "$ref": "#/components/schemas/TogetherAIModel",
- "title": "Together"
- },
- {
- "$ref": "#/components/schemas/XaiModel",
- "title": "XAI"
- }
- ]
- },
- "voice": {
- "description": "These are the options for the assistant's voice.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AzureVoice",
- "title": "AzureVoice"
- },
- {
- "$ref": "#/components/schemas/CartesiaVoice",
- "title": "CartesiaVoice"
- },
- {
- "$ref": "#/components/schemas/CustomVoice",
- "title": "CustomVoice"
- },
- {
- "$ref": "#/components/schemas/DeepgramVoice",
- "title": "DeepgramVoice"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsVoice",
- "title": "ElevenLabsVoice"
- },
- {
- "$ref": "#/components/schemas/HumeVoice",
- "title": "HumeVoice"
- },
- {
- "$ref": "#/components/schemas/LMNTVoice",
- "title": "LMNTVoice"
- },
- {
- "$ref": "#/components/schemas/NeuphonicVoice",
- "title": "NeuphonicVoice"
- },
- {
- "$ref": "#/components/schemas/OpenAIVoice",
- "title": "OpenAIVoice"
- },
- {
- "$ref": "#/components/schemas/PlayHTVoice",
- "title": "PlayHTVoice"
- },
- {
- "$ref": "#/components/schemas/RimeAIVoice",
- "title": "RimeAIVoice"
- },
- {
- "$ref": "#/components/schemas/SmallestAIVoice",
- "title": "SmallestAIVoice"
- },
- {
- "$ref": "#/components/schemas/TavusVoice",
- "title": "TavusVoice"
- },
- {
- "$ref": "#/components/schemas/VapiVoice",
- "title": "VapiVoice"
- },
- {
- "$ref": "#/components/schemas/SesameVoice",
- "title": "SesameVoice"
- },
- {
- "$ref": "#/components/schemas/InworldVoice",
- "title": "InworldVoice"
- },
- {
- "$ref": "#/components/schemas/MinimaxVoice",
- "title": "MinimaxVoice"
- }
- ]
- },
- "firstMessage": {
+ "word": {
"type": "string",
- "description": "This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.).\n\nIf unspecified, assistant will wait for user to speak and use the model to respond once they speak.",
- "example": "Hello! How can I help you today?"
+ "description": "The full word text (syllables aggregated into complete words)."
},
- "firstMessageInterruptionsEnabled": {
- "type": "boolean",
- "default": false
+ "startMs": {
+ "type": "number",
+ "description": "Start time in milliseconds relative to the segment start."
},
- "firstMessageMode": {
+ "endMs": {
+ "type": "number",
+ "description": "End time in milliseconds relative to the segment start."
+ }
+ },
+ "required": [
+ "word",
+ "startMs",
+ "endMs"
+ ]
+ },
+ "AssistantSpeechWordProgressTiming": {
+ "type": "object",
+ "properties": {
+ "type": {
"type": "string",
- "description": "This is the mode for the first message. Default is 'assistant-speaks-first'.\n\nUse:\n- 'assistant-speaks-first' to have the assistant speak first.\n- 'assistant-waits-for-user' to have the assistant wait for the user to speak first.\n- 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points).\n\n@default 'assistant-speaks-first'",
+ "description": "Discriminator for cursor-based word progress (e.g. Minimax subtitle data).",
"enum": [
- "assistant-speaks-first",
- "assistant-speaks-first-with-model-generated-message",
- "assistant-waits-for-user"
- ],
- "example": "assistant-speaks-first"
- },
- "voicemailDetection": {
- "description": "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nThis uses Twilio's built-in detection while the VoicemailTool relies on the model to detect if a voicemail was reached.\nYou can use neither of them, one of them, or both of them. By default, Twilio built-in detection is enabled while VoicemailTool is not.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan",
- "title": "Google"
- },
- {
- "$ref": "#/components/schemas/OpenAIVoicemailDetectionPlan",
- "title": "OpenAI"
- },
- {
- "$ref": "#/components/schemas/TwilioVoicemailDetectionPlan",
- "title": "Twilio"
- },
- {
- "$ref": "#/components/schemas/VapiVoicemailDetectionPlan",
- "title": "Vapi"
- }
+ "word-progress"
]
},
- "clientMessages": {
+ "wordsSpoken": {
+ "type": "number",
+ "description": "Number of words spoken so far in this turn."
+ },
+ "totalWords": {
+ "type": "number",
+ "description": "Total number of words sent to the TTS provider for this turn.\n\n**Important**: this value grows across events within a single turn because\nMinimax synthesizes audio incrementally as the LLM streams tokens. Treat\nit as \"best known total so far\" — it will stabilize once synthesis is\ncomplete.\n\nA value of `0` is a valid sentinel meaning \"not yet known\". This can occur\non the very first `assistant-speech` event of a turn if audio begins\nplaying before the TTS provider has confirmed word-count data. Clients\n**must** guard against divide-by-zero when computing a progress fraction:\n\n```ts\nconst pct = totalWords > 0 ? wordsSpoken / totalWords : 0;\n```"
+ },
+ "segment": {
+ "type": "string",
+ "description": "The text of the latest spoken segment (sentence or clause). Use this\nfor caption display — it corresponds to the chunk just confirmed by\nthe TTS provider, unlike `text` on the parent message which carries\nthe full turn text."
+ },
+ "segmentDurationMs": {
+ "type": "number",
+ "description": "Audio duration in milliseconds for the latest spoken segment. Pair\nwith `segment` to animate karaoke-style word reveals — divide the\nsegment text across this duration for approximate per-word timing."
+ },
+ "words": {
+ "description": "Per-word timestamps for the latest spoken segment. Available when the\nTTS provider supports word-level timing (e.g. Minimax with\nsubtitle_type: \"word\"). Syllables from the provider are aggregated\ninto whole words with start/end times relative to the segment start.\n\nUse these for precise karaoke-style highlighting instead of\ninterpolating from segmentDurationMs.",
"type": "array",
- "enum": [
- "conversation-update",
- "function-call",
- "function-call-result",
- "hang",
- "language-changed",
- "metadata",
- "model-output",
- "speech-update",
- "status-update",
- "transcript",
- "tool-calls",
- "tool-calls-result",
- "tool.completed",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "workflow.node.started"
- ],
- "example": [
- "conversation-update",
- "function-call",
- "hang",
- "model-output",
- "speech-update",
- "status-update",
- "transfer-update",
- "transcript",
- "tool-calls",
- "user-interrupted",
- "voice-input",
- "workflow.node.started"
- ],
- "description": "These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input,workflow.node.started. You can check the shape of the messages in ClientMessage schema.",
"items": {
- "type": "string",
- "enum": [
- "conversation-update",
- "function-call",
- "function-call-result",
- "hang",
- "language-changed",
- "metadata",
- "model-output",
- "speech-update",
- "status-update",
- "transcript",
- "tool-calls",
- "tool-calls-result",
- "tool.completed",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "workflow.node.started"
- ]
+ "$ref": "#/components/schemas/AssistantSpeechWordTimestamp"
}
- },
- "serverMessages": {
+ }
+ },
+ "required": [
+ "type",
+ "wordsSpoken",
+ "totalWords"
+ ]
+ },
+ "CreateCallDTO": {
+ "type": "object",
+ "properties": {
+ "customers": {
+ "description": "This is used to issue batch calls to multiple customers.\n\nOnly relevant for `outboundPhoneCall`. To call a single customer, use `customer` instead.",
"type": "array",
- "enum": [
- "conversation-update",
- "end-of-call-report",
- "function-call",
- "hang",
- "language-changed",
- "language-change-detected",
- "model-output",
- "phone-call-control",
- "speech-update",
- "status-update",
- "transcript",
- "transcript[transcriptType=\"final\"]",
- "tool-calls",
- "transfer-destination-request",
- "handoff-destination-request",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "chat.created",
- "chat.deleted",
- "session.created",
- "session.updated",
- "session.deleted"
- ],
- "example": [
- "conversation-update",
- "end-of-call-report",
- "function-call",
- "hang",
- "speech-update",
- "status-update",
- "tool-calls",
- "transfer-destination-request",
- "handoff-destination-request",
- "user-interrupted"
- ],
- "description": "These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,handoff-destination-request,user-interrupted. You can check the shape of the messages in ServerMessage schema.",
"items": {
- "type": "string",
- "enum": [
- "conversation-update",
- "end-of-call-report",
- "function-call",
- "hang",
- "language-changed",
- "language-change-detected",
- "model-output",
- "phone-call-control",
- "speech-update",
- "status-update",
- "transcript",
- "transcript[transcriptType=\"final\"]",
- "tool-calls",
- "transfer-destination-request",
- "handoff-destination-request",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "chat.created",
- "chat.deleted",
- "session.created",
- "session.updated",
- "session.deleted"
- ]
+ "$ref": "#/components/schemas/CreateCustomerDTO"
}
},
- "maxDurationSeconds": {
- "type": "number",
- "description": "This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended.\n\n@default 600 (10 minutes)",
- "minimum": 10,
- "maximum": 43200,
- "example": 600
+ "name": {
+ "type": "string",
+ "description": "This is the name of the call. This is just for your own reference.",
+ "maxLength": 40
},
- "backgroundSound": {
- "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
- "oneOf": [
- {
- "type": "enum",
- "enum": [
- "off",
- "office"
- ],
- "example": "office"
- },
+ "schedulePlan": {
+ "description": "This is the schedule plan of the call.",
+ "allOf": [
{
- "type": "string",
- "format": "uri",
- "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
+ "$ref": "#/components/schemas/SchedulePlan"
}
]
},
- "modelOutputInMessagesEnabled": {
- "type": "boolean",
- "description": "This determines whether the model's output is used in conversation history rather than the transcription of assistant's speech.\n\nDefault `false` while in beta.\n\n@default false",
- "example": false
+ "transport": {
+ "type": "object",
+ "description": "This is the transport of the call."
},
- "transportConfigurations": {
- "type": "array",
- "description": "These are the configurations to be passed to the transport providers of assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransportConfigurationTwilio",
- "title": "Twilio"
- }
- ]
- }
+ "assistantId": {
+ "type": "string",
+ "description": "This is the assistant ID that will be used for the call. To use a transient assistant, use `assistant` instead.\n\nTo start a call with:\n- Assistant, use `assistantId` or `assistant`\n- Squad, use `squadId` or `squad`\n- Workflow, use `workflowId` or `workflow`"
},
- "observabilityPlan": {
- "description": "This is the plan for observability of assistant's calls.\n\nCurrently, only Langfuse is supported.",
- "oneOf": [
+ "assistant": {
+ "description": "This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead.\n\nTo start a call with:\n- Assistant, use `assistant`\n- Squad, use `squad`\n- Workflow, use `workflow`",
+ "allOf": [
{
- "$ref": "#/components/schemas/LangfuseObservabilityPlan",
- "title": "Langfuse"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
- ],
+ ]
+ },
+ "assistantOverrides": {
+ "description": "These are the overrides for the `assistant` or `assistantId`'s settings and template variables.",
"allOf": [
{
- "$ref": "#/components/schemas/LangfuseObservabilityPlan"
+ "$ref": "#/components/schemas/AssistantOverrides"
}
]
},
- "credentials": {
- "type": "array",
- "description": "These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
- "title": "AnthropicCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "title": "AnyscaleCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "title": "AssemblyAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureCredentialDTO",
- "title": "AzureCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "title": "AzureOpenAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "title": "ByoSipTrunkCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
- "title": "CartesiaCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
- "title": "CerebrasCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
- "title": "CloudflareCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "title": "CustomLLMCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
- "title": "DeepgramCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "title": "DeepInfraCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "title": "DeepSeekCredential"
- },
- {
- "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "title": "ElevenLabsCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGcpCredentialDTO",
- "title": "GcpCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
- "title": "GladiaCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "title": "GhlCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
- "title": "GoogleCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGroqCredentialDTO",
- "title": "GroqCredential"
- },
- {
- "$ref": "#/components/schemas/CreateHumeCredentialDTO",
- "title": "HumeCredential"
- },
- {
- "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
- "title": "InflectionAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
- "title": "LangfuseCredential"
- },
- {
- "$ref": "#/components/schemas/CreateLmntCredentialDTO",
- "title": "LmntCredential"
- },
- {
- "$ref": "#/components/schemas/CreateMakeCredentialDTO",
- "title": "MakeCredential"
- },
- {
- "$ref": "#/components/schemas/CreateMistralCredentialDTO",
- "title": "MistralCredential"
- },
- {
- "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "title": "NeuphonicCredential"
- },
- {
- "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
- "title": "OpenAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "title": "OpenRouterCredential"
- },
- {
- "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "title": "PerplexityAICredential"
- },
- {
- "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
- "title": "PlayHTCredential"
- },
- {
- "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
- "title": "RimeAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
- "title": "RunpodCredential"
- },
- {
- "$ref": "#/components/schemas/CreateS3CredentialDTO",
- "title": "S3Credential"
- },
- {
- "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
- "title": "SmallestAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "title": "SpeechmaticsCredential"
- },
- {
- "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
- "title": "SupabaseCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTavusCredentialDTO",
- "title": "TavusCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
- "title": "TogetherAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
- "title": "TrieveCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
- "title": "TwilioCredential"
- },
- {
- "$ref": "#/components/schemas/CreateVonageCredentialDTO",
- "title": "VonageCredential"
- },
- {
- "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
- "title": "WebhookCredential"
- },
- {
- "$ref": "#/components/schemas/CreateXAiCredentialDTO",
- "title": "XAiCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "title": "GoogleCalendarOAuth2ClientCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "title": "GoogleCalendarOAuth2AuthorizationCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "title": "GoogleSheetsOAuth2AuthorizationCredential"
- },
- {
- "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "title": "SlackOAuth2AuthorizationCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "title": "GoHighLevelMCPCredential"
- },
- {
- "$ref": "#/components/schemas/CreateInworldCredentialDTO",
- "title": "InworldCredential"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
- "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "azure": "#/components/schemas/CreateAzureCredentialDTO",
- "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
- "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
- "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
- "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
- "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "gcp": "#/components/schemas/CreateGcpCredentialDTO",
- "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
- "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "google": "#/components/schemas/CreateGoogleCredentialDTO",
- "groq": "#/components/schemas/CreateGroqCredentialDTO",
- "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
- "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
- "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
- "make": "#/components/schemas/CreateMakeCredentialDTO",
- "openai": "#/components/schemas/CreateOpenAICredentialDTO",
- "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
- "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
- "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
- "s3": "#/components/schemas/CreateS3CredentialDTO",
- "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
- "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
- "tavus": "#/components/schemas/CreateTavusCredentialDTO",
- "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
- "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
- "vonage": "#/components/schemas/CreateVonageCredentialDTO",
- "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
- "xai": "#/components/schemas/CreateXAiCredentialDTO",
- "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "hume": "#/components/schemas/CreateHumeCredentialDTO",
- "mistral": "#/components/schemas/CreateMistralCredentialDTO",
- "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
- "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "inworld": "#/components/schemas/CreateInworldCredentialDTO",
- "minimax": "#/components/schemas/CreateMinimaxCredentialDTO"
- }
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for the call. To use a transient squad, use `squad` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
+ },
+ "squad": {
+ "description": "This is a squad that will be used for the call. To use an existing squad, use `squadId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateSquadDTO"
+ }
+ ]
+ },
+ "squadOverrides": {
+ "description": "These are the overrides for the `squad` or `squadId`'s member settings and template variables.\nThis will apply to all members of the squad.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides"
}
- }
+ ]
},
- "hooks": {
- "type": "array",
- "description": "This is a set of actions that will be performed on certain events.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/CallHookCallEnding",
- "title": "CallHookCallEnding"
- },
- {
- "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
- "title": "CallHookAssistantSpeechInterrupted"
- },
- {
- "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
- "title": "CallHookCustomerSpeechInterrupted"
- },
- {
- "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
- "title": "CallHookCustomerSpeechTimeout"
- }
- ]
- }
+ "workflowId": {
+ "type": "string",
+ "description": "This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
},
- "name": {
+ "workflow": {
+ "description": "This is a workflow that will be used for the call. To use an existing workflow, use `workflowId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateWorkflowDTO"
+ }
+ ]
+ },
+ "workflowOverrides": {
+ "description": "These are the overrides for the `workflow` or `workflowId`'s settings and template variables.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/WorkflowOverrides"
+ }
+ ]
+ },
+ "phoneNumberId": {
"type": "string",
- "description": "This is the name of the assistant.\n\nThis is required when you want to transfer between assistants in a call.",
- "maxLength": 40
+ "description": "This is the phone number that will be used for the call. To use a transient number, use `phoneNumber` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type."
},
- "voicemailMessage": {
+ "phoneNumber": {
+ "description": "This is the phone number that will be used for the call. To use an existing number, use `phoneNumberId` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ImportTwilioPhoneNumberDTO"
+ }
+ ]
+ },
+ "customerId": {
"type": "string",
- "description": "This is the message that the assistant will say if the call is forwarded to voicemail.\n\nIf unspecified, it will hang up.",
- "maxLength": 1000
+ "description": "This is the customer that will be called. To call a transient customer, use `customer` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type."
},
- "endCallMessage": {
+ "customer": {
+ "description": "This is the customer that will be called. To call an existing customer, use `customerId` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
+ ]
+ }
+ }
+ },
+ "StructuredOutputFilterDTO": {
+ "type": "object",
+ "properties": {
+ "eq": {
"type": "string",
- "description": "This is the message that the assistant will say if it ends the call.\n\nIf unspecified, it will hang up without saying anything.",
- "maxLength": 1000
+ "description": "Equal to"
},
- "endCallPhrases": {
- "description": "This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive.",
+ "neq": {
+ "type": "string",
+ "description": "Not equal to"
+ },
+ "gt": {
+ "type": "string",
+ "description": "Greater than"
+ },
+ "gte": {
+ "type": "string",
+ "description": "Greater than or equal to"
+ },
+ "lt": {
+ "type": "string",
+ "description": "Less than"
+ },
+ "lte": {
+ "type": "string",
+ "description": "Less than or equal to"
+ },
+ "contains": {
+ "type": "string",
+ "description": "Contains"
+ },
+ "notContains": {
+ "type": "string",
+ "description": "Not contains"
+ }
+ }
+ },
+ "CallPaginatedResponse": {
+ "type": "object",
+ "properties": {
+ "results": {
"type": "array",
"items": {
- "type": "string",
- "maxLength": 140,
- "minLength": 2
+ "$ref": "#/components/schemas/Call"
}
},
- "compliancePlan": {
- "$ref": "#/components/schemas/CompliancePlan"
- },
"metadata": {
+ "$ref": "#/components/schemas/PaginationMeta"
+ }
+ },
+ "required": [
+ "results",
+ "metadata"
+ ]
+ },
+ "CreateOutboundCallDTO": {
+ "type": "object",
+ "properties": {
+ "customers": {
+ "description": "This is used to issue batch calls to multiple customers.\n\nOnly relevant for `outboundPhoneCall`. To call a single customer, use `customer` instead.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the call. This is just for your own reference.",
+ "maxLength": 40
+ },
+ "schedulePlan": {
+ "description": "This is the schedule plan of the call.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SchedulePlan"
+ }
+ ]
+ },
+ "transport": {
"type": "object",
- "description": "This is for metadata you want to store on the assistant."
+ "description": "This is the transport of the call."
},
- "backgroundSpeechDenoisingPlan": {
- "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nSmart denoising can be combined with or used independently of Fourier denoising.\n\nOrder of precedence:\n- Smart denoising\n- Fourier denoising",
+ "assistantId": {
+ "type": "string",
+ "description": "This is the assistant ID that will be used for the call. To use a transient assistant, use `assistant` instead.\n\nTo start a call with:\n- Assistant, use `assistantId` or `assistant`\n- Squad, use `squadId` or `squad`\n- Workflow, use `workflowId` or `workflow`"
+ },
+ "assistant": {
+ "description": "This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead.\n\nTo start a call with:\n- Assistant, use `assistant`\n- Squad, use `squad`\n- Workflow, use `workflow`",
"allOf": [
{
- "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
]
},
- "analysisPlan": {
- "description": "This is the plan for analysis of assistant's calls. Stored in `call.analysis`.",
+ "assistantOverrides": {
+ "description": "These are the overrides for the `assistant` or `assistantId`'s settings and template variables.",
"allOf": [
{
- "$ref": "#/components/schemas/AnalysisPlan"
+ "$ref": "#/components/schemas/AssistantOverrides"
}
]
},
- "artifactPlan": {
- "description": "This is the plan for artifacts generated during assistant's calls. Stored in `call.artifact`.",
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for the call. To use a transient squad, use `squad` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
+ },
+ "squad": {
+ "description": "This is a squad that will be used for the call. To use an existing squad, use `squadId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
"allOf": [
{
- "$ref": "#/components/schemas/ArtifactPlan"
+ "$ref": "#/components/schemas/CreateSquadDTO"
}
]
},
- "startSpeakingPlan": {
- "description": "This is the plan for when the assistant should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
+ "squadOverrides": {
+ "description": "These are the overrides for the `squad` or `squadId`'s member settings and template variables.\nThis will apply to all members of the squad.",
"allOf": [
{
- "$ref": "#/components/schemas/StartSpeakingPlan"
+ "$ref": "#/components/schemas/AssistantOverrides"
}
]
},
- "stopSpeakingPlan": {
- "description": "This is the plan for when assistant should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
+ "workflowId": {
+ "type": "string",
+ "description": "This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
+ },
+ "workflow": {
+ "description": "This is a workflow that will be used for the call. To use an existing workflow, use `workflowId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
"allOf": [
{
- "$ref": "#/components/schemas/StopSpeakingPlan"
+ "$ref": "#/components/schemas/CreateWorkflowDTO"
}
]
},
- "monitorPlan": {
- "description": "This is the plan for real-time monitoring of the assistant's calls.\n\nUsage:\n- To enable live listening of the assistant's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the assistant's calls, set `monitorPlan.controlEnabled` to `true`.",
+ "workflowOverrides": {
+ "description": "These are the overrides for the `workflow` or `workflowId`'s settings and template variables.",
"allOf": [
{
- "$ref": "#/components/schemas/MonitorPlan"
+ "$ref": "#/components/schemas/WorkflowOverrides"
}
]
},
- "credentialIds": {
- "description": "These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
- "type": "array",
- "items": {
- "type": "string"
- }
+ "phoneNumberId": {
+ "type": "string",
+ "description": "This is the phone number that will be used for the call. To use a transient number, use `phoneNumber` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type."
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server.url\n2. phoneNumber.serverUrl\n3. org.serverUrl",
+ "phoneNumber": {
+ "description": "This is the phone number that will be used for the call. To use an existing number, use `phoneNumberId` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/ImportTwilioPhoneNumberDTO"
}
]
},
- "keypadInputPlan": {
- "$ref": "#/components/schemas/KeypadInputPlan"
+ "customerId": {
+ "type": "string",
+ "description": "This is the customer that will be called. To call a transient customer, use `customer` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type."
+ },
+ "customer": {
+ "description": "This is the customer that will be called. To call an existing customer, use `customerId` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
+ ]
}
}
},
- "AssistantOverrides": {
+ "CreateWebCallDTO": {
"type": "object",
"properties": {
- "transcriber": {
- "description": "These are the options for the assistant's transcriber.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AssemblyAITranscriber",
- "title": "AssemblyAITranscriber"
- },
- {
- "$ref": "#/components/schemas/AzureSpeechTranscriber",
- "title": "AzureSpeechTranscriber"
- },
- {
- "$ref": "#/components/schemas/CustomTranscriber",
- "title": "CustomTranscriber"
- },
- {
- "$ref": "#/components/schemas/DeepgramTranscriber",
- "title": "DeepgramTranscriber"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsTranscriber",
- "title": "ElevenLabsTranscriber"
- },
- {
- "$ref": "#/components/schemas/GladiaTranscriber",
- "title": "GladiaTranscriber"
- },
- {
- "$ref": "#/components/schemas/GoogleTranscriber",
- "title": "GoogleTranscriber"
- },
- {
- "$ref": "#/components/schemas/SpeechmaticsTranscriber",
- "title": "SpeechmaticsTranscriber"
- },
- {
- "$ref": "#/components/schemas/TalkscriberTranscriber",
- "title": "TalkscriberTranscriber"
- },
- {
- "$ref": "#/components/schemas/OpenAITranscriber",
- "title": "OpenAITranscriber"
- },
+ "roomDeleteOnUserLeaveEnabled": {
+ "type": "boolean",
+ "default": true
+ },
+ "assistantId": {
+ "type": "string",
+ "description": "This is the assistant ID that will be used for the call. To use a transient assistant, use `assistant` instead.\n\nTo start a call with:\n- Assistant, use `assistantId` or `assistant`\n- Squad, use `squadId` or `squad`\n- Workflow, use `workflowId` or `workflow`"
+ },
+ "assistant": {
+ "description": "This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead.\n\nTo start a call with:\n- Assistant, use `assistant`\n- Squad, use `squad`\n- Workflow, use `workflow`",
+ "allOf": [
{
- "$ref": "#/components/schemas/CartesiaTranscriber",
- "title": "CartesiaTranscriber"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
]
},
- "model": {
- "description": "These are the options for the assistant's LLM.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AnthropicModel",
- "title": "Anthropic"
- },
- {
- "$ref": "#/components/schemas/AnyscaleModel",
- "title": "Anyscale"
- },
- {
- "$ref": "#/components/schemas/CerebrasModel",
- "title": "Cerebras"
- },
- {
- "$ref": "#/components/schemas/CustomLLMModel",
- "title": "CustomLLM"
- },
- {
- "$ref": "#/components/schemas/DeepInfraModel",
- "title": "DeepInfra"
- },
- {
- "$ref": "#/components/schemas/DeepSeekModel",
- "title": "DeepSeek"
- },
- {
- "$ref": "#/components/schemas/GoogleModel",
- "title": "Google"
- },
- {
- "$ref": "#/components/schemas/GroqModel",
- "title": "Groq"
- },
- {
- "$ref": "#/components/schemas/InflectionAIModel",
- "title": "InflectionAI"
- },
- {
- "$ref": "#/components/schemas/OpenAIModel",
- "title": "OpenAI"
- },
- {
- "$ref": "#/components/schemas/OpenRouterModel",
- "title": "OpenRouter"
- },
- {
- "$ref": "#/components/schemas/PerplexityAIModel",
- "title": "PerplexityAI"
- },
- {
- "$ref": "#/components/schemas/TogetherAIModel",
- "title": "Together"
- },
+ "assistantOverrides": {
+ "description": "These are the overrides for the `assistant` or `assistantId`'s settings and template variables.",
+ "allOf": [
{
- "$ref": "#/components/schemas/XaiModel",
- "title": "XAI"
+ "$ref": "#/components/schemas/AssistantOverrides"
}
]
},
- "voice": {
- "description": "These are the options for the assistant's voice.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AzureVoice",
- "title": "AzureVoice"
- },
- {
- "$ref": "#/components/schemas/CartesiaVoice",
- "title": "CartesiaVoice"
- },
- {
- "$ref": "#/components/schemas/CustomVoice",
- "title": "CustomVoice"
- },
- {
- "$ref": "#/components/schemas/DeepgramVoice",
- "title": "DeepgramVoice"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsVoice",
- "title": "ElevenLabsVoice"
- },
- {
- "$ref": "#/components/schemas/HumeVoice",
- "title": "HumeVoice"
- },
- {
- "$ref": "#/components/schemas/LMNTVoice",
- "title": "LMNTVoice"
- },
- {
- "$ref": "#/components/schemas/NeuphonicVoice",
- "title": "NeuphonicVoice"
- },
- {
- "$ref": "#/components/schemas/OpenAIVoice",
- "title": "OpenAIVoice"
- },
- {
- "$ref": "#/components/schemas/PlayHTVoice",
- "title": "PlayHTVoice"
- },
- {
- "$ref": "#/components/schemas/RimeAIVoice",
- "title": "RimeAIVoice"
- },
- {
- "$ref": "#/components/schemas/SmallestAIVoice",
- "title": "SmallestAIVoice"
- },
- {
- "$ref": "#/components/schemas/TavusVoice",
- "title": "TavusVoice"
- },
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for the call. To use a transient squad, use `squad` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
+ },
+ "squad": {
+ "description": "This is a squad that will be used for the call. To use an existing squad, use `squadId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
+ "allOf": [
{
- "$ref": "#/components/schemas/VapiVoice",
- "title": "VapiVoice"
- },
+ "$ref": "#/components/schemas/CreateSquadDTO"
+ }
+ ]
+ },
+ "squadOverrides": {
+ "description": "These are the overrides for the `squad` or `squadId`'s member settings and template variables.\nThis will apply to all members of the squad.",
+ "allOf": [
{
- "$ref": "#/components/schemas/SesameVoice",
- "title": "SesameVoice"
- },
+ "$ref": "#/components/schemas/AssistantOverrides"
+ }
+ ]
+ },
+ "workflowId": {
+ "type": "string",
+ "description": "This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
+ },
+ "workflow": {
+ "description": "This is a workflow that will be used for the call. To use an existing workflow, use `workflowId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
+ "allOf": [
{
- "$ref": "#/components/schemas/InworldVoice",
- "title": "InworldVoice"
- },
+ "$ref": "#/components/schemas/CreateWorkflowDTO"
+ }
+ ]
+ },
+ "workflowOverrides": {
+ "description": "These are the overrides for the `workflow` or `workflowId`'s settings and template variables.",
+ "allOf": [
{
- "$ref": "#/components/schemas/MinimaxVoice",
- "title": "MinimaxVoice"
+ "$ref": "#/components/schemas/WorkflowOverrides"
}
]
+ }
+ }
+ },
+ "UpdateCallDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the call. This is just for your own reference.",
+ "maxLength": 40
+ }
+ }
+ },
+ "DeleteCallDTO": {
+ "type": "object",
+ "properties": {
+ "ids": {
+ "description": "These are the Call IDs to be bulk deleted.\nIf provided, the call ID if any in the request query will be ignored\nWhen requesting a bulk delete, updates when a call is deleted will be sent as a webhook to the server URL configured in the Org settings.\nIt may take up to a few hours to complete the bulk delete, and will be asynchronous.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "DeveloperMessage": {
+ "type": "object",
+ "properties": {
+ "role": {
+ "type": "string",
+ "description": "This is the role of the message author",
+ "default": "developer",
+ "enum": [
+ "developer"
+ ]
+ },
+ "content": {
+ "type": "string",
+ "description": "This is the content of the developer message",
+ "maxLength": 10000
+ },
+ "name": {
+ "type": "string",
+ "description": "This is an optional name for the participant",
+ "maxLength": 40
+ },
+ "metadata": {
+ "type": "object",
+ "description": "This is an optional metadata for the message"
+ }
+ },
+ "required": [
+ "role",
+ "content"
+ ]
+ },
+ "SystemMessage": {
+ "type": "object",
+ "properties": {
+ "role": {
+ "type": "string",
+ "description": "The role of the system in the conversation."
+ },
+ "message": {
+ "type": "string",
+ "description": "The message content from the system."
+ },
+ "time": {
+ "type": "number",
+ "description": "The timestamp when the message was sent."
+ },
+ "secondsFromStart": {
+ "type": "number",
+ "description": "The number of seconds from the start of the conversation."
+ }
+ },
+ "required": [
+ "role",
+ "message",
+ "time",
+ "secondsFromStart"
+ ]
+ },
+ "UserMessage": {
+ "type": "object",
+ "properties": {
+ "role": {
+ "type": "string",
+ "description": "The role of the user in the conversation."
},
- "firstMessage": {
+ "message": {
"type": "string",
- "description": "This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.).\n\nIf unspecified, assistant will wait for user to speak and use the model to respond once they speak.",
- "example": "Hello! How can I help you today?"
+ "description": "The message content from the user."
},
- "firstMessageInterruptionsEnabled": {
- "type": "boolean",
- "default": false
+ "time": {
+ "type": "number",
+ "description": "The timestamp when the message was sent."
},
- "firstMessageMode": {
- "type": "string",
- "description": "This is the mode for the first message. Default is 'assistant-speaks-first'.\n\nUse:\n- 'assistant-speaks-first' to have the assistant speak first.\n- 'assistant-waits-for-user' to have the assistant wait for the user to speak first.\n- 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points).\n\n@default 'assistant-speaks-first'",
- "enum": [
- "assistant-speaks-first",
- "assistant-speaks-first-with-model-generated-message",
- "assistant-waits-for-user"
- ],
- "example": "assistant-speaks-first"
+ "endTime": {
+ "type": "number",
+ "description": "The timestamp when the message ended."
},
- "voicemailDetection": {
- "description": "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nThis uses Twilio's built-in detection while the VoicemailTool relies on the model to detect if a voicemail was reached.\nYou can use neither of them, one of them, or both of them. By default, Twilio built-in detection is enabled while VoicemailTool is not.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan",
- "title": "Google"
- },
- {
- "$ref": "#/components/schemas/OpenAIVoicemailDetectionPlan",
- "title": "OpenAI"
- },
- {
- "$ref": "#/components/schemas/TwilioVoicemailDetectionPlan",
- "title": "Twilio"
- },
- {
- "$ref": "#/components/schemas/VapiVoicemailDetectionPlan",
- "title": "Vapi"
- }
- ]
+ "secondsFromStart": {
+ "type": "number",
+ "description": "The number of seconds from the start of the conversation."
},
- "clientMessages": {
- "type": "array",
- "enum": [
- "conversation-update",
- "function-call",
- "function-call-result",
- "hang",
- "language-changed",
- "metadata",
- "model-output",
- "speech-update",
- "status-update",
- "transcript",
- "tool-calls",
- "tool-calls-result",
- "tool.completed",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "workflow.node.started"
- ],
- "example": [
- "conversation-update",
- "function-call",
- "hang",
- "model-output",
- "speech-update",
- "status-update",
- "transfer-update",
- "transcript",
- "tool-calls",
- "user-interrupted",
- "voice-input",
- "workflow.node.started"
- ],
- "description": "These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input,workflow.node.started. You can check the shape of the messages in ClientMessage schema.",
- "items": {
- "type": "string",
- "enum": [
- "conversation-update",
- "function-call",
- "function-call-result",
- "hang",
- "language-changed",
- "metadata",
- "model-output",
- "speech-update",
- "status-update",
- "transcript",
- "tool-calls",
- "tool-calls-result",
- "tool.completed",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "workflow.node.started"
- ]
- }
+ "duration": {
+ "type": "number",
+ "description": "The duration of the message in seconds."
},
- "serverMessages": {
+ "isFiltered": {
+ "type": "boolean",
+ "description": "Indicates if the message was filtered for security reasons."
+ },
+ "detectedThreats": {
+ "description": "List of detected security threats if the message was filtered.",
"type": "array",
- "enum": [
- "conversation-update",
- "end-of-call-report",
- "function-call",
- "hang",
- "language-changed",
- "language-change-detected",
- "model-output",
- "phone-call-control",
- "speech-update",
- "status-update",
- "transcript",
- "transcript[transcriptType=\"final\"]",
- "tool-calls",
- "transfer-destination-request",
- "handoff-destination-request",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "chat.created",
- "chat.deleted",
- "session.created",
- "session.updated",
- "session.deleted"
- ],
- "example": [
- "conversation-update",
- "end-of-call-report",
- "function-call",
- "hang",
- "speech-update",
- "status-update",
- "tool-calls",
- "transfer-destination-request",
- "handoff-destination-request",
- "user-interrupted"
- ],
- "description": "These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,handoff-destination-request,user-interrupted. You can check the shape of the messages in ServerMessage schema.",
"items": {
- "type": "string",
- "enum": [
- "conversation-update",
- "end-of-call-report",
- "function-call",
- "hang",
- "language-changed",
- "language-change-detected",
- "model-output",
- "phone-call-control",
- "speech-update",
- "status-update",
- "transcript",
- "transcript[transcriptType=\"final\"]",
- "tool-calls",
- "transfer-destination-request",
- "handoff-destination-request",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "chat.created",
- "chat.deleted",
- "session.created",
- "session.updated",
- "session.deleted"
- ]
+ "type": "string"
}
},
- "maxDurationSeconds": {
- "type": "number",
- "description": "This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended.\n\n@default 600 (10 minutes)",
- "minimum": 10,
- "maximum": 43200,
- "example": 600
+ "originalMessage": {
+ "type": "string",
+ "description": "The original message before filtering (only included if content was filtered)."
},
- "backgroundSound": {
- "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
- "oneOf": [
- {
- "type": "enum",
- "enum": [
- "off",
- "office"
- ],
- "example": "office"
- },
+ "metadata": {
+ "type": "object",
+ "description": "The metadata associated with the message. Currently used to store the transcriber's word level confidence."
+ },
+ "speakerLabel": {
+ "type": "string",
+ "description": "Stable speaker label for diarized user speakers (e.g., \"Speaker 1\")."
+ }
+ },
+ "required": [
+ "role",
+ "message",
+ "time",
+ "endTime",
+ "secondsFromStart"
+ ]
+ },
+ "ToolCallFunction": {
+ "type": "object",
+ "properties": {
+ "arguments": {
+ "type": "string",
+ "description": "This is the arguments to call the function with"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the function to call",
+ "maxLength": 80
+ }
+ },
+ "required": [
+ "arguments",
+ "name"
+ ]
+ },
+ "ToolCall": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "This is the ID of the tool call"
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of tool"
+ },
+ "function": {
+ "description": "This is the function that was called",
+ "allOf": [
{
- "type": "string",
- "format": "uri",
- "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
+ "$ref": "#/components/schemas/ToolCallFunction"
}
]
+ }
+ },
+ "required": [
+ "id",
+ "type",
+ "function"
+ ]
+ },
+ "AssistantMessage": {
+ "type": "object",
+ "properties": {
+ "role": {
+ "type": "string",
+ "description": "This is the role of the message author",
+ "default": "assistant",
+ "enum": [
+ "assistant"
+ ]
},
- "modelOutputInMessagesEnabled": {
- "type": "boolean",
- "description": "This determines whether the model's output is used in conversation history rather than the transcription of assistant's speech.\n\nDefault `false` while in beta.\n\n@default false",
- "example": false
+ "content": {
+ "type": "string",
+ "description": "This is the content of the assistant message",
+ "maxLength": 10000
},
- "transportConfigurations": {
+ "refusal": {
+ "type": "string",
+ "description": "This is the refusal message generated by the model",
+ "maxLength": 10000
+ },
+ "tool_calls": {
+ "description": "This is the tool calls generated by the model",
"type": "array",
- "description": "These are the configurations to be passed to the transport providers of assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used.",
"items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransportConfigurationTwilio",
- "title": "Twilio"
- }
- ]
+ "$ref": "#/components/schemas/ToolCall"
}
},
- "observabilityPlan": {
- "description": "This is the plan for observability of assistant's calls.\n\nCurrently, only Langfuse is supported.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/LangfuseObservabilityPlan",
- "title": "Langfuse"
- }
- ],
- "allOf": [
- {
- "$ref": "#/components/schemas/LangfuseObservabilityPlan"
- }
+ "name": {
+ "type": "string",
+ "description": "This is an optional name for the participant",
+ "maxLength": 40
+ },
+ "metadata": {
+ "type": "object",
+ "description": "This is an optional metadata for the message"
+ }
+ },
+ "required": [
+ "role"
+ ]
+ },
+ "ToolMessage": {
+ "type": "object",
+ "properties": {
+ "role": {
+ "type": "string",
+ "description": "This is the role of the message author",
+ "default": "tool",
+ "enum": [
+ "tool"
]
},
- "credentials": {
- "type": "array",
- "description": "These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
- "title": "AnthropicCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "title": "AnyscaleCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "title": "AssemblyAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureCredentialDTO",
- "title": "AzureCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "title": "AzureOpenAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "title": "ByoSipTrunkCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
- "title": "CartesiaCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
- "title": "CerebrasCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
- "title": "CloudflareCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "title": "CustomLLMCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
- "title": "DeepgramCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "title": "DeepInfraCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "title": "DeepSeekCredential"
- },
- {
- "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "title": "ElevenLabsCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGcpCredentialDTO",
- "title": "GcpCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
- "title": "GladiaCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "title": "GhlCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
- "title": "GoogleCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGroqCredentialDTO",
- "title": "GroqCredential"
- },
- {
- "$ref": "#/components/schemas/CreateHumeCredentialDTO",
- "title": "HumeCredential"
- },
- {
- "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
- "title": "InflectionAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
- "title": "LangfuseCredential"
- },
- {
- "$ref": "#/components/schemas/CreateLmntCredentialDTO",
- "title": "LmntCredential"
- },
- {
- "$ref": "#/components/schemas/CreateMakeCredentialDTO",
- "title": "MakeCredential"
- },
- {
- "$ref": "#/components/schemas/CreateMistralCredentialDTO",
- "title": "MistralCredential"
- },
- {
- "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "title": "NeuphonicCredential"
- },
- {
- "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
- "title": "OpenAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "title": "OpenRouterCredential"
- },
- {
- "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "title": "PerplexityAICredential"
- },
- {
- "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
- "title": "PlayHTCredential"
- },
- {
- "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
- "title": "RimeAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
- "title": "RunpodCredential"
- },
- {
- "$ref": "#/components/schemas/CreateS3CredentialDTO",
- "title": "S3Credential"
- },
- {
- "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
- "title": "SmallestAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "title": "SpeechmaticsCredential"
- },
- {
- "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
- "title": "SupabaseCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTavusCredentialDTO",
- "title": "TavusCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
- "title": "TogetherAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
- "title": "TrieveCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
- "title": "TwilioCredential"
- },
- {
- "$ref": "#/components/schemas/CreateVonageCredentialDTO",
- "title": "VonageCredential"
- },
- {
- "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
- "title": "WebhookCredential"
- },
- {
- "$ref": "#/components/schemas/CreateXAiCredentialDTO",
- "title": "XAiCredential"
+ "content": {
+ "type": "string",
+ "description": "This is the content of the tool message",
+ "maxLength": 10000
+ },
+ "tool_call_id": {
+ "type": "string",
+ "description": "This is the ID of the tool call this message is responding to"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is an optional name for the participant",
+ "maxLength": 40
+ },
+ "metadata": {
+ "type": "object",
+ "description": "This is an optional metadata for the message"
+ }
+ },
+ "required": [
+ "role",
+ "content",
+ "tool_call_id"
+ ]
+ },
+ "FunctionCall": {
+ "type": "object",
+ "properties": {
+ "arguments": {
+ "type": "string",
+ "description": "This is the arguments to call the function with"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the function to call",
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "arguments",
+ "name"
+ ]
+ },
+ "Chat": {
+ "type": "object",
+ "properties": {
+ "assistantId": {
+ "type": "string",
+ "description": "This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead."
+ },
+ "assistant": {
+ "description": "This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ },
+ "assistantOverrides": {
+ "description": "These are the variable values that will be used to replace template variables in the assistant messages.\nOnly variable substitution is supported in chat contexts - other assistant properties cannot be overridden.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides"
+ }
+ ]
+ },
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for the chat. To use a transient squad, use `squad` instead."
+ },
+ "squad": {
+ "description": "This is the squad that will be used for the chat. To use an existing squad, use `squadId` instead.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateSquadDTO"
+ }
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the chat. This is just for your own reference.",
+ "maxLength": 40
+ },
+ "sessionId": {
+ "type": "string",
+ "description": "This is the ID of the session that will be used for the chat.\nMutually exclusive with previousChatId."
+ },
+ "input": {
+ "description": "This is the input text for the chat.\nCan be a string or an array of chat messages.",
+ "oneOf": [
+ {
+ "type": "string",
+ "title": "String"
+ },
+ {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
+ },
+ {
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
+ },
+ {
+ "$ref": "#/components/schemas/AssistantMessage",
+ "title": "AssistantMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessage",
+ "title": "ToolMessage"
+ },
+ {
+ "$ref": "#/components/schemas/DeveloperMessage",
+ "title": "DeveloperMessage"
+ }
+ ]
},
+ "title": "MessageArray"
+ }
+ ],
+ "examples": [
+ "Hello, how can you help me?",
+ [
{
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "title": "GoogleCalendarOAuth2ClientCredential"
- },
+ "role": "user",
+ "content": "Hello, how can you help me?"
+ }
+ ]
+ ]
+ },
+ "stream": {
+ "type": "boolean",
+ "description": "This is a flag that determines whether the response should be streamed.\nWhen true, the response will be sent as chunks of text.",
+ "default": false
+ },
+ "previousChatId": {
+ "type": "string",
+ "description": "This is the ID of the chat that will be used as context for the new chat.\nThe messages from the previous chat will be used as context.\nMutually exclusive with sessionId."
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the chat."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this chat belongs to."
+ },
+ "messages": {
+ "type": "array",
+ "description": "This is an array of messages used as context for the chat.\nUsed to provide message history for multi-turn conversations.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "title": "GoogleCalendarOAuth2AuthorizationCredential"
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
},
{
- "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "title": "GoogleSheetsOAuth2AuthorizationCredential"
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
},
{
- "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "title": "SlackOAuth2AuthorizationCredential"
+ "$ref": "#/components/schemas/AssistantMessage",
+ "title": "AssistantMessage"
},
{
- "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "title": "GoHighLevelMCPCredential"
+ "$ref": "#/components/schemas/ToolMessage",
+ "title": "ToolMessage"
},
{
- "$ref": "#/components/schemas/CreateInworldCredentialDTO",
- "title": "InworldCredential"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
- "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "azure": "#/components/schemas/CreateAzureCredentialDTO",
- "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
- "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
- "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
- "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
- "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "gcp": "#/components/schemas/CreateGcpCredentialDTO",
- "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
- "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "google": "#/components/schemas/CreateGoogleCredentialDTO",
- "groq": "#/components/schemas/CreateGroqCredentialDTO",
- "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
- "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
- "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
- "make": "#/components/schemas/CreateMakeCredentialDTO",
- "openai": "#/components/schemas/CreateOpenAICredentialDTO",
- "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
- "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
- "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
- "s3": "#/components/schemas/CreateS3CredentialDTO",
- "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
- "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
- "tavus": "#/components/schemas/CreateTavusCredentialDTO",
- "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
- "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
- "vonage": "#/components/schemas/CreateVonageCredentialDTO",
- "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
- "xai": "#/components/schemas/CreateXAiCredentialDTO",
- "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "hume": "#/components/schemas/CreateHumeCredentialDTO",
- "mistral": "#/components/schemas/CreateMistralCredentialDTO",
- "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
- "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "inworld": "#/components/schemas/CreateInworldCredentialDTO",
- "minimax": "#/components/schemas/CreateMinimaxCredentialDTO"
+ "$ref": "#/components/schemas/DeveloperMessage",
+ "title": "DeveloperMessage"
}
- }
+ ]
}
},
- "hooks": {
+ "output": {
"type": "array",
- "description": "This is a set of actions that will be performed on certain events.",
+ "description": "This is the output messages generated by the system in response to the input.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/CallHookCallEnding",
- "title": "CallHookCallEnding"
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
},
{
- "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
- "title": "CallHookAssistantSpeechInterrupted"
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
},
{
- "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
- "title": "CallHookCustomerSpeechInterrupted"
+ "$ref": "#/components/schemas/AssistantMessage",
+ "title": "AssistantMessage"
},
{
- "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
- "title": "CallHookCustomerSpeechTimeout"
+ "$ref": "#/components/schemas/ToolMessage",
+ "title": "ToolMessage"
+ },
+ {
+ "$ref": "#/components/schemas/DeveloperMessage",
+ "title": "DeveloperMessage"
}
]
}
},
- "variableValues": {
- "type": "object",
- "description": "These are values that will be used to replace the template variables in the assistant messages and other text-based fields.\nThis uses LiquidJS syntax. https://liquidjs.com/tutorials/intro-to-liquid.html\n\nSo for example, `{{ name }}` will be replaced with the value of `name` in `variableValues`.\n`{{\"now\" | date: \"%b %d, %Y, %I:%M %p\", \"America/New_York\"}}` will be replaced with the current date and time in New York.\n Some VAPI reserved defaults:\n - *customer* - the customer object"
- },
- "name": {
- "type": "string",
- "description": "This is the name of the assistant.\n\nThis is required when you want to transfer between assistants in a call.",
- "maxLength": 40
- },
- "voicemailMessage": {
+ "createdAt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the message that the assistant will say if the call is forwarded to voicemail.\n\nIf unspecified, it will hang up.",
- "maxLength": 1000
+ "description": "This is the ISO 8601 date-time string of when the chat was created."
},
- "endCallMessage": {
+ "updatedAt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the message that the assistant will say if it ends the call.\n\nIf unspecified, it will hang up without saying anything.",
- "maxLength": 1000
+ "description": "This is the ISO 8601 date-time string of when the chat was last updated."
},
- "endCallPhrases": {
- "description": "This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive.",
+ "costs": {
"type": "array",
+ "description": "These are the costs of individual components of the chat in USD.",
"items": {
- "type": "string",
- "maxLength": 140,
- "minLength": 2
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ModelCost",
+ "title": "ModelCost"
+ },
+ {
+ "$ref": "#/components/schemas/ChatCost",
+ "title": "ChatCost"
+ }
+ ]
}
},
- "compliancePlan": {
- "$ref": "#/components/schemas/CompliancePlan"
+ "cost": {
+ "type": "number",
+ "description": "This is the cost of the chat in USD."
+ }
+ },
+ "required": [
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "TwilioSMSChatTransport": {
+ "type": "object",
+ "properties": {
+ "conversationType": {
+ "type": "string",
+ "description": "This is the conversation type of the call (ie, voice or chat).",
+ "default": "chat",
+ "enum": [
+ "chat"
+ ]
},
- "metadata": {
- "type": "object",
- "description": "This is for metadata you want to store on the assistant."
+ "phoneNumberId": {
+ "type": "string",
+ "description": "This is the phone number that will be used to send the SMS.\nIf provided, will create a new session. If not provided, uses existing session's phoneNumberId.\nThe phone number must have SMS enabled and belong to your organization."
},
- "backgroundSpeechDenoisingPlan": {
- "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nSmart denoising can be combined with or used independently of Fourier denoising.\n\nOrder of precedence:\n- Smart denoising\n- Fourier denoising",
+ "customer": {
+ "description": "This is the customer who will receive the SMS.\nIf provided, will create a new session. If not provided, uses existing session's customer.",
"allOf": [
{
- "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
+ "$ref": "#/components/schemas/CreateCustomerDTO"
}
]
},
- "analysisPlan": {
- "description": "This is the plan for analysis of assistant's calls. Stored in `call.analysis`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/AnalysisPlan"
- }
+ "customerId": {
+ "type": "string",
+ "description": "This is the customerId of the customer who will receive the SMS."
+ },
+ "useLLMGeneratedMessageForOutbound": {
+ "type": "boolean",
+ "description": "Whether to use LLM-generated messages for outbound SMS.\nWhen true (default), input is processed by the assistant for a response.\nWhen false, the input text is forwarded directly as the SMS message without LLM processing.\nUseful for sending pre-defined messages or notifications.",
+ "default": true
+ },
+ "type": {
+ "type": "string",
+ "description": "The type of transport to use for sending the chat response.\nCurrently supports 'twilio.sms' for SMS delivery via Twilio.",
+ "enum": [
+ "twilio.sms"
]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "CreateChatDTO": {
+ "type": "object",
+ "properties": {
+ "assistantId": {
+ "type": "string",
+ "description": "This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead."
},
- "artifactPlan": {
- "description": "This is the plan for artifacts generated during assistant's calls. Stored in `call.artifact`.",
+ "assistant": {
+ "description": "This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead.",
"allOf": [
{
- "$ref": "#/components/schemas/ArtifactPlan"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
]
},
- "startSpeakingPlan": {
- "description": "This is the plan for when the assistant should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
+ "assistantOverrides": {
+ "description": "These are the variable values that will be used to replace template variables in the assistant messages.\nOnly variable substitution is supported in chat contexts - other assistant properties cannot be overridden.",
"allOf": [
{
- "$ref": "#/components/schemas/StartSpeakingPlan"
+ "$ref": "#/components/schemas/AssistantOverrides"
}
]
},
- "stopSpeakingPlan": {
- "description": "This is the plan for when assistant should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for the chat. To use a transient squad, use `squad` instead."
+ },
+ "squad": {
+ "description": "This is the squad that will be used for the chat. To use an existing squad, use `squadId` instead.",
"allOf": [
{
- "$ref": "#/components/schemas/StopSpeakingPlan"
+ "$ref": "#/components/schemas/CreateSquadDTO"
}
]
},
- "monitorPlan": {
- "description": "This is the plan for real-time monitoring of the assistant's calls.\n\nUsage:\n- To enable live listening of the assistant's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the assistant's calls, set `monitorPlan.controlEnabled` to `true`.",
- "allOf": [
+ "name": {
+ "type": "string",
+ "description": "This is the name of the chat. This is just for your own reference.",
+ "maxLength": 40
+ },
+ "sessionId": {
+ "type": "string",
+ "description": "This is the ID of the session that will be used for the chat.\nMutually exclusive with previousChatId."
+ },
+ "input": {
+ "description": "This is the input text for the chat.\nCan be a string or an array of chat messages.\nThis field is REQUIRED for chat creation.",
+ "oneOf": [
{
- "$ref": "#/components/schemas/MonitorPlan"
+ "type": "string",
+ "title": "String"
+ },
+ {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
+ },
+ {
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
+ },
+ {
+ "$ref": "#/components/schemas/AssistantMessage",
+ "title": "AssistantMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessage",
+ "title": "ToolMessage"
+ },
+ {
+ "$ref": "#/components/schemas/DeveloperMessage",
+ "title": "DeveloperMessage"
+ }
+ ]
+ },
+ "title": "MessageArray"
}
+ ],
+ "examples": [
+ "Hello, how can you help me?",
+ [
+ {
+ "role": "user",
+ "content": "Hello, how can you help me?"
+ }
+ ]
]
},
- "credentialIds": {
- "description": "These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
- "type": "array",
- "items": {
- "type": "string"
- }
+ "stream": {
+ "type": "boolean",
+ "description": "This is a flag that determines whether the response should be streamed.\nWhen true, the response will be sent as chunks of text.",
+ "default": false
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server.url\n2. phoneNumber.serverUrl\n3. org.serverUrl",
+ "previousChatId": {
+ "type": "string",
+ "description": "This is the ID of the chat that will be used as context for the new chat.\nThe messages from the previous chat will be used as context.\nMutually exclusive with sessionId."
+ },
+ "transport": {
+ "description": "This is used to send the chat through a transport like SMS.\nIf transport.phoneNumberId and transport.customer are provided, creates a new session.\nIf sessionId is provided without transport fields, uses existing session data.\nCannot specify both sessionId and transport fields (phoneNumberId/customer) together.",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/TwilioSMSChatTransport"
}
]
+ }
+ },
+ "required": [
+ "input"
+ ]
+ },
+ "GetChatPaginatedDTO": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the chat to filter by."
},
- "keypadInputPlan": {
- "$ref": "#/components/schemas/KeypadInputPlan"
+ "assistantId": {
+ "type": "string",
+ "description": "This is the unique identifier for the assistant that will be used for the chat."
+ },
+ "assistantIdAny": {
+ "type": "string",
+ "description": "Filter by multiple assistant IDs. Provide as comma-separated values.",
+ "example": "assistant-1,assistant-2,assistant-3"
+ },
+ "squadId": {
+ "type": "string",
+ "description": "This is the unique identifier for the squad that will be used for the chat."
+ },
+ "sessionId": {
+ "type": "string",
+ "description": "This is the unique identifier for the session that will be used for the chat."
+ },
+ "previousChatId": {
+ "type": "string",
+ "description": "This is the unique identifier for the previous chat to filter by."
+ },
+ "page": {
+ "type": "number",
+ "description": "This is the page number to return. Defaults to 1.",
+ "minimum": 1
+ },
+ "sortOrder": {
+ "type": "string",
+ "description": "This is the sort order for pagination. Defaults to 'DESC'.",
+ "enum": [
+ "ASC",
+ "DESC"
+ ]
+ },
+ "limit": {
+ "type": "number",
+ "description": "This is the maximum number of items to return. Defaults to 100.",
+ "minimum": 0,
+ "maximum": 1000
+ },
+ "createdAtGt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the createdAt is greater than the specified value."
+ },
+ "createdAtLt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the createdAt is less than the specified value."
+ },
+ "createdAtGe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the createdAt is greater than or equal to the specified value."
+ },
+ "createdAtLe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the createdAt is less than or equal to the specified value."
+ },
+ "updatedAtGt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is greater than the specified value."
+ },
+ "updatedAtLt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is less than the specified value."
+ },
+ "updatedAtGe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is greater than or equal to the specified value."
+ },
+ "updatedAtLe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is less than or equal to the specified value."
}
}
},
- "SquadMemberDTO": {
+ "ChatPaginatedResponse": {
"type": "object",
"properties": {
- "assistantId": {
- "type": "string",
- "nullable": true,
- "description": "This is the assistant that will be used for the call. To use a transient assistant, use `assistant` instead."
- },
- "assistant": {
- "description": "This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateAssistantDTO"
- }
- ]
- },
- "assistantOverrides": {
- "description": "This can be used to override the assistant's settings and provide values for it's template variables.",
- "allOf": [
- {
- "$ref": "#/components/schemas/AssistantOverrides"
- }
- ]
- },
- "assistantDestinations": {
- "description": "These are the others assistants that this assistant can transfer to.\n\nIf the assistant already has transfer call tool, these destinations are just appended to existing ones.",
+ "results": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/TransferDestinationAssistant"
+ "$ref": "#/components/schemas/Chat"
}
+ },
+ "metadata": {
+ "$ref": "#/components/schemas/PaginationMeta"
}
- }
+ },
+ "required": [
+ "results",
+ "metadata"
+ ]
},
- "CreateSquadDTO": {
+ "CreateChatStreamResponse": {
"type": "object",
"properties": {
- "name": {
+ "id": {
"type": "string",
- "description": "This is the name of the squad."
+ "description": "This is the unique identifier for the streaming response."
},
- "members": {
- "description": "This is the list of assistants that make up the squad.\n\nThe call will start with the first assistant in the list.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/SquadMemberDTO"
- }
+ "sessionId": {
+ "type": "string",
+ "description": "This is the ID of the session that will be used for the chat.\nHelps track conversation context across multiple messages."
},
- "membersOverrides": {
- "description": "This can be used to override all the assistants' settings and provide values for their template variables.\n\nBoth `membersOverrides` and `members[n].assistantOverrides` can be used together. First, `members[n].assistantOverrides` is applied. Then, `membersOverrides` is applied as a global override.",
- "allOf": [
- {
- "$ref": "#/components/schemas/AssistantOverrides"
- }
- ]
+ "path": {
+ "type": "string",
+ "description": "This is the path to the content being updated.\nFormat: `chat.output[{contentIndex}].content` where contentIndex identifies the specific content item.",
+ "example": "chat.output[0].content"
+ },
+ "delta": {
+ "type": "string",
+ "description": "This is the incremental content chunk being streamed."
}
},
"required": [
- "members"
+ "id",
+ "path",
+ "delta"
]
},
- "CreateWorkflowDTO": {
+ "OpenAIResponsesRequest": {
"type": "object",
"properties": {
- "nodes": {
- "type": "array",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ConversationNode",
- "title": "ConversationNode"
- },
- {
- "$ref": "#/components/schemas/ToolNode",
- "title": "ToolNode"
- }
- ]
- }
+ "assistantId": {
+ "type": "string",
+ "description": "This is the assistant that will be used for the chat. To use a transient assistant, use `assistant` instead."
},
- "model": {
- "description": "This is the model for the workflow.\n\nThis can be overridden at node level using `nodes[n].model`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/WorkflowOpenAIModel",
- "title": "WorkflowOpenAIModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowAnthropicModel",
- "title": "WorkflowAnthropicModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowGoogleModel",
- "title": "WorkflowGoogleModel"
- },
+ "assistant": {
+ "description": "This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead.",
+ "allOf": [
{
- "$ref": "#/components/schemas/WorkflowCustomModel",
- "title": "WorkflowCustomModel"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
]
},
- "transcriber": {
- "description": "This is the transcriber for the workflow.\n\nThis can be overridden at node level using `nodes[n].transcriber`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AssemblyAITranscriber",
- "title": "AssemblyAITranscriber"
- },
- {
- "$ref": "#/components/schemas/AzureSpeechTranscriber",
- "title": "AzureSpeechTranscriber"
- },
- {
- "$ref": "#/components/schemas/CustomTranscriber",
- "title": "CustomTranscriber"
- },
- {
- "$ref": "#/components/schemas/DeepgramTranscriber",
- "title": "DeepgramTranscriber"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsTranscriber",
- "title": "ElevenLabsTranscriber"
- },
- {
- "$ref": "#/components/schemas/GladiaTranscriber",
- "title": "GladiaTranscriber"
- },
- {
- "$ref": "#/components/schemas/GoogleTranscriber",
- "title": "GoogleTranscriber"
- },
- {
- "$ref": "#/components/schemas/SpeechmaticsTranscriber",
- "title": "SpeechmaticsTranscriber"
- },
- {
- "$ref": "#/components/schemas/TalkscriberTranscriber",
- "title": "TalkscriberTranscriber"
- },
- {
- "$ref": "#/components/schemas/OpenAITranscriber",
- "title": "OpenAITranscriber"
- },
+ "assistantOverrides": {
+ "description": "These are the variable values that will be used to replace template variables in the assistant messages.\nOnly variable substitution is supported in chat contexts - other assistant properties cannot be overridden.",
+ "allOf": [
{
- "$ref": "#/components/schemas/CartesiaTranscriber",
- "title": "CartesiaTranscriber"
+ "$ref": "#/components/schemas/AssistantOverrides"
}
]
},
- "voice": {
- "description": "This is the voice for the workflow.\n\nThis can be overridden at node level using `nodes[n].voice`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AzureVoice",
- "title": "AzureVoice"
- },
- {
- "$ref": "#/components/schemas/CartesiaVoice",
- "title": "CartesiaVoice"
- },
- {
- "$ref": "#/components/schemas/CustomVoice",
- "title": "CustomVoice"
- },
- {
- "$ref": "#/components/schemas/DeepgramVoice",
- "title": "DeepgramVoice"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsVoice",
- "title": "ElevenLabsVoice"
- },
- {
- "$ref": "#/components/schemas/HumeVoice",
- "title": "HumeVoice"
- },
- {
- "$ref": "#/components/schemas/LMNTVoice",
- "title": "LMNTVoice"
- },
- {
- "$ref": "#/components/schemas/NeuphonicVoice",
- "title": "NeuphonicVoice"
- },
- {
- "$ref": "#/components/schemas/OpenAIVoice",
- "title": "OpenAIVoice"
- },
- {
- "$ref": "#/components/schemas/PlayHTVoice",
- "title": "PlayHTVoice"
- },
- {
- "$ref": "#/components/schemas/RimeAIVoice",
- "title": "RimeAIVoice"
- },
- {
- "$ref": "#/components/schemas/SmallestAIVoice",
- "title": "SmallestAIVoice"
- },
- {
- "$ref": "#/components/schemas/TavusVoice",
- "title": "TavusVoice"
- },
- {
- "$ref": "#/components/schemas/VapiVoice",
- "title": "VapiVoice"
- },
- {
- "$ref": "#/components/schemas/SesameVoice",
- "title": "SesameVoice"
- },
- {
- "$ref": "#/components/schemas/InworldVoice",
- "title": "InworldVoice"
- },
- {
- "$ref": "#/components/schemas/MinimaxVoice",
- "title": "MinimaxVoice"
- }
- ]
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for the chat. To use a transient squad, use `squad` instead."
},
- "observabilityPlan": {
- "description": "This is the plan for observability of workflow's calls.\n\nCurrently, only Langfuse is supported.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/LangfuseObservabilityPlan",
- "title": "Langfuse"
- }
- ],
+ "squad": {
+ "description": "This is the squad that will be used for the chat. To use an existing squad, use `squadId` instead.",
"allOf": [
{
- "$ref": "#/components/schemas/LangfuseObservabilityPlan"
+ "$ref": "#/components/schemas/CreateSquadDTO"
}
]
},
- "backgroundSound": {
- "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
+ "name": {
+ "type": "string",
+ "description": "This is the name of the chat. This is just for your own reference.",
+ "maxLength": 40
+ },
+ "sessionId": {
+ "type": "string",
+ "description": "This is the ID of the session that will be used for the chat.\nMutually exclusive with previousChatId."
+ },
+ "input": {
+ "description": "This is the input text for the chat.\nCan be a string or an array of chat messages.\nThis field is REQUIRED for chat creation.",
"oneOf": [
{
- "type": "enum",
- "enum": [
- "off",
- "office"
- ],
- "example": "office"
+ "type": "string",
+ "title": "String"
},
{
- "type": "string",
- "format": "uri",
- "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
- }
- ]
- },
- "hooks": {
- "type": "array",
- "description": "This is a set of actions that will be performed on certain events.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/CallHookCallEnding",
- "title": "CallHookCallEnding"
- },
- {
- "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
- "title": "CallHookAssistantSpeechInterrupted"
- },
- {
- "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
- "title": "CallHookCustomerSpeechInterrupted"
- },
- {
- "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
- "title": "CallHookCustomerSpeechTimeout"
- }
- ]
- }
- },
- "credentials": {
- "type": "array",
- "description": "These are dynamic credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
- "title": "AnthropicCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "title": "AnyscaleCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "title": "AssemblyAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureCredentialDTO",
- "title": "AzureCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "title": "AzureOpenAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "title": "ByoSipTrunkCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
- "title": "CartesiaCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
- "title": "CerebrasCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
- "title": "CloudflareCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "title": "CustomLLMCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
- "title": "DeepgramCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "title": "DeepInfraCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "title": "DeepSeekCredential"
- },
- {
- "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "title": "ElevenLabsCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGcpCredentialDTO",
- "title": "GcpCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
- "title": "GladiaCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "title": "GhlCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
- "title": "GoogleCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGroqCredentialDTO",
- "title": "GroqCredential"
- },
- {
- "$ref": "#/components/schemas/CreateHumeCredentialDTO",
- "title": "HumeCredential"
- },
- {
- "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
- "title": "InflectionAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
- "title": "LangfuseCredential"
- },
- {
- "$ref": "#/components/schemas/CreateLmntCredentialDTO",
- "title": "LmntCredential"
- },
- {
- "$ref": "#/components/schemas/CreateMakeCredentialDTO",
- "title": "MakeCredential"
- },
- {
- "$ref": "#/components/schemas/CreateMistralCredentialDTO",
- "title": "MistralCredential"
- },
- {
- "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "title": "NeuphonicCredential"
- },
- {
- "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
- "title": "OpenAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "title": "OpenRouterCredential"
- },
- {
- "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "title": "PerplexityAICredential"
- },
- {
- "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
- "title": "PlayHTCredential"
- },
- {
- "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
- "title": "RimeAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
- "title": "RunpodCredential"
- },
- {
- "$ref": "#/components/schemas/CreateS3CredentialDTO",
- "title": "S3Credential"
- },
- {
- "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
- "title": "SmallestAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "title": "SpeechmaticsCredential"
- },
- {
- "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
- "title": "SupabaseCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTavusCredentialDTO",
- "title": "TavusCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
- "title": "TogetherAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
- "title": "TrieveCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
- "title": "TwilioCredential"
- },
- {
- "$ref": "#/components/schemas/CreateVonageCredentialDTO",
- "title": "VonageCredential"
- },
- {
- "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
- "title": "WebhookCredential"
- },
- {
- "$ref": "#/components/schemas/CreateXAiCredentialDTO",
- "title": "XAiCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "title": "GoogleCalendarOAuth2ClientCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "title": "GoogleCalendarOAuth2AuthorizationCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "title": "GoogleSheetsOAuth2AuthorizationCredential"
- },
- {
- "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "title": "SlackOAuth2AuthorizationCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "title": "GoHighLevelMCPCredential"
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
+ },
+ {
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
+ },
+ {
+ "$ref": "#/components/schemas/AssistantMessage",
+ "title": "AssistantMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessage",
+ "title": "ToolMessage"
+ },
+ {
+ "$ref": "#/components/schemas/DeveloperMessage",
+ "title": "DeveloperMessage"
+ }
+ ]
},
+ "title": "MessageArray"
+ }
+ ],
+ "examples": [
+ "Hello, how can you help me?",
+ [
{
- "$ref": "#/components/schemas/CreateInworldCredentialDTO",
- "title": "InworldCredential"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
- "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "azure": "#/components/schemas/CreateAzureCredentialDTO",
- "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
- "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
- "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
- "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
- "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "gcp": "#/components/schemas/CreateGcpCredentialDTO",
- "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
- "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "google": "#/components/schemas/CreateGoogleCredentialDTO",
- "groq": "#/components/schemas/CreateGroqCredentialDTO",
- "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
- "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
- "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
- "make": "#/components/schemas/CreateMakeCredentialDTO",
- "openai": "#/components/schemas/CreateOpenAICredentialDTO",
- "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
- "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
- "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
- "s3": "#/components/schemas/CreateS3CredentialDTO",
- "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
- "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
- "tavus": "#/components/schemas/CreateTavusCredentialDTO",
- "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
- "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
- "vonage": "#/components/schemas/CreateVonageCredentialDTO",
- "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
- "xai": "#/components/schemas/CreateXAiCredentialDTO",
- "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "hume": "#/components/schemas/CreateHumeCredentialDTO",
- "mistral": "#/components/schemas/CreateMistralCredentialDTO",
- "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
- "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "inworld": "#/components/schemas/CreateInworldCredentialDTO",
- "minimax": "#/components/schemas/CreateMinimaxCredentialDTO"
+ "role": "user",
+ "content": "Hello, how can you help me?"
}
- }
- }
+ ]
+ ]
},
- "name": {
+ "stream": {
+ "type": "boolean",
+ "description": "Whether to stream the response or not.",
+ "default": true
+ },
+ "previousChatId": {
"type": "string",
- "maxLength": 80
+ "description": "This is the ID of the chat that will be used as context for the new chat.\nThe messages from the previous chat will be used as context.\nMutually exclusive with sessionId."
},
- "edges": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/Edge"
+ "transport": {
+ "description": "This is used to send the chat through a transport like SMS.\nIf transport.phoneNumberId and transport.customer are provided, creates a new session.\nIf sessionId is provided without transport fields, uses existing session data.\nCannot specify both sessionId and transport fields (phoneNumberId/customer) together.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/TwilioSMSChatTransport"
+ }
+ ]
+ }
+ },
+ "required": [
+ "input"
+ ]
+ },
+ "ChatAssistantOverrides": {
+ "type": "object",
+ "properties": {
+ "variableValues": {
+ "type": "object",
+ "description": "Variable values for template substitution",
+ "example": {
+ "name": "John",
+ "company": "ACME Corp"
}
+ }
+ }
+ },
+ "CreateWebCustomerDTO": {
+ "type": "object",
+ "properties": {
+ "numberE164CheckEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.\n\nUse cases:\n- `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.\n- `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.\n\nIf `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\\+?[a-zA-Z0-9]+$/`).\n\n@default true (E164 check is enabled)",
+ "default": true
},
- "globalPrompt": {
+ "extension": {
"type": "string",
- "maxLength": 5000
+ "description": "This is the extension that will be dialed after the call is answered.",
+ "maxLength": 10,
+ "example": null
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. tool.server\n2. workflow.server / assistant.server\n3. phoneNumber.server\n4. org.server",
+ "assistantOverrides": {
+ "description": "These are the variable values that will be used to replace template variables in the assistant messages.\nOnly variable substitution is supported in web chat - other assistant properties cannot be overridden.",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/ChatAssistantOverrides"
}
]
},
- "compliancePlan": {
- "description": "This is the compliance plan for the workflow. It allows you to configure HIPAA and other compliance settings.",
+ "number": {
+ "type": "string",
+ "description": "This is the number of the customer.",
+ "minLength": 3,
+ "maxLength": 40
+ },
+ "sipUri": {
+ "type": "string",
+ "description": "This is the SIP URI of the customer."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the customer. This is just for your own reference.\n\nFor SIP inbound calls, this is extracted from the `From` SIP header with format `\"Display Name\" <sip:username@domain>`.",
+ "maxLength": 40
+ },
+ "email": {
+ "type": "string",
+ "description": "This is the email of the customer.",
+ "maxLength": 40
+ },
+ "externalId": {
+ "type": "string",
+ "description": "This is the external ID of the customer.",
+ "maxLength": 40
+ }
+ }
+ },
+ "CreateWebChatDTO": {
+ "type": "object",
+ "properties": {
+ "assistantId": {
+ "type": "string",
+ "description": "This is the assistant ID to use for this chat. To use a transient assistant, use `assistant` instead."
+ },
+ "assistant": {
+ "description": "This is the transient assistant configuration for this chat. To use an existing assistant, use `assistantId` instead.",
"allOf": [
{
- "$ref": "#/components/schemas/CompliancePlan"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
]
},
- "analysisPlan": {
- "description": "This is the plan for analysis of workflow's calls. Stored in `call.analysis`.",
+ "sessionId": {
+ "type": "string",
+ "description": "This is the ID of the session that will be used for the chat.\nIf provided, the conversation will continue from the previous state.\nIf not provided or expired, a new session will be created."
+ },
+ "sessionExpirationSeconds": {
+ "type": "number",
+ "description": "This is the expiration time for the session. This can ONLY be set if starting a new chat and therefore a new session is created.\nIf session already exists, this will be ignored and NOT be updated for the existing session. Use PATCH /session/:id to update the session expiration time.",
+ "minimum": 60,
+ "maximum": 2592000
+ },
+ "assistantOverrides": {
+ "description": "These are the variable values that will be used to replace template variables in the assistant messages.\nOnly variable substitution is supported in web chat - other assistant properties cannot be overridden.",
"allOf": [
{
- "$ref": "#/components/schemas/AnalysisPlan"
+ "$ref": "#/components/schemas/ChatAssistantOverrides"
}
]
},
- "artifactPlan": {
- "description": "This is the plan for artifacts generated during workflow's calls. Stored in `call.artifact`.",
+ "customer": {
+ "description": "This is the customer information for the chat.\nUsed to automatically manage sessions for repeat customers.",
"allOf": [
{
- "$ref": "#/components/schemas/ArtifactPlan"
+ "$ref": "#/components/schemas/CreateWebCustomerDTO"
}
]
},
- "startSpeakingPlan": {
- "description": "This is the plan for when the workflow nodes should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
- "allOf": [
+ "input": {
+ "description": "This is the input text for the chat.\nCan be a string or an array of chat messages.",
+ "oneOf": [
{
- "$ref": "#/components/schemas/StartSpeakingPlan"
+ "type": "string",
+ "title": "String"
+ },
+ {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
+ },
+ {
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
+ },
+ {
+ "$ref": "#/components/schemas/AssistantMessage",
+ "title": "AssistantMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessage",
+ "title": "ToolMessage"
+ },
+ {
+ "$ref": "#/components/schemas/DeveloperMessage",
+ "title": "DeveloperMessage"
+ }
+ ]
+ },
+ "title": "MessageArray"
}
+ ],
+ "examples": [
+ "Hello, how can you help me?",
+ [
+ {
+ "role": "user",
+ "content": "Hello, how can you help me?"
+ }
+ ]
]
},
- "stopSpeakingPlan": {
- "description": "This is the plan for when workflow nodes should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
+ "stream": {
+ "type": "boolean",
+ "description": "This is a flag that determines whether the response should be streamed.\nWhen true, the response will be sent as chunks of text.",
+ "default": false
+ },
+ "sessionEnd": {
+ "type": "boolean",
+ "description": "This is a flag to indicate end of session. When true, the session will be marked as completed and the chat will be ended.\nUsed to end session to send End-of-session report to the customer.\nWhen flag is set to true, any messages sent will not be processed and session will directly be marked as completed.",
+ "default": false
+ }
+ },
+ "required": [
+ "input"
+ ]
+ },
+ "WebChat": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the chat."
+ },
+ "sessionId": {
+ "type": "string",
+ "description": "This is the ID of the session for the chat. Send it in the next chat request to continue the conversation."
+ },
+ "output": {
+ "type": "array",
+ "description": "This is the output messages generated by the system in response to the input.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
+ },
+ {
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
+ },
+ {
+ "$ref": "#/components/schemas/AssistantMessage",
+ "title": "AssistantMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessage",
+ "title": "ToolMessage"
+ },
+ {
+ "$ref": "#/components/schemas/DeveloperMessage",
+ "title": "DeveloperMessage"
+ }
+ ]
+ }
+ }
+ },
+ "required": [
+ "id",
+ "output"
+ ]
+ },
+ "OpenAIWebChatRequest": {
+ "type": "object",
+ "properties": {
+ "assistantId": {
+ "type": "string",
+ "description": "This is the assistant ID to use for this chat. To use a transient assistant, use `assistant` instead."
+ },
+ "assistant": {
+ "description": "This is the transient assistant configuration for this chat. To use an existing assistant, use `assistantId` instead.",
"allOf": [
{
- "$ref": "#/components/schemas/StopSpeakingPlan"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
]
},
- "monitorPlan": {
- "description": "This is the plan for real-time monitoring of the workflow's calls.\n\nUsage:\n- To enable live listening of the workflow's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the workflow's calls, set `monitorPlan.controlEnabled` to `true`.",
+ "sessionId": {
+ "type": "string",
+ "description": "This is the ID of the session that will be used for the chat.\nIf provided, the conversation will continue from the previous state.\nIf not provided or expired, a new session will be created."
+ },
+ "sessionExpirationSeconds": {
+ "type": "number",
+ "description": "This is the expiration time for the session. This can ONLY be set if starting a new chat and therefore a new session is created.\nIf session already exists, this will be ignored and NOT be updated for the existing session. Use PATCH /session/:id to update the session expiration time.",
+ "minimum": 60,
+ "maximum": 2592000
+ },
+ "assistantOverrides": {
+ "description": "These are the variable values that will be used to replace template variables in the assistant messages.\nOnly variable substitution is supported in web chat - other assistant properties cannot be overridden.",
"allOf": [
{
- "$ref": "#/components/schemas/MonitorPlan"
+ "$ref": "#/components/schemas/ChatAssistantOverrides"
}
]
},
- "backgroundSpeechDenoisingPlan": {
- "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nBoth can be used together. Order of precedence:\n- Smart denoising\n- Fourier denoising",
+ "customer": {
+ "description": "This is the customer information for the chat.\nUsed to automatically manage sessions for repeat customers.",
"allOf": [
{
- "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
+ "$ref": "#/components/schemas/CreateWebCustomerDTO"
}
]
},
- "credentialIds": {
- "description": "These are the credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "keypadInputPlan": {
- "description": "This is the plan for keypad input handling during workflow calls.",
- "allOf": [
+ "input": {
+ "description": "This is the input text for the chat.\nCan be a string or an array of chat messages.",
+ "oneOf": [
{
- "$ref": "#/components/schemas/KeypadInputPlan"
+ "type": "string",
+ "title": "String"
+ },
+ {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
+ },
+ {
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
+ },
+ {
+ "$ref": "#/components/schemas/AssistantMessage",
+ "title": "AssistantMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessage",
+ "title": "ToolMessage"
+ },
+ {
+ "$ref": "#/components/schemas/DeveloperMessage",
+ "title": "DeveloperMessage"
+ }
+ ]
+ },
+ "title": "MessageArray"
}
+ ],
+ "examples": [
+ "Hello, how can you help me?",
+ [
+ {
+ "role": "user",
+ "content": "Hello, how can you help me?"
+ }
+ ]
]
+ },
+ "stream": {
+ "type": "boolean",
+ "description": "Whether to stream the response or not.",
+ "default": true
+ },
+ "sessionEnd": {
+ "type": "boolean",
+ "description": "This is a flag to indicate end of session. When true, the session will be marked as completed and the chat will be ended.\nUsed to end session to send End-of-session report to the customer.\nWhen flag is set to true, any messages sent will not be processed and session will directly be marked as completed.",
+ "default": false
}
},
"required": [
- "nodes",
- "name",
- "edges"
+ "input"
]
},
- "WorkflowOverrides": {
+ "ExportChatDTO": {
"type": "object",
"properties": {
- "variableValues": {
- "type": "object",
- "description": "These are values that will be used to replace the template variables in the workflow messages and other text-based fields.\nThis uses LiquidJS syntax. https://liquidjs.com/tutorials/intro-to-liquid.html\n\nSo for example, `{{ name }}` will be replaced with the value of `name` in `variableValues`.\n`{{\"now\" | date: \"%b %d, %Y, %I:%M %p\", \"America/New_York\"}}` will be replaced with the current date and time in New York.\n Some VAPI reserved defaults:\n - *customer* - the customer object"
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the chat to filter by."
+ },
+ "assistantId": {
+ "type": "string",
+ "description": "This is the unique identifier for the assistant that will be used for the chat."
+ },
+ "assistantIdAny": {
+ "type": "string",
+ "description": "Filter by multiple assistant IDs. Provide as comma-separated values.",
+ "example": "assistant-1,assistant-2,assistant-3"
+ },
+ "squadId": {
+ "type": "string",
+ "description": "This is the unique identifier for the squad that will be used for the chat."
+ },
+ "sessionId": {
+ "type": "string",
+ "description": "This is the unique identifier for the session that will be used for the chat."
+ },
+ "previousChatId": {
+ "type": "string",
+ "description": "This is the unique identifier for the previous chat to filter by."
+ },
+ "columns": {
+ "type": "string",
+ "description": "Columns to include in the CSV export",
+ "enum": [
+ "id",
+ "assistantId",
+ "squadId",
+ "sessionId",
+ "previousChatId",
+ "cost",
+ "messages",
+ "output",
+ "createdAt",
+ "updatedAt"
+ ],
+ "default": [
+ "id",
+ "assistantId",
+ "squadId",
+ "sessionId",
+ "previousChatId",
+ "cost",
+ "messages",
+ "output",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "email": {
+ "type": "string",
+ "description": "This is the email address to send the export to.\nRequired if userId is not available in the request context."
+ },
+ "format": {
+ "type": "string",
+ "description": "This is the format of the export.\n\n@default csv",
+ "enum": [
+ "csv",
+ "json"
+ ],
+ "default": "csv"
+ },
+ "page": {
+ "type": "number",
+ "description": "This is the page number to return. Defaults to 1.",
+ "minimum": 1
+ },
+ "sortOrder": {
+ "type": "string",
+ "description": "This is the sort order for pagination. Defaults to 'DESC'.",
+ "enum": [
+ "ASC",
+ "DESC"
+ ]
+ },
+ "limit": {
+ "type": "number",
+ "description": "This is the maximum number of items to return. Defaults to 100.",
+ "minimum": 0,
+ "maximum": 1000
+ },
+ "createdAtGt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the createdAt is greater than the specified value."
+ },
+ "createdAtLt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the createdAt is less than the specified value."
+ },
+ "createdAtGe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the createdAt is greater than or equal to the specified value."
+ },
+ "createdAtLe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the createdAt is less than or equal to the specified value."
+ },
+ "updatedAtGt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is greater than the specified value."
+ },
+ "updatedAtLt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is less than the specified value."
+ },
+ "updatedAtGe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is greater than or equal to the specified value."
+ },
+ "updatedAtLe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is less than or equal to the specified value."
}
}
},
- "TransferPhoneNumberHookAction": {
+ "ResponseOutputText": {
"type": "object",
"properties": {
- "type": {
- "type": "string",
- "description": "This is the type of action - must be \"transfer\"",
- "enum": [
- "transfer"
- ]
+ "annotations": {
+ "default": [],
+ "description": "Annotations in the text output",
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
},
- "destination": {
- "description": "This is the destination details for the transfer - can be a phone number or SIP URI",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
- }
+ "text": {
+ "type": "string",
+ "description": "The text output from the model"
+ },
+ "type": {
+ "type": "string",
+ "default": "output_text",
+ "description": "The type of the output text",
+ "enum": [
+ "output_text"
]
}
},
"required": [
+ "annotations",
+ "text",
"type"
]
},
- "SayPhoneNumberHookAction": {
+ "ResponseOutputMessage": {
"type": "object",
"properties": {
- "type": {
+ "id": {
"type": "string",
- "description": "This is the type of action - must be \"say\"",
+ "description": "The unique ID of the output message"
+ },
+ "content": {
+ "description": "Content of the output message",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ResponseOutputText"
+ }
+ },
+ "role": {
+ "type": "string",
+ "default": "assistant",
+ "description": "The role of the output message",
"enum": [
- "say"
+ "assistant"
]
},
- "exact": {
+ "status": {
"type": "string",
- "description": "This is the message to say",
- "maxLength": 4000
+ "description": "The status of the message",
+ "enum": [
+ "in_progress",
+ "completed",
+ "incomplete"
+ ]
+ },
+ "type": {
+ "type": "string",
+ "default": "message",
+ "description": "The type of the output message",
+ "enum": [
+ "message"
+ ]
}
},
"required": [
- "type",
- "exact"
+ "id",
+ "content",
+ "role",
+ "status",
+ "type"
]
},
- "PhoneNumberHookCallRinging": {
+ "ResponseObject": {
"type": "object",
"properties": {
- "on": {
+ "id": {
"type": "string",
- "description": "This is the event to trigger the hook on",
+ "description": "Unique identifier for this Response"
+ },
+ "object": {
+ "type": "string",
+ "default": "response",
+ "description": "The object type",
"enum": [
- "call.ringing"
- ],
- "maxLength": 1000
+ "response"
+ ]
},
- "do": {
+ "created_at": {
+ "type": "number",
+ "description": "Unix timestamp (in seconds) of when this Response was created"
+ },
+ "status": {
+ "type": "string",
+ "description": "Status of the response",
+ "enum": [
+ "completed",
+ "failed",
+ "in_progress",
+ "incomplete"
+ ]
+ },
+ "error": {
+ "type": "string",
+ "nullable": true,
+ "default": null,
+ "description": "Error message if the response failed"
+ },
+ "output": {
+ "description": "Output messages from the model",
"type": "array",
- "description": "Only the first action will be executed. Additional actions will be ignored.",
"items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferPhoneNumberHookAction",
- "title": "TransferPhoneNumberHookAction"
- },
- {
- "$ref": "#/components/schemas/SayPhoneNumberHookAction",
- "title": "SayPhoneNumberHookAction"
- }
- ]
+ "$ref": "#/components/schemas/ResponseOutputMessage"
}
}
},
"required": [
- "on",
- "do"
+ "id",
+ "object",
+ "created_at",
+ "status",
+ "output"
]
},
- "PhoneNumberCallEndingHookFilter": {
+ "ResponseTextDeltaEvent": {
"type": "object",
"properties": {
- "type": {
+ "content_index": {
+ "type": "number",
+ "description": "Index of the content part"
+ },
+ "delta": {
"type": "string",
- "description": "This is the type of filter - currently only \"oneOf\" is supported",
- "enum": [
- "oneOf"
- ],
- "maxLength": 1000
+ "description": "Text delta being added"
},
- "key": {
+ "item_id": {
"type": "string",
- "description": "This is the key to filter on - only \"call.endedReason\" is allowed for phone number call ending hooks",
- "enum": [
- "call.endedReason"
- ],
- "maxLength": 1000
+ "description": "ID of the output item"
},
- "oneOf": {
- "type": "array",
- "description": "This is the array of assistant-request related ended reasons to match against",
+ "output_index": {
+ "type": "number",
+ "description": "Index of the output item"
+ },
+ "type": {
+ "type": "string",
+ "default": "response.output_text.delta",
+ "description": "Event type",
"enum": [
- "assistant-request-failed",
- "assistant-request-returned-error",
- "assistant-request-returned-unspeakable-error",
- "assistant-request-returned-invalid-assistant",
- "assistant-request-returned-no-assistant",
- "assistant-request-returned-forwarding-phone-number"
- ],
- "items": {
- "type": "string",
- "enum": [
- "assistant-request-failed",
- "assistant-request-returned-error",
- "assistant-request-returned-unspeakable-error",
- "assistant-request-returned-invalid-assistant",
- "assistant-request-returned-no-assistant",
- "assistant-request-returned-forwarding-phone-number"
- ]
- }
+ "response.output_text.delta"
+ ]
}
},
"required": [
- "type",
- "key",
- "oneOf"
+ "content_index",
+ "delta",
+ "item_id",
+ "output_index",
+ "type"
]
},
- "PhoneNumberHookCallEnding": {
+ "ResponseTextDoneEvent": {
"type": "object",
"properties": {
- "on": {
+ "content_index": {
+ "type": "number",
+ "description": "Index of the content part"
+ },
+ "item_id": {
"type": "string",
- "description": "This is the event to trigger the hook on",
- "enum": [
- "call.ending"
- ],
- "maxLength": 1000
+ "description": "ID of the output item"
},
- "filters": {
- "type": "array",
- "description": "Optional filters to decide when to trigger - restricted to assistant-request related ended reasons",
- "items": {
- "$ref": "#/components/schemas/PhoneNumberCallEndingHookFilter"
- }
+ "output_index": {
+ "type": "number",
+ "description": "Index of the output item"
},
- "do": {
- "description": "This is the action to perform when the hook triggers",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferPhoneNumberHookAction",
- "title": "TransferPhoneNumberHookAction"
- },
- {
- "$ref": "#/components/schemas/SayPhoneNumberHookAction",
- "title": "SayPhoneNumberHookAction"
- }
+ "text": {
+ "type": "string",
+ "description": "Complete text content"
+ },
+ "type": {
+ "type": "string",
+ "default": "response.output_text.done",
+ "description": "Event type",
+ "enum": [
+ "response.output_text.done"
]
}
},
"required": [
- "on"
+ "content_index",
+ "item_id",
+ "output_index",
+ "text",
+ "type"
]
},
- "ImportTwilioPhoneNumberDTO": {
+ "ResponseCompletedEvent": {
"type": "object",
"properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
+ "response": {
+ "description": "The completed response",
+ "allOf": [
{
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
+ "$ref": "#/components/schemas/ResponseObject"
}
]
},
- "hooks": {
- "type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
- },
- {
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
- }
- ]
- }
- },
- "smsEnabled": {
- "type": "boolean",
- "description": "Controls whether Vapi sets the messaging webhook URL on the Twilio number during import.\n\nIf set to `false`, Vapi will not update the Twilio messaging URL, leaving it as is.\nIf `true` or omitted (default), Vapi will configure both the voice and messaging URLs.\n\n@default true",
- "default": true
- },
- "twilioPhoneNumber": {
- "type": "string",
- "description": "These are the digits of the phone number you own on your Twilio.",
- "deprecated": true
- },
- "twilioAccountSid": {
- "type": "string",
- "description": "This is your Twilio Account SID that will be used to handle this phone number."
- },
- "twilioAuthToken": {
- "type": "string",
- "description": "This is the Twilio Auth Token that will be used to handle this phone number."
- },
- "twilioApiKey": {
- "type": "string",
- "description": "This is the Twilio API Key that will be used to handle this phone number. If AuthToken is provided, this will be ignored."
- },
- "twilioApiSecret": {
- "type": "string",
- "description": "This is the Twilio API Secret that will be used to handle this phone number. If AuthToken is provided, this will be ignored."
- },
- "name": {
- "type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
- },
- "assistantId": {
- "type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "squadId": {
+ "type": {
"type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
+ "default": "response.completed",
+ "description": "Event type",
+ "enum": [
+ "response.completed"
]
}
},
"required": [
- "twilioPhoneNumber",
- "twilioAccountSid"
+ "response",
+ "type"
]
},
- "CreateCustomerDTO": {
+ "ResponseErrorEvent": {
"type": "object",
"properties": {
- "numberE164CheckEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.\n\nUse cases:\n- `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.\n- `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.\n\nIf `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\\+?[a-zA-Z0-9]+$/`).\n\n@default true (E164 check is enabled)",
- "default": true
- },
- "extension": {
+ "type": {
"type": "string",
- "description": "This is the extension that will be dialed after the call is answered.",
- "maxLength": 10,
- "example": null
- },
- "assistantOverrides": {
- "description": "These are the overrides for the assistant's settings and template variables specific to this customer.\nThis allows customization of the assistant's behavior for individual customers in batch calls.",
- "allOf": [
- {
- "$ref": "#/components/schemas/AssistantOverrides"
- }
+ "default": "error",
+ "description": "Event type",
+ "enum": [
+ "error"
]
},
- "number": {
- "type": "string",
- "description": "This is the number of the customer.",
- "minLength": 3,
- "maxLength": 40
- },
- "sipUri": {
+ "code": {
"type": "string",
- "description": "This is the SIP URI of the customer."
+ "description": "Error code",
+ "example": "ERR_SOMETHING"
},
- "name": {
+ "message": {
"type": "string",
- "description": "This is the name of the customer. This is just for your own reference.\n\nFor SIP inbound calls, this is extracted from the `From` SIP header with format `\"Display Name\" `.",
- "maxLength": 40
+ "description": "Error message",
+ "example": "Something went wrong"
},
- "email": {
+ "param": {
"type": "string",
- "description": "This is the email of the customer.",
- "maxLength": 40
+ "nullable": true,
+ "description": "Parameter that caused the error"
},
- "externalId": {
- "type": "string",
- "description": "This is the external ID of the customer.",
- "maxLength": 40
+ "sequence_number": {
+ "type": "number",
+ "description": "Sequence number of the event",
+ "example": 1
}
- }
+ },
+ "required": [
+ "type",
+ "code",
+ "message",
+ "sequence_number"
+ ]
},
- "SchedulePlan": {
+ "DialPlanEntry": {
"type": "object",
"properties": {
- "earliestAt": {
- "format": "date-time",
+ "phoneNumberId": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of the earliest time the call can be scheduled."
+ "description": "The phone number ID to use for calling the customers in this entry."
},
- "latestAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of the latest time the call can be scheduled."
+ "customers": {
+ "description": "The list of customers to call using this phone number.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
}
},
"required": [
- "earliestAt"
+ "phoneNumberId",
+ "customers"
]
},
- "Call": {
+ "CreateCampaignDTO": {
"type": "object",
"properties": {
- "type": {
+ "name": {
"type": "string",
- "description": "This is the type of call.",
- "enum": [
- "inboundPhoneCall",
- "outboundPhoneCall",
- "webCall",
- "vapi.websocketCall"
- ]
+ "description": "This is the name of the campaign. This is just for your own reference.",
+ "example": "Q2 Sales Campaign"
},
- "costs": {
- "type": "array",
- "description": "These are the costs of individual components of the call in USD.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransportCost",
- "title": "TransportCost"
- },
- {
- "$ref": "#/components/schemas/TranscriberCost",
- "title": "TranscriberCost"
- },
- {
- "$ref": "#/components/schemas/ModelCost",
- "title": "ModelCost"
- },
- {
- "$ref": "#/components/schemas/VoiceCost",
- "title": "VoiceCost"
- },
- {
- "$ref": "#/components/schemas/VapiCost",
- "title": "VapiCost"
- },
- {
- "$ref": "#/components/schemas/VoicemailDetectionCost",
- "title": "VoicemailDetectionCost"
- },
- {
- "$ref": "#/components/schemas/AnalysisCost",
- "title": "AnalysisCost"
- },
- {
- "$ref": "#/components/schemas/KnowledgeBaseCost",
- "title": "KnowledgeBaseCost"
- }
- ]
- }
+ "assistantId": {
+ "type": "string",
+ "description": "This is the assistant ID that will be used for the campaign calls. Note: Only one of assistantId, workflowId, or squadId can be used."
},
- "messages": {
- "type": "array",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
- },
- {
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/BotMessage",
- "title": "BotMessage"
- },
- {
- "$ref": "#/components/schemas/ToolCallMessage",
- "title": "ToolCallMessage"
- },
- {
- "$ref": "#/components/schemas/ToolCallResultMessage",
- "title": "ToolCallResultMessage"
- }
- ]
- }
+ "workflowId": {
+ "type": "string",
+ "description": "This is the workflow ID that will be used for the campaign calls. Note: Only one of assistantId, workflowId, or squadId can be used."
},
- "phoneCallProvider": {
+ "squadId": {
"type": "string",
- "description": "This is the provider of the call.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
- "deprecated": true,
- "enum": [
- "twilio",
- "vonage",
- "vapi",
- "telnyx"
- ]
+ "description": "This is the squad ID that will be used for the campaign calls. Note: Only one of assistantId, workflowId, or squadId can be used."
},
- "phoneCallTransport": {
+ "phoneNumberId": {
"type": "string",
- "description": "This is the transport of the phone call.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
- "enum": [
- "sip",
- "pstn"
+ "description": "This is the phone number ID that will be used for the campaign calls. Required if dialPlan is not provided. Note: phoneNumberId and dialPlan are mutually exclusive."
+ },
+ "dialPlan": {
+ "description": "This is a list of dial entries, each specifying a phone number and the customers to call using that number. Use this when you want different phone numbers to call different sets of customers. Note: phoneNumberId and dialPlan are mutually exclusive.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/DialPlanEntry"
+ }
+ },
+ "schedulePlan": {
+ "description": "This is the schedule plan for the campaign. Calls will start at startedAt and continue until your organization’s concurrency limit is reached. Any remaining calls will be retried for up to one hour as capacity becomes available. After that hour or after latestAt, whichever comes first, any calls that couldn’t be placed won’t be retried.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SchedulePlan"
+ }
]
},
+ "customers": {
+ "description": "These are the customers that will be called in the campaign. Required if dialPlan is not provided.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
+ }
+ },
+ "required": [
+ "name"
+ ]
+ },
+ "Campaign": {
+ "type": "object",
+ "properties": {
"status": {
"type": "string",
- "description": "This is the status of the call.",
+ "description": "This is the status of the campaign.",
"enum": [
"scheduled",
- "queued",
- "ringing",
"in-progress",
- "forwarding",
"ended"
]
},
"endedReason": {
"type": "string",
- "description": "This is the explanation for how the call ended.",
+ "description": "This is the explanation for how the campaign ended.",
"enum": [
- "call-start-error-neither-assistant-nor-server-set",
- "assistant-request-failed",
- "assistant-request-returned-error",
- "assistant-request-returned-unspeakable-error",
- "assistant-request-returned-invalid-assistant",
- "assistant-request-returned-no-assistant",
- "assistant-request-returned-forwarding-phone-number",
- "scheduled-call-deleted",
- "call.start.error-vapifault-get-org",
- "call.start.error-vapifault-get-subscription",
- "call.start.error-get-assistant",
- "call.start.error-get-phone-number",
- "call.start.error-get-customer",
- "call.start.error-get-resources-validation",
- "call.start.error-vapi-number-international",
- "call.start.error-vapi-number-outbound-daily-limit",
- "call.start.error-get-transport",
- "call.start.error-subscription-wallet-does-not-exist",
- "call.start.error-subscription-frozen",
- "call.start.error-subscription-insufficient-credits",
- "call.start.error-subscription-upgrade-failed",
- "call.start.error-subscription-concurrency-limit-reached",
- "assistant-not-valid",
- "database-error",
- "assistant-not-found",
- "pipeline-error-openai-voice-failed",
- "pipeline-error-cartesia-voice-failed",
- "pipeline-error-deepgram-voice-failed",
- "pipeline-error-eleven-labs-voice-failed",
- "pipeline-error-playht-voice-failed",
- "pipeline-error-lmnt-voice-failed",
- "pipeline-error-azure-voice-failed",
- "pipeline-error-rime-ai-voice-failed",
- "pipeline-error-smallest-ai-voice-failed",
- "pipeline-error-neuphonic-voice-failed",
- "pipeline-error-hume-voice-failed",
- "pipeline-error-sesame-voice-failed",
- "pipeline-error-inworld-voice-failed",
- "pipeline-error-minimax-voice-failed",
- "pipeline-error-tavus-video-failed",
- "call.in-progress.error-vapifault-openai-voice-failed",
- "call.in-progress.error-vapifault-cartesia-voice-failed",
- "call.in-progress.error-vapifault-deepgram-voice-failed",
- "call.in-progress.error-vapifault-eleven-labs-voice-failed",
- "call.in-progress.error-vapifault-playht-voice-failed",
- "call.in-progress.error-vapifault-lmnt-voice-failed",
- "call.in-progress.error-vapifault-azure-voice-failed",
- "call.in-progress.error-vapifault-rime-ai-voice-failed",
- "call.in-progress.error-vapifault-smallest-ai-voice-failed",
- "call.in-progress.error-vapifault-neuphonic-voice-failed",
- "call.in-progress.error-vapifault-hume-voice-failed",
- "call.in-progress.error-vapifault-sesame-voice-failed",
- "call.in-progress.error-vapifault-inworld-voice-failed",
- "call.in-progress.error-vapifault-minimax-voice-failed",
- "call.in-progress.error-vapifault-tavus-video-failed",
- "pipeline-error-vapi-llm-failed",
- "pipeline-error-vapi-400-bad-request-validation-failed",
- "pipeline-error-vapi-401-unauthorized",
- "pipeline-error-vapi-403-model-access-denied",
- "pipeline-error-vapi-429-exceeded-quota",
- "pipeline-error-vapi-500-server-error",
- "pipeline-error-vapi-503-server-overloaded-error",
- "call.in-progress.error-vapifault-vapi-llm-failed",
- "call.in-progress.error-vapifault-vapi-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-vapi-401-unauthorized",
- "call.in-progress.error-vapifault-vapi-403-model-access-denied",
- "call.in-progress.error-vapifault-vapi-429-exceeded-quota",
- "call.in-progress.error-providerfault-vapi-500-server-error",
- "call.in-progress.error-providerfault-vapi-503-server-overloaded-error",
- "pipeline-error-deepgram-transcriber-failed",
- "call.in-progress.error-vapifault-deepgram-transcriber-failed",
- "pipeline-error-gladia-transcriber-failed",
- "call.in-progress.error-vapifault-gladia-transcriber-failed",
- "pipeline-error-speechmatics-transcriber-failed",
- "call.in-progress.error-vapifault-speechmatics-transcriber-failed",
- "pipeline-error-assembly-ai-transcriber-failed",
- "pipeline-error-assembly-ai-returning-400-insufficent-funds",
- "pipeline-error-assembly-ai-returning-400-paid-only-feature",
- "pipeline-error-assembly-ai-returning-401-invalid-credentials",
- "pipeline-error-assembly-ai-returning-500-invalid-schema",
- "pipeline-error-assembly-ai-returning-500-word-boost-parsing-failed",
- "call.in-progress.error-vapifault-assembly-ai-transcriber-failed",
- "call.in-progress.error-vapifault-assembly-ai-returning-400-insufficent-funds",
- "call.in-progress.error-vapifault-assembly-ai-returning-400-paid-only-feature",
- "call.in-progress.error-vapifault-assembly-ai-returning-401-invalid-credentials",
- "call.in-progress.error-vapifault-assembly-ai-returning-500-invalid-schema",
- "call.in-progress.error-vapifault-assembly-ai-returning-500-word-boost-parsing-failed",
- "pipeline-error-talkscriber-transcriber-failed",
- "call.in-progress.error-vapifault-talkscriber-transcriber-failed",
- "pipeline-error-azure-speech-transcriber-failed",
- "call.in-progress.error-vapifault-azure-speech-transcriber-failed",
- "call.in-progress.error-pipeline-no-available-llm-model",
- "worker-shutdown",
- "vonage-disconnected",
- "vonage-failed-to-connect-call",
- "vonage-completed",
- "phone-call-provider-bypass-enabled-but-no-call-received",
- "call.in-progress.error-providerfault-transport-never-connected",
- "call.in-progress.error-vapifault-worker-not-available",
- "call.in-progress.error-vapifault-transport-never-connected",
- "call.in-progress.error-vapifault-transport-connected-but-call-not-active",
- "call.in-progress.error-vapifault-call-started-but-connection-to-transport-missing",
- "call.in-progress.error-vapifault-worker-died",
- "call.in-progress.twilio-completed-call",
- "call.in-progress.sip-completed-call",
- "call.in-progress.error-vapifault-openai-llm-failed",
- "call.in-progress.error-vapifault-azure-openai-llm-failed",
- "call.in-progress.error-vapifault-groq-llm-failed",
- "call.in-progress.error-vapifault-google-llm-failed",
- "call.in-progress.error-vapifault-xai-llm-failed",
- "call.in-progress.error-vapifault-mistral-llm-failed",
- "call.in-progress.error-vapifault-inflection-ai-llm-failed",
- "call.in-progress.error-vapifault-cerebras-llm-failed",
- "call.in-progress.error-vapifault-deep-seek-llm-failed",
- "call.in-progress.error-vapifault-chat-pipeline-failed-to-start",
- "pipeline-error-openai-400-bad-request-validation-failed",
- "pipeline-error-openai-401-unauthorized",
- "pipeline-error-openai-401-incorrect-api-key",
- "pipeline-error-openai-401-account-not-in-organization",
- "pipeline-error-openai-403-model-access-denied",
- "pipeline-error-openai-429-exceeded-quota",
- "pipeline-error-openai-429-rate-limit-reached",
- "pipeline-error-openai-500-server-error",
- "pipeline-error-openai-503-server-overloaded-error",
- "pipeline-error-openai-llm-failed",
- "call.in-progress.error-vapifault-openai-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-openai-401-unauthorized",
- "call.in-progress.error-vapifault-openai-401-incorrect-api-key",
- "call.in-progress.error-vapifault-openai-401-account-not-in-organization",
- "call.in-progress.error-vapifault-openai-403-model-access-denied",
- "call.in-progress.error-vapifault-openai-429-exceeded-quota",
- "call.in-progress.error-vapifault-openai-429-rate-limit-reached",
- "call.in-progress.error-providerfault-openai-500-server-error",
- "call.in-progress.error-providerfault-openai-503-server-overloaded-error",
- "pipeline-error-azure-openai-400-bad-request-validation-failed",
- "pipeline-error-azure-openai-401-unauthorized",
- "pipeline-error-azure-openai-403-model-access-denied",
- "pipeline-error-azure-openai-429-exceeded-quota",
- "pipeline-error-azure-openai-500-server-error",
- "pipeline-error-azure-openai-503-server-overloaded-error",
- "pipeline-error-azure-openai-llm-failed",
- "call.in-progress.error-vapifault-azure-openai-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-azure-openai-401-unauthorized",
- "call.in-progress.error-vapifault-azure-openai-403-model-access-denied",
- "call.in-progress.error-vapifault-azure-openai-429-exceeded-quota",
- "call.in-progress.error-providerfault-azure-openai-500-server-error",
- "call.in-progress.error-providerfault-azure-openai-503-server-overloaded-error",
- "pipeline-error-google-400-bad-request-validation-failed",
- "pipeline-error-google-401-unauthorized",
- "pipeline-error-google-403-model-access-denied",
- "pipeline-error-google-429-exceeded-quota",
- "pipeline-error-google-500-server-error",
- "pipeline-error-google-503-server-overloaded-error",
- "pipeline-error-google-llm-failed",
- "call.in-progress.error-vapifault-google-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-google-401-unauthorized",
- "call.in-progress.error-vapifault-google-403-model-access-denied",
- "call.in-progress.error-vapifault-google-429-exceeded-quota",
- "call.in-progress.error-providerfault-google-500-server-error",
- "call.in-progress.error-providerfault-google-503-server-overloaded-error",
- "pipeline-error-xai-400-bad-request-validation-failed",
- "pipeline-error-xai-401-unauthorized",
- "pipeline-error-xai-403-model-access-denied",
- "pipeline-error-xai-429-exceeded-quota",
- "pipeline-error-xai-500-server-error",
- "pipeline-error-xai-503-server-overloaded-error",
- "pipeline-error-xai-llm-failed",
- "call.in-progress.error-vapifault-xai-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-xai-401-unauthorized",
- "call.in-progress.error-vapifault-xai-403-model-access-denied",
- "call.in-progress.error-vapifault-xai-429-exceeded-quota",
- "call.in-progress.error-providerfault-xai-500-server-error",
- "call.in-progress.error-providerfault-xai-503-server-overloaded-error",
- "pipeline-error-mistral-400-bad-request-validation-failed",
- "pipeline-error-mistral-401-unauthorized",
- "pipeline-error-mistral-403-model-access-denied",
- "pipeline-error-mistral-429-exceeded-quota",
- "pipeline-error-mistral-500-server-error",
- "pipeline-error-mistral-503-server-overloaded-error",
- "pipeline-error-mistral-llm-failed",
- "call.in-progress.error-vapifault-mistral-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-mistral-401-unauthorized",
- "call.in-progress.error-vapifault-mistral-403-model-access-denied",
- "call.in-progress.error-vapifault-mistral-429-exceeded-quota",
- "call.in-progress.error-providerfault-mistral-500-server-error",
- "call.in-progress.error-providerfault-mistral-503-server-overloaded-error",
- "pipeline-error-inflection-ai-400-bad-request-validation-failed",
- "pipeline-error-inflection-ai-401-unauthorized",
- "pipeline-error-inflection-ai-403-model-access-denied",
- "pipeline-error-inflection-ai-429-exceeded-quota",
- "pipeline-error-inflection-ai-500-server-error",
- "pipeline-error-inflection-ai-503-server-overloaded-error",
- "pipeline-error-inflection-ai-llm-failed",
- "call.in-progress.error-vapifault-inflection-ai-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-inflection-ai-401-unauthorized",
- "call.in-progress.error-vapifault-inflection-ai-403-model-access-denied",
- "call.in-progress.error-vapifault-inflection-ai-429-exceeded-quota",
- "call.in-progress.error-providerfault-inflection-ai-500-server-error",
- "call.in-progress.error-providerfault-inflection-ai-503-server-overloaded-error",
- "pipeline-error-deep-seek-400-bad-request-validation-failed",
- "pipeline-error-deep-seek-401-unauthorized",
- "pipeline-error-deep-seek-403-model-access-denied",
- "pipeline-error-deep-seek-429-exceeded-quota",
- "pipeline-error-deep-seek-500-server-error",
- "pipeline-error-deep-seek-503-server-overloaded-error",
- "pipeline-error-deep-seek-llm-failed",
- "call.in-progress.error-vapifault-deep-seek-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-deep-seek-401-unauthorized",
- "call.in-progress.error-vapifault-deep-seek-403-model-access-denied",
- "call.in-progress.error-vapifault-deep-seek-429-exceeded-quota",
- "call.in-progress.error-providerfault-deep-seek-500-server-error",
- "call.in-progress.error-providerfault-deep-seek-503-server-overloaded-error",
- "pipeline-error-groq-400-bad-request-validation-failed",
- "pipeline-error-groq-401-unauthorized",
- "pipeline-error-groq-403-model-access-denied",
- "pipeline-error-groq-429-exceeded-quota",
- "pipeline-error-groq-500-server-error",
- "pipeline-error-groq-503-server-overloaded-error",
- "pipeline-error-groq-llm-failed",
- "call.in-progress.error-vapifault-groq-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-groq-401-unauthorized",
- "call.in-progress.error-vapifault-groq-403-model-access-denied",
- "call.in-progress.error-vapifault-groq-429-exceeded-quota",
- "call.in-progress.error-providerfault-groq-500-server-error",
- "call.in-progress.error-providerfault-groq-503-server-overloaded-error",
- "pipeline-error-cerebras-400-bad-request-validation-failed",
- "pipeline-error-cerebras-401-unauthorized",
- "pipeline-error-cerebras-403-model-access-denied",
- "pipeline-error-cerebras-429-exceeded-quota",
- "pipeline-error-cerebras-500-server-error",
- "pipeline-error-cerebras-503-server-overloaded-error",
- "pipeline-error-cerebras-llm-failed",
- "call.in-progress.error-vapifault-cerebras-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-cerebras-401-unauthorized",
- "call.in-progress.error-vapifault-cerebras-403-model-access-denied",
- "call.in-progress.error-vapifault-cerebras-429-exceeded-quota",
- "call.in-progress.error-providerfault-cerebras-500-server-error",
- "call.in-progress.error-providerfault-cerebras-503-server-overloaded-error",
- "pipeline-error-anthropic-400-bad-request-validation-failed",
- "pipeline-error-anthropic-401-unauthorized",
- "pipeline-error-anthropic-403-model-access-denied",
- "pipeline-error-anthropic-429-exceeded-quota",
- "pipeline-error-anthropic-500-server-error",
- "pipeline-error-anthropic-503-server-overloaded-error",
- "pipeline-error-anthropic-llm-failed",
- "call.in-progress.error-vapifault-anthropic-llm-failed",
- "call.in-progress.error-vapifault-anthropic-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-anthropic-401-unauthorized",
- "call.in-progress.error-vapifault-anthropic-403-model-access-denied",
- "call.in-progress.error-vapifault-anthropic-429-exceeded-quota",
- "call.in-progress.error-providerfault-anthropic-500-server-error",
- "call.in-progress.error-providerfault-anthropic-503-server-overloaded-error",
- "pipeline-error-anthropic-bedrock-400-bad-request-validation-failed",
- "pipeline-error-anthropic-bedrock-401-unauthorized",
- "pipeline-error-anthropic-bedrock-403-model-access-denied",
- "pipeline-error-anthropic-bedrock-429-exceeded-quota",
- "pipeline-error-anthropic-bedrock-500-server-error",
- "pipeline-error-anthropic-bedrock-503-server-overloaded-error",
- "pipeline-error-anthropic-bedrock-llm-failed",
- "call.in-progress.error-vapifault-anthropic-bedrock-llm-failed",
- "call.in-progress.error-vapifault-anthropic-bedrock-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-anthropic-bedrock-401-unauthorized",
- "call.in-progress.error-vapifault-anthropic-bedrock-403-model-access-denied",
- "call.in-progress.error-vapifault-anthropic-bedrock-429-exceeded-quota",
- "call.in-progress.error-providerfault-anthropic-bedrock-500-server-error",
- "call.in-progress.error-providerfault-anthropic-bedrock-503-server-overloaded-error",
- "pipeline-error-anthropic-vertex-400-bad-request-validation-failed",
- "pipeline-error-anthropic-vertex-401-unauthorized",
- "pipeline-error-anthropic-vertex-403-model-access-denied",
- "pipeline-error-anthropic-vertex-429-exceeded-quota",
- "pipeline-error-anthropic-vertex-500-server-error",
- "pipeline-error-anthropic-vertex-503-server-overloaded-error",
- "pipeline-error-anthropic-vertex-llm-failed",
- "call.in-progress.error-vapifault-anthropic-vertex-llm-failed",
- "call.in-progress.error-vapifault-anthropic-vertex-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-anthropic-vertex-401-unauthorized",
- "call.in-progress.error-vapifault-anthropic-vertex-403-model-access-denied",
- "call.in-progress.error-vapifault-anthropic-vertex-429-exceeded-quota",
- "call.in-progress.error-providerfault-anthropic-vertex-500-server-error",
- "call.in-progress.error-providerfault-anthropic-vertex-503-server-overloaded-error",
- "pipeline-error-together-ai-400-bad-request-validation-failed",
- "pipeline-error-together-ai-401-unauthorized",
- "pipeline-error-together-ai-403-model-access-denied",
- "pipeline-error-together-ai-429-exceeded-quota",
- "pipeline-error-together-ai-500-server-error",
- "pipeline-error-together-ai-503-server-overloaded-error",
- "pipeline-error-together-ai-llm-failed",
- "call.in-progress.error-vapifault-together-ai-llm-failed",
- "call.in-progress.error-vapifault-together-ai-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-together-ai-401-unauthorized",
- "call.in-progress.error-vapifault-together-ai-403-model-access-denied",
- "call.in-progress.error-vapifault-together-ai-429-exceeded-quota",
- "call.in-progress.error-providerfault-together-ai-500-server-error",
- "call.in-progress.error-providerfault-together-ai-503-server-overloaded-error",
- "pipeline-error-anyscale-400-bad-request-validation-failed",
- "pipeline-error-anyscale-401-unauthorized",
- "pipeline-error-anyscale-403-model-access-denied",
- "pipeline-error-anyscale-429-exceeded-quota",
- "pipeline-error-anyscale-500-server-error",
- "pipeline-error-anyscale-503-server-overloaded-error",
- "pipeline-error-anyscale-llm-failed",
- "call.in-progress.error-vapifault-anyscale-llm-failed",
- "call.in-progress.error-vapifault-anyscale-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-anyscale-401-unauthorized",
- "call.in-progress.error-vapifault-anyscale-403-model-access-denied",
- "call.in-progress.error-vapifault-anyscale-429-exceeded-quota",
- "call.in-progress.error-providerfault-anyscale-500-server-error",
- "call.in-progress.error-providerfault-anyscale-503-server-overloaded-error",
- "pipeline-error-openrouter-400-bad-request-validation-failed",
- "pipeline-error-openrouter-401-unauthorized",
- "pipeline-error-openrouter-403-model-access-denied",
- "pipeline-error-openrouter-429-exceeded-quota",
- "pipeline-error-openrouter-500-server-error",
- "pipeline-error-openrouter-503-server-overloaded-error",
- "pipeline-error-openrouter-llm-failed",
- "call.in-progress.error-vapifault-openrouter-llm-failed",
- "call.in-progress.error-vapifault-openrouter-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-openrouter-401-unauthorized",
- "call.in-progress.error-vapifault-openrouter-403-model-access-denied",
- "call.in-progress.error-vapifault-openrouter-429-exceeded-quota",
- "call.in-progress.error-providerfault-openrouter-500-server-error",
- "call.in-progress.error-providerfault-openrouter-503-server-overloaded-error",
- "pipeline-error-perplexity-ai-400-bad-request-validation-failed",
- "pipeline-error-perplexity-ai-401-unauthorized",
- "pipeline-error-perplexity-ai-403-model-access-denied",
- "pipeline-error-perplexity-ai-429-exceeded-quota",
- "pipeline-error-perplexity-ai-500-server-error",
- "pipeline-error-perplexity-ai-503-server-overloaded-error",
- "pipeline-error-perplexity-ai-llm-failed",
- "call.in-progress.error-vapifault-perplexity-ai-llm-failed",
- "call.in-progress.error-vapifault-perplexity-ai-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-perplexity-ai-401-unauthorized",
- "call.in-progress.error-vapifault-perplexity-ai-403-model-access-denied",
- "call.in-progress.error-vapifault-perplexity-ai-429-exceeded-quota",
- "call.in-progress.error-providerfault-perplexity-ai-500-server-error",
- "call.in-progress.error-providerfault-perplexity-ai-503-server-overloaded-error",
- "pipeline-error-deepinfra-400-bad-request-validation-failed",
- "pipeline-error-deepinfra-401-unauthorized",
- "pipeline-error-deepinfra-403-model-access-denied",
- "pipeline-error-deepinfra-429-exceeded-quota",
- "pipeline-error-deepinfra-500-server-error",
- "pipeline-error-deepinfra-503-server-overloaded-error",
- "pipeline-error-deepinfra-llm-failed",
- "call.in-progress.error-vapifault-deepinfra-llm-failed",
- "call.in-progress.error-vapifault-deepinfra-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-deepinfra-401-unauthorized",
- "call.in-progress.error-vapifault-deepinfra-403-model-access-denied",
- "call.in-progress.error-vapifault-deepinfra-429-exceeded-quota",
- "call.in-progress.error-providerfault-deepinfra-500-server-error",
- "call.in-progress.error-providerfault-deepinfra-503-server-overloaded-error",
- "pipeline-error-runpod-400-bad-request-validation-failed",
- "pipeline-error-runpod-401-unauthorized",
- "pipeline-error-runpod-403-model-access-denied",
- "pipeline-error-runpod-429-exceeded-quota",
- "pipeline-error-runpod-500-server-error",
- "pipeline-error-runpod-503-server-overloaded-error",
- "pipeline-error-runpod-llm-failed",
- "call.in-progress.error-vapifault-runpod-llm-failed",
- "call.in-progress.error-vapifault-runpod-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-runpod-401-unauthorized",
- "call.in-progress.error-vapifault-runpod-403-model-access-denied",
- "call.in-progress.error-vapifault-runpod-429-exceeded-quota",
- "call.in-progress.error-providerfault-runpod-500-server-error",
- "call.in-progress.error-providerfault-runpod-503-server-overloaded-error",
- "pipeline-error-custom-llm-400-bad-request-validation-failed",
- "pipeline-error-custom-llm-401-unauthorized",
- "pipeline-error-custom-llm-403-model-access-denied",
- "pipeline-error-custom-llm-429-exceeded-quota",
- "pipeline-error-custom-llm-500-server-error",
- "pipeline-error-custom-llm-503-server-overloaded-error",
- "pipeline-error-custom-llm-llm-failed",
- "call.in-progress.error-vapifault-custom-llm-llm-failed",
- "call.in-progress.error-vapifault-custom-llm-400-bad-request-validation-failed",
- "call.in-progress.error-vapifault-custom-llm-401-unauthorized",
- "call.in-progress.error-vapifault-custom-llm-403-model-access-denied",
- "call.in-progress.error-vapifault-custom-llm-429-exceeded-quota",
- "call.in-progress.error-providerfault-custom-llm-500-server-error",
- "call.in-progress.error-providerfault-custom-llm-503-server-overloaded-error",
- "pipeline-error-custom-voice-failed",
- "pipeline-error-cartesia-socket-hang-up",
- "pipeline-error-cartesia-requested-payment",
- "pipeline-error-cartesia-500-server-error",
- "pipeline-error-cartesia-502-server-error",
- "pipeline-error-cartesia-503-server-error",
- "pipeline-error-cartesia-522-server-error",
- "call.in-progress.error-vapifault-cartesia-socket-hang-up",
- "call.in-progress.error-vapifault-cartesia-requested-payment",
- "call.in-progress.error-providerfault-cartesia-500-server-error",
- "call.in-progress.error-providerfault-cartesia-503-server-error",
- "call.in-progress.error-providerfault-cartesia-522-server-error",
- "pipeline-error-eleven-labs-voice-not-found",
- "pipeline-error-eleven-labs-quota-exceeded",
- "pipeline-error-eleven-labs-unauthorized-access",
- "pipeline-error-eleven-labs-unauthorized-to-access-model",
- "pipeline-error-eleven-labs-professional-voices-only-for-creator-plus",
- "pipeline-error-eleven-labs-blocked-free-plan-and-requested-upgrade",
- "pipeline-error-eleven-labs-blocked-concurrent-requests-and-requested-upgrade",
- "pipeline-error-eleven-labs-blocked-using-instant-voice-clone-and-requested-upgrade",
- "pipeline-error-eleven-labs-system-busy-and-requested-upgrade",
- "pipeline-error-eleven-labs-voice-not-fine-tuned",
- "pipeline-error-eleven-labs-invalid-api-key",
- "pipeline-error-eleven-labs-invalid-voice-samples",
- "pipeline-error-eleven-labs-voice-disabled-by-owner",
- "pipeline-error-eleven-labs-vapi-voice-disabled-by-owner",
- "pipeline-error-eleven-labs-blocked-account-in-probation",
- "pipeline-error-eleven-labs-blocked-content-against-their-policy",
- "pipeline-error-eleven-labs-missing-samples-for-voice-clone",
- "pipeline-error-eleven-labs-voice-not-fine-tuned-and-cannot-be-used",
- "pipeline-error-eleven-labs-voice-not-allowed-for-free-users",
- "pipeline-error-eleven-labs-max-character-limit-exceeded",
- "pipeline-error-eleven-labs-blocked-voice-potentially-against-terms-of-service-and-awaiting-verification",
- "pipeline-error-eleven-labs-500-server-error",
- "pipeline-error-eleven-labs-503-server-error",
- "call.in-progress.error-vapifault-eleven-labs-voice-not-found",
- "call.in-progress.error-vapifault-eleven-labs-quota-exceeded",
- "call.in-progress.error-vapifault-eleven-labs-unauthorized-access",
- "call.in-progress.error-vapifault-eleven-labs-unauthorized-to-access-model",
- "call.in-progress.error-vapifault-eleven-labs-professional-voices-only-for-creator-plus",
- "call.in-progress.error-vapifault-eleven-labs-blocked-free-plan-and-requested-upgrade",
- "call.in-progress.error-vapifault-eleven-labs-blocked-concurrent-requests-and-requested-upgrade",
- "call.in-progress.error-vapifault-eleven-labs-blocked-using-instant-voice-clone-and-requested-upgrade",
- "call.in-progress.error-vapifault-eleven-labs-system-busy-and-requested-upgrade",
- "call.in-progress.error-vapifault-eleven-labs-voice-not-fine-tuned",
- "call.in-progress.error-vapifault-eleven-labs-invalid-api-key",
- "call.in-progress.error-vapifault-eleven-labs-invalid-voice-samples",
- "call.in-progress.error-vapifault-eleven-labs-voice-disabled-by-owner",
- "call.in-progress.error-vapifault-eleven-labs-blocked-account-in-probation",
- "call.in-progress.error-vapifault-eleven-labs-blocked-content-against-their-policy",
- "call.in-progress.error-vapifault-eleven-labs-missing-samples-for-voice-clone",
- "call.in-progress.error-vapifault-eleven-labs-voice-not-fine-tuned-and-cannot-be-used",
- "call.in-progress.error-vapifault-eleven-labs-voice-not-allowed-for-free-users",
- "call.in-progress.error-vapifault-eleven-labs-max-character-limit-exceeded",
- "call.in-progress.error-vapifault-eleven-labs-blocked-voice-potentially-against-terms-of-service-and-awaiting-verification",
- "call.in-progress.error-providerfault-eleven-labs-500-server-error",
- "call.in-progress.error-providerfault-eleven-labs-503-server-error",
- "pipeline-error-playht-request-timed-out",
- "pipeline-error-playht-invalid-voice",
- "pipeline-error-playht-unexpected-error",
- "pipeline-error-playht-out-of-credits",
- "pipeline-error-playht-invalid-emotion",
- "pipeline-error-playht-voice-must-be-a-valid-voice-manifest-uri",
- "pipeline-error-playht-401-unauthorized",
- "pipeline-error-playht-403-forbidden-out-of-characters",
- "pipeline-error-playht-403-forbidden-api-access-not-available",
- "pipeline-error-playht-429-exceeded-quota",
- "pipeline-error-playht-502-gateway-error",
- "pipeline-error-playht-504-gateway-error",
- "call.in-progress.error-vapifault-playht-request-timed-out",
- "call.in-progress.error-vapifault-playht-invalid-voice",
- "call.in-progress.error-vapifault-playht-unexpected-error",
- "call.in-progress.error-vapifault-playht-out-of-credits",
- "call.in-progress.error-vapifault-playht-invalid-emotion",
- "call.in-progress.error-vapifault-playht-voice-must-be-a-valid-voice-manifest-uri",
- "call.in-progress.error-vapifault-playht-401-unauthorized",
- "call.in-progress.error-vapifault-playht-403-forbidden-out-of-characters",
- "call.in-progress.error-vapifault-playht-403-forbidden-api-access-not-available",
- "call.in-progress.error-vapifault-playht-429-exceeded-quota",
- "call.in-progress.error-providerfault-playht-502-gateway-error",
- "call.in-progress.error-providerfault-playht-504-gateway-error",
- "pipeline-error-custom-transcriber-failed",
- "call.in-progress.error-vapifault-custom-transcriber-failed",
- "pipeline-error-eleven-labs-transcriber-failed",
- "call.in-progress.error-vapifault-eleven-labs-transcriber-failed",
- "pipeline-error-deepgram-returning-400-no-such-model-language-tier-combination",
- "pipeline-error-deepgram-returning-401-invalid-credentials",
- "pipeline-error-deepgram-returning-403-model-access-denied",
- "pipeline-error-deepgram-returning-404-not-found",
- "pipeline-error-deepgram-returning-500-invalid-json",
- "pipeline-error-deepgram-returning-502-network-error",
- "pipeline-error-deepgram-returning-502-bad-gateway-ehostunreach",
- "pipeline-error-deepgram-returning-econnreset",
- "call.in-progress.error-vapifault-deepgram-returning-400-no-such-model-language-tier-combination",
- "call.in-progress.error-vapifault-deepgram-returning-401-invalid-credentials",
- "call.in-progress.error-vapifault-deepgram-returning-404-not-found",
- "call.in-progress.error-vapifault-deepgram-returning-403-model-access-denied",
- "call.in-progress.error-providerfault-deepgram-returning-500-invalid-json",
- "call.in-progress.error-providerfault-deepgram-returning-502-network-error",
- "call.in-progress.error-providerfault-deepgram-returning-502-bad-gateway-ehostunreach",
- "pipeline-error-google-transcriber-failed",
- "call.in-progress.error-vapifault-google-transcriber-failed",
- "pipeline-error-openai-transcriber-failed",
- "call.in-progress.error-vapifault-openai-transcriber-failed",
- "call.in-progress.error-warm-transfer-max-duration",
- "call.in-progress.error-warm-transfer-assistant-cancelled",
- "call.in-progress.error-warm-transfer-silence-timeout",
- "call.in-progress.error-warm-transfer-microphone-timeout",
- "call.in-progress.error-warm-transfer-hang-timeout",
- "call.in-progress.error-warm-transfer-idle-timeout",
- "assistant-ended-call",
- "assistant-said-end-call-phrase",
- "assistant-ended-call-with-hangup-task",
- "assistant-ended-call-after-message-spoken",
- "assistant-forwarded-call",
- "assistant-join-timed-out",
- "call.in-progress.error-assistant-did-not-receive-customer-audio",
- "call.in-progress.error-transfer-failed",
- "customer-busy",
- "customer-ended-call",
- "customer-ended-call-after-warm-transfer-attempt",
- "customer-did-not-answer",
- "customer-did-not-give-microphone-permission",
- "exceeded-max-duration",
- "manually-canceled",
- "phone-call-provider-closed-websocket",
- "call.forwarding.operator-busy",
- "silence-timed-out",
- "call.in-progress.error-sip-inbound-call-failed-to-connect",
- "call.in-progress.error-providerfault-outbound-sip-403-forbidden",
- "call.in-progress.error-providerfault-outbound-sip-407-proxy-authentication-required",
- "call.in-progress.error-providerfault-outbound-sip-503-service-unavailable",
- "call.in-progress.error-providerfault-outbound-sip-480-temporarily-unavailable",
- "call.in-progress.error-sip-outbound-call-failed-to-connect",
- "call.ringing.hook-executed-say",
- "call.ringing.hook-executed-transfer",
- "call.ending.hook-executed-say",
- "call.ending.hook-executed-transfer",
- "call.ringing.sip-inbound-caller-hungup-before-call-connect",
- "call.ringing.error-sip-inbound-call-failed-to-connect",
- "twilio-failed-to-connect-call",
- "twilio-reported-customer-misdialed",
- "vonage-rejected",
- "voicemail"
+ "campaign.scheduled.ended-by-user",
+ "campaign.in-progress.ended-by-user",
+ "campaign.ended.success"
]
},
- "destination": {
- "description": "This is the destination where the call ended up being transferred to. If the call was not transferred, this will be empty.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the campaign. This is just for your own reference.",
+ "example": "Q2 Sales Campaign"
+ },
+ "assistantId": {
+ "type": "string",
+ "description": "This is the assistant ID that will be used for the campaign calls. Note: Only one of assistantId, workflowId, or squadId can be used."
+ },
+ "workflowId": {
+ "type": "string",
+ "description": "This is the workflow ID that will be used for the campaign calls. Note: Only one of assistantId, workflowId, or squadId can be used."
+ },
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad ID that will be used for the campaign calls. Note: Only one of assistantId, workflowId, or squadId can be used."
+ },
+ "phoneNumberId": {
+ "type": "string",
+ "description": "This is the phone number ID that will be used for the campaign calls. Required if dialPlan is not provided. Note: phoneNumberId and dialPlan are mutually exclusive."
+ },
+ "dialPlan": {
+ "description": "This is a list of dial entries, each specifying a phone number and the customers to call using that number. Use this when you want different phone numbers to call different sets of customers. Note: phoneNumberId and dialPlan are mutually exclusive.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/DialPlanEntry"
+ }
+ },
+ "schedulePlan": {
+ "description": "This is the schedule plan for the campaign. Calls will start at startedAt and continue until your organization’s concurrency limit is reached. Any remaining calls will be retried for up to one hour as capacity becomes available. After that hour or after latestAt, whichever comes first, any calls that couldn’t be placed won’t be retried.",
+ "allOf": [
{
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
+ "$ref": "#/components/schemas/SchedulePlan"
}
]
},
+ "customers": {
+ "description": "These are the customers that will be called in the campaign. Required if dialPlan is not provided.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
+ },
"id": {
"type": "string",
- "description": "This is the unique identifier for the call."
+ "description": "This is the unique identifier for the campaign."
},
"orgId": {
"type": "string",
- "description": "This is the unique identifier for the org that this call belongs to."
+ "description": "This is the unique identifier for the org that this campaign belongs to."
},
"createdAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the call was created."
+ "description": "This is the ISO 8601 date-time string of when the campaign was created."
},
"updatedAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the call was last updated."
+ "description": "This is the ISO 8601 date-time string of when the campaign was last updated."
},
- "startedAt": {
- "format": "date-time",
+ "calls": {
+ "type": "object",
+ "description": "This is a map of call IDs to campaign call details."
+ },
+ "callsCounterScheduled": {
+ "type": "number",
+ "description": "This is the number of calls that have been scheduled."
+ },
+ "callsCounterQueued": {
+ "type": "number",
+ "description": "This is the number of calls that have been queued."
+ },
+ "callsCounterInProgress": {
+ "type": "number",
+ "description": "This is the number of calls that are in progress."
+ },
+ "callsCounterEndedVoicemail": {
+ "type": "number",
+ "description": "This is the number of calls whose ended reason is 'voicemail'."
+ },
+ "callsCounterEnded": {
+ "type": "number",
+ "description": "This is the number of calls that have ended."
+ }
+ },
+ "required": [
+ "status",
+ "name",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt",
+ "calls",
+ "callsCounterScheduled",
+ "callsCounterQueued",
+ "callsCounterInProgress",
+ "callsCounterEndedVoicemail",
+ "callsCounterEnded"
+ ]
+ },
+ "CampaignPaginatedResponse": {
+ "type": "object",
+ "properties": {
+ "results": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/Campaign"
+ }
+ },
+ "metadata": {
+ "$ref": "#/components/schemas/PaginationMeta"
+ }
+ },
+ "required": [
+ "results",
+ "metadata"
+ ]
+ },
+ "UpdateCampaignDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the call was started."
+ "description": "This is the name of the campaign. This is just for your own reference."
},
- "endedAt": {
- "format": "date-time",
+ "assistantId": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the call was ended."
+ "description": "This is the assistant ID that will be used for the campaign calls.\nCan only be updated if campaign is not in progress or has ended."
},
- "cost": {
- "type": "number",
- "description": "This is the cost of the call in USD."
+ "workflowId": {
+ "type": "string",
+ "description": "This is the workflow ID that will be used for the campaign calls.\nCan only be updated if campaign is not in progress or has ended."
},
- "costBreakdown": {
- "description": "This is the cost of the call in USD.",
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad ID that will be used for the campaign calls.\nCan only be updated if campaign is not in progress or has ended."
+ },
+ "phoneNumberId": {
+ "type": "string",
+ "description": "This is the phone number ID that will be used for the campaign calls.\nCan only be updated if campaign is not in progress or has ended.\nNote: phoneNumberId and dialPlan are mutually exclusive."
+ },
+ "dialPlan": {
+ "description": "This is a list of dial entries, each specifying a phone number and the customers to call using that number. Can only be updated if campaign is not in progress or has ended. Note: phoneNumberId and dialPlan are mutually exclusive.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/DialPlanEntry"
+ }
+ },
+ "schedulePlan": {
+ "description": "This is the schedule plan for the campaign.\nCan only be updated if campaign is not in progress or has ended.",
"allOf": [
{
- "$ref": "#/components/schemas/CostBreakdown"
+ "$ref": "#/components/schemas/SchedulePlan"
}
]
},
- "artifactPlan": {
- "description": "This is a copy of assistant artifact plan. This isn't actually stored on the call but rather just returned in POST /call/web to enable artifact creation client side.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ArtifactPlan"
- }
+ "status": {
+ "type": "string",
+ "description": "This is the status of the campaign.\nCan only be updated to 'ended' if you want to end the campaign.\nWhen set to 'ended', it will delete all scheduled calls. Calls in progress will be allowed to complete.",
+ "enum": [
+ "ended"
]
+ }
+ }
+ },
+ "RelayTargetAssistant": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "assistant"
+ ],
+ "description": "The type of relay target"
},
- "analysis": {
- "description": "This is the analysis of the call. Configure in `assistant.analysisPlan`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Analysis"
- }
+ "assistantId": {
+ "type": "string",
+ "description": "The unique identifier of the assistant"
+ },
+ "assistantName": {
+ "type": "string",
+ "description": "The name of the assistant"
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "RelayTargetSquad": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "squad"
+ ],
+ "description": "The type of relay target"
+ },
+ "squadId": {
+ "type": "string",
+ "description": "The unique identifier of the squad"
+ },
+ "squadName": {
+ "type": "string",
+ "description": "The name of the squad"
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "RelayTargetOptions": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "assistant",
+ "squad"
]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "RelayCommandSay": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "say"
+ ],
+ "description": "The type of relay command"
},
- "monitor": {
- "description": "This is to real-time monitor the call. Configure in `assistant.monitorPlan`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Monitor"
- }
+ "content": {
+ "type": "string",
+ "description": "The content for the assistant to speak"
+ }
+ },
+ "required": [
+ "type",
+ "content"
+ ]
+ },
+ "RelayCommandNote": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "message.add"
+ ],
+ "description": "The type of relay command"
+ },
+ "content": {
+ "type": "string",
+ "description": "The note content to add to the conversation"
+ }
+ },
+ "required": [
+ "type",
+ "content"
+ ]
+ },
+ "RelayCommandOptions": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "say",
+ "message.add"
]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "RelayRequest": {
+ "type": "object",
+ "properties": {
+ "source": {
+ "type": "string",
+ "description": "The source identifier of the relay request"
},
- "artifact": {
- "description": "These are the artifacts created from the call. Configure in `assistant.artifactPlan`.",
- "allOf": [
+ "target": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/Artifact"
+ "$ref": "#/components/schemas/RelayTargetAssistant"
+ },
+ {
+ "$ref": "#/components/schemas/RelayTargetSquad"
}
- ]
+ ],
+ "description": "The target assistant or squad to relay the commands to"
},
- "phoneCallProviderId": {
+ "customerId": {
"type": "string",
- "description": "The ID of the call as provided by the phone number service. callSid in Twilio. conversationUuid in Vonage. callControlId in Telnyx.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
- "deprecated": true
+ "description": "The unique identifier of the customer"
},
- "campaignId": {
+ "commands": {
+ "type": "array",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/RelayCommandSay"
+ },
+ {
+ "$ref": "#/components/schemas/RelayCommandNote"
+ }
+ ]
+ },
+ "description": "The list of commands to relay to the target"
+ }
+ },
+ "required": [
+ "source",
+ "target",
+ "customerId",
+ "commands"
+ ]
+ },
+ "RelayResponse": {
+ "type": "object",
+ "properties": {
+ "status": {
"type": "string",
- "description": "This is the campaign ID that the call belongs to."
+ "enum": [
+ "deliveredLive",
+ "deliveredHeadless",
+ "failed"
+ ],
+ "description": "The status of the relay request"
+ },
+ "callId": {
+ "type": "string",
+ "description": "The unique identifier of the call, if delivered to a live call"
+ },
+ "sessionId": {
+ "type": "string",
+ "description": "The unique identifier of the session, if delivered to a headless session"
+ },
+ "chatId": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "status"
+ ]
+ },
+ "Session": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the session."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that owns this session."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 timestamp indicating when the session was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 timestamp indicating when the session was last updated."
+ },
+ "cost": {
+ "type": "number",
+ "description": "This is the cost of the session in USD."
+ },
+ "costs": {
+ "type": "array",
+ "description": "These are the costs of individual components of the session in USD.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ModelCost",
+ "title": "ModelCost"
+ },
+ {
+ "$ref": "#/components/schemas/AnalysisCost",
+ "title": "AnalysisCost"
+ },
+ {
+ "$ref": "#/components/schemas/SessionCost",
+ "title": "SessionCost"
+ }
+ ]
+ }
+ },
+ "name": {
+ "type": "string",
+ "description": "This is a user-defined name for the session. Maximum length is 40 characters.",
+ "maxLength": 40
+ },
+ "status": {
+ "type": "string",
+ "description": "This is the current status of the session. Can be either 'active' or 'completed'.",
+ "enum": [
+ "active",
+ "completed"
+ ]
+ },
+ "expirationSeconds": {
+ "type": "number",
+ "description": "Session expiration time in seconds. Defaults to 24 hours (86400 seconds) if not set.",
+ "minimum": 60,
+ "maximum": 2592000,
+ "example": 86400
},
"assistantId": {
"type": "string",
- "description": "This is the assistant ID that will be used for the call. To use a transient assistant, use `assistant` instead.\n\nTo start a call with:\n- Assistant, use `assistantId` or `assistant`\n- Squad, use `squadId` or `squad`\n- Workflow, use `workflowId` or `workflow`"
+ "description": "This is the ID of the assistant associated with this session. Use this when referencing an existing assistant."
},
"assistant": {
- "description": "This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead.\n\nTo start a call with:\n- Assistant, use `assistant`\n- Squad, use `squad`\n- Workflow, use `workflow`",
+ "description": "This is the assistant configuration for this session. Use this when creating a new assistant configuration.\nIf assistantId is provided, this will be ignored.",
"allOf": [
{
"$ref": "#/components/schemas/CreateAssistantDTO"
@@ -27598,7 +37384,7 @@
]
},
"assistantOverrides": {
- "description": "These are the overrides for the `assistant` or `assistantId`'s settings and template variables.",
+ "description": "These are the overrides for the assistant configuration.\nUse this to provide variable values and other overrides when using assistantId.\nVariable substitution will be applied to the assistant's messages and other text-based fields.",
"allOf": [
{
"$ref": "#/components/schemas/AssistantOverrides"
@@ -27607,76 +37393,75 @@
},
"squadId": {
"type": "string",
- "description": "This is the squad that will be used for the call. To use a transient squad, use `squad` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
+ "description": "This is the squad ID associated with this session. Use this when referencing an existing squad."
},
"squad": {
- "description": "This is a squad that will be used for the call. To use an existing squad, use `squadId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
+ "description": "This is the squad configuration for this session. Use this when creating a new squad configuration.\nIf squadId is provided, this will be ignored.",
"allOf": [
{
"$ref": "#/components/schemas/CreateSquadDTO"
}
]
},
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
+ "messages": {
+ "type": "array",
+ "description": "This is an array of chat messages in the session.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
+ },
+ {
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
+ },
+ {
+ "$ref": "#/components/schemas/AssistantMessage",
+ "title": "AssistantMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessage",
+ "title": "ToolMessage"
+ },
+ {
+ "$ref": "#/components/schemas/DeveloperMessage",
+ "title": "DeveloperMessage"
+ }
+ ]
+ }
},
- "workflow": {
- "description": "This is a workflow that will be used for the call. To use an existing workflow, use `workflowId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
+ "customer": {
+ "description": "This is the customer information associated with this session.",
"allOf": [
{
- "$ref": "#/components/schemas/CreateWorkflowDTO"
+ "$ref": "#/components/schemas/CreateCustomerDTO"
}
]
},
- "workflowOverrides": {
- "description": "These are the overrides for the `workflow` or `workflowId`'s settings and template variables.",
- "allOf": [
- {
- "$ref": "#/components/schemas/WorkflowOverrides"
- }
- ]
+ "customerId": {
+ "type": "string",
+ "description": "This is the customerId of the customer associated with this session."
},
"phoneNumberId": {
"type": "string",
- "description": "This is the phone number that will be used for the call. To use a transient number, use `phoneNumber` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type."
+ "description": "This is the ID of the phone number associated with this session."
},
"phoneNumber": {
- "description": "This is the phone number that will be used for the call. To use an existing number, use `phoneNumberId` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
+ "description": "This is the phone number configuration for this session.",
"allOf": [
{
"$ref": "#/components/schemas/ImportTwilioPhoneNumberDTO"
}
]
},
- "customerId": {
- "type": "string",
- "description": "This is the customer that will be called. To call a transient customer , use `customer` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type."
- },
- "customer": {
- "description": "This is the customer that will be called. To call an existing customer, use `customerId` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateCustomerDTO"
- }
- ]
- },
- "name": {
- "type": "string",
- "description": "This is the name of the call. This is just for your own reference.",
- "maxLength": 40
- },
- "schedulePlan": {
- "description": "This is the schedule plan of the call.",
+ "artifact": {
+ "description": "These are the artifacts that were extracted from the session messages.\nThey are only available after the session has completed.\nThe artifact plan from the assistant or active assistant of squad is used to generate the artifact.\nCurrently the only supported fields of assistant artifact plan are:\n- structuredOutputIds",
"allOf": [
{
- "$ref": "#/components/schemas/SchedulePlan"
+ "$ref": "#/components/schemas/Artifact"
}
]
- },
- "transport": {
- "type": "object",
- "description": "This is the transport of the call."
}
},
"required": [
@@ -27686,77 +37471,35 @@
"updatedAt"
]
},
- "CallBatchError": {
- "type": "object",
- "properties": {
- "customer": {
- "$ref": "#/components/schemas/CreateCustomerDTO"
- },
- "error": {
- "type": "string"
- }
- },
- "required": [
- "customer",
- "error"
- ]
- },
- "CallBatchResponse": {
- "type": "object",
- "properties": {
- "results": {
- "description": "This is the list of calls that were created.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/Call"
- }
- },
- "errors": {
- "description": "This is the list of calls that failed to be created.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/CallBatchError"
- }
- }
- },
- "required": [
- "results",
- "errors"
- ]
- },
- "CreateCallDTO": {
+ "CreateSessionDTO": {
"type": "object",
"properties": {
- "customers": {
- "description": "This is used to issue batch calls to multiple customers.\n\nOnly relevant for `outboundPhoneCall`. To call a single customer, use `customer` instead.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/CreateCustomerDTO"
- }
- },
"name": {
"type": "string",
- "description": "This is the name of the call. This is just for your own reference.",
+ "description": "This is a user-defined name for the session. Maximum length is 40 characters.",
"maxLength": 40
},
- "schedulePlan": {
- "description": "This is the schedule plan of the call.",
- "allOf": [
- {
- "$ref": "#/components/schemas/SchedulePlan"
- }
+ "status": {
+ "type": "string",
+ "description": "This is the current status of the session. Can be either 'active' or 'completed'.",
+ "enum": [
+ "active",
+ "completed"
]
},
- "transport": {
- "type": "object",
- "description": "This is the transport of the call."
+ "expirationSeconds": {
+ "type": "number",
+ "description": "Session expiration time in seconds. Defaults to 24 hours (86400 seconds) if not set.",
+ "minimum": 60,
+ "maximum": 2592000,
+ "example": 86400
},
"assistantId": {
"type": "string",
- "description": "This is the assistant ID that will be used for the call. To use a transient assistant, use `assistant` instead.\n\nTo start a call with:\n- Assistant, use `assistantId` or `assistant`\n- Squad, use `squadId` or `squad`\n- Workflow, use `workflowId` or `workflow`"
+ "description": "This is the ID of the assistant associated with this session. Use this when referencing an existing assistant."
},
"assistant": {
- "description": "This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead.\n\nTo start a call with:\n- Assistant, use `assistant`\n- Squad, use `squad`\n- Workflow, use `workflow`",
+ "description": "This is the assistant configuration for this session. Use this when creating a new assistant configuration.\nIf assistantId is provided, this will be ignored.",
"allOf": [
{
"$ref": "#/components/schemas/CreateAssistantDTO"
@@ -27764,7 +37507,7 @@
]
},
"assistantOverrides": {
- "description": "These are the overrides for the `assistant` or `assistantId`'s settings and template variables.",
+ "description": "These are the overrides for the assistant configuration.\nUse this to provide variable values and other overrides when using assistantId.\nVariable substitution will be applied to the assistant's messages and other text-based fields.",
"allOf": [
{
"$ref": "#/components/schemas/AssistantOverrides"
@@ -27773,91 +37516,243 @@
},
"squadId": {
"type": "string",
- "description": "This is the squad that will be used for the call. To use a transient squad, use `squad` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
+ "description": "This is the squad ID associated with this session. Use this when referencing an existing squad."
},
"squad": {
- "description": "This is a squad that will be used for the call. To use an existing squad, use `squadId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
+ "description": "This is the squad configuration for this session. Use this when creating a new squad configuration.\nIf squadId is provided, this will be ignored.",
"allOf": [
{
"$ref": "#/components/schemas/CreateSquadDTO"
}
]
},
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
+ "messages": {
+ "type": "array",
+ "description": "This is an array of chat messages in the session.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
+ },
+ {
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
+ },
+ {
+ "$ref": "#/components/schemas/AssistantMessage",
+ "title": "AssistantMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessage",
+ "title": "ToolMessage"
+ },
+ {
+ "$ref": "#/components/schemas/DeveloperMessage",
+ "title": "DeveloperMessage"
+ }
+ ]
+ }
},
- "workflow": {
- "description": "This is a workflow that will be used for the call. To use an existing workflow, use `workflowId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
+ "customer": {
+ "description": "This is the customer information associated with this session.",
"allOf": [
{
- "$ref": "#/components/schemas/CreateWorkflowDTO"
+ "$ref": "#/components/schemas/CreateCustomerDTO"
}
]
},
- "workflowOverrides": {
- "description": "These are the overrides for the `workflow` or `workflowId`'s settings and template variables.",
- "allOf": [
- {
- "$ref": "#/components/schemas/WorkflowOverrides"
- }
- ]
+ "customerId": {
+ "type": "string",
+ "description": "This is the customerId of the customer associated with this session."
},
"phoneNumberId": {
"type": "string",
- "description": "This is the phone number that will be used for the call. To use a transient number, use `phoneNumber` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type."
+ "description": "This is the ID of the phone number associated with this session."
},
"phoneNumber": {
- "description": "This is the phone number that will be used for the call. To use an existing number, use `phoneNumberId` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
+ "description": "This is the phone number configuration for this session.",
"allOf": [
{
"$ref": "#/components/schemas/ImportTwilioPhoneNumberDTO"
}
]
+ }
+ }
+ },
+ "UpdateSessionDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the new name for the session. Maximum length is 40 characters.",
+ "maxLength": 40
},
- "customerId": {
+ "status": {
"type": "string",
- "description": "This is the customer that will be called. To call a transient customer , use `customer` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type."
+ "description": "This is the new status for the session.",
+ "enum": [
+ "active",
+ "completed"
+ ]
+ },
+ "expirationSeconds": {
+ "type": "number",
+ "description": "Session expiration time in seconds. Defaults to 24 hours (86400 seconds) if not set.",
+ "minimum": 60,
+ "maximum": 2592000,
+ "example": 86400
+ },
+ "messages": {
+ "type": "array",
+ "description": "This is the updated array of chat messages.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
+ },
+ {
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
+ },
+ {
+ "$ref": "#/components/schemas/AssistantMessage",
+ "title": "AssistantMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessage",
+ "title": "ToolMessage"
+ },
+ {
+ "$ref": "#/components/schemas/DeveloperMessage",
+ "title": "DeveloperMessage"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "GetSessionPaginatedDTO": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the session to filter by."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the session to filter by."
+ },
+ "assistantId": {
+ "type": "string",
+ "description": "This is the ID of the assistant to filter sessions by."
+ },
+ "assistantIdAny": {
+ "type": "string",
+ "description": "Filter by multiple assistant IDs. Provide as comma-separated values.",
+ "example": "assistant-1,assistant-2,assistant-3"
+ },
+ "squadId": {
+ "type": "string",
+ "description": "This is the ID of the squad to filter sessions by."
+ },
+ "workflowId": {
+ "type": "string",
+ "description": "This is the ID of the workflow to filter sessions by."
},
"customer": {
- "description": "This is the customer that will be called. To call an existing customer, use `customerId` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
+ "description": "This is the customer information to filter by.",
"allOf": [
{
"$ref": "#/components/schemas/CreateCustomerDTO"
}
]
- }
- }
- },
- "PaginationMeta": {
- "type": "object",
- "properties": {
- "itemsPerPage": {
- "type": "number"
},
- "totalItems": {
- "type": "number"
+ "customerNumberAny": {
+ "type": "string",
+ "description": "Filter by any of the specified customer phone numbers (comma-separated).",
+ "example": "+1234567890,+0987654321"
},
- "currentPage": {
- "type": "number"
+ "phoneNumberId": {
+ "type": "string",
+ "description": "This will return sessions with the specified phoneNumberId."
},
- "itemsBeyondRetention": {
- "type": "boolean"
+ "phoneNumberIdAny": {
+ "description": "This will return sessions with any of the specified phoneNumberIds.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "page": {
+ "type": "number",
+ "description": "This is the page number to return. Defaults to 1.",
+ "minimum": 1
+ },
+ "sortOrder": {
+ "type": "string",
+ "description": "This is the sort order for pagination. Defaults to 'DESC'.",
+ "enum": [
+ "ASC",
+ "DESC"
+ ]
+ },
+ "limit": {
+ "type": "number",
+ "description": "This is the maximum number of items to return. Defaults to 100.",
+ "minimum": 0,
+ "maximum": 1000
+ },
+ "createdAtGt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the createdAt is greater than the specified value."
+ },
+ "createdAtLt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the createdAt is less than the specified value."
+ },
+ "createdAtGe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the createdAt is greater than or equal to the specified value."
+ },
+ "createdAtLe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the createdAt is less than or equal to the specified value."
+ },
+ "updatedAtGt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is greater than the specified value."
+ },
+ "updatedAtLt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is less than the specified value."
+ },
+ "updatedAtGe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is greater than or equal to the specified value."
+ },
+ "updatedAtLe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is less than or equal to the specified value."
}
- },
- "required": [
- "itemsPerPage",
- "totalItems",
- "currentPage"
- ]
+ }
},
- "CallPaginatedResponse": {
+ "SessionPaginatedResponse": {
"type": "object",
"properties": {
"results": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/Call"
+ "$ref": "#/components/schemas/Session"
}
},
"metadata": {
@@ -27869,5167 +37764,5078 @@
"metadata"
]
},
- "CreateOutboundCallDTO": {
+ "ExportSessionDTO": {
"type": "object",
"properties": {
- "customers": {
- "description": "This is used to issue batch calls to multiple customers.\n\nOnly relevant for `outboundPhoneCall`. To call a single customer, use `customer` instead.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/CreateCustomerDTO"
- }
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the session to filter by."
},
"name": {
"type": "string",
- "description": "This is the name of the call. This is just for your own reference.",
- "maxLength": 40
- },
- "schedulePlan": {
- "description": "This is the schedule plan of the call.",
- "allOf": [
- {
- "$ref": "#/components/schemas/SchedulePlan"
- }
- ]
- },
- "transport": {
- "type": "object",
- "description": "This is the transport of the call."
+ "description": "This is the name of the session to filter by."
},
"assistantId": {
"type": "string",
- "description": "This is the assistant ID that will be used for the call. To use a transient assistant, use `assistant` instead.\n\nTo start a call with:\n- Assistant, use `assistantId` or `assistant`\n- Squad, use `squadId` or `squad`\n- Workflow, use `workflowId` or `workflow`"
- },
- "assistant": {
- "description": "This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead.\n\nTo start a call with:\n- Assistant, use `assistant`\n- Squad, use `squad`\n- Workflow, use `workflow`",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateAssistantDTO"
- }
- ]
+ "description": "This is the ID of the assistant to filter sessions by."
},
- "assistantOverrides": {
- "description": "These are the overrides for the `assistant` or `assistantId`'s settings and template variables.",
- "allOf": [
- {
- "$ref": "#/components/schemas/AssistantOverrides"
- }
- ]
+ "assistantIdAny": {
+ "type": "string",
+ "description": "Filter by multiple assistant IDs. Provide as comma-separated values.",
+ "example": "assistant-1,assistant-2,assistant-3"
},
"squadId": {
"type": "string",
- "description": "This is the squad that will be used for the call. To use a transient squad, use `squad` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
- },
- "squad": {
- "description": "This is a squad that will be used for the call. To use an existing squad, use `squadId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateSquadDTO"
- }
- ]
+ "description": "This is the ID of the squad to filter sessions by."
},
"workflowId": {
"type": "string",
- "description": "This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
+ "description": "This is the ID of the workflow to filter sessions by."
},
- "workflow": {
- "description": "This is a workflow that will be used for the call. To use an existing workflow, use `workflowId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
+ "customer": {
+ "description": "This is the customer information to filter by.",
"allOf": [
{
- "$ref": "#/components/schemas/CreateWorkflowDTO"
+ "$ref": "#/components/schemas/CreateCustomerDTO"
}
]
},
- "workflowOverrides": {
- "description": "These are the overrides for the `workflow` or `workflowId`'s settings and template variables.",
- "allOf": [
- {
- "$ref": "#/components/schemas/WorkflowOverrides"
- }
+ "customerNumberAny": {
+ "type": "string",
+ "description": "Filter by any of the specified customer phone numbers (comma-separated).",
+ "example": "+1234567890,+0987654321"
+ },
+ "columns": {
+ "type": "string",
+ "description": "Columns to include in the CSV export",
+ "enum": [
+ "id",
+ "name",
+ "status",
+ "assistantId",
+ "squadId",
+ "customerName",
+ "customerNumber",
+ "phoneNumberId",
+ "cost",
+ "messages",
+ "createdAt",
+ "updatedAt"
+ ],
+ "default": [
+ "id",
+ "name",
+ "status",
+ "assistantId",
+ "squadId",
+ "customerName",
+ "customerNumber",
+ "phoneNumberId",
+ "cost",
+ "messages",
+ "createdAt",
+ "updatedAt"
]
},
+ "email": {
+ "type": "string",
+ "description": "This is the email address to send the export to.\nRequired if userId is not available in the request context."
+ },
+ "format": {
+ "type": "string",
+ "description": "This is the format of the export.\n\n@default csv",
+ "enum": [
+ "csv",
+ "json"
+ ],
+ "default": "csv"
+ },
"phoneNumberId": {
"type": "string",
- "description": "This is the phone number that will be used for the call. To use a transient number, use `phoneNumber` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type."
+ "description": "This will return sessions with the specified phoneNumberId."
},
- "phoneNumber": {
- "description": "This is the phone number that will be used for the call. To use an existing number, use `phoneNumberId` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ImportTwilioPhoneNumberDTO"
- }
+ "phoneNumberIdAny": {
+ "description": "This will return sessions with any of the specified phoneNumberIds.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "page": {
+ "type": "number",
+ "description": "This is the page number to return. Defaults to 1.",
+ "minimum": 1
+ },
+ "sortOrder": {
+ "type": "string",
+ "description": "This is the sort order for pagination. Defaults to 'DESC'.",
+ "enum": [
+ "ASC",
+ "DESC"
]
},
- "customerId": {
+ "limit": {
+ "type": "number",
+ "description": "This is the maximum number of items to return. Defaults to 100.",
+ "minimum": 0,
+ "maximum": 1000
+ },
+ "createdAtGt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the customer that will be called. To call a transient customer , use `customer` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type."
+ "description": "This will return items where the createdAt is greater than the specified value."
},
- "customer": {
- "description": "This is the customer that will be called. To call an existing customer, use `customerId` instead.\n\nOnly relevant for `outboundPhoneCall` and `inboundPhoneCall` type.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateCustomerDTO"
- }
- ]
+ "createdAtLt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the createdAt is less than the specified value."
+ },
+ "createdAtGe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the createdAt is greater than or equal to the specified value."
+ },
+ "createdAtLe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the createdAt is less than or equal to the specified value."
+ },
+ "updatedAtGt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is greater than the specified value."
+ },
+ "updatedAtLt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is less than the specified value."
+ },
+ "updatedAtGe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is greater than or equal to the specified value."
+ },
+ "updatedAtLe": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This will return items where the updatedAt is less than or equal to the specified value."
}
}
},
- "CreateWebCallDTO": {
+ "ByoPhoneNumber": {
"type": "object",
"properties": {
- "assistantId": {
- "type": "string",
- "description": "This is the assistant ID that will be used for the call. To use a transient assistant, use `assistant` instead.\n\nTo start a call with:\n- Assistant, use `assistantId` or `assistant`\n- Squad, use `squadId` or `squad`\n- Workflow, use `workflowId` or `workflow`"
- },
- "assistant": {
- "description": "This is the assistant that will be used for the call. To use an existing assistant, use `assistantId` instead.\n\nTo start a call with:\n- Assistant, use `assistant`\n- Squad, use `squad`\n- Workflow, use `workflow`",
- "allOf": [
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateAssistantDTO"
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
}
]
},
- "assistantOverrides": {
- "description": "These are the overrides for the `assistant` or `assistantId`'s settings and template variables.",
- "allOf": [
- {
- "$ref": "#/components/schemas/AssistantOverrides"
- }
+ "hooks": {
+ "type": "array",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
+ },
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
+ }
+ ]
+ }
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is to bring your own phone numbers from your own SIP trunks or Carriers.",
+ "enum": [
+ "byo-phone-number"
]
},
- "squadId": {
+ "numberE164CheckEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.\n\nUse cases:\n- `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.\n- `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.\n\nIf `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\\+?[a-zA-Z0-9]+$/`).\n\n@default true (E164 check is enabled)",
+ "default": true
+ },
+ "id": {
"type": "string",
- "description": "This is the squad that will be used for the call. To use a transient squad, use `squad` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
+ "description": "This is the unique identifier for the phone number."
},
- "squad": {
- "description": "This is a squad that will be used for the call. To use an existing squad, use `squadId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateSquadDTO"
- }
- ]
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this phone number belongs to."
},
- "workflowId": {
+ "createdAt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
+ "description": "This is the ISO 8601 date-time string of when the phone number was created."
},
- "workflow": {
- "description": "This is a workflow that will be used for the call. To use an existing workflow, use `workflowId` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateWorkflowDTO"
- }
- ]
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the phone number was last updated."
},
- "workflowOverrides": {
- "description": "These are the overrides for the `workflow` or `workflowId`'s settings and template variables.",
- "allOf": [
- {
- "$ref": "#/components/schemas/WorkflowOverrides"
- }
+ "status": {
+ "type": "string",
+ "description": "This is the status of the phone number.",
+ "enum": [
+ "active",
+ "activating",
+ "blocked"
]
- }
- }
- },
- "UpdateCallDTO": {
- "type": "object",
- "properties": {
+ },
"name": {
"type": "string",
- "description": "This is the name of the call. This is just for your own reference.",
+ "description": "This is the name of the phone number. This is just for your own reference.",
"maxLength": 40
- }
- }
- },
- "DeveloperMessage": {
- "type": "object",
- "properties": {
- "role": {
+ },
+ "assistantId": {
"type": "string",
- "description": "This is the role of the message author",
- "default": "developer",
- "enum": [
- "developer"
- ]
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "content": {
+ "workflowId": {
"type": "string",
- "description": "This is the content of the developer message",
- "maxLength": 10000
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "name": {
+ "squadId": {
"type": "string",
- "description": "This is an optional name for the participant",
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "number": {
+ "type": "string",
+ "description": "This is the number of the customer.",
+ "minLength": 3,
"maxLength": 40
},
- "metadata": {
- "type": "object",
- "description": "This is an optional metadata for the message"
+ "credentialId": {
+ "type": "string",
+ "description": "This is the credential of your own SIP trunk or Carrier (type `byo-sip-trunk`) which can be used to make calls to this phone number.\n\nYou can add the SIP trunk or Carrier credential in the Provider Credentials page on the Dashboard to get the credentialId."
}
},
"required": [
- "role",
- "content"
+ "provider",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt",
+ "credentialId"
]
},
- "SystemMessage": {
+ "TwilioPhoneNumber": {
"type": "object",
"properties": {
- "role": {
- "type": "string",
- "description": "The role of the system in the conversation."
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
+ }
+ ]
},
- "message": {
+ "hooks": {
+ "type": "array",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
+ },
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
+ }
+ ]
+ }
+ },
+ "provider": {
"type": "string",
- "description": "The message content from the system."
+ "description": "This is to use numbers bought on Twilio.",
+ "enum": [
+ "twilio"
+ ]
},
- "time": {
- "type": "number",
- "description": "The timestamp when the message was sent."
+ "smsEnabled": {
+ "type": "boolean",
+ "description": "Controls whether Vapi sets the messaging webhook URL on the Twilio number during import.\n\nIf set to `false`, Vapi will not update the Twilio messaging URL, leaving it as is.\nIf `true` or omitted (default), Vapi will configure both the voice and messaging URLs.\n\n@default true",
+ "default": true
},
- "secondsFromStart": {
- "type": "number",
- "description": "The number of seconds from the start of the conversation."
- }
- },
- "required": [
- "role",
- "message",
- "time",
- "secondsFromStart"
- ]
- },
- "UserMessage": {
- "type": "object",
- "properties": {
- "role": {
+ "id": {
"type": "string",
- "description": "The role of the user in the conversation."
+ "description": "This is the unique identifier for the phone number."
},
- "message": {
+ "orgId": {
"type": "string",
- "description": "The message content from the user."
- },
- "time": {
- "type": "number",
- "description": "The timestamp when the message was sent."
- },
- "endTime": {
- "type": "number",
- "description": "The timestamp when the message ended."
+ "description": "This is the unique identifier for the org that this phone number belongs to."
},
- "secondsFromStart": {
- "type": "number",
- "description": "The number of seconds from the start of the conversation."
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the phone number was created."
},
- "duration": {
- "type": "number",
- "description": "The duration of the message in seconds."
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the phone number was last updated."
},
- "isFiltered": {
- "type": "boolean",
- "description": "Indicates if the message was filtered for security reasons."
+ "status": {
+ "type": "string",
+ "description": "This is the status of the phone number.",
+ "enum": [
+ "active",
+ "activating",
+ "blocked"
+ ]
},
- "detectedThreats": {
- "description": "List of detected security threats if the message was filtered.",
- "type": "array",
- "items": {
- "type": "string"
- }
+ "twilioAuthToken": {
+ "type": "string",
+ "description": "This is the Twilio Auth Token for the phone number."
},
- "originalMessage": {
+ "twilioApiKey": {
"type": "string",
- "description": "The original message before filtering (only included if content was filtered)."
- }
- },
- "required": [
- "role",
- "message",
- "time",
- "endTime",
- "secondsFromStart"
- ]
- },
- "ToolCallFunction": {
- "type": "object",
- "properties": {
- "arguments": {
+ "description": "This is the Twilio API Key for the phone number."
+ },
+ "twilioApiSecret": {
"type": "string",
- "description": "This is the arguments to call the function with"
+ "description": "This is the Twilio API Secret for the phone number."
},
"name": {
"type": "string",
- "description": "This is the name of the function to call",
+ "description": "This is the name of the phone number. This is just for your own reference.",
"maxLength": 40
- }
- },
- "required": [
- "arguments",
- "name"
- ]
- },
- "ToolCall": {
- "type": "object",
- "properties": {
- "id": {
+ },
+ "assistantId": {
"type": "string",
- "description": "This is the ID of the tool call"
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "type": {
+ "workflowId": {
"type": "string",
- "description": "This is the type of tool"
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "function": {
- "description": "This is the function that was called",
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
"allOf": [
{
- "$ref": "#/components/schemas/ToolCallFunction"
+ "$ref": "#/components/schemas/Server"
}
]
+ },
+ "number": {
+ "type": "string",
+ "description": "These are the digits of the phone number you own on your Twilio."
+ },
+ "twilioAccountSid": {
+ "type": "string",
+ "description": "This is the Twilio Account SID for the phone number."
}
},
"required": [
+ "provider",
"id",
- "type",
- "function"
+ "orgId",
+ "createdAt",
+ "updatedAt",
+ "number",
+ "twilioAccountSid"
]
},
- "AssistantMessage": {
+ "VonagePhoneNumber": {
"type": "object",
"properties": {
- "role": {
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
+ }
+ ]
+ },
+ "hooks": {
+ "type": "array",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
+ },
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
+ }
+ ]
+ }
+ },
+ "provider": {
"type": "string",
- "description": "This is the role of the message author",
- "default": "assistant",
+ "description": "This is to use numbers bought on Vonage.",
"enum": [
- "assistant"
+ "vonage"
]
},
- "content": {
+ "id": {
"type": "string",
- "description": "This is the content of the assistant message",
- "maxLength": 10000
+ "description": "This is the unique identifier for the phone number."
},
- "refusal": {
+ "orgId": {
"type": "string",
- "description": "This is the refusal message generated by the model",
- "maxLength": 10000
+ "description": "This is the unique identifier for the org that this phone number belongs to."
},
- "tool_calls": {
- "description": "This is the tool calls generated by the model",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/ToolCall"
- }
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the phone number was created."
},
- "name": {
+ "updatedAt": {
+ "format": "date-time",
"type": "string",
- "description": "This is an optional name for the participant",
- "maxLength": 40
+ "description": "This is the ISO 8601 date-time string of when the phone number was last updated."
},
- "metadata": {
- "type": "object",
- "description": "This is an optional metadata for the message"
- }
- },
- "required": [
- "role"
- ]
- },
- "ToolMessage": {
- "type": "object",
- "properties": {
- "role": {
+ "status": {
"type": "string",
- "description": "This is the role of the message author",
- "default": "tool",
+ "description": "This is the status of the phone number.",
"enum": [
- "tool"
+ "active",
+ "activating",
+ "blocked"
]
},
- "content": {
+ "name": {
"type": "string",
- "description": "This is the content of the tool message",
- "maxLength": 10000
+ "description": "This is the name of the phone number. This is just for your own reference.",
+ "maxLength": 40
},
- "tool_call_id": {
+ "assistantId": {
"type": "string",
- "description": "This is the ID of the tool call this message is responding to"
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "name": {
+ "workflowId": {
"type": "string",
- "description": "This is an optional name for the participant",
- "maxLength": 40
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "metadata": {
- "type": "object",
- "description": "This is an optional metadata for the message"
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "number": {
+ "type": "string",
+ "description": "These are the digits of the phone number you own on your Vonage."
+ },
+ "credentialId": {
+ "type": "string",
+ "description": "This is the credential you added in dashboard.vapi.ai/keys. This is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups."
}
},
"required": [
- "role",
- "content",
- "tool_call_id"
+ "provider",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt",
+ "number",
+ "credentialId"
]
},
- "FunctionCall": {
+ "SipAuthentication": {
"type": "object",
"properties": {
- "arguments": {
+ "realm": {
"type": "string",
- "description": "This is the arguments to call the function with"
+ "description": "This will be expected in the `realm` field of the `authorization` header of the SIP INVITE. Defaults to sip.vapi.ai."
},
- "name": {
+ "username": {
"type": "string",
- "description": "This is the name of the function to call",
+ "description": "This will be expected in the `username` field of the `authorization` header of the SIP INVITE.",
+ "minLength": 20,
+ "maxLength": 40
+ },
+ "password": {
+ "type": "string",
+ "description": "This will be expected to generate the `response` field of the `authorization` header of the SIP INVITE, through digest authentication.",
+ "minLength": 20,
"maxLength": 40
}
},
"required": [
- "arguments",
- "name"
+ "username",
+ "password"
]
},
- "Chat": {
+ "VapiPhoneNumber": {
"type": "object",
"properties": {
- "assistantId": {
- "type": "string",
- "description": "This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead."
- },
- "assistant": {
- "description": "This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead.",
- "allOf": [
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateAssistantDTO"
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
}
]
},
- "assistantOverrides": {
- "description": "These are the variable values that will be used to replace template variables in the assistant messages.\nOnly variable substitution is supported in chat contexts - other assistant properties cannot be overridden.",
- "allOf": [
- {
- "$ref": "#/components/schemas/AssistantOverrides"
- }
+ "hooks": {
+ "type": "array",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
+ },
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
+ }
+ ]
+ }
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is to create free SIP phone numbers on Vapi.",
+ "enum": [
+ "vapi"
]
},
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the phone number."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this phone number belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the phone number was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the phone number was last updated."
+ },
+ "status": {
+ "type": "string",
+ "description": "This is the status of the phone number.",
+ "enum": [
+ "active",
+ "activating",
+ "blocked"
+ ]
+ },
+ "number": {
+ "type": "string",
+ "description": "These are the digits of the phone number you purchased from Vapi."
+ },
"name": {
"type": "string",
- "description": "This is the name of the chat. This is just for your own reference.",
+ "description": "This is the name of the phone number. This is just for your own reference.",
"maxLength": 40
},
- "sessionId": {
+ "assistantId": {
"type": "string",
- "description": "This is the ID of the session that will be used for the chat.\nMutually exclusive with previousChatId."
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "input": {
- "description": "This is the input text for the chat.\nCan be a string or an array of chat messages.",
- "oneOf": [
- {
- "type": "string",
- "title": "String"
- },
- {
- "type": "array",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
- },
- {
- "$ref": "#/components/schemas/AssistantMessage",
- "title": "AssistantMessage"
- },
- {
- "$ref": "#/components/schemas/ToolMessage",
- "title": "ToolMessage"
- },
- {
- "$ref": "#/components/schemas/DeveloperMessage",
- "title": "DeveloperMessage"
- }
- ]
- },
- "title": "MessageArray"
- }
- ],
- "examples": [
- "Hello, how can you help me?",
- [
- {
- "role": "user",
- "content": "Hello, how can you help me?"
- }
- ]
- ]
+ "workflowId": {
+ "type": "string",
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "stream": {
- "type": "boolean",
- "description": "This is a flag that determines whether the response should be streamed.\nWhen true, the response will be sent as chunks of text.",
- "default": false
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
},
- "previousChatId": {
+ "numberDesiredAreaCode": {
"type": "string",
- "description": "This is the ID of the chat that will be used as context for the new chat.\nThe messages from the previous chat will be used as context.\nMutually exclusive with sessionId."
+ "description": "This is the area code of the phone number to purchase.",
+ "minLength": 3,
+ "maxLength": 3
},
- "id": {
+ "sipUri": {
"type": "string",
- "description": "This is the unique identifier for the chat."
+ "description": "This is the SIP URI of the phone number. You can SIP INVITE this. The assistant attached to this number will answer.\n\nThis is case-insensitive."
},
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the org that this chat belongs to."
+ "authentication": {
+ "description": "This enables authentication for incoming SIP INVITE requests to the `sipUri`.\n\nIf not set, any username/password to the 401 challenge of the SIP INVITE will be accepted.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SipAuthentication"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "TelnyxPhoneNumber": {
+ "type": "object",
+ "properties": {
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
+ }
+ ]
},
- "messages": {
+ "hooks": {
"type": "array",
- "description": "This is an array of messages used as context for the chat.\nUsed to provide message history for multi-turn conversations.",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
- },
- {
- "$ref": "#/components/schemas/AssistantMessage",
- "title": "AssistantMessage"
- },
- {
- "$ref": "#/components/schemas/ToolMessage",
- "title": "ToolMessage"
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
},
{
- "$ref": "#/components/schemas/DeveloperMessage",
- "title": "DeveloperMessage"
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
}
]
}
},
- "output": {
- "type": "array",
- "description": "This is the output messages generated by the system in response to the input.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
- },
- {
- "$ref": "#/components/schemas/AssistantMessage",
- "title": "AssistantMessage"
- },
- {
- "$ref": "#/components/schemas/ToolMessage",
- "title": "ToolMessage"
- },
- {
- "$ref": "#/components/schemas/DeveloperMessage",
- "title": "DeveloperMessage"
- }
- ]
- }
+ "provider": {
+ "type": "string",
+ "description": "This is to use numbers bought on Telnyx.",
+ "enum": [
+ "telnyx"
+ ]
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the phone number."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this phone number belongs to."
},
"createdAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the chat was created."
+ "description": "This is the ISO 8601 date-time string of when the phone number was created."
},
"updatedAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the chat was last updated."
+ "description": "This is the ISO 8601 date-time string of when the phone number was last updated."
},
- "costs": {
- "type": "array",
- "description": "These are the costs of individual components of the chat in USD.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ModelCost",
- "title": "ModelCost"
- },
- {
- "$ref": "#/components/schemas/ChatCost",
- "title": "ChatCost"
- }
- ]
- }
+ "status": {
+ "type": "string",
+ "description": "This is the status of the phone number.",
+ "enum": [
+ "active",
+ "activating",
+ "blocked"
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the phone number. This is just for your own reference.",
+ "maxLength": 40
},
- "cost": {
- "type": "number",
- "description": "This is the cost of the chat in USD."
- }
- },
- "required": [
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
- ]
- },
- "CreateChatDTO": {
- "type": "object",
- "properties": {
"assistantId": {
"type": "string",
- "description": "This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead."
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "assistant": {
- "description": "This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateAssistantDTO"
- }
- ]
+ "workflowId": {
+ "type": "string",
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "assistantOverrides": {
- "description": "These are the variable values that will be used to replace template variables in the assistant messages.\nOnly variable substitution is supported in chat contexts - other assistant properties cannot be overridden.",
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
"allOf": [
{
- "$ref": "#/components/schemas/AssistantOverrides"
+ "$ref": "#/components/schemas/Server"
}
]
},
- "name": {
+ "number": {
"type": "string",
- "description": "This is the name of the chat. This is just for your own reference.",
- "maxLength": 40
+ "description": "These are the digits of the phone number you own on your Telnyx."
},
- "sessionId": {
+ "credentialId": {
"type": "string",
- "description": "This is the ID of the session that will be used for the chat.\nMutually exclusive with previousChatId."
- },
- "input": {
- "description": "This is the input text for the chat.\nCan be a string or an array of chat messages.\nThis field is REQUIRED for chat creation.",
+ "description": "This is the credential you added in dashboard.vapi.ai/keys. This is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups."
+ }
+ },
+ "required": [
+ "provider",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt",
+ "number",
+ "credentialId"
+ ]
+ },
+ "CreateByoPhoneNumberDTO": {
+ "type": "object",
+ "properties": {
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
"oneOf": [
{
- "type": "string",
- "title": "String"
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
},
{
- "type": "array",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
- },
- {
- "$ref": "#/components/schemas/AssistantMessage",
- "title": "AssistantMessage"
- },
- {
- "$ref": "#/components/schemas/ToolMessage",
- "title": "ToolMessage"
- },
- {
- "$ref": "#/components/schemas/DeveloperMessage",
- "title": "DeveloperMessage"
- }
- ]
- },
- "title": "MessageArray"
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
}
- ],
- "examples": [
- "Hello, how can you help me?",
- [
+ ]
+ },
+ "hooks": {
+ "type": "array",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "items": {
+ "oneOf": [
{
- "role": "user",
- "content": "Hello, how can you help me?"
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
+ },
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
}
]
+ }
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is to bring your own phone numbers from your own SIP trunks or Carriers.",
+ "enum": [
+ "byo-phone-number"
]
},
- "stream": {
+ "numberE164CheckEnabled": {
"type": "boolean",
- "description": "This is a flag that determines whether the response should be streamed.\nWhen true, the response will be sent as chunks of text.",
- "default": false
+ "description": "This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.\n\nUse cases:\n- `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.\n- `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.\n\nIf `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\\+?[a-zA-Z0-9]+$/`).\n\n@default true (E164 check is enabled)",
+ "default": true
},
- "previousChatId": {
+ "number": {
"type": "string",
- "description": "This is the ID of the chat that will be used as context for the new chat.\nThe messages from the previous chat will be used as context.\nMutually exclusive with sessionId."
- }
- },
- "required": [
- "input"
- ]
- },
- "GetChatPaginatedDTO": {
- "type": "object",
- "properties": {
+ "description": "This is the number of the customer.",
+ "minLength": 3,
+ "maxLength": 40
+ },
+ "credentialId": {
+ "type": "string",
+ "description": "This is the credential of your own SIP trunk or Carrier (type `byo-sip-trunk`) which can be used to make calls to this phone number.\n\nYou can add the SIP trunk or Carrier credential in the Provider Credentials page on the Dashboard to get the credentialId."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the phone number. This is just for your own reference.",
+ "maxLength": 40
+ },
"assistantId": {
"type": "string",
- "description": "This is the unique identifier for the assistant that will be used for the chat."
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
"workflowId": {
"type": "string",
- "description": "This is the unique identifier for the workflow that will be used for the chat."
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "sessionId": {
+ "squadId": {
"type": "string",
- "description": "This is the unique identifier for the session that will be used for the chat."
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "page": {
- "type": "number",
- "description": "This is the page number to return. Defaults to 1.",
- "minimum": 1
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "credentialId"
+ ]
+ },
+ "CreateTwilioPhoneNumberDTO": {
+ "type": "object",
+ "properties": {
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
+ }
+ ]
},
- "sortOrder": {
+ "hooks": {
+ "type": "array",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
+ },
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
+ }
+ ]
+ }
+ },
+ "provider": {
"type": "string",
- "description": "This is the sort order for pagination. Defaults to 'DESC'.",
+ "description": "This is to use numbers bought on Twilio.",
"enum": [
- "ASC",
- "DESC"
+ "twilio"
]
},
- "limit": {
- "type": "number",
- "description": "This is the maximum number of items to return. Defaults to 100.",
- "minimum": 0,
- "maximum": 1000
+ "smsEnabled": {
+ "type": "boolean",
+ "description": "Controls whether Vapi sets the messaging webhook URL on the Twilio number during import.\n\nIf set to `false`, Vapi will not update the Twilio messaging URL, leaving it as is.\nIf `true` or omitted (default), Vapi will configure both the voice and messaging URLs.\n\n@default true",
+ "default": true
},
- "createdAtGt": {
- "format": "date-time",
+ "number": {
"type": "string",
- "description": "This will return items where the createdAt is greater than the specified value."
+ "description": "These are the digits of the phone number you own on your Twilio."
},
- "createdAtLt": {
- "format": "date-time",
+ "twilioAccountSid": {
"type": "string",
- "description": "This will return items where the createdAt is less than the specified value."
+ "description": "This is the Twilio Account SID for the phone number."
},
- "createdAtGe": {
- "format": "date-time",
+ "twilioAuthToken": {
"type": "string",
- "description": "This will return items where the createdAt is greater than or equal to the specified value."
+ "description": "This is the Twilio Auth Token for the phone number."
},
- "createdAtLe": {
- "format": "date-time",
+ "twilioApiKey": {
"type": "string",
- "description": "This will return items where the createdAt is less than or equal to the specified value."
+ "description": "This is the Twilio API Key for the phone number."
},
- "updatedAtGt": {
- "format": "date-time",
+ "twilioApiSecret": {
"type": "string",
- "description": "This will return items where the updatedAt is greater than the specified value."
+ "description": "This is the Twilio API Secret for the phone number."
},
- "updatedAtLt": {
- "format": "date-time",
+ "name": {
"type": "string",
- "description": "This will return items where the updatedAt is less than the specified value."
+ "description": "This is the name of the phone number. This is just for your own reference.",
+ "maxLength": 40
},
- "updatedAtGe": {
- "format": "date-time",
+ "assistantId": {
"type": "string",
- "description": "This will return items where the updatedAt is greater than or equal to the specified value."
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "updatedAtLe": {
- "format": "date-time",
+ "workflowId": {
"type": "string",
- "description": "This will return items where the updatedAt is less than or equal to the specified value."
- }
- }
- },
- "ChatPaginatedResponse": {
- "type": "object",
- "properties": {
- "results": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/Chat"
- }
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "metadata": {
- "$ref": "#/components/schemas/PaginationMeta"
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
}
},
"required": [
- "results",
- "metadata"
+ "provider",
+ "number",
+ "twilioAccountSid"
]
},
- "CreateChatStreamResponse": {
+ "CreateVonagePhoneNumberDTO": {
"type": "object",
"properties": {
- "id": {
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
+ }
+ ]
+ },
+ "hooks": {
+ "type": "array",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
+ },
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
+ }
+ ]
+ }
+ },
+ "provider": {
"type": "string",
- "description": "This is the unique identifier for the streaming response."
+ "description": "This is to use numbers bought on Vonage.",
+ "enum": [
+ "vonage"
+ ]
},
- "sessionId": {
+ "number": {
"type": "string",
- "description": "This is the ID of the session that will be used for the chat.\nHelps track conversation context across multiple messages."
+ "description": "These are the digits of the phone number you own on your Vonage."
},
- "path": {
+ "credentialId": {
"type": "string",
- "description": "This is the path to the content being updated.\nFormat: `chat.output[{contentIndex}].content` where contentIndex identifies the specific content item.",
- "example": "chat.output[0].content"
+ "description": "This is the credential you added in dashboard.vapi.ai/keys. This is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups."
},
- "delta": {
+ "name": {
"type": "string",
- "description": "This is the incremental content chunk being streamed."
+ "description": "This is the name of the phone number. This is just for your own reference.",
+ "maxLength": 40
+ },
+ "assistantId": {
+ "type": "string",
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "workflowId": {
+ "type": "string",
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
}
},
"required": [
- "id",
- "path",
- "delta"
+ "provider",
+ "number",
+ "credentialId"
]
},
- "OpenAIResponsesRequest": {
+ "CreateVapiPhoneNumberDTO": {
"type": "object",
"properties": {
- "assistantId": {
- "type": "string",
- "description": "This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead."
- },
- "assistant": {
- "description": "This is the assistant that will be used for the chat. To use an existing assistant, use `assistantId` instead.",
- "allOf": [
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateAssistantDTO"
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
}
]
},
- "assistantOverrides": {
- "description": "These are the variable values that will be used to replace template variables in the assistant messages.\nOnly variable substitution is supported in chat contexts - other assistant properties cannot be overridden.",
+ "hooks": {
+ "type": "array",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
+ },
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
+ }
+ ]
+ }
+ },
+ "provider": {
+ "type": "string",
+ "description": "This is to create free SIP phone numbers on Vapi.",
+ "enum": [
+ "vapi"
+ ]
+ },
+ "numberDesiredAreaCode": {
+ "type": "string",
+ "description": "This is the area code of the phone number to purchase.",
+ "minLength": 3,
+ "maxLength": 3
+ },
+ "sipUri": {
+ "type": "string",
+ "description": "This is the SIP URI of the phone number. You can SIP INVITE this. The assistant attached to this number will answer.\n\nThis is case-insensitive."
+ },
+ "authentication": {
+ "description": "This enables authentication for incoming SIP INVITE requests to the `sipUri`.\n\nIf not set, any username/password to the 401 challenge of the SIP INVITE will be accepted.",
"allOf": [
{
- "$ref": "#/components/schemas/AssistantOverrides"
+ "$ref": "#/components/schemas/SipAuthentication"
}
]
},
"name": {
"type": "string",
- "description": "This is the name of the chat. This is just for your own reference.",
+ "description": "This is the name of the phone number. This is just for your own reference.",
"maxLength": 40
},
- "sessionId": {
+ "assistantId": {
"type": "string",
- "description": "This is the ID of the session that will be used for the chat.\nMutually exclusive with previousChatId."
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "input": {
- "description": "This is the input text for the chat.\nCan be a string or an array of chat messages.\nThis field is REQUIRED for chat creation.",
- "oneOf": [
- {
- "type": "string",
- "title": "String"
- },
+ "workflowId": {
+ "type": "string",
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "allOf": [
{
- "type": "array",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
- },
- {
- "$ref": "#/components/schemas/AssistantMessage",
- "title": "AssistantMessage"
- },
- {
- "$ref": "#/components/schemas/ToolMessage",
- "title": "ToolMessage"
- },
- {
- "$ref": "#/components/schemas/DeveloperMessage",
- "title": "DeveloperMessage"
- }
- ]
- },
- "title": "MessageArray"
+ "$ref": "#/components/schemas/Server"
}
- ],
- "examples": [
- "Hello, how can you help me?",
- [
- {
- "role": "user",
- "content": "Hello, how can you help me?"
- }
- ]
]
- },
- "stream": {
- "type": "boolean",
- "description": "Whether to stream the response or not.",
- "default": true
- },
- "previousChatId": {
- "type": "string",
- "description": "This is the ID of the chat that will be used as context for the new chat.\nThe messages from the previous chat will be used as context.\nMutually exclusive with sessionId."
}
},
"required": [
- "input"
+ "provider"
]
},
- "ChatAssistantOverrides": {
+ "CreateTelnyxPhoneNumberDTO": {
"type": "object",
"properties": {
- "variableValues": {
- "type": "object",
- "description": "Variable values for template substitution",
- "example": {
- "name": "John",
- "company": "ACME Corp"
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
+ }
+ ]
+ },
+ "hooks": {
+ "type": "array",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
+ },
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
+ }
+ ]
}
- }
- }
- },
- "CreateWebCustomerDTO": {
- "type": "object",
- "properties": {
- "numberE164CheckEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.\n\nUse cases:\n- `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.\n- `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.\n\nIf `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\\+?[a-zA-Z0-9]+$/`).\n\n@default true (E164 check is enabled)",
- "default": true
},
- "extension": {
+ "provider": {
"type": "string",
- "description": "This is the extension that will be dialed after the call is answered.",
- "maxLength": 10,
- "example": null
- },
- "assistantOverrides": {
- "description": "These are the variable values that will be used to replace template variables in the assistant messages.\nOnly variable substitution is supported in web chat - other assistant properties cannot be overridden.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChatAssistantOverrides"
- }
+ "description": "This is to use numbers bought on Telnyx.",
+ "enum": [
+ "telnyx"
]
},
"number": {
"type": "string",
- "description": "This is the number of the customer.",
- "minLength": 3,
- "maxLength": 40
+ "description": "These are the digits of the phone number you own on your Telnyx."
},
- "sipUri": {
+ "credentialId": {
"type": "string",
- "description": "This is the SIP URI of the customer."
+ "description": "This is the credential you added in dashboard.vapi.ai/keys. This is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups."
},
"name": {
"type": "string",
- "description": "This is the name of the customer. This is just for your own reference.\n\nFor SIP inbound calls, this is extracted from the `From` SIP header with format `\"Display Name\" `.",
- "maxLength": 40
- },
- "email": {
- "type": "string",
- "description": "This is the email of the customer.",
+ "description": "This is the name of the phone number. This is just for your own reference.",
"maxLength": 40
},
- "externalId": {
- "type": "string",
- "description": "This is the external ID of the customer.",
- "maxLength": 40
- }
- }
- },
- "CreateWebChatDTO": {
- "type": "object",
- "properties": {
"assistantId": {
"type": "string",
- "description": "The assistant ID to use for this chat"
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "sessionId": {
+ "workflowId": {
"type": "string",
- "description": "This is the ID of the session that will be used for the chat.\nIf provided, the conversation will continue from the previous state.\nIf not provided or expired, a new session will be created."
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "assistantOverrides": {
- "description": "These are the variable values that will be used to replace template variables in the assistant messages.\nOnly variable substitution is supported in web chat - other assistant properties cannot be overridden.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChatAssistantOverrides"
- }
- ]
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "customer": {
- "description": "This is the customer information for the chat.\nUsed to automatically manage sessions for repeat customers.",
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
"allOf": [
{
- "$ref": "#/components/schemas/CreateWebCustomerDTO"
- }
- ]
- },
- "input": {
- "description": "This is the input text for the chat.\nCan be a string or an array of chat messages.",
- "oneOf": [
- {
- "type": "string",
- "title": "String"
- },
- {
- "type": "array",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
- },
- {
- "$ref": "#/components/schemas/AssistantMessage",
- "title": "AssistantMessage"
- },
- {
- "$ref": "#/components/schemas/ToolMessage",
- "title": "ToolMessage"
- },
- {
- "$ref": "#/components/schemas/DeveloperMessage",
- "title": "DeveloperMessage"
- }
- ]
- },
- "title": "MessageArray"
+ "$ref": "#/components/schemas/Server"
}
- ],
- "examples": [
- "Hello, how can you help me?",
- [
- {
- "role": "user",
- "content": "Hello, how can you help me?"
- }
- ]
]
- },
- "stream": {
- "type": "boolean",
- "description": "This is a flag that determines whether the response should be streamed.\nWhen true, the response will be sent as chunks of text.",
- "default": false
}
},
"required": [
- "assistantId",
- "input"
+ "provider",
+ "number",
+ "credentialId"
]
},
- "WebChat": {
+ "UpdateByoPhoneNumberDTO": {
"type": "object",
"properties": {
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the chat."
- },
- "sessionId": {
- "type": "string",
- "description": "This is the ID of the session for the chat. Send it in the next chat request to continue the conversation."
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
+ }
+ ]
},
- "output": {
+ "hooks": {
"type": "array",
- "description": "This is the output messages generated by the system in response to the input.",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
- },
- {
- "$ref": "#/components/schemas/AssistantMessage",
- "title": "AssistantMessage"
- },
- {
- "$ref": "#/components/schemas/ToolMessage",
- "title": "ToolMessage"
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
},
{
- "$ref": "#/components/schemas/DeveloperMessage",
- "title": "DeveloperMessage"
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
}
]
}
- }
- },
- "required": [
- "id",
- "output"
- ]
- },
- "OpenAIWebChatRequest": {
- "type": "object",
- "properties": {
+ },
+ "numberE164CheckEnabled": {
+ "type": "boolean",
+ "description": "This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.\n\nUse cases:\n- `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.\n- `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.\n\nIf `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\\+?[a-zA-Z0-9]+$/`).\n\n@default true (E164 check is enabled)",
+ "default": true
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the phone number. This is just for your own reference.",
+ "maxLength": 40
+ },
"assistantId": {
"type": "string",
- "description": "The assistant ID to use for this chat"
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "sessionId": {
+ "workflowId": {
"type": "string",
- "description": "This is the ID of the session that will be used for the chat.\nIf provided, the conversation will continue from the previous state.\nIf not provided or expired, a new session will be created."
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "assistantOverrides": {
- "description": "These are the variable values that will be used to replace template variables in the assistant messages.\nOnly variable substitution is supported in web chat - other assistant properties cannot be overridden.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ChatAssistantOverrides"
- }
- ]
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "customer": {
- "description": "This is the customer information for the chat.\nUsed to automatically manage sessions for repeat customers.",
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
"allOf": [
{
- "$ref": "#/components/schemas/CreateWebCustomerDTO"
+ "$ref": "#/components/schemas/Server"
}
]
},
- "input": {
- "description": "This is the input text for the chat.\nCan be a string or an array of chat messages.",
+ "number": {
+ "type": "string",
+ "description": "This is the number of the customer.",
+ "minLength": 3,
+ "maxLength": 40
+ },
+ "credentialId": {
+ "type": "string",
+ "description": "This is the credential of your own SIP trunk or Carrier (type `byo-sip-trunk`) which can be used to make calls to this phone number.\n\nYou can add the SIP trunk or Carrier credential in the Provider Credentials page on the Dashboard to get the credentialId."
+ }
+ }
+ },
+ "UpdateTwilioPhoneNumberDTO": {
+ "type": "object",
+ "properties": {
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
"oneOf": [
{
- "type": "string",
- "title": "String"
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
},
{
- "type": "array",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
- },
- {
- "$ref": "#/components/schemas/AssistantMessage",
- "title": "AssistantMessage"
- },
- {
- "$ref": "#/components/schemas/ToolMessage",
- "title": "ToolMessage"
- },
- {
- "$ref": "#/components/schemas/DeveloperMessage",
- "title": "DeveloperMessage"
- }
- ]
- },
- "title": "MessageArray"
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
}
- ],
- "examples": [
- "Hello, how can you help me?",
- [
+ ]
+ },
+ "hooks": {
+ "type": "array",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "items": {
+ "oneOf": [
{
- "role": "user",
- "content": "Hello, how can you help me?"
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
+ },
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
}
]
- ]
+ }
},
- "stream": {
+ "smsEnabled": {
"type": "boolean",
- "description": "Whether to stream the response or not.",
+ "description": "Controls whether Vapi sets the messaging webhook URL on the Twilio number during import.\n\nIf set to `false`, Vapi will not update the Twilio messaging URL, leaving it as is.\nIf `true` or omitted (default), Vapi will configure both the voice and messaging URLs.\n\n@default true",
"default": true
- }
- },
- "required": [
- "assistantId",
- "input"
- ]
- },
- "ResponseOutputText": {
- "type": "object",
- "properties": {
- "annotations": {
- "default": [],
- "description": "Annotations in the text output",
- "type": "array",
- "items": {
- "type": "object"
- }
},
- "text": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the phone number. This is just for your own reference.",
+ "maxLength": 40
+ },
+ "assistantId": {
+ "type": "string",
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "workflowId": {
+ "type": "string",
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "squadId": {
+ "type": "string",
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "number": {
+ "type": "string",
+ "description": "These are the digits of the phone number you own on your Twilio."
+ },
+ "twilioAccountSid": {
+ "type": "string",
+ "description": "This is the Twilio Account SID for the phone number."
+ },
+ "twilioAuthToken": {
+ "type": "string",
+ "description": "This is the Twilio Auth Token for the phone number."
+ },
+ "twilioApiKey": {
"type": "string",
- "description": "The text output from the model"
+ "description": "This is the Twilio API Key for the phone number."
},
- "type": {
+ "twilioApiSecret": {
"type": "string",
- "default": "output_text",
- "description": "The type of the output text",
- "enum": [
- "output_text"
- ]
+ "description": "This is the Twilio API Secret for the phone number."
}
- },
- "required": [
- "annotations",
- "text",
- "type"
- ]
+ }
},
- "ResponseOutputMessage": {
+ "UpdateVonagePhoneNumberDTO": {
"type": "object",
"properties": {
- "id": {
- "type": "string",
- "description": "The unique ID of the output message"
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
+ }
+ ]
},
- "content": {
- "description": "Content of the output message",
+ "hooks": {
"type": "array",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
"items": {
- "$ref": "#/components/schemas/ResponseOutputText"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
+ },
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
+ }
+ ]
}
},
- "role": {
+ "name": {
"type": "string",
- "default": "assistant",
- "description": "The role of the output message",
- "enum": [
- "assistant"
- ]
+ "description": "This is the name of the phone number. This is just for your own reference.",
+ "maxLength": 40
},
- "status": {
+ "assistantId": {
"type": "string",
- "description": "The status of the message",
- "enum": [
- "in_progress",
- "completed",
- "incomplete"
- ]
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "type": {
- "type": "string",
- "default": "message",
- "description": "The type of the output message",
- "enum": [
- "message"
- ]
- }
- },
- "required": [
- "id",
- "content",
- "role",
- "status",
- "type"
- ]
- },
- "ResponseObject": {
- "type": "object",
- "properties": {
- "id": {
+ "workflowId": {
"type": "string",
- "description": "Unique identifier for this Response"
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "object": {
+ "squadId": {
"type": "string",
- "default": "response",
- "description": "The object type",
- "enum": [
- "response"
- ]
- },
- "created_at": {
- "type": "number",
- "description": "Unix timestamp (in seconds) of when this Response was created"
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "status": {
- "type": "string",
- "description": "Status of the response",
- "enum": [
- "completed",
- "failed",
- "in_progress",
- "incomplete"
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
]
},
- "error": {
+ "number": {
"type": "string",
- "nullable": true,
- "default": null,
- "description": "Error message if the response failed"
+ "description": "These are the digits of the phone number you own on your Vonage."
},
- "output": {
- "description": "Output messages from the model",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/ResponseOutputMessage"
- }
+ "credentialId": {
+ "type": "string",
+ "description": "This is the credential you added in dashboard.vapi.ai/keys. This is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups."
}
- },
- "required": [
- "id",
- "object",
- "created_at",
- "status",
- "output"
- ]
+ }
},
- "ResponseTextDeltaEvent": {
+ "UpdateVapiPhoneNumberDTO": {
"type": "object",
"properties": {
- "content_index": {
- "type": "number",
- "description": "Index of the content part"
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
+ }
+ ]
},
- "delta": {
+ "hooks": {
+ "type": "array",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
+ },
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
+ }
+ ]
+ }
+ },
+ "name": {
"type": "string",
- "description": "Text delta being added"
+ "description": "This is the name of the phone number. This is just for your own reference.",
+ "maxLength": 40
},
- "item_id": {
+ "assistantId": {
"type": "string",
- "description": "ID of the output item"
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "output_index": {
- "type": "number",
- "description": "Index of the output item"
+ "workflowId": {
+ "type": "string",
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "type": {
+ "squadId": {
"type": "string",
- "default": "response.output_text.delta",
- "description": "Event type",
- "enum": [
- "response.output_text.delta"
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "sipUri": {
+ "type": "string",
+ "description": "This is the SIP URI of the phone number. You can SIP INVITE this. The assistant attached to this number will answer.\n\nThis is case-insensitive."
+ },
+ "authentication": {
+ "description": "This enables authentication for incoming SIP INVITE requests to the `sipUri`.\n\nIf not set, any username/password to the 401 challenge of the SIP INVITE will be accepted.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SipAuthentication"
+ }
]
}
- },
- "required": [
- "content_index",
- "delta",
- "item_id",
- "output_index",
- "type"
- ]
+ }
},
- "ResponseTextDoneEvent": {
+ "UpdateTelnyxPhoneNumberDTO": {
"type": "object",
"properties": {
- "content_index": {
- "type": "number",
- "description": "Index of the content part"
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
+ }
+ ]
},
- "item_id": {
+ "hooks": {
+ "type": "array",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
+ },
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
+ }
+ ]
+ }
+ },
+ "name": {
"type": "string",
- "description": "ID of the output item"
+ "description": "This is the name of the phone number. This is just for your own reference.",
+ "maxLength": 40
},
- "output_index": {
- "type": "number",
- "description": "Index of the output item"
+ "assistantId": {
+ "type": "string",
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "text": {
+ "workflowId": {
"type": "string",
- "description": "Complete text content"
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "type": {
+ "squadId": {
"type": "string",
- "default": "response.output_text.done",
- "description": "Event type",
- "enum": [
- "response.output_text.done"
- ]
- }
- },
- "required": [
- "content_index",
- "item_id",
- "output_index",
- "text",
- "type"
- ]
- },
- "ResponseCompletedEvent": {
- "type": "object",
- "properties": {
- "response": {
- "description": "The completed response",
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ },
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
"allOf": [
{
- "$ref": "#/components/schemas/ResponseObject"
+ "$ref": "#/components/schemas/Server"
}
]
},
- "type": {
+ "number": {
"type": "string",
- "default": "response.completed",
- "description": "Event type",
- "enum": [
- "response.completed"
- ]
+ "description": "These are the digits of the phone number you own on your Telnyx."
+ },
+ "credentialId": {
+ "type": "string",
+ "description": "This is the credential you added in dashboard.vapi.ai/keys. This is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups."
}
- },
- "required": [
- "response",
- "type"
- ]
+ }
},
- "ResponseErrorEvent": {
+ "ImportVonagePhoneNumberDTO": {
"type": "object",
"properties": {
- "type": {
- "type": "string",
- "default": "error",
- "description": "Event type",
- "enum": [
- "error"
+ "fallbackDestination": {
+ "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
+ }
]
},
- "code": {
- "type": "string",
- "description": "Error code",
- "example": "ERR_SOMETHING"
+ "hooks": {
+ "type": "array",
+ "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
+ "title": "PhoneNumberHookCallRinging"
+ },
+ {
+ "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
+ "title": "PhoneNumberHookCallEnding"
+ }
+ ]
+ }
},
- "message": {
+ "vonagePhoneNumber": {
"type": "string",
- "description": "Error message",
- "example": "Something went wrong"
+ "description": "These are the digits of the phone number you own on your Vonage.",
+ "deprecated": true
},
- "param": {
+ "credentialId": {
"type": "string",
- "nullable": true,
- "description": "Parameter that caused the error"
+ "description": "This is the credential you added in dashboard.vapi.ai/keys. This is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups."
},
- "sequence_number": {
- "type": "number",
- "description": "Sequence number of the event",
- "example": 1
- }
- },
- "required": [
- "type",
- "code",
- "message",
- "sequence_number"
- ]
- },
- "CreateCampaignDTO": {
- "type": "object",
- "properties": {
"name": {
"type": "string",
- "description": "This is the name of the campaign. This is just for your own reference.",
- "example": "Q2 Sales Campaign"
+ "description": "This is the name of the phone number. This is just for your own reference.",
+ "maxLength": 40
},
"assistantId": {
"type": "string",
- "description": "This is the assistant ID that will be used for the campaign calls. Note: Either assistantId or workflowId can be used, but not both."
+ "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
"workflowId": {
"type": "string",
- "description": "This is the workflow ID that will be used for the campaign calls. Note: Either assistantId or workflowId can be used, but not both."
+ "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "phoneNumberId": {
+ "squadId": {
"type": "string",
- "description": "This is the phone number ID that will be used for the campaign calls."
+ "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
},
- "schedulePlan": {
- "description": "This is the schedule plan for the campaign.",
+ "server": {
+ "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
"allOf": [
{
- "$ref": "#/components/schemas/SchedulePlan"
+ "$ref": "#/components/schemas/Server"
}
]
- },
- "customers": {
- "description": "These are the customers that will be called in the campaign.",
+ }
+ },
+ "required": [
+ "vonagePhoneNumber",
+ "credentialId"
+ ]
+ },
+ "PhoneNumberPaginatedResponse": {
+ "type": "object",
+ "properties": {
+ "results": {
"type": "array",
+ "description": "A list of phone numbers, which can be of any provider type.",
"items": {
- "$ref": "#/components/schemas/CreateCustomerDTO"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ByoPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/TwilioPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/VonagePhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/VapiPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/TelnyxPhoneNumber"
+ }
+ ]
}
+ },
+ "metadata": {
+ "description": "Metadata about the pagination.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/PaginationMeta"
+ }
+ ]
}
},
"required": [
- "name",
- "phoneNumberId",
- "customers"
+ "results",
+ "metadata"
]
},
- "Campaign": {
+ "ApiRequestTool": {
"type": "object",
"properties": {
- "status": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
"type": "string",
- "description": "This is the status of the campaign.",
"enum": [
- "scheduled",
- "in-progress",
- "ended"
- ]
+ "apiRequest"
+ ],
+ "description": "The type of tool. \"apiRequest\" for API request tool."
},
- "endedReason": {
+ "method": {
"type": "string",
- "description": "This is the explanation for how the campaign ended.",
"enum": [
- "campaign.scheduled.ended-by-user",
- "campaign.in-progress.ended-by-user",
- "campaign.ended.success"
+ "POST",
+ "GET",
+ "PUT",
+ "PATCH",
+ "DELETE"
]
},
- "name": {
- "type": "string",
- "description": "This is the name of the campaign. This is just for your own reference.",
- "example": "Q2 Sales Campaign"
- },
- "assistantId": {
- "type": "string",
- "description": "This is the assistant ID that will be used for the campaign calls. Note: Either assistantId or workflowId can be used, but not both."
- },
- "workflowId": {
- "type": "string",
- "description": "This is the workflow ID that will be used for the campaign calls. Note: Either assistantId or workflowId can be used, but not both."
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "This is the timeout in seconds for the request. Defaults to 20 seconds.\n\n@default 20",
+ "minimum": 1,
+ "maximum": 300,
+ "example": 20
},
- "phoneNumberId": {
+ "credentialId": {
"type": "string",
- "description": "This is the phone number ID that will be used for the campaign calls."
+ "description": "The credential ID for API request authentication",
+ "example": "550e8400-e29b-41d4-a716-446655440000"
},
- "schedulePlan": {
- "description": "This is the schedule plan for the campaign.",
- "allOf": [
- {
- "$ref": "#/components/schemas/SchedulePlan"
- }
- ]
+ "encryptedPaths": {
+ "type": "array",
+ "description": "These are the paths to encrypt in the request body if credentialId and encryptionPlan are defined.",
+ "items": {
+ "type": "string"
+ }
},
- "customers": {
- "description": "These are the customers that will be called in the campaign.",
+ "parameters": {
+ "description": "Static key-value pairs merged into the request body. Values support Liquid templates.",
"type": "array",
"items": {
- "$ref": "#/components/schemas/CreateCustomerDTO"
+ "$ref": "#/components/schemas/ToolParameter"
}
},
"id": {
"type": "string",
- "description": "This is the unique identifier for the campaign."
+ "description": "This is the unique identifier for the tool."
},
"orgId": {
"type": "string",
- "description": "This is the unique identifier for the org that this campaign belongs to."
+ "description": "This is the unique identifier for the organization that this tool belongs to."
},
"createdAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the campaign was created."
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
},
"updatedAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the campaign was last updated."
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
},
- "calls": {
- "type": "object",
- "description": "This is a map of call IDs to campaign call details."
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
},
- "callsCounterScheduled": {
- "type": "number",
- "description": "This is the number of calls that have been scheduled."
+ "name": {
+ "type": "string",
+ "description": "This is the name of the tool. This will be passed to the model.\n\nMust be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 40.",
+ "maxLength": 40,
+ "pattern": "^[a-zA-Z0-9_-]{1,40}$"
},
- "callsCounterQueued": {
- "type": "number",
- "description": "This is the number of calls that have been queued."
+ "description": {
+ "type": "string",
+ "description": "This is the description of the tool. This will be passed to the model."
},
- "callsCounterInProgress": {
- "type": "number",
- "description": "This is the number of calls that have been in progress."
+ "url": {
+ "type": "string",
+ "description": "This is where the request will be sent."
},
- "callsCounterEndedVoicemail": {
- "type": "number",
- "description": "This is the number of calls whose ended reason is 'voicemail'."
+ "body": {
+ "description": "This is the body of the request.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/JsonSchema"
+ }
+ ]
},
- "callsCounterEnded": {
- "type": "number",
- "description": "This is the number of calls that have ended."
+ "headers": {
+ "description": "These are the headers to send with the request.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/JsonSchema"
+ }
+ ]
+ },
+ "backoffPlan": {
+ "description": "This is the backoff plan if the request fails. Defaults to undefined (the request will not be retried).\n\n@default undefined (the request will not be retried)",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/BackoffPlan"
+ }
+ ]
+ },
+ "variableExtractionPlan": {
+ "description": "This is the plan to extract variables from the tool's response. These will be accessible during the call and stored in `call.artifact.variableValues` after the call.\n\nUsage:\n1. Use `aliases` to extract variables from the tool's response body. (Most common case)\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{customer.name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{customer.age}}\"\n }\n ]\n}\n```\n\nThe tool response body is made available to the liquid template.\n\n2. Use `aliases` to extract variables from the tool's response body if the response is an array.\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{$[0].name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{$[0].age}}\"\n }\n ]\n}\n```\n\n$ is a shorthand for the tool's response body. `$[0]` is the first item in the array. `$[n]` is the nth item in the array. Note, $ is available regardless of the response body type (both object and array).\n\n3. Use `aliases` to extract variables from the tool's response headers.\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{tool.response.headers.customer-name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{tool.response.headers.customer-age}}\"\n }\n ]\n}\n```\n\n`tool.response` is made available to the liquid template. Particularly, both `tool.response.headers` and `tool.response.body` are available. Note, `tool.response` is available regardless of the response body type (both object and array).\n\n4. Use `schema` to extract a large portion of the tool's response body.\n\n4.1. 
If you hit example.com and it returns `{\"name\": \"John\", \"age\": 30}`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n }\n }\n }\n}\n```\nThese will be extracted as `{{ name }}` and `{{ age }}` respectively. To emphasize, object properties are extracted as direct global variables.\n\n4.2. If you hit example.com and it returns `{\"name\": {\"first\": \"John\", \"last\": \"Doe\"}}`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"object\",\n \"properties\": {\n \"first\": {\n \"type\": \"string\"\n },\n \"last\": {\n \"type\": \"string\"\n }\n }\n }\n }\n }\n}\n```\n\nThese will be extracted as `{{ name }}`. And, `{{ name.first }}` and `{{ name.last }}` will be accessible.\n\n4.3. If you hit example.com and it returns `[\"94123\", \"94124\"]`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"array\",\n \"title\": \"zipCodes\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n}\n```\n\nThis will be extracted as `{{ zipCodes }}`. To access the array items, you can use `{{ zipCodes[0] }}` and `{{ zipCodes[1] }}`.\n\n4.4. If you hit example.com and it returns `[{\"name\": \"John\", \"age\": 30, \"zipCodes\": [\"94123\", \"94124\"]}, {\"name\": \"Jane\", \"age\": 25, \"zipCodes\": [\"94125\", \"94126\"]}]`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"array\",\n \"title\": \"people\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n },\n \"zipCodes\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n }\n }\n }\n}\n```\n\nThis will be extracted as `{{ people }}`. 
To access the array items, you can use `{{ people[n].name }}`, `{{ people[n].age }}`, `{{ people[n].zipCodes }}`, `{{ people[n].zipCodes[0] }}` and `{{ people[n].zipCodes[1] }}`.\n\nNote: Both `aliases` and `schema` can be used together.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/VariableExtractionPlan"
+ }
+ ]
}
},
"required": [
- "status",
- "name",
- "phoneNumberId",
- "customers",
+ "type",
+ "method",
"id",
"orgId",
"createdAt",
"updatedAt",
- "calls",
- "callsCounterScheduled",
- "callsCounterQueued",
- "callsCounterInProgress",
- "callsCounterEndedVoicemail",
- "callsCounterEnded"
+ "url"
]
},
- "CampaignPaginatedResponse": {
+ "CodeToolEnvironmentVariable": {
"type": "object",
"properties": {
- "results": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/Campaign"
- }
+ "name": {
+ "type": "string",
+ "description": "Name of the environment variable",
+ "maxLength": 64,
+ "pattern": "^[A-Z][A-Z0-9_]*$",
+ "example": "API_KEY"
},
- "metadata": {
- "$ref": "#/components/schemas/PaginationMeta"
+ "value": {
+ "type": "string",
+ "description": "Value of the environment variable. Supports Liquid templates.",
+ "maxLength": 10000,
+ "example": "{{apiKey}}"
}
},
"required": [
- "results",
- "metadata"
+ "name",
+ "value"
]
},
- "UpdateCampaignDTO": {
+ "CodeTool": {
"type": "object",
"properties": {
- "name": {
- "type": "string",
- "description": "This is the name of the campaign. This is just for your own reference."
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
},
- "assistantId": {
+ "type": {
"type": "string",
- "description": "This is the assistant ID that will be used for the campaign calls.\nCan only be updated if campaign is not in progress or has ended."
+ "enum": [
+ "code"
+ ],
+ "description": "The type of tool. \"code\" for Code tool."
},
- "workflowId": {
+ "async": {
+ "type": "boolean",
+ "example": false,
+ "description": "This determines if the tool is async.\n\n If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server.\n\n If sync, the assistant will wait for your server to respond. This is useful if you want the assistant to respond with the result from your server.\n\n Defaults to synchronous (`false`)."
+ },
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "code": {
"type": "string",
- "description": "This is the workflow ID that will be used for the campaign calls.\nCan only be updated if campaign is not in progress or has ended."
+ "description": "TypeScript code to execute when the tool is called",
+ "maxLength": 50000
},
- "phoneNumberId": {
+ "environmentVariables": {
+ "description": "Environment variables available in code via `env` object",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/CodeToolEnvironmentVariable"
+ }
+ },
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "This is the timeout in seconds for the code execution. Defaults to 10 seconds.\nMaximum is 30 seconds to prevent abuse.\n\n@default 10",
+ "minimum": 1,
+ "maximum": 30,
+ "example": 10
+ },
+ "credentialId": {
"type": "string",
- "description": "This is the phone number ID that will be used for the campaign calls.\nCan only be updated if campaign is not in progress or has ended."
+ "description": "Credential ID containing the Val Town API key",
+ "example": "550e8400-e29b-41d4-a716-446655440000"
},
- "schedulePlan": {
- "description": "This is the schedule plan for the campaign.\nCan only be updated if campaign is not in progress or has ended.",
+ "variableExtractionPlan": {
+ "description": "Plan to extract variables from the tool response",
"allOf": [
{
- "$ref": "#/components/schemas/SchedulePlan"
+ "$ref": "#/components/schemas/VariableExtractionPlan"
}
]
},
- "status": {
- "type": "string",
- "description": "This is the status of the campaign.\nCan only be updated to 'ended' if you want to end the campaign.\nWhen set to 'ended', it will delete all scheduled calls. Calls in progress will be allowed to complete.",
- "enum": [
- "ended"
- ]
- }
- }
- },
- "Session": {
- "type": "object",
- "properties": {
"id": {
"type": "string",
- "description": "This is the unique identifier for the session."
+ "description": "This is the unique identifier for the tool."
},
"orgId": {
"type": "string",
- "description": "This is the unique identifier for the organization that owns this session."
+ "description": "This is the unique identifier for the organization that this tool belongs to."
},
"createdAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 timestamp indicating when the session was created."
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
},
"updatedAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 timestamp indicating when the session was last updated."
- },
- "name": {
- "type": "string",
- "description": "This is a user-defined name for the session. Maximum length is 40 characters.",
- "maxLength": 40
- },
- "status": {
- "type": "string",
- "description": "This is the current status of the session. Can be either 'active' or 'completed'.",
- "enum": [
- "active",
- "completed"
- ]
- },
- "expirationSeconds": {
- "type": "number",
- "description": "Session expiration time in seconds. Defaults to 24 hours (86400 seconds) if not set.",
- "minimum": 60,
- "maximum": 2592000,
- "example": 86400
- },
- "assistantId": {
- "type": "string",
- "description": "This is the ID of the assistant associated with this session. Use this when referencing an existing assistant."
- },
- "assistant": {
- "description": "This is the assistant configuration for this session. Use this when creating a new assistant configuration.\nIf assistantId is provided, this will be ignored.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateAssistantDTO"
- }
- ]
- },
- "messages": {
- "type": "array",
- "description": "This is an array of chat messages in the session.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
- },
- {
- "$ref": "#/components/schemas/AssistantMessage",
- "title": "AssistantMessage"
- },
- {
- "$ref": "#/components/schemas/ToolMessage",
- "title": "ToolMessage"
- },
- {
- "$ref": "#/components/schemas/DeveloperMessage",
- "title": "DeveloperMessage"
- }
- ]
- }
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
},
- "customer": {
- "description": "This is the customer information associated with this session.",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/CreateCustomerDTO"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
},
- "phoneNumberId": {
- "type": "string",
- "description": "This is the ID of the phone number associated with this session."
- },
- "phoneNumber": {
- "description": "This is the phone number configuration for this session.",
+ "function": {
+ "description": "This is the function definition of the tool.\n\nFor the Code tool, this defines the name, description, and parameters that the model\nwill use to understand when and how to call this tool.",
"allOf": [
{
- "$ref": "#/components/schemas/ImportTwilioPhoneNumberDTO"
+ "$ref": "#/components/schemas/OpenAIFunction"
}
]
}
},
"required": [
+ "type",
+ "code",
"id",
"orgId",
"createdAt",
"updatedAt"
]
},
- "CreateSessionDTO": {
+ "DtmfTool": {
"type": "object",
"properties": {
- "name": {
- "type": "string",
- "description": "This is a user-defined name for the session. Maximum length is 40 characters.",
- "maxLength": 40
- },
- "status": {
- "type": "string",
- "description": "This is the current status of the session. Can be either 'active' or 'completed'.",
- "enum": [
- "active",
- "completed"
- ]
- },
- "expirationSeconds": {
- "type": "number",
- "description": "Session expiration time in seconds. Defaults to 24 hours (86400 seconds) if not set.",
- "minimum": 60,
- "maximum": 2592000,
- "example": 86400
- },
- "assistantId": {
- "type": "string",
- "description": "This is the ID of the assistant associated with this session. Use this when referencing an existing assistant."
- },
- "assistant": {
- "description": "This is the assistant configuration for this session. Use this when creating a new assistant configuration.\nIf assistantId is provided, this will be ignored.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateAssistantDTO"
- }
- ]
- },
"messages": {
"type": "array",
- "description": "This is an array of chat messages in the session.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/AssistantMessage",
- "title": "AssistantMessage"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/ToolMessage",
- "title": "ToolMessage"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/DeveloperMessage",
- "title": "DeveloperMessage"
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "customer": {
- "description": "This is the customer information associated with this session.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateCustomerDTO"
- }
- ]
+ "type": {
+ "type": "string",
+ "enum": [
+ "dtmf"
+ ],
+ "description": "The type of tool. \"dtmf\" for DTMF tool."
},
- "phoneNumberId": {
+ "sipInfoDtmfEnabled": {
+ "type": "boolean",
+ "description": "This enables sending DTMF tones via SIP INFO messages instead of RFC 2833 (RTP events). When enabled, DTMF digits will be sent using the SIP INFO method, which can be more reliable in some network configurations. Only relevant when using the `vapi.sip` transport.",
+ "default": false
+ },
+ "id": {
"type": "string",
- "description": "This is the ID of the phone number associated with this session."
+ "description": "This is the unique identifier for the tool."
},
- "phoneNumber": {
- "description": "This is the phone number configuration for this session.",
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/ImportTwilioPhoneNumberDTO"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
}
- }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
},
- "UpdateSessionDTO": {
+ "EndCallTool": {
"type": "object",
"properties": {
- "name": {
- "type": "string",
- "description": "This is the new name for the session. Maximum length is 40 characters.",
- "maxLength": 40
- },
- "status": {
- "type": "string",
- "description": "This is the new status for the session.",
- "enum": [
- "active",
- "completed"
- ]
- },
- "expirationSeconds": {
- "type": "number",
- "description": "Session expiration time in seconds. Defaults to 24 hours (86400 seconds) if not set.",
- "minimum": 60,
- "maximum": 2592000,
- "example": 86400
- },
"messages": {
"type": "array",
- "description": "This is the updated array of chat messages.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/AssistantMessage",
- "title": "AssistantMessage"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/ToolMessage",
- "title": "ToolMessage"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/DeveloperMessage",
- "title": "DeveloperMessage"
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
- }
- }
- },
- "GetSessionPaginatedDTO": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "description": "This is the name of the session to filter by."
- },
- "assistantId": {
- "type": "string",
- "description": "This is the ID of the assistant to filter sessions by."
- },
- "workflowId": {
- "type": "string",
- "description": "This is the ID of the workflow to filter sessions by."
- },
- "page": {
- "type": "number",
- "description": "This is the page number to return. Defaults to 1.",
- "minimum": 1
},
- "sortOrder": {
+ "type": {
"type": "string",
- "description": "This is the sort order for pagination. Defaults to 'DESC'.",
"enum": [
- "ASC",
- "DESC"
- ]
- },
- "limit": {
- "type": "number",
- "description": "This is the maximum number of items to return. Defaults to 100.",
- "minimum": 0,
- "maximum": 1000
- },
- "createdAtGt": {
- "format": "date-time",
- "type": "string",
- "description": "This will return items where the createdAt is greater than the specified value."
- },
- "createdAtLt": {
- "format": "date-time",
- "type": "string",
- "description": "This will return items where the createdAt is less than the specified value."
- },
- "createdAtGe": {
- "format": "date-time",
- "type": "string",
- "description": "This will return items where the createdAt is greater than or equal to the specified value."
- },
- "createdAtLe": {
- "format": "date-time",
- "type": "string",
- "description": "This will return items where the createdAt is less than or equal to the specified value."
- },
- "updatedAtGt": {
- "format": "date-time",
- "type": "string",
- "description": "This will return items where the updatedAt is greater than the specified value."
+ "endCall"
+ ],
+ "description": "The type of tool. \"endCall\" for End Call tool."
},
- "updatedAtLt": {
- "format": "date-time",
+ "id": {
"type": "string",
- "description": "This will return items where the updatedAt is less than the specified value."
+ "description": "This is the unique identifier for the tool."
},
- "updatedAtGe": {
- "format": "date-time",
+ "orgId": {
"type": "string",
- "description": "This will return items where the updatedAt is greater than or equal to the specified value."
+ "description": "This is the unique identifier for the organization that this tool belongs to."
},
- "updatedAtLe": {
+ "createdAt": {
"format": "date-time",
"type": "string",
- "description": "This will return items where the updatedAt is less than or equal to the specified value."
- }
- }
- },
- "SessionPaginatedResponse": {
- "type": "object",
- "properties": {
- "results": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/Session"
- }
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
},
- "metadata": {
- "$ref": "#/components/schemas/PaginationMeta"
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
}
},
"required": [
- "results",
- "metadata"
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
]
},
- "Assistant": {
+ "FunctionTool": {
"type": "object",
"properties": {
- "transcriber": {
- "description": "These are the options for the assistant's transcriber.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AssemblyAITranscriber",
- "title": "AssemblyAITranscriber"
- },
- {
- "$ref": "#/components/schemas/AzureSpeechTranscriber",
- "title": "AzureSpeechTranscriber"
- },
- {
- "$ref": "#/components/schemas/CustomTranscriber",
- "title": "CustomTranscriber"
- },
- {
- "$ref": "#/components/schemas/DeepgramTranscriber",
- "title": "DeepgramTranscriber"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsTranscriber",
- "title": "ElevenLabsTranscriber"
- },
- {
- "$ref": "#/components/schemas/GladiaTranscriber",
- "title": "GladiaTranscriber"
- },
- {
- "$ref": "#/components/schemas/GoogleTranscriber",
- "title": "GoogleTranscriber"
- },
- {
- "$ref": "#/components/schemas/SpeechmaticsTranscriber",
- "title": "SpeechmaticsTranscriber"
- },
- {
- "$ref": "#/components/schemas/TalkscriberTranscriber",
- "title": "TalkscriberTranscriber"
- },
- {
- "$ref": "#/components/schemas/OpenAITranscriber",
- "title": "OpenAITranscriber"
- },
- {
- "$ref": "#/components/schemas/CartesiaTranscriber",
- "title": "CartesiaTranscriber"
- }
- ]
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
},
- "model": {
- "description": "These are the options for the assistant's LLM.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AnthropicModel",
- "title": "Anthropic"
- },
- {
- "$ref": "#/components/schemas/AnyscaleModel",
- "title": "Anyscale"
- },
- {
- "$ref": "#/components/schemas/CerebrasModel",
- "title": "Cerebras"
- },
- {
- "$ref": "#/components/schemas/CustomLLMModel",
- "title": "CustomLLM"
- },
- {
- "$ref": "#/components/schemas/DeepInfraModel",
- "title": "DeepInfra"
- },
- {
- "$ref": "#/components/schemas/DeepSeekModel",
- "title": "DeepSeek"
- },
- {
- "$ref": "#/components/schemas/GoogleModel",
- "title": "Google"
- },
- {
- "$ref": "#/components/schemas/GroqModel",
- "title": "Groq"
- },
- {
- "$ref": "#/components/schemas/InflectionAIModel",
- "title": "InflectionAI"
- },
- {
- "$ref": "#/components/schemas/OpenAIModel",
- "title": "OpenAI"
- },
- {
- "$ref": "#/components/schemas/OpenRouterModel",
- "title": "OpenRouter"
- },
- {
- "$ref": "#/components/schemas/PerplexityAIModel",
- "title": "PerplexityAI"
- },
- {
- "$ref": "#/components/schemas/TogetherAIModel",
- "title": "Together"
- },
+ "type": {
+ "type": "string",
+ "enum": [
+ "function"
+ ],
+ "description": "The type of tool. \"function\" for Function tool."
+ },
+ "async": {
+ "type": "boolean",
+ "example": false,
+ "description": "This determines if the tool is async.\n\n If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server.\n\n If sync, the assistant will wait for your server to respond. This is useful if want assistant to respond with the result from your server.\n\n Defaults to synchronous (`false`)."
+ },
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "allOf": [
{
- "$ref": "#/components/schemas/XaiModel",
- "title": "XAI"
+ "$ref": "#/components/schemas/Server"
}
]
},
- "voice": {
- "description": "These are the options for the assistant's voice.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AzureVoice",
- "title": "AzureVoice"
- },
- {
- "$ref": "#/components/schemas/CartesiaVoice",
- "title": "CartesiaVoice"
- },
- {
- "$ref": "#/components/schemas/CustomVoice",
- "title": "CustomVoice"
- },
- {
- "$ref": "#/components/schemas/DeepgramVoice",
- "title": "DeepgramVoice"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsVoice",
- "title": "ElevenLabsVoice"
- },
- {
- "$ref": "#/components/schemas/HumeVoice",
- "title": "HumeVoice"
- },
- {
- "$ref": "#/components/schemas/LMNTVoice",
- "title": "LMNTVoice"
- },
- {
- "$ref": "#/components/schemas/NeuphonicVoice",
- "title": "NeuphonicVoice"
- },
- {
- "$ref": "#/components/schemas/OpenAIVoice",
- "title": "OpenAIVoice"
- },
- {
- "$ref": "#/components/schemas/PlayHTVoice",
- "title": "PlayHTVoice"
- },
- {
- "$ref": "#/components/schemas/RimeAIVoice",
- "title": "RimeAIVoice"
- },
- {
- "$ref": "#/components/schemas/SmallestAIVoice",
- "title": "SmallestAIVoice"
- },
- {
- "$ref": "#/components/schemas/TavusVoice",
- "title": "TavusVoice"
- },
- {
- "$ref": "#/components/schemas/VapiVoice",
- "title": "VapiVoice"
- },
- {
- "$ref": "#/components/schemas/SesameVoice",
- "title": "SesameVoice"
- },
- {
- "$ref": "#/components/schemas/InworldVoice",
- "title": "InworldVoice"
- },
+ "variableExtractionPlan": {
+ "description": "Plan to extract variables from the tool response",
+ "allOf": [
{
- "$ref": "#/components/schemas/MinimaxVoice",
- "title": "MinimaxVoice"
+ "$ref": "#/components/schemas/VariableExtractionPlan"
}
]
},
- "firstMessage": {
+ "parameters": {
+ "description": "Static key-value pairs merged into the request body. Values support Liquid templates.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ToolParameter"
+ }
+ },
+ "id": {
"type": "string",
- "description": "This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.).\n\nIf unspecified, assistant will wait for user to speak and use the model to respond once they speak.",
- "example": "Hello! How can I help you today?"
+ "description": "This is the unique identifier for the tool."
},
- "firstMessageInterruptionsEnabled": {
- "type": "boolean",
- "default": false
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
},
- "firstMessageMode": {
+ "createdAt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the mode for the first message. Default is 'assistant-speaks-first'.\n\nUse:\n- 'assistant-speaks-first' to have the assistant speak first.\n- 'assistant-waits-for-user' to have the assistant wait for the user to speak first.\n- 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points).\n\n@default 'assistant-speaks-first'",
- "enum": [
- "assistant-speaks-first",
- "assistant-speaks-first-with-model-generated-message",
- "assistant-waits-for-user"
- ],
- "example": "assistant-speaks-first"
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
},
- "voicemailDetection": {
- "description": "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nThis uses Twilio's built-in detection while the VoicemailTool relies on the model to detect if a voicemail was reached.\nYou can use neither of them, one of them, or both of them. By default, Twilio built-in detection is enabled while VoicemailTool is not.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan",
- "title": "Google"
- },
- {
- "$ref": "#/components/schemas/OpenAIVoicemailDetectionPlan",
- "title": "OpenAI"
- },
- {
- "$ref": "#/components/schemas/TwilioVoicemailDetectionPlan",
- "title": "Twilio"
- },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
{
- "$ref": "#/components/schemas/VapiVoicemailDetectionPlan",
- "title": "Vapi"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
},
- "clientMessages": {
+ "function": {
+ "description": "This is the function definition of the tool.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/OpenAIFunction"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "GhlTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
"type": "array",
- "enum": [
- "conversation-update",
- "function-call",
- "function-call-result",
- "hang",
- "language-changed",
- "metadata",
- "model-output",
- "speech-update",
- "status-update",
- "transcript",
- "tool-calls",
- "tool-calls-result",
- "tool.completed",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "workflow.node.started"
- ],
- "example": [
- "conversation-update",
- "function-call",
- "hang",
- "model-output",
- "speech-update",
- "status-update",
- "transfer-update",
- "transcript",
- "tool-calls",
- "user-interrupted",
- "voice-input",
- "workflow.node.started"
- ],
- "description": "These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input,workflow.node.started. You can check the shape of the messages in ClientMessage schema.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
- "type": "string",
- "enum": [
- "conversation-update",
- "function-call",
- "function-call-result",
- "hang",
- "language-changed",
- "metadata",
- "model-output",
- "speech-update",
- "status-update",
- "transcript",
- "tool-calls",
- "tool-calls-result",
- "tool.completed",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "workflow.node.started"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
]
}
},
- "serverMessages": {
- "type": "array",
+ "type": {
+ "type": "string",
"enum": [
- "conversation-update",
- "end-of-call-report",
- "function-call",
- "hang",
- "language-changed",
- "language-change-detected",
- "model-output",
- "phone-call-control",
- "speech-update",
- "status-update",
- "transcript",
- "transcript[transcriptType=\"final\"]",
- "tool-calls",
- "transfer-destination-request",
- "handoff-destination-request",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "chat.created",
- "chat.deleted",
- "session.created",
- "session.updated",
- "session.deleted"
- ],
- "example": [
- "conversation-update",
- "end-of-call-report",
- "function-call",
- "hang",
- "speech-update",
- "status-update",
- "tool-calls",
- "transfer-destination-request",
- "handoff-destination-request",
- "user-interrupted"
+ "ghl"
],
- "description": "These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,handoff-destination-request,user-interrupted. You can check the shape of the messages in ServerMessage schema.",
- "items": {
- "type": "string",
- "enum": [
- "conversation-update",
- "end-of-call-report",
- "function-call",
- "hang",
- "language-changed",
- "language-change-detected",
- "model-output",
- "phone-call-control",
- "speech-update",
- "status-update",
- "transcript",
- "transcript[transcriptType=\"final\"]",
- "tool-calls",
- "transfer-destination-request",
- "handoff-destination-request",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "chat.created",
- "chat.deleted",
- "session.created",
- "session.updated",
- "session.deleted"
- ]
- }
+ "description": "The type of tool. \"ghl\" for GHL tool."
},
- "maxDurationSeconds": {
- "type": "number",
- "description": "This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended.\n\n@default 600 (10 minutes)",
- "minimum": 10,
- "maximum": 43200,
- "example": 600
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the tool."
},
- "backgroundSound": {
- "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
- "oneOf": [
- {
- "type": "enum",
- "enum": [
- "off",
- "office"
- ],
- "example": "office"
- },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
{
- "type": "string",
- "format": "uri",
- "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
},
- "modelOutputInMessagesEnabled": {
- "type": "boolean",
- "description": "This determines whether the model's output is used in conversation history rather than the transcription of assistant's speech.\n\nDefault `false` while in beta.\n\n@default false",
- "example": false
- },
- "transportConfigurations": {
+ "metadata": {
+ "$ref": "#/components/schemas/GhlToolMetadata"
+ }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt",
+ "metadata"
+ ]
+ },
+ "MakeTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
"type": "array",
- "description": "These are the configurations to be passed to the transport providers of assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/TransportConfigurationTwilio",
- "title": "Twilio"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "observabilityPlan": {
- "description": "This is the plan for observability of assistant's calls.\n\nCurrently, only Langfuse is supported.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/LangfuseObservabilityPlan",
- "title": "Langfuse"
- }
+ "type": {
+ "type": "string",
+ "enum": [
+ "make"
],
+ "description": "The type of tool. \"make\" for Make tool."
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the tool."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/LangfuseObservabilityPlan"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
},
- "credentials": {
+ "metadata": {
+ "$ref": "#/components/schemas/MakeToolMetadata"
+ }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt",
+ "metadata"
+ ]
+ },
+ "TransferCallTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
"type": "array",
- "description": "These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
- "title": "AnthropicCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "title": "AnyscaleCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "title": "AssemblyAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureCredentialDTO",
- "title": "AzureCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "title": "AzureOpenAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "title": "ByoSipTrunkCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
- "title": "CartesiaCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
- "title": "CerebrasCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
- "title": "CloudflareCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "title": "CustomLLMCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
- "title": "DeepgramCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "title": "DeepInfraCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "title": "DeepSeekCredential"
- },
- {
- "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "title": "ElevenLabsCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGcpCredentialDTO",
- "title": "GcpCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
- "title": "GladiaCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "title": "GhlCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
- "title": "GoogleCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGroqCredentialDTO",
- "title": "GroqCredential"
- },
- {
- "$ref": "#/components/schemas/CreateHumeCredentialDTO",
- "title": "HumeCredential"
- },
- {
- "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
- "title": "InflectionAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
- "title": "LangfuseCredential"
- },
- {
- "$ref": "#/components/schemas/CreateLmntCredentialDTO",
- "title": "LmntCredential"
- },
- {
- "$ref": "#/components/schemas/CreateMakeCredentialDTO",
- "title": "MakeCredential"
- },
- {
- "$ref": "#/components/schemas/CreateMistralCredentialDTO",
- "title": "MistralCredential"
- },
- {
- "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "title": "NeuphonicCredential"
- },
- {
- "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
- "title": "OpenAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "title": "OpenRouterCredential"
- },
- {
- "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "title": "PerplexityAICredential"
- },
- {
- "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
- "title": "PlayHTCredential"
- },
- {
- "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
- "title": "RimeAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
- "title": "RunpodCredential"
- },
- {
- "$ref": "#/components/schemas/CreateS3CredentialDTO",
- "title": "S3Credential"
- },
- {
- "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
- "title": "SmallestAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "title": "SpeechmaticsCredential"
- },
- {
- "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
- "title": "SupabaseCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTavusCredentialDTO",
- "title": "TavusCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
- "title": "TogetherAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
- "title": "TrieveCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
- "title": "TwilioCredential"
- },
- {
- "$ref": "#/components/schemas/CreateVonageCredentialDTO",
- "title": "VonageCredential"
- },
- {
- "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
- "title": "WebhookCredential"
- },
- {
- "$ref": "#/components/schemas/CreateXAiCredentialDTO",
- "title": "XAiCredential"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "title": "GoogleCalendarOAuth2ClientCredential"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "title": "GoogleCalendarOAuth2AuthorizationCredential"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "title": "GoogleSheetsOAuth2AuthorizationCredential"
- },
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "transferCall"
+ ]
+ },
+ "destinations": {
+ "type": "array",
+ "description": "These are the destinations that the call can be transferred to. If no destinations are provided, server.url will be used to get the transfer destination once the tool is called.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "title": "SlackOAuth2AuthorizationCredential"
+ "$ref": "#/components/schemas/TransferDestinationAssistant",
+ "title": "Assistant"
},
{
- "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "title": "GoHighLevelMCPCredential"
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "Number"
},
{
- "$ref": "#/components/schemas/CreateInworldCredentialDTO",
- "title": "InworldCredential"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
- "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "azure": "#/components/schemas/CreateAzureCredentialDTO",
- "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
- "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
- "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
- "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
- "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "gcp": "#/components/schemas/CreateGcpCredentialDTO",
- "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
- "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "google": "#/components/schemas/CreateGoogleCredentialDTO",
- "groq": "#/components/schemas/CreateGroqCredentialDTO",
- "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
- "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
- "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
- "make": "#/components/schemas/CreateMakeCredentialDTO",
- "openai": "#/components/schemas/CreateOpenAICredentialDTO",
- "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
- "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
- "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
- "s3": "#/components/schemas/CreateS3CredentialDTO",
- "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
- "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
- "tavus": "#/components/schemas/CreateTavusCredentialDTO",
- "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
- "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
- "vonage": "#/components/schemas/CreateVonageCredentialDTO",
- "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
- "xai": "#/components/schemas/CreateXAiCredentialDTO",
- "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "hume": "#/components/schemas/CreateHumeCredentialDTO",
- "mistral": "#/components/schemas/CreateMistralCredentialDTO",
- "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
- "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "inworld": "#/components/schemas/CreateInworldCredentialDTO",
- "minimax": "#/components/schemas/CreateMinimaxCredentialDTO"
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "Sip"
}
- }
+ ]
}
},
- "hooks": {
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the tool."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "HandoffTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
"type": "array",
- "description": "This is a set of actions that will be performed on certain events.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/CallHookCallEnding",
- "title": "CallHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
- "title": "CallHookAssistantSpeechInterrupted"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
- "title": "CallHookCustomerSpeechInterrupted"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
- "title": "CallHookCustomerSpeechTimeout"
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "name": {
- "type": "string",
- "description": "This is the name of the assistant.\n\nThis is required when you want to transfer between assistants in a call.",
- "maxLength": 40
- },
- "voicemailMessage": {
+ "type": {
"type": "string",
- "description": "This is the message that the assistant will say if the call is forwarded to voicemail.\n\nIf unspecified, it will hang up.",
- "maxLength": 1000
+ "description": "This is the type of the tool.\nWhen you're using handoff tool, we recommend adding this to your system prompt\n---\n# System context\n\nYou are part of a multi-agent system designed to make agent coordination and execution easy. Agents uses two primary abstraction: **Agents** and **Handoffs**. An agent encompasses instructions and tools and can hand off a conversation to another agent when appropriate. Handoffs are achieved by calling a handoff function, generally named `handoff_to_`. Handoffs between agents are handled seamlessly in the background; do not mention or draw attention to these handoffs in your conversation with the user.\n\n# Agent context\n\n{put your agent system prompt here}\n---",
+ "enum": [
+ "handoff"
+ ]
},
- "endCallMessage": {
+ "defaultResult": {
"type": "string",
- "description": "This is the message that the assistant will say if it ends the call.\n\nIf unspecified, it will hang up without saying anything.",
- "maxLength": 1000
+ "description": "This is the default local tool result message used when no runtime handoff result override is returned."
},
- "endCallPhrases": {
- "description": "This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive.",
+ "destinations": {
"type": "array",
+ "description": "These are the destinations that the call can be handed off to.\n\nUsage:\n1. Single destination\n\nUse `assistantId` to handoff the call to a saved assistant, or `assistantName` to handoff the call to an assistant in the same squad.\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\", // or \"assistantName\": \"Assistant123\"\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n2. Multiple destinations\n\n2.1. Multiple Tools, Each With One Destination (OpenAI recommended)\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\",\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n },\n ],\n },\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-456\",\n \"description\": \"customer wants to be handed off to assistant-456\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n2.2. One Tool, Multiple Destinations (Anthropic recommended)\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\",\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n },\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-456\",\n \"description\": \"customer wants to be handed off to assistant-456\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n3. 
Dynamic destination\n\n3.1 To determine the destination dynamically, supply a `dynamic` handoff destination type and a `server` object.\n VAPI will send a handoff-destination-request webhook to the `server.url`.\n The response from the server will be used as the destination (if valid).\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"dynamic\",\n \"server\": {\n \"url\": \"https://example.com\"\n }\n }\n ],\n }\n ]\n}\n```\n\n3.2. To pass custom parameters to the server, you can use the `function` object.\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"dynamic\",\n \"server\": {\n \"url\": \"https://example.com\"\n },\n }\n ],\n \"function\": {\n \"name\": \"handoff\",\n \"description\": \"Call this function when the customer is ready to be handed off to the next assistant\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"destination\": {\n \"type\": \"string\",\n \"description\": \"Use dynamic when customer is ready to be handed off to the next assistant\",\n \"enum\": [\"dynamic\"]\n },\n \"customerAreaCode\": {\n \"type\": \"number\",\n \"description\": \"Area code of the customer\"\n },\n \"customerIntent\": {\n \"type\": \"string\",\n \"enum\": [\"new-customer\", \"existing-customer\"],\n \"description\": \"Use new-customer when customer is a new customer, existing-customer when customer is an existing customer\"\n },\n \"customerSentiment\": {\n \"type\": \"string\",\n \"enum\": [\"positive\", \"negative\", \"neutral\"],\n \"description\": \"Use positive when customer is happy, negative when customer is unhappy, neutral when customer is neutral\"\n }\n }\n }\n }\n }\n ]\n}\n```\n\nThe properties `customerAreaCode`, `customerIntent`, and `customerSentiment` will be passed to the server in the webhook request body.",
"items": {
- "type": "string",
- "maxLength": 140,
- "minLength": 2
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/HandoffDestinationAssistant",
+ "title": "Assistant"
+ },
+ {
+ "$ref": "#/components/schemas/HandoffDestinationDynamic",
+ "title": "Dynamic"
+ },
+ {
+ "$ref": "#/components/schemas/HandoffDestinationSquad",
+ "title": "Squad"
+ }
+ ]
}
},
- "compliancePlan": {
- "$ref": "#/components/schemas/CompliancePlan"
- },
- "metadata": {
- "type": "object",
- "description": "This is for metadata you want to store on the assistant."
- },
- "backgroundSpeechDenoisingPlan": {
- "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nSmart denoising can be combined with or used independently of Fourier denoising.\n\nOrder of precedence:\n- Smart denoising\n- Fourier denoising",
- "allOf": [
- {
- "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
- }
- ]
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the tool."
},
- "analysisPlan": {
- "description": "This is the plan for analysis of assistant's calls. Stored in `call.analysis`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/AnalysisPlan"
- }
- ]
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
},
- "artifactPlan": {
- "description": "This is the plan for artifacts generated during assistant's calls. Stored in `call.artifact`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ArtifactPlan"
- }
- ]
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
},
- "startSpeakingPlan": {
- "description": "This is the plan for when the assistant should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
- "allOf": [
- {
- "$ref": "#/components/schemas/StartSpeakingPlan"
- }
- ]
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
},
- "stopSpeakingPlan": {
- "description": "This is the plan for when assistant should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/StopSpeakingPlan"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
},
- "monitorPlan": {
- "description": "This is the plan for real-time monitoring of the assistant's calls.\n\nUsage:\n- To enable live listening of the assistant's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the assistant's calls, set `monitorPlan.controlEnabled` to `true`.",
+ "function": {
+ "description": "This is the optional function definition that will be passed to the LLM.\nIf this is not defined, we will construct this based on the other properties.\n\nFor example, given the following tools definition:\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\",\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n },\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-456\",\n \"description\": \"customer wants to be handed off to assistant-456\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\nWe will construct the following function definition:\n```json\n{\n \"function\": {\n \"name\": \"handoff_to_assistant-123\",\n \"description\": \"\n Use this function to handoff the call to the next assistant.\n Only use it when instructions explicitly ask you to use the handoff_to_assistant function.\n DO NOT call this function unless you are instructed to do so.\n Here are the destinations you can handoff the call to:\n 1. assistant-123. When: customer wants to be handed off to assistant-123\n 2. assistant-456. When: customer wants to be handed off to assistant-456\n \",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"destination\": {\n \"type\": \"string\",\n \"description\": \"Options: assistant-123 (customer wants to be handed off to assistant-123), assistant-456 (customer wants to be handed off to assistant-456)\",\n \"enum\": [\"assistant-123\", \"assistant-456\"]\n },\n },\n \"required\": [\"destination\"]\n }\n }\n}\n```\n\nTo override this function, please provide an OpenAI function definition and refer to it in the system prompt.\nYou may override parts of the function definition (i.e. 
you may only want to change the function name for your prompt).\nIf you choose to override the function parameters, it must include `destination` as a required parameter, and it must evaluate to either an assistantId, assistantName, or a the string literal `dynamic`.\n\nTo pass custom parameters to the server in a dynamic handoff, you can use the function parameters, with `dynamic` as the destination.\n```json\n{\n \"function\": {\n \"name\": \"dynamic_handoff\",\n \"description\": \"\n Call this function when the customer is ready to be handed off to the next assistant\n \",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"destination\": {\n \"type\": \"string\",\n \"enum\": [\"dynamic\"]\n },\n \"customerAreaCode\": {\n \"type\": \"number\",\n \"description\": \"Area code of the customer\"\n },\n \"customerIntent\": {\n \"type\": \"string\",\n \"enum\": [\"new-customer\", \"existing-customer\"],\n \"description\": \"Use new-customer when customer is a new customer, existing-customer when customer is an existing customer\"\n },\n \"customerSentiment\": {\n \"type\": \"string\",\n \"enum\": [\"positive\", \"negative\", \"neutral\"],\n \"description\": \"Use positive when customer is happy, negative when customer is unhappy, neutral when customer is neutral\"\n }\n },\n \"required\": [\"destination\", \"customerAreaCode\", \"customerIntent\", \"customerSentiment\"]\n }\n }\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/MonitorPlan"
+ "$ref": "#/components/schemas/OpenAIFunction"
}
]
- },
- "credentialIds": {
- "description": "These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
+ }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "OutputTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
"type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
- "type": "string"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
}
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server.url\n2. phoneNumber.serverUrl\n3. org.serverUrl",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
- ]
- },
- "keypadInputPlan": {
- "$ref": "#/components/schemas/KeypadInputPlan"
+ "type": {
+ "type": "string",
+ "enum": [
+ "output"
+ ],
+ "description": "The type of tool. \"output\" for Output tool."
},
"id": {
"type": "string",
- "description": "This is the unique identifier for the assistant."
+ "description": "This is the unique identifier for the tool."
},
"orgId": {
"type": "string",
- "description": "This is the unique identifier for the org that this assistant belongs to."
+ "description": "This is the unique identifier for the organization that this tool belongs to."
},
"createdAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the assistant was created."
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
},
"updatedAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the assistant was last updated."
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
}
},
"required": [
+ "type",
"id",
"orgId",
"createdAt",
"updatedAt"
]
},
- "AssistantPaginatedResponse": {
+ "BashTool": {
"type": "object",
"properties": {
- "results": {
+ "messages": {
"type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
- "$ref": "#/components/schemas/Assistant"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
}
},
- "metadata": {
- "$ref": "#/components/schemas/PaginationMeta"
- }
- },
- "required": [
- "results",
- "metadata"
- ]
- },
- "AssistantVersionPaginatedResponse": {
- "type": "object",
- "properties": {
- "results": {
- "type": "array"
- },
- "metadata": {
- "$ref": "#/components/schemas/PaginationMeta"
+ "type": {
+ "type": "string",
+ "enum": [
+ "bash"
+ ],
+ "description": "The type of tool. \"bash\" for Bash tool."
},
- "nextPageState": {
- "type": "string"
- }
- },
- "required": [
- "results",
- "metadata"
- ]
- },
- "UpdateAssistantDTO": {
- "type": "object",
- "properties": {
- "transcriber": {
- "description": "These are the options for the assistant's transcriber.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AssemblyAITranscriber",
- "title": "AssemblyAITranscriber"
- },
- {
- "$ref": "#/components/schemas/AzureSpeechTranscriber",
- "title": "AzureSpeechTranscriber"
- },
- {
- "$ref": "#/components/schemas/CustomTranscriber",
- "title": "CustomTranscriber"
- },
- {
- "$ref": "#/components/schemas/DeepgramTranscriber",
- "title": "DeepgramTranscriber"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsTranscriber",
- "title": "ElevenLabsTranscriber"
- },
- {
- "$ref": "#/components/schemas/GladiaTranscriber",
- "title": "GladiaTranscriber"
- },
- {
- "$ref": "#/components/schemas/GoogleTranscriber",
- "title": "GoogleTranscriber"
- },
- {
- "$ref": "#/components/schemas/SpeechmaticsTranscriber",
- "title": "SpeechmaticsTranscriber"
- },
- {
- "$ref": "#/components/schemas/TalkscriberTranscriber",
- "title": "TalkscriberTranscriber"
- },
- {
- "$ref": "#/components/schemas/OpenAITranscriber",
- "title": "OpenAITranscriber"
- },
- {
- "$ref": "#/components/schemas/CartesiaTranscriber",
- "title": "CartesiaTranscriber"
- }
- ]
+ "subType": {
+ "type": "string",
+ "enum": [
+ "bash_20241022"
+ ],
+ "description": "The sub type of tool."
},
- "model": {
- "description": "These are the options for the assistant's LLM.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AnthropicModel",
- "title": "Anthropic"
- },
- {
- "$ref": "#/components/schemas/AnyscaleModel",
- "title": "Anyscale"
- },
- {
- "$ref": "#/components/schemas/CerebrasModel",
- "title": "Cerebras"
- },
- {
- "$ref": "#/components/schemas/CustomLLMModel",
- "title": "CustomLLM"
- },
- {
- "$ref": "#/components/schemas/DeepInfraModel",
- "title": "DeepInfra"
- },
- {
- "$ref": "#/components/schemas/DeepSeekModel",
- "title": "DeepSeek"
- },
- {
- "$ref": "#/components/schemas/GoogleModel",
- "title": "Google"
- },
- {
- "$ref": "#/components/schemas/GroqModel",
- "title": "Groq"
- },
- {
- "$ref": "#/components/schemas/InflectionAIModel",
- "title": "InflectionAI"
- },
- {
- "$ref": "#/components/schemas/OpenAIModel",
- "title": "OpenAI"
- },
- {
- "$ref": "#/components/schemas/OpenRouterModel",
- "title": "OpenRouter"
- },
- {
- "$ref": "#/components/schemas/PerplexityAIModel",
- "title": "PerplexityAI"
- },
- {
- "$ref": "#/components/schemas/TogetherAIModel",
- "title": "Together"
- },
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "allOf": [
{
- "$ref": "#/components/schemas/XaiModel",
- "title": "XAI"
+ "$ref": "#/components/schemas/Server"
}
]
},
- "voice": {
- "description": "These are the options for the assistant's voice.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AzureVoice",
- "title": "AzureVoice"
- },
- {
- "$ref": "#/components/schemas/CartesiaVoice",
- "title": "CartesiaVoice"
- },
- {
- "$ref": "#/components/schemas/CustomVoice",
- "title": "CustomVoice"
- },
- {
- "$ref": "#/components/schemas/DeepgramVoice",
- "title": "DeepgramVoice"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsVoice",
- "title": "ElevenLabsVoice"
- },
- {
- "$ref": "#/components/schemas/HumeVoice",
- "title": "HumeVoice"
- },
- {
- "$ref": "#/components/schemas/LMNTVoice",
- "title": "LMNTVoice"
- },
- {
- "$ref": "#/components/schemas/NeuphonicVoice",
- "title": "NeuphonicVoice"
- },
- {
- "$ref": "#/components/schemas/OpenAIVoice",
- "title": "OpenAIVoice"
- },
- {
- "$ref": "#/components/schemas/PlayHTVoice",
- "title": "PlayHTVoice"
- },
- {
- "$ref": "#/components/schemas/RimeAIVoice",
- "title": "RimeAIVoice"
- },
- {
- "$ref": "#/components/schemas/SmallestAIVoice",
- "title": "SmallestAIVoice"
- },
- {
- "$ref": "#/components/schemas/TavusVoice",
- "title": "TavusVoice"
- },
- {
- "$ref": "#/components/schemas/VapiVoice",
- "title": "VapiVoice"
- },
- {
- "$ref": "#/components/schemas/SesameVoice",
- "title": "SesameVoice"
- },
- {
- "$ref": "#/components/schemas/InworldVoice",
- "title": "InworldVoice"
- },
- {
- "$ref": "#/components/schemas/MinimaxVoice",
- "title": "MinimaxVoice"
- }
- ]
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the tool."
},
- "firstMessage": {
+ "orgId": {
"type": "string",
- "description": "This is the first message that the assistant will say. This can also be a URL to a containerized audio file (mp3, wav, etc.).\n\nIf unspecified, assistant will wait for user to speak and use the model to respond once they speak.",
- "example": "Hello! How can I help you today?"
+ "description": "This is the unique identifier for the organization that this tool belongs to."
},
- "firstMessageInterruptionsEnabled": {
- "type": "boolean",
- "default": false
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
},
- "firstMessageMode": {
+ "updatedAt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the mode for the first message. Default is 'assistant-speaks-first'.\n\nUse:\n- 'assistant-speaks-first' to have the assistant speak first.\n- 'assistant-waits-for-user' to have the assistant wait for the user to speak first.\n- 'assistant-speaks-first-with-model-generated-message' to have the assistant speak first with a message generated by the model based on the conversation state. (`assistant.model.messages` at call start, `call.messages` at squad transfer points).\n\n@default 'assistant-speaks-first'",
- "enum": [
- "assistant-speaks-first",
- "assistant-speaks-first-with-model-generated-message",
- "assistant-waits-for-user"
- ],
- "example": "assistant-speaks-first"
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
},
- "voicemailDetection": {
- "description": "These are the settings to configure or disable voicemail detection. Alternatively, voicemail detection can be configured using the model.tools=[VoicemailTool].\nThis uses Twilio's built-in detection while the VoicemailTool relies on the model to detect if a voicemail was reached.\nYou can use neither of them, one of them, or both of them. By default, Twilio built-in detection is enabled while VoicemailTool is not.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/GoogleVoicemailDetectionPlan",
- "title": "Google"
- },
- {
- "$ref": "#/components/schemas/OpenAIVoicemailDetectionPlan",
- "title": "OpenAI"
- },
- {
- "$ref": "#/components/schemas/TwilioVoicemailDetectionPlan",
- "title": "Twilio"
- },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
{
- "$ref": "#/components/schemas/VapiVoicemailDetectionPlan",
- "title": "Vapi"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
},
- "clientMessages": {
- "type": "array",
+ "name": {
+ "type": "string",
+ "description": "The name of the tool, fixed to 'bash'",
+ "default": "bash",
"enum": [
- "conversation-update",
- "function-call",
- "function-call-result",
- "hang",
- "language-changed",
- "metadata",
- "model-output",
- "speech-update",
- "status-update",
- "transcript",
- "tool-calls",
- "tool-calls-result",
- "tool.completed",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "workflow.node.started"
- ],
- "example": [
- "conversation-update",
- "function-call",
- "hang",
- "model-output",
- "speech-update",
- "status-update",
- "transfer-update",
- "transcript",
- "tool-calls",
- "user-interrupted",
- "voice-input",
- "workflow.node.started"
- ],
- "description": "These are the messages that will be sent to your Client SDKs. Default is conversation-update,function-call,hang,model-output,speech-update,status-update,transfer-update,transcript,tool-calls,user-interrupted,voice-input,workflow.node.started. You can check the shape of the messages in ClientMessage schema.",
+ "bash"
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "subType",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt",
+ "name"
+ ]
+ },
+ "ComputerTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
- "type": "string",
- "enum": [
- "conversation-update",
- "function-call",
- "function-call-result",
- "hang",
- "language-changed",
- "metadata",
- "model-output",
- "speech-update",
- "status-update",
- "transcript",
- "tool-calls",
- "tool-calls-result",
- "tool.completed",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "workflow.node.started"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
]
}
},
- "serverMessages": {
- "type": "array",
+ "type": {
+ "type": "string",
"enum": [
- "conversation-update",
- "end-of-call-report",
- "function-call",
- "hang",
- "language-changed",
- "language-change-detected",
- "model-output",
- "phone-call-control",
- "speech-update",
- "status-update",
- "transcript",
- "transcript[transcriptType=\"final\"]",
- "tool-calls",
- "transfer-destination-request",
- "handoff-destination-request",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "chat.created",
- "chat.deleted",
- "session.created",
- "session.updated",
- "session.deleted"
- ],
- "example": [
- "conversation-update",
- "end-of-call-report",
- "function-call",
- "hang",
- "speech-update",
- "status-update",
- "tool-calls",
- "transfer-destination-request",
- "handoff-destination-request",
- "user-interrupted"
+ "computer"
],
- "description": "These are the messages that will be sent to your Server URL. Default is conversation-update,end-of-call-report,function-call,hang,speech-update,status-update,tool-calls,transfer-destination-request,handoff-destination-request,user-interrupted. You can check the shape of the messages in ServerMessage schema.",
- "items": {
- "type": "string",
- "enum": [
- "conversation-update",
- "end-of-call-report",
- "function-call",
- "hang",
- "language-changed",
- "language-change-detected",
- "model-output",
- "phone-call-control",
- "speech-update",
- "status-update",
- "transcript",
- "transcript[transcriptType=\"final\"]",
- "tool-calls",
- "transfer-destination-request",
- "handoff-destination-request",
- "transfer-update",
- "user-interrupted",
- "voice-input",
- "chat.created",
- "chat.deleted",
- "session.created",
- "session.updated",
- "session.deleted"
- ]
- }
- },
- "maxDurationSeconds": {
- "type": "number",
- "description": "This is the maximum number of seconds that the call will last. When the call reaches this duration, it will be ended.\n\n@default 600 (10 minutes)",
- "minimum": 10,
- "maximum": 43200,
- "example": 600
+ "description": "The type of tool. \"computer\" for Computer tool."
},
- "backgroundSound": {
- "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
- "oneOf": [
+ "subType": {
+ "type": "string",
+ "enum": [
+ "computer_20241022"
+ ],
+ "description": "The sub type of tool."
+ },
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "allOf": [
{
- "type": "enum",
- "enum": [
- "off",
- "office"
- ],
- "example": "office"
- },
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the tool."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
{
- "type": "string",
- "format": "uri",
- "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
},
- "modelOutputInMessagesEnabled": {
- "type": "boolean",
- "description": "This determines whether the model's output is used in conversation history rather than the transcription of assistant's speech.\n\nDefault `false` while in beta.\n\n@default false",
- "example": false
+ "name": {
+ "type": "string",
+ "description": "The name of the tool, fixed to 'computer'",
+ "default": "computer",
+ "enum": [
+ "computer"
+ ]
},
- "transportConfigurations": {
+ "displayWidthPx": {
+ "type": "number",
+ "description": "The display width in pixels"
+ },
+ "displayHeightPx": {
+ "type": "number",
+ "description": "The display height in pixels"
+ },
+ "displayNumber": {
+ "type": "number",
+ "description": "Optional display number"
+ }
+ },
+ "required": [
+ "type",
+ "subType",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt",
+ "name",
+ "displayWidthPx",
+ "displayHeightPx"
+ ]
+ },
+ "TextEditorTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
"type": "array",
- "description": "These are the configurations to be passed to the transport providers of assistant's calls, like Twilio. You can store multiple configurations for different transport providers. For a call, only the configuration matching the call transport provider is used.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/TransportConfigurationTwilio",
- "title": "Twilio"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "observabilityPlan": {
- "description": "This is the plan for observability of assistant's calls.\n\nCurrently, only Langfuse is supported.",
- "oneOf": [
+ "type": {
+ "type": "string",
+ "enum": [
+ "textEditor"
+ ],
+ "description": "The type of tool. \"textEditor\" for Text Editor tool."
+ },
+ "subType": {
+ "type": "string",
+ "enum": [
+ "text_editor_20241022"
+ ],
+ "description": "The sub type of tool."
+ },
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "allOf": [
{
- "$ref": "#/components/schemas/LangfuseObservabilityPlan",
- "title": "Langfuse"
+ "$ref": "#/components/schemas/Server"
}
- ],
+ ]
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the tool."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/LangfuseObservabilityPlan"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
},
- "credentials": {
+ "name": {
+ "type": "string",
+ "description": "The name of the tool, fixed to 'str_replace_editor'",
+ "default": "str_replace_editor",
+ "enum": [
+ "str_replace_editor"
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "subType",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt",
+ "name"
+ ]
+ },
+ "QueryTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
"type": "array",
- "description": "These are dynamic credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
- "title": "AnthropicCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "title": "AnyscaleCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "title": "AssemblyAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureCredentialDTO",
- "title": "AzureCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "title": "AzureOpenAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "title": "ByoSipTrunkCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
- "title": "CartesiaCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
- "title": "CerebrasCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
- "title": "CloudflareCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "title": "CustomLLMCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
- "title": "DeepgramCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "title": "DeepInfraCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "title": "DeepSeekCredential"
- },
- {
- "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "title": "ElevenLabsCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGcpCredentialDTO",
- "title": "GcpCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
- "title": "GladiaCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "title": "GhlCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
- "title": "GoogleCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGroqCredentialDTO",
- "title": "GroqCredential"
- },
- {
- "$ref": "#/components/schemas/CreateHumeCredentialDTO",
- "title": "HumeCredential"
- },
- {
- "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
- "title": "InflectionAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
- "title": "LangfuseCredential"
- },
- {
- "$ref": "#/components/schemas/CreateLmntCredentialDTO",
- "title": "LmntCredential"
- },
- {
- "$ref": "#/components/schemas/CreateMakeCredentialDTO",
- "title": "MakeCredential"
- },
- {
- "$ref": "#/components/schemas/CreateMistralCredentialDTO",
- "title": "MistralCredential"
- },
- {
- "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "title": "NeuphonicCredential"
- },
- {
- "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
- "title": "OpenAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "title": "OpenRouterCredential"
- },
- {
- "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "title": "PerplexityAICredential"
- },
- {
- "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
- "title": "PlayHTCredential"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
- "title": "RimeAICredential"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
- "title": "RunpodCredential"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/CreateS3CredentialDTO",
- "title": "S3Credential"
- },
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "query"
+ ],
+ "description": "The type of tool. \"query\" for Query tool."
+ },
+ "knowledgeBases": {
+ "description": "The knowledge bases to query",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/KnowledgeBase"
+ }
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the tool."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "GoogleCalendarCreateEventTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
- "title": "SmallestAICredential"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "title": "SpeechmaticsCredential"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
- "title": "SupabaseCredential"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/CreateTavusCredentialDTO",
- "title": "TavusCredential"
- },
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "google.calendar.event.create"
+ ],
+ "description": "The type of tool. \"google.calendar.event.create\" for Google Calendar Create Event tool."
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the tool."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "GoogleSheetsRowAppendTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
- "title": "TogetherAICredential"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
- "title": "TrieveCredential"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
- "title": "TwilioCredential"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/CreateVonageCredentialDTO",
- "title": "VonageCredential"
- },
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "google.sheets.row.append"
+ ],
+ "description": "The type of tool. \"google.sheets.row.append\" for Google Sheets Row Append tool."
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the tool."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "GoogleCalendarCheckAvailabilityTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
- "title": "WebhookCredential"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/CreateXAiCredentialDTO",
- "title": "XAiCredential"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "title": "GoogleCalendarOAuth2ClientCredential"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "title": "GoogleCalendarOAuth2AuthorizationCredential"
- },
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "google.calendar.availability.check"
+ ],
+ "description": "The type of tool. \"google.calendar.availability.check\" for Google Calendar Check Availability tool."
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the tool."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "SlackSendMessageTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "title": "GoogleSheetsOAuth2AuthorizationCredential"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "title": "SlackOAuth2AuthorizationCredential"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "title": "GoHighLevelMCPCredential"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/CreateInworldCredentialDTO",
- "title": "InworldCredential"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
- "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "azure": "#/components/schemas/CreateAzureCredentialDTO",
- "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
- "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
- "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
- "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
- "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "gcp": "#/components/schemas/CreateGcpCredentialDTO",
- "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
- "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "google": "#/components/schemas/CreateGoogleCredentialDTO",
- "groq": "#/components/schemas/CreateGroqCredentialDTO",
- "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
- "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
- "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
- "make": "#/components/schemas/CreateMakeCredentialDTO",
- "openai": "#/components/schemas/CreateOpenAICredentialDTO",
- "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
- "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
- "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
- "s3": "#/components/schemas/CreateS3CredentialDTO",
- "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
- "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
- "tavus": "#/components/schemas/CreateTavusCredentialDTO",
- "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
- "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
- "vonage": "#/components/schemas/CreateVonageCredentialDTO",
- "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
- "xai": "#/components/schemas/CreateXAiCredentialDTO",
- "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "hume": "#/components/schemas/CreateHumeCredentialDTO",
- "mistral": "#/components/schemas/CreateMistralCredentialDTO",
- "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
- "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "inworld": "#/components/schemas/CreateInworldCredentialDTO",
- "minimax": "#/components/schemas/CreateMinimaxCredentialDTO"
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
- }
+ ]
}
},
- "hooks": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "slack.message.send"
+ ],
+ "description": "The type of tool. \"slack.message.send\" for Slack Send Message tool."
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the tool."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "SmsTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
"type": "array",
- "description": "This is a set of actions that will be performed on certain events.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/CallHookCallEnding",
- "title": "CallHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
- "title": "CallHookAssistantSpeechInterrupted"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
- "title": "CallHookCustomerSpeechInterrupted"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
- "title": "CallHookCustomerSpeechTimeout"
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "name": {
+ "type": {
"type": "string",
- "description": "This is the name of the assistant.\n\nThis is required when you want to transfer between assistants in a call.",
- "maxLength": 40
+ "enum": [
+ "sms"
+ ],
+ "description": "The type of tool. \"sms\" for Twilio SMS sending tool."
},
- "voicemailMessage": {
+ "id": {
"type": "string",
- "description": "This is the message that the assistant will say if the call is forwarded to voicemail.\n\nIf unspecified, it will hang up.",
- "maxLength": 1000
+ "description": "This is the unique identifier for the tool."
},
- "endCallMessage": {
+ "orgId": {
"type": "string",
- "description": "This is the message that the assistant will say if it ends the call.\n\nIf unspecified, it will hang up without saying anything.",
- "maxLength": 1000
- },
- "endCallPhrases": {
- "description": "This list contains phrases that, if spoken by the assistant, will trigger the call to be hung up. Case insensitive.",
- "type": "array",
- "items": {
- "type": "string",
- "maxLength": 140,
- "minLength": 2
- }
+ "description": "This is the unique identifier for the organization that this tool belongs to."
},
- "compliancePlan": {
- "$ref": "#/components/schemas/CompliancePlan"
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
},
- "metadata": {
- "type": "object",
- "description": "This is for metadata you want to store on the assistant."
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
},
- "backgroundSpeechDenoisingPlan": {
- "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nSmart denoising can be combined with or used independently of Fourier denoising.\n\nOrder of precedence:\n- Smart denoising\n- Fourier denoising",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
+ }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "McpTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
},
- "analysisPlan": {
- "description": "This is the plan for analysis of assistant's calls. Stored in `call.analysis`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/AnalysisPlan"
- }
- ]
+ "type": {
+ "type": "string",
+ "enum": [
+ "mcp"
+ ],
+ "description": "The type of tool. \"mcp\" for MCP tool."
},
- "artifactPlan": {
- "description": "This is the plan for artifacts generated during assistant's calls. Stored in `call.artifact`.",
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
"allOf": [
{
- "$ref": "#/components/schemas/ArtifactPlan"
+ "$ref": "#/components/schemas/Server"
}
]
},
- "startSpeakingPlan": {
- "description": "This is the plan for when the assistant should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
- "allOf": [
- {
- "$ref": "#/components/schemas/StartSpeakingPlan"
- }
- ]
+ "toolMessages": {
+ "description": "Per-tool message overrides for individual tools loaded from the MCP server. Set messages to an empty array to suppress messages for a specific tool. Tools not listed here will use the default messages from the parent tool.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/McpToolMessages"
+ }
},
- "stopSpeakingPlan": {
- "description": "This is the plan for when assistant should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
- "allOf": [
- {
- "$ref": "#/components/schemas/StopSpeakingPlan"
- }
- ]
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the tool."
},
- "monitorPlan": {
- "description": "This is the plan for real-time monitoring of the assistant's calls.\n\nUsage:\n- To enable live listening of the assistant's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the assistant's calls, set `monitorPlan.controlEnabled` to `true`.",
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/MonitorPlan"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
},
- "credentialIds": {
- "description": "These are the credentials that will be used for the assistant calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
+ "metadata": {
+ "$ref": "#/components/schemas/McpToolMetadata"
+ }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "GoHighLevelCalendarAvailabilityTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
"type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
- "type": "string"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
}
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server.url\n2. phoneNumber.serverUrl\n3. org.serverUrl",
+ "type": {
+ "type": "string",
+ "enum": [
+ "gohighlevel.calendar.availability.check"
+ ],
+ "description": "The type of tool. \"gohighlevel.calendar.availability.check\" for GoHighLevel Calendar Availability Check tool."
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the tool."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
- },
- "keypadInputPlan": {
- "$ref": "#/components/schemas/KeypadInputPlan"
}
- }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
},
- "ByoPhoneNumber": {
+ "GoHighLevelCalendarEventCreateTool": {
"type": "object",
"properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "gohighlevel.calendar.event.create"
+ ],
+ "description": "The type of tool. \"gohighlevel.calendar.event.create\" for GoHighLevel Calendar Event Create tool."
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the tool."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization that this tool belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
{
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
- },
- "hooks": {
+ }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "GoHighLevelContactCreateTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "provider": {
+ "type": {
"type": "string",
- "description": "This is to bring your own phone numbers from your own SIP trunks or Carriers.",
"enum": [
- "byo-phone-number"
- ]
- },
- "numberE164CheckEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.\n\nUse cases:\n- `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.\n- `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.\n\nIf `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\\+?[a-zA-Z0-9]+$/`).\n\n@default true (E164 check is enabled)",
- "default": true
+ "gohighlevel.contact.create"
+ ],
+ "description": "The type of tool. \"gohighlevel.contact.create\" for GoHighLevel Contact Create tool."
},
"id": {
"type": "string",
- "description": "This is the unique identifier for the phone number."
+ "description": "This is the unique identifier for the tool."
},
"orgId": {
"type": "string",
- "description": "This is the unique identifier for the org that this phone number belongs to."
+ "description": "This is the unique identifier for the organization that this tool belongs to."
},
"createdAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the phone number was created."
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
},
"updatedAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the phone number was last updated."
- },
- "status": {
- "type": "string",
- "description": "This is the status of the phone number.",
- "enum": [
- "active",
- "activating",
- "blocked"
- ]
- },
- "name": {
- "type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
- },
- "assistantId": {
- "type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "squadId": {
- "type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
- },
- "number": {
- "type": "string",
- "description": "This is the number of the customer.",
- "minLength": 3,
- "maxLength": 40
- },
- "credentialId": {
- "type": "string",
- "description": "This is the credential of your own SIP trunk or Carrier (type `byo-sip-trunk`) which can be used to make calls to this phone number.\n\nYou can add the SIP trunk or Carrier credential in the Provider Credentials page on the Dashboard to get the credentialId."
}
},
"required": [
- "provider",
+ "type",
"id",
"orgId",
"createdAt",
- "updatedAt",
- "credentialId"
+ "updatedAt"
]
},
- "TwilioPhoneNumber": {
+ "GoHighLevelContactGetTool": {
"type": "object",
"properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
- }
- ]
- },
- "hooks": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "provider": {
+ "type": {
"type": "string",
- "description": "This is to use numbers bought on Twilio.",
"enum": [
- "twilio"
- ]
- },
- "smsEnabled": {
- "type": "boolean",
- "description": "Controls whether Vapi sets the messaging webhook URL on the Twilio number during import.\n\nIf set to `false`, Vapi will not update the Twilio messaging URL, leaving it as is.\nIf `true` or omitted (default), Vapi will configure both the voice and messaging URLs.\n\n@default true",
- "default": true
+ "gohighlevel.contact.get"
+ ],
+ "description": "The type of tool. \"gohighlevel.contact.get\" for GoHighLevel Contact Get tool."
},
"id": {
"type": "string",
- "description": "This is the unique identifier for the phone number."
+ "description": "This is the unique identifier for the tool."
},
"orgId": {
"type": "string",
- "description": "This is the unique identifier for the org that this phone number belongs to."
+ "description": "This is the unique identifier for the organization that this tool belongs to."
},
"createdAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the phone number was created."
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
},
"updatedAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the phone number was last updated."
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
},
- "status": {
- "type": "string",
- "description": "This is the status of the phone number.",
- "enum": [
- "active",
- "activating",
- "blocked"
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
]
+ }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "SipRequestTool": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
},
- "twilioAuthToken": {
+ "type": {
"type": "string",
- "description": "This is the Twilio Auth Token for the phone number."
+ "enum": [
+ "sipRequest"
+ ],
+ "description": "The type of tool. \"sipRequest\" for SIP request tool."
},
- "twilioApiKey": {
+ "verb": {
"type": "string",
- "description": "This is the Twilio API Key for the phone number."
+ "enum": [
+ "INFO",
+ "MESSAGE",
+ "NOTIFY"
+ ],
+ "description": "The SIP method to send."
},
- "twilioApiSecret": {
- "type": "string",
- "description": "This is the Twilio API Secret for the phone number."
+ "headers": {
+ "description": "JSON schema for headers the model should populate when sending the SIP request.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/JsonSchema"
+ }
+ ]
},
- "name": {
+ "body": {
+ "description": "Body to include in the SIP request. Either a literal string body, or a JSON schema describing a structured body that the model should populate.",
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "$ref": "#/components/schemas/JsonSchema"
+ }
+ ]
+ },
+ "id": {
"type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
+ "description": "This is the unique identifier for the tool."
},
- "assistantId": {
+ "orgId": {
"type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "This is the unique identifier for the organization that this tool belongs to."
},
- "workflowId": {
+ "createdAt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
},
- "squadId": {
+ "updatedAt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
- },
- "number": {
- "type": "string",
- "description": "These are the digits of the phone number you own on your Twilio."
- },
- "twilioAccountSid": {
- "type": "string",
- "description": "This is the Twilio Account SID for the phone number."
}
},
"required": [
- "provider",
+ "type",
+ "verb",
"id",
"orgId",
"createdAt",
- "updatedAt",
- "number",
- "twilioAccountSid"
+ "updatedAt"
]
},
- "VonagePhoneNumber": {
+ "VoicemailTool": {
"type": "object",
"properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
- }
- ]
- },
- "hooks": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "provider": {
+ "type": {
"type": "string",
- "description": "This is to use numbers bought on Vonage.",
+ "description": "The type of tool. \"voicemail\" for Voicemail tool.",
"enum": [
- "vonage"
+ "voicemail"
]
},
+ "beepDetectionEnabled": {
+ "type": "boolean",
+ "description": "This is the flag that enables beep detection for voicemail detection and applies only to Twilio-based calls.\n\n@default false",
+ "default": false,
+ "example": false
+ },
"id": {
"type": "string",
- "description": "This is the unique identifier for the phone number."
+ "description": "This is the unique identifier for the tool."
},
"orgId": {
"type": "string",
- "description": "This is the unique identifier for the org that this phone number belongs to."
+ "description": "This is the unique identifier for the organization that this tool belongs to."
},
"createdAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the phone number was created."
+ "description": "This is the ISO 8601 date-time string of when the tool was created."
},
"updatedAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the phone number was last updated."
+ "description": "This is the ISO 8601 date-time string of when the tool was last updated."
},
- "status": {
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "CreateApiRequestToolDTO": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "type": {
"type": "string",
- "description": "This is the status of the phone number.",
"enum": [
- "active",
- "activating",
- "blocked"
+ "apiRequest"
+ ],
+ "description": "The type of tool. \"apiRequest\" for API request tool."
+ },
+ "method": {
+ "type": "string",
+ "enum": [
+ "POST",
+ "GET",
+ "PUT",
+ "PATCH",
+ "DELETE"
]
},
- "name": {
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "This is the timeout in seconds for the request. Defaults to 20 seconds.\n\n@default 20",
+ "minimum": 1,
+ "maximum": 300,
+ "example": 20
+ },
+ "credentialId": {
"type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
+ "description": "The credential ID for API request authentication",
+ "example": "550e8400-e29b-41d4-a716-446655440000"
},
- "assistantId": {
+ "encryptedPaths": {
+ "type": "array",
+ "description": "This is the paths to encrypt in the request body if credentialId and encryptionPlan are defined.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "parameters": {
+ "description": "Static key-value pairs merged into the request body. Values support Liquid templates.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ToolParameter"
+ }
+ },
+ "name": {
"type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "This is the name of the tool. This will be passed to the model.\n\nMust be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 40.",
+ "maxLength": 40,
+ "pattern": "/^[a-zA-Z0-9_-]{1,40}$/"
},
- "workflowId": {
+ "description": {
"type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "This is the description of the tool. This will be passed to the model."
},
- "squadId": {
+ "url": {
"type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "This is where the request will be sent."
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "body": {
+ "description": "This is the body of the request.",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/JsonSchema"
}
]
},
- "number": {
- "type": "string",
- "description": "These are the digits of the phone number you own on your Vonage."
+ "headers": {
+ "description": "These are the headers to send with the request.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/JsonSchema"
+ }
+ ]
},
- "credentialId": {
- "type": "string",
- "description": "This is the credential you added in dashboard.vapi.ai/keys. This is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups."
- }
- },
- "required": [
- "provider",
- "id",
- "orgId",
- "createdAt",
- "updatedAt",
- "number",
- "credentialId"
- ]
- },
- "SipAuthentication": {
- "type": "object",
- "properties": {
- "realm": {
- "type": "string",
- "description": "This will be expected in the `realm` field of the `authorization` header of the SIP INVITE. Defaults to sip.vapi.ai."
+ "backoffPlan": {
+ "description": "This is the backoff plan if the request fails. Defaults to undefined (the request will not be retried).\n\n@default undefined (the request will not be retried)",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/BackoffPlan"
+ }
+ ]
},
- "username": {
- "type": "string",
- "description": "This will be expected in the `username` field of the `authorization` header of the SIP INVITE.",
- "minLength": 20,
- "maxLength": 40
+ "variableExtractionPlan": {
+ "description": "This is the plan to extract variables from the tool's response. These will be accessible during the call and stored in `call.artifact.variableValues` after the call.\n\nUsage:\n1. Use `aliases` to extract variables from the tool's response body. (Most common case)\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{customer.name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{customer.age}}\"\n }\n ]\n}\n```\n\nThe tool response body is made available to the liquid template.\n\n2. Use `aliases` to extract variables from the tool's response body if the response is an array.\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{$[0].name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{$[0].age}}\"\n }\n ]\n}\n```\n\n$ is a shorthand for the tool's response body. `$[0]` is the first item in the array. `$[n]` is the nth item in the array. Note, $ is available regardless of the response body type (both object and array).\n\n3. Use `aliases` to extract variables from the tool's response headers.\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{tool.response.headers.customer-name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{tool.response.headers.customer-age}}\"\n }\n ]\n}\n```\n\n`tool.response` is made available to the liquid template. Particularly, both `tool.response.headers` and `tool.response.body` are available. Note, `tool.response` is available regardless of the response body type (both object and array).\n\n4. Use `schema` to extract a large portion of the tool's response body.\n\n4.1. 
If you hit example.com and it returns `{\"name\": \"John\", \"age\": 30}`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n }\n }\n }\n}\n```\nThese will be extracted as `{{ name }}` and `{{ age }}` respectively. To emphasize, object properties are extracted as direct global variables.\n\n4.2. If you hit example.com and it returns `{\"name\": {\"first\": \"John\", \"last\": \"Doe\"}}`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"object\",\n \"properties\": {\n \"first\": {\n \"type\": \"string\"\n },\n \"last\": {\n \"type\": \"string\"\n }\n }\n }\n }\n }\n}\n```\n\nThese will be extracted as `{{ name }}`. And, `{{ name.first }}` and `{{ name.last }}` will be accessible.\n\n4.3. If you hit example.com and it returns `[\"94123\", \"94124\"]`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"array\",\n \"title\": \"zipCodes\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n}\n```\n\nThis will be extracted as `{{ zipCodes }}`. To access the array items, you can use `{{ zipCodes[0] }}` and `{{ zipCodes[1] }}`.\n\n4.4. If you hit example.com and it returns `[{\"name\": \"John\", \"age\": 30, \"zipCodes\": [\"94123\", \"94124\"]}, {\"name\": \"Jane\", \"age\": 25, \"zipCodes\": [\"94125\", \"94126\"]}]`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"array\",\n \"title\": \"people\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n },\n \"zipCodes\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n }\n }\n }\n}\n```\n\nThis will be extracted as `{{ people }}`. 
To access the array items, you can use `{{ people[n].name }}`, `{{ people[n].age }}`, `{{ people[n].zipCodes }}`, `{{ people[n].zipCodes[0] }}` and `{{ people[n].zipCodes[1] }}`.\n\nNote: Both `aliases` and `schema` can be used together.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/VariableExtractionPlan"
+ }
+ ]
},
- "password": {
- "type": "string",
- "description": "This will be expected to generate the `response` field of the `authorization` header of the SIP INVITE, through digest authentication.",
- "minLength": 20,
- "maxLength": 40
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
}
},
"required": [
- "username",
- "password"
+ "type",
+ "method",
+ "url"
]
},
- "VapiPhoneNumber": {
+ "CreateCodeToolDTO": {
"type": "object",
"properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
- }
- ]
- },
- "hooks": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "provider": {
+ "type": {
"type": "string",
- "description": "This is to create free SIP phone numbers on Vapi.",
"enum": [
- "vapi"
- ]
- },
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the phone number."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the org that this phone number belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the phone number was created."
+ "code"
+ ],
+ "description": "The type of tool. \"code\" for Code tool."
},
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the phone number was last updated."
+ "async": {
+ "type": "boolean",
+ "example": false,
+ "description": "This determines if the tool is async.\n\n If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server.\n\n If sync, the assistant will wait for your server to respond. This is useful if want assistant to respond with the result from your server.\n\n Defaults to synchronous (`false`)."
},
- "status": {
- "type": "string",
- "description": "This is the status of the phone number.",
- "enum": [
- "active",
- "activating",
- "blocked"
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
]
},
- "number": {
- "type": "string",
- "description": "These are the digits of the phone number you purchased from Vapi."
- },
- "name": {
+ "code": {
"type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
+ "description": "TypeScript code to execute when the tool is called",
+ "maxLength": 50000
},
- "assistantId": {
- "type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "environmentVariables": {
+ "description": "Environment variables available in code via `env` object",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/CodeToolEnvironmentVariable"
+ }
},
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "This is the timeout in seconds for the code execution. Defaults to 10 seconds.\nMaximum is 30 seconds to prevent abuse.\n\n@default 10",
+ "minimum": 1,
+ "maximum": 30,
+ "example": 10
},
- "squadId": {
+ "credentialId": {
"type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "Credential ID containing the Val Town API key",
+ "example": "550e8400-e29b-41d4-a716-446655440000"
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "variableExtractionPlan": {
+ "description": "Plan to extract variables from the tool response",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/VariableExtractionPlan"
}
]
},
- "numberDesiredAreaCode": {
- "type": "string",
- "description": "This is the area code of the phone number to purchase.",
- "minLength": 3,
- "maxLength": 3
- },
- "sipUri": {
- "type": "string",
- "description": "This is the SIP URI of the phone number. You can SIP INVITE this. The assistant attached to this number will answer.\n\nThis is case-insensitive."
+ "function": {
+ "description": "This is the function definition of the tool.\n\nFor the Code tool, this defines the name, description, and parameters that the model\nwill use to understand when and how to call this tool.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/OpenAIFunction"
+ }
+ ]
},
- "authentication": {
- "description": "This enables authentication for incoming SIP INVITE requests to the `sipUri`.\n\nIf not set, any username/password to the 401 challenge of the SIP INVITE will be accepted.",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/SipAuthentication"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
}
},
"required": [
- "provider",
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
+ "type",
+ "code"
]
},
- "TelnyxPhoneNumber": {
+ "CreateOutputToolDTO": {
"type": "object",
"properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
- }
- ]
- },
- "hooks": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "provider": {
- "type": "string",
- "description": "This is to use numbers bought on Telnyx.",
- "enum": [
- "telnyx"
- ]
- },
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the phone number."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the org that this phone number belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the phone number was created."
- },
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the phone number was last updated."
- },
- "status": {
+ "type": {
"type": "string",
- "description": "This is the status of the phone number.",
"enum": [
- "active",
- "activating",
- "blocked"
- ]
- },
- "name": {
- "type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
- },
- "assistantId": {
- "type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "squadId": {
- "type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "output"
+ ],
+ "description": "The type of tool. \"output\" for Output tool."
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
- },
- "number": {
- "type": "string",
- "description": "These are the digits of the phone number you own on your Telnyx."
- },
- "credentialId": {
- "type": "string",
- "description": "This is the credential you added in dashboard.vapi.ai/keys. This is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups."
}
},
"required": [
- "provider",
- "id",
- "orgId",
- "createdAt",
- "updatedAt",
- "number",
- "credentialId"
+ "type"
]
},
- "CreateByoPhoneNumberDTO": {
+ "CreateBashToolDTO": {
"type": "object",
"properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
- }
- ]
- },
- "hooks": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "provider": {
+ "type": {
"type": "string",
- "description": "This is to bring your own phone numbers from your own SIP trunks or Carriers.",
"enum": [
- "byo-phone-number"
- ]
- },
- "numberE164CheckEnabled": {
- "type": "boolean",
- "description": "This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.\n\nUse cases:\n- `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.\n- `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.\n\nIf `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\\+?[a-zA-Z0-9]+$/`).\n\n@default true (E164 check is enabled)",
- "default": true
+ "bash"
+ ],
+ "description": "The type of tool. \"bash\" for Bash tool."
},
- "number": {
+ "subType": {
"type": "string",
- "description": "This is the number of the customer.",
- "minLength": 3,
- "maxLength": 40
+ "enum": [
+ "bash_20241022"
+ ],
+ "description": "The sub type of tool."
},
- "credentialId": {
- "type": "string",
- "description": "This is the credential of your own SIP trunk or Carrier (type `byo-sip-trunk`) which can be used to make calls to this phone number.\n\nYou can add the SIP trunk or Carrier credential in the Provider Credentials page on the Dashboard to get the credentialId."
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
},
"name": {
"type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
- },
- "assistantId": {
- "type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "squadId": {
- "type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "The name of the tool, fixed to 'bash'",
+ "default": "bash",
+ "enum": [
+ "bash"
+ ]
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
}
},
"required": [
- "provider",
- "credentialId"
+ "type",
+ "subType",
+ "name"
]
},
- "CreateTwilioPhoneNumberDTO": {
+ "CreateComputerToolDTO": {
"type": "object",
"properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
- }
- ]
- },
- "hooks": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "provider": {
+ "type": {
"type": "string",
- "description": "This is to use numbers bought on Twilio.",
"enum": [
- "twilio"
- ]
- },
- "smsEnabled": {
- "type": "boolean",
- "description": "Controls whether Vapi sets the messaging webhook URL on the Twilio number during import.\n\nIf set to `false`, Vapi will not update the Twilio messaging URL, leaving it as is.\nIf `true` or omitted (default), Vapi will configure both the voice and messaging URLs.\n\n@default true",
- "default": true
- },
- "number": {
- "type": "string",
- "description": "These are the digits of the phone number you own on your Twilio."
- },
- "twilioAccountSid": {
- "type": "string",
- "description": "This is the Twilio Account SID for the phone number."
- },
- "twilioAuthToken": {
- "type": "string",
- "description": "This is the Twilio Auth Token for the phone number."
+ "computer"
+ ],
+ "description": "The type of tool. \"computer\" for Computer tool."
},
- "twilioApiKey": {
+ "subType": {
"type": "string",
- "description": "This is the Twilio API Key for the phone number."
+ "enum": [
+ "computer_20241022"
+ ],
+ "description": "The sub type of tool."
},
- "twilioApiSecret": {
- "type": "string",
- "description": "This is the Twilio API Secret for the phone number."
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
},
"name": {
"type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
+ "description": "The name of the tool, fixed to 'computer'",
+ "default": "computer",
+ "enum": [
+ "computer"
+ ]
},
- "assistantId": {
- "type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "displayWidthPx": {
+ "type": "number",
+ "description": "The display width in pixels"
},
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "displayHeightPx": {
+ "type": "number",
+ "description": "The display height in pixels"
},
- "squadId": {
- "type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "displayNumber": {
+ "type": "number",
+ "description": "Optional display number"
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
}
},
"required": [
- "provider",
- "number",
- "twilioAccountSid"
+ "type",
+ "subType",
+ "name",
+ "displayWidthPx",
+ "displayHeightPx"
]
},
- "CreateVonagePhoneNumberDTO": {
+ "CreateTextEditorToolDTO": {
"type": "object",
"properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
- }
- ]
- },
- "hooks": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "provider": {
+ "type": {
"type": "string",
- "description": "This is to use numbers bought on Vonage.",
"enum": [
- "vonage"
- ]
+ "textEditor"
+ ],
+ "description": "The type of tool. \"textEditor\" for Text Editor tool."
},
- "number": {
+ "subType": {
"type": "string",
- "description": "These are the digits of the phone number you own on your Vonage."
+ "enum": [
+ "text_editor_20241022"
+ ],
+ "description": "The sub type of tool."
},
- "credentialId": {
- "type": "string",
- "description": "This is the credential you added in dashboard.vapi.ai/keys. This is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups."
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
},
"name": {
"type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
- },
- "assistantId": {
- "type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "The name of the tool, fixed to 'str_replace_editor'",
+ "default": "str_replace_editor",
+ "enum": [
+ "str_replace_editor"
+ ]
},
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "subType",
+ "name"
+ ]
+ },
+ "CreateSmsToolDTO": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
},
- "squadId": {
+ "type": {
"type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "enum": [
+ "sms"
+ ],
+ "description": "The type of tool. \"sms\" for Twilio SMS sending tool."
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
}
},
"required": [
- "provider",
- "number",
- "credentialId"
+ "type"
]
},
- "CreateVapiPhoneNumberDTO": {
+ "CreateSipRequestToolDTO": {
"type": "object",
"properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
- }
- ]
- },
- "hooks": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "provider": {
+ "type": {
"type": "string",
- "description": "This is to create free SIP phone numbers on Vapi.",
"enum": [
- "vapi"
- ]
- },
- "numberDesiredAreaCode": {
- "type": "string",
- "description": "This is the area code of the phone number to purchase.",
- "minLength": 3,
- "maxLength": 3
+ "sipRequest"
+ ],
+ "description": "The type of tool. \"sipRequest\" for SIP request tool."
},
- "sipUri": {
+ "verb": {
"type": "string",
- "description": "This is the SIP URI of the phone number. You can SIP INVITE this. The assistant attached to this number will answer.\n\nThis is case-insensitive."
+ "enum": [
+ "INFO",
+ "MESSAGE",
+ "NOTIFY"
+ ],
+ "description": "The SIP method to send."
},
- "authentication": {
- "description": "This enables authentication for incoming SIP INVITE requests to the `sipUri`.\n\nIf not set, any username/password to the 401 challenge of the SIP INVITE will be accepted.",
+ "headers": {
+ "description": "JSON schema for headers the model should populate when sending the SIP request.",
"allOf": [
{
- "$ref": "#/components/schemas/SipAuthentication"
+ "$ref": "#/components/schemas/JsonSchema"
}
]
},
- "name": {
- "type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
- },
- "assistantId": {
- "type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "squadId": {
- "type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "body": {
+ "description": "Body to include in the SIP request. Either a literal string body, or a JSON schema describing a structured body that the model should populate.",
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "$ref": "#/components/schemas/JsonSchema"
+ }
+ ]
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
}
},
"required": [
- "provider"
+ "type",
+ "verb"
]
},
- "CreateTelnyxPhoneNumberDTO": {
+ "UpdateApiRequestToolDTO": {
"type": "object",
"properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
- }
- ]
- },
- "hooks": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "provider": {
+ "method": {
"type": "string",
- "description": "This is to use numbers bought on Telnyx.",
"enum": [
- "telnyx"
+ "POST",
+ "GET",
+ "PUT",
+ "PATCH",
+ "DELETE"
]
},
- "number": {
- "type": "string",
- "description": "These are the digits of the phone number you own on your Telnyx."
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "This is the timeout in seconds for the request. Defaults to 20 seconds.\n\n@default 20",
+ "minimum": 1,
+ "maximum": 300,
+ "example": 20
},
"credentialId": {
"type": "string",
- "description": "This is the credential you added in dashboard.vapi.ai/keys. This is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups."
+ "description": "The credential ID for API request authentication",
+ "example": "550e8400-e29b-41d4-a716-446655440000"
},
- "name": {
- "type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
+ "encryptedPaths": {
+ "type": "array",
+ "description": "This is the paths to encrypt in the request body if credentialId and encryptionPlan are defined.",
+ "items": {
+ "type": "string"
+ }
},
- "assistantId": {
+ "parameters": {
+ "description": "Static key-value pairs merged into the request body. Values support Liquid templates.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ToolParameter"
+ }
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ },
+ "name": {
"type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "This is the name of the tool. This will be passed to the model.\n\nMust be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 40.",
+ "maxLength": 40,
+ "pattern": "/^[a-zA-Z0-9_-]{1,40}$/"
},
- "workflowId": {
+ "description": {
"type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "This is the description of the tool. This will be passed to the model."
},
- "squadId": {
+ "url": {
"type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "This is where the request will be sent."
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "body": {
+ "description": "This is the body of the request.",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/JsonSchema"
}
]
- }
- },
- "required": [
- "provider",
- "number",
- "credentialId"
- ]
- },
- "UpdateByoPhoneNumberDTO": {
- "type": "object",
- "properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
+ },
+ "headers": {
+ "description": "These are the headers to send with the request.",
+ "allOf": [
{
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
+ "$ref": "#/components/schemas/JsonSchema"
+ }
+ ]
+ },
+ "backoffPlan": {
+ "description": "This is the backoff plan if the request fails. Defaults to undefined (the request will not be retried).\n\n@default undefined (the request will not be retried)",
+ "allOf": [
{
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
+ "$ref": "#/components/schemas/BackoffPlan"
}
]
},
- "hooks": {
+ "variableExtractionPlan": {
+ "description": "This is the plan to extract variables from the tool's response. These will be accessible during the call and stored in `call.artifact.variableValues` after the call.\n\nUsage:\n1. Use `aliases` to extract variables from the tool's response body. (Most common case)\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{customer.name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{customer.age}}\"\n }\n ]\n}\n```\n\nThe tool response body is made available to the liquid template.\n\n2. Use `aliases` to extract variables from the tool's response body if the response is an array.\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{$[0].name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{$[0].age}}\"\n }\n ]\n}\n```\n\n$ is a shorthand for the tool's response body. `$[0]` is the first item in the array. `$[n]` is the nth item in the array. Note, $ is available regardless of the response body type (both object and array).\n\n3. Use `aliases` to extract variables from the tool's response headers.\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{tool.response.headers.customer-name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{tool.response.headers.customer-age}}\"\n }\n ]\n}\n```\n\n`tool.response` is made available to the liquid template. Particularly, both `tool.response.headers` and `tool.response.body` are available. Note, `tool.response` is available regardless of the response body type (both object and array).\n\n4. Use `schema` to extract a large portion of the tool's response body.\n\n4.1. 
If you hit example.com and it returns `{\"name\": \"John\", \"age\": 30}`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n }\n }\n }\n}\n```\nThese will be extracted as `{{ name }}` and `{{ age }}` respectively. To emphasize, object properties are extracted as direct global variables.\n\n4.2. If you hit example.com and it returns `{\"name\": {\"first\": \"John\", \"last\": \"Doe\"}}`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"object\",\n \"properties\": {\n \"first\": {\n \"type\": \"string\"\n },\n \"last\": {\n \"type\": \"string\"\n }\n }\n }\n }\n }\n}\n```\n\nThese will be extracted as `{{ name }}`. And, `{{ name.first }}` and `{{ name.last }}` will be accessible.\n\n4.3. If you hit example.com and it returns `[\"94123\", \"94124\"]`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"array\",\n \"title\": \"zipCodes\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n}\n```\n\nThis will be extracted as `{{ zipCodes }}`. To access the array items, you can use `{{ zipCodes[0] }}` and `{{ zipCodes[1] }}`.\n\n4.4. If you hit example.com and it returns `[{\"name\": \"John\", \"age\": 30, \"zipCodes\": [\"94123\", \"94124\"]}, {\"name\": \"Jane\", \"age\": 25, \"zipCodes\": [\"94125\", \"94126\"]}]`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"array\",\n \"title\": \"people\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n },\n \"zipCodes\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n }\n }\n }\n}\n```\n\nThis will be extracted as `{{ people }}`. 
To access the array items, you can use `{{ people[n].name }}`, `{{ people[n].age }}`, `{{ people[n].zipCodes }}`, `{{ people[n].zipCodes[0] }}` and `{{ people[n].zipCodes[1] }}`.\n\nNote: Both `aliases` and `schema` can be used together.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/VariableExtractionPlan"
+ }
+ ]
+ }
+ }
+ },
+ "UpdateCodeToolDTO": {
+ "type": "object",
+ "properties": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "numberE164CheckEnabled": {
+ "async": {
"type": "boolean",
- "description": "This is the flag to toggle the E164 check for the `number` field. This is an advanced property which should be used if you know your use case requires it.\n\nUse cases:\n- `false`: To allow non-E164 numbers like `+001234567890`, `1234`, or `abc`. This is useful for dialing out to non-E164 numbers on your SIP trunks.\n- `true` (default): To allow only E164 numbers like `+14155551234`. This is standard for PSTN calls.\n\nIf `false`, the `number` is still required to only contain alphanumeric characters (regex: `/^\\+?[a-zA-Z0-9]+$/`).\n\n@default true (E164 check is enabled)",
- "default": true
+ "example": false,
+ "description": "This determines if the tool is async.\n\n If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server.\n\n If sync, the assistant will wait for your server to respond. This is useful if want assistant to respond with the result from your server.\n\n Defaults to synchronous (`false`)."
},
- "name": {
- "type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
},
- "assistantId": {
+ "code": {
"type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "TypeScript code to execute when the tool is called",
+ "maxLength": 50000
},
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "environmentVariables": {
+ "description": "Environment variables available in code via `env` object",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/CodeToolEnvironmentVariable"
+ }
},
- "squadId": {
+ "timeoutSeconds": {
+ "type": "number",
+ "description": "This is the timeout in seconds for the code execution. Defaults to 10 seconds.\nMaximum is 30 seconds to prevent abuse.\n\n@default 10",
+ "minimum": 1,
+ "maximum": 30,
+ "example": 10
+ },
+ "credentialId": {
"type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "Credential ID containing the Val Town API key",
+ "example": "550e8400-e29b-41d4-a716-446655440000"
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "variableExtractionPlan": {
+ "description": "Plan to extract variables from the tool response",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/VariableExtractionPlan"
}
]
},
- "number": {
- "type": "string",
- "description": "This is the number of the customer.",
- "minLength": 3,
- "maxLength": 40
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
},
- "credentialId": {
- "type": "string",
- "description": "This is the credential of your own SIP trunk or Carrier (type `byo-sip-trunk`) which can be used to make calls to this phone number.\n\nYou can add the SIP trunk or Carrier credential in the Provider Credentials page on the Dashboard to get the credentialId."
+ "function": {
+ "description": "This is the function definition of the tool.\n\nFor the Code tool, this defines the name, description, and parameters that the model\nwill use to understand when and how to call this tool.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/OpenAIFunction"
+ }
+ ]
}
}
},
- "UpdateTwilioPhoneNumberDTO": {
+ "UpdateDtmfToolDTO": {
"type": "object",
"properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
- }
- ]
- },
- "hooks": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "smsEnabled": {
+ "sipInfoDtmfEnabled": {
"type": "boolean",
- "description": "Controls whether Vapi sets the messaging webhook URL on the Twilio number during import.\n\nIf set to `false`, Vapi will not update the Twilio messaging URL, leaving it as is.\nIf `true` or omitted (default), Vapi will configure both the voice and messaging URLs.\n\n@default true",
- "default": true
- },
- "name": {
- "type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
- },
- "assistantId": {
- "type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "squadId": {
- "type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "This enables sending DTMF tones via SIP INFO messages instead of RFC 2833 (RTP events). When enabled, DTMF digits will be sent using the SIP INFO method, which can be more reliable in some network configurations. Only relevant when using the `vapi.sip` transport.",
+ "default": false
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
- },
- "number": {
- "type": "string",
- "description": "These are the digits of the phone number you own on your Twilio."
- },
- "twilioAccountSid": {
- "type": "string",
- "description": "This is the Twilio Account SID for the phone number."
- },
- "twilioAuthToken": {
- "type": "string",
- "description": "This is the Twilio Auth Token for the phone number."
- },
- "twilioApiKey": {
- "type": "string",
- "description": "This is the Twilio API Key for the phone number."
- },
- "twilioApiSecret": {
- "type": "string",
- "description": "This is the Twilio API Secret for the phone number."
}
}
},
- "UpdateVonagePhoneNumberDTO": {
+ "UpdateEndCallToolDTO": {
"type": "object",
"properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
- }
- ]
- },
- "hooks": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "name": {
- "type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
- },
- "assistantId": {
- "type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "squadId": {
- "type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
- },
- "number": {
- "type": "string",
- "description": "These are the digits of the phone number you own on your Vonage."
- },
- "credentialId": {
- "type": "string",
- "description": "This is the credential you added in dashboard.vapi.ai/keys. This is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups."
}
}
},
- "UpdateVapiPhoneNumberDTO": {
- "type": "object",
- "properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
- }
- ]
- },
- "hooks": {
+ "UpdateFunctionToolDTO": {
+ "type": "object",
+ "properties": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "name": {
- "type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
- },
- "assistantId": {
- "type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "squadId": {
- "type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "async": {
+ "type": "boolean",
+ "example": false,
+ "description": "This determines if the tool is async.\n\n If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server.\n\n If sync, the assistant will wait for your server to respond. This is useful if want assistant to respond with the result from your server.\n\n Defaults to synchronous (`false`)."
},
"server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
"allOf": [
{
"$ref": "#/components/schemas/Server"
}
]
},
- "sipUri": {
- "type": "string",
- "description": "This is the SIP URI of the phone number. You can SIP INVITE this. The assistant attached to this number will answer.\n\nThis is case-insensitive."
+ "variableExtractionPlan": {
+ "description": "Plan to extract variables from the tool response",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/VariableExtractionPlan"
+ }
+ ]
},
- "authentication": {
- "description": "This enables authentication for incoming SIP INVITE requests to the `sipUri`.\n\nIf not set, any username/password to the 401 challenge of the SIP INVITE will be accepted.",
+ "parameters": {
+ "description": "Static key-value pairs merged into the request body. Values support Liquid templates.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ToolParameter"
+ }
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/SipAuthentication"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ },
+ "function": {
+ "description": "This is the function definition of the tool.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/OpenAIFunction"
}
]
}
}
},
- "UpdateTelnyxPhoneNumberDTO": {
+ "UpdateGhlToolDTO": {
"type": "object",
"properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
- }
- ]
- },
- "hooks": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "name": {
- "type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
- },
- "assistantId": {
- "type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "squadId": {
- "type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
- },
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
},
- "number": {
- "type": "string",
- "description": "These are the digits of the phone number you own on your Telnyx."
- },
- "credentialId": {
- "type": "string",
- "description": "This is the credential you added in dashboard.vapi.ai/keys. This is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups."
+ "metadata": {
+ "$ref": "#/components/schemas/GhlToolMetadata"
}
}
},
- "ImportVonagePhoneNumberDTO": {
+ "UpdateMakeToolDTO": {
"type": "object",
"properties": {
- "fallbackDestination": {
- "description": "This is the fallback destination an inbound call will be transferred to if:\n1. `assistantId` is not set\n2. `squadId` is not set\n3. and, `assistant-request` message to the `serverUrl` fails\n\nIf this is not set and above conditions are met, the inbound call is hung up with an error message.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "NumberTransferDestination"
- },
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
{
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "SipTransferDestination"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
},
- "hooks": {
+ "metadata": {
+ "$ref": "#/components/schemas/MakeToolMetadata"
+ }
+ }
+ },
+ "UpdateHandoffToolDTO": {
+ "type": "object",
+ "properties": {
+ "messages": {
"type": "array",
- "description": "This is the hooks that will be used for incoming calls to this phone number.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/PhoneNumberHookCallRinging",
- "title": "PhoneNumberHookCallRinging"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/PhoneNumberHookCallEnding",
- "title": "PhoneNumberHookCallEnding"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "vonagePhoneNumber": {
- "type": "string",
- "description": "These are the digits of the phone number you own on your Vonage.",
- "deprecated": true
- },
- "credentialId": {
- "type": "string",
- "description": "This is the credential you added in dashboard.vapi.ai/keys. This is used to configure the number to send inbound calls to Vapi, make outbound calls and do live call updates like transfers and hangups."
- },
- "name": {
- "type": "string",
- "description": "This is the name of the phone number. This is just for your own reference.",
- "maxLength": 40
- },
- "assistantId": {
+ "defaultResult": {
"type": "string",
- "description": "This is the assistant that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId` nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "description": "This is the default local tool result message used when no runtime handoff result override is returned."
},
- "workflowId": {
- "type": "string",
- "description": "This is the workflow that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "destinations": {
+ "type": "array",
+ "description": "These are the destinations that the call can be handed off to.\n\nUsage:\n1. Single destination\n\nUse `assistantId` to handoff the call to a saved assistant, or `assistantName` to handoff the call to an assistant in the same squad.\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\", // or \"assistantName\": \"Assistant123\"\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n2. Multiple destinations\n\n2.1. Multiple Tools, Each With One Destination (OpenAI recommended)\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\",\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n },\n ],\n },\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-456\",\n \"description\": \"customer wants to be handed off to assistant-456\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n2.2. One Tool, Multiple Destinations (Anthropic recommended)\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\",\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n },\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-456\",\n \"description\": \"customer wants to be handed off to assistant-456\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n3. 
Dynamic destination\n\n3.1 To determine the destination dynamically, supply a `dynamic` handoff destination type and a `server` object.\n VAPI will send a handoff-destination-request webhook to the `server.url`.\n The response from the server will be used as the destination (if valid).\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"dynamic\",\n \"server\": {\n \"url\": \"https://example.com\"\n }\n }\n ],\n }\n ]\n}\n```\n\n3.2. To pass custom parameters to the server, you can use the `function` object.\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"dynamic\",\n \"server\": {\n \"url\": \"https://example.com\"\n },\n }\n ],\n \"function\": {\n \"name\": \"handoff\",\n \"description\": \"Call this function when the customer is ready to be handed off to the next assistant\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"destination\": {\n \"type\": \"string\",\n \"description\": \"Use dynamic when customer is ready to be handed off to the next assistant\",\n \"enum\": [\"dynamic\"]\n },\n \"customerAreaCode\": {\n \"type\": \"number\",\n \"description\": \"Area code of the customer\"\n },\n \"customerIntent\": {\n \"type\": \"string\",\n \"enum\": [\"new-customer\", \"existing-customer\"],\n \"description\": \"Use new-customer when customer is a new customer, existing-customer when customer is an existing customer\"\n },\n \"customerSentiment\": {\n \"type\": \"string\",\n \"enum\": [\"positive\", \"negative\", \"neutral\"],\n \"description\": \"Use positive when customer is happy, negative when customer is unhappy, neutral when customer is neutral\"\n }\n }\n }\n }\n }\n ]\n}\n```\n\nThe properties `customerAreaCode`, `customerIntent`, and `customerSentiment` will be passed to the server in the webhook request body.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/HandoffDestinationAssistant",
+ "title": "Assistant"
+ },
+ {
+ "$ref": "#/components/schemas/HandoffDestinationDynamic",
+ "title": "Dynamic"
+ },
+ {
+ "$ref": "#/components/schemas/HandoffDestinationSquad",
+ "title": "Squad"
+ }
+ ]
+ }
},
- "squadId": {
- "type": "string",
- "description": "This is the squad that will be used for incoming calls to this phone number.\n\nIf neither `assistantId`, `squadId`, nor `workflowId` is set, `assistant-request` will be sent to your Server URL. Check `ServerMessage` and `ServerMessageResponse` for the shape of the message and response that is expected."
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. assistant.server\n2. phoneNumber.server\n3. org.server",
+ "function": {
+ "description": "This is the optional function definition that will be passed to the LLM.\nIf this is not defined, we will construct this based on the other properties.\n\nFor example, given the following tools definition:\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\",\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n },\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-456\",\n \"description\": \"customer wants to be handed off to assistant-456\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\nWe will construct the following function definition:\n```json\n{\n \"function\": {\n \"name\": \"handoff_to_assistant-123\",\n \"description\": \"\n Use this function to handoff the call to the next assistant.\n Only use it when instructions explicitly ask you to use the handoff_to_assistant function.\n DO NOT call this function unless you are instructed to do so.\n Here are the destinations you can handoff the call to:\n 1. assistant-123. When: customer wants to be handed off to assistant-123\n 2. assistant-456. When: customer wants to be handed off to assistant-456\n \",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"destination\": {\n \"type\": \"string\",\n \"description\": \"Options: assistant-123 (customer wants to be handed off to assistant-123), assistant-456 (customer wants to be handed off to assistant-456)\",\n \"enum\": [\"assistant-123\", \"assistant-456\"]\n },\n },\n \"required\": [\"destination\"]\n }\n }\n}\n```\n\nTo override this function, please provide an OpenAI function definition and refer to it in the system prompt.\nYou may override parts of the function definition (i.e. 
you may only want to change the function name for your prompt).\nIf you choose to override the function parameters, it must include `destination` as a required parameter, and it must evaluate to either an assistantId, assistantName, or a the string literal `dynamic`.\n\nTo pass custom parameters to the server in a dynamic handoff, you can use the function parameters, with `dynamic` as the destination.\n```json\n{\n \"function\": {\n \"name\": \"dynamic_handoff\",\n \"description\": \"\n Call this function when the customer is ready to be handed off to the next assistant\n \",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"destination\": {\n \"type\": \"string\",\n \"enum\": [\"dynamic\"]\n },\n \"customerAreaCode\": {\n \"type\": \"number\",\n \"description\": \"Area code of the customer\"\n },\n \"customerIntent\": {\n \"type\": \"string\",\n \"enum\": [\"new-customer\", \"existing-customer\"],\n \"description\": \"Use new-customer when customer is a new customer, existing-customer when customer is an existing customer\"\n },\n \"customerSentiment\": {\n \"type\": \"string\",\n \"enum\": [\"positive\", \"negative\", \"neutral\"],\n \"description\": \"Use positive when customer is happy, negative when customer is unhappy, neutral when customer is neutral\"\n }\n },\n \"required\": [\"destination\", \"customerAreaCode\", \"customerIntent\", \"customerSentiment\"]\n }\n }\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/OpenAIFunction"
}
]
}
- },
- "required": [
- "vonagePhoneNumber",
- "credentialId"
- ]
+ }
},
- "PhoneNumberPaginatedResponse": {
+ "UpdateTransferCallToolDTO": {
"type": "object",
"properties": {
- "results": {
+ "messages": {
"type": "array",
- "description": "A list of phone numbers, which can be of any provider type.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ByoPhoneNumber"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/TwilioPhoneNumber"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/VonagePhoneNumber"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
},
{
- "$ref": "#/components/schemas/VapiPhoneNumber"
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
+ },
+ "destinations": {
+ "type": "array",
+ "description": "These are the destinations that the call can be transferred to. If no destinations are provided, server.url will be used to get the transfer destination once the tool is called.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationAssistant",
+ "title": "Assistant"
},
{
- "$ref": "#/components/schemas/TelnyxPhoneNumber"
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "Number"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "Sip"
}
]
}
},
- "metadata": {
- "description": "Metadata about the pagination.",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/PaginationMeta"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
}
- },
- "required": [
- "results",
- "metadata"
- ]
+ }
},
- "ApiRequestTool": {
+ "UpdateOutputToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -33056,48 +42862,6 @@
]
}
},
- "type": {
- "type": "string",
- "enum": [
- "apiRequest"
- ],
- "description": "The type of tool. \"apiRequest\" for API request tool."
- },
- "method": {
- "type": "string",
- "enum": [
- "POST",
- "GET",
- "PUT",
- "PATCH",
- "DELETE"
- ]
- },
- "timeoutSeconds": {
- "type": "number",
- "description": "This is the timeout in seconds for the request. Defaults to 20 seconds.\n\n@default 20",
- "minimum": 1,
- "maximum": 300,
- "example": 20
- },
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
- },
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
- },
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
@@ -33105,66 +42869,10 @@
"$ref": "#/components/schemas/ToolRejectionPlan"
}
]
- },
- "name": {
- "type": "string",
- "description": "This is the name of the tool. This will be passed to the model.\n\nMust be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 40.",
- "maxLength": 40,
- "pattern": "/^[a-zA-Z0-9_-]{1,40}$/"
- },
- "description": {
- "type": "string",
- "description": "This is the description of the tool. This will be passed to the model.",
- "maxLength": 1000
- },
- "url": {
- "type": "string",
- "description": "This is where the request will be sent."
- },
- "body": {
- "description": "This is the body of the request.",
- "allOf": [
- {
- "$ref": "#/components/schemas/JsonSchema"
- }
- ]
- },
- "headers": {
- "description": "These are the headers to send with the request.",
- "allOf": [
- {
- "$ref": "#/components/schemas/JsonSchema"
- }
- ]
- },
- "backoffPlan": {
- "description": "This is the backoff plan if the request fails. Defaults to undefined (the request will not be retried).\n\n@default undefined (the request will not be retried)",
- "allOf": [
- {
- "$ref": "#/components/schemas/BackoffPlan"
- }
- ]
- },
- "variableExtractionPlan": {
- "description": "This is the plan to extract variables from the tool's response. These will be accessible during the call and stored in `call.artifact.variableValues` after the call.\n\nUsage:\n1. Use `aliases` to extract variables from the tool's response body. (Most common case)\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{customer.name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{customer.age}}\"\n }\n ]\n}\n```\n\nThe tool response body is made available to the liquid template.\n\n2. Use `aliases` to extract variables from the tool's response body if the response is an array.\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{$[0].name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{$[0].age}}\"\n }\n ]\n}\n```\n\n$ is a shorthand for the tool's response body. `$[0]` is the first item in the array. `$[n]` is the nth item in the array. Note, $ is available regardless of the response body type (both object and array).\n\n3. Use `aliases` to extract variables from the tool's response headers.\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{tool.response.headers.customer-name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{tool.response.headers.customer-age}}\"\n }\n ]\n}\n```\n\n`tool.response` is made available to the liquid template. Particularly, both `tool.response.headers` and `tool.response.body` are available. Note, `tool.response` is available regardless of the response body type (both object and array).\n\n4. Use `schema` to extract a large portion of the tool's response body.\n\n4.1. 
If you hit example.com and it returns `{\"name\": \"John\", \"age\": 30}`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n }\n }\n }\n}\n```\nThese will be extracted as `{{ name }}` and `{{ age }}` respectively. To emphasize, object properties are extracted as direct global variables.\n\n4.2. If you hit example.com and it returns `{\"name\": {\"first\": \"John\", \"last\": \"Doe\"}}`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"object\",\n \"properties\": {\n \"first\": {\n \"type\": \"string\"\n },\n \"last\": {\n \"type\": \"string\"\n }\n }\n }\n }\n }\n}\n```\n\nThese will be extracted as `{{ name }}`. And, `{{ name.first }}` and `{{ name.last }}` will be accessible.\n\n4.3. If you hit example.com and it returns `[\"94123\", \"94124\"]`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"array\",\n \"title\": \"zipCodes\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n}\n```\n\nThis will be extracted as `{{ zipCodes }}`. To access the array items, you can use `{{ zipCodes[0] }}` and `{{ zipCodes[1] }}`.\n\n4.4. If you hit example.com and it returns `[{\"name\": \"John\", \"age\": 30, \"zipCodes\": [\"94123\", \"94124\"]}, {\"name\": \"Jane\", \"age\": 25, \"zipCodes\": [\"94125\", \"94126\"]}]`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"array\",\n \"title\": \"people\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n },\n \"zipCodes\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n }\n }\n }\n}\n```\n\nThis will be extracted as `{{ people }}`. 
To access the array items, you can use `{{ people[n].name }}`, `{{ people[n].age }}`, `{{ people[n].zipCodes }}`, `{{ people[n].zipCodes[0] }}` and `{{ people[n].zipCodes[1] }}`.\n\nNote: Both `aliases` and `schema` can be used together.",
- "allOf": [
- {
- "$ref": "#/components/schemas/VariableExtractionPlan"
- }
- ]
}
- },
- "required": [
- "type",
- "method",
- "id",
- "orgId",
- "createdAt",
- "updatedAt",
- "url"
- ]
+ }
},
- "DtmfTool": {
+ "UpdateBashToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -33191,30 +42899,20 @@
]
}
},
- "type": {
+ "subType": {
"type": "string",
"enum": [
- "dtmf"
+ "bash_20241022"
],
- "description": "The type of tool. \"dtmf\" for DTMF tool."
- },
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
+ "description": "The sub type of tool."
},
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
},
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
@@ -33223,17 +42921,18 @@
"$ref": "#/components/schemas/ToolRejectionPlan"
}
]
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the tool, fixed to 'bash'",
+ "default": "bash",
+ "enum": [
+ "bash"
+ ]
}
- },
- "required": [
- "type",
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
- ]
+ }
},
- "EndCallTool": {
+ "UpdateComputerToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -33260,30 +42959,20 @@
]
}
},
- "type": {
+ "subType": {
"type": "string",
"enum": [
- "endCall"
+ "computer_20241022"
],
- "description": "The type of tool. \"endCall\" for End Call tool."
- },
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
+ "description": "The sub type of tool."
},
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ "server": {
+ "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
},
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
@@ -33292,17 +42981,30 @@
"$ref": "#/components/schemas/ToolRejectionPlan"
}
]
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the tool, fixed to 'computer'",
+ "default": "computer",
+ "enum": [
+ "computer"
+ ]
+ },
+ "displayWidthPx": {
+ "type": "number",
+ "description": "The display width in pixels"
+ },
+ "displayHeightPx": {
+ "type": "number",
+ "description": "The display height in pixels"
+ },
+ "displayNumber": {
+ "type": "number",
+ "description": "Optional display number"
}
- },
- "required": [
- "type",
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
- ]
+ }
},
- "FunctionTool": {
+ "UpdateTextEditorToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -33329,17 +43031,12 @@
]
}
},
- "type": {
+ "subType": {
"type": "string",
"enum": [
- "function"
+ "text_editor_20241022"
],
- "description": "The type of tool. \"function\" for Function tool."
- },
- "async": {
- "type": "boolean",
- "example": false,
- "description": "This determines if the tool is async.\n\n If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server.\n\n If sync, the assistant will wait for your server to respond. This is useful if want assistant to respond with the result from your server.\n\n Defaults to synchronous (`false`)."
+ "description": "The sub type of tool."
},
"server": {
"description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
@@ -33349,24 +43046,6 @@
}
]
},
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
- },
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
- },
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
@@ -33375,24 +43054,17 @@
}
]
},
- "function": {
- "description": "This is the function definition of the tool.",
- "allOf": [
- {
- "$ref": "#/components/schemas/OpenAIFunction"
- }
+ "name": {
+ "type": "string",
+ "description": "The name of the tool, fixed to 'str_replace_editor'",
+ "default": "str_replace_editor",
+ "enum": [
+ "str_replace_editor"
]
}
- },
- "required": [
- "type",
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
- ]
+ }
},
- "GhlTool": {
+ "UpdateQueryToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -33419,30 +43091,12 @@
]
}
},
- "type": {
- "type": "string",
- "enum": [
- "ghl"
- ],
- "description": "The type of tool. \"ghl\" for GHL tool."
- },
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
- },
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ "knowledgeBases": {
+ "description": "The knowledge bases to query",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/KnowledgeBase"
+ }
},
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
@@ -33451,21 +43105,10 @@
"$ref": "#/components/schemas/ToolRejectionPlan"
}
]
- },
- "metadata": {
- "$ref": "#/components/schemas/GhlToolMetadata"
}
- },
- "required": [
- "type",
- "id",
- "orgId",
- "createdAt",
- "updatedAt",
- "metadata"
- ]
+ }
},
- "MakeTool": {
+ "UpdateGoogleCalendarCreateEventToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -33492,31 +43135,6 @@
]
}
},
- "type": {
- "type": "string",
- "enum": [
- "make"
- ],
- "description": "The type of tool. \"make\" for Make tool."
- },
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
- },
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
- },
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
@@ -33524,21 +43142,10 @@
"$ref": "#/components/schemas/ToolRejectionPlan"
}
]
- },
- "metadata": {
- "$ref": "#/components/schemas/MakeToolMetadata"
}
- },
- "required": [
- "type",
- "id",
- "orgId",
- "createdAt",
- "updatedAt",
- "metadata"
- ]
+ }
},
- "TransferCallTool": {
+ "UpdateGoogleSheetsRowAppendToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -33565,50 +43172,43 @@
]
}
},
- "type": {
- "type": "string",
- "enum": [
- "transferCall"
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
]
- },
- "destinations": {
+ }
+ }
+ },
+ "UpdateGoogleCalendarCheckAvailabilityToolDTO": {
+ "type": "object",
+ "properties": {
+ "messages": {
"type": "array",
- "description": "These are the destinations that the call can be transferred to. If no destinations are provided, server.url will be used to get the transfer destination once the tool is called.",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/TransferDestinationAssistant",
- "title": "Assistant"
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
},
{
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "Number"
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
},
{
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "Sip"
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
}
]
}
},
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
- },
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
- },
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
@@ -33617,16 +43217,9 @@
}
]
}
- },
- "required": [
- "type",
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
- ]
+ }
},
- "HandoffTool": {
+ "UpdateSlackSendMessageToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -33653,47 +43246,6 @@
]
}
},
- "type": {
- "type": "string",
- "description": "This is the type of the tool.\nWhen you're using handoff tool, we recommend adding this to your system prompt\n---\n# System context\n\nYou are part of a multi-agent system designed to make agent coordination and execution easy. Agents uses two primary abstraction: **Agents** and **Handoffs**. An agent encompasses instructions and tools and can hand off a conversation to another agent when appropriate. Handoffs are achieved by calling a handoff function, generally named `handoff_to_`. Handoffs between agents are handled seamlessly in the background; do not mention or draw attention to these handoffs in your conversation with the user.\n\n# Agent context\n\n{put your agent system prompt here}\n---",
- "enum": [
- "handoff"
- ]
- },
- "destinations": {
- "type": "array",
- "description": "These are the destinations that the call can be handed off to.\n\nUsage:\n1. Single destination\n\nUse `assistantId` to handoff the call to a saved assistant, or `assistantName` to handoff the call to an assistant in the same squad.\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\", // or \"assistantName\": \"Assistant123\"\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n2. Multiple destinations\n\n2.1. Multiple Tools, Each With One Destination (OpenAI recommended)\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\",\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n },\n ],\n },\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-456\",\n \"description\": \"customer wants to be handed off to assistant-456\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n2.2. One Tool, Multiple Destinations (Anthropic recommended)\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\",\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n },\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-456\",\n \"description\": \"customer wants to be handed off to assistant-456\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n3. 
Dynamic destination\n\n3.1 To determine the destination dynamically, supply a `dynamic` handoff destination type and a `server` object.\n VAPI will send a handoff-destination-request webhook to the `server.url`.\n The response from the server will be used as the destination (if valid).\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"dynamic\",\n \"server\": {\n \"url\": \"https://example.com\"\n }\n }\n ],\n }\n ]\n}\n```\n\n3.2. To pass custom parameters to the server, you can use the `function` object.\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"dynamic\",\n \"server\": {\n \"url\": \"https://example.com\"\n },\n }\n ],\n \"function\": {\n \"name\": \"handoff\",\n \"description\": \"Call this function when the customer is ready to be handed off to the next assistant\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"destination\": {\n \"type\": \"string\",\n \"description\": \"Use dynamic when customer is ready to be handed off to the next assistant\",\n \"enum\": [\"dynamic\"]\n },\n \"customerAreaCode\": {\n \"type\": \"number\",\n \"description\": \"Area code of the customer\"\n },\n \"customerIntent\": {\n \"type\": \"string\",\n \"enum\": [\"new-customer\", \"existing-customer\"],\n \"description\": \"Use new-customer when customer is a new customer, existing-customer when customer is an existing customer\"\n },\n \"customerSentiment\": {\n \"type\": \"string\",\n \"enum\": [\"positive\", \"negative\", \"neutral\"],\n \"description\": \"Use positive when customer is happy, negative when customer is unhappy, neutral when customer is neutral\"\n }\n }\n }\n }\n }\n ]\n}\n```\n\nThe properties `customerAreaCode`, `customerIntent`, and `customerSentiment` will be passed to the server in the webhook request body.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/HandoffDestinationAssistant",
- "title": "Assistant"
- },
- {
- "$ref": "#/components/schemas/HandoffDestinationDynamic",
- "title": "Dynamic"
- }
- ]
- }
- },
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
- },
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
- },
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
@@ -33702,16 +43254,9 @@
}
]
}
- },
- "required": [
- "type",
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
- ]
+ }
},
- "OutputTool": {
+ "UpdateSmsToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -33738,31 +43283,6 @@
]
}
},
- "type": {
- "type": "string",
- "enum": [
- "output"
- ],
- "description": "The type of tool. \"output\" for Output tool."
- },
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
- },
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
- },
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
@@ -33771,16 +43291,9 @@
}
]
}
- },
- "required": [
- "type",
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
- ]
+ }
},
- "BashTool": {
+ "UpdateMcpToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -33807,20 +43320,6 @@
]
}
},
- "type": {
- "type": "string",
- "enum": [
- "bash"
- ],
- "description": "The type of tool. \"bash\" for Bash tool."
- },
- "subType": {
- "type": "string",
- "enum": [
- "bash_20241022"
- ],
- "description": "The sub type of tool."
- },
"server": {
"description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
"allOf": [
@@ -33829,23 +43328,12 @@
}
]
},
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
- },
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ "toolMessages": {
+ "description": "Per-tool message overrides for individual tools loaded from the MCP server. Set messages to an empty array to suppress messages for a specific tool. Tools not listed here will use the default messages from the parent tool.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/McpToolMessages"
+ }
},
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
@@ -33855,26 +43343,12 @@
}
]
},
- "name": {
- "type": "string",
- "description": "The name of the tool, fixed to 'bash'",
- "default": "bash",
- "enum": [
- "bash"
- ]
+ "metadata": {
+ "$ref": "#/components/schemas/McpToolMetadata"
}
- },
- "required": [
- "type",
- "subType",
- "id",
- "orgId",
- "createdAt",
- "updatedAt",
- "name"
- ]
+ }
},
- "ComputerTool": {
+ "UpdateGoHighLevelCalendarAvailabilityToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -33901,46 +43375,6 @@
]
}
},
- "type": {
- "type": "string",
- "enum": [
- "computer"
- ],
- "description": "The type of tool. \"computer\" for Computer tool."
- },
- "subType": {
- "type": "string",
- "enum": [
- "computer_20241022"
- ],
- "description": "The sub type of tool."
- },
- "server": {
- "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
- ]
- },
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
- },
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
- },
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
@@ -33948,41 +43382,10 @@
"$ref": "#/components/schemas/ToolRejectionPlan"
}
]
- },
- "name": {
- "type": "string",
- "description": "The name of the tool, fixed to 'computer'",
- "default": "computer",
- "enum": [
- "computer"
- ]
- },
- "displayWidthPx": {
- "type": "number",
- "description": "The display width in pixels"
- },
- "displayHeightPx": {
- "type": "number",
- "description": "The display height in pixels"
- },
- "displayNumber": {
- "type": "number",
- "description": "Optional display number"
}
- },
- "required": [
- "type",
- "subType",
- "id",
- "orgId",
- "createdAt",
- "updatedAt",
- "name",
- "displayWidthPx",
- "displayHeightPx"
- ]
+ }
},
- "TextEditorTool": {
+ "UpdateGoHighLevelCalendarEventCreateToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -34009,74 +43412,54 @@
]
}
},
- "type": {
- "type": "string",
- "enum": [
- "textEditor"
- ],
- "description": "The type of tool. \"textEditor\" for Text Editor tool."
- },
- "subType": {
- "type": "string",
- "enum": [
- "text_editor_20241022"
- ],
- "description": "The sub type of tool."
- },
- "server": {
- "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
}
]
- },
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
- },
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ }
+ }
+ },
+ "UpdateGoHighLevelContactCreateToolDTO": {
+ "type": "object",
+ "properties": {
+ "messages": {
+ "type": "array",
+ "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ToolMessageStart",
+ "title": "ToolMessageStart"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageComplete",
+ "title": "ToolMessageComplete"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageFailed",
+ "title": "ToolMessageFailed"
+ },
+ {
+ "$ref": "#/components/schemas/ToolMessageDelayed",
+ "title": "ToolMessageDelayed"
+ }
+ ]
+ }
},
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
- },
- "name": {
- "type": "string",
- "description": "The name of the tool, fixed to 'str_replace_editor'",
- "default": "str_replace_editor",
- "enum": [
- "str_replace_editor"
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
]
}
- },
- "required": [
- "type",
- "subType",
- "id",
- "orgId",
- "createdAt",
- "updatedAt",
- "name"
- ]
+ }
},
- "QueryTool": {
+ "UpdateGoHighLevelContactGetToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -34103,38 +43486,6 @@
]
}
},
- "type": {
- "type": "string",
- "enum": [
- "query"
- ],
- "description": "The type of tool. \"query\" for Query tool."
- },
- "knowledgeBases": {
- "description": "The knowledge bases to query",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/KnowledgeBase"
- }
- },
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
- },
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
- },
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
"allOf": [
@@ -34143,16 +43494,9 @@
}
]
}
- },
- "required": [
- "type",
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
- ]
+ }
},
- "GoogleCalendarCreateEventTool": {
+ "UpdateSipRequestToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -34179,30 +43523,33 @@
]
}
},
- "type": {
+ "verb": {
"type": "string",
"enum": [
- "google.calendar.event.create"
+ "INFO",
+ "MESSAGE",
+ "NOTIFY"
],
- "description": "The type of tool. \"google.calendar.event.create\" for Google Calendar Create Event tool."
- },
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
+ "description": "The SIP method to send."
},
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
+ "headers": {
+ "description": "JSON schema for headers the model should populate when sending the SIP request.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/JsonSchema"
+ }
+ ]
},
- "updatedAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ "body": {
+ "description": "Body to include in the SIP request. Either a literal string body, or a JSON schema describing a structured body that the model should populate.",
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "$ref": "#/components/schemas/JsonSchema"
+ }
+ ]
},
"rejectionPlan": {
"description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
@@ -34212,16 +43559,9 @@
}
]
}
- },
- "required": [
- "type",
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
- ]
+ }
},
- "GoogleSheetsRowAppendTool": {
+ "UpdateVoicemailToolDTO": {
"type": "object",
"properties": {
"messages": {
@@ -34248,3870 +43588,5285 @@
]
}
},
- "type": {
+ "beepDetectionEnabled": {
+ "type": "boolean",
+ "description": "This is the flag that enables beep detection for voicemail detection and applies only for twilio based calls.\n\n@default false",
+ "default": false,
+ "example": false
+ },
+ "rejectionPlan": {
+ "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ToolRejectionPlan"
+ }
+ ]
+ }
+ }
+ },
+ "CreateFileDTO": {
+ "type": "object",
+ "properties": {
+ "file": {
+ "type": "string",
+ "description": "This is the File you want to upload for use with the Knowledge Base.",
+ "format": "binary"
+ }
+ },
+ "required": [
+ "file"
+ ]
+ },
+ "File": {
+ "type": "object",
+ "properties": {
+ "object": {
"type": "string",
"enum": [
- "google.sheets.row.append"
+ "file"
+ ]
+ },
+ "status": {
+ "enum": [
+ "processing",
+ "done",
+ "failed"
],
- "description": "The type of tool. \"google.sheets.row.append\" for Google Sheets Row Append tool."
+ "type": "string"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the file. This is just for your own reference.",
+ "maxLength": 40
+ },
+ "originalName": {
+ "type": "string"
+ },
+ "bytes": {
+ "type": "number"
+ },
+ "purpose": {
+ "type": "string"
+ },
+ "mimetype": {
+ "type": "string"
+ },
+ "key": {
+ "type": "string"
+ },
+ "path": {
+ "type": "string"
+ },
+ "bucket": {
+ "type": "string"
+ },
+ "url": {
+ "type": "string"
+ },
+ "parsedTextUrl": {
+ "type": "string"
+ },
+ "parsedTextBytes": {
+ "type": "number"
+ },
+ "metadata": {
+ "type": "object"
},
"id": {
"type": "string",
- "description": "This is the unique identifier for the tool."
+ "description": "This is the unique identifier for the file."
},
"orgId": {
"type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
+ "description": "This is the unique identifier for the org that this file belongs to."
},
"createdAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
+ "description": "This is the ISO 8601 date-time string of when the file was created."
},
"updatedAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ "description": "This is the ISO 8601 date-time string of when the file was last updated."
+ }
+ },
+ "required": [
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "UpdateFileDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the file. This is just for your own reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ }
+ },
+ "TrieveKnowledgeBaseSearchPlan": {
+ "type": "object",
+ "properties": {
+ "topK": {
+ "type": "number",
+ "description": "Specifies the number of top chunks to return. This corresponds to the `page_size` parameter in Trieve."
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "removeStopWords": {
+ "type": "boolean",
+ "description": "If true, stop words (specified in server/src/stop-words.txt in the git repo) will be removed. This will preserve queries that are entirely stop words."
+ },
+ "scoreThreshold": {
+ "type": "number",
+ "description": "This is the score threshold to filter out chunks with a score below the threshold for cosine distance metric. For Manhattan Distance, Euclidean Distance, and Dot Product, it will filter out scores above the threshold distance. This threshold applies before weight and bias modifications. If not specified, this defaults to no threshold. A threshold of 0 will default to no threshold."
+ },
+ "searchType": {
+ "type": "string",
+ "description": "This is the search method used when searching for relevant chunks from the vector store.",
+ "enum": [
+ "fulltext",
+ "semantic",
+ "hybrid",
+ "bm25"
+ ]
+ }
+ },
+ "required": [
+ "searchType"
+ ]
+ },
+ "TrieveKnowledgeBase": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "description": "This knowledge base is provided by Trieve.\n\nTo learn more about Trieve, visit https://trieve.ai.",
+ "enum": [
+ "trieve"
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the knowledge base."
+ },
+ "searchPlan": {
+ "description": "This is the searching plan used when searching for relevant chunks from the vector store.\n\nYou should configure this if you're running into these issues:\n- Too much unnecessary context is being fed as knowledge base context.\n- Not enough relevant context is being fed as knowledge base context.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/TrieveKnowledgeBaseSearchPlan"
+ }
+ ]
+ },
+ "createPlan": {
+ "description": "This is the plan if you want us to create/import a new vector store using Trieve.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TrieveKnowledgeBaseImport",
+ "title": "Import"
}
]
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the id of the knowledge base."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the org id of the knowledge base."
}
},
"required": [
- "type",
+ "provider",
"id",
- "orgId",
- "createdAt",
- "updatedAt"
+ "orgId"
]
},
- "GoogleCalendarCheckAvailabilityTool": {
+ "CustomKnowledgeBase": {
"type": "object",
"properties": {
- "messages": {
+ "provider": {
+ "type": "string",
+ "description": "This knowledge base is bring your own knowledge base implementation.",
+ "enum": [
+ "custom-knowledge-base"
+ ]
+ },
+ "server": {
+ "description": "This is where the knowledge base request will be sent.\n\nRequest Example:\n\nPOST https://{server.url}\nContent-Type: application/json\n\n{\n \"message\": {\n \"type\": \"knowledge-base-request\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"Why is ocean blue?\"\n }\n ],\n ...other metadata about the call...\n }\n}\n\nResponse Expected:\n```\n{\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"The ocean is blue because water absorbs everything but blue.\",\n }, // YOU CAN RETURN THE EXACT RESPONSE TO SPEAK\n \"documents\": [\n {\n \"content\": \"The ocean is blue primarily because water absorbs colors in the red part of the light spectrum and scatters the blue light, making it more visible to our eyes.\",\n \"similarity\": 1\n },\n {\n \"content\": \"Blue light is scattered more by the water molecules than other colors, enhancing the blue appearance of the ocean.\",\n \"similarity\": 0.5\n }\n ] // OR, YOU CAN RETURN AN ARRAY OF DOCUMENTS THAT WILL BE SENT TO THE MODEL\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the id of the knowledge base."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the org id of the knowledge base."
+ }
+ },
+ "required": [
+ "provider",
+ "server",
+ "id",
+ "orgId"
+ ]
+ },
+ "CreateTrieveKnowledgeBaseDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "description": "This knowledge base is provided by Trieve.\n\nTo learn more about Trieve, visit https://trieve.ai.",
+ "enum": [
+ "trieve"
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the knowledge base."
+ },
+ "searchPlan": {
+ "description": "This is the searching plan used when searching for relevant chunks from the vector store.\n\nYou should configure this if you're running into these issues:\n- Too much unnecessary context is being fed as knowledge base context.\n- Not enough relevant context is being fed as knowledge base context.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/TrieveKnowledgeBaseSearchPlan"
+ }
+ ]
+ },
+ "createPlan": {
+ "description": "This is the plan if you want us to create/import a new vector store using Trieve.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TrieveKnowledgeBaseImport",
+ "title": "Import"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider"
+ ]
+ },
+ "UpdateTrieveKnowledgeBaseDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the knowledge base."
+ },
+ "searchPlan": {
+ "description": "This is the searching plan used when searching for relevant chunks from the vector store.\n\nYou should configure this if you're running into these issues:\n- Too much unnecessary context is being fed as knowledge base context.\n- Not enough relevant context is being fed as knowledge base context.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/TrieveKnowledgeBaseSearchPlan"
+ }
+ ]
+ },
+ "createPlan": {
+ "description": "This is the plan if you want us to create/import a new vector store using Trieve.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TrieveKnowledgeBaseImport",
+ "title": "Import"
+ }
+ ]
+ }
+ }
+ },
+ "UpdateCustomKnowledgeBaseDTO": {
+ "type": "object",
+ "properties": {
+ "server": {
+ "description": "This is where the knowledge base request will be sent.\n\nRequest Example:\n\nPOST https://{server.url}\nContent-Type: application/json\n\n{\n \"message\": {\n \"type\": \"knowledge-base-request\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"Why is ocean blue?\"\n }\n ],\n ...other metadata about the call...\n }\n}\n\nResponse Expected:\n```\n{\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"The ocean is blue because water absorbs everything but blue.\",\n }, // YOU CAN RETURN THE EXACT RESPONSE TO SPEAK\n \"documents\": [\n {\n \"content\": \"The ocean is blue primarily because water absorbs colors in the red part of the light spectrum and scatters the blue light, making it more visible to our eyes.\",\n \"similarity\": 1\n },\n {\n \"content\": \"Blue light is scattered more by the water molecules than other colors, enhancing the blue appearance of the ocean.\",\n \"similarity\": 0.5\n }\n ] // OR, YOU CAN RETURN AN ARRAY OF DOCUMENTS THAT WILL BE SENT TO THE MODEL\n}\n```",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Server"
+ }
+ ]
+ }
+ }
+ },
+ "TrieveKnowledgeBaseChunkPlan": {
+ "type": "object",
+ "properties": {
+ "fileIds": {
+ "description": "These are the file ids that will be used to create the vector store. To upload files, use the `POST /files` endpoint.",
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
+ "type": "string"
+ }
+ },
+ "websites": {
+ "description": "These are the websites that will be used to create the vector store.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "targetSplitsPerChunk": {
+ "type": "number",
+ "description": "This is an optional field which allows you to specify the number of splits you want per chunk. If not specified, the default 20 is used. However, you may want to use a different number."
+ },
+ "splitDelimiters": {
+ "description": "This is an optional field which allows you to specify the delimiters to use when splitting the file before chunking the text. If not specified, the default [.!?\\n] are used to split into sentences. However, you may want to use spaces or other delimiters.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "rebalanceChunks": {
+ "type": "boolean",
+ "description": "This is an optional field which allows you to specify whether or not to rebalance the chunks created from the file. If not specified, the default true is used. If true, Trieve will evenly distribute remainder splits across chunks such that 66 splits with a target_splits_per_chunk of 20 will result in 3 chunks with 22 splits each."
+ }
+ }
+ },
+ "TrieveKnowledgeBaseCreate": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is to create a new dataset on Trieve.",
+ "enum": [
+ "create"
+ ]
+ },
+ "chunkPlans": {
+ "description": "These are the chunk plans used to create the dataset.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/TrieveKnowledgeBaseChunkPlan"
}
+ }
+ },
+ "required": [
+ "type",
+ "chunkPlans"
+ ]
+ },
+ "TrieveKnowledgeBaseImport": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is to import an existing dataset from Trieve.",
+ "enum": [
+ "import"
+ ]
+ },
+ "providerId": {
+ "type": "string",
+ "description": "This is the `datasetId` of the dataset on your Trieve account."
+ }
+ },
+ "required": [
+ "type",
+ "providerId"
+ ]
+ },
+ "StructuredOutput": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of structured output.\n\n- 'ai': Uses an LLM to extract structured data from the conversation (default).\n- 'regex': Uses a regex pattern to extract data from the transcript without an LLM.",
+ "enum": [
+ "ai",
+ "regex"
+ ]
+ },
+ "regex": {
+ "type": "string",
+ "description": "This is the regex pattern to match against the transcript.\n\nOnly used when type is 'regex'. Supports both raw patterns (e.g. '\\d+') and\nregex literal format (e.g. '/\\d+/gi'). Uses RE2 syntax for safety.\n\nThe result depends on the schema type:\n- boolean: true if the pattern matches, false otherwise\n- string: the first match or first capture group\n- number/integer: the first match parsed as a number\n- array: all matches",
+ "minLength": 1,
+ "maxLength": 1000
+ },
+ "model": {
+ "description": "This is the model that will be used to extract the structured output.\n\nTo provide your own custom system and user prompts for structured output extraction, populate the messages array with your system and user messages. You can specify liquid templating in your system and user messages.\nBetween the system or user messages, you must reference either 'transcript' or 'messages' with the `{{}}` syntax to access the conversation history.\nBetween the system or user messages, you must reference a variation of the structured output with the `{{}}` syntax to access the structured output definition.\ni.e.:\n`{{structuredOutput}}`\n`{{structuredOutput.name}}`\n`{{structuredOutput.description}}`\n`{{structuredOutput.schema}}`\n\nIf model is not specified, GPT-4.1 will be used by default for extraction, utilizing default system and user prompts.\nIf messages or required fields are not specified, the default system and user prompts will be used.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/WorkflowOpenAIModel",
+ "title": "WorkflowOpenAIModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowAnthropicModel",
+ "title": "WorkflowAnthropicModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowAnthropicBedrockModel",
+ "title": "WorkflowAnthropicBedrockModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowGoogleModel",
+ "title": "WorkflowGoogleModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowCustomModel",
+ "title": "WorkflowCustomModel"
+ }
+ ]
},
- "type": {
- "type": "string",
- "enum": [
- "google.calendar.availability.check"
- ],
- "description": "The type of tool. \"google.calendar.availability.check\" for Google Calendar Check Availability tool."
+ "compliancePlan": {
+ "description": "Compliance configuration for this output. Only enable overrides if no sensitive data will be stored.",
+ "example": {
+ "forceStoreOnHipaaEnabled": false
+ },
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ComplianceOverride"
+ }
+ ]
},
"id": {
"type": "string",
- "description": "This is the unique identifier for the tool."
+ "description": "This is the unique identifier for the structured output."
},
"orgId": {
"type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
+ "description": "This is the unique identifier for the org that this structured output belongs to."
},
"createdAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
+ "description": "This is the ISO 8601 date-time string of when the structured output was created."
},
"updatedAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ "description": "This is the ISO 8601 date-time string of when the structured output was last updated."
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "name": {
+ "type": "string",
+ "description": "This is the name of the structured output.",
+ "minLength": 1,
+ "maxLength": 40
+ },
+ "description": {
+ "type": "string",
+ "description": "This is the description of what the structured output extracts.\n\nUse this to provide context about what data will be extracted and how it will be used."
+ },
+ "assistantIds": {
+ "description": "These are the assistant IDs that this structured output is linked to.\n\nWhen linked to assistants, this structured output will be available for extraction during those assistant's calls.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "workflowIds": {
+ "description": "These are the workflow IDs that this structured output is linked to.\n\nWhen linked to workflows, this structured output will be available for extraction during those workflow's execution.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "schema": {
+ "description": "This is the JSON Schema definition for the structured output.\n\nDefines the structure and validation rules for the data that will be extracted. Supports all JSON Schema features including:\n- Objects and nested properties\n- Arrays and array validation\n- String, number, boolean, and null types\n- Enums and const values\n- Validation constraints (min/max, patterns, etc.)\n- Composition with allOf, anyOf, oneOf",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/JsonSchema"
}
]
}
},
"required": [
- "type",
"id",
"orgId",
"createdAt",
- "updatedAt"
+ "updatedAt",
+ "name",
+ "schema"
]
},
- "SlackSendMessageTool": {
+ "StructuredOutputPaginatedResponse": {
"type": "object",
"properties": {
- "messages": {
+ "results": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
+ "$ref": "#/components/schemas/StructuredOutput"
}
},
+ "metadata": {
+ "$ref": "#/components/schemas/PaginationMeta"
+ }
+ },
+ "required": [
+ "results",
+ "metadata"
+ ]
+ },
+ "UpdateStructuredOutputDTO": {
+ "type": "object",
+ "properties": {
"type": {
"type": "string",
+ "description": "This is the type of structured output.\n\n- 'ai': Uses an LLM to extract structured data from the conversation (default).\n- 'regex': Uses a regex pattern to extract data from the transcript without an LLM.",
"enum": [
- "slack.message.send"
- ],
- "description": "The type of tool. \"slack.message.send\" for Slack Send Message tool."
+ "ai",
+ "regex"
+ ]
},
- "id": {
+ "regex": {
"type": "string",
- "description": "This is the unique identifier for the tool."
+ "description": "This is the regex pattern to match against the transcript.\n\nOnly used when type is 'regex'. Supports both raw patterns (e.g. '\\d+') and\nregex literal format (e.g. '/\\d+/gi'). Uses RE2 syntax for safety.\n\nThe result depends on the schema type:\n- boolean: true if the pattern matches, false otherwise\n- string: the first match or first capture group\n- number/integer: the first match parsed as a number\n- array: all matches",
+ "minLength": 1,
+ "maxLength": 1000
},
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
+ "model": {
+ "description": "This is the model that will be used to extract the structured output.\n\nTo provide your own custom system and user prompts for structured output extraction, populate the messages array with your system and user messages. You can specify liquid templating in your system and user messages.\nBetween the system or user messages, you must reference either 'transcript' or 'messages' with the `{{}}` syntax to access the conversation history.\nBetween the system or user messages, you must reference a variation of the structured output with the `{{}}` syntax to access the structured output definition.\ni.e.:\n`{{structuredOutput}}`\n`{{structuredOutput.name}}`\n`{{structuredOutput.description}}`\n`{{structuredOutput.schema}}`\n\nIf model is not specified, GPT-4.1 will be used by default for extraction, utilizing default system and user prompts.\nIf messages or required fields are not specified, the default system and user prompts will be used.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/WorkflowOpenAIModel",
+ "title": "WorkflowOpenAIModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowAnthropicModel",
+ "title": "WorkflowAnthropicModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowAnthropicBedrockModel",
+ "title": "WorkflowAnthropicBedrockModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowGoogleModel",
+ "title": "WorkflowGoogleModel"
+ },
+ {
+ "$ref": "#/components/schemas/WorkflowCustomModel",
+ "title": "WorkflowCustomModel"
+ }
+ ]
},
- "createdAt": {
- "format": "date-time",
+ "compliancePlan": {
+ "description": "Compliance configuration for this output. Only enable overrides if no sensitive data will be stored.",
+ "example": {
+ "forceStoreOnHipaaEnabled": false
+ },
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/ComplianceOverride"
+ }
+ ]
+ },
+ "name": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
+ "description": "This is the name of the structured output.",
+ "minLength": 1,
+ "maxLength": 40
},
- "updatedAt": {
- "format": "date-time",
+ "description": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ "description": "This is the description of what the structured output extracts.\n\nUse this to provide context about what data will be extracted and how it will be used."
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "assistantIds": {
+ "description": "These are the assistant IDs that this structured output is linked to.\n\nWhen linked to assistants, this structured output will be available for extraction during those assistant's calls.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "workflowIds": {
+ "description": "These are the workflow IDs that this structured output is linked to.\n\nWhen linked to workflows, this structured output will be available for extraction during those workflows' execution.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "schema": {
+ "description": "This is the JSON Schema definition for the structured output.\n\nDefines the structure and validation rules for the data that will be extracted. Supports all JSON Schema features including:\n- Objects and nested properties\n- Arrays and array validation\n- String, number, boolean, and null types\n- Enums and const values\n- Validation constraints (min/max, patterns, etc.)\n- Composition with allOf, anyOf, oneOf",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/JsonSchema"
}
]
}
- },
- "required": [
- "type",
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
- ]
+ }
},
- "SmsTool": {
+ "StructuredOutputRunDTO": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
- },
- "type": {
- "type": "string",
- "enum": [
- "sms"
- ],
- "description": "The type of tool. \"sms\" for Twilio SMS sending tool."
- },
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
+ "previewEnabled": {
+ "type": "boolean",
+ "description": "This is the preview flag for the re-run. If true, the re-run will be executed and the response will be returned immediately and the call artifact will NOT be updated.\nIf false (default), the re-run will be executed and the response will be updated in the call artifact.",
+ "default": false
},
- "updatedAt": {
- "format": "date-time",
+ "structuredOutputId": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ "description": "This is the ID of the structured output that will be run. This must be provided unless a transient structured output is provided.\nWhen the re-run is executed, only the value of this structured output will be replaced with the new value, or added if not present."
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "structuredOutput": {
+ "description": "This is the transient structured output that will be run. This must be provided if a structured output ID is not provided.\nWhen the re-run is executed, the structured output value will be added to the existing artifact.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/CreateStructuredOutputDTO"
}
]
+ },
+ "callIds": {
+ "description": "This is the array of callIds that will be updated with the new structured output value. If preview is true, this array must be provided and contain exactly 1 callId.\nIf preview is false, up to 100 callIds may be provided.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
}
},
"required": [
- "type",
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
+ "callIds"
]
},
- "McpTool": {
+ "TesterPlan": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
- },
- "type": {
- "type": "string",
- "enum": [
- "mcp"
- ],
- "description": "The type of tool. \"mcp\" for MCP tool."
- },
- "server": {
- "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "assistant": {
+ "description": "Pass a transient assistant to use for the test assistant.\n\nMake sure to write a detailed system prompt for a test assistant, and use the {{test.script}} variable to access the test script.",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
]
},
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the tool."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
- },
- "createdAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
- },
- "updatedAt": {
- "format": "date-time",
+ "assistantId": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ "description": "Pass an assistant ID to use for the test assistant.\n\nMake sure to write a detailed system prompt for the test assistant, and use the {{test.script}} variable to access the test script."
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "assistantOverrides": {
+ "description": "Add any assistant overrides to the test assistant.\n\nOne use case is if you want to pass custom variables into the test using variableValues, that you can then access in the script\nand rubric using {{varName}}.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/AssistantOverrides"
}
]
+ }
+ }
+ },
+ "TestSuitePhoneNumber": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "description": "This is the provider of the phone number.",
+ "enum": [
+ "test-suite"
+ ]
},
- "metadata": {
- "$ref": "#/components/schemas/McpToolMetadata"
+ "number": {
+ "type": "string",
+ "description": "This is the phone number that is being tested.",
+ "maxLength": 50
}
},
"required": [
- "type",
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
+ "provider",
+ "number"
]
},
- "GoHighLevelCalendarAvailabilityTool": {
+ "TargetPlan": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
+ "phoneNumberId": {
+ "type": "string",
+ "description": "This is the phone number that is being tested.\nDuring the actual test, it'll be called and the assistant attached to it will pick up and be tested.\nTo test an assistant directly, send assistantId instead."
},
- "type": {
+ "phoneNumber": {
+ "description": "This can be any phone number (even not on Vapi).\nDuring the actual test, it'll be called.\nTo test a Vapi number, send phoneNumberId. To test an assistant directly, send assistantId instead.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/TestSuitePhoneNumber"
+ }
+ ]
+ },
+ "assistantId": {
"type": "string",
- "enum": [
- "gohighlevel.calendar.availability.check"
- ],
- "description": "The type of tool. \"gohighlevel.calendar.availability.check\" for GoHighLevel Calendar Availability Check tool."
+ "description": "This is the assistant being tested.\nDuring the actual test, it'll be invoked directly.\nTo test the assistant over a phone number, send phoneNumberId instead."
},
+ "assistantOverrides": {
+ "description": "This is the assistant overrides applied to assistantId before it is tested.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides"
+ }
+ ]
+ }
+ }
+ },
+ "TestSuite": {
+ "type": "object",
+ "properties": {
"id": {
"type": "string",
- "description": "This is the unique identifier for the tool."
+ "description": "This is the unique identifier for the test suite."
},
"orgId": {
"type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
+ "description": "This is the unique identifier for the org that this test suite belongs to."
},
"createdAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
+ "description": "This is the ISO 8601 date-time string of when the test suite was created."
},
"updatedAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ "description": "This is the ISO 8601 date-time string of when the test suite was last updated."
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "name": {
+ "type": "string",
+ "description": "This is the name of the test suite.",
+ "maxLength": 80
+ },
+ "phoneNumberId": {
+ "type": "string",
+ "description": "This is the phone number ID associated with this test suite.",
+ "deprecated": true
+ },
+ "testerPlan": {
+ "description": "Override the default tester plan by providing custom assistant configuration for the test agent.\n\nWe recommend only using this if you are confident, as we have already set sensible defaults on the tester plan.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/TesterPlan"
+ }
+ ]
+ },
+ "targetPlan": {
+ "description": "This is the configuration for the assistant / phone number that is being tested.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/TargetPlan"
}
]
}
},
"required": [
- "type",
"id",
"orgId",
"createdAt",
"updatedAt"
]
},
- "GoHighLevelCalendarEventCreateTool": {
+ "TestSuitesPaginatedResponse": {
"type": "object",
"properties": {
- "messages": {
+ "results": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
+ "$ref": "#/components/schemas/TestSuite"
}
},
- "type": {
+ "metadata": {
+ "$ref": "#/components/schemas/PaginationMeta"
+ }
+ },
+ "required": [
+ "results",
+ "metadata"
+ ]
+ },
+ "CreateTestSuiteDto": {
+ "type": "object",
+ "properties": {
+ "name": {
"type": "string",
- "enum": [
- "gohighlevel.calendar.event.create"
- ],
- "description": "The type of tool. \"gohighlevel.calendar.event.create\" for GoHighLevel Calendar Event Create tool."
+ "description": "This is the name of the test suite.",
+ "maxLength": 80
},
- "id": {
+ "phoneNumberId": {
"type": "string",
- "description": "This is the unique identifier for the tool."
+ "description": "This is the phone number ID associated with this test suite.",
+ "deprecated": true
},
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
+ "testerPlan": {
+ "description": "Override the default tester plan by providing custom assistant configuration for the test agent.\n\nWe recommend only using this if you are confident, as we have already set sensible defaults on the tester plan.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/TesterPlan"
+ }
+ ]
},
- "createdAt": {
- "format": "date-time",
+ "targetPlan": {
+ "description": "This is the configuration for the assistant / phone number that is being tested.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/TargetPlan"
+ }
+ ]
+ }
+ }
+ },
+ "UpdateTestSuiteDto": {
+ "type": "object",
+ "properties": {
+ "name": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
+ "description": "This is the name of the test suite.",
+ "maxLength": 80
},
- "updatedAt": {
- "format": "date-time",
+ "phoneNumberId": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ "description": "This is the phone number ID associated with this test suite.",
+ "deprecated": true
+ },
+ "testerPlan": {
+ "description": "Override the default tester plan by providing custom assistant configuration for the test agent.\n\nWe recommend only using this if you are confident, as we have already set sensible defaults on the tester plan.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/TesterPlan"
+ }
+ ]
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "targetPlan": {
+ "description": "This is the configuration for the assistant / phone number that is being tested.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/TargetPlan"
}
]
}
- },
- "required": [
- "type",
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
- ]
+ }
},
- "GoHighLevelContactCreateTool": {
+ "TestSuiteTestVoice": {
"type": "object",
"properties": {
- "messages": {
+ "scorers": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "description": "These are the scorers used to evaluate the test.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
+ "$ref": "#/components/schemas/TestSuiteTestScorerAI",
+ "title": "AI"
}
]
}
},
"type": {
"type": "string",
+ "description": "This is the type of the test, which must be voice.",
"enum": [
- "gohighlevel.contact.create"
+ "voice"
],
- "description": "The type of tool. \"gohighlevel.contact.create\" for GoHighLevel Contact Create tool."
+ "maxLength": 100
},
"id": {
"type": "string",
- "description": "This is the unique identifier for the tool."
+ "description": "This is the unique identifier for the test."
+ },
+ "testSuiteId": {
+ "type": "string",
+ "description": "This is the unique identifier for the test suite this test belongs to."
},
"orgId": {
"type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
+ "description": "This is the unique identifier for the organization this test belongs to."
},
"createdAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
+ "description": "This is the ISO 8601 date-time string of when the test was created."
},
"updatedAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ "description": "This is the ISO 8601 date-time string of when the test was last updated."
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
+ "name": {
+ "type": "string",
+ "description": "This is the name of the test.",
+ "maxLength": 80
+ },
+ "script": {
+ "type": "string",
+ "description": "This is the script to be used for the voice test.",
+ "maxLength": 10000
+ },
+ "numAttempts": {
+ "type": "number",
+ "description": "This is the number of attempts allowed for the test.",
+ "minimum": 1,
+ "maximum": 10
}
},
"required": [
+ "scorers",
"type",
"id",
+ "testSuiteId",
"orgId",
"createdAt",
- "updatedAt"
+ "updatedAt",
+ "script"
]
},
- "GoHighLevelContactGetTool": {
+ "TestSuiteTestChat": {
"type": "object",
"properties": {
- "messages": {
+ "scorers": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "description": "These are the scorers used to evaluate the test.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
+ "$ref": "#/components/schemas/TestSuiteTestScorerAI",
+ "title": "AI"
}
]
}
},
"type": {
"type": "string",
+ "description": "This is the type of the test, which must be chat.",
"enum": [
- "gohighlevel.contact.get"
+ "chat"
],
- "description": "The type of tool. \"gohighlevel.contact.get\" for GoHighLevel Contact Get tool."
+ "maxLength": 100
},
"id": {
"type": "string",
- "description": "This is the unique identifier for the tool."
+ "description": "This is the unique identifier for the test."
+ },
+ "testSuiteId": {
+ "type": "string",
+ "description": "This is the unique identifier for the test suite this test belongs to."
},
"orgId": {
"type": "string",
- "description": "This is the unique identifier for the organization that this tool belongs to."
+ "description": "This is the unique identifier for the organization this test belongs to."
},
"createdAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was created."
+ "description": "This is the ISO 8601 date-time string of when the test was created."
},
"updatedAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the tool was last updated."
+ "description": "This is the ISO 8601 date-time string of when the test was last updated."
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
+ "name": {
+ "type": "string",
+ "description": "This is the name of the test.",
+ "maxLength": 80
+ },
+ "script": {
+ "type": "string",
+ "description": "This is the script to be used for the chat test.",
+ "maxLength": 10000
+ },
+ "numAttempts": {
+ "type": "number",
+ "description": "This is the number of attempts allowed for the test.",
+ "minimum": 1,
+ "maximum": 10
}
},
"required": [
+ "scorers",
"type",
"id",
+ "testSuiteId",
"orgId",
"createdAt",
- "updatedAt"
+ "updatedAt",
+ "script"
]
},
- "CreateApiRequestToolDTO": {
+ "CreateTestSuiteTestVoiceDto": {
"type": "object",
"properties": {
- "messages": {
+ "scorers": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "description": "These are the scorers used to evaluate the test.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
+ "$ref": "#/components/schemas/TestSuiteTestScorerAI",
+ "title": "AI"
}
]
}
},
"type": {
"type": "string",
+ "description": "This is the type of the test, which must be voice.",
"enum": [
- "apiRequest"
+ "voice"
],
- "description": "The type of tool. \"apiRequest\" for API request tool."
+ "maxLength": 100
},
- "method": {
+ "script": {
"type": "string",
- "enum": [
- "POST",
- "GET",
- "PUT",
- "PATCH",
- "DELETE"
- ]
+ "description": "This is the script to be used for the voice test.",
+ "maxLength": 10000
},
- "timeoutSeconds": {
+ "numAttempts": {
"type": "number",
- "description": "This is the timeout in seconds for the request. Defaults to 20 seconds.\n\n@default 20",
+ "description": "This is the number of attempts allowed for the test.",
"minimum": 1,
- "maximum": 300,
- "example": 20
+ "maximum": 10
},
"name": {
"type": "string",
- "description": "This is the name of the tool. This will be passed to the model.\n\nMust be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 40.",
- "maxLength": 40,
- "pattern": "/^[a-zA-Z0-9_-]{1,40}$/"
- },
- "description": {
- "type": "string",
- "description": "This is the description of the tool. This will be passed to the model.",
- "maxLength": 1000
- },
- "url": {
- "type": "string",
- "description": "This is where the request will be sent."
- },
- "body": {
- "description": "This is the body of the request.",
- "allOf": [
- {
- "$ref": "#/components/schemas/JsonSchema"
- }
- ]
- },
- "headers": {
- "description": "These are the headers to send with the request.",
- "allOf": [
- {
- "$ref": "#/components/schemas/JsonSchema"
- }
- ]
- },
- "backoffPlan": {
- "description": "This is the backoff plan if the request fails. Defaults to undefined (the request will not be retried).\n\n@default undefined (the request will not be retried)",
- "allOf": [
- {
- "$ref": "#/components/schemas/BackoffPlan"
- }
- ]
- },
- "variableExtractionPlan": {
- "description": "This is the plan to extract variables from the tool's response. These will be accessible during the call and stored in `call.artifact.variableValues` after the call.\n\nUsage:\n1. Use `aliases` to extract variables from the tool's response body. (Most common case)\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{customer.name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{customer.age}}\"\n }\n ]\n}\n```\n\nThe tool response body is made available to the liquid template.\n\n2. Use `aliases` to extract variables from the tool's response body if the response is an array.\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{$[0].name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{$[0].age}}\"\n }\n ]\n}\n```\n\n$ is a shorthand for the tool's response body. `$[0]` is the first item in the array. `$[n]` is the nth item in the array. Note, $ is available regardless of the response body type (both object and array).\n\n3. Use `aliases` to extract variables from the tool's response headers.\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{tool.response.headers.customer-name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{tool.response.headers.customer-age}}\"\n }\n ]\n}\n```\n\n`tool.response` is made available to the liquid template. Particularly, both `tool.response.headers` and `tool.response.body` are available. Note, `tool.response` is available regardless of the response body type (both object and array).\n\n4. Use `schema` to extract a large portion of the tool's response body.\n\n4.1. 
If you hit example.com and it returns `{\"name\": \"John\", \"age\": 30}`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n }\n }\n }\n}\n```\nThese will be extracted as `{{ name }}` and `{{ age }}` respectively. To emphasize, object properties are extracted as direct global variables.\n\n4.2. If you hit example.com and it returns `{\"name\": {\"first\": \"John\", \"last\": \"Doe\"}}`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"object\",\n \"properties\": {\n \"first\": {\n \"type\": \"string\"\n },\n \"last\": {\n \"type\": \"string\"\n }\n }\n }\n }\n }\n}\n```\n\nThese will be extracted as `{{ name }}`. And, `{{ name.first }}` and `{{ name.last }}` will be accessible.\n\n4.3. If you hit example.com and it returns `[\"94123\", \"94124\"]`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"array\",\n \"title\": \"zipCodes\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n}\n```\n\nThis will be extracted as `{{ zipCodes }}`. To access the array items, you can use `{{ zipCodes[0] }}` and `{{ zipCodes[1] }}`.\n\n4.4. If you hit example.com and it returns `[{\"name\": \"John\", \"age\": 30, \"zipCodes\": [\"94123\", \"94124\"]}, {\"name\": \"Jane\", \"age\": 25, \"zipCodes\": [\"94125\", \"94126\"]}]`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"array\",\n \"title\": \"people\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n },\n \"zipCodes\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n }\n }\n }\n}\n```\n\nThis will be extracted as `{{ people }}`. 
To access the array items, you can use `{{ people[n].name }}`, `{{ people[n].age }}`, `{{ people[n].zipCodes }}`, `{{ people[n].zipCodes[0] }}` and `{{ people[n].zipCodes[1] }}`.\n\nNote: Both `aliases` and `schema` can be used together.",
- "allOf": [
- {
- "$ref": "#/components/schemas/VariableExtractionPlan"
- }
- ]
- },
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
+ "description": "This is the name of the test.",
+ "maxLength": 80
}
},
"required": [
+ "scorers",
"type",
- "method",
- "url"
+ "script"
]
},
- "CreateOutputToolDTO": {
+ "CreateTestSuiteTestChatDto": {
"type": "object",
"properties": {
- "messages": {
+ "scorers": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "description": "These are the scorers used to evaluate the test.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
+ "$ref": "#/components/schemas/TestSuiteTestScorerAI",
+ "title": "AI"
}
]
}
},
"type": {
"type": "string",
+ "description": "This is the type of the test, which must be chat.",
"enum": [
- "output"
+ "chat"
],
- "description": "The type of tool. \"output\" for Output tool."
+ "maxLength": 100
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
+ "script": {
+ "type": "string",
+ "description": "This is the script to be used for the chat test.",
+ "maxLength": 10000
+ },
+ "numAttempts": {
+ "type": "number",
+ "description": "This is the number of attempts allowed for the test.",
+ "minimum": 1,
+ "maximum": 10
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the test.",
+ "maxLength": 80
}
},
"required": [
- "type"
+ "scorers",
+ "type",
+ "script"
]
},
- "CreateBashToolDTO": {
+ "UpdateTestSuiteTestVoiceDto": {
"type": "object",
"properties": {
- "messages": {
+ "scorers": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "description": "These are the scorers used to evaluate the test.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
+ "$ref": "#/components/schemas/TestSuiteTestScorerAI",
+ "title": "AI"
}
]
}
},
"type": {
"type": "string",
+ "description": "This is the type of the test, which must be voice.",
"enum": [
- "bash"
+ "voice"
],
- "description": "The type of tool. \"bash\" for Bash tool."
+ "maxLength": 100
},
- "subType": {
+ "name": {
"type": "string",
- "enum": [
- "bash_20241022"
- ],
- "description": "The sub type of tool."
- },
- "server": {
- "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
- ]
+ "description": "This is the name of the test.",
+ "maxLength": 80
},
- "name": {
+ "script": {
"type": "string",
- "description": "The name of the tool, fixed to 'bash'",
- "default": "bash",
- "enum": [
- "bash"
- ]
+ "description": "This is the script to be used for the voice test.",
+ "maxLength": 10000
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
+ "numAttempts": {
+ "type": "number",
+ "description": "This is the number of attempts allowed for the test.",
+ "minimum": 1,
+ "maximum": 10
}
- },
- "required": [
- "type",
- "subType",
- "name"
- ]
+ }
},
- "CreateComputerToolDTO": {
+ "UpdateTestSuiteTestChatDto": {
"type": "object",
"properties": {
- "messages": {
+ "scorers": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "description": "These are the scorers used to evaluate the test.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
+ "$ref": "#/components/schemas/TestSuiteTestScorerAI",
+ "title": "AI"
}
]
}
},
"type": {
"type": "string",
+ "description": "This is the type of the test, which must be chat.",
"enum": [
- "computer"
- ],
- "description": "The type of tool. \"computer\" for Computer tool."
- },
- "subType": {
- "type": "string",
- "enum": [
- "computer_20241022"
+ "chat"
],
- "description": "The sub type of tool."
- },
- "server": {
- "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
- ]
+ "maxLength": 100
},
"name": {
"type": "string",
- "description": "The name of the tool, fixed to 'computer'",
- "default": "computer",
- "enum": [
- "computer"
- ]
- },
- "displayWidthPx": {
- "type": "number",
- "description": "The display width in pixels"
+ "description": "This is the name of the test.",
+ "maxLength": 80
},
- "displayHeightPx": {
- "type": "number",
- "description": "The display height in pixels"
+ "script": {
+ "type": "string",
+ "description": "This is the script to be used for the chat test.",
+ "maxLength": 10000
},
- "displayNumber": {
+ "numAttempts": {
"type": "number",
- "description": "Optional display number"
- },
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
+ "description": "This is the number of attempts allowed for the test.",
+ "minimum": 1,
+ "maximum": 10
}
- },
- "required": [
- "type",
- "subType",
- "name",
- "displayWidthPx",
- "displayHeightPx"
- ]
+ }
},
- "CreateTextEditorToolDTO": {
+ "TestSuiteTestScorerAI": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
- },
"type": {
"type": "string",
+ "description": "This is the type of the scorer, which must be AI.",
"enum": [
- "textEditor"
- ],
- "description": "The type of tool. \"textEditor\" for Text Editor tool."
- },
- "subType": {
- "type": "string",
- "enum": [
- "text_editor_20241022"
+ "ai"
],
- "description": "The sub type of tool."
- },
- "server": {
- "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
- ]
- },
- "name": {
- "type": "string",
- "description": "The name of the tool, fixed to 'str_replace_editor'",
- "default": "str_replace_editor",
- "enum": [
- "str_replace_editor"
- ]
+ "maxLength": 100
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
+ "rubric": {
+ "type": "string",
+ "description": "This is the rubric used by the AI scorer.",
+ "maxLength": 10000
}
},
"required": [
"type",
- "subType",
- "name"
+ "rubric"
]
},
- "CreateSmsToolDTO": {
+ "TestSuiteTestsPaginatedResponse": {
"type": "object",
"properties": {
- "messages": {
+ "results": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "description": "A list of test suite tests.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
+ "$ref": "#/components/schemas/TestSuiteTestVoice"
},
{
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
+ "$ref": "#/components/schemas/TestSuiteTestChat"
}
]
}
},
+ "metadata": {
+ "description": "Metadata about the pagination.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/PaginationMeta"
+ }
+ ]
+ }
+ },
+ "required": [
+ "results",
+ "metadata"
+ ]
+ },
+ "TestSuiteRunScorerAI": {
+ "type": "object",
+ "properties": {
"type": {
"type": "string",
+ "description": "This is the type of the scorer, which must be AI.",
"enum": [
- "sms"
+ "ai"
],
- "description": "The type of tool. \"sms\" for Twilio SMS sending tool."
+ "maxLength": 100
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "result": {
+ "type": "string",
+ "description": "This is the result of the test suite.",
+ "enum": [
+ "pass",
+ "fail"
+ ],
+ "maxLength": 100
+ },
+ "reasoning": {
+ "type": "string",
+ "description": "This is the reasoning provided by the AI scorer.",
+ "maxLength": 10000
+ },
+ "rubric": {
+ "type": "string",
+ "description": "This is the rubric used by the AI scorer.",
+ "maxLength": 10000
+ }
+ },
+ "required": [
+ "type",
+ "result",
+ "reasoning",
+ "rubric"
+ ]
+ },
+ "TestSuiteRunTestAttemptCall": {
+ "type": "object",
+ "properties": {
+ "artifact": {
+ "description": "This is the artifact of the call.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/Artifact"
}
]
}
},
"required": [
- "type"
+ "artifact"
]
},
- "UpdateApiRequestToolDTO": {
+ "TestSuiteRunTestAttemptMetadata": {
"type": "object",
"properties": {
- "messages": {
+ "sessionId": {
+ "type": "string",
+ "description": "This is the session ID for the test attempt."
+ }
+ },
+ "required": [
+ "sessionId"
+ ]
+ },
+ "TestSuiteRunTestAttempt": {
+ "type": "object",
+ "properties": {
+ "scorerResults": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "description": "These are the results of the scorers used to evaluate the test attempt.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
+ "$ref": "#/components/schemas/TestSuiteRunScorerAI",
+ "title": "AI"
}
]
}
},
- "method": {
- "type": "string",
- "enum": [
- "POST",
- "GET",
- "PUT",
- "PATCH",
- "DELETE"
- ]
- },
- "timeoutSeconds": {
- "type": "number",
- "description": "This is the timeout in seconds for the request. Defaults to 20 seconds.\n\n@default 20",
- "minimum": 1,
- "maximum": 300,
- "example": 20
- },
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "call": {
+ "description": "This is the call made during the test attempt.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/TestSuiteRunTestAttemptCall"
}
]
},
- "name": {
- "type": "string",
- "description": "This is the name of the tool. This will be passed to the model.\n\nMust be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 40.",
- "maxLength": 40,
- "pattern": "/^[a-zA-Z0-9_-]{1,40}$/"
- },
- "description": {
- "type": "string",
- "description": "This is the description of the tool. This will be passed to the model.",
- "maxLength": 1000
- },
- "url": {
+ "callId": {
"type": "string",
- "description": "This is where the request will be sent."
- },
- "body": {
- "description": "This is the body of the request.",
- "allOf": [
- {
- "$ref": "#/components/schemas/JsonSchema"
- }
- ]
+ "description": "This is the call ID for the test attempt."
},
- "headers": {
- "description": "These are the headers to send with the request.",
+ "metadata": {
+ "description": "This is the metadata for the test attempt.",
"allOf": [
{
- "$ref": "#/components/schemas/JsonSchema"
+ "$ref": "#/components/schemas/TestSuiteRunTestAttemptMetadata"
}
]
- },
- "backoffPlan": {
- "description": "This is the backoff plan if the request fails. Defaults to undefined (the request will not be retried).\n\n@default undefined (the request will not be retried)",
- "allOf": [
+ }
+ },
+ "required": [
+ "scorerResults"
+ ]
+ },
+ "TestSuiteRunTestResult": {
+ "type": "object",
+ "properties": {
+ "test": {
+ "description": "This is the test that was run.",
+ "oneOf": [
{
- "$ref": "#/components/schemas/BackoffPlan"
+ "$ref": "#/components/schemas/TestSuiteTestVoice",
+ "title": "TestSuiteTestVoice"
}
]
},
- "variableExtractionPlan": {
- "description": "This is the plan to extract variables from the tool's response. These will be accessible during the call and stored in `call.artifact.variableValues` after the call.\n\nUsage:\n1. Use `aliases` to extract variables from the tool's response body. (Most common case)\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{customer.name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{customer.age}}\"\n }\n ]\n}\n```\n\nThe tool response body is made available to the liquid template.\n\n2. Use `aliases` to extract variables from the tool's response body if the response is an array.\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{$[0].name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{$[0].age}}\"\n }\n ]\n}\n```\n\n$ is a shorthand for the tool's response body. `$[0]` is the first item in the array. `$[n]` is the nth item in the array. Note, $ is available regardless of the response body type (both object and array).\n\n3. Use `aliases` to extract variables from the tool's response headers.\n\n```json\n{\n \"aliases\": [\n {\n \"key\": \"customerName\",\n \"value\": \"{{tool.response.headers.customer-name}}\"\n },\n {\n \"key\": \"customerAge\",\n \"value\": \"{{tool.response.headers.customer-age}}\"\n }\n ]\n}\n```\n\n`tool.response` is made available to the liquid template. Particularly, both `tool.response.headers` and `tool.response.body` are available. Note, `tool.response` is available regardless of the response body type (both object and array).\n\n4. Use `schema` to extract a large portion of the tool's response body.\n\n4.1. 
If you hit example.com and it returns `{\"name\": \"John\", \"age\": 30}`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n }\n }\n }\n}\n```\nThese will be extracted as `{{ name }}` and `{{ age }}` respectively. To emphasize, object properties are extracted as direct global variables.\n\n4.2. If you hit example.com and it returns `{\"name\": {\"first\": \"John\", \"last\": \"Doe\"}}`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"object\",\n \"properties\": {\n \"first\": {\n \"type\": \"string\"\n },\n \"last\": {\n \"type\": \"string\"\n }\n }\n }\n }\n }\n}\n```\n\nThese will be extracted as `{{ name }}`. And, `{{ name.first }}` and `{{ name.last }}` will be accessible.\n\n4.3. If you hit example.com and it returns `[\"94123\", \"94124\"]`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"array\",\n \"title\": \"zipCodes\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n}\n```\n\nThis will be extracted as `{{ zipCodes }}`. To access the array items, you can use `{{ zipCodes[0] }}` and `{{ zipCodes[1] }}`.\n\n4.4. If you hit example.com and it returns `[{\"name\": \"John\", \"age\": 30, \"zipCodes\": [\"94123\", \"94124\"]}, {\"name\": \"Jane\", \"age\": 25, \"zipCodes\": [\"94125\", \"94126\"]}]`, then you can specify the schema as:\n\n```json\n{\n \"schema\": {\n \"type\": \"array\",\n \"title\": \"people\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"age\": {\n \"type\": \"number\"\n },\n \"zipCodes\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n }\n }\n }\n }\n }\n}\n```\n\nThis will be extracted as `{{ people }}`. 
To access the array items, you can use `{{ people[n].name }}`, `{{ people[n].age }}`, `{{ people[n].zipCodes }}`, `{{ people[n].zipCodes[0] }}` and `{{ people[n].zipCodes[1] }}`.\n\nNote: Both `aliases` and `schema` can be used together.",
- "allOf": [
- {
- "$ref": "#/components/schemas/VariableExtractionPlan"
- }
- ]
+ "attempts": {
+ "description": "These are the attempts made for this test.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/TestSuiteRunTestAttempt"
+ }
}
- }
+ },
+ "required": [
+ "test",
+ "attempts"
+ ]
},
- "UpdateDtmfToolDTO": {
+ "TestSuiteRun": {
"type": "object",
"properties": {
- "messages": {
+ "status": {
+ "type": "string",
+ "description": "This is the current status of the test suite run.",
+ "enum": [
+ "queued",
+ "in-progress",
+ "completed",
+ "failed"
+ ]
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the test suite run."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization this run belongs to."
+ },
+ "testSuiteId": {
+ "type": "string",
+ "description": "This is the unique identifier for the test suite this run belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the test suite run was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the test suite run was last updated."
+ },
+ "testResults": {
+ "description": "These are the results of the tests in this test suite run.",
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
+ "$ref": "#/components/schemas/TestSuiteRunTestResult"
}
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
+ "name": {
+ "type": "string",
+ "description": "This is the name of the test suite run.",
+ "maxLength": 80
}
- }
+ },
+ "required": [
+ "status",
+ "id",
+ "orgId",
+ "testSuiteId",
+ "createdAt",
+ "updatedAt",
+ "testResults"
+ ]
},
- "UpdateEndCallToolDTO": {
+ "TestSuiteRunsPaginatedResponse": {
"type": "object",
"properties": {
- "messages": {
+ "results": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
+ "$ref": "#/components/schemas/TestSuiteRun"
}
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
+ "metadata": {
+ "$ref": "#/components/schemas/PaginationMeta"
+ }
+ },
+ "required": [
+ "results",
+ "metadata"
+ ]
+ },
+ "CreateTestSuiteRunDto": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the test suite run.",
+ "maxLength": 80
}
}
},
- "UpdateFunctionToolDTO": {
+ "UpdateTestSuiteRunDto": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
- },
- "async": {
- "type": "boolean",
- "example": false,
- "description": "This determines if the tool is async.\n\n If async, the assistant will move forward without waiting for your server to respond. This is useful if you just want to trigger something on your server.\n\n If sync, the assistant will wait for your server to respond. This is useful if want assistant to respond with the result from your server.\n\n Defaults to synchronous (`false`)."
+ "name": {
+ "type": "string",
+ "description": "This is the name of the test suite run.",
+ "maxLength": 80
+ }
+ }
+ },
+ "CreatePersonalityDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the personality (e.g., \"Confused Carl\", \"Rude Rob\").",
+ "maxLength": 80
},
- "server": {
- "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "assistant": {
+ "description": "This is the full assistant configuration for this personality.\nIt defines the tester's voice, model, behavior via system prompt, and other settings.",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
]
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
+ "path": {
+ "type": "string",
+ "nullable": true,
+ "description": "Optional folder path for organizing personalities.\nSupports up to 3 levels (e.g., \"dept/feature/variant\").\nMaps to GitOps resource folder structure.",
+ "maxLength": 255,
+ "pattern": "/^[a-zA-Z0-9][a-zA-Z0-9._-]*(?:\\/[a-zA-Z0-9][a-zA-Z0-9._-]*){0,2}$/"
+ }
+ },
+ "required": [
+ "name",
+ "assistant"
+ ]
+ },
+ "Personality": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the personality.",
+ "format": "uuid"
},
- "function": {
- "description": "This is the function definition of the tool.",
+ "orgId": {
+ "type": "string",
+ "nullable": true,
+ "description": "This is the unique identifier for the organization this personality belongs to.\nIf null, this is a Vapi-provided default personality available to all organizations.",
+ "format": "uuid"
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the personality was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the personality was last updated."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the personality (e.g., \"Confused Carl\", \"Rude Rob\").",
+ "maxLength": 80
+ },
+ "assistant": {
+ "description": "This is the full assistant configuration for this personality.\nIt defines the tester's voice, model, behavior via system prompt, and other settings.",
"allOf": [
{
- "$ref": "#/components/schemas/OpenAIFunction"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
]
+ },
+ "path": {
+ "type": "string",
+ "nullable": true,
+ "description": "Optional folder path for organizing personalities.\nSupports up to 3 levels (e.g., \"dept/feature/variant\").\nMaps to GitOps resource folder structure.",
+ "maxLength": 255,
+ "pattern": "/^[a-zA-Z0-9][a-zA-Z0-9._-]*(?:\\/[a-zA-Z0-9][a-zA-Z0-9._-]*){0,2}$/"
}
- }
+ },
+ "required": [
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt",
+ "name",
+ "assistant"
+ ]
},
- "UpdateGhlToolDTO": {
+ "UpdatePersonalityDTO": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
+ "name": {
+ "type": "string",
+ "description": "This is the name of the personality.",
+ "maxLength": 80
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "assistant": {
+ "description": "This is the full assistant configuration for this personality.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
]
},
- "metadata": {
- "$ref": "#/components/schemas/GhlToolMetadata"
+ "path": {
+ "type": "string",
+ "nullable": true,
+ "description": "Optional folder path for organizing personalities.\nSupports up to 3 levels (e.g., \"dept/feature/variant\").\nSet to null to remove from folder.",
+ "maxLength": 255,
+ "pattern": "/^[a-zA-Z0-9][a-zA-Z0-9._-]*(?:\\/[a-zA-Z0-9][a-zA-Z0-9._-]*){0,2}$/"
}
}
},
- "UpdateMakeToolDTO": {
+ "SimulationHookInclude": {
"type": "object",
"properties": {
+ "transcript": {
+ "type": "boolean",
+ "description": "Include transcript in the hook payload",
+ "default": false
+ },
"messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
+ "type": "boolean",
+ "description": "Include messages in the hook payload",
+ "default": false
+ },
+ "recordingUrl": {
+ "type": "boolean",
+ "description": "Include recordingUrl in the hook payload",
+ "default": false
+ }
+ }
+ },
+ "SimulationHookWebhookAction": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "webhook"
+ ]
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "server": {
+ "description": "Optional server override for this hook action.\nIf omitted, runtime defaults may apply (e.g. org server).",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/Server"
}
]
},
- "metadata": {
- "$ref": "#/components/schemas/MakeToolMetadata"
+ "include": {
+ "description": "Optional payload include controls.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SimulationHookInclude"
+ }
+ ]
}
- }
+ },
+ "required": [
+ "type"
+ ]
},
- "UpdateHandoffToolDTO": {
+ "SimulationHookCallStarted": {
"type": "object",
"properties": {
- "messages": {
+ "on": {
+ "type": "string",
+ "enum": [
+ "simulation.run.started"
+ ],
+ "maxLength": 1000
+ },
+ "do": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
+ "$ref": "#/components/schemas/SimulationHookWebhookAction",
+ "title": "SimulationHookWebhookAction"
}
]
}
+ }
+ },
+ "required": [
+ "on",
+ "do"
+ ]
+ },
+ "SimulationHookCallEnded": {
+ "type": "object",
+ "properties": {
+ "on": {
+ "type": "string",
+ "enum": [
+ "simulation.run.ended"
+ ],
+ "maxLength": 1000
},
- "destinations": {
+ "do": {
"type": "array",
- "description": "These are the destinations that the call can be handed off to.\n\nUsage:\n1. Single destination\n\nUse `assistantId` to handoff the call to a saved assistant, or `assistantName` to handoff the call to an assistant in the same squad.\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\", // or \"assistantName\": \"Assistant123\"\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n2. Multiple destinations\n\n2.1. Multiple Tools, Each With One Destination (OpenAI recommended)\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\",\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n },\n ],\n },\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-456\",\n \"description\": \"customer wants to be handed off to assistant-456\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n2.2. One Tool, Multiple Destinations (Anthropic recommended)\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-123\",\n \"description\": \"customer wants to be handed off to assistant-123\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n },\n {\n \"type\": \"assistant\",\n \"assistantId\": \"assistant-456\",\n \"description\": \"customer wants to be handed off to assistant-456\",\n \"contextEngineeringPlan\": {\n \"type\": \"all\"\n }\n }\n ],\n }\n ]\n}\n```\n\n3. 
Dynamic destination\n\n3.1 To determine the destination dynamically, supply a `dynamic` handoff destination type and a `server` object.\n VAPI will send a handoff-destination-request webhook to the `server.url`.\n The response from the server will be used as the destination (if valid).\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"dynamic\",\n \"server\": {\n \"url\": \"https://example.com\"\n }\n }\n ],\n }\n ]\n}\n```\n\n3.2. To pass custom parameters to the server, you can use the `function` object.\n\n```json\n{\n \"tools\": [\n {\n \"type\": \"handoff\",\n \"destinations\": [\n {\n \"type\": \"dynamic\",\n \"server\": {\n \"url\": \"https://example.com\"\n },\n }\n ],\n \"function\": {\n \"name\": \"handoff\",\n \"description\": \"Call this function when the customer is ready to be handed off to the next assistant\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"destination\": {\n \"type\": \"string\",\n \"description\": \"Use dynamic when customer is ready to be handed off to the next assistant\",\n \"enum\": [\"dynamic\"]\n },\n \"customerAreaCode\": {\n \"type\": \"number\",\n \"description\": \"Area code of the customer\"\n },\n \"customerIntent\": {\n \"type\": \"string\",\n \"enum\": [\"new-customer\", \"existing-customer\"],\n \"description\": \"Use new-customer when customer is a new customer, existing-customer when customer is an existing customer\"\n },\n \"customerSentiment\": {\n \"type\": \"string\",\n \"enum\": [\"positive\", \"negative\", \"neutral\"],\n \"description\": \"Use positive when customer is happy, negative when customer is unhappy, neutral when customer is neutral\"\n }\n }\n }\n }\n }\n ]\n}\n```\n\nThe properties `customerAreaCode`, `customerIntent`, and `customerSentiment` will be passed to the server in the webhook request body.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/HandoffDestinationAssistant",
- "title": "Assistant"
- },
- {
- "$ref": "#/components/schemas/HandoffDestinationDynamic",
- "title": "Dynamic"
+ "$ref": "#/components/schemas/SimulationHookWebhookAction",
+ "title": "SimulationHookWebhookAction"
}
]
}
+ }
+ },
+ "required": [
+ "on",
+ "do"
+ ]
+ },
+ "EvaluationPlanItem": {
+ "type": "object",
+ "properties": {
+ "structuredOutputId": {
+ "type": "string",
+ "description": "This is the ID of an existing structured output to use for evaluation.\nMutually exclusive with structuredOutput.",
+ "format": "uuid"
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "structuredOutput": {
+ "description": "This is an inline structured output definition for evaluation.\nMutually exclusive with structuredOutputId.\nOnly primitive schema types (string, number, integer, boolean) are allowed.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/CreateStructuredOutputDTO"
+ }
+ ]
+ },
+ "comparator": {
+ "type": "string",
+ "description": "This is the comparison operator to use when evaluating the extracted value against the expected value.\nAvailable operators depend on the structured output's schema type:\n- boolean: '=', '!='\n- string: '=', '!='\n- number/integer: '=', '!=', '>', '<', '>=', '<='",
+ "enum": [
+ "=",
+ "!=",
+ ">",
+ "<",
+ ">=",
+ "<="
+ ],
+ "example": "="
+ },
+ "value": {
+ "description": "This is the expected value to compare against the extracted structured output result.\nType should match the structured output's schema type.",
+ "oneOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "boolean"
}
]
+ },
+ "required": {
+ "type": "boolean",
+ "description": "This is whether this evaluation must pass for the simulation to pass.\nDefaults to true. If false, the result is informational only.",
+ "default": true
}
- }
+ },
+ "required": [
+ "comparator",
+ "value"
+ ]
},
- "UpdateTransferCallToolDTO": {
+ "ScenarioToolMock": {
"type": "object",
"properties": {
- "messages": {
+ "toolName": {
+ "type": "string",
+ "description": "This is the tool call function name to mock (must match `toolCall.function.name`)."
+ },
+ "result": {
+ "type": "string",
+ "description": "This is the result content to return for this tool call."
+ },
+ "enabled": {
+ "type": "boolean",
+ "description": "This is whether this mock is enabled. Defaults to true when omitted.",
+ "default": true
+ }
+ },
+ "required": [
+ "toolName"
+ ]
+ },
+ "CreateScenarioDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the scenario.",
+ "maxLength": 80,
+ "example": "Health Enrollment - Eligible Path"
+ },
+ "instructions": {
+ "type": "string",
+ "description": "This is the script/instructions for the tester to follow during the simulation.",
+ "maxLength": 10000,
+ "example": "You are calling to enroll in the Twin Health program. Confirm your identity when asked."
+ },
+ "evaluations": {
+ "description": "This is the structured output-based evaluation plan for the simulation.\nEach item defines a structured output to extract and evaluate against an expected value.",
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
+ "$ref": "#/components/schemas/EvaluationPlanItem"
}
},
- "destinations": {
+ "hooks": {
"type": "array",
- "description": "These are the destinations that the call can be transferred to. If no destinations are provided, server.url will be used to get the transfer destination once the tool is called.",
+ "description": "Hooks to run on simulation lifecycle events",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/TransferDestinationAssistant",
- "title": "Assistant"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "Number"
+ "$ref": "#/components/schemas/SimulationHookCallStarted",
+ "title": "SimulationHookCallStarted"
},
{
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "Sip"
+ "$ref": "#/components/schemas/SimulationHookCallEnded",
+ "title": "SimulationHookCallEnded"
}
]
}
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "targetOverrides": {
+ "description": "Overrides to inject into the simulated target assistant or squad",
+ "example": {
+ "variableValues": {
+ "customerName": "Alice",
+ "orderId": "12345"
+ }
+ },
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/AssistantOverrides"
}
]
+ },
+ "toolMocks": {
+ "description": "Scenario-level tool call mocks to use during simulations.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ScenarioToolMock"
+ }
+ },
+ "path": {
+ "type": "string",
+ "nullable": true,
+ "description": "Optional folder path for organizing scenarios.\nSupports up to 3 levels (e.g., \"dept/feature/variant\").\nMaps to GitOps resource folder structure.",
+ "maxLength": 255,
+ "pattern": "^[a-zA-Z0-9][a-zA-Z0-9._-]*(?:\\/[a-zA-Z0-9][a-zA-Z0-9._-]*){0,2}$"
}
- }
+ },
+ "required": [
+ "name",
+ "instructions",
+ "evaluations"
+ ]
},
- "UpdateOutputToolDTO": {
+ "Scenario": {
"type": "object",
"properties": {
- "messages": {
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the scenario.",
+ "format": "uuid"
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization this scenario belongs to.",
+ "format": "uuid"
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the scenario was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the scenario was last updated."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the scenario.",
+ "maxLength": 80,
+ "example": "Health Enrollment - Eligible Path"
+ },
+ "instructions": {
+ "type": "string",
+ "description": "This is the script/instructions for the tester to follow during the simulation.",
+ "maxLength": 10000,
+ "example": "You are calling to enroll in the Twin Health program. Confirm your identity when asked."
+ },
+ "evaluations": {
+ "description": "This is the structured output-based evaluation plan for the simulation.\nEach item defines a structured output to extract and evaluate against an expected value.",
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "$ref": "#/components/schemas/EvaluationPlanItem"
+ }
+ },
+ "hooks": {
+ "type": "array",
+ "description": "Hooks to run on simulation lifecycle events",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
+ "$ref": "#/components/schemas/SimulationHookCallStarted",
+ "title": "SimulationHookCallStarted"
},
{
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
+ "$ref": "#/components/schemas/SimulationHookCallEnded",
+ "title": "SimulationHookCallEnded"
}
]
}
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "targetOverrides": {
+ "description": "Overrides to inject into the simulated target assistant or squad",
+ "example": {
+ "variableValues": {
+ "customerName": "Alice",
+ "orderId": "12345"
+ }
+ },
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/AssistantOverrides"
}
]
+ },
+ "toolMocks": {
+ "description": "Scenario-level tool call mocks to use during simulations.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ScenarioToolMock"
+ }
+ },
+ "path": {
+ "type": "string",
+ "nullable": true,
+ "description": "Optional folder path for organizing scenarios.\nSupports up to 3 levels (e.g., \"dept/feature/variant\").\nMaps to GitOps resource folder structure.",
+ "maxLength": 255,
+ "pattern": "^[a-zA-Z0-9][a-zA-Z0-9._-]*(?:\\/[a-zA-Z0-9][a-zA-Z0-9._-]*){0,2}$"
}
- }
+ },
+ "required": [
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt",
+ "name",
+ "instructions",
+ "evaluations"
+ ]
},
- "UpdateBashToolDTO": {
+ "UpdateScenarioDTO": {
"type": "object",
"properties": {
- "messages": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the scenario.",
+ "maxLength": 80
+ },
+ "instructions": {
+ "type": "string",
+ "description": "This is the script/instructions for the tester to follow during the simulation.",
+ "maxLength": 10000
+ },
+ "evaluations": {
+ "description": "This is the structured output-based evaluation plan for the simulation.\nEach item defines a structured output to extract and evaluate against an expected value.",
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "items": {
+ "$ref": "#/components/schemas/EvaluationPlanItem"
+ }
+ },
+ "hooks": {
+ "type": "array",
+ "description": "Hooks to run on simulation lifecycle events",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
+ "$ref": "#/components/schemas/SimulationHookCallStarted",
+ "title": "SimulationHookCallStarted"
},
{
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
+ "$ref": "#/components/schemas/SimulationHookCallEnded",
+ "title": "SimulationHookCallEnded"
}
]
}
},
- "subType": {
- "type": "string",
- "enum": [
- "bash_20241022"
- ],
- "description": "The sub type of tool."
- },
- "server": {
- "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
+ "targetOverrides": {
+ "description": "Overrides to inject into the simulated target assistant or squad",
+ "example": {
+ "variableValues": {
+ "customerName": "Alice",
+ "orderId": "12345"
}
- ]
- },
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ },
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/AssistantOverrides"
}
]
},
- "name": {
+ "toolMocks": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ScenarioToolMock"
+ }
+ },
+ "path": {
"type": "string",
- "description": "The name of the tool, fixed to 'bash'",
- "default": "bash",
- "enum": [
- "bash"
- ]
+ "nullable": true,
+ "description": "Optional folder path for organizing scenarios.\nSupports up to 3 levels (e.g., \"dept/feature/variant\").\nSet to null to remove from folder.",
+ "maxLength": 255,
+ "pattern": "^[a-zA-Z0-9][a-zA-Z0-9._-]*(?:\\/[a-zA-Z0-9][a-zA-Z0-9._-]*){0,2}$"
}
}
},
- "UpdateComputerToolDTO": {
+ "SimulationRunSimulationEntry": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
- },
- "subType": {
+ "type": {
"type": "string",
"enum": [
- "computer_20241022"
+ "simulation"
],
- "description": "The sub type of tool."
+ "description": "Type discriminator"
},
- "server": {
- "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
+ "simulationId": {
+ "type": "string",
+ "description": "ID of an existing simulation to run. When provided, scenarioId/personalityId/inline fields are ignored.",
+ "format": "uuid"
+ },
+ "scenarioId": {
+ "type": "string",
+ "description": "ID of an existing scenario. Cannot be combined with inline scenario.",
+ "format": "uuid"
+ },
+ "scenario": {
+ "description": "Inline scenario configuration. Cannot be combined with scenarioId.",
"allOf": [
{
- "$ref": "#/components/schemas/Server"
+ "$ref": "#/components/schemas/CreateScenarioDTO"
}
]
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "personalityId": {
+ "type": "string",
+ "description": "ID of an existing personality. Cannot be combined with inline personality.",
+ "format": "uuid"
+ },
+ "personality": {
+ "description": "Inline personality configuration. Cannot be combined with personalityId.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/CreatePersonalityDTO"
}
]
},
"name": {
"type": "string",
- "description": "The name of the tool, fixed to 'computer'",
- "default": "computer",
- "enum": [
- "computer"
- ]
- },
- "displayWidthPx": {
- "type": "number",
- "description": "The display width in pixels"
- },
- "displayHeightPx": {
- "type": "number",
- "description": "The display height in pixels"
- },
- "displayNumber": {
- "type": "number",
- "description": "Optional display number"
+ "maxLength": 80,
+ "description": "Optional name for this simulation entry"
}
- }
+ },
+ "required": [
+ "type"
+ ]
},
- "UpdateTextEditorToolDTO": {
+ "SimulationRunSuiteEntry": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
- },
- "subType": {
+ "type": {
"type": "string",
+ "description": "Type discriminator",
"enum": [
- "text_editor_20241022"
- ],
- "description": "The sub type of tool."
- },
- "server": {
- "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
+ "simulationSuite"
]
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
+ "simulationSuiteId": {
+ "type": "string",
+ "description": "ID of the simulation suite to run",
+ "format": "uuid"
},
- "name": {
+ "suiteId": {
"type": "string",
- "description": "The name of the tool, fixed to 'str_replace_editor'",
- "default": "str_replace_editor",
- "enum": [
- "str_replace_editor"
- ]
+ "deprecated": true
}
- }
+ },
+ "required": [
+ "type"
+ ]
},
- "UpdateQueryToolDTO": {
+ "SimulationRunTargetAssistant": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
+ "type": {
+ "type": "string",
+ "enum": [
+ "assistant"
+ ],
+ "description": "Type of target"
},
- "knowledgeBases": {
- "description": "The knowledge bases to query",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/KnowledgeBase"
- }
+ "assistantId": {
+ "type": "string",
+ "description": "ID of an existing assistant to test against. Cannot be combined with inline assistant.",
+ "format": "uuid"
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "assistant": {
+ "description": "Inline assistant configuration to test against. Cannot be combined with assistantId.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
]
}
- }
+ },
+ "required": [
+ "type"
+ ]
},
- "UpdateGoogleCalendarCreateEventToolDTO": {
+ "SimulationRunTargetSquad": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
+ "type": {
+ "type": "string",
+ "enum": [
+ "squad"
+ ],
+ "description": "Type of target"
+ },
+ "squadId": {
+ "type": "string",
+ "description": "ID of an existing squad to test against. Cannot be combined with inline squad.",
+ "format": "uuid"
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "squad": {
+ "description": "Inline squad configuration to test against. Cannot be combined with squadId.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/CreateSquadDTO"
}
]
}
- }
+ },
+ "required": [
+ "type"
+ ]
},
- "UpdateGoogleSheetsRowAppendToolDTO": {
+ "SimulationRunTransportConfiguration": {
"type": "object",
"properties": {
- "messages": {
+ "provider": {
+ "type": "string",
+ "description": "Transport provider for the simulation run",
+ "enum": [
+ "vapi.websocket",
+ "vapi.webchat"
+ ]
+ }
+ },
+ "required": [
+ "provider"
+ ]
+ },
+ "CreateSimulationRunDTO": {
+ "type": "object",
+ "properties": {
+ "simulations": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "description": "Array of simulations and/or suites to run",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
+ "$ref": "#/components/schemas/SimulationRunSimulationEntry",
+ "title": "Simulation"
},
{
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
+ "$ref": "#/components/schemas/SimulationRunSuiteEntry",
+ "title": "Suite"
}
]
}
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "target": {
+ "description": "Target to test against",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SimulationRunTargetAssistant",
+ "title": "Assistant"
+ },
+ {
+ "$ref": "#/components/schemas/SimulationRunTargetSquad",
+ "title": "Squad"
+ }
+ ]
+ },
+ "iterations": {
+ "type": "number",
+ "minimum": 1,
+ "description": "Number of times to run each simulation (default: 1)",
+ "default": 1
+ },
+ "transport": {
+ "description": "Transport configuration for the simulation runs",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/SimulationRunTransportConfiguration"
}
]
}
- }
+ },
+ "required": [
+ "simulations",
+ "target"
+ ]
},
- "UpdateGoogleCalendarCheckAvailabilityToolDTO": {
+ "SimulationRunItemCounts": {
"type": "object",
"properties": {
- "messages": {
+ "total": {
+ "type": "number",
+ "description": "Total number of run items"
+ },
+ "passed": {
+ "type": "number",
+ "description": "Number of passed run items"
+ },
+ "failed": {
+ "type": "number",
+ "description": "Number of failed run items"
+ },
+ "running": {
+ "type": "number",
+ "description": "Number of running/evaluating run items"
+ },
+ "queued": {
+ "type": "number",
+ "description": "Number of queued run items"
+ },
+ "canceled": {
+ "type": "number",
+ "description": "Number of canceled run items"
+ }
+ },
+ "required": [
+ "total",
+ "passed",
+ "failed",
+ "running",
+ "queued",
+ "canceled"
+ ]
+ },
+ "SimulationRun": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the run",
+ "format": "uuid"
+ },
+ "orgId": {
+ "type": "string",
+ "description": "Organization ID",
+ "format": "uuid"
+ },
+ "status": {
+ "type": "string",
+ "enum": [
+ "queued",
+ "running",
+ "ended"
+ ],
+ "description": "Current status of the run"
+ },
+ "queuedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "When the run was queued"
+ },
+ "startedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "When the run started"
+ },
+ "endedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "When the run ended"
+ },
+ "endedReason": {
+ "type": "string",
+ "description": "Reason the run ended"
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "ISO 8601 date-time when created"
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "ISO 8601 date-time when last updated"
+ },
+ "itemCounts": {
+ "description": "Aggregate counts of run items by status",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SimulationRunItemCounts"
+ }
+ ]
+ },
+ "simulations": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "description": "Array of simulations and/or suites to run",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
+ "$ref": "#/components/schemas/SimulationRunSimulationEntry",
+ "title": "Simulation"
},
{
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
+ "$ref": "#/components/schemas/SimulationRunSuiteEntry",
+ "title": "Suite"
}
]
}
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "target": {
+ "description": "Target to test against",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SimulationRunTargetAssistant",
+ "title": "Assistant"
+ },
+ {
+ "$ref": "#/components/schemas/SimulationRunTargetSquad",
+ "title": "Squad"
+ }
+ ]
+ },
+ "iterations": {
+ "type": "number",
+ "minimum": 1,
+ "description": "Number of times to run each simulation (default: 1)",
+ "default": 1
+ },
+ "transport": {
+ "description": "Transport configuration for the simulation runs",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/SimulationRunTransportConfiguration"
}
]
}
+ },
+ "required": [
+ "id",
+ "orgId",
+ "status",
+ "queuedAt",
+ "createdAt",
+ "updatedAt",
+ "simulations",
+ "target"
+ ]
+ },
+ "SimulationRunItemCallMonitor": {
+ "type": "object",
+ "properties": {
+ "listenUrl": {
+ "type": "string",
+ "description": "This is the WebSocket URL to listen to the live call audio (combined both parties)."
+ }
}
},
- "UpdateSlackSendMessageToolDTO": {
+ "SimulationRunItemCallMetadata": {
"type": "object",
"properties": {
+ "transcript": {
+ "type": "string",
+ "description": "This is the transcript of the conversation."
+ },
"messages": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "description": "This is the list of conversation messages in OpenAI format.",
"items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
+ "type": "object"
}
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "recordingUrl": {
+ "type": "string",
+ "description": "This is the URL to the call recording."
+ },
+ "monitor": {
+ "description": "This is the call monitoring data (live listen URL).",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/SimulationRunItemCallMonitor"
}
]
}
}
},
- "UpdateSmsToolDTO": {
+ "SimulationRunItemMetadata": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
+ "assistant": {
+ "type": "object",
+ "description": "This is a snapshot of the assistant at run creation time.",
+ "additionalProperties": true
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "squad": {
+ "type": "object",
+ "description": "This is a snapshot of the squad at run creation time.",
+ "additionalProperties": true
+ },
+ "scenario": {
+ "type": "object",
+ "description": "This is a snapshot of the scenario at run creation time.",
+ "additionalProperties": true
+ },
+ "personality": {
+ "type": "object",
+ "description": "This is a snapshot of the personality at run creation time.",
+ "additionalProperties": true
+ },
+ "simulation": {
+ "type": "object",
+ "description": "This is a snapshot of the simulation at run creation time.",
+ "additionalProperties": true
+ },
+ "call": {
+ "description": "This is the call-related data (transcript, messages, recording).",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/SimulationRunItemCallMetadata"
}
]
+ },
+ "hooks": {
+ "type": "object",
+ "description": "Hook execution state for this run item (used for idempotency + debugging).",
+ "additionalProperties": true
}
}
},
- "UpdateMcpToolDTO": {
+ "StructuredOutputEvaluationResult": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
+ "structuredOutputId": {
+ "type": "string",
+ "description": "This is the ID of the structured output that was evaluated.\nWill be 'inline' for inline structured output definitions."
},
- "server": {
- "description": "\n This is the server where a `tool-calls` webhook will be sent.\n\n Notes:\n - Webhook is sent to this server when a tool call is made.\n - Webhook contains the call, assistant, and phone number objects.\n - Webhook contains the variables set on the assistant.\n - Webhook is sent to the first available URL in this order: {{tool.server.url}}, {{assistant.server.url}}, {{phoneNumber.server.url}}, {{org.server.url}}.\n - Webhook expects a response with tool call result.",
- "allOf": [
+ "name": {
+ "type": "string",
+ "description": "This is the name of the structured output."
+ },
+ "extractedValue": {
+ "description": "This is the value extracted from the call by the structured output.",
+ "oneOf": [
{
- "$ref": "#/components/schemas/Server"
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "null"
}
]
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
+ "expectedValue": {
+ "description": "This is the expected value that was defined in the evaluation plan.",
+ "oneOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "boolean"
}
]
},
- "metadata": {
- "$ref": "#/components/schemas/McpToolMetadata"
+ "comparator": {
+ "type": "string",
+ "description": "This is the comparison operator used for evaluation.",
+ "enum": [
+ "=",
+ "!=",
+ ">",
+ "<",
+ ">=",
+ "<="
+ ]
+ },
+ "passed": {
+ "type": "boolean",
+ "description": "This indicates whether the evaluation passed (extracted value matched expected value using comparator)."
+ },
+ "required": {
+ "type": "boolean",
+ "description": "This indicates whether this evaluation was required for the simulation to pass."
+ },
+ "error": {
+ "type": "string",
+ "description": "This contains any error that occurred during extraction."
+ },
+ "isSkipped": {
+ "type": "boolean",
+ "description": "This indicates whether this evaluation was skipped (e.g., multimodal in chat mode)."
+ },
+ "skipReason": {
+ "type": "string",
+ "description": "This contains the reason for skipping the evaluation."
}
- }
+ },
+ "required": [
+ "structuredOutputId",
+ "name",
+ "extractedValue",
+ "expectedValue",
+ "comparator",
+ "passed",
+ "required"
+ ]
},
- "UpdateGoHighLevelCalendarAvailabilityToolDTO": {
+ "LatencyMetrics": {
"type": "object",
"properties": {
- "messages": {
- "type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
- }
+ "turnCount": {
+ "type": "number",
+ "description": "This is the number of conversation turns."
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/ToolRejectionPlan"
- }
- ]
+ "avgTurn": {
+ "type": "number",
+ "description": "This is the average total turn latency in milliseconds."
+ },
+ "avgTranscriber": {
+ "type": "number",
+ "description": "This is the average transcriber latency in milliseconds."
+ },
+ "avgModel": {
+ "type": "number",
+ "description": "This is the average LLM/model latency in milliseconds."
+ },
+ "avgVoice": {
+ "type": "number",
+ "description": "This is the average voice/TTS latency in milliseconds."
+ },
+ "avgEndpointing": {
+ "type": "number",
+ "description": "This is the average endpointing latency in milliseconds."
}
- }
+ },
+ "required": [
+ "turnCount"
+ ]
},
- "UpdateGoHighLevelCalendarEventCreateToolDTO": {
+ "SimulationRunItemResults": {
"type": "object",
"properties": {
- "messages": {
+ "evaluations": {
+ "description": "This is the list of results from structured output evaluations.",
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
+ "$ref": "#/components/schemas/StructuredOutputEvaluationResult"
}
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "passed": {
+ "type": "boolean",
+ "description": "This indicates whether all required evaluations passed."
+ },
+ "latencyMetrics": {
+ "description": "This contains the latency metrics collected from the call.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/LatencyMetrics"
}
]
}
- }
+ },
+ "required": [
+ "evaluations",
+ "passed"
+ ]
},
- "UpdateGoHighLevelContactCreateToolDTO": {
+ "SimulationRunItemImprovementSuggestion": {
"type": "object",
"properties": {
- "messages": {
+ "issue": {
+ "type": "string",
+ "description": "This is the issue identified."
+ },
+ "suggestion": {
+ "type": "string",
+ "description": "This is the suggested improvement."
+ }
+ },
+ "required": [
+ "issue",
+ "suggestion"
+ ]
+ },
+ "SimulationRunItemImprovements": {
+ "type": "object",
+ "properties": {
+ "analysis": {
+ "type": "string",
+ "description": "This is a summary analysis of why evaluations failed."
+ },
+ "systemPromptSuggestions": {
+ "description": "This is the list of suggestions for improving the system prompt.",
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
"items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
- },
- {
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
- }
- ]
+ "$ref": "#/components/schemas/SimulationRunItemImprovementSuggestion"
}
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "toolSuggestions": {
+ "description": "This is the list of suggestions for improving tools.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/SimulationRunItemImprovementSuggestion"
+ }
+ },
+ "scenarioSuggestions": {
+ "description": "This is the list of suggestions for improving the scenario/evaluation plan.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/SimulationRunItemImprovementSuggestion"
+ }
+ },
+ "suggestedSystemPrompt": {
+ "type": "string",
+ "description": "This is a complete revised system prompt if major changes are needed."
+ }
+ },
+ "required": [
+ "analysis",
+ "systemPromptSuggestions",
+ "toolSuggestions",
+ "scenarioSuggestions"
+ ]
+ },
+ "SimulationRunConfiguration": {
+ "type": "object",
+ "properties": {
+ "transport": {
+ "description": "Transport configuration for the simulation run",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/SimulationRunTransportConfiguration"
}
]
}
}
},
- "UpdateGoHighLevelContactGetToolDTO": {
+ "SimulationRunItem": {
"type": "object",
"properties": {
- "messages": {
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the simulation run item.",
+ "format": "uuid"
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization.",
+ "format": "uuid"
+ },
+ "simulationId": {
+ "type": "string",
+ "description": "This is the ID of the simulation this run belongs to.",
+ "format": "uuid"
+ },
+ "status": {
+ "type": "string",
+ "description": "This is the current status of the run.",
+ "enum": [
+ "queued",
+ "running",
+ "evaluating",
+ "passed",
+ "failed",
+ "canceled"
+ ]
+ },
+ "queuedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the run was queued."
+ },
+ "startedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the run started."
+ },
+ "completedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the run completed."
+ },
+ "failedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the run failed."
+ },
+ "canceledAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the run was canceled."
+ },
+ "failureReason": {
+ "type": "string",
+ "description": "This is the reason for failure.",
+ "maxLength": 2000
+ },
+ "callId": {
+ "type": "string",
+ "description": "This is the ID of the target Vapi call (the assistant being tested).",
+ "format": "uuid"
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the run item was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the run item was last updated."
+ },
+ "runId": {
+ "type": "string",
+ "description": "This is the ID of the parent run (batch/group).",
+ "format": "uuid"
+ },
+ "hooks": {
"type": "array",
- "description": "These are the messages that will be spoken to the user as the tool is running.\n\nFor some tools, this is auto-filled based on special fields like `tool.destinations`. For others like the function tool, these can be custom configured.",
+ "description": "Hooks configured for this simulation run item",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ToolMessageStart",
- "title": "ToolMessageStart"
- },
- {
- "$ref": "#/components/schemas/ToolMessageComplete",
- "title": "ToolMessageComplete"
- },
- {
- "$ref": "#/components/schemas/ToolMessageFailed",
- "title": "ToolMessageFailed"
+ "$ref": "#/components/schemas/SimulationHookCallStarted",
+ "title": "SimulationHookCallStarted"
},
{
- "$ref": "#/components/schemas/ToolMessageDelayed",
- "title": "ToolMessageDelayed"
+ "$ref": "#/components/schemas/SimulationHookCallEnded",
+ "title": "SimulationHookCallEnded"
}
]
}
},
- "rejectionPlan": {
- "description": "This is the plan to reject a tool call based on the conversation state.\n\n// Example 1: Reject endCall if user didn't say goodbye\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '(?i)\\\\b(bye|goodbye|farewell|see you later|take care)\\\\b',\n target: { position: -1, role: 'user' },\n negate: true // Reject if pattern does NOT match\n }]\n}\n```\n\n// Example 2: Reject transfer if user is actually asking a question\n```json\n{\n conditions: [{\n type: 'regex',\n regex: '\\\\?',\n target: { position: -1, role: 'user' }\n }]\n}\n```\n\n// Example 3: Reject transfer if user didn't mention transfer recently\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 5 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' %}\n{% assign mentioned = false %}\n{% for msg in userMessages %}\n {% if msg.content contains 'transfer' or msg.content contains 'connect' or msg.content contains 'speak to' %}\n {% assign mentioned = true %}\n {% break %}\n {% endif %}\n{% endfor %}\n{% if mentioned %}\n false\n{% else %}\n true\n{% endif %}`\n }]\n}\n```\n\n// Example 4: Reject endCall if the bot is looping and trying to exit\n```json\n{\n conditions: [{\n type: 'liquid',\n liquid: `{% assign recentMessages = messages | last: 6 %}\n{% assign userMessages = recentMessages | where: 'role', 'user' | reverse %}\n{% if userMessages.size < 3 %}\n false\n{% else %}\n {% assign msg1 = userMessages[0].content | downcase %}\n {% assign msg2 = userMessages[1].content | downcase %}\n {% assign msg3 = userMessages[2].content | downcase %}\n {% comment %} Check for repetitive messages {% endcomment %}\n {% if msg1 == msg2 or msg1 == msg3 or msg2 == msg3 %}\n true\n {% comment %} Check for common loop phrases {% endcomment %}\n {% elsif msg1 contains 'cool thanks' or msg2 contains 'cool thanks' or msg3 contains 'cool thanks' %}\n true\n {% elsif msg1 contains 'okay thanks' or msg2 contains 'okay thanks' or msg3 
contains 'okay thanks' %}\n true\n {% elsif msg1 contains 'got it' or msg2 contains 'got it' or msg3 contains 'got it' %}\n true\n {% else %}\n false\n {% endif %}\n{% endif %}`\n }]\n}\n```",
+ "iterationNumber": {
+ "type": "number",
+ "description": "This is the iteration number (1-indexed) when run with iterations > 1.",
+ "default": 1
+ },
+ "sessionId": {
+ "type": "string",
+ "description": "This is the session ID for chat-based simulations (webchat transport).",
+ "format": "uuid"
+ },
+ "scenarioId": {
+ "type": "string",
+ "description": "This is the scenario ID at run creation time.",
+ "format": "uuid"
+ },
+ "personalityId": {
+ "type": "string",
+ "description": "This is the personality ID at run creation time.",
+ "format": "uuid"
+ },
+ "metadata": {
+ "description": "This is the metadata containing snapshots and call data.",
"allOf": [
{
- "$ref": "#/components/schemas/ToolRejectionPlan"
+ "$ref": "#/components/schemas/SimulationRunItemMetadata"
+ }
+ ]
+ },
+ "results": {
+ "description": "This is the results of the simulation run.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SimulationRunItemResults"
+ }
+ ]
+ },
+ "improvementSuggestions": {
+ "description": "This is the AI-generated improvement suggestions for failed runs.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SimulationRunItemImprovements"
+ }
+ ]
+ },
+ "configurations": {
+ "description": "This is the configuration for how this simulation run executes.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SimulationRunConfiguration"
}
]
- }
- }
- },
- "CreateFileDTO": {
- "type": "object",
- "properties": {
- "file": {
- "type": "string",
- "description": "This is the File you want to upload for use with the Knowledge Base.",
- "format": "binary"
}
},
"required": [
- "file"
+ "id",
+ "orgId",
+ "simulationId",
+ "status",
+ "queuedAt",
+ "createdAt",
+ "updatedAt"
]
},
- "File": {
+ "CreateSimulationSuiteDTO": {
"type": "object",
"properties": {
- "object": {
- "type": "string",
- "enum": [
- "file"
- ]
- },
- "status": {
- "enum": [
- "processing",
- "done",
- "failed"
- ],
- "type": "string"
- },
"name": {
"type": "string",
- "description": "This is the name of the file. This is just for your own reference.",
- "maxLength": 40
- },
- "originalName": {
- "type": "string"
- },
- "bytes": {
- "type": "number"
- },
- "purpose": {
- "type": "string"
+ "description": "This is the name of the simulation suite.",
+ "maxLength": 80,
+ "example": "Checkout Flow Tests"
},
- "mimetype": {
- "type": "string"
+ "slackWebhookUrl": {
+ "type": "string",
+ "description": "This is the Slack webhook URL for notifications."
},
- "key": {
- "type": "string"
+ "simulationIds": {
+ "description": "This is the list of simulation IDs to include in the suite.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
},
"path": {
- "type": "string"
- },
- "bucket": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "parsedTextUrl": {
- "type": "string"
- },
- "parsedTextBytes": {
- "type": "number"
- },
- "metadata": {
- "type": "object"
- },
+ "type": "string",
+ "nullable": true,
+ "description": "Optional folder path for organizing simulation suites.\nSupports up to 3 levels (e.g., \"dept/feature/variant\").\nMaps to GitOps resource folder structure.",
+ "maxLength": 255,
+ "pattern": "^[a-zA-Z0-9][a-zA-Z0-9._-]*(?:\\/[a-zA-Z0-9][a-zA-Z0-9._-]*){0,2}$"
+ }
+ },
+ "required": [
+ "name",
+ "simulationIds"
+ ]
+ },
+ "SimulationSuite": {
+ "type": "object",
+ "properties": {
"id": {
"type": "string",
- "description": "This is the unique identifier for the file."
+ "description": "This is the unique identifier for the simulation suite.",
+ "format": "uuid"
},
"orgId": {
"type": "string",
- "description": "This is the unique identifier for the org that this file belongs to."
+ "description": "This is the unique identifier for the organization this suite belongs to.",
+ "format": "uuid"
},
"createdAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the file was created."
+ "description": "This is the ISO 8601 date-time string of when the suite was created."
},
"updatedAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the file was last updated."
+ "description": "This is the ISO 8601 date-time string of when the suite was last updated."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the simulation suite.",
+ "maxLength": 80,
+ "example": "Checkout Flow Tests"
+ },
+ "slackWebhookUrl": {
+ "type": "string",
+ "description": "This is the Slack webhook URL for notifications."
+ },
+ "path": {
+ "type": "string",
+ "nullable": true,
+ "description": "Optional folder path for organizing simulation suites.\nSupports up to 3 levels (e.g., \"dept/feature/variant\").\nMaps to GitOps resource folder structure.",
+ "maxLength": 255,
+ "pattern": "^[a-zA-Z0-9][a-zA-Z0-9._-]*(?:\\/[a-zA-Z0-9][a-zA-Z0-9._-]*){0,2}$"
+ },
+ "simulationIds": {
+ "description": "This is the list of simulation IDs in this suite.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
}
},
"required": [
"id",
"orgId",
"createdAt",
- "updatedAt"
+ "updatedAt",
+ "name",
+ "simulationIds"
]
},
- "UpdateFileDTO": {
+ "UpdateSimulationSuiteDTO": {
"type": "object",
"properties": {
"name": {
"type": "string",
- "description": "This is the name of the file. This is just for your own reference.",
- "minLength": 1,
- "maxLength": 40
+ "description": "This is the name of the simulation suite.",
+ "maxLength": 80
+ },
+ "slackWebhookUrl": {
+ "type": "string",
+ "description": "This is the Slack webhook URL for notifications."
+ },
+ "simulationIds": {
+ "description": "This is the list of simulation IDs to include in the suite (replaces existing).",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "path": {
+ "type": "string",
+ "nullable": true,
+ "description": "Optional folder path for organizing simulation suites.\nSupports up to 3 levels (e.g., \"dept/feature/variant\").\nSet to null to remove from folder.",
+ "maxLength": 255,
+ "pattern": "^[a-zA-Z0-9][a-zA-Z0-9._-]*(?:\\/[a-zA-Z0-9][a-zA-Z0-9._-]*){0,2}$"
}
}
},
- "TrieveKnowledgeBaseSearchPlan": {
+ "GenerateScenariosDTO": {
"type": "object",
"properties": {
- "topK": {
- "type": "number",
- "description": "Specifies the number of top chunks to return. This corresponds to the `page_size` parameter in Trieve."
- },
- "removeStopWords": {
- "type": "boolean",
- "description": "If true, stop words (specified in server/src/stop-words.txt in the git repo) will be removed. This will preserve queries that are entirely stop words."
- },
- "scoreThreshold": {
- "type": "number",
- "description": "This is the score threshold to filter out chunks with a score below the threshold for cosine distance metric. For Manhattan Distance, Euclidean Distance, and Dot Product, it will filter out scores above the threshold distance. This threshold applies before weight and bias modifications. If not specified, this defaults to no threshold. A threshold of 0 will default to no threshold."
+ "assistantId": {
+ "type": "string",
+ "description": "ID of the assistant to generate scenarios for"
},
- "searchType": {
+ "squadId": {
"type": "string",
- "description": "This is the search method used when searching for relevant chunks from the vector store.",
- "enum": [
- "fulltext",
- "semantic",
- "hybrid",
- "bm25"
- ]
+ "description": "ID of the squad to generate scenarios for"
}
- },
- "required": [
- "searchType"
- ]
+ }
},
- "TrieveKnowledgeBase": {
+ "GeneratedScenario": {
"type": "object",
"properties": {
- "provider": {
- "type": "string",
- "description": "This knowledge base is provided by Trieve.\n\nTo learn more about Trieve, visit https://trieve.ai.",
- "enum": [
- "trieve"
- ]
- },
"name": {
"type": "string",
- "description": "This is the name of the knowledge base."
+ "description": "Short descriptive name"
},
- "searchPlan": {
- "description": "This is the searching plan used when searching for relevant chunks from the vector store.\n\nYou should configure this if you're running into these issues:\n- Too much unnecessary context is being fed as knowledge base context.\n- Not enough relevant context is being fed as knowledge base context.",
- "allOf": [
- {
- "$ref": "#/components/schemas/TrieveKnowledgeBaseSearchPlan"
- }
- ]
+ "instructions": {
+ "type": "string",
+ "description": "Instructions for the tester"
},
- "createPlan": {
- "description": "This is the plan if you want us to create/import a new vector store using Trieve.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TrieveKnowledgeBaseImport",
- "title": "Import"
- }
- ]
+ "category": {
+ "type": "string",
+ "enum": [
+ "happy_path",
+ "edge_case",
+ "failure_mode"
+ ],
+ "description": "Scenario category"
},
- "id": {
+ "reasoning": {
"type": "string",
- "description": "This is the id of the knowledge base."
+ "description": "Why this scenario is valuable"
+ }
+ },
+ "required": [
+ "name",
+ "instructions",
+ "category",
+ "reasoning"
+ ]
+ },
+ "GenerateScenariosResponse": {
+ "type": "object",
+ "properties": {
+ "scenarios": {
+ "description": "Generated scenarios",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/GeneratedScenario"
+ }
},
- "orgId": {
+ "coverageNotes": {
"type": "string",
- "description": "This is the org id of the knowledge base."
+ "description": "Summary of test coverage"
}
},
"required": [
- "provider",
- "id",
- "orgId"
+ "scenarios",
+ "coverageNotes"
]
},
- "CustomKnowledgeBase": {
+ "CreateSimulationDTO": {
"type": "object",
"properties": {
- "provider": {
+ "name": {
"type": "string",
- "description": "This knowledge base is bring your own knowledge base implementation.",
- "enum": [
- "custom-knowledge-base"
- ]
+ "description": "This is an optional friendly name for the simulation.",
+ "maxLength": 80,
+ "example": "Eligible Path with Confused User"
},
- "server": {
- "description": "This is where the knowledge base request will be sent.\n\nRequest Example:\n\nPOST https://{server.url}\nContent-Type: application/json\n\n{\n \"messsage\": {\n \"type\": \"knowledge-base-request\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"Why is ocean blue?\"\n }\n ],\n ...other metadata about the call...\n }\n}\n\nResponse Expected:\n```\n{\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"The ocean is blue because water absorbs everything but blue.\",\n }, // YOU CAN RETURN THE EXACT RESPONSE TO SPEAK\n \"documents\": [\n {\n \"content\": \"The ocean is blue primarily because water absorbs colors in the red part of the light spectrum and scatters the blue light, making it more visible to our eyes.\",\n \"similarity\": 1\n },\n {\n \"content\": \"Blue light is scattered more by the water molecules than other colors, enhancing the blue appearance of the ocean.\",\n \"similarity\": .5\n }\n ] // OR, YOU CAN RETURN AN ARRAY OF DOCUMENTS THAT WILL BE SENT TO THE MODEL\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
- ]
+ "scenarioId": {
+ "type": "string",
+ "description": "This is the ID of the scenario to use for this simulation.",
+ "format": "uuid"
},
- "id": {
+ "personalityId": {
"type": "string",
- "description": "This is the id of the knowledge base."
+ "description": "This is the ID of the personality to use for this simulation.",
+ "format": "uuid"
},
- "orgId": {
+ "path": {
"type": "string",
- "description": "This is the org id of the knowledge base."
+ "nullable": true,
+ "description": "Optional folder path for organizing simulations.\nSupports up to 3 levels (e.g., \"dept/feature/variant\").\nMaps to GitOps resource folder structure.",
+ "maxLength": 255,
+ "pattern": "^[a-zA-Z0-9][a-zA-Z0-9._-]*(?:\\/[a-zA-Z0-9][a-zA-Z0-9._-]*){0,2}$"
}
},
"required": [
- "provider",
- "server",
- "id",
- "orgId"
+ "scenarioId",
+ "personalityId"
]
},
- "CreateTrieveKnowledgeBaseDTO": {
+ "Simulation": {
"type": "object",
"properties": {
- "provider": {
+ "id": {
"type": "string",
- "description": "This knowledge base is provided by Trieve.\n\nTo learn more about Trieve, visit https://trieve.ai.",
- "enum": [
- "trieve"
- ]
+ "description": "This is the unique identifier for the simulation.",
+ "format": "uuid"
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the organization this simulation belongs to.",
+ "format": "uuid"
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the simulation was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the simulation was last updated."
},
"name": {
"type": "string",
- "description": "This is the name of the knowledge base."
+ "description": "This is an optional friendly name for the simulation.",
+ "maxLength": 80,
+ "example": "Eligible Path with Confused User"
},
- "searchPlan": {
- "description": "This is the searching plan used when searching for relevant chunks from the vector store.\n\nYou should configure this if you're running into these issues:\n- Too much unnecessary context is being fed as knowledge base context.\n- Not enough relevant context is being fed as knowledge base context.",
- "allOf": [
- {
- "$ref": "#/components/schemas/TrieveKnowledgeBaseSearchPlan"
- }
- ]
+ "scenarioId": {
+ "type": "string",
+ "description": "This is the ID of the scenario to use for this simulation.",
+ "format": "uuid"
},
- "createPlan": {
- "description": "This is the plan if you want us to create/import a new vector store using Trieve.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TrieveKnowledgeBaseImport",
- "title": "Import"
- }
- ]
+ "personalityId": {
+ "type": "string",
+ "description": "This is the ID of the personality to use for this simulation.",
+ "format": "uuid"
+ },
+ "path": {
+ "type": "string",
+ "nullable": true,
+ "description": "Optional folder path for organizing simulations.\nSupports up to 3 levels (e.g., \"dept/feature/variant\").\nMaps to GitOps resource folder structure.",
+ "maxLength": 255,
+ "pattern": "/^[a-zA-Z0-9][a-zA-Z0-9._-]*(?:\\/[a-zA-Z0-9][a-zA-Z0-9._-]*){0,2}$/"
}
},
"required": [
- "provider"
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt",
+ "scenarioId",
+ "personalityId"
]
},
- "UpdateTrieveKnowledgeBaseDTO": {
+ "UpdateSimulationDTO": {
"type": "object",
"properties": {
"name": {
"type": "string",
- "description": "This is the name of the knowledge base."
+ "description": "This is an optional friendly name for the simulation.",
+ "maxLength": 80
},
- "searchPlan": {
- "description": "This is the searching plan used when searching for relevant chunks from the vector store.\n\nYou should configure this if you're running into these issues:\n- Too much unnecessary context is being fed as knowledge base context.\n- Not enough relevant context is being fed as knowledge base context.",
- "allOf": [
- {
- "$ref": "#/components/schemas/TrieveKnowledgeBaseSearchPlan"
- }
- ]
+ "scenarioId": {
+ "type": "string",
+ "description": "This is the ID of the scenario to use for this simulation.",
+ "format": "uuid"
},
- "createPlan": {
- "description": "This is the plan if you want us to create/import a new vector store using Trieve.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TrieveKnowledgeBaseImport",
- "title": "Import"
- }
- ]
- }
- }
- },
- "UpdateCustomKnowledgeBaseDTO": {
- "type": "object",
- "properties": {
- "server": {
- "description": "This is where the knowledge base request will be sent.\n\nRequest Example:\n\nPOST https://{server.url}\nContent-Type: application/json\n\n{\n \"messsage\": {\n \"type\": \"knowledge-base-request\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": \"Why is ocean blue?\"\n }\n ],\n ...other metadata about the call...\n }\n}\n\nResponse Expected:\n```\n{\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"The ocean is blue because water absorbs everything but blue.\",\n }, // YOU CAN RETURN THE EXACT RESPONSE TO SPEAK\n \"documents\": [\n {\n \"content\": \"The ocean is blue primarily because water absorbs colors in the red part of the light spectrum and scatters the blue light, making it more visible to our eyes.\",\n \"similarity\": 1\n },\n {\n \"content\": \"Blue light is scattered more by the water molecules than other colors, enhancing the blue appearance of the ocean.\",\n \"similarity\": .5\n }\n ] // OR, YOU CAN RETURN AN ARRAY OF DOCUMENTS THAT WILL BE SENT TO THE MODEL\n}\n```",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
- ]
+ "personalityId": {
+ "type": "string",
+ "description": "This is the ID of the personality to use for this simulation.",
+ "format": "uuid"
+ },
+ "path": {
+ "type": "string",
+ "nullable": true,
+ "description": "Optional folder path for organizing simulations.\nSupports up to 3 levels (e.g., \"dept/feature/variant\").\nSet to null to remove from folder.",
+ "maxLength": 255,
+ "pattern": "/^[a-zA-Z0-9][a-zA-Z0-9._-]*(?:\\/[a-zA-Z0-9][a-zA-Z0-9._-]*){0,2}$/"
}
}
},
- "TrieveKnowledgeBaseChunkPlan": {
+ "SimulationConcurrencyResponse": {
"type": "object",
"properties": {
- "fileIds": {
- "description": "These are the file ids that will be used to create the vector store. To upload files, use the `POST /files` endpoint.",
- "type": "array",
- "items": {
- "type": "string"
- }
+ "orgId": {
+ "type": "string"
},
- "websites": {
- "description": "These are the websites that will be used to create the vector store.",
- "type": "array",
- "items": {
- "type": "string"
- }
+ "concurrencyLimit": {
+ "type": "number",
+ "description": "Max call slots for simulations (each voice simulation uses 2 call slots: tester + target)"
},
- "targetSplitsPerChunk": {
+ "activeSimulations": {
"type": "number",
- "description": "This is an optional field which allows you to specify the number of splits you want per chunk. If not specified, the default 20 is used. However, you may want to use a different number."
+ "description": "Number of call slots currently in use by running simulations"
},
- "splitDelimiters": {
- "description": "This is an optional field which allows you to specify the delimiters to use when splitting the file before chunking the text. If not specified, the default [.!?\\n] are used to split into sentences. However, you may want to use spaces or other delimiters.",
- "type": "array",
- "items": {
- "type": "string"
- }
+ "availableToStart": {
+ "type": "number",
+ "description": "Number of voice simulations that can start now (available call slots / 2)"
},
- "rebalanceChunks": {
+ "createdAt": {
+ "type": "string",
+ "format": "date-time",
+ "nullable": true
+ },
+ "updatedAt": {
+ "type": "string",
+ "format": "date-time",
+ "nullable": true
+ },
+ "isDefault": {
"type": "boolean",
- "description": "This is an optional field which allows you to specify whether or not to rebalance the chunks created from the file. If not specified, the default true is used. If true, Trieve will evenly distribute remainder splits across chunks such that 66 splits with a target_splits_per_chunk of 20 will result in 3 chunks with 22 splits each."
+ "description": "True if org is using platform default concurrency limit"
}
- }
+ },
+ "required": [
+ "orgId",
+ "concurrencyLimit",
+ "activeSimulations",
+ "availableToStart",
+ "createdAt",
+ "updatedAt",
+ "isDefault"
+ ]
},
- "TrieveKnowledgeBaseCreate": {
+ "BarInsightMetadata": {
"type": "object",
"properties": {
- "type": {
+ "xAxisLabel": {
"type": "string",
- "description": "This is to create a new dataset on Trieve.",
- "enum": [
- "create"
- ]
+ "minLength": 1,
+ "maxLength": 40
},
- "chunkPlans": {
- "description": "These are the chunk plans used to create the dataset.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/TrieveKnowledgeBaseChunkPlan"
- }
+ "yAxisLabel": {
+ "type": "string",
+ "minLength": 1,
+ "maxLength": 40
+ },
+ "yAxisMin": {
+ "type": "number"
+ },
+ "yAxisMax": {
+ "type": "number"
+ },
+ "name": {
+ "type": "string",
+ "minLength": 1,
+ "maxLength": 255
}
- },
- "required": [
- "type",
- "chunkPlans"
- ]
+ }
},
- "TrieveKnowledgeBaseImport": {
+ "InsightTimeRangeWithStep": {
"type": "object",
"properties": {
- "type": {
+ "step": {
"type": "string",
- "description": "This is to import an existing dataset from Trieve.",
+ "description": "This is the group by step for aggregation.\n\nIf not provided, defaults to group by day.",
"enum": [
- "import"
+ "minute",
+ "hour",
+ "day",
+ "week",
+ "month",
+ "quarter",
+ "year"
]
},
- "providerId": {
+ "start": {
+ "type": "object",
+ "description": "This is the start date for the time range.\n\nShould be a valid ISO 8601 date-time string or relative time string.\nIf not provided, defaults to the 7 days ago.\n\nRelative time strings of the format \"-{number}{unit}\" are allowed.\n\nValid units are:\n- d: days\n- h: hours\n- w: weeks\n- m: months\n- y: years",
+ "example": "\"2025-01-01\" or \"-7d\" or \"now\""
+ },
+ "end": {
+ "type": "object",
+ "description": "This is the end date for the time range.\n\nShould be a valid ISO 8601 date-time string or relative time string.\nIf not provided, defaults to now.\n\nRelative time strings of the format \"-{number}{unit}\" are allowed.\n\nValid units are:\n- d: days\n- h: hours\n- w: weeks\n- m: months\n- y: years",
+ "example": "\"2025-01-01\" or \"now\""
+ },
+ "timezone": {
"type": "string",
- "description": "This is the `datasetId` of the dataset on your Trieve account."
+ "description": "This is the timezone you want to set for the query.\n\nIf not provided, defaults to UTC."
}
- },
- "required": [
- "type",
- "providerId"
- ]
+ }
},
- "Workflow": {
+ "BarInsight": {
"type": "object",
"properties": {
- "nodes": {
- "type": "array",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/ConversationNode",
- "title": "ConversationNode"
- },
- {
- "$ref": "#/components/schemas/ToolNode",
- "title": "ToolNode"
- }
- ]
- }
- },
- "model": {
- "description": "This is the model for the workflow.\n\nThis can be overridden at node level using `nodes[n].model`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/WorkflowOpenAIModel",
- "title": "WorkflowOpenAIModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowAnthropicModel",
- "title": "WorkflowAnthropicModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowGoogleModel",
- "title": "WorkflowGoogleModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowCustomModel",
- "title": "WorkflowCustomModel"
- }
- ]
+ "name": {
+ "type": "string",
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
},
- "transcriber": {
- "description": "This is the transcriber for the workflow.\n\nThis can be overridden at node level using `nodes[n].transcriber`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AssemblyAITranscriber",
- "title": "AssemblyAITranscriber"
- },
- {
- "$ref": "#/components/schemas/AzureSpeechTranscriber",
- "title": "AzureSpeechTranscriber"
- },
- {
- "$ref": "#/components/schemas/CustomTranscriber",
- "title": "CustomTranscriber"
- },
- {
- "$ref": "#/components/schemas/DeepgramTranscriber",
- "title": "DeepgramTranscriber"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsTranscriber",
- "title": "ElevenLabsTranscriber"
- },
- {
- "$ref": "#/components/schemas/GladiaTranscriber",
- "title": "GladiaTranscriber"
- },
- {
- "$ref": "#/components/schemas/GoogleTranscriber",
- "title": "GoogleTranscriber"
- },
- {
- "$ref": "#/components/schemas/SpeechmaticsTranscriber",
- "title": "SpeechmaticsTranscriber"
- },
- {
- "$ref": "#/components/schemas/TalkscriberTranscriber",
- "title": "TalkscriberTranscriber"
- },
- {
- "$ref": "#/components/schemas/OpenAITranscriber",
- "title": "OpenAITranscriber"
- },
- {
- "$ref": "#/components/schemas/CartesiaTranscriber",
- "title": "CartesiaTranscriber"
- }
+ "type": {
+ "type": "string",
+ "description": "This is the type of the Insight.\nIt is required to be `bar` to create a bar insight.",
+ "enum": [
+ "bar"
]
},
- "voice": {
- "description": "This is the voice for the workflow.\n\nThis can be overridden at node level using `nodes[n].voice`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AzureVoice",
- "title": "AzureVoice"
- },
- {
- "$ref": "#/components/schemas/CartesiaVoice",
- "title": "CartesiaVoice"
- },
- {
- "$ref": "#/components/schemas/CustomVoice",
- "title": "CustomVoice"
- },
- {
- "$ref": "#/components/schemas/DeepgramVoice",
- "title": "DeepgramVoice"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsVoice",
- "title": "ElevenLabsVoice"
- },
- {
- "$ref": "#/components/schemas/HumeVoice",
- "title": "HumeVoice"
- },
- {
- "$ref": "#/components/schemas/LMNTVoice",
- "title": "LMNTVoice"
- },
- {
- "$ref": "#/components/schemas/NeuphonicVoice",
- "title": "NeuphonicVoice"
- },
- {
- "$ref": "#/components/schemas/OpenAIVoice",
- "title": "OpenAIVoice"
- },
- {
- "$ref": "#/components/schemas/PlayHTVoice",
- "title": "PlayHTVoice"
- },
- {
- "$ref": "#/components/schemas/RimeAIVoice",
- "title": "RimeAIVoice"
- },
- {
- "$ref": "#/components/schemas/SmallestAIVoice",
- "title": "SmallestAIVoice"
- },
- {
- "$ref": "#/components/schemas/TavusVoice",
- "title": "TavusVoice"
- },
- {
- "$ref": "#/components/schemas/VapiVoice",
- "title": "VapiVoice"
- },
- {
- "$ref": "#/components/schemas/SesameVoice",
- "title": "SesameVoice"
- },
- {
- "$ref": "#/components/schemas/InworldVoice",
- "title": "InworldVoice"
- },
- {
- "$ref": "#/components/schemas/MinimaxVoice",
- "title": "MinimaxVoice"
- }
- ]
+ "formulas": {
+ "type": "array",
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variable.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the\n\nYou can also use the query names as the variable in the formula.",
+ "items": {
+ "$ref": "#/components/schemas/InsightFormula"
+ }
},
- "observabilityPlan": {
- "description": "This is the plan for observability of workflow's calls.\n\nCurrently, only Langfuse is supported.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/LangfuseObservabilityPlan",
- "title": "Langfuse"
- }
- ],
+ "metadata": {
+ "description": "This is the metadata for the insight.",
"allOf": [
{
- "$ref": "#/components/schemas/LangfuseObservabilityPlan"
+ "$ref": "#/components/schemas/BarInsightMetadata"
}
]
},
- "backgroundSound": {
- "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
- "oneOf": [
- {
- "type": "enum",
- "enum": [
- "off",
- "office"
- ],
- "example": "office"
- },
- {
- "type": "string",
- "format": "uri",
- "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
- }
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRangeWithStep"
+ },
+ "groupBy": {
+ "type": "string",
+ "description": "This is the group by column for the insight when table is `call`.\nThese are the columns to group the results by.\nAll results are grouped by the time range step by default.",
+ "example": [
+ "assistant_id"
+ ],
+ "enum": [
+ "assistantId",
+ "workflowId",
+ "squadId",
+ "phoneNumberId",
+ "type",
+ "endedReason",
+ "customerNumber",
+ "campaignId",
+ "artifact.structuredOutputs[OutputID]"
]
},
- "hooks": {
+ "queries": {
"type": "array",
- "description": "This is a set of actions that will be performed on certain events.",
+ "description": "These are the queries to run to generate the insight.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/CallHookCallEnding",
- "title": "CallHookCallEnding"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
},
{
- "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
- "title": "CallHookAssistantSpeechInterrupted"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
},
{
- "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
- "title": "CallHookCustomerSpeechInterrupted"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
},
{
- "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
- "title": "CallHookCustomerSpeechTimeout"
+ "$ref": "#/components/schemas/JSONQueryOnEventsTable",
+ "title": "JSONQueryOnEventsTable"
}
]
}
},
- "credentials": {
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the Insight."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this Insight belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the Insight was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the Insight was last updated."
+ }
+ },
+ "required": [
+ "type",
+ "queries",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "InsightTimeRange": {
+ "type": "object",
+ "properties": {
+ "start": {
+ "type": "object",
+ "description": "This is the start date for the time range.\n\nShould be a valid ISO 8601 date-time string or relative time string.\nIf not provided, defaults to the 7 days ago.\n\nRelative time strings of the format \"-{number}{unit}\" are allowed.\n\nValid units are:\n- d: days\n- h: hours\n- w: weeks\n- m: months\n- y: years",
+ "example": "\"2025-01-01\" or \"-7d\" or \"now\""
+ },
+ "end": {
+ "type": "object",
+ "description": "This is the end date for the time range.\n\nShould be a valid ISO 8601 date-time string or relative time string.\nIf not provided, defaults to now.\n\nRelative time strings of the format \"-{number}{unit}\" are allowed.\n\nValid units are:\n- d: days\n- h: hours\n- w: weeks\n- m: months\n- y: years",
+ "example": "\"2025-01-01\" or \"now\""
+ },
+ "timezone": {
+ "type": "string",
+ "description": "This is the timezone you want to set for the query.\n\nIf not provided, defaults to UTC."
+ }
+ }
+ },
+ "PieInsight": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the Insight.\nIt is required to be `pie` to create a pie insight.",
+ "enum": [
+ "pie"
+ ]
+ },
+ "formulas": {
"type": "array",
- "description": "These are dynamic credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials.",
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variable.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the\n\nYou can also use the query names as the variable in the formula.",
+ "items": {
+ "$ref": "#/components/schemas/InsightFormula"
+ }
+ },
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRange"
+ },
+ "groupBy": {
+ "type": "string",
+ "description": "This is the group by column for the insight when table is `call`.\nThese are the columns to group the results by.\nAll results are grouped by the time range step by default.",
+ "example": [
+ "assistant_id"
+ ],
+ "enum": [
+ "assistantId",
+ "workflowId",
+ "squadId",
+ "phoneNumberId",
+ "type",
+ "endedReason",
+ "customerNumber",
+ "campaignId",
+ "artifact.structuredOutputs[OutputID]"
+ ]
+ },
+ "queries": {
+ "type": "array",
+ "description": "These are the queries to run to generate the insight.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
- "title": "AnthropicCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "title": "AnyscaleCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "title": "AssemblyAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureCredentialDTO",
- "title": "AzureCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "title": "AzureOpenAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "title": "ByoSipTrunkCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
- "title": "CartesiaCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
- "title": "CerebrasCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
- "title": "CloudflareCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "title": "CustomLLMCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
- "title": "DeepgramCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "title": "DeepInfraCredential"
- },
- {
- "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "title": "DeepSeekCredential"
- },
- {
- "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "title": "ElevenLabsCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGcpCredentialDTO",
- "title": "GcpCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
- "title": "GladiaCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "title": "GhlCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
- "title": "GoogleCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGroqCredentialDTO",
- "title": "GroqCredential"
- },
- {
- "$ref": "#/components/schemas/CreateHumeCredentialDTO",
- "title": "HumeCredential"
- },
- {
- "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
- "title": "InflectionAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
- "title": "LangfuseCredential"
- },
- {
- "$ref": "#/components/schemas/CreateLmntCredentialDTO",
- "title": "LmntCredential"
- },
- {
- "$ref": "#/components/schemas/CreateMakeCredentialDTO",
- "title": "MakeCredential"
- },
- {
- "$ref": "#/components/schemas/CreateMistralCredentialDTO",
- "title": "MistralCredential"
- },
- {
- "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "title": "NeuphonicCredential"
- },
- {
- "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
- "title": "OpenAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "title": "OpenRouterCredential"
- },
- {
- "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "title": "PerplexityAICredential"
- },
- {
- "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
- "title": "PlayHTCredential"
- },
- {
- "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
- "title": "RimeAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
- "title": "RunpodCredential"
- },
- {
- "$ref": "#/components/schemas/CreateS3CredentialDTO",
- "title": "S3Credential"
- },
- {
- "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
- "title": "SmallestAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "title": "SpeechmaticsCredential"
- },
- {
- "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
- "title": "SupabaseCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTavusCredentialDTO",
- "title": "TavusCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
- "title": "TogetherAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
- "title": "TrieveCredential"
- },
- {
- "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
- "title": "TwilioCredential"
- },
- {
- "$ref": "#/components/schemas/CreateVonageCredentialDTO",
- "title": "VonageCredential"
- },
- {
- "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
- "title": "WebhookCredential"
- },
- {
- "$ref": "#/components/schemas/CreateXAiCredentialDTO",
- "title": "XAiCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "title": "GoogleCalendarOAuth2ClientCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "title": "GoogleCalendarOAuth2AuthorizationCredential"
- },
- {
- "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "title": "GoogleSheetsOAuth2AuthorizationCredential"
- },
- {
- "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "title": "SlackOAuth2AuthorizationCredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "title": "GoHighLevelMCPCredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateInworldCredentialDTO",
- "title": "InworldCredential"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
- "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "azure": "#/components/schemas/CreateAzureCredentialDTO",
- "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
- "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
- "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
- "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
- "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "gcp": "#/components/schemas/CreateGcpCredentialDTO",
- "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
- "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "google": "#/components/schemas/CreateGoogleCredentialDTO",
- "groq": "#/components/schemas/CreateGroqCredentialDTO",
- "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
- "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
- "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
- "make": "#/components/schemas/CreateMakeCredentialDTO",
- "openai": "#/components/schemas/CreateOpenAICredentialDTO",
- "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
- "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
- "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
- "s3": "#/components/schemas/CreateS3CredentialDTO",
- "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
- "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
- "tavus": "#/components/schemas/CreateTavusCredentialDTO",
- "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
- "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
- "vonage": "#/components/schemas/CreateVonageCredentialDTO",
- "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
- "xai": "#/components/schemas/CreateXAiCredentialDTO",
- "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "hume": "#/components/schemas/CreateHumeCredentialDTO",
- "mistral": "#/components/schemas/CreateMistralCredentialDTO",
- "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
- "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "inworld": "#/components/schemas/CreateInworldCredentialDTO",
- "minimax": "#/components/schemas/CreateMinimaxCredentialDTO"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
}
- }
+ ]
}
},
"id": {
- "type": "string"
+ "type": "string",
+ "description": "This is the unique identifier for the Insight."
},
"orgId": {
- "type": "string"
+ "type": "string",
+ "description": "This is the unique identifier for the org that this Insight belongs to."
},
"createdAt": {
"format": "date-time",
- "type": "string"
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the Insight was created."
},
"updatedAt": {
"format": "date-time",
- "type": "string"
- },
- "name": {
"type": "string",
- "maxLength": 80
- },
- "edges": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/Edge"
- }
+ "description": "This is the ISO 8601 date-time string of when the Insight was last updated."
+ }
+ },
+ "required": [
+ "type",
+ "queries",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "LineInsightMetadata": {
+ "type": "object",
+ "properties": {
+ "xAxisLabel": {
+ "type": "string",
+ "minLength": 1,
+ "maxLength": 40
},
- "globalPrompt": {
+ "yAxisLabel": {
"type": "string",
- "maxLength": 5000
+ "minLength": 1,
+ "maxLength": 40
},
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. tool.server\n2. workflow.server / assistant.server\n3. phoneNumber.server\n4. org.server",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
- ]
+ "yAxisMin": {
+ "type": "number"
},
- "compliancePlan": {
- "description": "This is the compliance plan for the workflow. It allows you to configure HIPAA and other compliance settings.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CompliancePlan"
- }
- ]
+ "yAxisMax": {
+ "type": "number"
},
- "analysisPlan": {
- "description": "This is the plan for analysis of workflow's calls. Stored in `call.analysis`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/AnalysisPlan"
- }
- ]
+ "name": {
+ "type": "string",
+ "minLength": 1,
+ "maxLength": 255
+ }
+ }
+ },
+ "LineInsight": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
},
- "artifactPlan": {
- "description": "This is the plan for artifacts generated during workflow's calls. Stored in `call.artifact`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ArtifactPlan"
- }
+ "type": {
+ "type": "string",
+ "description": "This is the type of the Insight.\nIt is required to be `line` to create a line insight.",
+ "enum": [
+ "line"
]
},
- "startSpeakingPlan": {
- "description": "This is the plan for when the workflow nodes should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
- "allOf": [
- {
- "$ref": "#/components/schemas/StartSpeakingPlan"
- }
- ]
+ "formulas": {
+ "type": "array",
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variable.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the\n\nYou can also use the query names as the variable in the formula.",
+ "items": {
+ "$ref": "#/components/schemas/InsightFormula"
+ }
},
- "stopSpeakingPlan": {
- "description": "This is the plan for when workflow nodes should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
+ "metadata": {
+ "description": "This is the metadata for the insight.",
"allOf": [
{
- "$ref": "#/components/schemas/StopSpeakingPlan"
+ "$ref": "#/components/schemas/LineInsightMetadata"
}
]
},
- "monitorPlan": {
- "description": "This is the plan for real-time monitoring of the workflow's calls.\n\nUsage:\n- To enable live listening of the workflow's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the workflow's calls, set `monitorPlan.controlEnabled` to `true`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/MonitorPlan"
- }
- ]
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRangeWithStep"
},
- "backgroundSpeechDenoisingPlan": {
- "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nBoth can be used together. Order of precedence:\n- Smart denoising\n- Fourier denoising",
- "allOf": [
- {
- "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
- }
+ "groupBy": {
+ "type": "string",
+ "description": "This is the group by column for the insight when table is `call`.\nThese are the columns to group the results by.\nAll results are grouped by the time range step by default.",
+ "example": "assistantId",
+ "enum": [
+ "assistantId",
+ "workflowId",
+ "squadId",
+ "phoneNumberId",
+ "type",
+ "endedReason",
+ "customerNumber",
+ "campaignId",
+ "artifact.structuredOutputs[OutputID]"
]
},
- "credentialIds": {
- "description": "These are the credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
+ "queries": {
"type": "array",
+ "description": "These are the queries to run to generate the insight.",
"items": {
- "type": "string"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
+ },
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
+ },
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
+ }
+ ]
}
},
- "keypadInputPlan": {
- "description": "This is the plan for keypad input handling during workflow calls.",
- "allOf": [
- {
- "$ref": "#/components/schemas/KeypadInputPlan"
- }
- ]
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the Insight."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this Insight belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the Insight was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the Insight was last updated."
}
},
"required": [
- "nodes",
+ "type",
+ "queries",
"id",
"orgId",
"createdAt",
- "updatedAt",
- "name",
- "edges"
+ "updatedAt"
]
},
- "UpdateWorkflowDTO": {
+ "TextInsight": {
"type": "object",
"properties": {
- "nodes": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the Insight.\nIt is required to be `text` to create a text insight.",
+ "enum": [
+ "text"
+ ]
+ },
+ "formula": {
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas need to be valid mathematical expressions, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variables.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the result of the first query, multiply it by 10, and add the result of the second query.\nYou can also use the query names as the variables in the formula.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/InsightFormula"
+ }
+ ]
+ },
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRange"
+ },
+ "queries": {
"type": "array",
+ "description": "These are the queries to run to generate the insight.\nFor Text Insights, we only allow a single query, or require a formula if multiple queries are provided",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/ConversationNode",
- "title": "ConversationNode"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
},
{
- "$ref": "#/components/schemas/ToolNode",
- "title": "ToolNode"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
+ },
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
}
]
}
},
- "model": {
- "description": "This is the model for the workflow.\n\nThis can be overridden at node level using `nodes[n].model`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/WorkflowOpenAIModel",
- "title": "WorkflowOpenAIModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowAnthropicModel",
- "title": "WorkflowAnthropicModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowGoogleModel",
- "title": "WorkflowGoogleModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowCustomModel",
- "title": "WorkflowCustomModel"
- }
- ]
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the Insight."
},
- "transcriber": {
- "description": "This is the transcriber for the workflow.\n\nThis can be overridden at node level using `nodes[n].transcriber`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AssemblyAITranscriber",
- "title": "AssemblyAITranscriber"
- },
- {
- "$ref": "#/components/schemas/AzureSpeechTranscriber",
- "title": "AzureSpeechTranscriber"
- },
- {
- "$ref": "#/components/schemas/CustomTranscriber",
- "title": "CustomTranscriber"
- },
- {
- "$ref": "#/components/schemas/DeepgramTranscriber",
- "title": "DeepgramTranscriber"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsTranscriber",
- "title": "ElevenLabsTranscriber"
- },
- {
- "$ref": "#/components/schemas/GladiaTranscriber",
- "title": "GladiaTranscriber"
- },
- {
- "$ref": "#/components/schemas/GoogleTranscriber",
- "title": "GoogleTranscriber"
- },
- {
- "$ref": "#/components/schemas/SpeechmaticsTranscriber",
- "title": "SpeechmaticsTranscriber"
- },
- {
- "$ref": "#/components/schemas/TalkscriberTranscriber",
- "title": "TalkscriberTranscriber"
- },
- {
- "$ref": "#/components/schemas/OpenAITranscriber",
- "title": "OpenAITranscriber"
- },
- {
- "$ref": "#/components/schemas/CartesiaTranscriber",
- "title": "CartesiaTranscriber"
- }
- ]
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this Insight belongs to."
},
- "voice": {
- "description": "This is the voice for the workflow.\n\nThis can be overridden at node level using `nodes[n].voice`.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/AzureVoice",
- "title": "AzureVoice"
- },
- {
- "$ref": "#/components/schemas/CartesiaVoice",
- "title": "CartesiaVoice"
- },
- {
- "$ref": "#/components/schemas/CustomVoice",
- "title": "CustomVoice"
- },
- {
- "$ref": "#/components/schemas/DeepgramVoice",
- "title": "DeepgramVoice"
- },
- {
- "$ref": "#/components/schemas/ElevenLabsVoice",
- "title": "ElevenLabsVoice"
- },
- {
- "$ref": "#/components/schemas/HumeVoice",
- "title": "HumeVoice"
- },
- {
- "$ref": "#/components/schemas/LMNTVoice",
- "title": "LMNTVoice"
- },
- {
- "$ref": "#/components/schemas/NeuphonicVoice",
- "title": "NeuphonicVoice"
- },
- {
- "$ref": "#/components/schemas/OpenAIVoice",
- "title": "OpenAIVoice"
- },
- {
- "$ref": "#/components/schemas/PlayHTVoice",
- "title": "PlayHTVoice"
- },
- {
- "$ref": "#/components/schemas/RimeAIVoice",
- "title": "RimeAIVoice"
- },
- {
- "$ref": "#/components/schemas/SmallestAIVoice",
- "title": "SmallestAIVoice"
- },
- {
- "$ref": "#/components/schemas/TavusVoice",
- "title": "TavusVoice"
- },
- {
- "$ref": "#/components/schemas/VapiVoice",
- "title": "VapiVoice"
- },
- {
- "$ref": "#/components/schemas/SesameVoice",
- "title": "SesameVoice"
- },
- {
- "$ref": "#/components/schemas/InworldVoice",
- "title": "InworldVoice"
- },
- {
- "$ref": "#/components/schemas/MinimaxVoice",
- "title": "MinimaxVoice"
- }
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the Insight was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the Insight was last updated."
+ }
+ },
+ "required": [
+ "type",
+ "queries",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "UpdateBarInsightFromCallTableDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the Insight.\nIt is required to be `bar` to create a bar insight.",
+ "enum": [
+ "bar"
]
},
- "observabilityPlan": {
- "description": "This is the plan for observability of workflow's calls.\n\nCurrently, only Langfuse is supported.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/LangfuseObservabilityPlan",
- "title": "Langfuse"
- }
- ],
+ "formulas": {
+ "type": "array",
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas need to be valid mathematical expressions, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variables.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the result of the first query, multiply it by 10, and add the result of the second query.\nYou can also use the query names as the variables in the formula.",
+ "items": {
+ "$ref": "#/components/schemas/InsightFormula"
+ }
+ },
+ "metadata": {
+ "description": "This is the metadata for the insight.",
"allOf": [
{
- "$ref": "#/components/schemas/LangfuseObservabilityPlan"
+ "$ref": "#/components/schemas/BarInsightMetadata"
}
]
},
- "backgroundSound": {
- "description": "This is the background sound in the call. Default for phone calls is 'office' and default for web calls is 'off'.\nYou can also provide a custom sound by providing a URL to an audio file.",
- "oneOf": [
- {
- "type": "enum",
- "enum": [
- "off",
- "office"
- ],
- "example": "office"
- },
- {
- "type": "string",
- "format": "uri",
- "example": "https://www.soundjay.com/ambient/sounds/people-in-lounge-1.mp3"
- }
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRangeWithStep"
+ },
+ "groupBy": {
+ "type": "string",
+ "description": "This is the group by column for the insight when table is `call`.\nThese are the columns to group the results by.\nAll results are grouped by the time range step by default.",
+ "example": "assistantId",
+ "enum": [
+ "assistantId",
+ "workflowId",
+ "squadId",
+ "phoneNumberId",
+ "type",
+ "endedReason",
+ "customerNumber",
+ "campaignId",
+ "artifact.structuredOutputs[OutputID]"
]
},
- "hooks": {
+ "queries": {
"type": "array",
- "description": "This is a set of actions that will be performed on certain events.",
+ "description": "These are the queries to run to generate the insight.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/CallHookCallEnding",
- "title": "CallHookCallEnding"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
},
{
- "$ref": "#/components/schemas/CallHookAssistantSpeechInterrupted",
- "title": "CallHookAssistantSpeechInterrupted"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
},
{
- "$ref": "#/components/schemas/CallHookCustomerSpeechInterrupted",
- "title": "CallHookCustomerSpeechInterrupted"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
},
{
- "$ref": "#/components/schemas/CallHookCustomerSpeechTimeout",
- "title": "CallHookCustomerSpeechTimeout"
+ "$ref": "#/components/schemas/JSONQueryOnEventsTable",
+ "title": "JSONQueryOnEventsTable"
}
]
}
+ }
+ }
+ },
+ "UpdatePieInsightFromCallTableDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
},
- "credentials": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of the Insight.\nIt is required to be `pie` to create a pie insight.",
+ "enum": [
+ "pie"
+ ]
+ },
+ "formulas": {
"type": "array",
- "description": "These are dynamic credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can supplement an additional credentials using this. Dynamic credentials override existing credentials.",
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas need to be valid mathematical expressions, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variables.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the result of the first query, multiply it by 10, and add the result of the second query.\nYou can also use the query names as the variables in the formula.",
+ "items": {
+ "$ref": "#/components/schemas/InsightFormula"
+ }
+ },
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRange"
+ },
+ "groupBy": {
+ "type": "string",
+ "description": "This is the group by column for the insight when table is `call`.\nThese are the columns to group the results by.\nAll results are grouped by the time range step by default.",
+ "example": "assistantId",
+ "enum": [
+ "assistantId",
+ "workflowId",
+ "squadId",
+ "phoneNumberId",
+ "type",
+ "endedReason",
+ "customerNumber",
+ "campaignId",
+ "artifact.structuredOutputs[OutputID]"
+ ]
+ },
+ "queries": {
+ "type": "array",
+ "description": "These are the queries to run to generate the insight.",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/CreateAnthropicCredentialDTO",
- "title": "AnthropicCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "title": "AnyscaleCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "title": "AssemblyAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureCredentialDTO",
- "title": "AzureCredential"
- },
- {
- "$ref": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "title": "AzureOpenAICredential"
- },
- {
- "$ref": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "title": "ByoSipTrunkCredential"
- },
- {
- "$ref": "#/components/schemas/CreateCartesiaCredentialDTO",
- "title": "CartesiaCredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateCerebrasCredentialDTO",
- "title": "CerebrasCredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateCloudflareCredentialDTO",
- "title": "CloudflareCredential"
- },
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "UpdateLineInsightFromCallTableDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the Insight.\nIt is required to be `line` to create a line insight.",
+ "enum": [
+ "line"
+ ]
+ },
+ "formulas": {
+ "type": "array",
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas need to be valid mathematical expressions, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variables.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the result of the first query, multiply it by 10, and add the result of the second query.\nYou can also use the query names as the variables in the formula.",
+ "items": {
+ "$ref": "#/components/schemas/InsightFormula"
+ }
+ },
+ "metadata": {
+ "description": "This is the metadata for the insight.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/LineInsightMetadata"
+ }
+ ]
+ },
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRangeWithStep"
+ },
+ "groupBy": {
+ "type": "string",
+ "description": "This is the group by column for the insight when table is `call`.\nThese are the columns to group the results by.\nAll results are grouped by the time range step by default.",
+ "example": "assistantId",
+ "enum": [
+ "assistantId",
+ "workflowId",
+ "squadId",
+ "phoneNumberId",
+ "type",
+ "endedReason",
+ "customerNumber",
+ "campaignId",
+ "artifact.structuredOutputs[OutputID]"
+ ]
+ },
+ "queries": {
+ "type": "array",
+ "description": "These are the queries to run to generate the insight.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "title": "CustomLLMCredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateDeepgramCredentialDTO",
- "title": "DeepgramCredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "title": "DeepInfraCredential"
- },
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "UpdateTextInsightFromCallTableDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the Insight.\nIt is required to be `text` to create a text insight.",
+ "enum": [
+ "text"
+ ]
+ },
+ "formula": {
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas need to be valid mathematical expressions, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variables.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the result of the first query, multiply it by 10, and add the result of the second query.\nYou can also use the query names as the variables in the formula.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/InsightFormula"
+ }
+ ]
+ },
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRange"
+ },
+ "queries": {
+ "type": "array",
+ "description": "These are the queries to run to generate the insight.\nFor Text Insights, we only allow a single query, or require a formula if multiple queries are provided",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "title": "DeepSeekCredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "title": "ElevenLabsCredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateGcpCredentialDTO",
- "title": "GcpCredential"
- },
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "CreateBarInsightFromCallTableDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the Insight.\nIt is required to be `bar` to create a bar insight.",
+ "enum": [
+ "bar"
+ ]
+ },
+ "formulas": {
+ "type": "array",
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas need to be valid mathematical expressions, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variables.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the result of the first query, multiply it by 10, and add the result of the second query.\nYou can also use the query names as the variables in the formula.",
+ "items": {
+ "$ref": "#/components/schemas/InsightFormula"
+ }
+ },
+ "metadata": {
+ "description": "This is the metadata for the insight.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/BarInsightMetadata"
+ }
+ ]
+ },
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRangeWithStep"
+ },
+ "groupBy": {
+ "type": "string",
+ "description": "This is the group by column for the insight when table is `call`.\nThese are the columns to group the results by.\nAll results are grouped by the time range step by default.",
+ "example": "assistantId",
+ "enum": [
+ "assistantId",
+ "workflowId",
+ "squadId",
+ "phoneNumberId",
+ "type",
+ "endedReason",
+ "customerNumber",
+ "campaignId",
+ "artifact.structuredOutputs[OutputID]"
+ ]
+ },
+ "queries": {
+ "type": "array",
+ "description": "These are the queries to run to generate the insight.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateGladiaCredentialDTO",
- "title": "GladiaCredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "title": "GhlCredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateGoogleCredentialDTO",
- "title": "GoogleCredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
},
{
- "$ref": "#/components/schemas/CreateGroqCredentialDTO",
- "title": "GroqCredential"
- },
+ "$ref": "#/components/schemas/JSONQueryOnEventsTable",
+ "title": "JSONQueryOnEventsTable"
+ }
+ ]
+ }
+ }
+ },
+ "required": [
+ "type",
+ "queries"
+ ]
+ },
+ "CreatePieInsightFromCallTableDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the Insight.\nIt is required to be `pie` to create a pie insight.",
+ "enum": [
+ "pie"
+ ]
+ },
+ "formulas": {
+ "type": "array",
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas need to be valid mathematical expressions, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variables.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the result of the first query, multiply it by 10, and add the result of the second query.\nYou can also use the query names as the variables in the formula.",
+ "items": {
+ "$ref": "#/components/schemas/InsightFormula"
+ }
+ },
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRange"
+ },
+ "groupBy": {
+ "type": "string",
+ "description": "This is the group by column for the insight when table is `call`.\nThese are the columns to group the results by.\nAll results are grouped by the time range step by default.",
+ "example": "assistantId",
+ "enum": [
+ "assistantId",
+ "workflowId",
+ "squadId",
+ "phoneNumberId",
+ "type",
+ "endedReason",
+ "customerNumber",
+ "campaignId",
+ "artifact.structuredOutputs[OutputID]"
+ ]
+ },
+ "queries": {
+ "type": "array",
+ "description": "These are the queries to run to generate the insight.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateHumeCredentialDTO",
- "title": "HumeCredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateInflectionAICredentialDTO",
- "title": "InflectionAICredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateLangfuseCredentialDTO",
- "title": "LangfuseCredential"
- },
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
+ }
+ ]
+ }
+ }
+ },
+ "required": [
+ "type",
+ "queries"
+ ]
+ },
+ "CreateLineInsightFromCallTableDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the Insight.\nIt is required to be `line` to create a line insight.",
+ "enum": [
+ "line"
+ ]
+ },
+ "formulas": {
+ "type": "array",
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas need to be valid mathematical expressions, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variables.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the result of the first query, multiply it by 10, and add the result of the second query.\nYou can also use the query names as the variables in the formula.",
+ "items": {
+ "$ref": "#/components/schemas/InsightFormula"
+ }
+ },
+ "metadata": {
+ "description": "This is the metadata for the insight.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/LineInsightMetadata"
+ }
+ ]
+ },
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRangeWithStep"
+ },
+ "groupBy": {
+ "type": "string",
+ "description": "This is the group by column for the insight when table is `call`.\nThese are the columns to group the results by.\nAll results are grouped by the time range step by default.",
+ "example": "assistantId",
+ "enum": [
+ "assistantId",
+ "workflowId",
+ "squadId",
+ "phoneNumberId",
+ "type",
+ "endedReason",
+ "customerNumber",
+ "campaignId",
+ "artifact.structuredOutputs[OutputID]"
+ ]
+ },
+ "queries": {
+ "type": "array",
+ "description": "These are the queries to run to generate the insight.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateLmntCredentialDTO",
- "title": "LmntCredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateMakeCredentialDTO",
- "title": "MakeCredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateMistralCredentialDTO",
- "title": "MistralCredential"
- },
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
+ }
+ ]
+ }
+ }
+ },
+ "required": [
+ "type",
+ "queries"
+ ]
+ },
+ "CreateTextInsightFromCallTableDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the Insight.\nIt is required to be `text` to create a text insight.",
+ "enum": [
+ "text"
+ ]
+ },
+ "formula": {
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas need to be valid mathematical expressions, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variables.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the result of the first query, multiply it by 10, and add the result of the second query.\nYou can also use the query names as the variables in the formula.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/InsightFormula"
+ }
+ ]
+ },
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRange"
+ },
+ "queries": {
+ "type": "array",
+ "description": "These are the queries to run to generate the insight.\nFor Text Insights, we only allow a single query, or require a formula if multiple queries are provided",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "title": "NeuphonicCredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateOpenAICredentialDTO",
- "title": "OpenAICredential"
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
},
{
- "$ref": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "title": "OpenRouterCredential"
- },
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
+ }
+ ]
+ }
+ }
+ },
+ "required": [
+ "type",
+ "queries"
+ ]
+ },
+ "JSONQueryOnCallTableWithStringTypeColumn": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of query. Only allowed type is \"vapiql-json\".",
+ "example": "vapiql-json",
+ "enum": [
+ "vapiql-json"
+ ]
+ },
+ "table": {
+ "type": "string",
+ "description": "This is the table that will be queried.",
+ "enum": [
+ "call"
+ ]
+ },
+ "filters": {
+ "type": "array",
+ "description": "This is the filters to apply to the insight.\nThe discriminator automatically selects the correct filter type based on column and operator.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "title": "PerplexityAICredential"
+ "$ref": "#/components/schemas/FilterStringTypeColumnOnCallTable"
},
{
- "$ref": "#/components/schemas/CreatePlayHTCredentialDTO",
- "title": "PlayHTCredential"
+ "$ref": "#/components/schemas/FilterStringArrayTypeColumnOnCallTable"
},
{
- "$ref": "#/components/schemas/CreateRimeAICredentialDTO",
- "title": "RimeAICredential"
+ "$ref": "#/components/schemas/FilterNumberTypeColumnOnCallTable"
},
{
- "$ref": "#/components/schemas/CreateRunpodCredentialDTO",
- "title": "RunpodCredential"
+ "$ref": "#/components/schemas/FilterNumberArrayTypeColumnOnCallTable"
},
{
- "$ref": "#/components/schemas/CreateS3CredentialDTO",
- "title": "S3Credential"
+ "$ref": "#/components/schemas/FilterDateTypeColumnOnCallTable"
},
{
- "$ref": "#/components/schemas/CreateSmallestAICredentialDTO",
- "title": "SmallestAICredential"
- },
+ "$ref": "#/components/schemas/FilterStructuredOutputColumnOnCallTable"
+ }
+ ]
+ }
+ },
+ "column": {
+ "type": "string",
+ "enum": [
+ "id",
+ "artifact.structuredOutputs[OutputID]"
+ ],
+ "description": "This is the column that will be queried in the selected table.\nAvailable columns depend on the selected table.\nString Type columns are columns where the rows store String data",
+ "example": "id"
+ },
+ "operation": {
+ "type": "string",
+ "enum": [
+ "count"
+ ],
+ "description": "This is the aggregation operation to perform on the column.\nWhen the column is a string type, the operation must be \"count\".",
+ "example": "count"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the query.\nIt will be used to label the query in the insight board on the UI.",
+ "example": "Total Calls"
+ }
+ },
+ "required": [
+ "type",
+ "table",
+ "column",
+ "operation"
+ ]
+ },
+ "JSONQueryOnCallTableWithNumberTypeColumn": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of query. Only allowed type is \"vapiql-json\".",
+ "example": "vapiql-json",
+ "enum": [
+ "vapiql-json"
+ ]
+ },
+ "table": {
+ "type": "string",
+ "description": "This is the table that will be queried.",
+ "enum": [
+ "call"
+ ]
+ },
+ "filters": {
+ "type": "array",
+ "description": "This is the filters to apply to the insight.\nThe discriminator automatically selects the correct filter type based on column and operator.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "title": "SpeechmaticsCredential"
+ "$ref": "#/components/schemas/FilterStringTypeColumnOnCallTable"
},
{
- "$ref": "#/components/schemas/CreateSupabaseCredentialDTO",
- "title": "SupabaseCredential"
+ "$ref": "#/components/schemas/FilterStringArrayTypeColumnOnCallTable"
},
{
- "$ref": "#/components/schemas/CreateTavusCredentialDTO",
- "title": "TavusCredential"
+ "$ref": "#/components/schemas/FilterNumberTypeColumnOnCallTable"
},
{
- "$ref": "#/components/schemas/CreateTogetherAICredentialDTO",
- "title": "TogetherAICredential"
+ "$ref": "#/components/schemas/FilterNumberArrayTypeColumnOnCallTable"
},
{
- "$ref": "#/components/schemas/CreateTrieveCredentialDTO",
- "title": "TrieveCredential"
+ "$ref": "#/components/schemas/FilterDateTypeColumnOnCallTable"
},
{
- "$ref": "#/components/schemas/CreateTwilioCredentialDTO",
- "title": "TwilioCredential"
- },
+ "$ref": "#/components/schemas/FilterStructuredOutputColumnOnCallTable"
+ }
+ ]
+ }
+ },
+ "column": {
+ "type": "string",
+ "enum": [
+ "cost",
+ "duration",
+ "averageModelLatency",
+ "averageVoiceLatency",
+ "averageTranscriberLatency",
+ "averageTurnLatency",
+ "averageEndpointingLatency",
+ "artifact.structuredOutputs[OutputID]"
+ ],
+ "description": "This is the column that will be queried in the selected table.\nAvailable columns depend on the selected table.\nNumber Type columns are columns where the rows store Number data",
+ "example": "duration"
+ },
+ "operation": {
+ "type": "string",
+ "enum": [
+ "average",
+ "sum",
+ "min",
+ "max"
+ ],
+ "description": "This is the aggregation operation to perform on the column.\nWhen the column is a number type, the operation must be one of the following:\n- average\n- sum\n- min\n- max",
+ "example": "sum"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the query.\nIt will be used to label the query in the insight board on the UI.",
+ "example": "Total Calls"
+ }
+ },
+ "required": [
+ "type",
+ "table",
+ "column",
+ "operation"
+ ]
+ },
+ "JSONQueryOnCallTableWithStructuredOutputColumn": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of query. Only allowed type is \"vapiql-json\".",
+ "example": "vapiql-json",
+ "enum": [
+ "vapiql-json"
+ ]
+ },
+ "table": {
+ "type": "string",
+ "description": "This is the table that will be queried.",
+ "enum": [
+ "call"
+ ]
+ },
+ "filters": {
+ "type": "array",
+ "description": "This is the filters to apply to the insight.\nThe discriminator automatically selects the correct filter type based on column and operator.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateVonageCredentialDTO",
- "title": "VonageCredential"
+ "$ref": "#/components/schemas/FilterStringTypeColumnOnCallTable"
},
{
- "$ref": "#/components/schemas/CreateWebhookCredentialDTO",
- "title": "WebhookCredential"
+ "$ref": "#/components/schemas/FilterStringArrayTypeColumnOnCallTable"
},
{
- "$ref": "#/components/schemas/CreateXAiCredentialDTO",
- "title": "XAiCredential"
+ "$ref": "#/components/schemas/FilterNumberTypeColumnOnCallTable"
},
{
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "title": "GoogleCalendarOAuth2ClientCredential"
+ "$ref": "#/components/schemas/FilterNumberArrayTypeColumnOnCallTable"
},
{
- "$ref": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "title": "GoogleCalendarOAuth2AuthorizationCredential"
+ "$ref": "#/components/schemas/FilterDateTypeColumnOnCallTable"
},
{
- "$ref": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "title": "GoogleSheetsOAuth2AuthorizationCredential"
- },
+ "$ref": "#/components/schemas/FilterStructuredOutputColumnOnCallTable"
+ }
+ ]
+ }
+ },
+ "column": {
+ "type": "string",
+ "enum": [
+ "artifact.structuredOutputs[OutputID]"
+ ],
+ "description": "This is the column that will be queried in the call table.\nStructured Output Type columns are only to query on artifact.structuredOutputs[OutputID] column.",
+ "example": "artifact.structuredOutputs[OutputID]"
+ },
+ "operation": {
+ "type": "string",
+ "enum": [
+ "average",
+ "count",
+ "sum",
+ "min",
+ "max"
+ ],
+ "description": "This is the aggregation operation to perform on the column.\nWhen the column is a structured output type, the operation depends on the value of the structured output.\nIf the structured output is a string or boolean, the operation must be \"count\".\nIf the structured output is a number, the operation can be \"average\", \"sum\", \"min\", or \"max\".",
+ "example": "count"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the query.\nIt will be used to label the query in the insight board on the UI.",
+ "example": "Total Calls"
+ }
+ },
+ "required": [
+ "type",
+ "table",
+ "column",
+ "operation"
+ ]
+ },
+ "JSONQueryOnEventsTable": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of query. Only allowed type is \"vapiql-json\".",
+ "example": "vapiql-json",
+ "enum": [
+ "vapiql-json"
+ ]
+ },
+ "table": {
+ "type": "string",
+ "description": "This is the table that will be queried.\nMust be \"events\" for event-based insights.",
+ "enum": [
+ "events"
+ ]
+ },
+ "on": {
+ "type": "string",
+ "description": "The event type to query",
+ "example": "assistant.model.requestFailed",
+ "enum": [
+ "call.started",
+ "call.ended",
+ "call.inProgress",
+ "call.queued",
+ "call.transportConnected",
+ "call.transportDisconnected",
+ "call.transportReconnected",
+ "call.transferInitiated",
+ "call.transferCompleted",
+ "call.transferFailed",
+ "call.transferCancelled",
+ "call.handoffInitiated",
+ "call.handoffCompleted",
+ "call.handoffFailed",
+ "call.assistantSwapped",
+ "call.assistantStarted",
+ "call.customerJoined",
+ "call.customerLeft",
+ "call.controlReceived",
+ "call.listenStarted",
+ "call.recordingStarted",
+ "call.recordingPaused",
+ "call.recordingResumed",
+ "call.voicemailDetected",
+ "call.voicemailNotDetected",
+ "call.dtmfReceived",
+ "call.dtmfSent",
+ "call.amdDetected",
+ "call.hookTriggered",
+ "call.hookSucceeded",
+ "call.hookFailed",
+ "call.statusReceived",
+ "call.silenceTimeout",
+ "call.microphoneTimeout",
+ "call.maxDurationReached",
+ "assistant.voice.requestStarted",
+ "assistant.voice.requestSucceeded",
+ "assistant.voice.requestFailed",
+ "assistant.voice.connectionOpened",
+ "assistant.voice.connectionClosed",
+ "assistant.voice.firstAudioReceived",
+ "assistant.voice.audioChunkReceived",
+ "assistant.voice.generationSucceeded",
+ "assistant.voice.generationFailed",
+ "assistant.voice.textPushed",
+ "assistant.voice.reconnecting",
+ "assistant.voice.cleanup",
+ "assistant.voice.clearing",
+ "assistant.voice.voiceSwitched",
+ "assistant.model.requestStarted",
+ "assistant.model.requestSucceeded",
+ "assistant.model.requestFailed",
+ "assistant.model.requestAttemptStarted",
+ "assistant.model.requestAttemptSucceeded",
+ "assistant.model.requestAttemptFailed",
+ "assistant.model.connectionOpened",
+ "assistant.model.connectionClosed",
+ "assistant.model.firstTokenReceived",
+ "assistant.model.tokenReceived",
+ "assistant.model.responseSucceeded",
+ "assistant.model.responseFailed",
+ "assistant.model.toolCallsReceived",
+ "assistant.model.reconnecting",
+ "assistant.model.cleanup",
+ "assistant.model.clearing",
+ "assistant.tool.started",
+ "assistant.tool.completed",
+ "assistant.tool.failed",
+ "assistant.tool.delayedMessageSent",
+ "assistant.tool.timeout",
+ "assistant.tool.asyncCallbackReceived",
+ "assistant.transcriber.requestStarted",
+ "assistant.transcriber.requestSucceeded",
+ "assistant.transcriber.requestFailed",
+ "assistant.transcriber.connectionOpened",
+ "assistant.transcriber.connectionClosed",
+ "assistant.transcriber.partialTranscript",
+ "assistant.transcriber.finalTranscript",
+ "assistant.transcriber.keepAlive",
+ "assistant.transcriber.reconnecting",
+ "assistant.transcriber.cleanup",
+ "assistant.transcriber.clearing",
+ "assistant.transcriber.transcriptIgnored",
+ "assistant.transcriber.languageSwitched",
+ "assistant.analysis.structuredOutputGenerated",
+ "pipeline.turnStarted",
+ "pipeline.cleared",
+ "pipeline.botSpeechStarted",
+ "pipeline.botSpeechStopped",
+ "pipeline.userSpeechStarted",
+ "pipeline.userSpeechStopped",
+ "pipeline.endpointingTriggered",
+ "pipeline.firstMessageStarted",
+ "pipeline.firstMessageCompleted"
+ ]
+ },
+ "operation": {
+ "type": "string",
+ "description": "This is the operation to perform on matching events.\n- \"count\": Returns the raw count of matching events\n- \"percentage\": Returns (count of matching events / total calls) * 100",
+ "example": "count",
+ "enum": [
+ "count",
+ "percentage"
+ ]
+ },
+ "filters": {
+ "type": "array",
+ "description": "These are the filters to apply to the events query.\nEach filter filters on a field specific to the event type.",
+ "items": {
+ "oneOf": [
{
- "$ref": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "title": "SlackOAuth2AuthorizationCredential"
+ "$ref": "#/components/schemas/EventsTableStringCondition"
},
{
- "$ref": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "title": "GoHighLevelMCPCredential"
+ "$ref": "#/components/schemas/EventsTableNumberCondition"
},
{
- "$ref": "#/components/schemas/CreateInworldCredentialDTO",
- "title": "InworldCredential"
- }
- ],
- "discriminator": {
- "propertyName": "provider",
- "mapping": {
- "11labs": "#/components/schemas/CreateElevenLabsCredentialDTO",
- "anthropic": "#/components/schemas/CreateAnthropicCredentialDTO",
- "anyscale": "#/components/schemas/CreateAnyscaleCredentialDTO",
- "assembly-ai": "#/components/schemas/CreateAssemblyAICredentialDTO",
- "azure-openai": "#/components/schemas/CreateAzureOpenAICredentialDTO",
- "azure": "#/components/schemas/CreateAzureCredentialDTO",
- "byo-sip-trunk": "#/components/schemas/CreateByoSipTrunkCredentialDTO",
- "cartesia": "#/components/schemas/CreateCartesiaCredentialDTO",
- "cerebras": "#/components/schemas/CreateCerebrasCredentialDTO",
- "cloudflare": "#/components/schemas/CreateCloudflareCredentialDTO",
- "custom-llm": "#/components/schemas/CreateCustomLLMCredentialDTO",
- "deepgram": "#/components/schemas/CreateDeepgramCredentialDTO",
- "deepinfra": "#/components/schemas/CreateDeepInfraCredentialDTO",
- "deep-seek": "#/components/schemas/CreateDeepSeekCredentialDTO",
- "gcp": "#/components/schemas/CreateGcpCredentialDTO",
- "gladia": "#/components/schemas/CreateGladiaCredentialDTO",
- "gohighlevel": "#/components/schemas/CreateGoHighLevelCredentialDTO",
- "google": "#/components/schemas/CreateGoogleCredentialDTO",
- "groq": "#/components/schemas/CreateGroqCredentialDTO",
- "inflection-ai": "#/components/schemas/CreateInflectionAICredentialDTO",
- "langfuse": "#/components/schemas/CreateLangfuseCredentialDTO",
- "lmnt": "#/components/schemas/CreateLmntCredentialDTO",
- "make": "#/components/schemas/CreateMakeCredentialDTO",
- "openai": "#/components/schemas/CreateOpenAICredentialDTO",
- "openrouter": "#/components/schemas/CreateOpenRouterCredentialDTO",
- "perplexity-ai": "#/components/schemas/CreatePerplexityAICredentialDTO",
- "playht": "#/components/schemas/CreatePlayHTCredentialDTO",
- "rime-ai": "#/components/schemas/CreateRimeAICredentialDTO",
- "runpod": "#/components/schemas/CreateRunpodCredentialDTO",
- "s3": "#/components/schemas/CreateS3CredentialDTO",
- "supabase": "#/components/schemas/CreateSupabaseCredentialDTO",
- "smallest-ai": "#/components/schemas/CreateSmallestAICredentialDTO",
- "tavus": "#/components/schemas/CreateTavusCredentialDTO",
- "together-ai": "#/components/schemas/CreateTogetherAICredentialDTO",
- "twilio": "#/components/schemas/CreateTwilioCredentialDTO",
- "vonage": "#/components/schemas/CreateVonageCredentialDTO",
- "webhook": "#/components/schemas/CreateWebhookCredentialDTO",
- "xai": "#/components/schemas/CreateXAiCredentialDTO",
- "neuphonic": "#/components/schemas/CreateNeuphonicCredentialDTO",
- "hume": "#/components/schemas/CreateHumeCredentialDTO",
- "mistral": "#/components/schemas/CreateMistralCredentialDTO",
- "speechmatics": "#/components/schemas/CreateSpeechmaticsCredentialDTO",
- "trieve": "#/components/schemas/CreateTrieveCredentialDTO",
- "google.calendar.oauth2-client": "#/components/schemas/CreateGoogleCalendarOAuth2ClientCredentialDTO",
- "google.calendar.oauth2-authorization": "#/components/schemas/CreateGoogleCalendarOAuth2AuthorizationCredentialDTO",
- "google.sheets.oauth2-authorization": "#/components/schemas/CreateGoogleSheetsOAuth2AuthorizationCredentialDTO",
- "slack.oauth2-authorization": "#/components/schemas/CreateSlackOAuth2AuthorizationCredentialDTO",
- "ghl.oauth2-authorization": "#/components/schemas/CreateGoHighLevelMCPCredentialDTO",
- "inworld": "#/components/schemas/CreateInworldCredentialDTO",
- "minimax": "#/components/schemas/CreateMinimaxCredentialDTO"
+ "$ref": "#/components/schemas/EventsTableBooleanCondition"
}
- }
+ ]
}
},
- "name": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the query.\nIt will be used to label the query in the insight board on the UI.",
+ "example": "Model Failures"
+ }
+ },
+ "required": [
+ "type",
+ "table",
+ "on",
+ "operation"
+ ]
+ },
+ "FilterStringTypeColumnOnCallTable": {
+ "type": "object",
+ "properties": {
+ "column": {
+ "type": "string",
+ "description": "This is the column in the call table that will be filtered on.\nString Type columns are columns where the rows store data as a string.\nMust be a valid column for the selected table.",
+ "example": "assistant_id",
+ "enum": [
+ "assistantId",
+ "workflowId",
+ "squadId",
+ "phoneNumberId",
+ "type",
+ "customerNumber",
+ "status",
+ "endedReason",
+ "forwardedPhoneNumber",
+ "campaignId"
+ ]
+ },
+ "operator": {
+ "type": "string",
+ "description": "This is the operator to use for the filter.\nFor string type columns, the operator must be \"=\", \"!=\", \"contains\", \"not contains\"",
+ "example": "\"=\" or \"!=\"",
+ "enum": [
+ "=",
+ "!=",
+ "contains",
+ "not_contains"
+ ]
+ },
+ "value": {
+ "type": "string",
+ "description": "This is the value to filter on."
+ }
+ },
+ "required": [
+ "column",
+ "operator",
+ "value"
+ ]
+ },
+ "FilterNumberTypeColumnOnCallTable": {
+ "type": "object",
+ "properties": {
+ "column": {
+ "type": "string",
+ "description": "This is the column in the call table that will be filtered on.\nNumber Type columns are columns where the rows store data as a number.\nMust be a valid column for the selected table.",
+ "example": "duration",
+ "enum": [
+ "duration",
+ "cost",
+ "averageModelLatency",
+ "averageVoiceLatency",
+ "averageTranscriberLatency",
+ "averageTurnLatency",
+ "averageEndpointingLatency"
+ ]
+ },
+ "operator": {
+ "type": "string",
+ "description": "This is the operator to use for the filter.\nFor number type columns, the operator must be \"=\", \">\", \"<\", \">=\", \"<=\"",
+ "example": "\"=\" or \">\" or \"<\" or \">=\" or \"<=\"",
+ "enum": [
+ "=",
+ "!=",
+ ">",
+ "<",
+ ">=",
+ "<="
+ ]
+ },
+ "value": {
+ "type": "number",
+ "description": "This is the value to filter on."
+ }
+ },
+ "required": [
+ "column",
+ "operator",
+ "value"
+ ]
+ },
+ "FilterDateTypeColumnOnCallTable": {
+ "type": "object",
+ "properties": {
+ "column": {
+ "type": "string",
+ "description": "This is the column in the call table that will be filtered on.\nDate Type columns are columns where the rows store data as a date.\nMust be a valid column for the selected table.",
+ "example": "created_at",
+ "enum": [
+ "startedAt",
+ "endedAt"
+ ]
+ },
+ "operator": {
+ "type": "string",
+ "description": "This is the operator to use for the filter.\nFor date type columns, the operator must be \"=\", \">\", \"<\", \">=\", \"<=\"",
+ "example": "\"=\" or \">\" or \"<\" or \">=\" or \"<=\"",
+ "enum": [
+ "=",
+ "!=",
+ ">",
+ "<",
+ ">=",
+ "<="
+ ]
+ },
+ "value": {
+ "type": "string",
+ "description": "This is the value to filter on.\nMust be a valid ISO 8601 date-time string.",
+ "example": "2025-01-01T00:00:00Z"
+ }
+ },
+ "required": [
+ "column",
+ "operator",
+ "value"
+ ]
+ },
+ "FilterStructuredOutputColumnOnCallTable": {
+ "type": "object",
+ "properties": {
+ "column": {
+ "type": "string",
+ "description": "This is the column in the call table that will be filtered on.\nStructured Output Type columns are only to filter on artifact.structuredOutputs[OutputID] column.",
+ "example": "artifact.structuredOutputs[OutputID]",
+ "enum": [
+ "artifact.structuredOutputs[OutputID]"
+ ]
+ },
+ "operator": {
+ "type": "string",
+ "description": "This is the operator to use for the filter.\nThe operator depends on the value type of the structured output.\nIf the structured output is a string or boolean, the operator must be \"=\", \"!=\"\nIf the structured output is a number, the operator must be \"=\", \">\", \"<\", \">=\", \"<=\"\nIf the structured output is an array, the operator must be \"in\" or \"not_in\"",
+ "example": "\"=\" or \">\" or \"<\" or \"in\" or \"not_in\"",
+ "enum": [
+ "=",
+ "!=",
+ ">",
+ "<",
+ ">=",
+ "<=",
+ "in",
+ "not_in",
+ "contains",
+ "not_contains",
+ "is_empty",
+ "is_not_empty"
+ ]
+ },
+ "value": {
+ "type": "object",
+ "description": "This is the value to filter on.\nThe value type depends on the structured output type being filtered."
+ }
+ },
+ "required": [
+ "column",
+ "operator",
+ "value"
+ ]
+ },
+ "FilterStringArrayTypeColumnOnCallTable": {
+ "type": "object",
+ "properties": {
+ "column": {
+ "type": "string",
+ "description": "This is the column in the call table that will be filtered on.\nString Array Type columns are the same as String Type columns, but provides the ability to filter on multiple values provided as an array.\nMust be a valid column for the selected table.",
+ "example": "assistant_id",
+ "enum": [
+ "assistantId",
+ "workflowId",
+ "squadId",
+ "phoneNumberId",
+ "type",
+ "customerNumber",
+ "status",
+ "endedReason",
+ "forwardedPhoneNumber",
+ "campaignId"
+ ]
+ },
+ "operator": {
"type": "string",
- "maxLength": 80
+ "description": "This is the operator to use for the filter.\nThe operator must be `in` or `not_in`.",
+ "example": "\"in\" or \"not_in\"",
+ "enum": [
+ "in",
+ "not_in",
+ "is_empty",
+ "is_not_empty"
+ ]
},
- "edges": {
+ "value": {
+ "description": "These are the values to filter on.",
"type": "array",
"items": {
- "$ref": "#/components/schemas/Edge"
+ "type": "string"
}
- },
- "globalPrompt": {
+ }
+ },
+ "required": [
+ "column",
+ "operator",
+ "value"
+ ]
+ },
+ "FilterNumberArrayTypeColumnOnCallTable": {
+ "type": "object",
+ "properties": {
+ "column": {
"type": "string",
- "maxLength": 5000
- },
- "server": {
- "description": "This is where Vapi will send webhooks. You can find all webhooks available along with their shape in ServerMessage schema.\n\nThe order of precedence is:\n\n1. tool.server\n2. workflow.server / assistant.server\n3. phoneNumber.server\n4. org.server",
- "allOf": [
- {
- "$ref": "#/components/schemas/Server"
- }
+ "description": "This is the column in the call table that will be filtered on.\nNumber Array Type columns are the same as Number Type columns, but provides the ability to filter on multiple values provided as an array.\nMust be a valid column for the selected table.",
+ "example": "duration",
+ "enum": [
+ "duration",
+ "cost",
+ "averageModelLatency",
+ "averageVoiceLatency",
+ "averageTranscriberLatency",
+ "averageTurnLatency",
+ "averageEndpointingLatency"
]
},
- "compliancePlan": {
- "description": "This is the compliance plan for the workflow. It allows you to configure HIPAA and other compliance settings.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CompliancePlan"
- }
+ "operator": {
+ "type": "string",
+ "description": "This is the operator to use for the filter.\nThe operator must be `in` or `not_in`.",
+ "example": "\"in\" or \"not_in\"",
+ "enum": [
+ "in",
+ "not_in",
+ "is_empty",
+ "is_not_empty"
]
},
- "analysisPlan": {
- "description": "This is the plan for analysis of workflow's calls. Stored in `call.analysis`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/AnalysisPlan"
- }
- ]
+ "value": {
+ "description": "This is the value to filter on.",
+ "type": "array",
+ "items": {
+ "type": "number"
+ }
+ }
+ },
+ "required": [
+ "column",
+ "operator",
+ "value"
+ ]
+ },
+ "EventsTableStringCondition": {
+ "type": "object",
+ "properties": {
+ "column": {
+ "type": "string",
+ "description": "The string field name from the event data",
+ "example": "provider"
},
- "artifactPlan": {
- "description": "This is the plan for artifacts generated during workflow's calls. Stored in `call.artifact`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ArtifactPlan"
- }
+ "operator": {
+ "type": "string",
+ "description": "String comparison operator",
+ "example": "=",
+ "enum": [
+ "=",
+ "!=",
+ "contains",
+ "notContains"
]
},
- "startSpeakingPlan": {
- "description": "This is the plan for when the workflow nodes should start talking.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to start talking after the customer is done speaking.\n- The assistant is too fast to start talking after the customer is done speaking.\n- The assistant is so fast that it's actually interrupting the customer.",
- "allOf": [
- {
- "$ref": "#/components/schemas/StartSpeakingPlan"
- }
- ]
+ "value": {
+ "type": "string",
+ "description": "The string value to compare",
+ "example": "openai"
+ }
+ },
+ "required": [
+ "column",
+ "operator",
+ "value"
+ ]
+ },
+ "EventsTableNumberCondition": {
+ "type": "object",
+ "properties": {
+ "column": {
+ "type": "string",
+ "description": "The number field name from the event data",
+ "example": "latency"
},
- "stopSpeakingPlan": {
- "description": "This is the plan for when workflow nodes should stop talking on customer interruption.\n\nYou should configure this if you're running into these issues:\n- The assistant is too slow to recognize customer's interruption.\n- The assistant is too fast to recognize customer's interruption.\n- The assistant is getting interrupted by phrases that are just acknowledgments.\n- The assistant is getting interrupted by background noises.\n- The assistant is not properly stopping -- it starts talking right after getting interrupted.",
- "allOf": [
- {
- "$ref": "#/components/schemas/StopSpeakingPlan"
- }
+ "operator": {
+ "type": "string",
+ "description": "Number comparison operator",
+ "example": ">=",
+ "enum": [
+ "=",
+ "!=",
+ ">",
+ ">=",
+ "<",
+ "<="
]
},
- "monitorPlan": {
- "description": "This is the plan for real-time monitoring of the workflow's calls.\n\nUsage:\n- To enable live listening of the workflow's calls, set `monitorPlan.listenEnabled` to `true`.\n- To enable live control of the workflow's calls, set `monitorPlan.controlEnabled` to `true`.",
- "allOf": [
- {
- "$ref": "#/components/schemas/MonitorPlan"
- }
- ]
+ "value": {
+ "type": "number",
+ "description": "The number value to compare",
+ "example": 1000
+ }
+ },
+ "required": [
+ "column",
+ "operator",
+ "value"
+ ]
+ },
+ "EventsTableBooleanCondition": {
+ "type": "object",
+ "properties": {
+ "column": {
+ "type": "string",
+ "description": "The boolean field name from the event data",
+ "example": "success"
},
- "backgroundSpeechDenoisingPlan": {
- "description": "This enables filtering of noise and background speech while the user is talking.\n\nFeatures:\n- Smart denoising using Krisp\n- Fourier denoising\n\nBoth can be used together. Order of precedence:\n- Smart denoising\n- Fourier denoising",
- "allOf": [
- {
- "$ref": "#/components/schemas/BackgroundSpeechDenoisingPlan"
- }
+ "operator": {
+ "type": "string",
+ "description": "Boolean comparison operator",
+ "example": "=",
+ "enum": [
+ "="
]
},
- "credentialIds": {
- "description": "These are the credentials that will be used for the workflow calls. By default, all the credentials are available for use in the call but you can provide a subset using this.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "keypadInputPlan": {
- "description": "This is the plan for keypad input handling during workflow calls.",
- "allOf": [
- {
- "$ref": "#/components/schemas/KeypadInputPlan"
- }
- ]
+ "value": {
+ "type": "boolean",
+ "description": "The boolean value to compare",
+ "example": true
}
- }
+ },
+ "required": [
+ "column",
+ "operator",
+ "value"
+ ]
},
- "Squad": {
+ "BarInsightFromCallTable": {
"type": "object",
"properties": {
"name": {
"type": "string",
- "description": "This is the name of the squad."
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
},
- "members": {
- "description": "This is the list of assistants that make up the squad.\n\nThe call will start with the first assistant in the list.",
+ "type": {
+ "type": "string",
+ "description": "This is the type of the Insight.\nIt is required to be `bar` to create a bar insight.",
+ "enum": [
+ "bar"
+ ]
+ },
+ "formulas": {
"type": "array",
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variable.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the\n\nYou can also use the query names as the variable in the formula.",
"items": {
- "$ref": "#/components/schemas/SquadMemberDTO"
+ "$ref": "#/components/schemas/InsightFormula"
}
},
- "membersOverrides": {
- "description": "This can be used to override all the assistants' settings and provide values for their template variables.\n\nBoth `membersOverrides` and `members[n].assistantOverrides` can be used together. First, `members[n].assistantOverrides` is applied. Then, `membersOverrides` is applied as a global override.",
+ "metadata": {
+ "description": "This is the metadata for the insight.",
"allOf": [
{
- "$ref": "#/components/schemas/AssistantOverrides"
+ "$ref": "#/components/schemas/BarInsightMetadata"
}
]
},
- "id": {
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRangeWithStep"
+ },
+ "groupBy": {
"type": "string",
- "description": "This is the unique identifier for the squad."
+ "description": "This is the group by column for the insight when table is `call`.\nThese are the columns to group the results by.\nAll results are grouped by the time range step by default.",
+ "example": [
+ "assistant_id"
+ ],
+ "enum": [
+ "assistantId",
+ "workflowId",
+ "squadId",
+ "phoneNumberId",
+ "type",
+ "endedReason",
+ "customerNumber",
+ "campaignId",
+ "artifact.structuredOutputs[OutputID]"
+ ]
},
- "orgId": {
+ "queries": {
+ "type": "array",
+ "description": "These are the queries to run to generate the insight.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
+ },
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
+ },
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
+ },
+ {
+ "$ref": "#/components/schemas/JSONQueryOnEventsTable",
+ "title": "JSONQueryOnEventsTable"
+ }
+ ]
+ }
+ }
+ },
+ "required": [
+ "type",
+ "queries"
+ ]
+ },
+ "PieInsightFromCallTable": {
+ "type": "object",
+ "properties": {
+ "name": {
"type": "string",
- "description": "This is the unique identifier for the org that this squad belongs to."
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
},
- "createdAt": {
- "format": "date-time",
+ "type": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the squad was created."
+ "description": "This is the type of the Insight.\nIt is required to be `pie` to create a pie insight.",
+ "enum": [
+ "pie"
+ ]
},
- "updatedAt": {
- "format": "date-time",
+ "formulas": {
+ "type": "array",
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variable.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the\n\nYou can also use the query names as the variable in the formula.",
+ "items": {
+ "$ref": "#/components/schemas/InsightFormula"
+ }
+ },
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRange"
+ },
+ "groupBy": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the squad was last updated."
+ "description": "This is the group by column for the insight when table is `call`.\nThese are the columns to group the results by.\nAll results are grouped by the time range step by default.",
+ "example": [
+ "assistant_id"
+ ],
+ "enum": [
+ "assistantId",
+ "workflowId",
+ "squadId",
+ "phoneNumberId",
+ "type",
+ "endedReason",
+ "customerNumber",
+ "campaignId",
+ "artifact.structuredOutputs[OutputID]"
+ ]
+ },
+ "queries": {
+ "type": "array",
+ "description": "These are the queries to run to generate the insight.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
+ },
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
+ },
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
+ }
+ ]
+ }
}
},
"required": [
- "members",
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
+ "type",
+ "queries"
]
},
- "UpdateSquadDTO": {
+ "LineInsightFromCallTable": {
"type": "object",
"properties": {
"name": {
"type": "string",
- "description": "This is the name of the squad."
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
},
- "members": {
- "description": "This is the list of assistants that make up the squad.\n\nThe call will start with the first assistant in the list.",
+ "type": {
+ "type": "string",
+ "description": "This is the type of the Insight.\nIt is required to be `line` to create a line insight.",
+ "enum": [
+ "line"
+ ]
+ },
+ "formulas": {
"type": "array",
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variable.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the\n\nYou can also use the query names as the variable in the formula.",
"items": {
- "$ref": "#/components/schemas/SquadMemberDTO"
+ "$ref": "#/components/schemas/InsightFormula"
}
},
- "membersOverrides": {
- "description": "This can be used to override all the assistants' settings and provide values for their template variables.\n\nBoth `membersOverrides` and `members[n].assistantOverrides` can be used together. First, `members[n].assistantOverrides` is applied. Then, `membersOverrides` is applied as a global override.",
+ "metadata": {
+ "description": "This is the metadata for the insight.",
"allOf": [
{
- "$ref": "#/components/schemas/AssistantOverrides"
+ "$ref": "#/components/schemas/LineInsightMetadata"
}
]
+ },
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRangeWithStep"
+ },
+ "groupBy": {
+ "type": "string",
+ "description": "This is the group by column for the insight when table is `call`.\nThese are the columns to group the results by.\nAll results are grouped by the time range step by default.",
+ "example": [
+ "assistant_id"
+ ],
+ "enum": [
+ "assistantId",
+ "workflowId",
+ "squadId",
+ "phoneNumberId",
+ "type",
+ "endedReason",
+ "customerNumber",
+ "campaignId",
+ "artifact.structuredOutputs[OutputID]"
+ ]
+ },
+ "queries": {
+ "type": "array",
+ "description": "These are the queries to run to generate the insight.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
+ },
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
+ },
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
+ }
+ ]
+ }
}
},
"required": [
- "members"
+ "type",
+ "queries"
]
},
- "TesterPlan": {
+ "TextInsightFromCallTable": {
"type": "object",
"properties": {
- "assistant": {
- "description": "Pass a transient assistant to use for the test assistant.\n\nMake sure to write a detailed system prompt for a test assistant, and use the {{test.script}} variable to access the test script.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateAssistantDTO"
- }
- ]
- },
- "assistantId": {
+ "name": {
"type": "string",
- "description": "Pass an assistant id that can be access\n\nMake sure to write a detailed system prompt for the test assistant, and use the {{test.script}} variable to access the test script."
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
},
- "assistantOverrides": {
- "description": "Add any assistant overrides to the test assistant.\n\nOne use case is if you want to pass custom variables into the test using variableValues, that you can then access in the script\nand rubric using {{varName}}.",
- "allOf": [
- {
- "$ref": "#/components/schemas/AssistantOverrides"
- }
+ "type": {
+ "type": "string",
+ "description": "This is the type of the Insight.\nIt is required to be `text` to create a text insight.",
+ "enum": [
+ "text"
]
+ },
+ "formula": {
+ "type": "object",
+ "description": "Formulas are mathematical expressions applied on the data returned by the queries to transform them before being used to create the insight.\nThe formulas needs to be a valid mathematical expression, supported by MathJS - https://mathjs.org/docs/expressions/syntax.html\nA formula is created by using the query names as the variable.\nThe formulas must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nFor example, if you have 2 queries, 'Was Booking Made' and 'Average Call Duration', you can create a formula like this:\n```\n{{['Query 1']}} / {{['Query 2']}} * 100\n```\n\n```\n({{[Query 1]}} * 10) + {{[Query 2]}}\n```\nThis will take the\n\nYou can also use the query names as the variable in the formula.",
+ "items": {
+ "$ref": "#/components/schemas/InsightFormula"
+ }
+ },
+ "timeRange": {
+ "$ref": "#/components/schemas/InsightTimeRange"
+ },
+ "queries": {
+ "type": "array",
+ "description": "These are the queries to run to generate the insight.\nFor Text Insights, we only allow a single query, or require a formula if multiple queries are provided",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStringTypeColumn",
+ "title": "JSONQueryOnCallTableWithStringTypeColumn"
+ },
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithNumberTypeColumn",
+ "title": "JSONQueryOnCallTableWithNumberTypeColumn"
+ },
+ {
+ "$ref": "#/components/schemas/JSONQueryOnCallTableWithStructuredOutputColumn",
+ "title": "JSONQueryOnCallTableWithStructuredOutputColumn"
+ }
+ ]
+ }
}
- }
+ },
+ "required": [
+ "type",
+ "queries"
+ ]
},
- "TestSuitePhoneNumber": {
+ "InsightFormula": {
"type": "object",
"properties": {
- "provider": {
+ "name": {
"type": "string",
- "description": "This is the provider of the phone number.",
- "enum": [
- "test-suite"
- ]
+ "description": "This is the name of the formula.\nIt will be used to label the formula in the insight board on the UI.",
+ "example": "Booking Rate",
+ "minLength": 1,
+ "maxLength": 255
},
- "number": {
+ "formula": {
"type": "string",
- "description": "This is the phone number that is being tested.",
- "maxLength": 50
+ "description": "This is the formula to calculate the insight from the queries.\nThe formula needs to be a valid mathematical expression.\nThe formula must contain at least one query name in the LiquidJS format {{query_name}} or {{['query name']}} which will be substituted with the query result.\nAny MathJS formula is allowed - https://mathjs.org/docs/expressions/syntax.html\n\nCommon valid math operations are +, -, *, /, %",
+ "minLength": 1,
+ "maxLength": 1000
}
},
"required": [
- "provider",
- "number"
+ "formula"
]
},
- "TargetPlan": {
+ "InsightRunFormatPlan": {
"type": "object",
"properties": {
- "phoneNumberId": {
+ "format": {
"type": "string",
- "description": "This is the phone number that is being tested.\nDuring the actual test, it'll be called and the assistant attached to it will pick up and be tested.\nTo test an assistant directly, send assistantId instead."
- },
- "phoneNumber": {
- "description": "This can be any phone number (even not on Vapi).\nDuring the actual test, it'll be called.\nTo test a Vapi number, send phoneNumberId. To test an assistant directly, send assistantId instead.",
- "allOf": [
- {
- "$ref": "#/components/schemas/TestSuitePhoneNumber"
- }
+ "description": "This is the format of the data to return.\nIf not provided, defaults to \"raw\".\nRaw provides the data as fetched from the database, with formulas evaluated.\nRecharts provides the data in a format that can is ready to be used by recharts.js to render charts.",
+ "example": "raw",
+ "enum": [
+ "raw",
+ "recharts"
]
+ }
+ }
+ },
+ "InsightRunDTO": {
+ "type": "object",
+ "properties": {
+ "formatPlan": {
+ "$ref": "#/components/schemas/InsightRunFormatPlan"
},
- "assistantId": {
- "type": "string",
- "description": "This is the assistant being tested.\nDuring the actual test, it'll invoked directly.\nTo test the assistant over phone number, send phoneNumberId instead."
- },
- "assistantOverrides": {
- "description": "This is the assistant overrides applied to assistantId before it is tested.",
+ "timeRangeOverride": {
+ "description": "This is the optional time range override for the insight.\nIf provided, overrides every field in the insight's timeRange.\nIf this is provided with missing fields, defaults will be used, not the insight's timeRange.\nstart default - \"-7d\"\nend default - \"now\"\nstep default - \"day\"\nFor Pie and Text Insights, step will be ignored even if provided.",
+ "example": "{ start: \"2025-01-01\", end: \"2025-01-07\", step: \"day\" }",
"allOf": [
{
- "$ref": "#/components/schemas/AssistantOverrides"
+ "$ref": "#/components/schemas/InsightTimeRangeWithStep"
}
]
}
}
},
- "TestSuite": {
+ "InsightRunResponse": {
"type": "object",
"properties": {
"id": {
- "type": "string",
- "description": "This is the unique identifier for the test suite."
+ "type": "string"
+ },
+ "insightId": {
+ "type": "string"
},
"orgId": {
- "type": "string",
- "description": "This is the unique identifier for the org that this test suite belongs to."
+ "type": "string"
},
"createdAt": {
"format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the test suite was created."
+ "type": "string"
},
"updatedAt": {
"format": "date-time",
+ "type": "string"
+ }
+ },
+ "required": [
+ "id",
+ "insightId",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "Insight": {
+ "type": "object",
+ "properties": {
+ "name": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the test suite was last updated."
+ "description": "This is the name of the Insight.",
+ "minLength": 1,
+ "maxLength": 255
},
- "name": {
+ "type": {
"type": "string",
- "description": "This is the name of the test suite.",
- "maxLength": 80
+ "description": "This is the type of the Insight.",
+ "enum": [
+ "bar",
+ "line",
+ "pie",
+ "text"
+ ]
},
- "phoneNumberId": {
+ "id": {
"type": "string",
- "description": "This is the phone number ID associated with this test suite.",
- "deprecated": true
+ "description": "This is the unique identifier for the Insight."
},
- "testerPlan": {
- "description": "Override the default tester plan by providing custom assistant configuration for the test agent.\n\nWe recommend only using this if you are confident, as we have already set sensible defaults on the tester plan.",
- "allOf": [
- {
- "$ref": "#/components/schemas/TesterPlan"
- }
- ]
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this Insight belongs to."
},
- "targetPlan": {
- "description": "These are the configuration for the assistant / phone number that is being tested.",
- "allOf": [
- {
- "$ref": "#/components/schemas/TargetPlan"
- }
- ]
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the Insight was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the Insight was last updated."
}
},
"required": [
+ "type",
"id",
"orgId",
"createdAt",
"updatedAt"
]
},
- "TestSuitesPaginatedResponse": {
+ "InsightPaginatedResponse": {
"type": "object",
"properties": {
"results": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/TestSuite"
+ "$ref": "#/components/schemas/Insight"
}
},
"metadata": {
@@ -38123,901 +48878,1327 @@
"metadata"
]
},
- "CreateTestSuiteDto": {
+ "CreateEvalDTO": {
"type": "object",
"properties": {
- "name": {
- "type": "string",
- "description": "This is the name of the test suite.",
- "maxLength": 80
- },
- "phoneNumberId": {
- "type": "string",
- "description": "This is the phone number ID associated with this test suite.",
- "deprecated": true
- },
- "testerPlan": {
- "description": "Override the default tester plan by providing custom assistant configuration for the test agent.\n\nWe recommend only using this if you are confident, as we have already set sensible defaults on the tester plan.",
- "allOf": [
- {
- "$ref": "#/components/schemas/TesterPlan"
- }
- ]
+ "messages": {
+ "type": "array",
+ "description": "This is the mock conversation that will be used to evaluate the flow of the conversation.\n\nMock Messages are used to simulate the flow of the conversation\n\nEvaluation Messages are used as checkpoints in the flow where the model's response to previous conversation needs to be evaluated to check the content and tool calls",
+ "example": "[{ role: \"user\", content: \"Hello, how are you?\" }, { role: \"assistant\", judgePlan: { type: \"exact\", content: \"I am good, thank you!\" } }]",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ChatEvalAssistantMessageMock",
+ "title": "ChatEvalAssistantMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalSystemMessageMock",
+ "title": "ChatEvalSystemMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalToolResponseMessageMock",
+ "title": "ChatEvalToolResponseMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalToolResponseMessageEvaluation",
+ "title": "ChatEvalToolResponseMessageEvaluation"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalUserMessageMock",
+ "title": "ChatEvalUserMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalAssistantMessageEvaluation",
+ "title": "ChatEvalAssistantMessageEvaluation"
+ }
+ ]
+ }
},
- "targetPlan": {
- "description": "These are the configuration for the assistant / phone number that is being tested.",
- "allOf": [
- {
- "$ref": "#/components/schemas/TargetPlan"
- }
- ]
- }
- }
- },
- "UpdateTestSuiteDto": {
- "type": "object",
- "properties": {
"name": {
"type": "string",
- "description": "This is the name of the test suite.",
- "maxLength": 80
- },
- "phoneNumberId": {
- "type": "string",
- "description": "This is the phone number ID associated with this test suite.",
- "deprecated": true
+ "description": "This is the name of the eval.\nIt helps identify what the eval is checking for.",
+ "example": "Verified User Flow Eval",
+ "minLength": 1,
+ "maxLength": 80
},
- "testerPlan": {
- "description": "Override the default tester plan by providing custom assistant configuration for the test agent.\n\nWe recommend only using this if you are confident, as we have already set sensible defaults on the tester plan.",
- "allOf": [
- {
- "$ref": "#/components/schemas/TesterPlan"
- }
- ]
+ "description": {
+ "type": "string",
+ "description": "This is the description of the eval.\nThis helps describe the eval and its purpose in detail. It will not be used to evaluate the flow of the conversation.",
+ "example": "This eval checks if the user flow is verified.",
+ "maxLength": 500
},
- "targetPlan": {
- "description": "These are the configuration for the assistant / phone number that is being tested.",
- "allOf": [
- {
- "$ref": "#/components/schemas/TargetPlan"
- }
+ "type": {
+ "type": "string",
+ "description": "This is the type of the eval.\nCurrently it is fixed to `chat.mockConversation`.",
+ "example": "chat.mockConversation",
+ "enum": [
+ "chat.mockConversation"
]
}
- }
+ },
+ "required": [
+ "messages",
+ "type"
+ ]
},
- "TestSuiteTestVoice": {
+ "Eval": {
"type": "object",
"properties": {
- "scorers": {
+ "messages": {
"type": "array",
- "description": "These are the scorers used to evaluate the test.",
+ "description": "This is the mock conversation that will be used to evaluate the flow of the conversation.\n\nMock Messages are used to simulate the flow of the conversation\n\nEvaluation Messages are used as checkpoints in the flow where the model's response to previous conversation needs to be evaluated to check the content and tool calls",
+ "example": "[{ role: \"user\", content: \"Hello, how are you?\" }, { role: \"assistant\", judgePlan: { type: \"exact\", content: \"I am good, thank you!\" } }]",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/TestSuiteTestScorerAI",
- "title": "AI"
+ "$ref": "#/components/schemas/ChatEvalAssistantMessageMock",
+ "title": "ChatEvalAssistantMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalSystemMessageMock",
+ "title": "ChatEvalSystemMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalToolResponseMessageMock",
+ "title": "ChatEvalToolResponseMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalToolResponseMessageEvaluation",
+ "title": "ChatEvalToolResponseMessageEvaluation"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalUserMessageMock",
+ "title": "ChatEvalUserMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalAssistantMessageEvaluation",
+ "title": "ChatEvalAssistantMessageEvaluation"
}
]
}
},
- "type": {
- "type": "string",
- "description": "This is the type of the test, which must be voice.",
- "enum": [
- "voice"
- ],
- "maxLength": 100
- },
"id": {
- "type": "string",
- "description": "This is the unique identifier for the test."
- },
- "testSuiteId": {
- "type": "string",
- "description": "This is the unique identifier for the test suite this test belongs to."
+ "type": "string"
},
"orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization this test belongs to."
+ "type": "string"
},
"createdAt": {
"format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the test was created."
+ "type": "string"
},
"updatedAt": {
"format": "date-time",
- "type": "string",
- "description": "This is the ISO 8601 date-time string of when the test was last updated."
+ "type": "string"
},
"name": {
"type": "string",
- "description": "This is the name of the test.",
+ "description": "This is the name of the eval.\nIt helps identify what the eval is checking for.",
+ "example": "Verified User Flow Eval",
+ "minLength": 1,
"maxLength": 80
},
- "script": {
+ "description": {
"type": "string",
- "description": "This is the script to be used for the voice test.",
- "maxLength": 10000
+ "description": "This is the description of the eval.\nThis helps describe the eval and its purpose in detail. It will not be used to evaluate the flow of the conversation.",
+ "example": "This eval checks if the user flow is verified.",
+ "maxLength": 500
},
- "numAttempts": {
- "type": "number",
- "description": "This is the number of attempts allowed for the test.",
- "minimum": 1,
- "maximum": 10
+ "type": {
+ "type": "string",
+ "description": "This is the type of the eval.\nCurrently it is fixed to `chat.mockConversation`.",
+ "example": "chat.mockConversation",
+ "enum": [
+ "chat.mockConversation"
+ ]
}
},
"required": [
- "scorers",
- "type",
+ "messages",
"id",
- "testSuiteId",
"orgId",
"createdAt",
"updatedAt",
- "script"
+ "type"
]
},
- "TestSuiteTestChat": {
+ "EvalModelListOptions": {
"type": "object",
"properties": {
- "scorers": {
+ "provider": {
+ "type": "string",
+ "description": "This is the provider of the model.",
+ "enum": [
+ "openai",
+ "anthropic",
+ "google",
+ "groq",
+ "custom-llm"
+ ]
+ }
+ },
+ "required": [
+ "provider"
+ ]
+ },
+ "EvalUserEditable": {
+ "type": "object",
+ "properties": {
+ "messages": {
"type": "array",
- "description": "These are the scorers used to evaluate the test.",
+ "description": "This is the mock conversation that will be used to evaluate the flow of the conversation.\n\nMock Messages are used to simulate the flow of the conversation\n\nEvaluation Messages are used as checkpoints in the flow where the model's response to previous conversation needs to be evaluated to check the content and tool calls",
+ "example": "[{ role: \"user\", content: \"Hello, how are you?\" }, { role: \"assistant\", judgePlan: { type: \"exact\", content: \"I am good, thank you!\" } }]",
"items": {
"oneOf": [
{
- "$ref": "#/components/schemas/TestSuiteTestScorerAI",
- "title": "AI"
+ "$ref": "#/components/schemas/ChatEvalAssistantMessageMock",
+ "title": "ChatEvalAssistantMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalSystemMessageMock",
+ "title": "ChatEvalSystemMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalToolResponseMessageMock",
+ "title": "ChatEvalToolResponseMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalToolResponseMessageEvaluation",
+ "title": "ChatEvalToolResponseMessageEvaluation"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalUserMessageMock",
+ "title": "ChatEvalUserMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalAssistantMessageEvaluation",
+ "title": "ChatEvalAssistantMessageEvaluation"
}
]
}
},
+ "name": {
+ "type": "string",
+ "description": "This is the name of the eval.\nIt helps identify what the eval is checking for.",
+ "example": "Verified User Flow Eval",
+ "minLength": 1,
+ "maxLength": 80
+ },
+ "description": {
+ "type": "string",
+ "description": "This is the description of the eval.\nThis helps describe the eval and its purpose in detail. It will not be used to evaluate the flow of the conversation.",
+ "example": "This eval checks if the user flow is verified.",
+ "maxLength": 500
+ },
"type": {
"type": "string",
- "description": "This is the type of the test, which must be chat.",
+ "description": "This is the type of the eval.\nCurrently it is fixed to `chat.mockConversation`.",
+ "example": "chat.mockConversation",
"enum": [
- "chat"
- ],
+ "chat.mockConversation"
+ ]
+ }
+ },
+ "required": [
+ "messages",
+ "type"
+ ]
+ },
+ "ChatEvalAssistantMessageMockToolCall": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the tool that will be called.\nIt should be one of the tools created in the organization.",
+ "example": "get_weather",
"maxLength": 100
},
- "id": {
+ "arguments": {
+ "type": "object",
+ "description": "This is the arguments that will be passed to the tool call.",
+ "example": "\"{\"city\": \"San Francisco\"}\""
+ }
+ },
+ "required": [
+ "name"
+ ]
+ },
+ "ChatEvalAssistantMessageMock": {
+ "type": "object",
+ "properties": {
+ "role": {
"type": "string",
- "description": "This is the unique identifier for the test."
+ "enum": [
+ "assistant"
+ ],
+ "description": "This is the role of the message author.\nFor a mock assistant message, the role is always 'assistant'\n@default 'assistant'",
+ "default": "assistant"
},
- "testSuiteId": {
+ "content": {
"type": "string",
- "description": "This is the unique identifier for the test suite this test belongs to."
+ "description": "This is the content of the assistant message.\nThis is the message that the assistant would have sent.",
+ "example": "The weather in San Francisco is sunny.",
+ "maxLength": 1000
},
- "orgId": {
+ "toolCalls": {
+ "description": "This is the tool calls that will be made by the assistant.",
+ "example": "[{ name: \"get_weather\", arguments: { city: \"San Francisco\" } }]",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ChatEvalAssistantMessageMockToolCall"
+ }
+ }
+ },
+ "required": [
+ "role"
+ ]
+ },
+ "ChatEvalSystemMessageMock": {
+ "type": "object",
+ "properties": {
+ "role": {
"type": "string",
- "description": "This is the unique identifier for the organization this test belongs to."
+ "enum": [
+ "system"
+ ],
+ "description": "This is the role of the message author.\nFor a mock system message, the role is always 'system'\n@default 'system'",
+ "default": "system"
},
- "createdAt": {
- "format": "date-time",
+ "content": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the test was created."
- },
- "updatedAt": {
- "format": "date-time",
+ "description": "This is the content of the system message that would have been added in the middle of the conversation.\nDo not include the assistant prompt as a part of this message. It will automatically be fetched during runtime.",
+ "example": "You are a helpful assistant."
+ }
+ },
+ "required": [
+ "role",
+ "content"
+ ]
+ },
+ "ChatEvalToolResponseMessageMock": {
+ "type": "object",
+ "properties": {
+ "role": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the test was last updated."
+ "enum": [
+ "tool"
+ ],
+ "description": "This is the role of the message author.\nFor a mock tool response message, the role is always 'tool'\n@default 'tool'",
+ "default": "tool"
},
- "name": {
+ "content": {
"type": "string",
- "description": "This is the name of the test.",
- "maxLength": 80
- },
- "script": {
+ "description": "This is the content of the tool response message. JSON Objects should be stringified.",
+ "examples": [
+ "The weather in San Francisco is sunny.",
+ "{weather: sunny}"
+ ]
+ }
+ },
+ "required": [
+ "role",
+ "content"
+ ]
+ },
+ "ChatEvalUserMessageMock": {
+ "type": "object",
+ "properties": {
+ "role": {
"type": "string",
- "description": "This is the script to be used for the chat test.",
- "maxLength": 10000
+ "enum": [
+ "user"
+ ],
+ "description": "This is the role of the message author.\nFor a mock user message, the role is always 'user'\n@default 'user'",
+ "default": "user"
},
- "numAttempts": {
- "type": "number",
- "description": "This is the number of attempts allowed for the test.",
- "minimum": 1,
- "maximum": 10
+ "content": {
+ "type": "string",
+ "description": "This is the content of the user message.\nThis is the message that the user would have sent.",
+ "example": "Hello, how are you?",
+ "maxLength": 1000
}
},
"required": [
- "scorers",
- "type",
- "id",
- "testSuiteId",
- "orgId",
- "createdAt",
- "updatedAt",
- "script"
+ "role",
+ "content"
]
},
- "CreateTestSuiteTestVoiceDto": {
+ "AssistantMessageEvaluationContinuePlan": {
"type": "object",
"properties": {
- "scorers": {
+ "exitOnFailureEnabled": {
+ "type": "boolean",
+ "description": "This is whether the evaluation should exit if the assistant message evaluates to false.\nBy default, it is false and the evaluation will continue.\n@default false"
+ },
+ "contentOverride": {
+ "type": "string",
+ "description": "This is the content that will be used in the conversation for this assistant turn moving forward if provided.\nIt will override the content received from the model.",
+ "example": "The weather in San Francisco is sunny.",
+ "maxLength": 1000
+ },
+ "toolCallsOverride": {
+ "description": "This is the tool calls that will be used in the conversation for this assistant turn moving forward if provided.\nIt will override the tool calls received from the model.",
+ "example": "[{ name: \"get_weather\", arguments: { city: \"San Francisco\" } }]",
"type": "array",
- "description": "These are the scorers used to evaluate the test.",
"items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TestSuiteTestScorerAI",
- "title": "AI"
- }
- ]
+ "$ref": "#/components/schemas/ChatEvalAssistantMessageMockToolCall"
}
- },
- "type": {
+ }
+ }
+ },
+ "ChatEvalAssistantMessageEvaluation": {
+ "type": "object",
+ "properties": {
+ "role": {
"type": "string",
- "description": "This is the type of the test, which must be voice.",
"enum": [
- "voice"
+ "assistant"
],
- "maxLength": 100
+ "description": "This is the role of the message author.\nFor an assistant message evaluation, the role is always 'assistant'\n@default 'assistant'",
+ "default": "assistant"
},
- "script": {
+ "judgePlan": {
+ "description": "This is the judge plan that instructs how to evaluate the assistant message.\nThe assistant message can be evaluated against fixed content (exact match or RegEx) or with an LLM-as-judge by defining the evaluation criteria in a prompt.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AssistantMessageJudgePlanExact",
+ "title": "AssistantMessageJudgePlanExact"
+ },
+ {
+ "$ref": "#/components/schemas/AssistantMessageJudgePlanRegex",
+ "title": "AssistantMessageJudgePlanRegex"
+ },
+ {
+ "$ref": "#/components/schemas/AssistantMessageJudgePlanAI",
+ "title": "AssistantMessageJudgePlanAI"
+ }
+ ]
+ },
+ "continuePlan": {
+ "description": "This is the plan for how the overall evaluation will proceed after the assistant message is evaluated.\nThis lets you configure whether to stop the evaluation if this message fails, and whether to override any content for future turns",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantMessageEvaluationContinuePlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "role",
+ "judgePlan"
+ ]
+ },
+ "EvalOpenAIModel": {
+ "type": "object",
+ "properties": {
+ "provider": {
"type": "string",
- "description": "This is the script to be used for the voice test.",
- "maxLength": 10000
+ "description": "This is the provider of the model (`openai`).",
+ "enum": [
+ "openai"
+ ]
},
- "numAttempts": {
+ "model": {
+ "type": "string",
+ "description": "This is the OpenAI model that will be used.\n\nWhen using Vapi OpenAI or your own Azure Credentials, you have the option to specify the region for the selected model. This shouldn't be specified unless you have a specific reason to do so. Vapi will automatically find the fastest region that makes sense.\nThis is helpful when you are required to comply with Data Residency rules. Learn more about Azure regions here https://azure.microsoft.com/en-us/explore/global-infrastructure/data-residency/.",
+ "maxLength": 100,
+ "enum": [
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.2",
+ "gpt-5.2-chat-latest",
+ "gpt-5.1",
+ "gpt-5.1-chat-latest",
+ "gpt-5",
+ "gpt-5-chat-latest",
+ "gpt-5-mini",
+ "gpt-5-nano",
+ "gpt-4.1-2025-04-14",
+ "gpt-4.1-mini-2025-04-14",
+ "gpt-4.1-nano-2025-04-14",
+ "gpt-4.1",
+ "gpt-4.1-mini",
+ "gpt-4.1-nano",
+ "chatgpt-4o-latest",
+ "o3",
+ "o3-mini",
+ "o4-mini",
+ "o1-mini",
+ "o1-mini-2024-09-12",
+ "gpt-4o-mini-2024-07-18",
+ "gpt-4o-mini",
+ "gpt-4o",
+ "gpt-4o-2024-05-13",
+ "gpt-4o-2024-08-06",
+ "gpt-4o-2024-11-20",
+ "gpt-4-turbo",
+ "gpt-4-turbo-2024-04-09",
+ "gpt-4-turbo-preview",
+ "gpt-4-0125-preview",
+ "gpt-4-1106-preview",
+ "gpt-4",
+ "gpt-4-0613",
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-0125",
+ "gpt-3.5-turbo-1106",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0613",
+ "gpt-4.1-2025-04-14:westus",
+ "gpt-4.1-2025-04-14:eastus2",
+ "gpt-4.1-2025-04-14:eastus",
+ "gpt-4.1-2025-04-14:westus3",
+ "gpt-4.1-2025-04-14:northcentralus",
+ "gpt-4.1-2025-04-14:southcentralus",
+ "gpt-4.1-2025-04-14:westeurope",
+ "gpt-4.1-2025-04-14:germanywestcentral",
+ "gpt-4.1-2025-04-14:polandcentral",
+ "gpt-4.1-2025-04-14:spaincentral",
+ "gpt-4.1-mini-2025-04-14:westus",
+ "gpt-4.1-mini-2025-04-14:eastus2",
+ "gpt-4.1-mini-2025-04-14:eastus",
+ "gpt-4.1-mini-2025-04-14:westus3",
+ "gpt-4.1-mini-2025-04-14:northcentralus",
+ "gpt-4.1-mini-2025-04-14:southcentralus",
+ "gpt-4.1-mini-2025-04-14:westeurope",
+ "gpt-4.1-mini-2025-04-14:germanywestcentral",
+ "gpt-4.1-mini-2025-04-14:polandcentral",
+ "gpt-4.1-mini-2025-04-14:spaincentral",
+ "gpt-4.1-nano-2025-04-14:westus",
+ "gpt-4.1-nano-2025-04-14:eastus2",
+ "gpt-4.1-nano-2025-04-14:westus3",
+ "gpt-4.1-nano-2025-04-14:northcentralus",
+ "gpt-4.1-nano-2025-04-14:southcentralus",
+ "gpt-4o-2024-11-20:swedencentral",
+ "gpt-4o-2024-11-20:westus",
+ "gpt-4o-2024-11-20:eastus2",
+ "gpt-4o-2024-11-20:eastus",
+ "gpt-4o-2024-11-20:westus3",
+ "gpt-4o-2024-11-20:southcentralus",
+ "gpt-4o-2024-11-20:westeurope",
+ "gpt-4o-2024-11-20:germanywestcentral",
+ "gpt-4o-2024-11-20:polandcentral",
+ "gpt-4o-2024-11-20:spaincentral",
+ "gpt-4o-2024-08-06:westus",
+ "gpt-4o-2024-08-06:westus3",
+ "gpt-4o-2024-08-06:eastus",
+ "gpt-4o-2024-08-06:eastus2",
+ "gpt-4o-2024-08-06:northcentralus",
+ "gpt-4o-2024-08-06:southcentralus",
+ "gpt-4o-mini-2024-07-18:westus",
+ "gpt-4o-mini-2024-07-18:westus3",
+ "gpt-4o-mini-2024-07-18:eastus",
+ "gpt-4o-mini-2024-07-18:eastus2",
+ "gpt-4o-mini-2024-07-18:northcentralus",
+ "gpt-4o-mini-2024-07-18:southcentralus",
+ "gpt-4o-2024-05-13:eastus2",
+ "gpt-4o-2024-05-13:eastus",
+ "gpt-4o-2024-05-13:northcentralus",
+ "gpt-4o-2024-05-13:southcentralus",
+ "gpt-4o-2024-05-13:westus3",
+ "gpt-4o-2024-05-13:westus",
+ "gpt-4-turbo-2024-04-09:eastus2",
+ "gpt-4-0125-preview:eastus",
+ "gpt-4-0125-preview:northcentralus",
+ "gpt-4-0125-preview:southcentralus",
+ "gpt-4-1106-preview:australiaeast",
+ "gpt-4-1106-preview:canadaeast",
+ "gpt-4-1106-preview:france",
+ "gpt-4-1106-preview:india",
+ "gpt-4-1106-preview:norway",
+ "gpt-4-1106-preview:swedencentral",
+ "gpt-4-1106-preview:uk",
+ "gpt-4-1106-preview:westus",
+ "gpt-4-1106-preview:westus3",
+ "gpt-4-0613:canadaeast",
+ "gpt-3.5-turbo-0125:canadaeast",
+ "gpt-3.5-turbo-0125:northcentralus",
+ "gpt-3.5-turbo-0125:southcentralus",
+ "gpt-3.5-turbo-1106:canadaeast",
+ "gpt-3.5-turbo-1106:westus"
+ ]
+ },
+ "temperature": {
"type": "number",
- "description": "This is the number of attempts allowed for the test.",
- "minimum": 1,
- "maximum": 10
+ "description": "This is the temperature of the model. For LLM-as-a-judge, it's recommended to set it between 0 - 0.3 to avoid hallucinations and ensure the model judges the output correctly based on the instructions.",
+ "minimum": 0,
+ "maximum": 2
},
- "name": {
- "type": "string",
- "description": "This is the name of the test.",
- "maxLength": 80
+ "maxTokens": {
+ "type": "number",
+ "description": "This is the max tokens of the model.\nIf your Judge instructions return `true` or `false`, the response takes only 1 token (as per the OpenAI Tokenizer), and therefore it is recommended to set this to a low number to force the model to return a short response.",
+ "minimum": 50,
+ "maximum": 10000
+ },
+ "messages": {
+ "description": "These are the messages which will instruct the AI Judge on how to evaluate the assistant message.\nThe LLM-Judge must respond with \"pass\" or \"fail\" to indicate if the assistant message passes the eval.\n\nTo access the messages in the mock conversation, use the LiquidJS variable `{{messages}}`.\nThe assistant message to be evaluated will be passed as the last message in the `messages` array and can be accessed using `{{messages[-1]}}`.\n\nIt is recommended to use the system message to instruct the LLM how to evaluate the assistant message, and then use the first user message to pass the assistant message to be evaluated.",
+ "example": "{",
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
}
},
"required": [
- "scorers",
- "type",
- "script"
+ "provider",
+ "model",
+ "messages"
]
},
- "CreateTestSuiteTestChatDto": {
+ "EvalAnthropicModel": {
"type": "object",
"properties": {
- "scorers": {
+ "provider": {
+ "type": "string",
+ "description": "This is the provider of the model (`anthropic`).",
+ "enum": [
+ "anthropic"
+ ]
+ },
+ "model": {
+ "type": "string",
+ "description": "This is the specific model that will be used.",
+ "maxLength": 100,
+ "enum": [
+ "claude-3-opus-20240229",
+ "claude-3-sonnet-20240229",
+ "claude-3-haiku-20240307",
+ "claude-3-5-sonnet-20240620",
+ "claude-3-5-sonnet-20241022",
+ "claude-3-5-haiku-20241022",
+ "claude-3-7-sonnet-20250219",
+ "claude-opus-4-20250514",
+ "claude-opus-4-5-20251101",
+ "claude-opus-4-6",
+ "claude-sonnet-4-20250514",
+ "claude-sonnet-4-5-20250929",
+ "claude-sonnet-4-6",
+ "claude-haiku-4-5-20251001"
+ ]
+ },
+ "thinking": {
+ "description": "This is the optional configuration for Anthropic's thinking feature.\n\n- If provided, `maxTokens` must be greater than `thinking.budgetTokens`.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AnthropicThinkingConfig"
+ }
+ ]
+ },
+ "temperature": {
+ "type": "number",
+ "description": "This is the temperature of the model. For LLM-as-a-judge, it's recommended to set it between 0 - 0.3 to avoid hallucinations and ensure the model judges the output correctly based on the instructions.",
+ "minimum": 0,
+ "maximum": 2
+ },
+ "maxTokens": {
+ "type": "number",
+ "description": "This is the max tokens of the model.\nIf your Judge instructions return `true` or `false`, the response takes only 1 token (as per the OpenAI Tokenizer), and therefore it is recommended to set this to a low number to force the model to return a short response.",
+ "minimum": 50,
+ "maximum": 10000
+ },
+ "messages": {
+ "description": "These are the messages which will instruct the AI Judge on how to evaluate the assistant message.\nThe LLM-Judge must respond with \"pass\" or \"fail\" to indicate if the assistant message passes the eval.\n\nTo access the messages in the mock conversation, use the LiquidJS variable `{{messages}}`.\nThe assistant message to be evaluated will be passed as the last message in the `messages` array and can be accessed using `{{messages[-1]}}`.\n\nIt is recommended to use the system message to instruct the LLM how to evaluate the assistant message, and then use the first user message to pass the assistant message to be evaluated.",
+ "example": "{",
"type": "array",
- "description": "These are the scorers used to evaluate the test.",
"items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TestSuiteTestScorerAI",
- "title": "AI"
- }
- ]
+ "type": "object"
}
- },
- "type": {
+ }
+ },
+ "required": [
+ "provider",
+ "model",
+ "messages"
+ ]
+ },
+ "EvalGoogleModel": {
+ "type": "object",
+ "properties": {
+ "provider": {
"type": "string",
- "description": "This is the type of the test, which must be chat.",
+ "description": "This is the provider of the model (`google`).",
"enum": [
- "chat"
- ],
- "maxLength": 100
+ "google"
+ ]
},
- "script": {
+ "model": {
"type": "string",
- "description": "This is the script to be used for the chat test.",
- "maxLength": 10000
+ "description": "This is the name of the model. Ex. gemini-2.5-flash",
+ "maxLength": 100,
+ "enum": [
+ "gemini-3-flash-preview",
+ "gemini-2.5-pro",
+ "gemini-2.5-flash",
+ "gemini-2.5-flash-lite",
+ "gemini-2.0-flash-thinking-exp",
+ "gemini-2.0-pro-exp-02-05",
+ "gemini-2.0-flash",
+ "gemini-2.0-flash-lite",
+ "gemini-2.0-flash-exp",
+ "gemini-2.0-flash-realtime-exp",
+ "gemini-1.5-flash",
+ "gemini-1.5-flash-002",
+ "gemini-1.5-pro",
+ "gemini-1.5-pro-002",
+ "gemini-1.0-pro"
+ ]
+ },
+ "temperature": {
+ "type": "number",
+ "description": "This is the temperature of the model. For LLM-as-a-judge, it's recommended to set it between 0 - 0.3 to avoid hallucinations and ensure the model judges the output correctly based on the instructions.",
+ "minimum": 0,
+ "maximum": 2
},
- "numAttempts": {
+ "maxTokens": {
"type": "number",
- "description": "This is the number of attempts allowed for the test.",
- "minimum": 1,
- "maximum": 10
+ "description": "This is the max tokens of the model.\nIf your Judge instructions return `true` or `false` takes only 1 token (as per the OpenAI Tokenizer), and therefore is recommended to set it to a low number to force the model to return a short response.",
+ "minimum": 50,
+ "maximum": 10000
},
- "name": {
- "type": "string",
- "description": "This is the name of the test.",
- "maxLength": 80
+ "messages": {
+ "description": "These are the messages which will instruct the AI Judge on how to evaluate the assistant message.\nThe LLM-Judge must respond with \"pass\" or \"fail\" to indicate if the assistant message passes the eval.\n\nTo access the messages in the mock conversation, use the LiquidJS variable `{{messages}}`.\nThe assistant message to be evaluated will be passed as the last message in the `messages` array and can be accessed using `{{messages[-1]}}`.\n\nIt is recommended to use the system message to instruct the LLM how to evaluate the assistant message, and then use the first user message to pass the assistant message to be evaluated.",
+ "example": "{",
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
}
},
"required": [
- "scorers",
- "type",
- "script"
+ "provider",
+ "model",
+ "messages"
]
},
- "UpdateTestSuiteTestVoiceDto": {
+ "EvalGroqModel": {
"type": "object",
"properties": {
- "scorers": {
- "type": "array",
- "description": "These are the scorers used to evaluate the test.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TestSuiteTestScorerAI",
- "title": "AI"
- }
- ]
- }
- },
- "type": {
+ "provider": {
"type": "string",
- "description": "This is the type of the test, which must be voice.",
+ "description": "This is the provider of the model (`groq`).",
"enum": [
- "voice"
- ],
- "maxLength": 100
+ "groq"
+ ]
},
- "name": {
+ "model": {
"type": "string",
- "description": "This is the name of the test.",
- "maxLength": 80
+ "description": "This is the name of the model. Ex. llama-3.3-70b-versatile",
+ "maxLength": 100,
+ "enum": [
+ "openai/gpt-oss-20b",
+ "openai/gpt-oss-120b",
+ "deepseek-r1-distill-llama-70b",
+ "llama-3.3-70b-versatile",
+ "llama-3.1-405b-reasoning",
+ "llama-3.1-8b-instant",
+ "llama3-8b-8192",
+ "llama3-70b-8192",
+ "gemma2-9b-it",
+ "moonshotai/kimi-k2-instruct-0905",
+ "meta-llama/llama-4-maverick-17b-128e-instruct",
+ "meta-llama/llama-4-scout-17b-16e-instruct",
+ "mistral-saba-24b",
+ "compound-beta",
+ "compound-beta-mini"
+ ]
},
- "script": {
- "type": "string",
- "description": "This is the script to be used for the voice test.",
- "maxLength": 10000
+ "temperature": {
+ "type": "number",
+ "description": "This is the temperature of the model. For LLM-as-a-judge, it's recommended to set it between 0 - 0.3 to avoid hallucinations and ensure the model judges the output correctly based on the instructions.",
+ "minimum": 0,
+ "maximum": 2
},
- "numAttempts": {
+ "maxTokens": {
"type": "number",
- "description": "This is the number of attempts allowed for the test.",
- "minimum": 1,
- "maximum": 10
+ "description": "This is the max tokens of the model.\nIf your Judge instructions return `true` or `false`, the response takes only 1 token (as per the OpenAI Tokenizer), and therefore it is recommended to set this to a low number to force the model to return a short response.",
+ "minimum": 50,
+ "maximum": 10000
+ },
+ "messages": {
+ "description": "These are the messages which will instruct the AI Judge on how to evaluate the assistant message.\nThe LLM-Judge must respond with \"pass\" or \"fail\" to indicate if the assistant message passes the eval.\n\nTo access the messages in the mock conversation, use the LiquidJS variable `{{messages}}`.\nThe assistant message to be evaluated will be passed as the last message in the `messages` array and can be accessed using `{{messages[-1]}}`.\n\nIt is recommended to use the system message to instruct the LLM how to evaluate the assistant message, and then use the first user message to pass the assistant message to be evaluated.",
+ "example": "{",
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
}
- }
+ },
+ "required": [
+ "provider",
+ "model",
+ "messages"
+ ]
},
- "UpdateTestSuiteTestChatDto": {
+ "EvalCustomModel": {
"type": "object",
"properties": {
- "scorers": {
- "type": "array",
- "description": "These are the scorers used to evaluate the test.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TestSuiteTestScorerAI",
- "title": "AI"
- }
- ]
- }
- },
- "type": {
+ "provider": {
"type": "string",
- "description": "This is the type of the test, which must be chat.",
+ "description": "This is the provider of the model (`custom-llm`).",
"enum": [
- "chat"
- ],
- "maxLength": 100
+ "custom-llm"
+ ]
},
- "name": {
+ "url": {
"type": "string",
- "description": "This is the name of the test.",
- "maxLength": 80
+ "description": "This is the URL we'll use for the OpenAI client's `baseURL`. Ex. https://openrouter.ai/api/v1"
},
- "script": {
- "type": "string",
- "description": "This is the script to be used for the chat test.",
- "maxLength": 10000
+ "headers": {
+ "type": "object",
+ "description": "These are the headers we'll use for the OpenAI client's `headers`."
},
- "numAttempts": {
+ "timeoutSeconds": {
"type": "number",
- "description": "This is the number of attempts allowed for the test.",
- "minimum": 1,
- "maximum": 10
- }
- }
- },
- "TestSuiteTestScorerAI": {
- "type": "object",
- "properties": {
- "type": {
+ "description": "This sets the timeout for the connection to the custom provider without needing to stream any tokens back. Default is 20 seconds.",
+ "minimum": 20,
+ "maximum": 600
+ },
+ "model": {
"type": "string",
- "description": "This is the type of the scorer, which must be AI.",
- "enum": [
- "ai"
- ],
+ "description": "This is the name of the model. Ex. gpt-4o",
"maxLength": 100
},
- "rubric": {
- "type": "string",
- "description": "This is the rubric used by the AI scorer.",
- "maxLength": 10000
- }
- },
- "required": [
- "type",
- "rubric"
- ]
- },
- "TestSuiteTestsPaginatedResponse": {
- "type": "object",
- "properties": {
- "results": {
+ "temperature": {
+ "type": "number",
+ "description": "This is the temperature of the model. For LLM-as-a-judge, it's recommended to set it between 0 - 0.3 to avoid hallucinations and ensure the model judges the output correctly based on the instructions.",
+ "minimum": 0,
+ "maximum": 2
+ },
+ "maxTokens": {
+ "type": "number",
+ "description": "This is the max tokens of the model.\nIf your Judge instructions return `true` or `false`, the response takes only 1 token (as per the OpenAI Tokenizer), and therefore it is recommended to set this to a low number to force the model to return a short response.",
+ "minimum": 50,
+ "maximum": 10000
+ },
+ "messages": {
+ "description": "These are the messages which will instruct the AI Judge on how to evaluate the assistant message.\nThe LLM-Judge must respond with \"pass\" or \"fail\" to indicate if the assistant message passes the eval.\n\nTo access the messages in the mock conversation, use the LiquidJS variable `{{messages}}`.\nThe assistant message to be evaluated will be passed as the last message in the `messages` array and can be accessed using `{{messages[-1]}}`.\n\nIt is recommended to use the system message to instruct the LLM how to evaluate the assistant message, and then use the first user message to pass the assistant message to be evaluated.",
+ "example": "{",
"type": "array",
- "description": "A list of test suite tests.",
"items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TestSuiteTestVoice"
- },
- {
- "$ref": "#/components/schemas/TestSuiteTestChat"
- }
- ]
+ "type": "object"
}
- },
- "metadata": {
- "description": "Metadata about the pagination.",
- "allOf": [
- {
- "$ref": "#/components/schemas/PaginationMeta"
- }
- ]
}
},
"required": [
- "results",
- "metadata"
+ "provider",
+ "url",
+ "model",
+ "messages"
]
},
- "TestSuiteRunScorerAI": {
+ "AssistantMessageJudgePlanAI": {
"type": "object",
"properties": {
+ "model": {
+ "description": "This is the model to use for the LLM-as-a-judge.\nIf not provided, will default to the assistant's model.\n\nThe instructions on how to evaluate the model output with this LLM-Judge must be passed as a system message in the messages array of the model.\n\nThe Mock conversation can be passed to the LLM-Judge to evaluate using the prompt {{messages}} and will be evaluated as a LiquidJS Variable. To access and judge only the last message, use {{messages[-1]}}\n\nThe LLM-Judge must respond with \"pass\" or \"fail\" and only those two responses are allowed.",
+ "example": "{",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/EvalOpenAIModel",
+ "title": "EvalOpenAIModel"
+ },
+ {
+ "$ref": "#/components/schemas/EvalAnthropicModel",
+ "title": "EvalAnthropicModel"
+ },
+ {
+ "$ref": "#/components/schemas/EvalGoogleModel",
+ "title": "EvalGoogleModel"
+ },
+ {
+ "$ref": "#/components/schemas/EvalCustomModel",
+ "title": "EvalCustomModel"
+ }
+ ]
+ },
"type": {
"type": "string",
- "description": "This is the type of the scorer, which must be AI.",
"enum": [
"ai"
],
- "maxLength": 100
- },
- "result": {
- "type": "string",
- "description": "This is the result of the test suite.",
- "enum": [
- "pass",
- "fail"
- ],
- "maxLength": 100
- },
- "reasoning": {
- "type": "string",
- "description": "This is the reasoning provided by the AI scorer.",
- "maxLength": 10000
+ "description": "This is the type of the judge plan.\nUse 'ai' to evaluate the assistant message content using LLM-as-a-judge.\n@default 'ai'"
},
- "rubric": {
- "type": "string",
- "description": "This is the rubric used by the AI scorer.",
- "maxLength": 10000
+ "autoIncludeMessageHistory": {
+ "type": "boolean",
+ "description": "This is the flag to enable automatically adding the liquid variable {{messages}} to the model's messages array\nThis is only applicable if the user has not provided any messages in the model's messages array\n@default true"
}
},
"required": [
- "type",
- "result",
- "reasoning",
- "rubric"
+ "model",
+ "type"
]
},
- "TestSuiteRunTestAttemptCall": {
+ "ChatEvalToolResponseMessageEvaluation": {
"type": "object",
"properties": {
- "artifact": {
- "description": "This is the artifact of the call.",
+ "role": {
+ "type": "string",
+ "enum": [
+ "tool"
+ ],
+ "description": "This is the role of the message author.\nFor a tool response message evaluation, the role is always 'tool'\n@default 'tool'",
+ "default": "tool"
+ },
+ "judgePlan": {
+ "description": "This is the judge plan that instructs how to evaluate the tool response message.\nThe tool response message can be evaluated with an LLM-as-judge by defining the evaluation criteria in a prompt.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AssistantMessageJudgePlanAI",
+ "title": "AssistantMessageJudgePlanAI"
+ }
+ ],
"allOf": [
{
- "$ref": "#/components/schemas/Artifact"
+ "$ref": "#/components/schemas/AssistantMessageJudgePlanAI"
}
]
}
},
"required": [
- "artifact"
+ "role",
+ "judgePlan"
]
},
- "TestSuiteRunTestAttemptMetadata": {
+ "AssistantMessageJudgePlanExact": {
"type": "object",
"properties": {
- "sessionId": {
+ "type": {
"type": "string",
- "description": "This is the session ID for the test attempt."
- }
- },
- "required": [
- "sessionId"
- ]
- },
- "TestSuiteRunTestAttempt": {
- "type": "object",
- "properties": {
- "scorerResults": {
- "type": "array",
- "description": "These are the results of the scorers used to evaluate the test attempt.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/TestSuiteRunScorerAI",
- "title": "AI"
- }
- ]
- }
- },
- "call": {
- "description": "This is the call made during the test attempt.",
- "allOf": [
- {
- "$ref": "#/components/schemas/TestSuiteRunTestAttemptCall"
- }
- ]
+ "enum": [
+ "exact"
+ ],
+ "description": "This is the type of the judge plan.\nUse 'exact' for an exact match on the content and tool calls - without using LLM-as-a-judge.\n@default 'exact'"
},
- "callId": {
+ "content": {
"type": "string",
- "description": "This is the call ID for the test attempt."
+ "description": "This is what will be used to evaluate the model's message content.\nIf you provide a string, the assistant message content will be evaluated against it as an exact match, case-insensitive.",
+ "example": "The weather in San Francisco is sunny.",
+ "maxLength": 1000
},
- "metadata": {
- "description": "This is the metadata for the test attempt.",
- "allOf": [
- {
- "$ref": "#/components/schemas/TestSuiteRunTestAttemptMetadata"
- }
- ]
+ "toolCalls": {
+ "description": "These are the tool calls that will be used to evaluate the model's message content.\nThe tool name must be a valid tool that the assistant is allowed to call.\n\nFor the Query tool, the arguments for the tool call are in the format - {knowledgeBaseNames: ['kb_name', 'kb_name_2']}\n\nFor the DTMF tool, the arguments for the tool call are in the format - {dtmf: \"1234*\"}\n\nFor the Handoff tool, the arguments for the tool call are in the format - {destination: \"assistant_id\"}\n\nFor the Transfer Call tool, the arguments for the tool call are in the format - {destination: \"phone_number_or_assistant_id\"}\n\nFor all other tools, they are called without arguments or with user-defined arguments",
+ "example": "[{ name: \"get_weather\", arguments: { city: \"San Francisco\" } }]",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ChatEvalAssistantMessageMockToolCall"
+ }
}
},
"required": [
- "scorerResults"
+ "type",
+ "content"
]
},
- "TestSuiteRunTestResult": {
+ "AssistantMessageJudgePlanRegex": {
"type": "object",
"properties": {
- "test": {
- "description": "This is the test that was run.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TestSuiteTestVoice",
- "title": "TestSuiteTestVoice"
- }
- ]
+ "type": {
+ "type": "string",
+ "enum": [
+ "regex"
+ ],
+ "description": "This is the type of the judge plan.\nUse 'regex' for a regex match on the content and tool calls - without using LLM-as-a-judge.\n@default 'regex'"
},
- "attempts": {
- "description": "These are the attempts made for this test.",
+ "content": {
+ "type": "string",
+ "description": "This is what will be used to evaluate the model's message content.\nThe content will be evaluated against the regex pattern provided in the Judge Plan content field.\nEvaluation is considered successful if the regex pattern matches any part of the assistant message content.",
+ "example": "/sunny/i",
+ "maxLength": 1000
+ },
+ "toolCalls": {
+ "description": "These are the tool calls that will be used to evaluate the model's message content.\nThe tool name must be a valid tool that the assistant is allowed to call.\nThe values to the arguments for the tool call should be a Regular Expression.\nEvaluation is considered successful if the regex pattern matches any part of each tool call argument.\n\nFor the Query tool, the arguments for the tool call are in the format - {knowledgeBaseNames: ['kb_name', 'kb_name_2']}\n\nFor the DTMF tool, the arguments for the tool call are in the format - {dtmf: \"1234*\"}\n\nFor the Handoff tool, the arguments for the tool call are in the format - {destination: \"assistant_id\"}\n\nFor the Transfer Call tool, the arguments for the tool call are in the format - {destination: \"phone_number_or_assistant_id\"}\n\nFor all other tools, they are called without arguments or with user-defined arguments",
+ "example": "[{ name: \"get_weather\", arguments: { city: \"/San Francisco/i\" } }]",
"type": "array",
"items": {
- "$ref": "#/components/schemas/TestSuiteRunTestAttempt"
+ "$ref": "#/components/schemas/ChatEvalAssistantMessageMockToolCall"
}
}
},
"required": [
- "test",
- "attempts"
+ "type",
+ "content"
]
},
- "TestSuiteRun": {
+ "GetEvalPaginatedDTO": {
"type": "object",
"properties": {
- "status": {
+ "id": {
+ "type": "string"
+ },
+ "page": {
+ "type": "number",
+ "description": "This is the page number to return. Defaults to 1.",
+ "minimum": 1
+ },
+ "sortOrder": {
"type": "string",
- "description": "This is the current status of the test suite run.",
+ "description": "This is the sort order for pagination. Defaults to 'DESC'.",
"enum": [
- "queued",
- "in-progress",
- "completed",
- "failed"
+ "ASC",
+ "DESC"
]
},
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the test suite run."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the organization this run belongs to."
+ "limit": {
+ "type": "number",
+ "description": "This is the maximum number of items to return. Defaults to 100.",
+ "minimum": 0,
+ "maximum": 1000
},
- "testSuiteId": {
+ "createdAtGt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the unique identifier for the test suite this run belongs to."
+ "description": "This will return items where the createdAt is greater than the specified value."
},
- "createdAt": {
+ "createdAtLt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the test suite run was created."
+ "description": "This will return items where the createdAt is less than the specified value."
},
- "updatedAt": {
+ "createdAtGe": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the test suite run was last updated."
- },
- "testResults": {
- "description": "These are the results of the tests in this test suite run.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/TestSuiteRunTestResult"
- }
+ "description": "This will return items where the createdAt is greater than or equal to the specified value."
},
- "name": {
+ "createdAtLe": {
+ "format": "date-time",
"type": "string",
- "description": "This is the name of the test suite run.",
- "maxLength": 80
- }
- },
- "required": [
- "status",
- "id",
- "orgId",
- "testSuiteId",
- "createdAt",
- "updatedAt",
- "testResults"
- ]
- },
- "TestSuiteRunsPaginatedResponse": {
- "type": "object",
- "properties": {
- "results": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/TestSuiteRun"
- }
+ "description": "This will return items where the createdAt is less than or equal to the specified value."
},
- "metadata": {
- "$ref": "#/components/schemas/PaginationMeta"
- }
- },
- "required": [
- "results",
- "metadata"
- ]
- },
- "CreateTestSuiteRunDto": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "description": "This is the name of the test suite run.",
- "maxLength": 80
- }
- }
- },
- "UpdateTestSuiteRunDto": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "description": "This is the name of the test suite run.",
- "maxLength": 80
- }
- }
- },
- "TimeRange": {
- "type": "object",
- "properties": {
- "step": {
+ "updatedAtGt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the time step for aggregations.\n\nIf not provided, defaults to returning for the entire time range.",
- "enum": [
- "second",
- "minute",
- "hour",
- "day",
- "week",
- "month",
- "quarter",
- "year",
- "decade",
- "century",
- "millennium"
- ]
+ "description": "This will return items where the updatedAt is greater than the specified value."
},
- "start": {
+ "updatedAtLt": {
"format": "date-time",
"type": "string",
- "description": "This is the start date for the time range.\n\nIf not provided, defaults to the 7 days ago."
+ "description": "This will return items where the updatedAt is less than the specified value."
},
- "end": {
+ "updatedAtGe": {
"format": "date-time",
"type": "string",
- "description": "This is the end date for the time range.\n\nIf not provided, defaults to now."
+ "description": "This will return items where the updatedAt is greater than or equal to the specified value."
},
- "timezone": {
+ "updatedAtLe": {
+ "format": "date-time",
"type": "string",
- "description": "This is the timezone you want to set for the query.\n\nIf not provided, defaults to UTC."
+ "description": "This will return items where the updatedAt is less than or equal to the specified value."
}
}
},
- "AnalyticsOperation": {
+ "EvalPaginatedResponse": {
"type": "object",
"properties": {
- "operation": {
- "type": "string",
- "description": "This is the aggregation operation you want to perform.",
- "enum": [
- "sum",
- "avg",
- "count",
- "min",
- "max",
- "history"
- ]
- },
- "column": {
- "type": "string",
- "description": "This is the columns you want to perform the aggregation operation on.",
- "enum": [
- "id",
- "cost",
- "costBreakdown.llm",
- "costBreakdown.stt",
- "costBreakdown.tts",
- "costBreakdown.vapi",
- "costBreakdown.transport",
- "costBreakdown.analysisBreakdown.summary",
- "costBreakdown.transcriber",
- "costBreakdown.ttsCharacters",
- "costBreakdown.llmPromptTokens",
- "costBreakdown.llmCompletionTokens",
- "duration",
- "concurrency",
- "minutesUsed"
- ]
- },
- "alias": {
- "type": "string",
- "description": "This is the alias for column name returned. Defaults to `${operation}${column}`.",
- "maxLength": 40
+ "results": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/Eval"
+ }
+ },
+ "metadata": {
+ "$ref": "#/components/schemas/PaginationMeta"
}
},
"required": [
- "operation",
- "column"
+ "results",
+ "metadata"
]
},
- "AnalyticsQuery": {
+ "UpdateEvalDTO": {
"type": "object",
"properties": {
- "table": {
- "type": "string",
- "description": "This is the table you want to query.",
- "enum": [
- "call",
- "subscription"
- ]
- },
- "groupBy": {
+ "messages": {
"type": "array",
- "description": "This is the list of columns you want to group by.",
- "enum": [
- "type",
- "assistantId",
- "endedReason",
- "analysis.successEvaluation",
- "status"
- ],
+ "description": "This is the mock conversation that will be used to evaluate the flow of the conversation.\n\nMock Messages are used to simulate the flow of the conversation\n\nEvaluation Messages are used as checkpoints in the flow where the model's response to previous conversation needs to be evaluated to check the content and tool calls",
+ "example": "[{ role: \"user\", content: \"Hello, how are you?\" }, { role: \"assistant\", judgePlan: { type: \"exact\", content: \"I am good, thank you!\" } }]",
"items": {
- "type": "string",
- "enum": [
- "type",
- "assistantId",
- "endedReason",
- "analysis.successEvaluation",
- "status"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ChatEvalAssistantMessageMock",
+ "title": "ChatEvalAssistantMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalSystemMessageMock",
+ "title": "ChatEvalSystemMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalToolResponseMessageMock",
+ "title": "ChatEvalToolResponseMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalToolResponseMessageEvaluation",
+ "title": "ChatEvalToolResponseMessageEvaluation"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalUserMessageMock",
+ "title": "ChatEvalUserMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalAssistantMessageEvaluation",
+ "title": "ChatEvalAssistantMessageEvaluation"
+ }
]
}
},
"name": {
"type": "string",
- "description": "This is the name of the query. This will be used to identify the query in the response.",
- "maxLength": 40
+ "description": "This is the name of the eval.\nIt helps identify what the eval is checking for.",
+ "example": "Verified User Flow Eval",
+ "minLength": 1,
+ "maxLength": 80
},
- "timeRange": {
- "description": "This is the time range for the query.",
+ "description": {
+ "type": "string",
+ "description": "This is the description of the eval.\nThis helps describe the eval and its purpose in detail. It will not be used to evaluate the flow of the conversation.",
+ "example": "This eval checks if the user flow is verified.",
+ "maxLength": 500
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the eval.\nCurrently it is fixed to `chat.mockConversation`.",
+ "example": "chat.mockConversation",
+ "enum": [
+ "chat.mockConversation"
+ ]
+ }
+ }
+ },
+ "CreateEvalRunDTO": {
+ "type": "object",
+ "properties": {
+ "eval": {
+ "description": "This is the transient eval that will be run",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateEvalDTO",
+ "title": "CreateEvalDTO"
+ }
+ ],
"allOf": [
{
- "$ref": "#/components/schemas/TimeRange"
+ "$ref": "#/components/schemas/CreateEvalDTO"
}
]
},
- "operations": {
- "description": "This is the list of operations you want to perform.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/AnalyticsOperation"
- }
+ "target": {
+ "description": "This is the target that will be run against the eval",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/EvalRunTargetAssistant",
+ "title": "EvalRunTargetAssistant"
+ },
+ {
+ "$ref": "#/components/schemas/EvalRunTargetSquad",
+ "title": "EvalRunTargetSquad"
+ }
+ ]
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the run.\nCurrently it is fixed to `eval`.",
+ "example": "eval",
+ "enum": [
+ "eval"
+ ]
+ },
+ "evalId": {
+ "type": "string",
+ "description": "This is the id of the eval that will be run.",
+ "example": "123e4567-e89b-12d3-a456-426614174000"
}
},
"required": [
- "table",
- "name",
- "operations"
+ "target",
+ "type"
]
},
- "AnalyticsQueryDTO": {
+ "EvalRunResult": {
"type": "object",
"properties": {
- "queries": {
- "description": "This is the list of metric queries you want to perform.",
+ "status": {
+ "type": "string",
+ "description": "This is the status of the eval run result.\nThe status is only 'pass' or 'fail' for an eval run result.\nCurrently, an eval is considered `pass` only if all the Assistant Judge messages are evaluated to pass.",
+ "example": "pass",
+ "enum": [
+ "pass",
+ "fail"
+ ]
+ },
+ "messages": {
"type": "array",
+ "description": "This is the messages of the eval run result.\nIt contains the mock conversation messages (user, system, tool response, and assistant).",
"items": {
- "$ref": "#/components/schemas/AnalyticsQuery"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/ChatEvalUserMessageMock",
+ "title": "ChatEvalUserMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalSystemMessageMock",
+ "title": "ChatEvalSystemMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalToolResponseMessageMock",
+ "title": "ChatEvalToolResponseMessageMock"
+ },
+ {
+ "$ref": "#/components/schemas/ChatEvalAssistantMessageMock",
+ "title": "ChatEvalAssistantMessageMock"
+ }
+ ]
}
+ },
+ "startedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the start time of the eval run result.",
+ "example": "2021-01-01T00:00:00.000Z"
+ },
+ "endedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the end time of the eval run result.",
+ "example": "2021-01-01T00:00:00.000Z"
}
},
"required": [
- "queries"
+ "status",
+ "messages",
+ "startedAt",
+ "endedAt"
]
},
- "AnalyticsQueryResult": {
+ "EvalRun": {
"type": "object",
"properties": {
- "name": {
+ "status": {
"type": "string",
- "description": "This is the unique key for the query."
+ "description": "This is the status of the eval run. When an eval run is created, the status is 'running'.\nWhen the eval run is completed, the status is 'ended'.",
+ "example": "running",
+ "enum": [
+ "running",
+ "ended",
+ "queued"
+ ]
},
- "timeRange": {
- "description": "This is the time range for the query.",
+ "endedReason": {
+ "type": "string",
+ "description": "This is the reason for the eval run to end.\nWhen the eval run is completed normally i.e. end of mock conversation, the endedReason is 'mockConversation.done'.\nWhen the eval fails due to an error like Chat error or incorrect configuration, the endedReason is 'error'.\nWhen the eval runs for too long, due to model issues or tool call issues, the endedReason is 'timeout'.\nWhen the eval run is cancelled by the user, the endedReason is 'cancelled'.\nWhen the eval run is cancelled by Vapi for any reason, the endedReason is 'aborted'.",
+ "example": "mockConversation.done",
+ "enum": [
+ "mockConversation.done",
+ "error",
+ "timeout",
+ "cancelled",
+ "aborted"
+ ]
+ },
+ "eval": {
+ "description": "This is the transient eval that will be run",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateEvalDTO",
+ "title": "CreateEvalDTO"
+ }
+ ],
"allOf": [
{
- "$ref": "#/components/schemas/TimeRange"
+ "$ref": "#/components/schemas/CreateEvalDTO"
}
]
},
- "result": {
- "description": "This is the result of the query, a list of unique groups with result of their aggregations.\n\nExample:\n\"result\": [\n { \"date\": \"2023-01-01\", \"assistantId\": \"123\", \"endedReason\": \"customer-ended-call\", \"sumDuration\": 120, \"avgCost\": 10.5 },\n { \"date\": \"2023-01-02\", \"assistantId\": \"123\", \"endedReason\": \"customer-did-not-give-microphone-permission\", \"sumDuration\": 0, \"avgCost\": 0 },\n // Additional results\n]",
+ "target": {
+ "description": "This is the target that will be run against the eval",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/EvalRunTargetAssistant",
+ "title": "EvalRunTargetAssistant"
+ },
+ {
+ "$ref": "#/components/schemas/EvalRunTargetSquad",
+ "title": "EvalRunTargetSquad"
+ }
+ ]
+ },
+ "id": {
+ "type": "string"
+ },
+ "orgId": {
+ "type": "string"
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string"
+ },
+ "startedAt": {
+ "format": "date-time",
+ "type": "string"
+ },
+ "endedAt": {
+ "format": "date-time",
+ "type": "string"
+ },
+ "endedMessage": {
+ "type": "string",
+ "description": "This is the ended message when the eval run ended for any reason apart from mockConversation.done",
+ "example": "The Assistant returned an error"
+ },
+ "results": {
+ "description": "This is the results of the eval or suite run.\nThe array will have a single item for an eval run, and multiple items each corresponding to an eval in a suite run in the same order as the evals in the suite.",
"type": "array",
"items": {
- "type": "object"
+ "$ref": "#/components/schemas/EvalRunResult"
}
- }
- },
- "required": [
- "name",
- "timeRange",
- "result"
- ]
- },
- "CallLogPrivileged": {
- "type": "object",
- "properties": {
- "callId": {
- "type": "string",
- "description": "This is the unique identifier for the call."
},
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the org that this call log belongs to."
+ "cost": {
+ "type": "number",
+ "description": "This is the cost of the eval or suite run in USD.",
+ "example": 0.01
},
- "log": {
- "type": "string",
- "description": "This is the log message associated with the call."
+ "costs": {
+ "description": "This is the break up of costs of the eval or suite run.",
+ "example": "[{ type: \"model\", model: \"gpt-4o\", cost: 0.01 }]",
+ "type": "array",
+ "items": {
+ "type": "object"
+ }
},
- "level": {
+ "type": {
"type": "string",
- "description": "This is the level of the log message.",
+ "description": "This is the type of the run.\nCurrently it is fixed to `eval`.",
+ "example": "eval",
"enum": [
- "INFO",
- "LOG",
- "WARN",
- "ERROR",
- "CHECKPOINT"
+ "eval"
]
},
- "time": {
- "format": "date-time",
+ "evalId": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the log was created."
+ "description": "This is the id of the eval that will be run.",
+ "example": "123e4567-e89b-12d3-a456-426614174000"
}
},
"required": [
- "callId",
+ "status",
+ "endedReason",
+ "target",
+ "id",
"orgId",
- "log",
- "level",
- "time"
+ "createdAt",
+ "startedAt",
+ "endedAt",
+ "results",
+ "cost",
+ "costs",
+ "type"
]
},
- "CallLogsPaginatedResponse": {
+ "EvalRunPaginatedResponse": {
"type": "object",
"properties": {
"results": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/CallLogPrivileged"
+ "$ref": "#/components/schemas/EvalRun"
}
},
"metadata": {
@@ -39029,250 +50210,217 @@
"metadata"
]
},
- "Error": {
+ "GetEvalRunPaginatedDTO": {
"type": "object",
"properties": {
- "message": {
+ "id": {
"type": "string"
- }
- },
- "required": [
- "message"
- ]
- },
- "Log": {
- "type": "object",
- "properties": {
- "time": {
- "type": "string",
- "description": "This is the timestamp at which the log was written."
- },
- "orgId": {
- "type": "string",
- "description": "This is the unique identifier for the org that this log belongs to."
- },
- "type": {
- "type": "string",
- "description": "This is the type of the log.",
- "enum": [
- "API",
- "Webhook",
- "Call",
- "Provider"
- ]
},
- "webhookType": {
- "type": "string",
- "description": "This is the type of the webhook, given the log is from a webhook."
+ "page": {
+ "type": "number",
+ "description": "This is the page number to return. Defaults to 1.",
+ "minimum": 1
},
- "resource": {
+ "sortOrder": {
"type": "string",
- "description": "This is the specific resource, relevant only to API logs.",
+ "description": "This is the sort order for pagination. Defaults to 'DESC'.",
"enum": [
- "org",
- "assistant",
- "analytics",
- "credential",
- "phone-number",
- "block",
- "voice-library",
- "provider",
- "tool",
- "token",
- "template",
- "squad",
- "call",
- "file",
- "metric",
- "log"
+ "ASC",
+ "DESC"
]
},
- "requestDurationSeconds": {
+ "limit": {
"type": "number",
- "description": "'This is how long the request took.",
- "minimum": 0
+ "description": "This is the maximum number of items to return. Defaults to 100.",
+ "minimum": 0,
+ "maximum": 1000
},
- "requestStartedAt": {
+ "createdAtGt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the timestamp at which the request began."
+ "description": "This will return items where the createdAt is greater than the specified value."
},
- "requestFinishedAt": {
+ "createdAtLt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the timestamp at which the request finished."
- },
- "requestBody": {
- "type": "object",
- "description": "This is the body of the request."
+ "description": "This will return items where the createdAt is less than the specified value."
},
- "requestHttpMethod": {
+ "createdAtGe": {
+ "format": "date-time",
"type": "string",
- "description": "This is the request method.",
- "enum": [
- "POST",
- "GET",
- "PUT",
- "PATCH",
- "DELETE"
- ]
+ "description": "This will return items where the createdAt is greater than or equal to the specified value."
},
- "requestUrl": {
+ "createdAtLe": {
+ "format": "date-time",
"type": "string",
- "description": "This is the request URL."
+ "description": "This will return items where the createdAt is less than or equal to the specified value."
},
- "requestPath": {
+ "updatedAtGt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the request path."
+ "description": "This will return items where the updatedAt is greater than the specified value."
},
- "requestQuery": {
+ "updatedAtLt": {
+ "format": "date-time",
"type": "string",
- "description": "This is the request query."
- },
- "responseHttpCode": {
- "type": "number",
- "description": "This the HTTP status code of the response."
+ "description": "This will return items where the updatedAt is less than the specified value."
},
- "requestIpAddress": {
+ "updatedAtGe": {
+ "format": "date-time",
"type": "string",
- "description": "This is the request IP address."
+ "description": "This will return items where the updatedAt is greater than or equal to the specified value."
},
- "requestOrigin": {
+ "updatedAtLe": {
+ "format": "date-time",
"type": "string",
- "description": "This is the origin of the request"
- },
- "responseBody": {
- "type": "object",
- "description": "This is the body of the response."
- },
- "requestHeaders": {
- "type": "object",
- "description": "These are the headers of the request."
- },
- "error": {
- "description": "This is the error, if one occurred.",
+ "description": "This will return items where the updatedAt is less than or equal to the specified value."
+ }
+ }
+ },
+ "EvalRunTargetAssistant": {
+ "type": "object",
+ "properties": {
+ "assistant": {
+ "description": "This is the transient assistant that will be run against the eval",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO",
+ "title": "CreateAssistantDTO"
+ }
+ ],
"allOf": [
{
- "$ref": "#/components/schemas/Error"
+ "$ref": "#/components/schemas/CreateAssistantDTO"
}
]
},
- "assistantId": {
- "type": "string",
- "description": "This is the ID of the assistant."
- },
- "phoneNumberId": {
- "type": "string",
- "description": "This is the ID of the phone number."
- },
- "customerId": {
- "type": "string",
- "description": "This is the ID of the customer."
+ "assistantOverrides": {
+ "description": "This is the overrides that will be applied to the assistant.",
+ "example": "{",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides",
+ "title": "AssistantOverrides"
+ }
+ ],
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides"
+ }
+ ]
},
- "squadId": {
+ "type": {
"type": "string",
- "description": "This is the ID of the squad."
+ "description": "This is the type of the target.\nCurrently it is fixed to `assistant`.",
+ "example": "assistant",
+ "enum": [
+ "assistant"
+ ]
},
- "callId": {
+ "assistantId": {
"type": "string",
- "description": "This is the ID of the call."
+ "description": "This is the id of the assistant that will be run against the eval",
+ "example": "123e4567-e89b-12d3-a456-426614174000"
}
},
"required": [
- "time",
- "orgId",
"type"
]
},
- "LogsPaginatedResponse": {
- "type": "object",
- "properties": {
- "results": {
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/Log"
- }
- },
- "metadata": {
- "$ref": "#/components/schemas/PaginationMeta"
- }
- },
- "required": [
- "results",
- "metadata"
- ]
- },
- "StructuredOutput": {
+ "EvalRunTargetSquad": {
"type": "object",
"properties": {
- "model": {
- "description": "This is the model that will be used to extract the structured output.\n\nTo provide your own custom system and user prompts for structured output extraction, populate the messages array with your system and user messages. You can specify liquid templating in your system and user messages.\nBetween the system or user messages, you must reference either 'transcript' or 'messages' with the '{{}}' syntax to access the conversation history.\nBetween the system or user messages, you must reference a variation of the structured output with the '{{}}' syntax to access the structured output definition.\ni.e.:\n{{structuredOutput}}\n{{structuredOutput.name}}\n{{structuredOutput.description}}\n{{structuredOutput.schema}}\n\nIf model is not specified, GPT-4.1 will be used by default for extraction, utilizing default system and user prompts.\nIf messages or required fields are not specified, the default system and user prompts will be used.",
+ "squad": {
+ "description": "This is the transient squad that will be run against the eval",
"oneOf": [
{
- "$ref": "#/components/schemas/WorkflowOpenAIModel",
- "title": "WorkflowOpenAIModel"
- },
+ "$ref": "#/components/schemas/CreateSquadDTO",
+ "title": "CreateSquadDTO"
+ }
+ ],
+ "allOf": [
{
- "$ref": "#/components/schemas/WorkflowAnthropicModel",
- "title": "WorkflowAnthropicModel"
- },
+ "$ref": "#/components/schemas/CreateSquadDTO"
+ }
+ ]
+ },
+ "assistantOverrides": {
+ "description": "This is the overrides that will be applied to the assistants.",
+ "example": "{",
+ "oneOf": [
{
- "$ref": "#/components/schemas/WorkflowGoogleModel",
- "title": "WorkflowGoogleModel"
- },
+ "$ref": "#/components/schemas/AssistantOverrides",
+ "title": "AssistantOverrides"
+ }
+ ],
+ "allOf": [
{
- "$ref": "#/components/schemas/WorkflowCustomModel",
- "title": "WorkflowCustomModel"
+ "$ref": "#/components/schemas/AssistantOverrides"
}
]
},
+ "type": {
+ "type": "string",
+ "description": "This is the type of the target.\nCurrently it is fixed to `squad`.",
+ "example": "squad",
+ "enum": [
+ "squad"
+ ]
+ },
+ "squadId": {
+ "type": "string",
+ "description": "This is the id of the squad that will be run against the eval",
+ "example": "123e4567-e89b-12d3-a456-426614174000"
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "Scorecard": {
+ "type": "object",
+ "properties": {
"id": {
"type": "string",
- "description": "This is the unique identifier for the structured output."
+ "description": "This is the unique identifier for the scorecard."
},
"orgId": {
"type": "string",
- "description": "This is the unique identifier for the org that this structured output belongs to."
+ "description": "This is the unique identifier for the org that this scorecard belongs to."
},
"createdAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the structured output was created."
+ "description": "This is the ISO 8601 date-time string of when the scorecard was created."
},
"updatedAt": {
"format": "date-time",
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the structured output was last updated."
+ "description": "This is the ISO 8601 date-time string of when the scorecard was last updated."
},
"name": {
"type": "string",
- "description": "This is the name of the structured output.",
- "minLength": 1,
- "maxLength": 40
+ "description": "This is the name of the scorecard. It is only for user reference and will not be used for any evaluation.",
+ "maxLength": 80
},
"description": {
"type": "string",
- "description": "This is the description of what the structured output extracts.\n\nUse this to provide context about what data will be extracted and how it will be used."
+ "description": "This is the description of the scorecard. It is only for user reference and will not be used for any evaluation.",
+ "maxLength": 500
},
- "assistantIds": {
- "description": "These are the assistant IDs that this structured output is linked to.\n\nWhen linked to assistants, this structured output will be available for extraction during those assistant's calls.",
+ "metrics": {
+ "description": "These are the metrics that will be used to evaluate the scorecard.\nEach metric will have a set of conditions and points that will be used to generate the score.",
"type": "array",
"items": {
- "type": "string"
+ "$ref": "#/components/schemas/ScorecardMetric"
}
},
- "workflowIds": {
- "description": "These are the workflow IDs that this structured output is linked to.\n\nWhen linked to workflows, this structured output will be available for extraction during those workflow's execution.",
+ "assistantIds": {
+ "description": "These are the assistant IDs that this scorecard is linked to.\nWhen linked to assistants, this scorecard will be available for evaluation during those assistants' calls.",
"type": "array",
"items": {
"type": "string"
}
- },
- "schema": {
- "description": "This is the JSON Schema definition for the structured output.\n\nDefines the structure and validation rules for the data that will be extracted. Supports all JSON Schema features including:\n- Objects and nested properties\n- Arrays and array validation\n- String, number, boolean, and null types\n- Enums and const values\n- Validation constraints (min/max, patterns, etc.)\n- Composition with allOf, anyOf, oneOf",
- "allOf": [
- {
- "$ref": "#/components/schemas/JsonSchema"
- }
- ]
}
},
"required": [
@@ -39280,17 +50428,16 @@
"orgId",
"createdAt",
"updatedAt",
- "name",
- "schema"
+ "metrics"
]
},
- "StructuredOutputPaginatedResponse": {
+ "ScorecardPaginatedResponse": {
"type": "object",
"properties": {
"results": {
"type": "array",
"items": {
- "$ref": "#/components/schemas/StructuredOutput"
+ "$ref": "#/components/schemas/Scorecard"
}
},
"metadata": {
@@ -39302,123 +50449,32 @@
"metadata"
]
},
- "CreateStructuredOutputDTO": {
+ "UpdateScorecardDTO": {
"type": "object",
"properties": {
- "model": {
- "description": "This is the model that will be used to extract the structured output.\n\nTo provide your own custom system and user prompts for structured output extraction, populate the messages array with your system and user messages. You can specify liquid templating in your system and user messages.\nBetween the system or user messages, you must reference either 'transcript' or 'messages' with the '{{}}' syntax to access the conversation history.\nBetween the system or user messages, you must reference a variation of the structured output with the '{{}}' syntax to access the structured output definition.\ni.e.:\n{{structuredOutput}}\n{{structuredOutput.name}}\n{{structuredOutput.description}}\n{{structuredOutput.schema}}\n\nIf model is not specified, GPT-4.1 will be used by default for extraction, utilizing default system and user prompts.\nIf messages or required fields are not specified, the default system and user prompts will be used.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/WorkflowOpenAIModel",
- "title": "WorkflowOpenAIModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowAnthropicModel",
- "title": "WorkflowAnthropicModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowGoogleModel",
- "title": "WorkflowGoogleModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowCustomModel",
- "title": "WorkflowCustomModel"
- }
- ]
- },
"name": {
"type": "string",
- "description": "This is the name of the structured output.",
- "minLength": 1,
- "maxLength": 40
- },
- "schema": {
- "description": "This is the JSON Schema definition for the structured output.\n\nThis is required when creating a structured output. Defines the structure and validation rules for the data that will be extracted. Supports all JSON Schema features including:\n- Objects and nested properties\n- Arrays and array validation\n- String, number, boolean, and null types\n- Enums and const values\n- Validation constraints (min/max, patterns, etc.)\n- Composition with allOf, anyOf, oneOf",
- "allOf": [
- {
- "$ref": "#/components/schemas/JsonSchema"
- }
- ]
+ "description": "This is the name of the scorecard. It is only for user reference and will not be used for any evaluation.",
+ "maxLength": 80
},
"description": {
"type": "string",
- "description": "This is the description of what the structured output extracts.\n\nUse this to provide context about what data will be extracted and how it will be used."
- },
- "assistantIds": {
- "description": "These are the assistant IDs that this structured output is linked to.\n\nWhen linked to assistants, this structured output will be available for extraction during those assistant's calls.",
- "type": "array",
- "items": {
- "type": "string"
- }
+ "description": "This is the description of the scorecard. It is only for user reference and will not be used for any evaluation.",
+ "maxLength": 500
},
- "workflowIds": {
- "description": "These are the workflow IDs that this structured output is linked to.\n\nWhen linked to workflows, this structured output will be available for extraction during those workflow's execution.",
+ "metrics": {
+ "description": "These are the metrics that will be used to evaluate the scorecard.\nEach metric will have a set of conditions and points that will be used to generate the score.",
"type": "array",
"items": {
- "type": "string"
+ "$ref": "#/components/schemas/ScorecardMetric"
}
- }
- },
- "required": [
- "name",
- "schema"
- ]
- },
- "UpdateStructuredOutputDTO": {
- "type": "object",
- "properties": {
- "model": {
- "description": "This is the model that will be used to extract the structured output.\n\nTo provide your own custom system and user prompts for structured output extraction, populate the messages array with your system and user messages. You can specify liquid templating in your system and user messages.\nBetween the system or user messages, you must reference either 'transcript' or 'messages' with the '{{}}' syntax to access the conversation history.\nBetween the system or user messages, you must reference a variation of the structured output with the '{{}}' syntax to access the structured output definition.\ni.e.:\n{{structuredOutput}}\n{{structuredOutput.name}}\n{{structuredOutput.description}}\n{{structuredOutput.schema}}\n\nIf model is not specified, GPT-4.1 will be used by default for extraction, utilizing default system and user prompts.\nIf messages or required fields are not specified, the default system and user prompts will be used.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/WorkflowOpenAIModel",
- "title": "WorkflowOpenAIModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowAnthropicModel",
- "title": "WorkflowAnthropicModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowGoogleModel",
- "title": "WorkflowGoogleModel"
- },
- {
- "$ref": "#/components/schemas/WorkflowCustomModel",
- "title": "WorkflowCustomModel"
- }
- ]
- },
- "name": {
- "type": "string",
- "description": "This is the name of the structured output.",
- "minLength": 1,
- "maxLength": 40
- },
- "description": {
- "type": "string",
- "description": "This is the description of what the structured output extracts.\n\nUse this to provide context about what data will be extracted and how it will be used."
},
"assistantIds": {
- "description": "These are the assistant IDs that this structured output is linked to.\n\nWhen linked to assistants, this structured output will be available for extraction during those assistant's calls.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "workflowIds": {
- "description": "These are the workflow IDs that this structured output is linked to.\n\nWhen linked to workflows, this structured output will be available for extraction during those workflow's execution.",
+ "description": "These are the assistant IDs that this scorecard is linked to.\nWhen linked to assistants, this scorecard will be available for evaluation during those assistants' calls.",
"type": "array",
"items": {
"type": "string"
}
- },
- "schema": {
- "description": "This is the JSON Schema definition for the structured output.\n\nDefines the structure and validation rules for the data that will be extracted. Supports all JSON Schema features including:\n- Objects and nested properties\n- Arrays and array validation\n- String, number, boolean, and null types\n- Enums and const values\n- Validation constraints (min/max, patterns, etc.)\n- Composition with allOf, anyOf, oneOf",
- "allOf": [
- {
- "$ref": "#/components/schemas/JsonSchema"
- }
- ]
}
}
},
@@ -39427,7 +50483,7 @@
"properties": {
"hipaaEnabled": {
"type": "boolean",
- "description": "When this is enabled, no logs, recordings, or transcriptions will be stored. At the end of the call, you will still receive an end-of-call-report message to store on your server. Defaults to false.\nWhen HIPAA is enabled, only OpenAI/Custom LLM or Azure Providers will be available for LLM and Voice respectively.\nThis is due to the compliance requirements of HIPAA. Other providers may not meet these requirements.",
+ "description": "When this is enabled, logs, recordings, and transcriptions will be stored in HIPAA-compliant storage. Defaults to false.\nWhen HIPAA is enabled, only HIPAA-compliant providers will be available for LLM, Voice, and Transcriber respectively.\nThis is due to the compliance requirements of HIPAA. Other providers may not meet these requirements.",
"example": false
},
"subscriptionId": {
@@ -39443,8 +50499,11 @@
"type": "string",
"description": "This is the channel of the org. There is the cluster the API traffic for the org will be directed.",
"enum": [
+ "daily",
"default",
- "weekly"
+ "weekly",
+ "intuit",
+ "hcs"
]
},
"billingLimit": {
@@ -39610,6 +50669,14 @@
"type": "boolean",
"description": "This is the HIPAA enabled flag for the subscription. It determines whether orgs under this\nsubscription have the option to enable HIPAA compliance."
},
+ "zdrEnabled": {
+ "type": "boolean",
+ "description": "This is the ZDR enabled flag for the subscription. It determines whether orgs under this\nsubscription have the option to enable ZDR."
+ },
+ "dataRetentionEnabled": {
+ "type": "boolean",
+ "description": "This is the data retention enabled flag for the subscription. It determines whether orgs under this\nsubscription have the option to enable data retention."
+ },
"hipaaCommonPaperAgreementId": {
"type": "string",
"description": "This is the ID for the Common Paper agreement outlining the HIPAA contract."
@@ -39744,7 +50811,7 @@
"properties": {
"hipaaEnabled": {
"type": "boolean",
- "description": "When this is enabled, no logs, recordings, or transcriptions will be stored. At the end of the call, you will still receive an end-of-call-report message to store on your server. Defaults to false.\nWhen HIPAA is enabled, only OpenAI/Custom LLM or Azure Providers will be available for LLM and Voice respectively.\nThis is due to the compliance requirements of HIPAA. Other providers may not meet these requirements.",
+ "description": "When this is enabled, logs, recordings, and transcriptions will be stored in HIPAA-compliant storage. Defaults to false.\nWhen HIPAA is enabled, only HIPAA-compliant providers will be available for LLM, Voice, and Transcriber respectively.\nThis is due to the compliance requirements of HIPAA. Other providers may not meet these requirements.",
"example": false
},
"subscription": {
@@ -39802,8 +50869,11 @@
"type": "string",
"description": "This is the channel of the org. There is the cluster the API traffic for the org will be directed.",
"enum": [
+ "daily",
"default",
- "weekly"
+ "weekly",
+ "intuit",
+ "hcs"
]
},
"billingLimit": {
@@ -39847,7 +50917,7 @@
"properties": {
"hipaaEnabled": {
"type": "boolean",
- "description": "When this is enabled, no logs, recordings, or transcriptions will be stored. At the end of the call, you will still receive an end-of-call-report message to store on your server. Defaults to false.\nWhen HIPAA is enabled, only OpenAI/Custom LLM or Azure Providers will be available for LLM and Voice respectively.\nThis is due to the compliance requirements of HIPAA. Other providers may not meet these requirements.",
+ "description": "When this is enabled, logs, recordings, and transcriptions will be stored in HIPAA-compliant storage. Defaults to false.\nWhen HIPAA is enabled, only HIPAA-compliant providers will be available for LLM, Voice, and Transcriber respectively.\nThis is due to the compliance requirements of HIPAA. Other providers may not meet these requirements.",
"example": false
},
"subscriptionId": {
@@ -39863,8 +50933,11 @@
"type": "string",
"description": "This is the channel of the org. There is the cluster the API traffic for the org will be directed.",
"enum": [
+ "daily",
"default",
- "weekly"
+ "weekly",
+ "intuit",
+ "hcs"
]
},
"billingLimit": {
@@ -40097,8 +51170,7 @@
"id",
"orgId",
"createdAt",
- "updatedAt",
- "value"
+ "updatedAt"
]
},
"UpdateTokenDTO": {
@@ -40175,6 +51247,165 @@
"updatedAt"
]
},
+ "AWSStsAuthenticationArtifact": {
+ "type": "object",
+ "properties": {
+ "externalId": {
+ "type": "string",
+ "description": "This is the optional external ID for the AWS credential"
+ }
+ },
+ "required": [
+ "externalId"
+ ]
+ },
+ "AWSStsAssumeRoleUser": {
+ "type": "object",
+ "properties": {
+ "AssumedRoleId": {
+ "type": "string",
+ "description": "This is the assumed role ID"
+ },
+ "Arn": {
+ "type": "string",
+ "description": "This is the assumed role ARN"
+ }
+ }
+ },
+ "AWSStsCredentials": {
+ "type": "object",
+ "properties": {
+ "AccessKeyId": {
+ "type": "string",
+ "description": "This is the access key ID for the AWS credential"
+ },
+ "Expiration": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the expiration date for the AWS credential"
+ },
+ "SecretAccessKey": {
+ "type": "string",
+ "description": "This is the secret access key for the AWS credential"
+ },
+ "SessionToken": {
+ "type": "string",
+ "description": "This is the session token for the AWS credential"
+ }
+ }
+ },
+ "AWSStsAuthenticationSession": {
+ "type": "object",
+ "properties": {
+ "assumedRoleUser": {
+ "description": "This is the assumed role user",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AWSStsAssumeRoleUser"
+ }
+ ]
+ },
+ "credentials": {
+ "description": "This is the credentials for the AWS STS assume role",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AWSStsCredentials"
+ }
+ ]
+ },
+ "packedPolicySize": {
+ "type": "number",
+ "description": "This is the size of the policy"
+ },
+ "sourcedIDEntity": {
+ "type": "string",
+ "description": "This is the sourced ID entity"
+ }
+ }
+ },
+ "AnthropicBedrockCredential": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "anthropic-bedrock"
+ ]
+ },
+ "region": {
+ "type": "string",
+ "description": "AWS region where Bedrock is configured.",
+ "enum": [
+ "us-east-1",
+ "us-west-2",
+ "eu-west-1",
+ "eu-west-3",
+ "ap-northeast-1",
+ "ap-southeast-2"
+ ]
+ },
+ "authenticationPlan": {
+ "description": "Authentication method - either direct IAM credentials or cross-account role assumption.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AWSIAMCredentialsAuthenticationPlan"
+ },
+ {
+ "$ref": "#/components/schemas/AWSStsAuthenticationPlan"
+ }
+ ]
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the credential."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this credential belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the credential was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the assistant was last updated."
+ },
+ "authenticationArtifact": {
+ "description": "Stores the external ID (generated or user-provided) for future AssumeRole calls.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AWSStsAuthenticationArtifact"
+ }
+ ]
+ },
+ "authenticationSession": {
+ "description": "Cached authentication session from AssumeRole (temporary credentials).\nManaged by the system, auto-refreshed when expired.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AWSStsAuthenticationSession"
+ }
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "region",
+ "authenticationPlan",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
"AnyscaleCredential": {
"type": "object",
"properties": {
@@ -40292,22 +51523,27 @@
"type": "string",
"description": "This is the region of the Azure resource.",
"enum": [
- "australia",
+ "australiaeast",
"canadaeast",
"canadacentral",
+ "centralus",
"eastus2",
"eastus",
"france",
+ "germanywestcentral",
"india",
"japaneast",
"japanwest",
- "uaenorth",
"northcentralus",
"norway",
+ "polandcentral",
"southcentralus",
+ "spaincentral",
"swedencentral",
"switzerland",
+ "uaenorth",
"uk",
+ "westeurope",
"westus",
"westus3"
]
@@ -40376,22 +51612,27 @@
"region": {
"type": "string",
"enum": [
- "australia",
+ "australiaeast",
"canadaeast",
"canadacentral",
+ "centralus",
"eastus2",
"eastus",
"france",
+ "germanywestcentral",
"india",
"japaneast",
"japanwest",
- "uaenorth",
"northcentralus",
"norway",
+ "polandcentral",
"southcentralus",
+ "spaincentral",
"swedencentral",
"switzerland",
+ "uaenorth",
"uk",
+ "westeurope",
"westus",
"westus3"
]
@@ -40399,6 +51640,13 @@
"models": {
"type": "array",
"enum": [
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.2",
+ "gpt-5.2-chat",
+ "gpt-5.1",
+ "gpt-5.1-chat",
"gpt-5",
"gpt-5-mini",
"gpt-5-nano",
@@ -40423,6 +51671,13 @@
"items": {
"type": "string",
"enum": [
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.2",
+ "gpt-5.2-chat",
+ "gpt-5.1",
+ "gpt-5.1-chat",
"gpt-5",
"gpt-5-mini",
"gpt-5-nano",
@@ -40718,13 +51973,351 @@
"minLength": 1,
"maxLength": 40
},
- "bucketPlan": {
- "description": "This is the bucket plan that can be provided to store call artifacts in R2",
+ "bucketPlan": {
+ "description": "This is the bucket plan that can be provided to store call artifacts in R2",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CloudflareR2BucketPlan"
+ }
+ ]
+ }
+ },
+ "required": [
+ "provider",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "Oauth2AuthenticationSession": {
+ "type": "object",
+ "properties": {
+ "accessToken": {
+ "type": "string",
+ "description": "This is the OAuth2 access token."
+ },
+ "expiresAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the OAuth2 access token expiration."
+ },
+ "refreshToken": {
+ "type": "string",
+ "description": "This is the OAuth2 refresh token."
+ }
+ }
+ },
+ "CustomLLMCredential": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "custom-llm"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "maxLength": 10000,
+ "description": "This is not returned in the API."
+ },
+ "authenticationPlan": {
+ "description": "This is the authentication plan. Currently supports OAuth2 RFC 6749. To use Bearer authentication, use apiKey",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/OAuth2AuthenticationPlan"
+ }
+ ]
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the credential."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this credential belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the credential was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the assistant was last updated."
+ },
+ "authenticationSession": {
+ "description": "This is the authentication session for the credential. Available for credentials that have an authentication plan.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Oauth2AuthenticationSession"
+ }
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "DeepgramCredential": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "deepgram"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the credential."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this credential belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the credential was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the assistant was last updated."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ },
+ "apiUrl": {
+ "type": "string",
+ "description": "This can be used to point to an onprem Deepgram instance. Defaults to api.deepgram.com."
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "DeepInfraCredential": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "deepinfra"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the credential."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this credential belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the credential was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the assistant was last updated."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "DeepSeekCredential": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "deep-seek"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the credential."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this credential belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the credential was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the assistant was last updated."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "ElevenLabsCredential": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "11labs"
+ ]
+ },
+ "apiKey": {
+ "type": "string",
+ "maxLength": 10000,
+ "description": "This is not returned in the API."
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the credential."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this credential belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the credential was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the assistant was last updated."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey",
+ "id",
+ "orgId",
+ "createdAt",
+ "updatedAt"
+ ]
+ },
+ "GcpCredential": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "gcp"
+ ]
+ },
+ "fallbackIndex": {
+ "type": "number",
+ "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order.",
+ "minimum": 1
+ },
+ "id": {
+ "type": "string",
+ "description": "This is the unique identifier for the credential."
+ },
+ "orgId": {
+ "type": "string",
+ "description": "This is the unique identifier for the org that this credential belongs to."
+ },
+ "createdAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the credential was created."
+ },
+ "updatedAt": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the ISO 8601 date-time string of when the assistant was last updated."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ },
+ "gcpKey": {
+ "description": "This is the GCP key. This is the JSON that can be generated in the Google Cloud Console at https://console.cloud.google.com/iam-admin/serviceaccounts/details//keys.\n\nThe schema is identical to the JSON that GCP outputs.",
"allOf": [
{
- "$ref": "#/components/schemas/CloudflareR2BucketPlan"
+ "$ref": "#/components/schemas/GcpKey"
}
]
+ },
+ "region": {
+ "type": "string",
+ "description": "This is the region of the GCP resource.",
+ "maxLength": 40
+ },
+ "bucketPlan": {
+ "$ref": "#/components/schemas/BucketPlan"
}
},
"required": [
@@ -40732,49 +52325,23 @@
"id",
"orgId",
"createdAt",
- "updatedAt"
+ "updatedAt",
+ "gcpKey"
]
},
- "Oauth2AuthenticationSession": {
- "type": "object",
- "properties": {
- "accessToken": {
- "type": "string",
- "description": "This is the OAuth2 access token."
- },
- "expiresAt": {
- "format": "date-time",
- "type": "string",
- "description": "This is the OAuth2 access token expiration."
- },
- "refreshToken": {
- "type": "string",
- "description": "This is the OAuth2 refresh token."
- }
- }
- },
- "CustomLLMCredential": {
+ "GladiaCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "custom-llm"
+ "gladia"
]
},
"apiKey": {
"type": "string",
- "maxLength": 10000,
"description": "This is not returned in the API."
},
- "authenticationPlan": {
- "description": "This is the authentication plan. Currently supports OAuth2 RFC 6749. To use Bearer authentication, use apiKey",
- "allOf": [
- {
- "$ref": "#/components/schemas/OAuth2AuthenticationPlan"
- }
- ]
- },
"id": {
"type": "string",
"description": "This is the unique identifier for the credential."
@@ -40793,14 +52360,6 @@
"type": "string",
"description": "This is the ISO 8601 date-time string of when the assistant was last updated."
},
- "authenticationSession": {
- "description": "This is the authentication session for the credential. Available for credentials that have an authentication plan.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Oauth2AuthenticationSession"
- }
- ]
- },
"name": {
"type": "string",
"description": "This is the name of credential. This is just for your reference.",
@@ -40817,13 +52376,13 @@
"updatedAt"
]
},
- "DeepgramCredential": {
+ "GoHighLevelCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "deepgram"
+ "gohighlevel"
]
},
"apiKey": {
@@ -40853,10 +52412,6 @@
"description": "This is the name of credential. This is just for your reference.",
"minLength": 1,
"maxLength": 40
- },
- "apiUrl": {
- "type": "string",
- "description": "This can be used to point to an onprem Deepgram instance. Defaults to api.deepgram.com."
}
},
"required": [
@@ -40868,17 +52423,19 @@
"updatedAt"
]
},
- "DeepInfraCredential": {
+ "GoogleCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
+ "description": "This is the key for Gemini in Google AI Studio. Get it from here: https://aistudio.google.com/app/apikey",
"enum": [
- "deepinfra"
+ "google"
]
},
"apiKey": {
"type": "string",
+ "maxLength": 10000,
"description": "This is not returned in the API."
},
"id": {
@@ -40915,13 +52472,13 @@
"updatedAt"
]
},
- "DeepSeekCredential": {
+ "GroqCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "deep-seek"
+ "groq"
]
},
"apiKey": {
@@ -40962,13 +52519,13 @@
"updatedAt"
]
},
- "ElevenLabsCredential": {
+ "HumeCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "11labs"
+ "hume"
]
},
"apiKey": {
@@ -41010,19 +52567,20 @@
"updatedAt"
]
},
- "GcpCredential": {
+ "InflectionAICredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
+ "description": "This is the api key for Pi in InflectionAI's console. Get it from here: https://developers.inflection.ai/keys, billing will need to be setup",
"enum": [
- "gcp"
+ "inflection-ai"
]
},
- "fallbackIndex": {
- "type": "number",
- "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order.",
- "minimum": 1
+ "apiKey": {
+ "type": "string",
+ "maxLength": 10000,
+ "description": "This is not returned in the API."
},
"id": {
"type": "string",
@@ -41047,45 +52605,37 @@
"description": "This is the name of credential. This is just for your reference.",
"minLength": 1,
"maxLength": 40
- },
- "gcpKey": {
- "description": "This is the GCP key. This is the JSON that can be generated in the Google Cloud Console at https://console.cloud.google.com/iam-admin/serviceaccounts/details//keys.\n\nThe schema is identical to the JSON that GCP outputs.",
- "allOf": [
- {
- "$ref": "#/components/schemas/GcpKey"
- }
- ]
- },
- "region": {
- "type": "string",
- "description": "This is the region of the GCP resource.",
- "maxLength": 40
- },
- "bucketPlan": {
- "$ref": "#/components/schemas/BucketPlan"
}
},
"required": [
"provider",
+ "apiKey",
"id",
"orgId",
"createdAt",
- "updatedAt",
- "gcpKey"
+ "updatedAt"
]
},
- "GladiaCredential": {
+ "LangfuseCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "gladia"
+ "langfuse"
]
},
+ "publicKey": {
+ "type": "string",
+ "description": "The public key for Langfuse project. Eg: pk-lf-..."
+ },
"apiKey": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "The secret key for Langfuse project. Eg: sk-lf-... .This is not returned in the API."
+ },
+ "apiUrl": {
+ "type": "string",
+ "description": "The host URL for Langfuse project. Eg: https://cloud.langfuse.com"
},
"id": {
"type": "string",
@@ -41114,20 +52664,22 @@
},
"required": [
"provider",
+ "publicKey",
"apiKey",
+ "apiUrl",
"id",
"orgId",
"createdAt",
"updatedAt"
]
},
- "GoHighLevelCredential": {
+ "LmntCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "gohighlevel"
+ "lmnt"
]
},
"apiKey": {
@@ -41168,19 +52720,25 @@
"updatedAt"
]
},
- "GoogleCredential": {
+ "MakeCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
- "description": "This is the key for Gemini in Google AI Studio. Get it from here: https://aistudio.google.com/app/apikey",
"enum": [
- "google"
+ "make"
]
},
+ "teamId": {
+ "type": "string",
+ "description": "Team ID"
+ },
+ "region": {
+ "type": "string",
+ "description": "Region of your application. For example: eu1, eu2, us1, us2"
+ },
"apiKey": {
"type": "string",
- "maxLength": 10000,
"description": "This is not returned in the API."
},
"id": {
@@ -41210,6 +52768,8 @@
},
"required": [
"provider",
+ "teamId",
+ "region",
"apiKey",
"id",
"orgId",
@@ -41217,17 +52777,18 @@
"updatedAt"
]
},
- "GroqCredential": {
+ "MistralCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "groq"
+ "mistral"
]
},
"apiKey": {
"type": "string",
+ "maxLength": 100,
"description": "This is not returned in the API."
},
"id": {
@@ -41264,18 +52825,17 @@
"updatedAt"
]
},
- "HumeCredential": {
+ "NeuphonicCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "hume"
+ "neuphonic"
]
},
"apiKey": {
"type": "string",
- "maxLength": 10000,
"description": "This is not returned in the API."
},
"id": {
@@ -41312,19 +52872,17 @@
"updatedAt"
]
},
- "InflectionAICredential": {
+ "OpenAICredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
- "description": "This is the api key for Pi in InflectionAI's console. Get it from here: https://developers.inflection.ai/keys, billing will need to be setup",
"enum": [
- "inflection-ai"
+ "openai"
]
},
"apiKey": {
"type": "string",
- "maxLength": 10000,
"description": "This is not returned in the API."
},
"id": {
@@ -41361,26 +52919,18 @@
"updatedAt"
]
},
- "LangfuseCredential": {
+ "OpenRouterCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "langfuse"
+ "openrouter"
]
},
- "publicKey": {
- "type": "string",
- "description": "The public key for Langfuse project. Eg: pk-lf-..."
- },
"apiKey": {
"type": "string",
- "description": "The secret key for Langfuse project. Eg: sk-lf-... .This is not returned in the API."
- },
- "apiUrl": {
- "type": "string",
- "description": "The host URL for Langfuse project. Eg: https://cloud.langfuse.com"
+ "description": "This is not returned in the API."
},
"id": {
"type": "string",
@@ -41409,22 +52959,20 @@
},
"required": [
"provider",
- "publicKey",
"apiKey",
- "apiUrl",
"id",
"orgId",
"createdAt",
"updatedAt"
]
},
- "LmntCredential": {
+ "PerplexityAICredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "lmnt"
+ "perplexity-ai"
]
},
"apiKey": {
@@ -41465,23 +53013,15 @@
"updatedAt"
]
},
- "MakeCredential": {
+ "PlayHTCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "make"
+ "playht"
]
},
- "teamId": {
- "type": "string",
- "description": "Team ID"
- },
- "region": {
- "type": "string",
- "description": "Region of your application. For example: eu1, eu2, us1, us2"
- },
"apiKey": {
"type": "string",
"description": "This is not returned in the API."
@@ -41509,31 +53049,32 @@
"description": "This is the name of credential. This is just for your reference.",
"minLength": 1,
"maxLength": 40
+ },
+ "userId": {
+ "type": "string"
}
},
"required": [
"provider",
- "teamId",
- "region",
"apiKey",
"id",
"orgId",
"createdAt",
- "updatedAt"
+ "updatedAt",
+ "userId"
]
},
- "MistralCredential": {
+ "RimeAICredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "mistral"
+ "rime-ai"
]
},
"apiKey": {
"type": "string",
- "maxLength": 100,
"description": "This is not returned in the API."
},
"id": {
@@ -41570,13 +53111,13 @@
"updatedAt"
]
},
- "NeuphonicCredential": {
+ "RunpodCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "neuphonic"
+ "runpod"
]
},
"apiKey": {
@@ -41617,17 +53158,18 @@
"updatedAt"
]
},
- "OpenAICredential": {
+ "WellSaidCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "openai"
+ "wellsaid"
]
},
"apiKey": {
"type": "string",
+ "maxLength": 10000,
"description": "This is not returned in the API."
},
"id": {
@@ -41664,18 +53206,40 @@
"updatedAt"
]
},
- "OpenRouterCredential": {
+ "S3Credential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "openrouter"
- ]
+ "s3"
+ ],
+ "description": "Credential provider. Only allowed value is s3"
},
- "apiKey": {
+ "awsAccessKeyId": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "AWS access key ID."
+ },
+ "awsSecretAccessKey": {
+ "type": "string",
+ "description": "AWS access key secret. This is not returned in the API."
+ },
+ "region": {
+ "type": "string",
+ "description": "AWS region in which the S3 bucket is located."
+ },
+ "s3BucketName": {
+ "type": "string",
+ "description": "AWS S3 bucket name."
+ },
+ "s3PathPrefix": {
+ "type": "string",
+ "description": "The path prefix for the uploaded recording. Ex. \"recordings/\""
+ },
+ "fallbackIndex": {
+ "type": "number",
+ "minimum": 1,
+ "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order."
},
"id": {
"type": "string",
@@ -41704,20 +53268,24 @@
},
"required": [
"provider",
- "apiKey",
+ "awsAccessKeyId",
+ "awsSecretAccessKey",
+ "region",
+ "s3BucketName",
+ "s3PathPrefix",
"id",
"orgId",
"createdAt",
"updatedAt"
]
},
- "PerplexityAICredential": {
+ "SmallestAICredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "perplexity-ai"
+ "smallest-ai"
]
},
"apiKey": {
@@ -41758,13 +53326,13 @@
"updatedAt"
]
},
- "PlayHTCredential": {
+ "SonioxCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "playht"
+ "soniox"
]
},
"apiKey": {
@@ -41794,9 +53362,6 @@
"description": "This is the name of credential. This is just for your reference.",
"minLength": 1,
"maxLength": 40
- },
- "userId": {
- "type": "string"
}
},
"required": [
@@ -41805,17 +53370,16 @@
"id",
"orgId",
"createdAt",
- "updatedAt",
- "userId"
+ "updatedAt"
]
},
- "RimeAICredential": {
+ "SpeechmaticsCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "rime-ai"
+ "speechmatics"
]
},
"apiKey": {
@@ -41856,18 +53420,20 @@
"updatedAt"
]
},
- "RunpodCredential": {
+ "SupabaseCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "runpod"
- ]
+ "supabase"
+ ],
+ "description": "This is for supabase storage."
},
- "apiKey": {
- "type": "string",
- "description": "This is not returned in the API."
+ "fallbackIndex": {
+ "type": "number",
+ "minimum": 1,
+ "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order."
},
"id": {
"type": "string",
@@ -41892,51 +53458,31 @@
"description": "This is the name of credential. This is just for your reference.",
"minLength": 1,
"maxLength": 40
+ },
+ "bucketPlan": {
+ "$ref": "#/components/schemas/SupabaseBucketPlan"
}
},
"required": [
"provider",
- "apiKey",
"id",
"orgId",
"createdAt",
"updatedAt"
]
},
- "S3Credential": {
+ "TavusCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "s3"
- ],
- "description": "Credential provider. Only allowed value is s3"
- },
- "awsAccessKeyId": {
- "type": "string",
- "description": "AWS access key ID."
- },
- "awsSecretAccessKey": {
- "type": "string",
- "description": "AWS access key secret. This is not returned in the API."
- },
- "region": {
- "type": "string",
- "description": "AWS region in which the S3 bucket is located."
- },
- "s3BucketName": {
- "type": "string",
- "description": "AWS S3 bucket name."
+ "tavus"
+ ]
},
- "s3PathPrefix": {
+ "apiKey": {
"type": "string",
- "description": "The path prefix for the uploaded recording. Ex. \"recordings/\""
- },
- "fallbackIndex": {
- "type": "number",
- "minimum": 1,
- "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order."
+ "description": "This is not returned in the API."
},
"id": {
"type": "string",
@@ -41965,24 +53511,20 @@
},
"required": [
"provider",
- "awsAccessKeyId",
- "awsSecretAccessKey",
- "region",
- "s3BucketName",
- "s3PathPrefix",
+ "apiKey",
"id",
"orgId",
"createdAt",
"updatedAt"
]
},
- "SmallestAICredential": {
+ "TogetherAICredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "smallest-ai"
+ "together-ai"
]
},
"apiKey": {
@@ -42023,13 +53565,13 @@
"updatedAt"
]
},
- "SpeechmaticsCredential": {
+ "TrieveCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "speechmatics"
+ "trieve"
]
},
"apiKey": {
@@ -42070,20 +53612,26 @@
"updatedAt"
]
},
- "SupabaseCredential": {
+ "TwilioCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "supabase"
- ],
- "description": "This is for supabase storage."
+ "twilio"
+ ]
},
- "fallbackIndex": {
- "type": "number",
- "minimum": 1,
- "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order."
+ "authToken": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "apiSecret": {
+ "type": "string",
+ "description": "This is not returned in the API."
},
"id": {
"type": "string",
@@ -42109,8 +53657,8 @@
"minLength": 1,
"maxLength": 40
},
- "bucketPlan": {
- "$ref": "#/components/schemas/SupabaseBucketPlan"
+ "accountSid": {
+ "type": "string"
}
},
"required": [
@@ -42118,19 +53666,25 @@
"id",
"orgId",
"createdAt",
- "updatedAt"
+ "updatedAt",
+ "accountSid"
]
},
- "TavusCredential": {
+ "VonageCredential": {
"type": "object",
"properties": {
+ "vonageApplicationPrivateKey": {
+ "type": "string",
+ "description": "This is not returned in the API.",
+ "maxLength": 10000
+ },
"provider": {
"type": "string",
"enum": [
- "tavus"
+ "vonage"
]
},
- "apiKey": {
+ "apiSecret": {
"type": "string",
"description": "This is not returned in the API."
},
@@ -42152,34 +53706,63 @@
"type": "string",
"description": "This is the ISO 8601 date-time string of when the assistant was last updated."
},
+ "vonageApplicationId": {
+ "type": "string",
+ "description": "This is the Vonage Application ID for the credential.\n\nOnly relevant for Vonage credentials.",
+ "maxLength": 10000
+ },
"name": {
"type": "string",
"description": "This is the name of credential. This is just for your reference.",
"minLength": 1,
"maxLength": 40
+ },
+ "apiKey": {
+ "type": "string"
}
},
"required": [
+ "vonageApplicationPrivateKey",
"provider",
- "apiKey",
+ "apiSecret",
"id",
"orgId",
"createdAt",
- "updatedAt"
+ "updatedAt",
+ "vonageApplicationId",
+ "apiKey"
]
},
- "TogetherAICredential": {
+ "WebhookCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "together-ai"
+ "webhook"
]
},
- "apiKey": {
- "type": "string",
- "description": "This is not returned in the API."
+ "authenticationPlan": {
+ "description": "This is the authentication plan. Supports OAuth2 RFC 6749, HMAC signing, and Bearer authentication.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/OAuth2AuthenticationPlan"
+ },
+ {
+ "$ref": "#/components/schemas/HMACAuthenticationPlan"
+ },
+ {
+ "$ref": "#/components/schemas/BearerAuthenticationPlan"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "oauth2": "#/components/schemas/OAuth2AuthenticationPlan",
+ "hmac": "#/components/schemas/HMACAuthenticationPlan",
+ "bearer": "#/components/schemas/BearerAuthenticationPlan"
+ }
+ }
},
"id": {
"type": "string",
@@ -42199,6 +53782,14 @@
"type": "string",
"description": "This is the ISO 8601 date-time string of when the assistant was last updated."
},
+ "authenticationSession": {
+ "description": "This is the authentication session for the credential. Available for credentials that have an authentication plan.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Oauth2AuthenticationSession"
+ }
+ ]
+ },
"name": {
"type": "string",
"description": "This is the name of credential. This is just for your reference.",
@@ -42208,25 +53799,131 @@
},
"required": [
"provider",
- "apiKey",
+ "authenticationPlan",
"id",
"orgId",
"createdAt",
- "updatedAt"
+ "updatedAt",
+ "authenticationSession"
]
},
- "TrieveCredential": {
+ "SpkiPemPublicKeyConfig": {
"type": "object",
"properties": {
- "provider": {
+ "name": {
+ "type": "string",
+ "description": "Optional name of the key for identification purposes.",
+ "maxLength": 100
+ },
+ "format": {
"type": "string",
+ "description": "The format of the public key.",
"enum": [
- "trieve"
+ "spki-pem"
]
},
- "apiKey": {
+ "pem": {
"type": "string",
- "description": "This is not returned in the API."
+ "description": "The PEM-encoded public key."
+ }
+ },
+ "required": [
+ "format",
+ "pem"
+ ]
+ },
+ "PublicKeyEncryptionPlan": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "The type of encryption plan.",
+ "enum": [
+ "public-key"
+ ]
+ },
+ "algorithm": {
+ "type": "string",
+ "description": "The encryption algorithm to use.",
+ "enum": [
+ "RSA-OAEP-256"
+ ]
+ },
+ "publicKey": {
+ "description": "The public key configuration.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/SpkiPemPublicKeyConfig"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "format",
+ "mapping": {
+ "spki-pem": "#/components/schemas/SpkiPemPublicKeyConfig"
+ }
+ },
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/SpkiPemPublicKeyConfig"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "algorithm",
+ "publicKey"
+ ]
+ },
+ "CustomCredential": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "custom-credential"
+ ]
+ },
+ "authenticationPlan": {
+ "description": "This is the authentication plan. Supports OAuth2 RFC 6749, HMAC signing, and Bearer authentication.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/OAuth2AuthenticationPlan"
+ },
+ {
+ "$ref": "#/components/schemas/HMACAuthenticationPlan"
+ },
+ {
+ "$ref": "#/components/schemas/BearerAuthenticationPlan"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "oauth2": "#/components/schemas/OAuth2AuthenticationPlan",
+ "hmac": "#/components/schemas/HMACAuthenticationPlan",
+ "bearer": "#/components/schemas/BearerAuthenticationPlan"
+ }
+ }
+ },
+ "encryptionPlan": {
+ "description": "This is the encryption plan for encrypting sensitive data. Currently supports public-key encryption.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PublicKeyEncryptionPlan"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "public-key": "#/components/schemas/PublicKeyEncryptionPlan"
+ }
+ },
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/PublicKeyEncryptionPlan"
+ }
+ ]
},
"id": {
"type": "string",
@@ -42246,6 +53943,14 @@
"type": "string",
"description": "This is the ISO 8601 date-time string of when the assistant was last updated."
},
+ "authenticationSession": {
+ "description": "This is the authentication session for the credential. Available for credentials that have an authentication plan.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Oauth2AuthenticationSession"
+ }
+ ]
+ },
"name": {
"type": "string",
"description": "This is the name of credential. This is just for your reference.",
@@ -42255,32 +53960,27 @@
},
"required": [
"provider",
- "apiKey",
+ "authenticationPlan",
"id",
"orgId",
"createdAt",
- "updatedAt"
+ "updatedAt",
+ "authenticationSession"
]
},
- "TwilioCredential": {
+ "XAiCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
+ "description": "This is the api key for Grok in XAi's console. Get it from here: https://console.x.ai",
"enum": [
- "twilio"
+ "xai"
]
},
- "authToken": {
- "type": "string",
- "description": "This is not returned in the API."
- },
"apiKey": {
"type": "string",
- "description": "This is not returned in the API."
- },
- "apiSecret": {
- "type": "string",
+ "maxLength": 10000,
"description": "This is not returned in the API."
},
"id": {
@@ -42306,38 +54006,26 @@
"description": "This is the name of credential. This is just for your reference.",
"minLength": 1,
"maxLength": 40
- },
- "accountSid": {
- "type": "string"
}
},
"required": [
"provider",
+ "apiKey",
"id",
"orgId",
"createdAt",
- "updatedAt",
- "accountSid"
+ "updatedAt"
]
},
- "VonageCredential": {
+ "GoogleCalendarOAuth2ClientCredential": {
"type": "object",
"properties": {
- "vonageApplicationPrivateKey": {
- "type": "string",
- "description": "This is not returned in the API.",
- "maxLength": 10000
- },
"provider": {
"type": "string",
"enum": [
- "vonage"
+ "google.calendar.oauth2-client"
]
},
- "apiSecret": {
- "type": "string",
- "description": "This is not returned in the API."
- },
"id": {
"type": "string",
"description": "This is the unique identifier for the credential."
@@ -42356,59 +54044,33 @@
"type": "string",
"description": "This is the ISO 8601 date-time string of when the assistant was last updated."
},
- "vonageApplicationId": {
- "type": "string",
- "description": "This is the Vonage Application ID for the credential.\n\nOnly relevant for Vonage credentials.",
- "maxLength": 10000
- },
"name": {
"type": "string",
"description": "This is the name of credential. This is just for your reference.",
"minLength": 1,
"maxLength": 40
- },
- "apiKey": {
- "type": "string"
}
},
"required": [
- "vonageApplicationPrivateKey",
"provider",
- "apiSecret",
"id",
"orgId",
"createdAt",
- "updatedAt",
- "vonageApplicationId",
- "apiKey"
+ "updatedAt"
]
},
- "WebhookCredential": {
+ "GoogleCalendarOAuth2AuthorizationCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "webhook"
+ "google.calendar.oauth2-authorization"
]
},
- "authenticationPlan": {
- "description": "This is the authentication plan. Supports OAuth2 RFC 6749 and HMAC signing.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/OAuth2AuthenticationPlan"
- },
- {
- "$ref": "#/components/schemas/HMACAuthenticationPlan"
- }
- ],
- "discriminator": {
- "propertyName": "type",
- "mapping": {
- "oauth2": "#/components/schemas/OAuth2AuthenticationPlan",
- "hmac": "#/components/schemas/HMACAuthenticationPlan"
- }
- }
+ "authorizationId": {
+ "type": "string",
+ "description": "The authorization ID for the OAuth2 authorization"
},
"id": {
"type": "string",
@@ -42428,14 +54090,6 @@
"type": "string",
"description": "This is the ISO 8601 date-time string of when the assistant was last updated."
},
- "authenticationSession": {
- "description": "This is the authentication session for the credential. Available for credentials that have an authentication plan.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Oauth2AuthenticationSession"
- }
- ]
- },
"name": {
"type": "string",
"description": "This is the name of credential. This is just for your reference.",
@@ -42445,28 +54099,25 @@
},
"required": [
"provider",
- "authenticationPlan",
+ "authorizationId",
"id",
"orgId",
"createdAt",
- "updatedAt",
- "authenticationSession"
+ "updatedAt"
]
},
- "XAiCredential": {
+ "GoogleSheetsOAuth2AuthorizationCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
- "description": "This is the api key for Grok in XAi's console. Get it from here: https://console.x.ai",
"enum": [
- "xai"
+ "google.sheets.oauth2-authorization"
]
},
- "apiKey": {
+ "authorizationId": {
"type": "string",
- "maxLength": 10000,
- "description": "This is not returned in the API."
+ "description": "The authorization ID for the OAuth2 authorization"
},
"id": {
"type": "string",
@@ -42495,22 +54146,26 @@
},
"required": [
"provider",
- "apiKey",
+ "authorizationId",
"id",
"orgId",
"createdAt",
"updatedAt"
]
},
- "GoogleCalendarOAuth2ClientCredential": {
+ "SlackOAuth2AuthorizationCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "google.calendar.oauth2-client"
+ "slack.oauth2-authorization"
]
},
+ "authorizationId": {
+ "type": "string",
+ "description": "The authorization ID for the OAuth2 authorization"
+ },
"id": {
"type": "string",
"description": "This is the unique identifier for the credential."
@@ -42538,24 +54193,29 @@
},
"required": [
"provider",
+ "authorizationId",
"id",
"orgId",
"createdAt",
"updatedAt"
]
},
- "GoogleCalendarOAuth2AuthorizationCredential": {
+ "GoHighLevelMCPCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "google.calendar.oauth2-authorization"
+ "ghl.oauth2-authorization"
]
},
- "authorizationId": {
- "type": "string",
- "description": "The authorization ID for the OAuth2 authorization"
+ "authenticationSession": {
+ "description": "This is the authentication session for the credential.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Oauth2AuthenticationSession"
+ }
+ ]
},
"id": {
"type": "string",
@@ -42584,25 +54244,26 @@
},
"required": [
"provider",
- "authorizationId",
+ "authenticationSession",
"id",
"orgId",
"createdAt",
"updatedAt"
]
},
- "GoogleSheetsOAuth2AuthorizationCredential": {
+ "InworldCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "google.sheets.oauth2-authorization"
+ "inworld"
]
},
- "authorizationId": {
+ "apiKey": {
"type": "string",
- "description": "The authorization ID for the OAuth2 authorization"
+ "description": "This is the Inworld Basic (Base64) authentication token. This is not returned in the API.",
+ "example": "your-base64-token-here"
},
"id": {
"type": "string",
@@ -42631,25 +54292,25 @@
},
"required": [
"provider",
- "authorizationId",
+ "apiKey",
"id",
"orgId",
"createdAt",
"updatedAt"
]
},
- "SlackOAuth2AuthorizationCredential": {
+ "EmailCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "slack.oauth2-authorization"
+ "email"
]
},
- "authorizationId": {
+ "email": {
"type": "string",
- "description": "The authorization ID for the OAuth2 authorization"
+ "description": "The recipient email address for alerts"
},
"id": {
"type": "string",
@@ -42678,29 +54339,25 @@
},
"required": [
"provider",
- "authorizationId",
+ "email",
"id",
"orgId",
"createdAt",
"updatedAt"
]
},
- "GoHighLevelMCPCredential": {
+ "SlackWebhookCredential": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "ghl.oauth2-authorization"
+ "slack-webhook"
]
},
- "authenticationSession": {
- "description": "This is the authentication session for the credential.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Oauth2AuthenticationSession"
- }
- ]
+ "webhookUrl": {
+ "type": "string",
+ "description": "Slack incoming webhook URL. See https://api.slack.com/messaging/webhooks for setup instructions. This is not returned in the API."
},
"id": {
"type": "string",
@@ -42729,44 +54386,53 @@
},
"required": [
"provider",
- "authenticationSession",
+ "webhookUrl",
"id",
"orgId",
"createdAt",
"updatedAt"
]
},
- "InworldCredential": {
+ "CreateCerebrasCredentialDTO": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "inworld"
+ "cerebras"
]
},
"apiKey": {
"type": "string",
- "description": "This is the Inworld Basic (Base64) authentication token. This is not returned in the API.",
- "example": "your-base64-token-here"
- },
- "id": {
- "type": "string",
- "description": "This is the unique identifier for the credential."
+ "maxLength": 10000,
+ "description": "This is not returned in the API."
},
- "orgId": {
+ "name": {
"type": "string",
- "description": "This is the unique identifier for the org that this credential belongs to."
- },
- "createdAt": {
- "format": "date-time",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "apiKey"
+ ]
+ },
+ "CreateGoogleCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the credential was created."
+ "description": "This is the key for Gemini in Google AI Studio. Get it from here: https://aistudio.google.com/app/apikey",
+ "enum": [
+ "google"
+ ]
},
- "updatedAt": {
- "format": "date-time",
+ "apiKey": {
"type": "string",
- "description": "This is the ISO 8601 date-time string of when the assistant was last updated."
+ "maxLength": 10000,
+ "description": "This is not returned in the API."
},
"name": {
"type": "string",
@@ -42777,20 +54443,16 @@
},
"required": [
"provider",
- "apiKey",
- "id",
- "orgId",
- "createdAt",
- "updatedAt"
+ "apiKey"
]
},
- "CreateCerebrasCredentialDTO": {
+ "CreateHumeCredentialDTO": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "cerebras"
+ "hume"
]
},
"apiKey": {
@@ -42810,14 +54472,14 @@
"apiKey"
]
},
- "CreateGoogleCredentialDTO": {
+ "CreateInflectionAICredentialDTO": {
"type": "object",
"properties": {
"provider": {
"type": "string",
- "description": "This is the key for Gemini in Google AI Studio. Get it from here: https://aistudio.google.com/app/apikey",
+ "description": "This is the api key for Pi in InflectionAI's console. Get it from here: https://developers.inflection.ai/keys, billing will need to be setup",
"enum": [
- "google"
+ "inflection-ai"
]
},
"apiKey": {
@@ -42837,18 +54499,18 @@
"apiKey"
]
},
- "CreateHumeCredentialDTO": {
+ "CreateMistralCredentialDTO": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "hume"
+ "mistral"
]
},
"apiKey": {
"type": "string",
- "maxLength": 10000,
+ "maxLength": 100,
"description": "This is not returned in the API."
},
"name": {
@@ -42863,19 +54525,17 @@
"apiKey"
]
},
- "CreateInflectionAICredentialDTO": {
+ "CreateNeuphonicCredentialDTO": {
"type": "object",
"properties": {
"provider": {
"type": "string",
- "description": "This is the api key for Pi in InflectionAI's console. Get it from here: https://developers.inflection.ai/keys, billing will need to be setup",
"enum": [
- "inflection-ai"
+ "neuphonic"
]
},
"apiKey": {
"type": "string",
- "maxLength": 10000,
"description": "This is not returned in the API."
},
"name": {
@@ -42890,18 +54550,18 @@
"apiKey"
]
},
- "CreateMistralCredentialDTO": {
+ "CreateWellSaidCredentialDTO": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "mistral"
+ "wellsaid"
]
},
"apiKey": {
"type": "string",
- "maxLength": 100,
+ "maxLength": 10000,
"description": "This is not returned in the API."
},
"name": {
@@ -42916,13 +54576,13 @@
"apiKey"
]
},
- "CreateNeuphonicCredentialDTO": {
+ "CreateSonioxCredentialDTO": {
"type": "object",
"properties": {
"provider": {
"type": "string",
"enum": [
- "neuphonic"
+ "soniox"
]
},
"apiKey": {
@@ -42991,6 +54651,68 @@
"apiKey"
]
},
+ "CreateCustomCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "custom-credential"
+ ]
+ },
+ "authenticationPlan": {
+ "description": "This is the authentication plan. Supports OAuth2 RFC 6749, HMAC signing, and Bearer authentication.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/OAuth2AuthenticationPlan"
+ },
+ {
+ "$ref": "#/components/schemas/HMACAuthenticationPlan"
+ },
+ {
+ "$ref": "#/components/schemas/BearerAuthenticationPlan"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "oauth2": "#/components/schemas/OAuth2AuthenticationPlan",
+ "hmac": "#/components/schemas/HMACAuthenticationPlan",
+ "bearer": "#/components/schemas/BearerAuthenticationPlan"
+ }
+ }
+ },
+ "encryptionPlan": {
+ "description": "This is the encryption plan for encrypting sensitive data. Currently supports public-key encryption.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PublicKeyEncryptionPlan"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "public-key": "#/components/schemas/PublicKeyEncryptionPlan"
+ }
+ },
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/PublicKeyEncryptionPlan"
+ }
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "authenticationPlan"
+ ]
+ },
"CreateGoHighLevelMCPCredentialDTO": {
"type": "object",
"properties": {
@@ -43046,6 +54768,56 @@
"apiKey"
]
},
+ "CreateEmailCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "email"
+ ]
+ },
+ "email": {
+ "type": "string",
+ "description": "The recipient email address for alerts"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "email"
+ ]
+ },
+ "CreateSlackWebhookCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "provider": {
+ "type": "string",
+ "enum": [
+ "slack-webhook"
+ ]
+ },
+ "webhookUrl": {
+ "type": "string",
+ "description": "Slack incoming webhook URL. See https://api.slack.com/messaging/webhooks for setup instructions. This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "provider",
+ "webhookUrl"
+ ]
+ },
"UpdateAnthropicCredentialDTO": {
"type": "object",
"properties": {
@@ -43062,6 +54834,40 @@
}
}
},
+ "UpdateAnthropicBedrockCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "region": {
+ "type": "string",
+ "description": "AWS region where Bedrock is configured.",
+ "enum": [
+ "us-east-1",
+ "us-west-2",
+ "eu-west-1",
+ "eu-west-3",
+ "ap-northeast-1",
+ "ap-southeast-2"
+ ]
+ },
+ "authenticationPlan": {
+ "description": "Authentication method - either direct IAM credentials or cross-account role assumption.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AWSIAMCredentialsAuthenticationPlan"
+ },
+ {
+ "$ref": "#/components/schemas/AWSStsAuthenticationPlan"
+ }
+ ]
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ }
+ },
"UpdateAnyscaleCredentialDTO": {
"type": "object",
"properties": {
@@ -43109,22 +54915,27 @@
"type": "string",
"description": "This is the region of the Azure resource.",
"enum": [
- "australia",
+ "australiaeast",
"canadaeast",
"canadacentral",
+ "centralus",
"eastus2",
"eastus",
"france",
+ "germanywestcentral",
"india",
"japaneast",
"japanwest",
- "uaenorth",
"northcentralus",
"norway",
+ "polandcentral",
"southcentralus",
+ "spaincentral",
"swedencentral",
"switzerland",
+ "uaenorth",
"uk",
+ "westeurope",
"westus",
"westus3"
]
@@ -43161,22 +54972,27 @@
"region": {
"type": "string",
"enum": [
- "australia",
+ "australiaeast",
"canadaeast",
"canadacentral",
+ "centralus",
"eastus2",
"eastus",
"france",
+ "germanywestcentral",
"india",
"japaneast",
"japanwest",
- "uaenorth",
"northcentralus",
"norway",
+ "polandcentral",
"southcentralus",
+ "spaincentral",
"swedencentral",
"switzerland",
+ "uaenorth",
"uk",
+ "westeurope",
"westus",
"westus3"
]
@@ -43184,6 +55000,13 @@
"models": {
"type": "array",
"enum": [
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.2",
+ "gpt-5.2-chat",
+ "gpt-5.1",
+ "gpt-5.1-chat",
"gpt-5",
"gpt-5-mini",
"gpt-5-nano",
@@ -43208,6 +55031,13 @@
"items": {
"type": "string",
"enum": [
+ "gpt-5.4",
+ "gpt-5.4-mini",
+ "gpt-5.4-nano",
+ "gpt-5.2",
+ "gpt-5.2-chat",
+ "gpt-5.1",
+ "gpt-5.1-chat",
"gpt-5",
"gpt-5-mini",
"gpt-5-nano",
@@ -43762,6 +55592,22 @@
}
}
},
+ "UpdateWellSaidCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "apiKey": {
+ "type": "string",
+ "maxLength": 10000,
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ }
+ },
"UpdateS3CredentialDTO": {
"type": "object",
"properties": {
@@ -43798,70 +55644,6 @@
}
}
},
- "UpdateSmallestAICredentialDTO": {
- "type": "object",
- "properties": {
- "apiKey": {
- "type": "string",
- "description": "This is not returned in the API."
- },
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- }
- },
- "UpdateSpeechmaticsCredentialDTO": {
- "type": "object",
- "properties": {
- "apiKey": {
- "type": "string",
- "description": "This is not returned in the API."
- },
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- }
- },
- "UpdateSupabaseCredentialDTO": {
- "type": "object",
- "properties": {
- "fallbackIndex": {
- "type": "number",
- "minimum": 1,
- "description": "This is the order in which this storage provider is tried during upload retries. Lower numbers are tried first in increasing order."
- },
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- },
- "bucketPlan": {
- "$ref": "#/components/schemas/SupabaseBucketPlan"
- }
- }
- },
- "UpdateTavusCredentialDTO": {
- "type": "object",
- "properties": {
- "apiKey": {
- "type": "string",
- "description": "This is not returned in the API."
- },
- "name": {
- "type": "string",
- "description": "This is the name of credential. This is just for your reference.",
- "minLength": 1,
- "maxLength": 40
- }
- }
- },
"UpdateTogetherAICredentialDTO": {
"type": "object",
"properties": {
@@ -43940,23 +55722,79 @@
"type": "object",
"properties": {
"authenticationPlan": {
- "description": "This is the authentication plan. Supports OAuth2 RFC 6749 and HMAC signing.",
+ "description": "This is the authentication plan. Supports OAuth2 RFC 6749, HMAC signing, and Bearer authentication.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/OAuth2AuthenticationPlan"
+ },
+ {
+ "$ref": "#/components/schemas/HMACAuthenticationPlan"
+ },
+ {
+ "$ref": "#/components/schemas/BearerAuthenticationPlan"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "oauth2": "#/components/schemas/OAuth2AuthenticationPlan",
+ "hmac": "#/components/schemas/HMACAuthenticationPlan",
+ "bearer": "#/components/schemas/BearerAuthenticationPlan"
+ }
+ }
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ }
+ },
+ "UpdateCustomCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "authenticationPlan": {
+ "description": "This is the authentication plan. Supports OAuth2 RFC 6749, HMAC signing, and Bearer authentication.",
"oneOf": [
{
"$ref": "#/components/schemas/OAuth2AuthenticationPlan"
},
{
"$ref": "#/components/schemas/HMACAuthenticationPlan"
+ },
+ {
+ "$ref": "#/components/schemas/BearerAuthenticationPlan"
}
],
"discriminator": {
"propertyName": "type",
"mapping": {
"oauth2": "#/components/schemas/OAuth2AuthenticationPlan",
- "hmac": "#/components/schemas/HMACAuthenticationPlan"
+ "hmac": "#/components/schemas/HMACAuthenticationPlan",
+ "bearer": "#/components/schemas/BearerAuthenticationPlan"
}
}
},
+ "encryptionPlan": {
+ "description": "This is the encryption plan for encrypting sensitive data. Currently supports public-key encryption.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/PublicKeyEncryptionPlan"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type",
+ "mapping": {
+ "public-key": "#/components/schemas/PublicKeyEncryptionPlan"
+ }
+ },
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/PublicKeyEncryptionPlan"
+ }
+ ]
+ },
"name": {
"type": "string",
"description": "This is the name of credential. This is just for your reference.",
@@ -44072,6 +55910,51 @@
}
}
},
+ "UpdateEmailCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "email": {
+ "type": "string",
+ "description": "The recipient email address for alerts"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ }
+ },
+ "UpdateSlackWebhookCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "webhookUrl": {
+ "type": "string",
+ "description": "Slack incoming webhook URL. See https://api.slack.com/messaging/webhooks for setup instructions. This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ }
+ },
+ "UpdateSonioxCredentialDTO": {
+ "type": "object",
+ "properties": {
+ "apiKey": {
+ "type": "string",
+ "description": "This is not returned in the API."
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of credential. This is just for your reference.",
+ "minLength": 1,
+ "maxLength": 40
+ }
+ }
+ },
"CredentialSessionResponse": {
"type": "object",
"properties": {
@@ -44086,11 +55969,18 @@
"CredentialEndUser": {
"type": "object",
"properties": {
+ "endUserEmail": {
+ "type": "string",
+ "nullable": true
+ },
"endUserId": {
"type": "string"
},
"organizationId": {
"type": "string"
+ },
+ "tags": {
+ "type": "object"
}
},
"required": [
@@ -44163,6 +56053,9 @@
},
"error": {
"$ref": "#/components/schemas/CredentialSessionError"
+ },
+ "tags": {
+ "type": "object"
}
},
"required": [
@@ -44258,22 +56151,82 @@
"algorithm"
]
},
- "CredentialSessionDTO": {
+ "BearerAuthenticationPlan": {
"type": "object",
"properties": {
- "provider": {
+ "type": {
"type": "string",
"enum": [
- "google.calendar.oauth2-client",
- "google.calendar.oauth2-authorization",
- "google.sheets.oauth2-authorization",
- "slack.oauth2-authorization"
- ],
- "description": "The type of credential to generate a session for. Only Nango user-facing providers are supported."
+ "bearer"
+ ]
+ },
+ "token": {
+ "type": "string",
+ "description": "This is the bearer token value."
+ },
+ "headerName": {
+ "type": "string",
+ "description": "This is the header name where the bearer token will be sent. Defaults to 'Authorization'."
+ },
+ "bearerPrefixEnabled": {
+ "type": "boolean",
+ "description": "Whether to include the 'Bearer ' prefix in the header value. Defaults to true."
}
},
"required": [
- "provider"
+ "type",
+ "token"
+ ]
+ },
+ "AWSIAMCredentialsAuthenticationPlan": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "aws-iam"
+ ]
+ },
+ "awsAccessKeyId": {
+ "type": "string",
+ "description": "AWS Access Key ID. This is not returned in the API.",
+ "maxLength": 128
+ },
+ "awsSecretAccessKey": {
+ "type": "string",
+ "description": "AWS Secret Access Key. This is not returned in the API.",
+ "maxLength": 256
+ }
+ },
+ "required": [
+ "type",
+ "awsAccessKeyId",
+ "awsSecretAccessKey"
+ ]
+ },
+ "AWSStsAuthenticationPlan": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of authentication plan",
+ "enum": [
+ "aws-sts"
+ ]
+ },
+ "roleArn": {
+ "type": "string",
+ "description": "This is the role ARN for the AWS credential"
+ },
+ "externalId": {
+ "type": "string",
+ "description": "Optional external ID for additional security in the role trust policy.",
+ "maxLength": 256
+ }
+ },
+ "required": [
+ "type",
+ "roleArn"
]
},
"ToolTemplateSetup": {
@@ -44575,6 +56528,10 @@
"$ref": "#/components/schemas/CreateBashToolDTO",
"title": "BashTool"
},
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
{
"$ref": "#/components/schemas/CreateComputerToolDTO",
"title": "ComputerTool"
@@ -44646,6 +56603,14 @@
{
"$ref": "#/components/schemas/CreateTransferCallToolDTO",
"title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
]
},
@@ -44738,6 +56703,10 @@
"$ref": "#/components/schemas/CreateBashToolDTO",
"title": "BashTool"
},
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
{
"$ref": "#/components/schemas/CreateComputerToolDTO",
"title": "ComputerTool"
@@ -44809,6 +56778,14 @@
{
"$ref": "#/components/schemas/CreateTransferCallToolDTO",
"title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
]
},
@@ -44923,6 +56900,10 @@
"$ref": "#/components/schemas/CreateBashToolDTO",
"title": "BashTool"
},
+ {
+ "$ref": "#/components/schemas/CreateCodeToolDTO",
+ "title": "CodeTool"
+ },
{
"$ref": "#/components/schemas/CreateComputerToolDTO",
"title": "ComputerTool"
@@ -44994,6 +56975,14 @@
{
"$ref": "#/components/schemas/CreateTransferCallToolDTO",
"title": "TransferCallTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateSipRequestToolDTO",
+ "title": "SipRequestTool"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVoicemailToolDTO",
+ "title": "VoicemailTool"
}
]
},
@@ -45096,7 +57085,9 @@
"tavus",
"sesame",
"inworld",
- "minimax"
+ "minimax",
+ "wellsaid",
+ "orpheus"
]
},
"providerId": {
@@ -45143,6 +57134,10 @@
"type": "string",
"description": "The preview URL of the voice."
},
+ "sortOrder": {
+ "type": "number",
+ "description": "The sort order of the voice for display purposes. Lower values appear first."
+ },
"description": {
"type": "string",
"description": "The description of the voice."
@@ -45210,7 +57205,9 @@
"tavus",
"sesame",
"inworld",
- "minimax"
+ "minimax",
+ "wellsaid",
+ "orpheus"
],
"items": {
"type": "string",
@@ -45231,7 +57228,9 @@
"tavus",
"sesame",
"inworld",
- "minimax"
+ "minimax",
+ "wellsaid",
+ "orpheus"
]
}
}
@@ -45250,6 +57249,70 @@
}
}
},
+ "CartesiaPronunciationDictItem": {
+ "type": "object",
+ "properties": {
+ "text": {
+ "type": "string",
+ "description": "The text to be replaced in pronunciation",
+ "example": "Vapi"
+ },
+ "alias": {
+ "type": "string",
+ "description": "The pronunciation alias or IPA representation\nCan be a \"sounds-like\" guidance (e.g., \"VAH-pee\") or IPA notation (e.g., \"<<ˈ|v|ɑ|ˈ|p|i>>\")",
+ "example": "VAH-pee"
+ }
+ },
+ "required": [
+ "text",
+ "alias"
+ ]
+ },
+ "CartesiaPronunciationDictionary": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the pronunciation dictionary",
+ "example": "dict_abc123"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the pronunciation dictionary",
+ "example": "My Dictionary"
+ },
+ "ownerId": {
+ "type": "string",
+ "description": "ID of the user who owns this dictionary",
+ "example": "user_xyz789"
+ },
+ "pinned": {
+ "type": "boolean",
+ "description": "Whether this dictionary is pinned for the user",
+ "example": false
+ },
+ "items": {
+ "description": "List of text-to-pronunciation mappings",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/CartesiaPronunciationDictItem"
+ }
+ },
+ "createdAt": {
+ "type": "string",
+ "description": "ISO 8601 timestamp of when the dictionary was created",
+ "example": "2024-01-15T10:30:00Z"
+ }
+ },
+ "required": [
+ "id",
+ "name",
+ "ownerId",
+ "pinned",
+ "items",
+ "createdAt"
+ ]
+ },
"ElevenLabsPronunciationDictionary": {
"type": "object",
"properties": {
@@ -45332,6 +57395,7 @@
"type": "string",
"description": "This is the provider that manages this resource.",
"enum": [
+ "cartesia",
"11labs"
]
},
@@ -45347,12 +57411,8 @@
"description": "This is the provider-specific identifier for the resource."
},
"resource": {
- "description": "This is the full resource data from the provider's API.",
- "allOf": [
- {
- "$ref": "#/components/schemas/ElevenLabsPronunciationDictionary"
- }
- ]
+ "type": "object",
+ "description": "This is the full resource data from the provider's API."
}
},
"required": [
@@ -45410,59 +57470,266 @@
}
},
"required": [
- "voiceId",
- "name"
+ "voiceId",
+ "name"
+ ]
+ },
+ "AddVoiceToProviderDTO": {
+ "type": "object",
+ "properties": {
+ "ownerId": {
+ "type": "string",
+ "description": "This is the owner_id of your shared voice which you want to add to your provider Account from Provider Voice Library"
+ },
+ "voiceId": {
+ "type": "string",
+ "description": "This is the voice_id of the shared voice which you want to add to your provider Account from Provider Voice Library"
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the new name of the voice which you want to have once you have added voice to your provider Account from Provider Voice Library"
+ }
+ },
+ "required": [
+ "ownerId",
+ "voiceId",
+ "name"
+ ]
+ },
+ "CloneVoiceDTO": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "This is the name of the cloned voice in the provider account."
+ },
+ "description": {
+ "type": "string",
+ "description": "This is the description of your cloned voice."
+ },
+ "labels": {
+ "type": "string",
+ "description": "Serialized labels dictionary for the voice."
+ },
+ "files": {
+ "description": "These are the files you want to use to clone your voice. Only Audio files are supported.",
+ "type": "array",
+ "items": {
+ "type": "string",
+ "format": "binary"
+ }
+ }
+ },
+ "required": [
+ "name",
+ "files"
+ ]
+ },
+ "VariableValueGroupBy": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "This is the key of the variable value to group by.",
+ "maxLength": 100
+ }
+ },
+ "required": [
+ "key"
+ ]
+ },
+ "TimeRange": {
+ "type": "object",
+ "properties": {
+ "step": {
+ "type": "string",
+ "description": "This is the time step for aggregations.\n\nIf not provided, defaults to returning for the entire time range.",
+ "enum": [
+ "second",
+ "minute",
+ "hour",
+ "day",
+ "week",
+ "month",
+ "quarter",
+ "year",
+ "decade",
+ "century",
+ "millennium"
+ ]
+ },
+ "start": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the start date for the time range.\n\nIf not provided, defaults to 7 days ago."
+ },
+ "end": {
+ "format": "date-time",
+ "type": "string",
+ "description": "This is the end date for the time range.\n\nIf not provided, defaults to now."
+ },
+ "timezone": {
+ "type": "string",
+ "description": "This is the timezone you want to set for the query.\n\nIf not provided, defaults to UTC."
+ }
+ }
+ },
+ "AnalyticsOperation": {
+ "type": "object",
+ "properties": {
+ "operation": {
+ "type": "string",
+ "description": "This is the aggregation operation you want to perform.",
+ "enum": [
+ "sum",
+ "avg",
+ "count",
+ "min",
+ "max",
+ "history"
+ ]
+ },
+ "column": {
+ "type": "string",
+ "description": "This is the column you want to perform the aggregation operation on.",
+ "enum": [
+ "id",
+ "cost",
+ "costBreakdown.llm",
+ "costBreakdown.stt",
+ "costBreakdown.tts",
+ "costBreakdown.vapi",
+ "costBreakdown.transport",
+ "costBreakdown.analysisBreakdown.summary",
+ "costBreakdown.transcriber",
+ "costBreakdown.ttsCharacters",
+ "costBreakdown.llmPromptTokens",
+ "costBreakdown.llmCompletionTokens",
+ "costBreakdown.llmCachedPromptTokens",
+ "duration",
+ "concurrency",
+ "minutesUsed"
+ ]
+ },
+ "alias": {
+ "type": "string",
+ "description": "This is the alias for column name returned. Defaults to `${operation}${column}`.",
+ "maxLength": 40
+ }
+ },
+ "required": [
+ "operation",
+ "column"
+ ]
+ },
+ "AnalyticsQuery": {
+ "type": "object",
+ "properties": {
+ "table": {
+ "type": "string",
+ "description": "This is the table you want to query.",
+ "enum": [
+ "call",
+ "subscription"
+ ]
+ },
+ "groupBy": {
+ "type": "array",
+ "description": "This is the list of columns you want to group by.",
+ "enum": [
+ "type",
+ "assistantId",
+ "endedReason",
+ "analysis.successEvaluation",
+ "status"
+ ],
+ "items": {
+ "type": "string",
+ "enum": [
+ "type",
+ "assistantId",
+ "endedReason",
+ "analysis.successEvaluation",
+ "status"
+ ]
+ }
+ },
+ "groupByVariableValue": {
+ "description": "This is the list of variable value keys you want to group by.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/VariableValueGroupBy"
+ }
+ },
+ "name": {
+ "type": "string",
+ "description": "This is the name of the query. This will be used to identify the query in the response.",
+ "maxLength": 40
+ },
+ "timeRange": {
+ "description": "This is the time range for the query.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/TimeRange"
+ }
+ ]
+ },
+ "operations": {
+ "description": "This is the list of operations you want to perform.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/AnalyticsOperation"
+ }
+ }
+ },
+ "required": [
+ "table",
+ "name",
+ "operations"
]
},
- "AddVoiceToProviderDTO": {
+ "AnalyticsQueryDTO": {
"type": "object",
"properties": {
- "ownerId": {
- "type": "string",
- "description": "This is the owner_id of your shared voice which you want to add to your provider Account from Provider Voice Library"
- },
- "voiceId": {
- "type": "string",
- "description": "This is the voice_id of the shared voice which you want to add to your provider Account from Provider Voice Library"
- },
- "name": {
- "type": "string",
- "description": "This is the new name of the voice which you want to have once you have added voice to your provider Account from Provider Voice Library"
+ "queries": {
+ "description": "This is the list of metric queries you want to perform.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/AnalyticsQuery"
+ }
}
},
"required": [
- "ownerId",
- "voiceId",
- "name"
+ "queries"
]
},
- "CloneVoiceDTO": {
+ "AnalyticsQueryResult": {
"type": "object",
"properties": {
"name": {
"type": "string",
- "description": "This is the name of the cloned voice in the provider account."
- },
- "description": {
- "type": "string",
- "description": "This is the description of your cloned voice."
+ "description": "This is the unique key for the query."
},
- "labels": {
- "type": "string",
- "description": "Serialized labels dictionary for the voice."
+ "timeRange": {
+ "description": "This is the time range for the query.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/TimeRange"
+ }
+ ]
},
- "files": {
- "description": "These are the files you want to use to clone your voice. Only Audio files are supported.",
+ "result": {
+ "description": "This is the result of the query, a list of unique groups with result of their aggregations.\n\nExample:\n\"result\": [\n { \"date\": \"2023-01-01\", \"assistantId\": \"123\", \"endedReason\": \"customer-ended-call\", \"sumDuration\": 120, \"avgCost\": 10.5 },\n { \"date\": \"2023-01-02\", \"assistantId\": \"123\", \"endedReason\": \"customer-did-not-give-microphone-permission\", \"sumDuration\": 0, \"avgCost\": 0 },\n // Additional results\n]",
"type": "array",
"items": {
- "type": "string",
- "format": "binary"
+ "type": "object"
}
}
},
"required": [
"name",
- "files"
+ "timeRange",
+ "result"
]
},
"ClientMessageWorkflowNodeStarted": {
@@ -45538,6 +57805,83 @@
"node"
]
},
+ "ClientMessageAssistantStarted": {
+ "type": "object",
+ "properties": {
+ "phoneNumber": {
+ "description": "This is the phone number that the message is associated with.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateByoPhoneNumberDTO",
+ "title": "ByoPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioPhoneNumberDTO",
+ "title": "TwilioPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonagePhoneNumberDTO",
+ "title": "VonagePhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVapiPhoneNumberDTO",
+ "title": "VapiPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTelnyxPhoneNumberDTO",
+ "title": "TelnyxPhoneNumber"
+ }
+ ]
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the message. \"assistant.started\" is sent when the assistant is started.",
+ "enum": [
+ "assistant.started"
+ ]
+ },
+ "timestamp": {
+ "type": "number",
+ "description": "This is the timestamp of the message."
+ },
+ "call": {
+ "description": "This is the call that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Call"
+ }
+ ]
+ },
+ "customer": {
+ "description": "This is the customer that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
+ ]
+ },
+ "assistant": {
+ "description": "This is the assistant that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ },
+ "newAssistant": {
+ "description": "This is the assistant that was updated.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "newAssistant"
+ ]
+ },
"ClientMessageConversationUpdate": {
"type": "object",
"properties": {
@@ -45818,6 +58162,10 @@
"model-output"
]
},
+ "turnId": {
+ "type": "string",
+ "description": "This is the unique identifier for the current LLM turn. All tokens from the same\nLLM response share the same turnId. Use this to group tokens and discard on interruption."
+ },
"timestamp": {
"type": "number",
"description": "This is the timestamp of the message."
@@ -45886,31 +58234,410 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"speech-update\" is sent whenever assistant or user start or stop speaking.",
- "enum": [
- "speech-update"
- ]
- },
- "status": {
- "type": "string",
- "description": "This is the status of the speech update.",
+ "description": "This is the type of the message. \"speech-update\" is sent whenever assistant or user start or stop speaking.",
+ "enum": [
+ "speech-update"
+ ]
+ },
+ "status": {
+ "type": "string",
+ "description": "This is the status of the speech update.",
+ "enum": [
+ "started",
+ "stopped"
+ ]
+ },
+ "role": {
+ "type": "string",
+ "description": "This is the role which the speech update is for.",
+ "enum": [
+ "assistant",
+ "user"
+ ]
+ },
+ "turn": {
+ "type": "number",
+ "description": "This is the turn number of the speech update (0-indexed)."
+ },
+ "timestamp": {
+ "type": "number",
+ "description": "This is the timestamp of the message."
+ },
+ "call": {
+ "description": "This is the call that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Call"
+ }
+ ]
+ },
+ "customer": {
+ "description": "This is the customer that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
+ ]
+ },
+ "assistant": {
+ "description": "This is the assistant that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "status",
+ "role"
+ ]
+ },
+ "ClientMessageTranscript": {
+ "type": "object",
+ "properties": {
+ "phoneNumber": {
+ "description": "This is the phone number that the message is associated with.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateByoPhoneNumberDTO",
+ "title": "ByoPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioPhoneNumberDTO",
+ "title": "TwilioPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonagePhoneNumberDTO",
+ "title": "VonagePhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVapiPhoneNumberDTO",
+ "title": "VapiPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTelnyxPhoneNumberDTO",
+ "title": "TelnyxPhoneNumber"
+ }
+ ]
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the message. \"transcript\" is sent as transcriber outputs partial or final transcript.",
+ "enum": [
+ "transcript",
+ "transcript[transcriptType=\"final\"]"
+ ]
+ },
+ "timestamp": {
+ "type": "number",
+ "description": "This is the timestamp of the message."
+ },
+ "call": {
+ "description": "This is the call that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Call"
+ }
+ ]
+ },
+ "customer": {
+ "description": "This is the customer that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
+ ]
+ },
+ "assistant": {
+ "description": "This is the assistant that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ },
+ "role": {
+ "type": "string",
+ "description": "This is the role for which the transcript is for.",
+ "enum": [
+ "assistant",
+ "user"
+ ]
+ },
+ "transcriptType": {
+ "type": "string",
+ "description": "This is the type of the transcript.",
+ "enum": [
+ "partial",
+ "final"
+ ]
+ },
+ "transcript": {
+ "type": "string",
+ "description": "This is the transcript content."
+ },
+ "isFiltered": {
+ "type": "boolean",
+ "description": "Indicates if the transcript was filtered for security reasons."
+ },
+ "detectedThreats": {
+ "description": "List of detected security threats if the transcript was filtered.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "originalTranscript": {
+ "type": "string",
+ "description": "The original transcript before filtering (only included if content was filtered)."
+ }
+ },
+ "required": [
+ "type",
+ "role",
+ "transcriptType",
+ "transcript"
+ ]
+ },
+ "ClientMessageToolCalls": {
+ "type": "object",
+ "properties": {
+ "phoneNumber": {
+ "description": "This is the phone number that the message is associated with.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateByoPhoneNumberDTO",
+ "title": "ByoPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioPhoneNumberDTO",
+ "title": "TwilioPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonagePhoneNumberDTO",
+ "title": "VonagePhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVapiPhoneNumberDTO",
+ "title": "VapiPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTelnyxPhoneNumberDTO",
+ "title": "TelnyxPhoneNumber"
+ }
+ ]
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the message. \"tool-calls\" is sent to call a tool.",
+ "enum": [
+ "tool-calls"
+ ]
+ },
+ "toolWithToolCallList": {
+ "type": "array",
+ "description": "This is the list of tool calls that the model is requesting along with the original tool configuration.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/FunctionToolWithToolCall",
+ "title": "FunctionToolWithToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/GhlToolWithToolCall",
+ "title": "GhlToolWithToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/MakeToolWithToolCall",
+ "title": "MakeToolWithToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/BashToolWithToolCall",
+ "title": "BashToolWithToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/ComputerToolWithToolCall",
+ "title": "ComputerToolWithToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/TextEditorToolWithToolCall",
+ "title": "TextEditorToolWithToolCall"
+ },
+ {
+ "$ref": "#/components/schemas/GoogleCalendarCreateEventToolWithToolCall",
+ "title": "GoogleCalendarCreateEventToolWithToolCall"
+ }
+ ]
+ }
+ },
+ "timestamp": {
+ "type": "number",
+ "description": "This is the timestamp of the message."
+ },
+ "call": {
+ "description": "This is the call that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Call"
+ }
+ ]
+ },
+ "customer": {
+ "description": "This is the customer that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
+ ]
+ },
+ "assistant": {
+ "description": "This is the assistant that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ },
+ "toolCallList": {
+ "description": "This is the list of tool calls that the model is requesting.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ToolCall"
+ }
+ }
+ },
+ "required": [
+ "toolWithToolCallList",
+ "toolCallList"
+ ]
+ },
+ "ClientMessageToolCallsResult": {
+ "type": "object",
+ "properties": {
+ "phoneNumber": {
+ "description": "This is the phone number that the message is associated with.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateByoPhoneNumberDTO",
+ "title": "ByoPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioPhoneNumberDTO",
+ "title": "TwilioPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonagePhoneNumberDTO",
+ "title": "VonagePhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVapiPhoneNumberDTO",
+ "title": "VapiPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTelnyxPhoneNumberDTO",
+ "title": "TelnyxPhoneNumber"
+ }
+ ]
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the message. \"tool-calls-result\" is sent to forward the result of a tool call to the client.",
+ "enum": [
+ "tool-calls-result"
+ ]
+ },
+ "timestamp": {
+ "type": "number",
+ "description": "This is the timestamp of the message."
+ },
+ "call": {
+ "description": "This is the call that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Call"
+ }
+ ]
+ },
+ "customer": {
+ "description": "This is the customer that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
+ ]
+ },
+ "assistant": {
+ "description": "This is the assistant that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ },
+ "toolCallResult": {
+ "type": "object",
+ "description": "This is the result of the tool call."
+ }
+ },
+ "required": [
+ "type",
+ "toolCallResult"
+ ]
+ },
+ "ClientMessageTransferUpdate": {
+ "type": "object",
+ "properties": {
+ "phoneNumber": {
+ "description": "This is the phone number that the message is associated with.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateByoPhoneNumberDTO",
+ "title": "ByoPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioPhoneNumberDTO",
+ "title": "TwilioPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonagePhoneNumberDTO",
+ "title": "VonagePhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVapiPhoneNumberDTO",
+ "title": "VapiPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTelnyxPhoneNumberDTO",
+ "title": "TelnyxPhoneNumber"
+ }
+ ]
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the message. \"transfer-update\" is sent whenever a transfer happens.",
"enum": [
- "started",
- "stopped"
+ "transfer-update"
]
},
- "role": {
- "type": "string",
- "description": "This is the role which the speech update is for.",
- "enum": [
- "assistant",
- "user"
+ "destination": {
+ "description": "This is the destination of the transfer.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationAssistant",
+ "title": "Assistant"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "Number"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "Sip"
+ }
]
},
- "turn": {
- "type": "number",
- "description": "This is the turn number of the speech update (0-indexed)."
- },
"timestamp": {
"type": "number",
"description": "This is the timestamp of the message."
@@ -45938,15 +58665,37 @@
"$ref": "#/components/schemas/CreateAssistantDTO"
}
]
+ },
+ "toAssistant": {
+ "description": "This is the assistant that the call is being transferred to. This is only sent if `destination.type` is \"assistant\".",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ },
+ "fromAssistant": {
+ "description": "This is the assistant that the call is being transferred from. This is only sent if `destination.type` is \"assistant\".",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ },
+ "toStepRecord": {
+ "type": "object",
+ "description": "This is the step that the conversation moved to."
+ },
+ "fromStepRecord": {
+ "type": "object",
+ "description": "This is the step that the conversation moved from."
}
},
"required": [
- "type",
- "status",
- "role"
+ "type"
]
},
- "ClientMessageTranscript": {
+ "ClientMessageUserInterrupted": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -45976,12 +58725,15 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"transcript\" is sent as transcriber outputs partial or final transcript.",
+ "description": "This is the type of the message. \"user-interrupted\" is sent when the user interrupts the assistant.",
"enum": [
- "transcript",
- "transcript[transcriptType=\"final\"]"
+ "user-interrupted"
]
},
+ "turnId": {
+ "type": "string",
+ "description": "This is the turnId of the LLM response that was interrupted. Matches the turnId\non model-output messages so clients can discard the interrupted turn's tokens."
+ },
"timestamp": {
"type": "number",
"description": "This is the timestamp of the message."
@@ -46009,51 +58761,13 @@
"$ref": "#/components/schemas/CreateAssistantDTO"
}
]
- },
- "role": {
- "type": "string",
- "description": "This is the role for which the transcript is for.",
- "enum": [
- "assistant",
- "user"
- ]
- },
- "transcriptType": {
- "type": "string",
- "description": "This is the type of the transcript.",
- "enum": [
- "partial",
- "final"
- ]
- },
- "transcript": {
- "type": "string",
- "description": "This is the transcript content."
- },
- "isFiltered": {
- "type": "boolean",
- "description": "Indicates if the transcript was filtered for security reasons."
- },
- "detectedThreats": {
- "description": "List of detected security threats if the transcript was filtered.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "originalTranscript": {
- "type": "string",
- "description": "The original transcript before filtering (only included if content was filtered)."
}
},
"required": [
- "type",
- "role",
- "transcriptType",
- "transcript"
+ "type"
]
},
- "ClientMessageToolCalls": {
+ "ClientMessageLanguageChangeDetected": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -46083,47 +58797,11 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"tool-calls\" is sent to call a tool.",
+ "description": "This is the type of the message. \"language-change-detected\" is sent when the transcriber is automatically switched based on the detected language.",
"enum": [
- "tool-calls"
+ "language-change-detected"
]
},
- "toolWithToolCallList": {
- "type": "array",
- "description": "This is the list of tools calls that the model is requesting along with the original tool configuration.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/FunctionToolWithToolCall",
- "title": "FunctionToolWithToolCall"
- },
- {
- "$ref": "#/components/schemas/GhlToolWithToolCall",
- "title": "GhlToolWithToolCall"
- },
- {
- "$ref": "#/components/schemas/MakeToolWithToolCall",
- "title": "MakeToolWithToolCall"
- },
- {
- "$ref": "#/components/schemas/BashToolWithToolCall",
- "title": "BashToolWithToolCall"
- },
- {
- "$ref": "#/components/schemas/ComputerToolWithToolCall",
- "title": "ComputerToolWithToolCall"
- },
- {
- "$ref": "#/components/schemas/TextEditorToolWithToolCall",
- "title": "TextEditorToolWithToolCall"
- },
- {
- "$ref": "#/components/schemas/GoogleCalendarCreateEventToolWithToolCall",
- "title": "GoogleCalendarCreateEventToolWithToolCall"
- }
- ]
- }
- },
"timestamp": {
"type": "number",
"description": "This is the timestamp of the message."
@@ -46152,20 +58830,17 @@
}
]
},
- "toolCallList": {
- "description": "This is the list of tool calls that the model is requesting.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/ToolCall"
- }
+ "language": {
+ "type": "string",
+ "description": "This is the language the transcriber is switched to."
}
},
"required": [
- "toolWithToolCallList",
- "toolCallList"
+ "type",
+ "language"
]
},
- "ClientMessageToolCallsResult": {
+ "ClientMessageVoiceInput": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -46195,9 +58870,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"tool-calls-result\" is sent to forward the result of a tool call to the client.",
+ "description": "This is the type of the message. \"voice-input\" is sent when a generation is requested from voice provider.",
"enum": [
- "tool-calls-result"
+ "voice-input"
]
},
"timestamp": {
@@ -46228,17 +58903,17 @@
}
]
},
- "toolCallResult": {
- "type": "object",
- "description": "This is the result of the tool call."
+ "input": {
+ "type": "string",
+ "description": "This is the voice input content."
}
},
"required": [
"type",
- "toolCallResult"
+ "input"
]
},
- "ClientMessageTransferUpdate": {
+ "ClientMessageAssistantSpeech": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -46268,27 +58943,43 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"transfer-update\" is sent whenever a transfer happens.",
+ "description": "This is the type of the message. \"assistant-speech\" is sent as assistant audio is being played.",
"enum": [
- "transfer-update"
+ "assistant.speechStarted"
]
},
- "destination": {
- "description": "This is the destination of the transfer.",
+ "text": {
+ "type": "string",
+ "description": "The full assistant text for the current turn. This is the complete text,\nnot an incremental delta — consumers should use `timing` metadata (e.g.\n`wordsSpoken`) to determine which portion has been spoken so far."
+ },
+ "turn": {
+ "type": "number",
+ "description": "This is the turn number of the assistant speech event (0-indexed)."
+ },
+ "source": {
+ "type": "string",
+ "description": "Indicates how the text was sourced.",
+ "enum": [
+ "model",
+ "force-say",
+ "custom-voice"
+ ]
+ },
+ "timing": {
+ "description": "Optional timing metadata. Shape depends on `timing.type`:\n\n- `word-alignment` (ElevenLabs): per-character timing at playback\n cadence. words[] includes space entries. Best consumed by tracking\n a running character count: join timing.words, add to a char cursor,\n and highlight text up to that position. No interpolation needed.\n\n- `word-progress` (Minimax with voice.subtitleType: 'word'): cursor-\n based word count per TTS segment. Use wordsSpoken as the anchor,\n interpolate forward using segmentDurationMs or timing.words until\n the next event arrives.\n\nWhen absent, the event is a text-only fallback for providers without\nword-level timing (e.g. Cartesia, Deepgram, Azure). Text emits once\nper TTS chunk when audio is playing. Optionally interpolate a word\ncursor at ~3.5 words/sec between events for approximate tracking.",
"oneOf": [
{
- "$ref": "#/components/schemas/TransferDestinationAssistant",
- "title": "Assistant"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "Number"
+ "$ref": "#/components/schemas/AssistantSpeechWordAlignmentTiming",
+ "title": "WordAlignmentTiming"
},
{
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "Sip"
+ "$ref": "#/components/schemas/AssistantSpeechWordProgressTiming",
+ "title": "WordProgressTiming"
}
- ]
+ ],
+ "discriminator": {
+ "propertyName": "type"
+ }
},
"timestamp": {
"type": "number",
@@ -46317,37 +59008,14 @@
"$ref": "#/components/schemas/CreateAssistantDTO"
}
]
- },
- "toAssistant": {
- "description": "This is the assistant that the call is being transferred to. This is only sent if `destination.type` is \"assistant\".",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateAssistantDTO"
- }
- ]
- },
- "fromAssistant": {
- "description": "This is the assistant that the call is being transferred from. This is only sent if `destination.type` is \"assistant\".",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateAssistantDTO"
- }
- ]
- },
- "toStepRecord": {
- "type": "object",
- "description": "This is the step that the conversation moved to."
- },
- "fromStepRecord": {
- "type": "object",
- "description": "This is the step that the conversation moved from. ="
}
},
"required": [
- "type"
+ "type",
+ "text"
]
},
- "ClientMessageUserInterrupted": {
+ "ClientMessageChatCreated": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -46377,9 +59045,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"user-interrupted\" is sent when the user interrupts the assistant.",
+ "description": "This is the type of the message. \"chat.created\" is sent when a new chat is created.",
"enum": [
- "user-interrupted"
+ "chat.created"
]
},
"timestamp": {
@@ -46409,13 +59077,22 @@
"$ref": "#/components/schemas/CreateAssistantDTO"
}
]
+ },
+ "chat": {
+ "description": "This is the chat that was created.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Chat"
+ }
+ ]
}
},
"required": [
- "type"
+ "type",
+ "chat"
]
},
- "ClientMessageLanguageChangeDetected": {
+ "ClientMessageChatDeleted": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -46445,9 +59122,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"language-change-detected\" is sent when the transcriber is automatically switched based on the detected language.",
+ "description": "This is the type of the message. \"chat.deleted\" is sent when a chat is deleted.",
"enum": [
- "language-change-detected"
+ "chat.deleted"
]
},
"timestamp": {
@@ -46478,17 +59155,21 @@
}
]
},
- "language": {
- "type": "string",
- "description": "This is the language the transcriber is switched to."
+ "chat": {
+ "description": "This is the chat that was deleted.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Chat"
+ }
+ ]
}
},
"required": [
"type",
- "language"
+ "chat"
]
},
- "ClientMessageVoiceInput": {
+ "ClientMessageSessionCreated": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -46518,82 +59199,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"voice-input\" is sent when a generation is requested from voice provider.",
- "enum": [
- "voice-input"
- ]
- },
- "timestamp": {
- "type": "number",
- "description": "This is the timestamp of the message."
- },
- "call": {
- "description": "This is the call that the message is associated with.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Call"
- }
- ]
- },
- "customer": {
- "description": "This is the customer that the message is associated with.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateCustomerDTO"
- }
- ]
- },
- "assistant": {
- "description": "This is the assistant that the message is associated with.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateAssistantDTO"
- }
- ]
- },
- "input": {
- "type": "string",
- "description": "This is the voice input content"
- }
- },
- "required": [
- "type",
- "input"
- ]
- },
- "ClientMessageChatCreated": {
- "type": "object",
- "properties": {
- "phoneNumber": {
- "description": "This is the phone number that the message is associated with.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/CreateByoPhoneNumberDTO",
- "title": "ByoPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/CreateTwilioPhoneNumberDTO",
- "title": "TwilioPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/CreateVonagePhoneNumberDTO",
- "title": "VonagePhoneNumber"
- },
- {
- "$ref": "#/components/schemas/CreateVapiPhoneNumberDTO",
- "title": "VapiPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/CreateTelnyxPhoneNumberDTO",
- "title": "TelnyxPhoneNumber"
- }
- ]
- },
- "type": {
- "type": "string",
- "description": "This is the type of the message. \"chat.created\" is sent when a new chat is created.",
+ "description": "This is the type of the message. \"session.created\" is sent when a new session is created.",
"enum": [
- "chat.created"
+ "session.created"
]
},
"timestamp": {
@@ -46624,21 +59232,21 @@
}
]
},
- "chat": {
- "description": "This is the chat that was created.",
+ "session": {
+ "description": "This is the session that was created.",
"allOf": [
{
- "$ref": "#/components/schemas/Chat"
+ "$ref": "#/components/schemas/Session"
}
]
}
},
"required": [
"type",
- "chat"
+ "session"
]
},
- "ClientMessageChatDeleted": {
+ "ClientMessageSessionUpdated": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -46668,9 +59276,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"chat.deleted\" is sent when a chat is deleted.",
+ "description": "This is the type of the message. \"session.updated\" is sent when a session is updated.",
"enum": [
- "chat.deleted"
+ "session.updated"
]
},
"timestamp": {
@@ -46701,21 +59309,21 @@
}
]
},
- "chat": {
- "description": "This is the chat that was deleted.",
+ "session": {
+ "description": "This is the session that was updated.",
"allOf": [
{
- "$ref": "#/components/schemas/Chat"
+ "$ref": "#/components/schemas/Session"
}
]
}
},
"required": [
"type",
- "chat"
+ "session"
]
},
- "ClientMessageSessionCreated": {
+ "ClientMessageSessionDeleted": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -46745,9 +59353,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"session.created\" is sent when a new session is created.",
+ "description": "This is the type of the message. \"session.deleted\" is sent when a session is deleted.",
"enum": [
- "session.created"
+ "session.deleted"
]
},
"timestamp": {
@@ -46779,7 +59387,7 @@
]
},
"session": {
- "description": "This is the session that was created.",
+ "description": "This is the session that was deleted.",
"allOf": [
{
"$ref": "#/components/schemas/Session"
@@ -46792,7 +59400,7 @@
"session"
]
},
- "ClientMessageSessionUpdated": {
+ "ClientMessageCallDeleted": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -46822,9 +59430,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"session.updated\" is sent when a session is updated.",
+ "description": "This is the type of the message. \"call.deleted\" is sent when a call is deleted.",
"enum": [
- "session.updated"
+ "call.deleted"
]
},
"timestamp": {
@@ -46854,22 +59462,13 @@
"$ref": "#/components/schemas/CreateAssistantDTO"
}
]
- },
- "session": {
- "description": "This is the session that was updated.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Session"
- }
- ]
}
},
"required": [
- "type",
- "session"
+ "type"
]
},
- "ClientMessageSessionDeleted": {
+ "ClientMessageCallDeleteFailed": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -46899,9 +59498,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"session.deleted\" is sent when a session is deleted.",
+ "description": "This is the type of the message. \"call.deleted\" is sent when a call is deleted.",
"enum": [
- "session.deleted"
+ "call.delete.failed"
]
},
"timestamp": {
@@ -46931,19 +59530,10 @@
"$ref": "#/components/schemas/CreateAssistantDTO"
}
]
- },
- "session": {
- "description": "This is the session that was deleted.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Session"
- }
- ]
}
},
"required": [
- "type",
- "session"
+ "type"
]
},
"ClientMessage": {
@@ -46956,6 +59546,10 @@
"$ref": "#/components/schemas/ClientMessageWorkflowNodeStarted",
"title": "WorkflowNodeStarted"
},
+ {
+ "$ref": "#/components/schemas/ClientMessageAssistantStarted",
+ "title": "AssistantStarted"
+ },
{
"$ref": "#/components/schemas/ClientMessageConversationUpdate",
"title": "ConversationUpdate"
@@ -47004,6 +59598,10 @@
"$ref": "#/components/schemas/ClientMessageVoiceInput",
"title": "VoiceInput"
},
+ {
+ "$ref": "#/components/schemas/ClientMessageAssistantSpeech",
+ "title": "AssistantSpeech"
+ },
{
"$ref": "#/components/schemas/ClientMessageChatCreated",
"title": "ChatCreated"
@@ -47023,6 +59621,14 @@
{
"$ref": "#/components/schemas/ClientMessageSessionDeleted",
"title": "SessionDeleted"
+ },
+ {
+ "$ref": "#/components/schemas/ClientMessageCallDeleted",
+ "title": "CallDeleted"
+ },
+ {
+ "$ref": "#/components/schemas/ClientMessageCallDeleteFailed",
+ "title": "CallDeleteFailed"
}
]
}
@@ -47292,12 +59898,14 @@
"call.start.error-vapi-number-outbound-daily-limit",
"call.start.error-get-transport",
"call.start.error-subscription-wallet-does-not-exist",
+ "call.start.error-fraud-check-failed",
"call.start.error-subscription-frozen",
"call.start.error-subscription-insufficient-credits",
"call.start.error-subscription-upgrade-failed",
"call.start.error-subscription-concurrency-limit-reached",
+ "call.start.error-enterprise-feature-not-available-recording-consent",
"assistant-not-valid",
- "database-error",
+ "call.start.error-vapifault-database-error",
"assistant-not-found",
"pipeline-error-openai-voice-failed",
"pipeline-error-cartesia-voice-failed",
@@ -47308,11 +59916,13 @@
"pipeline-error-azure-voice-failed",
"pipeline-error-rime-ai-voice-failed",
"pipeline-error-smallest-ai-voice-failed",
+ "pipeline-error-vapi-voice-failed",
"pipeline-error-neuphonic-voice-failed",
"pipeline-error-hume-voice-failed",
"pipeline-error-sesame-voice-failed",
"pipeline-error-inworld-voice-failed",
"pipeline-error-minimax-voice-failed",
+ "pipeline-error-wellsaid-voice-failed",
"pipeline-error-tavus-video-failed",
"call.in-progress.error-vapifault-openai-voice-failed",
"call.in-progress.error-vapifault-cartesia-voice-failed",
@@ -47323,11 +59933,13 @@
"call.in-progress.error-vapifault-azure-voice-failed",
"call.in-progress.error-vapifault-rime-ai-voice-failed",
"call.in-progress.error-vapifault-smallest-ai-voice-failed",
+ "call.in-progress.error-vapifault-vapi-voice-failed",
"call.in-progress.error-vapifault-neuphonic-voice-failed",
"call.in-progress.error-vapifault-hume-voice-failed",
"call.in-progress.error-vapifault-sesame-voice-failed",
"call.in-progress.error-vapifault-inworld-voice-failed",
"call.in-progress.error-vapifault-minimax-voice-failed",
+ "call.in-progress.error-vapifault-wellsaid-voice-failed",
"call.in-progress.error-vapifault-tavus-video-failed",
"pipeline-error-vapi-llm-failed",
"pipeline-error-vapi-400-bad-request-validation-failed",
@@ -47336,7 +59948,7 @@
"pipeline-error-vapi-429-exceeded-quota",
"pipeline-error-vapi-500-server-error",
"pipeline-error-vapi-503-server-overloaded-error",
- "call.in-progress.error-vapifault-vapi-llm-failed",
+ "call.in-progress.error-providerfault-vapi-llm-failed",
"call.in-progress.error-vapifault-vapi-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-vapi-401-unauthorized",
"call.in-progress.error-vapifault-vapi-403-model-access-denied",
@@ -47344,6 +59956,7 @@
"call.in-progress.error-providerfault-vapi-500-server-error",
"call.in-progress.error-providerfault-vapi-503-server-overloaded-error",
"pipeline-error-deepgram-transcriber-failed",
+ "pipeline-error-deepgram-transcriber-api-key-missing",
"call.in-progress.error-vapifault-deepgram-transcriber-failed",
"pipeline-error-gladia-transcriber-failed",
"call.in-progress.error-vapifault-gladia-transcriber-failed",
@@ -47365,6 +59978,22 @@
"call.in-progress.error-vapifault-talkscriber-transcriber-failed",
"pipeline-error-azure-speech-transcriber-failed",
"call.in-progress.error-vapifault-azure-speech-transcriber-failed",
+ "pipeline-error-eleven-labs-transcriber-failed",
+ "call.in-progress.error-vapifault-eleven-labs-transcriber-failed",
+ "pipeline-error-google-transcriber-failed",
+ "call.in-progress.error-vapifault-google-transcriber-failed",
+ "pipeline-error-openai-transcriber-failed",
+ "call.in-progress.error-vapifault-openai-transcriber-failed",
+ "pipeline-error-soniox-transcriber-auth-failed",
+ "pipeline-error-soniox-transcriber-rate-limited",
+ "pipeline-error-soniox-transcriber-invalid-config",
+ "pipeline-error-soniox-transcriber-server-error",
+ "pipeline-error-soniox-transcriber-failed",
+ "call.in-progress.error-vapifault-soniox-transcriber-auth-failed",
+ "call.in-progress.error-vapifault-soniox-transcriber-rate-limited",
+ "call.in-progress.error-vapifault-soniox-transcriber-invalid-config",
+ "call.in-progress.error-vapifault-soniox-transcriber-server-error",
+ "call.in-progress.error-vapifault-soniox-transcriber-failed",
"call.in-progress.error-pipeline-no-available-llm-model",
"worker-shutdown",
"vonage-disconnected",
@@ -47379,15 +60008,21 @@
"call.in-progress.error-vapifault-worker-died",
"call.in-progress.twilio-completed-call",
"call.in-progress.sip-completed-call",
- "call.in-progress.error-vapifault-openai-llm-failed",
- "call.in-progress.error-vapifault-azure-openai-llm-failed",
- "call.in-progress.error-vapifault-groq-llm-failed",
- "call.in-progress.error-vapifault-google-llm-failed",
- "call.in-progress.error-vapifault-xai-llm-failed",
- "call.in-progress.error-vapifault-mistral-llm-failed",
- "call.in-progress.error-vapifault-inflection-ai-llm-failed",
- "call.in-progress.error-vapifault-cerebras-llm-failed",
- "call.in-progress.error-vapifault-deep-seek-llm-failed",
+ "call.in-progress.error-sip-inbound-call-failed-to-connect",
+ "call.in-progress.error-providerfault-outbound-sip-503-service-unavailable",
+ "call.in-progress.error-sip-outbound-call-failed-to-connect",
+ "call.ringing.error-sip-inbound-call-failed-to-connect",
+ "call.in-progress.error-providerfault-openai-llm-failed",
+ "call.in-progress.error-providerfault-azure-openai-llm-failed",
+ "call.in-progress.error-providerfault-groq-llm-failed",
+ "call.in-progress.error-providerfault-google-llm-failed",
+ "call.in-progress.error-providerfault-xai-llm-failed",
+ "call.in-progress.error-providerfault-mistral-llm-failed",
+ "call.in-progress.error-providerfault-minimax-llm-failed",
+ "call.in-progress.error-providerfault-inflection-ai-llm-failed",
+ "call.in-progress.error-providerfault-cerebras-llm-failed",
+ "call.in-progress.error-providerfault-deep-seek-llm-failed",
+ "call.in-progress.error-providerfault-baseten-llm-failed",
"call.in-progress.error-vapifault-chat-pipeline-failed-to-start",
"pipeline-error-openai-400-bad-request-validation-failed",
"pipeline-error-openai-401-unauthorized",
@@ -47447,6 +60082,19 @@
"call.in-progress.error-vapifault-xai-429-exceeded-quota",
"call.in-progress.error-providerfault-xai-500-server-error",
"call.in-progress.error-providerfault-xai-503-server-overloaded-error",
+ "pipeline-error-baseten-400-bad-request-validation-failed",
+ "pipeline-error-baseten-401-unauthorized",
+ "pipeline-error-baseten-403-model-access-denied",
+ "pipeline-error-baseten-429-exceeded-quota",
+ "pipeline-error-baseten-500-server-error",
+ "pipeline-error-baseten-503-server-overloaded-error",
+ "pipeline-error-baseten-llm-failed",
+ "call.in-progress.error-vapifault-baseten-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-baseten-401-unauthorized",
+ "call.in-progress.error-vapifault-baseten-403-model-access-denied",
+ "call.in-progress.error-vapifault-baseten-429-exceeded-quota",
+ "call.in-progress.error-providerfault-baseten-500-server-error",
+ "call.in-progress.error-providerfault-baseten-503-server-overloaded-error",
"pipeline-error-mistral-400-bad-request-validation-failed",
"pipeline-error-mistral-401-unauthorized",
"pipeline-error-mistral-403-model-access-denied",
@@ -47460,6 +60108,19 @@
"call.in-progress.error-vapifault-mistral-429-exceeded-quota",
"call.in-progress.error-providerfault-mistral-500-server-error",
"call.in-progress.error-providerfault-mistral-503-server-overloaded-error",
+ "pipeline-error-minimax-400-bad-request-validation-failed",
+ "pipeline-error-minimax-401-unauthorized",
+ "pipeline-error-minimax-403-model-access-denied",
+ "pipeline-error-minimax-429-exceeded-quota",
+ "pipeline-error-minimax-500-server-error",
+ "pipeline-error-minimax-503-server-overloaded-error",
+ "pipeline-error-minimax-llm-failed",
+ "call.in-progress.error-vapifault-minimax-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-minimax-401-unauthorized",
+ "call.in-progress.error-vapifault-minimax-403-model-access-denied",
+ "call.in-progress.error-vapifault-minimax-429-exceeded-quota",
+ "call.in-progress.error-providerfault-minimax-500-server-error",
+ "call.in-progress.error-providerfault-minimax-503-server-overloaded-error",
"pipeline-error-inflection-ai-400-bad-request-validation-failed",
"pipeline-error-inflection-ai-401-unauthorized",
"pipeline-error-inflection-ai-403-model-access-denied",
@@ -47519,7 +60180,7 @@
"pipeline-error-anthropic-500-server-error",
"pipeline-error-anthropic-503-server-overloaded-error",
"pipeline-error-anthropic-llm-failed",
- "call.in-progress.error-vapifault-anthropic-llm-failed",
+ "call.in-progress.error-providerfault-anthropic-llm-failed",
"call.in-progress.error-vapifault-anthropic-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-anthropic-401-unauthorized",
"call.in-progress.error-vapifault-anthropic-403-model-access-denied",
@@ -47533,7 +60194,7 @@
"pipeline-error-anthropic-bedrock-500-server-error",
"pipeline-error-anthropic-bedrock-503-server-overloaded-error",
"pipeline-error-anthropic-bedrock-llm-failed",
- "call.in-progress.error-vapifault-anthropic-bedrock-llm-failed",
+ "call.in-progress.error-providerfault-anthropic-bedrock-llm-failed",
"call.in-progress.error-vapifault-anthropic-bedrock-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-anthropic-bedrock-401-unauthorized",
"call.in-progress.error-vapifault-anthropic-bedrock-403-model-access-denied",
@@ -47547,7 +60208,7 @@
"pipeline-error-anthropic-vertex-500-server-error",
"pipeline-error-anthropic-vertex-503-server-overloaded-error",
"pipeline-error-anthropic-vertex-llm-failed",
- "call.in-progress.error-vapifault-anthropic-vertex-llm-failed",
+ "call.in-progress.error-providerfault-anthropic-vertex-llm-failed",
"call.in-progress.error-vapifault-anthropic-vertex-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-anthropic-vertex-401-unauthorized",
"call.in-progress.error-vapifault-anthropic-vertex-403-model-access-denied",
@@ -47561,7 +60222,7 @@
"pipeline-error-together-ai-500-server-error",
"pipeline-error-together-ai-503-server-overloaded-error",
"pipeline-error-together-ai-llm-failed",
- "call.in-progress.error-vapifault-together-ai-llm-failed",
+ "call.in-progress.error-providerfault-together-ai-llm-failed",
"call.in-progress.error-vapifault-together-ai-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-together-ai-401-unauthorized",
"call.in-progress.error-vapifault-together-ai-403-model-access-denied",
@@ -47575,7 +60236,7 @@
"pipeline-error-anyscale-500-server-error",
"pipeline-error-anyscale-503-server-overloaded-error",
"pipeline-error-anyscale-llm-failed",
- "call.in-progress.error-vapifault-anyscale-llm-failed",
+ "call.in-progress.error-providerfault-anyscale-llm-failed",
"call.in-progress.error-vapifault-anyscale-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-anyscale-401-unauthorized",
"call.in-progress.error-vapifault-anyscale-403-model-access-denied",
@@ -47589,7 +60250,7 @@
"pipeline-error-openrouter-500-server-error",
"pipeline-error-openrouter-503-server-overloaded-error",
"pipeline-error-openrouter-llm-failed",
- "call.in-progress.error-vapifault-openrouter-llm-failed",
+ "call.in-progress.error-providerfault-openrouter-llm-failed",
"call.in-progress.error-vapifault-openrouter-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-openrouter-401-unauthorized",
"call.in-progress.error-vapifault-openrouter-403-model-access-denied",
@@ -47603,7 +60264,7 @@
"pipeline-error-perplexity-ai-500-server-error",
"pipeline-error-perplexity-ai-503-server-overloaded-error",
"pipeline-error-perplexity-ai-llm-failed",
- "call.in-progress.error-vapifault-perplexity-ai-llm-failed",
+ "call.in-progress.error-providerfault-perplexity-ai-llm-failed",
"call.in-progress.error-vapifault-perplexity-ai-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-perplexity-ai-401-unauthorized",
"call.in-progress.error-vapifault-perplexity-ai-403-model-access-denied",
@@ -47617,7 +60278,7 @@
"pipeline-error-deepinfra-500-server-error",
"pipeline-error-deepinfra-503-server-overloaded-error",
"pipeline-error-deepinfra-llm-failed",
- "call.in-progress.error-vapifault-deepinfra-llm-failed",
+ "call.in-progress.error-providerfault-deepinfra-llm-failed",
"call.in-progress.error-vapifault-deepinfra-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-deepinfra-401-unauthorized",
"call.in-progress.error-vapifault-deepinfra-403-model-access-denied",
@@ -47631,7 +60292,7 @@
"pipeline-error-runpod-500-server-error",
"pipeline-error-runpod-503-server-overloaded-error",
"pipeline-error-runpod-llm-failed",
- "call.in-progress.error-vapifault-runpod-llm-failed",
+ "call.in-progress.error-providerfault-runpod-llm-failed",
"call.in-progress.error-vapifault-runpod-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-runpod-401-unauthorized",
"call.in-progress.error-vapifault-runpod-403-model-access-denied",
@@ -47645,13 +60306,14 @@
"pipeline-error-custom-llm-500-server-error",
"pipeline-error-custom-llm-503-server-overloaded-error",
"pipeline-error-custom-llm-llm-failed",
- "call.in-progress.error-vapifault-custom-llm-llm-failed",
+ "call.in-progress.error-providerfault-custom-llm-llm-failed",
"call.in-progress.error-vapifault-custom-llm-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-custom-llm-401-unauthorized",
"call.in-progress.error-vapifault-custom-llm-403-model-access-denied",
"call.in-progress.error-vapifault-custom-llm-429-exceeded-quota",
"call.in-progress.error-providerfault-custom-llm-500-server-error",
"call.in-progress.error-providerfault-custom-llm-503-server-overloaded-error",
+ "call.in-progress.error-pipeline-ws-model-connection-failed",
"pipeline-error-custom-voice-failed",
"pipeline-error-cartesia-socket-hang-up",
"pipeline-error-cartesia-requested-payment",
@@ -47707,6 +60369,7 @@
"call.in-progress.error-vapifault-eleven-labs-voice-not-allowed-for-free-users",
"call.in-progress.error-vapifault-eleven-labs-max-character-limit-exceeded",
"call.in-progress.error-vapifault-eleven-labs-blocked-voice-potentially-against-terms-of-service-and-awaiting-verification",
+ "call.in-progress.error-providerfault-eleven-labs-system-busy-and-requested-upgrade",
"call.in-progress.error-providerfault-eleven-labs-500-server-error",
"call.in-progress.error-providerfault-eleven-labs-503-server-error",
"pipeline-error-playht-request-timed-out",
@@ -47756,12 +60419,20 @@
"call.in-progress.error-vapifault-google-transcriber-failed",
"pipeline-error-openai-transcriber-failed",
"call.in-progress.error-vapifault-openai-transcriber-failed",
+ "pipeline-error-soniox-transcriber-auth-failed",
+ "pipeline-error-soniox-transcriber-rate-limited",
+ "pipeline-error-soniox-transcriber-invalid-config",
+ "pipeline-error-soniox-transcriber-server-error",
+ "pipeline-error-soniox-transcriber-failed",
+ "call.in-progress.error-vapifault-soniox-transcriber-auth-failed",
+ "call.in-progress.error-vapifault-soniox-transcriber-rate-limited",
+ "call.in-progress.error-vapifault-soniox-transcriber-invalid-config",
+ "call.in-progress.error-vapifault-soniox-transcriber-server-error",
+ "call.in-progress.error-vapifault-soniox-transcriber-failed",
"call.in-progress.error-warm-transfer-max-duration",
"call.in-progress.error-warm-transfer-assistant-cancelled",
"call.in-progress.error-warm-transfer-silence-timeout",
"call.in-progress.error-warm-transfer-microphone-timeout",
- "call.in-progress.error-warm-transfer-hang-timeout",
- "call.in-progress.error-warm-transfer-idle-timeout",
"assistant-ended-call",
"assistant-said-end-call-phrase",
"assistant-ended-call-with-hangup-task",
@@ -47772,7 +60443,9 @@
"call.in-progress.error-transfer-failed",
"customer-busy",
"customer-ended-call",
+ "customer-ended-call-before-warm-transfer",
"customer-ended-call-after-warm-transfer-attempt",
+ "customer-ended-call-during-transfer",
"customer-did-not-answer",
"customer-did-not-give-microphone-permission",
"exceeded-max-duration",
@@ -47783,6 +60456,7 @@
"call.in-progress.error-sip-inbound-call-failed-to-connect",
"call.in-progress.error-providerfault-outbound-sip-403-forbidden",
"call.in-progress.error-providerfault-outbound-sip-407-proxy-authentication-required",
+ "call.in-progress.error-providerfault-outbound-sip-408-request-timeout",
"call.in-progress.error-providerfault-outbound-sip-503-service-unavailable",
"call.in-progress.error-providerfault-outbound-sip-480-temporarily-unavailable",
"call.in-progress.error-sip-outbound-call-failed-to-connect",
@@ -47795,7 +60469,8 @@
"twilio-failed-to-connect-call",
"twilio-reported-customer-misdialed",
"vonage-rejected",
- "voicemail"
+ "voicemail",
+ "call-deleted"
]
},
"cost": {
@@ -47842,6 +60517,19 @@
]
}
},
+ "destination": {
+ "description": "This is the destination the call was transferred to, if the call was forwarded.\nThis can also be found at `call.destination` on GET /call/:id.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "NumberTransferDestination"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "SipTransferDestination"
+ }
+ ]
+ },
"timestamp": {
"type": "number",
"description": "This is the timestamp of the message."
@@ -47903,6 +60591,14 @@
"format": "date-time",
"type": "string",
"description": "This is the ISO 8601 date-time string of when the call ended. This can also be found at `call.endedAt` on GET /call/:id."
+ },
+ "compliance": {
+ "description": "This is the compliance result of the call. This can also be found at `call.compliance` on GET /call/:id.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Compliance"
+ }
+ ]
}
},
"required": [
@@ -47912,6 +60608,95 @@
"analysis"
]
},
+ "ServerMessageHandoffDestinationRequest": {
+ "type": "object",
+ "properties": {
+ "phoneNumber": {
+ "description": "This is the phone number that the message is associated with.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateByoPhoneNumberDTO",
+ "title": "ByoPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioPhoneNumberDTO",
+ "title": "TwilioPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonagePhoneNumberDTO",
+ "title": "VonagePhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVapiPhoneNumberDTO",
+ "title": "VapiPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTelnyxPhoneNumberDTO",
+ "title": "TelnyxPhoneNumber"
+ }
+ ]
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the message. \"handoff-destination-request\" is sent when the model is requesting handoff but destination is unknown.",
+ "enum": [
+ "handoff-destination-request"
+ ]
+ },
+ "timestamp": {
+ "type": "number",
+ "description": "This is the timestamp of the message."
+ },
+ "artifact": {
+ "description": "This is a live version of the `call.artifact`.\n\nThis matches what is stored on `call.artifact` after the call.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Artifact"
+ }
+ ]
+ },
+ "assistant": {
+ "description": "This is the assistant that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ },
+ "customer": {
+ "description": "This is the customer that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
+ ]
+ },
+ "call": {
+ "description": "This is the call that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Call"
+ }
+ ]
+ },
+ "chat": {
+ "description": "This is the chat object.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Chat"
+ }
+ ]
+ },
+ "parameters": {
+ "type": "object",
+ "description": "This is the parameters of the handoff destination request."
+ }
+ },
+ "required": [
+ "type",
+ "parameters"
+ ]
+ },
"ServerMessageHang": {
"type": "object",
"properties": {
@@ -48151,6 +60936,10 @@
"model-output"
]
},
+ "turnId": {
+ "type": "string",
+ "description": "This is the unique identifier for the current LLM turn. All tokens from the same\nLLM response share the same turnId. Use this to group tokens and discard on interruption."
+ },
"timestamp": {
"type": "number",
"description": "This is the timestamp of the message."
@@ -48461,7 +61250,9 @@
"ringing",
"in-progress",
"forwarding",
- "ended"
+ "ended",
+ "not-found",
+ "deletion-failed"
]
},
"endedReason": {
@@ -48486,12 +61277,14 @@
"call.start.error-vapi-number-outbound-daily-limit",
"call.start.error-get-transport",
"call.start.error-subscription-wallet-does-not-exist",
+ "call.start.error-fraud-check-failed",
"call.start.error-subscription-frozen",
"call.start.error-subscription-insufficient-credits",
"call.start.error-subscription-upgrade-failed",
"call.start.error-subscription-concurrency-limit-reached",
+ "call.start.error-enterprise-feature-not-available-recording-consent",
"assistant-not-valid",
- "database-error",
+ "call.start.error-vapifault-database-error",
"assistant-not-found",
"pipeline-error-openai-voice-failed",
"pipeline-error-cartesia-voice-failed",
@@ -48502,11 +61295,13 @@
"pipeline-error-azure-voice-failed",
"pipeline-error-rime-ai-voice-failed",
"pipeline-error-smallest-ai-voice-failed",
+ "pipeline-error-vapi-voice-failed",
"pipeline-error-neuphonic-voice-failed",
"pipeline-error-hume-voice-failed",
"pipeline-error-sesame-voice-failed",
"pipeline-error-inworld-voice-failed",
"pipeline-error-minimax-voice-failed",
+ "pipeline-error-wellsaid-voice-failed",
"pipeline-error-tavus-video-failed",
"call.in-progress.error-vapifault-openai-voice-failed",
"call.in-progress.error-vapifault-cartesia-voice-failed",
@@ -48517,11 +61312,13 @@
"call.in-progress.error-vapifault-azure-voice-failed",
"call.in-progress.error-vapifault-rime-ai-voice-failed",
"call.in-progress.error-vapifault-smallest-ai-voice-failed",
+ "call.in-progress.error-vapifault-vapi-voice-failed",
"call.in-progress.error-vapifault-neuphonic-voice-failed",
"call.in-progress.error-vapifault-hume-voice-failed",
"call.in-progress.error-vapifault-sesame-voice-failed",
"call.in-progress.error-vapifault-inworld-voice-failed",
"call.in-progress.error-vapifault-minimax-voice-failed",
+ "call.in-progress.error-vapifault-wellsaid-voice-failed",
"call.in-progress.error-vapifault-tavus-video-failed",
"pipeline-error-vapi-llm-failed",
"pipeline-error-vapi-400-bad-request-validation-failed",
@@ -48530,7 +61327,7 @@
"pipeline-error-vapi-429-exceeded-quota",
"pipeline-error-vapi-500-server-error",
"pipeline-error-vapi-503-server-overloaded-error",
- "call.in-progress.error-vapifault-vapi-llm-failed",
+ "call.in-progress.error-providerfault-vapi-llm-failed",
"call.in-progress.error-vapifault-vapi-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-vapi-401-unauthorized",
"call.in-progress.error-vapifault-vapi-403-model-access-denied",
@@ -48538,6 +61335,7 @@
"call.in-progress.error-providerfault-vapi-500-server-error",
"call.in-progress.error-providerfault-vapi-503-server-overloaded-error",
"pipeline-error-deepgram-transcriber-failed",
+ "pipeline-error-deepgram-transcriber-api-key-missing",
"call.in-progress.error-vapifault-deepgram-transcriber-failed",
"pipeline-error-gladia-transcriber-failed",
"call.in-progress.error-vapifault-gladia-transcriber-failed",
@@ -48559,6 +61357,22 @@
"call.in-progress.error-vapifault-talkscriber-transcriber-failed",
"pipeline-error-azure-speech-transcriber-failed",
"call.in-progress.error-vapifault-azure-speech-transcriber-failed",
+ "pipeline-error-eleven-labs-transcriber-failed",
+ "call.in-progress.error-vapifault-eleven-labs-transcriber-failed",
+ "pipeline-error-google-transcriber-failed",
+ "call.in-progress.error-vapifault-google-transcriber-failed",
+ "pipeline-error-openai-transcriber-failed",
+ "call.in-progress.error-vapifault-openai-transcriber-failed",
+ "pipeline-error-soniox-transcriber-auth-failed",
+ "pipeline-error-soniox-transcriber-rate-limited",
+ "pipeline-error-soniox-transcriber-invalid-config",
+ "pipeline-error-soniox-transcriber-server-error",
+ "pipeline-error-soniox-transcriber-failed",
+ "call.in-progress.error-vapifault-soniox-transcriber-auth-failed",
+ "call.in-progress.error-vapifault-soniox-transcriber-rate-limited",
+ "call.in-progress.error-vapifault-soniox-transcriber-invalid-config",
+ "call.in-progress.error-vapifault-soniox-transcriber-server-error",
+ "call.in-progress.error-vapifault-soniox-transcriber-failed",
"call.in-progress.error-pipeline-no-available-llm-model",
"worker-shutdown",
"vonage-disconnected",
@@ -48573,15 +61387,21 @@
"call.in-progress.error-vapifault-worker-died",
"call.in-progress.twilio-completed-call",
"call.in-progress.sip-completed-call",
- "call.in-progress.error-vapifault-openai-llm-failed",
- "call.in-progress.error-vapifault-azure-openai-llm-failed",
- "call.in-progress.error-vapifault-groq-llm-failed",
- "call.in-progress.error-vapifault-google-llm-failed",
- "call.in-progress.error-vapifault-xai-llm-failed",
- "call.in-progress.error-vapifault-mistral-llm-failed",
- "call.in-progress.error-vapifault-inflection-ai-llm-failed",
- "call.in-progress.error-vapifault-cerebras-llm-failed",
- "call.in-progress.error-vapifault-deep-seek-llm-failed",
+ "call.in-progress.error-sip-inbound-call-failed-to-connect",
+ "call.in-progress.error-providerfault-outbound-sip-503-service-unavailable",
+ "call.in-progress.error-sip-outbound-call-failed-to-connect",
+ "call.ringing.error-sip-inbound-call-failed-to-connect",
+ "call.in-progress.error-providerfault-openai-llm-failed",
+ "call.in-progress.error-providerfault-azure-openai-llm-failed",
+ "call.in-progress.error-providerfault-groq-llm-failed",
+ "call.in-progress.error-providerfault-google-llm-failed",
+ "call.in-progress.error-providerfault-xai-llm-failed",
+ "call.in-progress.error-providerfault-mistral-llm-failed",
+ "call.in-progress.error-providerfault-minimax-llm-failed",
+ "call.in-progress.error-providerfault-inflection-ai-llm-failed",
+ "call.in-progress.error-providerfault-cerebras-llm-failed",
+ "call.in-progress.error-providerfault-deep-seek-llm-failed",
+ "call.in-progress.error-providerfault-baseten-llm-failed",
"call.in-progress.error-vapifault-chat-pipeline-failed-to-start",
"pipeline-error-openai-400-bad-request-validation-failed",
"pipeline-error-openai-401-unauthorized",
@@ -48641,6 +61461,19 @@
"call.in-progress.error-vapifault-xai-429-exceeded-quota",
"call.in-progress.error-providerfault-xai-500-server-error",
"call.in-progress.error-providerfault-xai-503-server-overloaded-error",
+ "pipeline-error-baseten-400-bad-request-validation-failed",
+ "pipeline-error-baseten-401-unauthorized",
+ "pipeline-error-baseten-403-model-access-denied",
+ "pipeline-error-baseten-429-exceeded-quota",
+ "pipeline-error-baseten-500-server-error",
+ "pipeline-error-baseten-503-server-overloaded-error",
+ "pipeline-error-baseten-llm-failed",
+ "call.in-progress.error-vapifault-baseten-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-baseten-401-unauthorized",
+ "call.in-progress.error-vapifault-baseten-403-model-access-denied",
+ "call.in-progress.error-vapifault-baseten-429-exceeded-quota",
+ "call.in-progress.error-providerfault-baseten-500-server-error",
+ "call.in-progress.error-providerfault-baseten-503-server-overloaded-error",
"pipeline-error-mistral-400-bad-request-validation-failed",
"pipeline-error-mistral-401-unauthorized",
"pipeline-error-mistral-403-model-access-denied",
@@ -48654,6 +61487,19 @@
"call.in-progress.error-vapifault-mistral-429-exceeded-quota",
"call.in-progress.error-providerfault-mistral-500-server-error",
"call.in-progress.error-providerfault-mistral-503-server-overloaded-error",
+ "pipeline-error-minimax-400-bad-request-validation-failed",
+ "pipeline-error-minimax-401-unauthorized",
+ "pipeline-error-minimax-403-model-access-denied",
+ "pipeline-error-minimax-429-exceeded-quota",
+ "pipeline-error-minimax-500-server-error",
+ "pipeline-error-minimax-503-server-overloaded-error",
+ "pipeline-error-minimax-llm-failed",
+ "call.in-progress.error-vapifault-minimax-400-bad-request-validation-failed",
+ "call.in-progress.error-vapifault-minimax-401-unauthorized",
+ "call.in-progress.error-vapifault-minimax-403-model-access-denied",
+ "call.in-progress.error-vapifault-minimax-429-exceeded-quota",
+ "call.in-progress.error-providerfault-minimax-500-server-error",
+ "call.in-progress.error-providerfault-minimax-503-server-overloaded-error",
"pipeline-error-inflection-ai-400-bad-request-validation-failed",
"pipeline-error-inflection-ai-401-unauthorized",
"pipeline-error-inflection-ai-403-model-access-denied",
@@ -48713,7 +61559,7 @@
"pipeline-error-anthropic-500-server-error",
"pipeline-error-anthropic-503-server-overloaded-error",
"pipeline-error-anthropic-llm-failed",
- "call.in-progress.error-vapifault-anthropic-llm-failed",
+ "call.in-progress.error-providerfault-anthropic-llm-failed",
"call.in-progress.error-vapifault-anthropic-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-anthropic-401-unauthorized",
"call.in-progress.error-vapifault-anthropic-403-model-access-denied",
@@ -48727,7 +61573,7 @@
"pipeline-error-anthropic-bedrock-500-server-error",
"pipeline-error-anthropic-bedrock-503-server-overloaded-error",
"pipeline-error-anthropic-bedrock-llm-failed",
- "call.in-progress.error-vapifault-anthropic-bedrock-llm-failed",
+ "call.in-progress.error-providerfault-anthropic-bedrock-llm-failed",
"call.in-progress.error-vapifault-anthropic-bedrock-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-anthropic-bedrock-401-unauthorized",
"call.in-progress.error-vapifault-anthropic-bedrock-403-model-access-denied",
@@ -48741,7 +61587,7 @@
"pipeline-error-anthropic-vertex-500-server-error",
"pipeline-error-anthropic-vertex-503-server-overloaded-error",
"pipeline-error-anthropic-vertex-llm-failed",
- "call.in-progress.error-vapifault-anthropic-vertex-llm-failed",
+ "call.in-progress.error-providerfault-anthropic-vertex-llm-failed",
"call.in-progress.error-vapifault-anthropic-vertex-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-anthropic-vertex-401-unauthorized",
"call.in-progress.error-vapifault-anthropic-vertex-403-model-access-denied",
@@ -48755,7 +61601,7 @@
"pipeline-error-together-ai-500-server-error",
"pipeline-error-together-ai-503-server-overloaded-error",
"pipeline-error-together-ai-llm-failed",
- "call.in-progress.error-vapifault-together-ai-llm-failed",
+ "call.in-progress.error-providerfault-together-ai-llm-failed",
"call.in-progress.error-vapifault-together-ai-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-together-ai-401-unauthorized",
"call.in-progress.error-vapifault-together-ai-403-model-access-denied",
@@ -48769,7 +61615,7 @@
"pipeline-error-anyscale-500-server-error",
"pipeline-error-anyscale-503-server-overloaded-error",
"pipeline-error-anyscale-llm-failed",
- "call.in-progress.error-vapifault-anyscale-llm-failed",
+ "call.in-progress.error-providerfault-anyscale-llm-failed",
"call.in-progress.error-vapifault-anyscale-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-anyscale-401-unauthorized",
"call.in-progress.error-vapifault-anyscale-403-model-access-denied",
@@ -48783,7 +61629,7 @@
"pipeline-error-openrouter-500-server-error",
"pipeline-error-openrouter-503-server-overloaded-error",
"pipeline-error-openrouter-llm-failed",
- "call.in-progress.error-vapifault-openrouter-llm-failed",
+ "call.in-progress.error-providerfault-openrouter-llm-failed",
"call.in-progress.error-vapifault-openrouter-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-openrouter-401-unauthorized",
"call.in-progress.error-vapifault-openrouter-403-model-access-denied",
@@ -48797,7 +61643,7 @@
"pipeline-error-perplexity-ai-500-server-error",
"pipeline-error-perplexity-ai-503-server-overloaded-error",
"pipeline-error-perplexity-ai-llm-failed",
- "call.in-progress.error-vapifault-perplexity-ai-llm-failed",
+ "call.in-progress.error-providerfault-perplexity-ai-llm-failed",
"call.in-progress.error-vapifault-perplexity-ai-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-perplexity-ai-401-unauthorized",
"call.in-progress.error-vapifault-perplexity-ai-403-model-access-denied",
@@ -48811,7 +61657,7 @@
"pipeline-error-deepinfra-500-server-error",
"pipeline-error-deepinfra-503-server-overloaded-error",
"pipeline-error-deepinfra-llm-failed",
- "call.in-progress.error-vapifault-deepinfra-llm-failed",
+ "call.in-progress.error-providerfault-deepinfra-llm-failed",
"call.in-progress.error-vapifault-deepinfra-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-deepinfra-401-unauthorized",
"call.in-progress.error-vapifault-deepinfra-403-model-access-denied",
@@ -48825,7 +61671,7 @@
"pipeline-error-runpod-500-server-error",
"pipeline-error-runpod-503-server-overloaded-error",
"pipeline-error-runpod-llm-failed",
- "call.in-progress.error-vapifault-runpod-llm-failed",
+ "call.in-progress.error-providerfault-runpod-llm-failed",
"call.in-progress.error-vapifault-runpod-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-runpod-401-unauthorized",
"call.in-progress.error-vapifault-runpod-403-model-access-denied",
@@ -48839,13 +61685,14 @@
"pipeline-error-custom-llm-500-server-error",
"pipeline-error-custom-llm-503-server-overloaded-error",
"pipeline-error-custom-llm-llm-failed",
- "call.in-progress.error-vapifault-custom-llm-llm-failed",
+ "call.in-progress.error-providerfault-custom-llm-llm-failed",
"call.in-progress.error-vapifault-custom-llm-400-bad-request-validation-failed",
"call.in-progress.error-vapifault-custom-llm-401-unauthorized",
"call.in-progress.error-vapifault-custom-llm-403-model-access-denied",
"call.in-progress.error-vapifault-custom-llm-429-exceeded-quota",
"call.in-progress.error-providerfault-custom-llm-500-server-error",
"call.in-progress.error-providerfault-custom-llm-503-server-overloaded-error",
+ "call.in-progress.error-pipeline-ws-model-connection-failed",
"pipeline-error-custom-voice-failed",
"pipeline-error-cartesia-socket-hang-up",
"pipeline-error-cartesia-requested-payment",
@@ -48901,6 +61748,7 @@
"call.in-progress.error-vapifault-eleven-labs-voice-not-allowed-for-free-users",
"call.in-progress.error-vapifault-eleven-labs-max-character-limit-exceeded",
"call.in-progress.error-vapifault-eleven-labs-blocked-voice-potentially-against-terms-of-service-and-awaiting-verification",
+ "call.in-progress.error-providerfault-eleven-labs-system-busy-and-requested-upgrade",
"call.in-progress.error-providerfault-eleven-labs-500-server-error",
"call.in-progress.error-providerfault-eleven-labs-503-server-error",
"pipeline-error-playht-request-timed-out",
@@ -48950,12 +61798,20 @@
"call.in-progress.error-vapifault-google-transcriber-failed",
"pipeline-error-openai-transcriber-failed",
"call.in-progress.error-vapifault-openai-transcriber-failed",
+ "pipeline-error-soniox-transcriber-auth-failed",
+ "pipeline-error-soniox-transcriber-rate-limited",
+ "pipeline-error-soniox-transcriber-invalid-config",
+ "pipeline-error-soniox-transcriber-server-error",
+ "pipeline-error-soniox-transcriber-failed",
+ "call.in-progress.error-vapifault-soniox-transcriber-auth-failed",
+ "call.in-progress.error-vapifault-soniox-transcriber-rate-limited",
+ "call.in-progress.error-vapifault-soniox-transcriber-invalid-config",
+ "call.in-progress.error-vapifault-soniox-transcriber-server-error",
+ "call.in-progress.error-vapifault-soniox-transcriber-failed",
"call.in-progress.error-warm-transfer-max-duration",
"call.in-progress.error-warm-transfer-assistant-cancelled",
"call.in-progress.error-warm-transfer-silence-timeout",
"call.in-progress.error-warm-transfer-microphone-timeout",
- "call.in-progress.error-warm-transfer-hang-timeout",
- "call.in-progress.error-warm-transfer-idle-timeout",
"assistant-ended-call",
"assistant-said-end-call-phrase",
"assistant-ended-call-with-hangup-task",
@@ -48966,7 +61822,9 @@
"call.in-progress.error-transfer-failed",
"customer-busy",
"customer-ended-call",
+ "customer-ended-call-before-warm-transfer",
"customer-ended-call-after-warm-transfer-attempt",
+ "customer-ended-call-during-transfer",
"customer-did-not-answer",
"customer-did-not-give-microphone-permission",
"exceeded-max-duration",
@@ -48977,6 +61835,7 @@
"call.in-progress.error-sip-inbound-call-failed-to-connect",
"call.in-progress.error-providerfault-outbound-sip-403-forbidden",
"call.in-progress.error-providerfault-outbound-sip-407-proxy-authentication-required",
+ "call.in-progress.error-providerfault-outbound-sip-408-request-timeout",
"call.in-progress.error-providerfault-outbound-sip-503-service-unavailable",
"call.in-progress.error-providerfault-outbound-sip-480-temporarily-unavailable",
"call.in-progress.error-sip-outbound-call-failed-to-connect",
@@ -48989,7 +61848,8 @@
"twilio-failed-to-connect-call",
"twilio-reported-customer-misdialed",
"vonage-rejected",
- "voicemail"
+ "voicemail",
+ "call-deleted"
]
},
"messages": {
@@ -49311,10 +62171,435 @@
}
},
"required": [
- "type"
+ "type"
+ ]
+ },
+ "ServerMessageTransferUpdate": {
+ "type": "object",
+ "properties": {
+ "phoneNumber": {
+ "description": "This is the phone number that the message is associated with.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateByoPhoneNumberDTO",
+ "title": "ByoPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioPhoneNumberDTO",
+ "title": "TwilioPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonagePhoneNumberDTO",
+ "title": "VonagePhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVapiPhoneNumberDTO",
+ "title": "VapiPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTelnyxPhoneNumberDTO",
+ "title": "TelnyxPhoneNumber"
+ }
+ ]
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the message. \"transfer-update\" is sent whenever a transfer happens.",
+ "enum": [
+ "transfer-update"
+ ]
+ },
+ "destination": {
+ "description": "This is the destination of the transfer.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/TransferDestinationAssistant",
+ "title": "Assistant"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationNumber",
+ "title": "Number"
+ },
+ {
+ "$ref": "#/components/schemas/TransferDestinationSip",
+ "title": "Sip"
+ }
+ ]
+ },
+ "timestamp": {
+ "type": "number",
+ "description": "This is the timestamp of the message."
+ },
+ "artifact": {
+ "description": "This is a live version of the `call.artifact`.\n\nThis matches what is stored on `call.artifact` after the call.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Artifact"
+ }
+ ]
+ },
+ "assistant": {
+ "description": "This is the assistant that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ },
+ "customer": {
+ "description": "This is the customer that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
+ ]
+ },
+ "call": {
+ "description": "This is the call that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Call"
+ }
+ ]
+ },
+ "chat": {
+ "description": "This is the chat object.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Chat"
+ }
+ ]
+ },
+ "toAssistant": {
+ "description": "This is the assistant that the call is being transferred to. This is only sent if `destination.type` is \"assistant\".",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ },
+ "fromAssistant": {
+ "description": "This is the assistant that the call is being transferred from. This is only sent if `destination.type` is \"assistant\".",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ },
+ "toStepRecord": {
+ "type": "object",
+ "description": "This is the step that the conversation moved to."
+ },
+ "fromStepRecord": {
+ "type": "object",
+ "description": "This is the step that the conversation moved from."
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "ServerMessageTranscript": {
+ "type": "object",
+ "properties": {
+ "phoneNumber": {
+ "description": "This is the phone number that the message is associated with.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateByoPhoneNumberDTO",
+ "title": "ByoPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioPhoneNumberDTO",
+ "title": "TwilioPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonagePhoneNumberDTO",
+ "title": "VonagePhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVapiPhoneNumberDTO",
+ "title": "VapiPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTelnyxPhoneNumberDTO",
+ "title": "TelnyxPhoneNumber"
+ }
+ ]
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the message. \"transcript\" is sent as transcriber outputs partial or final transcript.",
+ "enum": [
+ "transcript",
+ "transcript[transcriptType=\"final\"]"
+ ]
+ },
+ "timestamp": {
+ "type": "number",
+ "description": "This is the timestamp of the message."
+ },
+ "artifact": {
+ "description": "This is a live version of the `call.artifact`.\n\nThis matches what is stored on `call.artifact` after the call.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Artifact"
+ }
+ ]
+ },
+ "assistant": {
+ "description": "This is the assistant that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ },
+ "customer": {
+ "description": "This is the customer that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
+ ]
+ },
+ "call": {
+ "description": "This is the call that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Call"
+ }
+ ]
+ },
+ "chat": {
+ "description": "This is the chat object.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Chat"
+ }
+ ]
+ },
+ "role": {
+ "type": "string",
+ "description": "This is the role for which the transcript is for.",
+ "enum": [
+ "assistant",
+ "user"
+ ]
+ },
+ "transcriptType": {
+ "type": "string",
+ "description": "This is the type of the transcript.",
+ "enum": [
+ "partial",
+ "final"
+ ]
+ },
+ "transcript": {
+ "type": "string",
+ "description": "This is the transcript content."
+ },
+ "isFiltered": {
+ "type": "boolean",
+ "description": "Indicates if the transcript was filtered for security reasons."
+ },
+ "detectedThreats": {
+ "description": "List of detected security threats if the transcript was filtered.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "originalTranscript": {
+ "type": "string",
+ "description": "The original transcript before filtering (only included if content was filtered)."
+ }
+ },
+ "required": [
+ "type",
+ "role",
+ "transcriptType",
+ "transcript"
+ ]
+ },
+ "ServerMessageUserInterrupted": {
+ "type": "object",
+ "properties": {
+ "phoneNumber": {
+ "description": "This is the phone number that the message is associated with.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateByoPhoneNumberDTO",
+ "title": "ByoPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioPhoneNumberDTO",
+ "title": "TwilioPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonagePhoneNumberDTO",
+ "title": "VonagePhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVapiPhoneNumberDTO",
+ "title": "VapiPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTelnyxPhoneNumberDTO",
+ "title": "TelnyxPhoneNumber"
+ }
+ ]
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the message. \"user-interrupted\" is sent when the user interrupts the assistant.",
+ "enum": [
+ "user-interrupted"
+ ]
+ },
+ "turnId": {
+ "type": "string",
+ "description": "This is the turnId of the LLM response that was interrupted. Matches the turnId\non model-output messages so clients can discard the interrupted turn's tokens."
+ },
+ "timestamp": {
+ "type": "number",
+ "description": "This is the timestamp of the message."
+ },
+ "artifact": {
+ "description": "This is a live version of the `call.artifact`.\n\nThis matches what is stored on `call.artifact` after the call.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Artifact"
+ }
+ ]
+ },
+ "assistant": {
+ "description": "This is the assistant that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ },
+ "customer": {
+ "description": "This is the customer that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
+ ]
+ },
+ "call": {
+ "description": "This is the call that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Call"
+ }
+ ]
+ },
+ "chat": {
+ "description": "This is the chat object.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Chat"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type"
+ ]
+ },
+ "ServerMessageLanguageChangeDetected": {
+ "type": "object",
+ "properties": {
+ "phoneNumber": {
+ "description": "This is the phone number that the message is associated with.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/CreateByoPhoneNumberDTO",
+ "title": "ByoPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTwilioPhoneNumberDTO",
+ "title": "TwilioPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVonagePhoneNumberDTO",
+ "title": "VonagePhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateVapiPhoneNumberDTO",
+ "title": "VapiPhoneNumber"
+ },
+ {
+ "$ref": "#/components/schemas/CreateTelnyxPhoneNumberDTO",
+ "title": "TelnyxPhoneNumber"
+ }
+ ]
+ },
+ "type": {
+ "type": "string",
+ "description": "This is the type of the message. \"language-change-detected\" is sent when the transcriber is automatically switched based on the detected language.",
+ "enum": [
+ "language-change-detected"
+ ]
+ },
+ "timestamp": {
+ "type": "number",
+ "description": "This is the timestamp of the message."
+ },
+ "artifact": {
+ "description": "This is a live version of the `call.artifact`.\n\nThis matches what is stored on `call.artifact` after the call.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Artifact"
+ }
+ ]
+ },
+ "assistant": {
+ "description": "This is the assistant that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateAssistantDTO"
+ }
+ ]
+ },
+ "customer": {
+ "description": "This is the customer that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/CreateCustomerDTO"
+ }
+ ]
+ },
+ "call": {
+ "description": "This is the call that the message is associated with.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Call"
+ }
+ ]
+ },
+ "chat": {
+ "description": "This is the chat object.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Chat"
+ }
+ ]
+ },
+ "language": {
+ "type": "string",
+ "description": "This is the language the transcriber is switched to."
+ }
+ },
+ "required": [
+ "type",
+ "language"
]
},
- "ServerMessageTransferUpdate": {
+ "ServerMessageVoiceInput": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -49344,26 +62629,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"transfer-update\" is sent whenever a transfer happens.",
+ "description": "This is the type of the message. \"voice-input\" is sent when a generation is requested from voice provider.",
"enum": [
- "transfer-update"
- ]
- },
- "destination": {
- "description": "This is the destination of the transfer.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/TransferDestinationAssistant",
- "title": "Assistant"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationNumber",
- "title": "Number"
- },
- {
- "$ref": "#/components/schemas/TransferDestinationSip",
- "title": "Sip"
- }
+ "voice-input"
]
},
"timestamp": {
@@ -49410,36 +62678,17 @@
}
]
},
- "toAssistant": {
- "description": "This is the assistant that the call is being transferred to. This is only sent if `destination.type` is \"assistant\".",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateAssistantDTO"
- }
- ]
- },
- "fromAssistant": {
- "description": "This is the assistant that the call is being transferred from. This is only sent if `destination.type` is \"assistant\".",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateAssistantDTO"
- }
- ]
- },
- "toStepRecord": {
- "type": "object",
- "description": "This is the step that the conversation moved to."
- },
- "fromStepRecord": {
- "type": "object",
- "description": "This is the step that the conversation moved from. ="
+ "input": {
+ "type": "string",
+ "description": "This is the voice input content"
}
},
"required": [
- "type"
+ "type",
+ "input"
]
},
- "ServerMessageTranscript": {
+ "ServerMessageAssistantSpeech": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -49469,12 +62718,44 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"transcript\" is sent as transcriber outputs partial or final transcript.",
+ "description": "This is the type of the message. \"assistant-speech\" is sent as assistant audio is being played.",
"enum": [
- "transcript",
- "transcript[transcriptType=\"final\"]"
+ "assistant.speechStarted"
]
},
+ "text": {
+ "type": "string",
+ "description": "The full assistant text for the current turn. This is the complete text,\nnot an incremental delta — consumers should use `timing` metadata (e.g.\n`wordsSpoken`) to determine which portion has been spoken so far."
+ },
+ "turn": {
+ "type": "number",
+ "description": "This is the turn number of the assistant speech event (0-indexed)."
+ },
+ "source": {
+ "type": "string",
+ "description": "Indicates how the text was sourced.",
+ "enum": [
+ "model",
+ "force-say",
+ "custom-voice"
+ ]
+ },
+ "timing": {
+ "description": "Optional timing metadata. Shape depends on `timing.type`:\n\n- `word-alignment` (ElevenLabs): per-character timing at playback\n cadence. words[] includes space entries. Best consumed by tracking\n a running character count: join timing.words, add to a char cursor,\n and highlight text up to that position. No interpolation needed.\n\n- `word-progress` (Minimax with voice.subtitleType: 'word'): cursor-\n based word count per TTS segment. Use wordsSpoken as the anchor,\n interpolate forward using segmentDurationMs or timing.words until\n the next event arrives.\n\nWhen absent, the event is a text-only fallback for providers without\nword-level timing (e.g. Cartesia, Deepgram, Azure). Text emits once\nper TTS chunk when audio is playing. Optionally interpolate a word\ncursor at ~3.5 words/sec between events for approximate tracking.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/AssistantSpeechWordAlignmentTiming",
+ "title": "WordAlignmentTiming"
+ },
+ {
+ "$ref": "#/components/schemas/AssistantSpeechWordProgressTiming",
+ "title": "WordProgressTiming"
+ }
+ ],
+ "discriminator": {
+ "propertyName": "type"
+ }
+ },
"timestamp": {
"type": "number",
"description": "This is the timestamp of the message."
@@ -49518,51 +62799,14 @@
"$ref": "#/components/schemas/Chat"
}
]
- },
- "role": {
- "type": "string",
- "description": "This is the role for which the transcript is for.",
- "enum": [
- "assistant",
- "user"
- ]
- },
- "transcriptType": {
- "type": "string",
- "description": "This is the type of the transcript.",
- "enum": [
- "partial",
- "final"
- ]
- },
- "transcript": {
- "type": "string",
- "description": "This is the transcript content."
- },
- "isFiltered": {
- "type": "boolean",
- "description": "Indicates if the transcript was filtered for security reasons."
- },
- "detectedThreats": {
- "description": "List of detected security threats if the transcript was filtered.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "originalTranscript": {
- "type": "string",
- "description": "The original transcript before filtering (only included if content was filtered)."
}
},
"required": [
"type",
- "role",
- "transcriptType",
- "transcript"
+ "text"
]
},
- "ServerMessageUserInterrupted": {
+ "ServerMessageVoiceRequest": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -49592,9 +62836,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"user-interrupted\" is sent when the user interrupts the assistant.",
+ "description": "This is the type of the message. \"voice-request\" is sent when using `assistant.voice={ \"type\": \"custom-voice\" }`.\n\nHere is what the request will look like:\n\nPOST https://{assistant.voice.server.url}\nContent-Type: application/json\n\n{\n \"messsage\": {\n \"type\": \"voice-request\",\n \"text\": \"Hello, world!\",\n \"sampleRate\": 24000,\n ...other metadata about the call...\n }\n}\n\nThe expected response is 1-channel 16-bit raw PCM audio at the sample rate specified in the request. Here is how the response will be piped to the transport:\n```\nresponse.on('data', (chunk: Buffer) => {\n outputStream.write(chunk);\n});\n```",
"enum": [
- "user-interrupted"
+ "voice-request"
]
},
"timestamp": {
@@ -49640,13 +62884,23 @@
"$ref": "#/components/schemas/Chat"
}
]
+ },
+ "text": {
+ "type": "string",
+ "description": "This is the text to be synthesized."
+ },
+ "sampleRate": {
+ "type": "number",
+ "description": "This is the sample rate to be synthesized."
}
},
"required": [
- "type"
+ "type",
+ "text",
+ "sampleRate"
]
},
- "ServerMessageLanguageChangeDetected": {
+ "ServerMessageCallEndpointingRequest": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -49676,11 +62930,46 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"language-change-detected\" is sent when the transcriber is automatically switched based on the detected language.",
+ "description": "This is the type of the message. \"call.endpointing.request\" is sent when using `assistant.startSpeakingPlan.smartEndpointingPlan={ \"provider\": \"custom-endpointing-model\" }`.\n\nHere is what the request will look like:\n\nPOST https://{assistant.startSpeakingPlan.smartEndpointingPlan.server.url}\nContent-Type: application/json\n\n{\n \"message\": {\n \"type\": \"call.endpointing.request\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"message\": \"Hello, how are you?\",\n \"time\": 1234567890,\n \"secondsFromStart\": 0\n }\n ],\n ...other metadata about the call...\n }\n}\n\nThe expected response:\n{\n \"timeoutSeconds\": 0.5\n}",
"enum": [
- "language-change-detected"
+ "call.endpointing.request"
]
},
+ "messages": {
+ "type": "array",
+ "description": "This is the conversation history at the time of the endpointing request.",
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/UserMessage",
+ "title": "UserMessage"
+ },
+ {
+ "$ref": "#/components/schemas/SystemMessage",
+ "title": "SystemMessage"
+ },
+ {
+ "$ref": "#/components/schemas/BotMessage",
+ "title": "BotMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolCallMessage",
+ "title": "ToolCallMessage"
+ },
+ {
+ "$ref": "#/components/schemas/ToolCallResultMessage",
+ "title": "ToolCallResultMessage"
+ }
+ ]
+ }
+ },
+ "messagesOpenAIFormatted": {
+ "description": "This is just `messages` formatted for OpenAI.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/OpenAIMessage"
+ }
+ },
"timestamp": {
"type": "number",
"description": "This is the timestamp of the message."
@@ -49724,18 +63013,14 @@
"$ref": "#/components/schemas/Chat"
}
]
- },
- "language": {
- "type": "string",
- "description": "This is the language the transcriber is switched to."
}
},
"required": [
"type",
- "language"
+ "messagesOpenAIFormatted"
]
},
- "ServerMessageVoiceInput": {
+ "ServerMessageChatCreated": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -49765,9 +63050,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"voice-input\" is sent when a generation is requested from voice provider.",
+ "description": "This is the type of the message. \"chat.created\" is sent when a new chat is created.",
"enum": [
- "voice-input"
+ "chat.created"
]
},
"timestamp": {
@@ -49807,24 +63092,20 @@
]
},
"chat": {
- "description": "This is the chat object.",
+ "description": "This is the chat that was created.",
"allOf": [
{
"$ref": "#/components/schemas/Chat"
}
]
- },
- "input": {
- "type": "string",
- "description": "This is the voice input content"
}
},
"required": [
"type",
- "input"
+ "chat"
]
},
- "ServerMessageVoiceRequest": {
+ "ServerMessageChatDeleted": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -49854,9 +63135,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"voice-request\" is sent when using `assistant.voice={ \"type\": \"custom-voice\" }`.\n\nHere is what the request will look like:\n\nPOST https://{assistant.voice.server.url}\nContent-Type: application/json\n\n{\n \"messsage\": {\n \"type\": \"voice-request\",\n \"text\": \"Hello, world!\",\n \"sampleRate\": 24000,\n ...other metadata about the call...\n }\n}\n\nThe expected response is 1-channel 16-bit raw PCM audio at the sample rate specified in the request. Here is how the response will be piped to the transport:\n```\nresponse.on('data', (chunk: Buffer) => {\n outputStream.write(chunk);\n});\n```",
+ "description": "This is the type of the message. \"chat.deleted\" is sent when a chat is deleted.",
"enum": [
- "voice-request"
+ "chat.deleted"
]
},
"timestamp": {
@@ -49896,29 +63177,20 @@
]
},
"chat": {
- "description": "This is the chat object.",
+ "description": "This is the chat that was deleted.",
"allOf": [
{
"$ref": "#/components/schemas/Chat"
}
]
- },
- "text": {
- "type": "string",
- "description": "This is the text to be synthesized."
- },
- "sampleRate": {
- "type": "number",
- "description": "This is the sample rate to be synthesized."
}
},
"required": [
"type",
- "text",
- "sampleRate"
+ "chat"
]
},
- "ServerMessageCallEndpointingRequest": {
+ "ServerMessageSessionCreated": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -49948,46 +63220,11 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"call.endpointing.request\" is sent when using `assistant.startSpeakingPlan.smartEndpointingPlan={ \"provider\": \"custom-endpointing-model\" }`.\n\nHere is what the request will look like:\n\nPOST https://{assistant.startSpeakingPlan.smartEndpointingPlan.server.url}\nContent-Type: application/json\n\n{\n \"message\": {\n \"type\": \"call.endpointing.request\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"message\": \"Hello, how are you?\",\n \"time\": 1234567890,\n \"secondsFromStart\": 0\n }\n ],\n ...other metadata about the call...\n }\n}\n\nThe expected response:\n{\n \"timeoutSeconds\": 0.5\n}",
+ "description": "This is the type of the message. \"session.created\" is sent when a new session is created.",
"enum": [
- "call.endpointing.request"
+ "session.created"
]
},
- "messages": {
- "type": "array",
- "description": "This is the conversation history at the time of the endpointing request.",
- "items": {
- "oneOf": [
- {
- "$ref": "#/components/schemas/UserMessage",
- "title": "UserMessage"
- },
- {
- "$ref": "#/components/schemas/SystemMessage",
- "title": "SystemMessage"
- },
- {
- "$ref": "#/components/schemas/BotMessage",
- "title": "BotMessage"
- },
- {
- "$ref": "#/components/schemas/ToolCallMessage",
- "title": "ToolCallMessage"
- },
- {
- "$ref": "#/components/schemas/ToolCallResultMessage",
- "title": "ToolCallResultMessage"
- }
- ]
- }
- },
- "messagesOpenAIFormatted": {
- "description": "This is just `messages` formatted for OpenAI.",
- "type": "array",
- "items": {
- "$ref": "#/components/schemas/OpenAIMessage"
- }
- },
"timestamp": {
"type": "number",
"description": "This is the timestamp of the message."
@@ -50031,99 +63268,22 @@
"$ref": "#/components/schemas/Chat"
}
]
- }
- },
- "required": [
- "type",
- "messagesOpenAIFormatted"
- ]
- },
- "ServerMessageChatCreated": {
- "type": "object",
- "properties": {
- "phoneNumber": {
- "description": "This is the phone number that the message is associated with.",
- "oneOf": [
- {
- "$ref": "#/components/schemas/CreateByoPhoneNumberDTO",
- "title": "ByoPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/CreateTwilioPhoneNumberDTO",
- "title": "TwilioPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/CreateVonagePhoneNumberDTO",
- "title": "VonagePhoneNumber"
- },
- {
- "$ref": "#/components/schemas/CreateVapiPhoneNumberDTO",
- "title": "VapiPhoneNumber"
- },
- {
- "$ref": "#/components/schemas/CreateTelnyxPhoneNumberDTO",
- "title": "TelnyxPhoneNumber"
- }
- ]
},
- "type": {
- "type": "string",
- "description": "This is the type of the message. \"chat.created\" is sent when a new chat is created.",
- "enum": [
- "chat.created"
- ]
- },
- "timestamp": {
- "type": "number",
- "description": "This is the timestamp of the message."
- },
- "artifact": {
- "description": "This is a live version of the `call.artifact`.\n\nThis matches what is stored on `call.artifact` after the call.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Artifact"
- }
- ]
- },
- "assistant": {
- "description": "This is the assistant that the message is associated with.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateAssistantDTO"
- }
- ]
- },
- "customer": {
- "description": "This is the customer that the message is associated with.",
- "allOf": [
- {
- "$ref": "#/components/schemas/CreateCustomerDTO"
- }
- ]
- },
- "call": {
- "description": "This is the call that the message is associated with.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Call"
- }
- ]
- },
- "chat": {
- "description": "This is the chat that was created.",
+ "session": {
+ "description": "This is the session that was created.",
"allOf": [
{
- "$ref": "#/components/schemas/Chat"
+ "$ref": "#/components/schemas/Session"
}
]
}
},
"required": [
"type",
- "chat"
+ "session"
]
},
- "ServerMessageChatDeleted": {
+ "ServerMessageSessionUpdated": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -50153,9 +63313,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"chat.deleted\" is sent when a chat is deleted.",
+ "description": "This is the type of the message. \"session.updated\" is sent when a session is updated.",
"enum": [
- "chat.deleted"
+ "session.updated"
]
},
"timestamp": {
@@ -50195,20 +63355,28 @@
]
},
"chat": {
- "description": "This is the chat that was deleted.",
+ "description": "This is the chat object.",
"allOf": [
{
"$ref": "#/components/schemas/Chat"
}
]
+ },
+ "session": {
+ "description": "This is the session that was updated.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/Session"
+ }
+ ]
}
},
"required": [
"type",
- "chat"
+ "session"
]
},
- "ServerMessageSessionCreated": {
+ "ServerMessageSessionDeleted": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -50238,9 +63406,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"session.created\" is sent when a new session is created.",
+ "description": "This is the type of the message. \"session.deleted\" is sent when a session is deleted.",
"enum": [
- "session.created"
+ "session.deleted"
]
},
"timestamp": {
@@ -50288,7 +63456,7 @@
]
},
"session": {
- "description": "This is the session that was created.",
+ "description": "This is the session that was deleted.",
"allOf": [
{
"$ref": "#/components/schemas/Session"
@@ -50301,7 +63469,7 @@
"session"
]
},
- "ServerMessageSessionUpdated": {
+ "ServerMessageCallDeleted": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -50331,9 +63499,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"session.updated\" is sent when a session is updated.",
+ "description": "This is the type of the message. \"call.deleted\" is sent when a call is deleted.",
"enum": [
- "session.updated"
+ "call.deleted"
]
},
"timestamp": {
@@ -50379,22 +63547,13 @@
"$ref": "#/components/schemas/Chat"
}
]
- },
- "session": {
- "description": "This is the session that was updated.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Session"
- }
- ]
}
},
"required": [
- "type",
- "session"
+ "type"
]
},
- "ServerMessageSessionDeleted": {
+ "ServerMessageCallDeleteFailed": {
"type": "object",
"properties": {
"phoneNumber": {
@@ -50424,9 +63583,9 @@
},
"type": {
"type": "string",
- "description": "This is the type of the message. \"session.deleted\" is sent when a session is deleted.",
+ "description": "This is the type of the message. \"call.deleted\" is sent when a call is deleted.",
"enum": [
- "session.deleted"
+ "call.delete.failed"
]
},
"timestamp": {
@@ -50472,19 +63631,10 @@
"$ref": "#/components/schemas/Chat"
}
]
- },
- "session": {
- "description": "This is the session that was deleted.",
- "allOf": [
- {
- "$ref": "#/components/schemas/Session"
- }
- ]
}
},
"required": [
- "type",
- "session"
+ "type"
]
},
"ServerMessage": {
@@ -50505,6 +63655,10 @@
"$ref": "#/components/schemas/ServerMessageEndOfCallReport",
"title": "EndOfCallReport"
},
+ {
+ "$ref": "#/components/schemas/ServerMessageHandoffDestinationRequest",
+ "title": "HandoffDestinationRequest"
+ },
{
"$ref": "#/components/schemas/ServerMessageHang",
"title": "Hang"
@@ -50557,6 +63711,10 @@
"$ref": "#/components/schemas/ServerMessageVoiceInput",
"title": "VoiceInput"
},
+ {
+ "$ref": "#/components/schemas/ServerMessageAssistantSpeech",
+ "title": "AssistantSpeech"
+ },
{
"$ref": "#/components/schemas/ServerMessageVoiceRequest",
"title": "VoiceRequest"
@@ -50584,6 +63742,14 @@
{
"$ref": "#/components/schemas/ServerMessageSessionDeleted",
"title": "SessionDeleted"
+ },
+ {
+ "$ref": "#/components/schemas/ServerMessageCallDeleted",
+ "title": "CallDeleted"
+ },
+ {
+ "$ref": "#/components/schemas/ServerMessageCallDeleteFailed",
+ "title": "CallDeleteFailed"
}
]
}
@@ -50640,6 +63806,14 @@
}
]
},
+ "squadOverrides": {
+ "description": "These are the overrides for the `squad` or `squadId`'s member settings and template variables.\nThis will apply to all members of the squad.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/AssistantOverrides"
+ }
+ ]
+ },
"workflowId": {
"type": "string",
"description": "This is the workflow that will be used for the call. To use a transient workflow, use `workflow` instead.\n\nTo start a call with:\n- Assistant, use `assistant` or `assistantId`\n- Squad, use `squad` or `squadId`\n- Workflow, use `workflow` or `workflowId`"
@@ -50669,13 +63843,17 @@
"ServerMessageResponseHandoffDestinationRequest": {
"type": "object",
"properties": {
+ "result": {
+ "type": "string",
+ "description": "This is the local tool result message returned for the handoff tool call."
+ },
"destination": {
- "description": "This is the destination you'd like the call to be transferred to.",
- "allOf": [
- {
- "$ref": "#/components/schemas/HandoffDestinationAssistant"
- }
- ]
+ "type": "object",
+ "description": "This is the destination you'd like the call to be transferred to."
+ },
+ "error": {
+ "type": "string",
+ "description": "This is the error message if the handoff should not be made."
}
},
"required": [
@@ -50929,7 +64107,7 @@
"properties": {
"type": {
"type": "string",
- "description": "This is the type of the message. Send \"control\" message to control the assistant. `control` options are:\n- \"mute-assistant\" - mute the assistant\n- \"unmute-assistant\" - unmute the assistant\n- \"say-first-message\" - say the first message (this is used when video recording is enabled and the conversation is only started once the client side kicks off the recording)",
+ "description": "This is the type of the message. Send \"control\" message to control the assistant. `control` options are:\n- \"mute-assistant\" - mute the assistant\n- \"unmute-assistant\" - unmute the assistant\n- \"mute-customer\" - mute the user\n- \"unmute-customer\" - unmute the user\n- \"say-first-message\" - say the first message (this is used when video recording is enabled and the conversation is only started once the client side kicks off the recording)",
"enum": [
"control"
]
@@ -50940,6 +64118,8 @@
"enum": [
"mute-assistant",
"unmute-assistant",
+ "mute-customer",
+ "unmute-customer",
"say-first-message"
]
}
@@ -51025,6 +64205,35 @@
"type"
]
},
+ "ClientInboundMessageSendTransportMessage": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of the message. Send \"send-transport-message\" to send a transport-specific message during the call.",
+ "enum": [
+ "send-transport-message"
+ ]
+ },
+ "message": {
+ "description": "This is the transport-specific message to send.",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/VapiSipTransportMessage",
+ "title": "VapiSipTransportMessage"
+ },
+ {
+ "$ref": "#/components/schemas/TwilioTransportMessage",
+ "title": "TwilioTransportMessage"
+ }
+ ]
+ }
+ },
+ "required": [
+ "type",
+ "message"
+ ]
+ },
"ClientInboundMessage": {
"type": "object",
"properties": {
@@ -51050,6 +64259,10 @@
{
"$ref": "#/components/schemas/ClientInboundMessageTransfer",
"title": "Transfer"
+ },
+ {
+ "$ref": "#/components/schemas/ClientInboundMessageSendTransportMessage",
+ "title": "SendTransportMessage"
}
]
}
@@ -51187,9 +64400,12 @@
"provider": {
"type": "string",
"enum": [
+ "daily",
+ "vapi.websocket",
"twilio",
"vonage",
- "vapi"
+ "telnyx",
+ "vapi.sip"
]
},
"minutes": {
@@ -51259,6 +64475,10 @@
"type": "number",
"description": "This is the number of completion tokens generated in the call. These should be total completion tokens used in the call for single assistant calls, while squad calls will have multiple model costs one for each assistant that was used."
},
+ "cachedPromptTokens": {
+ "type": "number",
+ "description": "This is the number of cached prompt tokens used in the call. This is only applicable to certain providers (e.g., OpenAI, Azure OpenAI) that support prompt caching. Cached tokens are billed at a discounted rate."
+ },
"cost": {
"type": "number",
"description": "This is the cost of the component in USD."
@@ -51368,6 +64588,10 @@
"type": "number",
"description": "This is the number of completion tokens generated in the analysis."
},
+ "cachedPromptTokens": {
+ "type": "number",
+ "description": "This is the number of cached prompt tokens used in the analysis. This is only applicable to certain providers (e.g., OpenAI, Azure OpenAI) that support prompt caching. Cached tokens are billed at a discounted rate."
+ },
"cost": {
"type": "number",
"description": "This is the cost of the component in USD."
@@ -51493,6 +64717,26 @@
"cost"
]
},
+ "SessionCost": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "This is the type of cost, always 'session' for this class.",
+ "enum": [
+ "session"
+ ]
+ },
+ "cost": {
+ "type": "number",
+ "description": "This is the cost of the component in USD."
+ }
+ },
+ "required": [
+ "type",
+ "cost"
+ ]
+ },
"FunctionToolWithToolCall": {
"type": "object",
"properties": {
@@ -51540,6 +64784,21 @@
}
]
},
+ "variableExtractionPlan": {
+ "description": "Plan to extract variables from the tool response",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/VariableExtractionPlan"
+ }
+ ]
+ },
+ "parameters": {
+ "description": "Static key-value pairs merged into the request body. Values support Liquid templates.",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/ToolParameter"
+ }
+ },
"toolCall": {
"$ref": "#/components/schemas/ToolCall"
},
@@ -52222,6 +65481,59 @@
"type",
"toolCall"
]
+ },
+ "VapiSipTransportMessage": {
+ "type": "object",
+ "properties": {
+ "transport": {
+ "type": "string",
+ "description": "This is the transport type.",
+ "enum": [
+ "vapi.sip"
+ ]
+ },
+ "sipVerb": {
+ "type": "string",
+ "description": "This is the SIP verb to use. Must be one of INFO, MESSAGE, or NOTIFY.",
+ "enum": [
+ "INFO",
+ "MESSAGE",
+ "NOTIFY"
+ ]
+ },
+ "headers": {
+ "type": "object",
+ "description": "These are the headers to include with the SIP request."
+ },
+ "body": {
+ "type": "string",
+ "description": "This is the body of the SIP request, if any."
+ }
+ },
+ "required": [
+ "transport",
+ "sipVerb"
+ ]
+ },
+ "TwilioTransportMessage": {
+ "type": "object",
+ "properties": {
+ "transport": {
+ "type": "string",
+ "description": "This is the transport type.",
+ "enum": [
+ "twilio"
+ ]
+ },
+ "twiml": {
+ "type": "string",
+ "description": "This is the TwiML to send to the Twilio call."
+ }
+ },
+ "required": [
+ "transport",
+ "twiml"
+ ]
}
}
}
diff --git a/fern/apis/webhooks/generators.yml b/fern/apis/webhooks/generators.yml
index 3741feeea..66ae0f0d6 100644
--- a/fern/apis/webhooks/generators.yml
+++ b/fern/apis/webhooks/generators.yml
@@ -1,4 +1,6 @@
api:
- specs:
+ specs:
- openapi: ./openapi.yml
overrides: ./openapi-overrides.yml
+ settings:
+ coerce-enums-to-literals: true
diff --git a/fern/apis/webhooks/openapi-overrides.yml b/fern/apis/webhooks/openapi-overrides.yml
index 8f9aca57c..047e8aed6 100644
--- a/fern/apis/webhooks/openapi-overrides.yml
+++ b/fern/apis/webhooks/openapi-overrides.yml
@@ -246,6 +246,14 @@ components:
name: Asterisk
"#":
name: Hash
+ items:
+ x-fern-enum:
+ "":
+ name: Empty
+ "*":
+ name: Asterisk
+ "#":
+ name: Hash
TransferDestinationAssistant:
properties:
transferMode:
diff --git a/fern/assets/styles.css b/fern/assets/styles.css
index 9cf81a30c..8f1df1b39 100644
--- a/fern/assets/styles.css
+++ b/fern/assets/styles.css
@@ -31,6 +31,21 @@
border: 1px solid #C7D2FE;
}
+/* Optional: Squad badge (reuse assistant palette for now) */
+.vapi-badge-squad {
+ background-color: #EEF2FF;
+ color: #4338CA;
+ border: 1px solid #C7D2FE;
+}
+
+/* Alpha badge - purple to match dashboard */
+.vapi-badge-alpha {
+ background-color: rgba(168, 85, 247, 0.2) !important;
+ color: #A78BFA !important;
+ border: 1px solid rgba(168, 85, 247, 0.4) !important;
+ border-color: rgba(168, 85, 247, 0.4) !important;
+}
+
/* Dark mode adjustments */
:is(.dark) .vapi-badge-assistant {
background-color: #134E4A;
@@ -44,6 +59,48 @@
border: 1px solid #6366F1;
}
+:is(.dark) .vapi-badge-squad {
+ background-color: #312E81;
+ color: #C7D2FE;
+ border: 1px solid #6366F1;
+}
+
+:is(.dark) .vapi-badge-alpha {
+ background-color: rgba(168, 85, 247, 0.2) !important;
+ color: #C4B5FD !important;
+ border: 1px solid rgba(168, 85, 247, 0.4) !important;
+ border-color: rgba(168, 85, 247, 0.4) !important;
+}
+
+/* Override Fern's pre-release availability badge to show "Alpha" with purple styling */
+.fern-docs-badge[title="Pre-release"] {
+ background-color: rgba(168, 85, 247, 0.2) !important;
+ border-color: rgba(168, 85, 247, 0.4) !important;
+ font-size: 0 !important;
+ position: relative !important;
+ min-width: 52px !important;
+ min-height: 24px !important;
+ display: inline-flex !important;
+}
+
+.fern-docs-badge[title="Pre-release"]::after {
+ content: "Alpha";
+ position: absolute;
+ top: 50%;
+ left: 50%;
+ transform: translate(-50%, -50%);
+ color: #A78BFA;
+ font-size: 0.75rem;
+ line-height: 1;
+ white-space: nowrap;
+}
+
+:is(.dark) .fern-docs-badge[title="Pre-release"] {
+ background-color: rgba(168, 85, 247, 0.2) !important;
+ color: #C4B5FD !important;
+ border-color: rgba(168, 85, 247, 0.4) !important;
+}
+
/* for a grid of videos */
.video-grid {
@@ -221,3 +278,8 @@ html.dark button[data-highlighted] .fern-api-property-meta {
.light .fern-theme-default.fern-container {
background-color: #fff !important;
}
+
+/* Fix: Make subtitle white on Simulations pages in dark mode */
+:is(.dark) [id*="simulations"] .prose-p\:text-\(color\:--grayscale-a11\) :where(p):not(:where([class~=not-prose],[class~=not-prose] *)) {
+ color: var(--grayscale-12) !important;
+}
\ No newline at end of file
diff --git a/fern/assistants/assistant-hooks.mdx b/fern/assistants/assistant-hooks.mdx
index 54760f4e1..cc31e8ce5 100644
--- a/fern/assistants/assistant-hooks.mdx
+++ b/fern/assistants/assistant-hooks.mdx
@@ -11,9 +11,11 @@ Assistant hooks let you automate actions when specific events occur during a cal
Supported events include:
- `call.ending`: When a call is ending
+- `call.timeElapsed`: When a specified number of seconds has elapsed from call start
- `assistant.speech.interrupted`: When the assistant's speech is interrupted
- `customer.speech.interrupted`: When the customer's speech is interrupted
- `customer.speech.timeout`: When the customer doesn't speak within a specified time
+- `assistant.transcriber.endpointedSpeechLowConfidence`: When a final transcript has low confidence (below threshold but within configurable range)
You can combine actions and add filters to control when hooks trigger. Multiple `customer.speech.timeout` hooks can be attached to an assistant with staggered trigger delay to support different actions at different timing in the conversation.
@@ -24,10 +26,11 @@ Hooks are defined in the `hooks` array of your assistant configuration. Each hoo
- `on`: The event that triggers the hook
- `do`: The actions to perform (supports `tool` and `say`)
- `filters`: (Optional) Conditions that must be met for the hook to trigger
-- `options`: (Optional) Configuration options for certain hook types like `customer.speech.timeout`
+- `options`: (Optional) Configuration options for certain hook types like `call.timeElapsed`, `customer.speech.timeout`, and `assistant.transcriber.endpointedSpeechLowConfidence`
- `name`: (Optional) Custom name to identify the hook
**Action Types:**
+
- `say`: Speak a message. Use `exact` for predetermined text or `prompt` for AI-generated responses
- `tool`: Execute a tool like `transferCall`, `function`, `endCall`, etc.
@@ -42,25 +45,33 @@ Transfer a call to a fallback number if a pipeline error occurs:
```json
{
- "hooks": [{
- "on": "call.ending",
- "filters": [{
- "type": "oneOf",
- "key": "call.endedReason",
- "oneOf": ["pipeline-error"]
- }],
- "do": [{
- "type": "tool",
- "tool": {
- "type": "transferCall",
- "destinations": [{
- "type": "number",
- "number": "+1234567890",
- "callerId": "+1987654321"
- }]
- }
- }]
- }]
+ "hooks": [
+ {
+ "on": "call.ending",
+ "filters": [
+ {
+ "type": "oneOf",
+ "key": "call.endedReason",
+ "oneOf": ["pipeline-error"]
+ }
+ ],
+ "do": [
+ {
+ "type": "tool",
+ "tool": {
+ "type": "transferCall",
+ "destinations": [
+ {
+ "type": "number",
+ "number": "+1234567890",
+ "callerId": "+1987654321"
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
}
```
@@ -68,24 +79,32 @@ You can also transfer to a SIP destination:
```json
{
- "hooks": [{
- "on": "call.ending",
- "filters": [{
- "type": "oneOf",
- "key": "call.endedReason",
- "oneOf": ["pipeline-error"]
- }],
- "do": [{
- "type": "tool",
- "tool": {
- "type": "transferCall",
- "destinations": [{
- "type": "sip",
- "sipUri": "sip:user@domain.com"
- }]
- }
- }]
- }]
+ "hooks": [
+ {
+ "on": "call.ending",
+ "filters": [
+ {
+ "type": "oneOf",
+ "key": "call.endedReason",
+ "oneOf": ["pipeline-error"]
+ }
+ ],
+ "do": [
+ {
+ "type": "tool",
+ "tool": {
+ "type": "transferCall",
+ "destinations": [
+ {
+ "type": "sip",
+ "sipUri": "sip:user@domain.com"
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
}
```
@@ -95,57 +114,63 @@ Perform multiple actions—say a message, call a function, and transfer the call
```json
{
- "hooks": [{
- "on": "call.ending",
- "filters": [{
- "type": "oneOf",
- "key": "call.endedReason",
- "oneOf": ["pipeline-error"]
- }],
- "do": [
- {
- "type": "say",
- "exact": "I apologize for the technical difficulty. Let me transfer you to our support team."
- },
- {
- "type": "tool",
- "tool": {
- "type": "function",
- "function": {
- "name": "log_error",
- "parameters": {
- "type": "object",
- "properties": {
- "error_type": {
- "type": "string",
- "value": "pipeline_error"
+ "hooks": [
+ {
+ "on": "call.ending",
+ "filters": [
+ {
+ "type": "oneOf",
+ "key": "call.endedReason",
+ "oneOf": ["pipeline-error"]
+ }
+ ],
+ "do": [
+ {
+ "type": "say",
+ "exact": "I apologize for the technical difficulty. Let me transfer you to our support team."
+ },
+ {
+ "type": "tool",
+ "tool": {
+ "type": "function",
+ "function": {
+ "name": "log_error",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "error_type": {
+ "type": "string",
+ "value": "pipeline_error"
+ }
}
- }
+ },
+ "description": "Logs the error details for monitoring"
},
- "description": "Logs the error details for monitoring"
- },
- "async": true,
- "server": {
- "url": "https://your-server.com/api"
+ "async": true,
+ "server": {
+ "url": "https://your-server.com/api"
+ }
+ }
+ },
+ {
+ "type": "tool",
+ "tool": {
+ "type": "transferCall",
+ "destinations": [
+ {
+ "type": "number",
+ "number": "+1234567890",
+ "callerId": "+1987654321"
+ }
+ ]
}
}
- },
- {
- "type": "tool",
- "tool": {
- "type": "transferCall",
- "destinations": [{
- "type": "number",
- "number": "+1234567890",
- "callerId": "+1987654321"
- }]
- }
- }
- ]
- }]
+ ]
+ }
+ ]
}
```
-
+
Use `"oneOf": ["pipeline-error"]` as a catch-all filter for any pipeline-related error reason.
@@ -156,13 +181,17 @@ Respond when the assistant's speech is interrupted by the customer:
```json
{
- "hooks": [{
- "on": "assistant.speech.interrupted",
- "do": [{
- "type": "say",
- "exact": ["Sorry about that", "Go ahead", "Please continue"]
- }]
- }]
+ "hooks": [
+ {
+ "on": "assistant.speech.interrupted",
+ "do": [
+ {
+ "type": "say",
+ "exact": ["Sorry about that", "Go ahead", "Please continue"]
+ }
+ ]
+ }
+ ]
}
```
@@ -170,13 +199,17 @@ Handle customer speech interruptions in a similar way:
```json
{
- "hooks": [{
- "on": "customer.speech.interrupted",
- "do": [{
- "type": "say",
- "exact": "I apologize for interrupting. Please continue."
- }]
- }]
+ "hooks": [
+ {
+ "on": "customer.speech.interrupted",
+ "do": [
+ {
+ "type": "say",
+ "exact": "I apologize for interrupting. Please continue."
+ }
+ ]
+ }
+ ]
}
```
@@ -186,19 +219,23 @@ Respond when the customer doesn't speak within a specified time:
```json
{
- "hooks": [{
- "on": "customer.speech.timeout",
- "options": {
- "timeoutSeconds": 10,
- "triggerMaxCount": 2,
- "triggerResetMode": "onUserSpeech"
- },
- "do": [{
- "type": "say",
- "prompt": "Are you still there? Please let me know how I can help you."
- }],
- "name": "customer_timeout_check"
- }]
+ "hooks": [
+ {
+ "on": "customer.speech.timeout",
+ "options": {
+ "timeoutSeconds": 10,
+ "triggerMaxCount": 2,
+ "triggerResetMode": "onUserSpeech"
+ },
+ "do": [
+ {
+ "type": "say",
+ "prompt": "Are you still there? Please let me know how I can help you."
+ }
+ ],
+ "name": "customer_timeout_check"
+ }
+ ]
}
```
@@ -209,9 +246,283 @@ The `customer.speech.timeout` hook supports special options:
- `triggerResetMode`: Whether to reset the trigger count when user speaks (default: "never")
+## Example: Trigger actions at a specific time
+
+The `call.timeElapsed` hook fires once when a specified number of seconds has elapsed from call start. Use it to enforce call duration limits, prompt wrap-up behavior, or trigger time-based actions.
+
+Each `call.timeElapsed` hook fires **once** at the specified time. To trigger actions at multiple time points, add separate hooks with different `seconds` values.
+
+### Basic usage
+
+Speak a message 5 minutes into the call:
+
+```json
+{
+ "hooks": [
+ {
+ "on": "call.timeElapsed",
+ "options": {
+ "seconds": 300
+ },
+ "do": [
+ {
+ "type": "say",
+ "exact": "Just a heads up, we've been on the call for 5 minutes."
+ }
+ ]
+ }
+ ]
+}
+```
+
+### Call discipline (wrap-up and graceful close)
+
+Combine multiple `call.timeElapsed` hooks with `maxDurationSeconds` to enforce structured call discipline. This example begins wrapping up at 8 minutes, warns at 9 minutes, and hard-cuts at 10 minutes:
+
+```json
+{
+ "maxDurationSeconds": 600,
+ "hooks": [
+ {
+ "on": "call.timeElapsed",
+ "options": {
+ "seconds": 480
+ },
+ "do": [
+ {
+ "type": "say",
+ "exact": "We're approaching our time limit. Let's start wrapping up."
+ }
+ ]
+ },
+ {
+ "on": "call.timeElapsed",
+ "options": {
+ "seconds": 540
+ },
+ "do": [
+ {
+ "type": "say",
+ "exact": "We have about one minute left. Let me know if there's anything else urgent."
+ }
+ ]
+ },
+ {
+ "on": "call.timeElapsed",
+ "options": {
+ "seconds": 590
+ },
+ "do": [
+ {
+ "type": "say",
+ "exact": "Thank you for your time. I need to end the call now. Goodbye."
+ },
+ {
+ "type": "tool",
+ "tool": {
+ "type": "endCall"
+ }
+ }
+ ]
+ }
+ ]
+}
+```
+
+
+The `call.timeElapsed` hook supports one option:
+- `seconds`: Number of seconds from call start when the hook should trigger (1-3600)
+
+The hook fires once at the specified time. `maxDurationSeconds` (default: 600 seconds) acts as a hard cutoff that ends the call immediately. Use `call.timeElapsed` hooks before that limit to allow for a graceful close.
+
+
+### Inject a system message to guide the LLM
+
+Instead of speaking a fixed message, you can inject a system message into the conversation to change the LLM's behavior for the remainder of the call:
+
+```json
+{
+ "hooks": [
+ {
+ "on": "call.timeElapsed",
+ "options": {
+ "seconds": 480
+ },
+ "do": [
+ {
+ "type": "message.add",
+ "message": {
+ "role": "system",
+ "content": "The call has been going on for 8 minutes. Begin wrapping up the conversation. Summarize any action items and ask if there is anything else before ending the call."
+ }
+ }
+ ]
+ }
+ ]
+}
+```
+
+## Example: Handle low confidence transcripts
+
+When a transcriber produces a final transcript with low confidence (below the set confidence threshold or default of 0.4), it's normally discarded. The `assistant.transcriber.endpointedSpeechLowConfidence` hook allows you to handle these borderline cases by triggering actions like asking the user to repeat or logging the event.
+
+This hook only triggers for **final/endpointed transcripts** that fall within a configurable confidence range. Transcripts with confidence at or above the threshold are processed normally, while those below the minimum range are still discarded.
+
+### Basic usage
+
+Ask the user to repeat when a transcript has low confidence:
+
+```json
+{
+ "hooks": [
+ {
+ "on": "assistant.transcriber.endpointedSpeechLowConfidence",
+ "do": [
+ {
+ "type": "say",
+ "exact": "I'm sorry, I didn't quite catch that. Could you please repeat?"
+ }
+ ]
+ }
+ ]
+}
+```
+
+### Using confidence options
+
+Configure a specific confidence range for when the hook should trigger:
+
+```json
+{
+ "hooks": [
+ {
+ "on": "assistant.transcriber.endpointedSpeechLowConfidence",
+ "options": {
+ "confidenceMin": 0.2,
+ "confidenceMax": 0.4
+ },
+ "do": [
+ {
+ "type": "say",
+ "prompt": "You are having trouble understanding or properly hearing what the user is saying. Based on the conversation in {{transcript}}, ask the user to repeat what they just said."
+ }
+ ]
+ }
+ ]
+}
+```
+
+### Shorthand syntax
+
+You can use shorthand syntax similar to `customer.speech.timeout` hooks. The shorthand format is `[confidence=min:max]` where both min and max are optional:
+
+**Set both min and max:**
+
+```json
+{
+ "hooks": [
+ {
+ "on": "assistant.transcriber.endpointedSpeechLowConfidence[confidence=0.2:0.4]",
+ "do": [
+ {
+ "type": "say",
+ "exact": "Could you please repeat that?"
+ }
+ ]
+ }
+ ]
+}
+```
+
+**Set only minimum (max defaults to transcriber's confidence threshold):**
+
+```json
+{
+ "hooks": [
+ {
+ "on": "assistant.transcriber.endpointedSpeechLowConfidence[confidence=0.2:]",
+ "do": [
+ {
+ "type": "say",
+ "exact": "I didn't catch that clearly. Could you repeat?"
+ }
+ ]
+ }
+ ]
+}
+```
+
+**Set only maximum (min defaults to max - 0.2):**
+
+```json
+{
+ "hooks": [
+ {
+ "on": "assistant.transcriber.endpointedSpeechLowConfidence[confidence=:0.4]",
+ "do": [
+ {
+ "type": "say",
+ "exact": "Could you please speak a bit more clearly?"
+ }
+ ]
+ }
+ ]
+}
+```
+
+### Default behavior
+
+When no options are specified, the hook uses these defaults:
+
+- `confidenceMax`: Uses the transcriber's `confidenceThreshold` (typically 0.4)
+- `confidenceMin`: `confidenceMax - 0.2` (minimum 0)
+
+For example, if your transcriber has a `confidenceThreshold` of 0.4:
+
+- Transcripts with confidence ≥ 0.4: Processed normally
+- Transcripts with confidence 0.2-0.4: Hook triggers
+- Transcripts with confidence < 0.2: Discarded
+
+### Multiple hooks for different ranges
+
+You can configure multiple hooks to handle different confidence ranges with different actions:
+
+```json
+{
+ "hooks": [
+ {
+ "on": "assistant.transcriber.endpointedSpeechLowConfidence[confidence=0.3:0.4]",
+ "do": [
+ {
+ "type": "say",
+ "exact": "I'm having a bit of trouble hearing you. Could you speak a bit louder?"
+ }
+ ]
+ },
+ {
+ "on": "assistant.transcriber.endpointedSpeechLowConfidence[confidence=0.2:0.3]",
+ "do": [
+ {
+ "type": "say",
+ "exact": "I'm sorry, I really couldn't understand that. Could you please repeat what you said?"
+ }
+ ]
+ }
+ ]
+}
+```
+
+
+The `assistant.transcriber.endpointedSpeechLowConfidence` hook supports these options:
+- `confidenceMin`: Minimum confidence threshold (0-1, default: confidenceMax - 0.2)
+- `confidenceMax`: Maximum confidence threshold (0-1, default: transcriber's confidenceThreshold)
+
+This hook is supported for transcribers that have `confidenceThreshold` configuration: Deepgram, Gladia, and AssemblyAI.
+
+
## Example: End call if user hasn't spoken for 30s
-Assistant checks with the user at the 10 and 20s mark from when the user is silent, and ends the call after 30s of silence.
+Assistant checks with the user at the 10 and 20s mark from when the user is silent, and ends the call after 30s of silence.
```json
{
@@ -259,8 +570,8 @@ Assistant checks with the user at the 10 and 20s mark from when the user is sile
},
"do": [
{
- "type" : "say",
- "exact" : "I'll be ending the call now, please feel free to call back at any time."
+ "type": "say",
+ "exact": "I'll be ending the call now, please feel free to call back at any time."
},
{
"type": "tool",
@@ -276,16 +587,17 @@ Assistant checks with the user at the 10 and 20s mark from when the user is sile
}
```
-
## Common use cases
- Transfer to a human agent on errors
- Route to a fallback system if the assistant fails
- Handle customer or assistant interruptions gracefully
- Prompt customers who become unresponsive during a call
+- Enforce call duration limits with graceful wrap-up behavior
+- Handle low confidence transcripts by asking users to repeat or speak more clearly
- Log errors or events for monitoring
-## Slack Webhook on Call Failure
+## Slack Webhook on Call Failure
You can set up automatic Slack notifications when calls fail by combining assistant hooks with Slack webhooks. This is useful for monitoring call quality and getting immediate alerts when issues occur.
@@ -307,10 +619,10 @@ export default async function(req: Request): Promise {
try {
const json = await req.json();
console.log(json);
-
+
const callId = json.message.call.id;
const reason = json.message.toolCalls[0].function.arguments.properties.callEndedReason.value;
-
+
fetch("", {
"method": "POST",
"headers": {
@@ -320,11 +632,11 @@ export default async function(req: Request): Promise {
text: `🚨 Call Failed\nCall ID: ${callId}\nReason: ${reason}`
}),
});
-
+
return Response.json({
- results: [{
- "result": "success",
- "toolCallId": "hook-function-call"
+ results: [{
+ "result": "success",
+ "toolCallId": "hook-function-call"
}],
});
} catch (err) {
@@ -340,37 +652,43 @@ Add this hook configuration to your assistant to trigger Slack notifications on
```json
{
- "hooks": [{
- "on": "call.ending",
- "filters": [{
- "type": "oneOf",
- "key": "call.endedReason",
- "oneOf": ["pipeline-error"]
- }],
- "do": [{
- "type": "tool",
- "tool": {
- "type": "function",
- "function": {
- "name": "report_error",
- "parameters": {
- "type": "object",
- "properties": {
- "text": {
- "type": "string",
- "value": "A call error occurred."
- }
+ "hooks": [
+ {
+ "on": "call.ending",
+ "filters": [
+ {
+ "type": "oneOf",
+ "key": "call.endedReason",
+ "oneOf": ["pipeline-error"]
+ }
+ ],
+ "do": [
+ {
+ "type": "tool",
+ "tool": {
+ "type": "function",
+ "function": {
+ "name": "report_error",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "text": {
+ "type": "string",
+ "value": "A call error occurred."
+ }
+ }
+ },
+ "description": "Reports a call error to Slack."
+ },
+ "async": false,
+ "server": {
+ "url": ""
}
- },
- "description": "Reports a call error to Slack."
- },
- "async": false,
- "server": {
- "url": ""
+ }
}
- }
- }]
- }]
+ ]
+ }
+ ]
}
```
diff --git a/fern/assistants/call-recording.mdx b/fern/assistants/call-recording.mdx
index 16c215360..855949d14 100644
--- a/fern/assistants/call-recording.mdx
+++ b/fern/assistants/call-recording.mdx
@@ -1,31 +1,140 @@
---
-title: Call recording
-subtitle: Record and store calls for analysis and training
+title: Call recording, logging and transcribing
+subtitle: Record, log, and transcribe calls using artifact plans
slug: assistants/call-recording
-description: Learn how to record calls and store them for quality assurance and analysis
+description: Learn how to configure artifact plans to record calls, generate transcripts, and store logs for analysis
---
+
+ **Deprecated**: The old `recordingEnabled`, `recordingPath`, and `recordingCredentials` properties are deprecated. Use the new `artifactPlan` configuration instead for recording, logging, and transcript generation.
+
+
## Overview
-Vapi provides comprehensive call recording capabilities that allow you to capture, store, and analyze voice conversations for quality assurance, training, and compliance purposes.
+Vapi's artifact plan system provides comprehensive call recording, logging, and transcription capabilities that allow you to capture, store, and analyze voice conversations for quality assurance, training, and compliance purposes.
-**Call recording enables you to:**
+**Artifact plans enable you to:**
+- Record audio conversations for quality monitoring
+- Generate detailed call logs for debugging and analysis
+- Create transcripts for conversation analysis
+- Store artifacts in custom storage or Vapi's secure cloud
- Monitor conversation quality and assistant performance
-- Train and improve your voice AI models
- Ensure compliance with regulatory requirements
-- Analyze customer interactions for insights
-## Recording Configuration
+## Use Cases
+
+### Payment Processing Flows
+
+Protect sensitive payment information by using a middle assistant with all artifacts disabled:
+
+```json
+{
+ "squad": {
+ "members": [
+ {
+ "assistant": {
+ "name": "Service Assistant",
+ "artifactPlan": {
+ "recordingEnabled": true,
+ "loggingEnabled": true,
+ "transcriptPlan": { "enabled": true }
+ }
+ }
+ },
+ {
+ "assistant": {
+ "name": "Payment Assistant",
+ "artifactPlan": {
+ "recordingEnabled": false,
+ "loggingEnabled": false,
+ "transcriptPlan": { "enabled": false }
+ }
+ }
+ },
+ {
+ "assistant": {
+ "name": "Confirmation Assistant",
+ "artifactPlan": {
+ "recordingEnabled": true,
+ "loggingEnabled": true,
+ "transcriptPlan": { "enabled": true }
+ }
+ }
+ }
+ ]
+ }
+}
+```
+
+This flow ensures that:
+- Service conversations are recorded for quality assurance
+- Payment details (credit card numbers, CVV codes) are never recorded or logged
+- Confirmation conversations are recorded for compliance
+
+### Consent Gathering
+
+For Enterprise customers, Vapi provides built-in [recording consent plans](/security-and-privacy/recording-consent-plan) that automatically create a consent assistant to request recording permission before transferring to your main assistant.
+
+The Enterprise consent feature supports both:
+- **Verbal consent**: Requires explicit "yes" confirmation from users
+- **Implicit consent**: Assumes consent if users stay on the line after hearing the consent message
+
+The system automatically tracks consent decisions and provides compliance data in the end-of-call report, including whether consent was granted and when it was obtained.
+
+Alternatively, you can implement custom consent flows using squads:
+
+```json
+{
+ "squad": {
+ "members": [
+ {
+ "assistant": {
+ "name": "Consent Assistant",
+ "artifactPlan": {
+ "recordingEnabled": false,
+ "loggingEnabled": false,
+ "transcriptPlan": { "enabled": false }
+ }
+ }
+ },
+ {
+ "assistant": {
+ "name": "Main Assistant",
+ "artifactPlan": {
+ "recordingEnabled": true,
+ "loggingEnabled": true,
+ "transcriptPlan": { "enabled": true }
+ }
+ }
+ }
+ ]
+ }
+}
+```
+
+The consent assistant runs without generating artifacts, while the main assistant records the actual conversation after consent is obtained.
+
+## Artifact Plan Configuration
-### Enable Recording
+### Basic Configuration
-You can enable call recording at the assistant level or per individual call:
+Configure recording, logging, and transcript generation using the `artifactPlan`:
```json title="Assistant Configuration"
{
"name": "Customer Support Assistant",
- "recordingEnabled": true,
+ "artifactPlan": {
+ "recordingEnabled": true,
+ "recordingFormat": "wav;l16",
+ "loggingEnabled": true,
+ "pcapEnabled": true,
+ "transcriptPlan": {
+ "enabled": true,
+ "assistantName": "Assistant",
+ "userName": "Customer"
+ }
+ },
"model": {
"provider": "openai",
"model": "gpt-4"
@@ -40,82 +149,253 @@ You can enable call recording at the assistant level or per individual call:
```json title="Per-Call Configuration"
{
"assistant": {
- "name": "Support Agent"
+ "name": "Support Agent",
+ "artifactPlan": {
+ "recordingEnabled": true,
+ "recordingFormat": "wav;l16",
+ "loggingEnabled": true,
+ "pcapEnabled": true,
+ "transcriptPlan": {
+ "enabled": true,
+ "assistantName": "Support Agent",
+ "userName": "Customer"
+ }
+ }
},
- "recordingEnabled": true,
"phoneNumberId": "your-phone-number-id"
}
```
-### Recording Options
+### Artifact Plan Options
-Configure recording behavior with these options:
+Configure different types of artifacts with these options:
-- **`recordingEnabled`**: Enable or disable recording for this assistant/call
-- **`recordingChannelCount`**: Number of audio channels to record (1 for mono, 2 for stereo)
-- **`recordingFormat`**: Audio format for recordings (mp3, wav, etc.)
+- **`recordingEnabled`**: Enable call recording (stored in `call.artifact.recording`). Default: `true`
+- **`recordingFormat`**: Audio format for recordings (e.g., "wav;l16", "mp3"). Default: "wav;l16"
+- **`loggingEnabled`**: Enable detailed call logs (stored in `call.artifact.logUrl`). Default: `true`
+- **`pcapEnabled`**: Enable SIP packet capture for phone calls (stored in `call.artifact.pcapUrl`). Default: `true`
+- **`transcriptPlan`**: Configure transcript generation with speaker names
-## Storage Options
+## Storage Configuration
### Default Storage
-By default, Vapi stores recordings securely in the cloud:
+By default, Vapi stores artifacts securely in the cloud:
-- Recordings are encrypted at rest and in transit
+- Recordings, logs, and transcripts are encrypted at rest and in transit
- Access is controlled through your API credentials
-- Recordings are automatically cleaned up based on your retention policy
+- Artifacts are automatically cleaned up based on your retention policy
### Custom Storage
-For advanced use cases, you can configure custom storage:
+Configure custom storage for different artifact types:
```json title="S3 Storage Configuration"
{
- "recordingEnabled": true,
- "recordingPath": "https://your-bucket.s3.amazonaws.com/recordings/",
- "recordingCredentials": {
- "provider": "aws",
- "region": "us-east-1",
- "accessKeyId": "your-access-key",
- "secretAccessKey": "your-secret-key"
- }
+ "artifactPlan": {
+ "recordingEnabled": true,
+ "recordingFormat": "wav;l16",
+ "loggingEnabled": true,
+ "pcapEnabled": true,
+ "recordingUseCustomStorageEnabled": true,
+ "loggingUseCustomStorageEnabled": true,
+ "pcapUseCustomStorageEnabled": true,
+ "recordingPath": "https://your-bucket.s3.amazonaws.com/recordings/",
+ "loggingPath": "https://your-bucket.s3.amazonaws.com/logs/",
+ "pcapS3PathPrefix": "/pcaps",
+ "transcriptPlan": {
+ "enabled": true,
+ "assistantName": "Assistant",
+ "userName": "Customer"
+ }
+ },
+ "credentials": [
+ {
+ "provider": "aws",
+ "region": "us-east-1",
+ "accessKeyId": "your-access-key",
+ "secretAccessKey": "your-secret-key"
+ }
+ ]
}
```
```json title="Google Cloud Storage"
{
- "recordingEnabled": true,
- "recordingPath": "gs://your-bucket/recordings/",
- "recordingCredentials": {
- "provider": "gcp",
- "serviceAccountKey": "your-service-account-json"
- }
+ "artifactPlan": {
+ "recordingEnabled": true,
+ "recordingFormat": "wav;l16",
+ "loggingEnabled": true,
+ "pcapEnabled": true,
+ "recordingUseCustomStorageEnabled": true,
+ "loggingUseCustomStorageEnabled": true,
+ "pcapUseCustomStorageEnabled": true,
+ "recordingPath": "gs://your-bucket/recordings/",
+ "loggingPath": "gs://your-bucket/logs/",
+ "transcriptPlan": {
+ "enabled": true,
+ "assistantName": "Assistant",
+ "userName": "Customer"
+ }
+ },
+ "credentials": [
+ {
+ "provider": "gcp",
+ "serviceAccountKey": "your-service-account-json"
+ }
+ ]
}
```
-## Accessing Recordings
+### Storage Control Options
+
+Control where each artifact type is stored:
+
+- **`recordingUseCustomStorageEnabled`**: Use custom storage for recordings (default: true)
+- **`loggingUseCustomStorageEnabled`**: Use custom storage for logs (default: true)
+- **`pcapUseCustomStorageEnabled`**: Use custom storage for SIP packet capture (default: true)
+- **`recordingPath`**: Custom path for recording storage
+- **`loggingPath`**: Custom path for log storage
+- **`pcapS3PathPrefix`**: S3 path prefix for PCAP files
+
+## Squad and Transfer Behavior
+
+### Dynamic Artifact Control
+
+When handing off between assistants in a Squad, you may choose to change the local context for each assistant via the `contextEngineeringPlan`. By default, only the final context will be used in the artifact and analysis (Structured outputs and success evaluation). To include the full message history across all assistants in the call, set [`artifactPlan.fullMessageHistoryEnabled`](/api-reference/squads/create#request.body.membersOverrides.artifactPlan.fullMessageHistoryEnabled) to true.
+
+In squads with multiple assistants, artifact generation (recording, logging, transcripts) can be controlled per assistant. When assistants are swapped or transferred during a call:
+
+- **Recording**: Pauses when `recordingEnabled: false` assistant is active, resumes when `recordingEnabled: true` assistant takes over
+- **Logging**: Pauses when `loggingEnabled: false` assistant is active, resumes when `loggingEnabled: true` assistant takes over
+- **Transcripts**: Pauses when `transcriptPlan.enabled: false` assistant is active, resumes when `transcriptPlan.enabled: true` assistant takes over
+
+## Transcript Configuration
+
+### Automatic Transcription
+
+Configure transcript generation using the `transcriptPlan`:
+
+```json
+{
+ "artifactPlan": {
+ "transcriptPlan": {
+ "enabled": true,
+ "assistantName": "Assistant",
+ "userName": "Customer"
+ }
+ }
+}
+```
+
+### Transcript Plan Options
+
+- **`enabled`**: Enable or disable transcript generation
+- **`assistantName`**: Name to use for assistant messages in transcripts
+- **`userName`**: Name to use for user messages in transcripts
+
+### Transcript Features
+
+- **Real-time transcription**: Transcripts are generated during the call
+- **Speaker identification**: Distinguishes between assistant and user messages
+- **Timestamps**: Each message includes timing information
+- **OpenAI formatting**: Available in `call.artifact.messagesOpenAIFormatted`
+
+### Transcript Structure
+
+```json
+{
+ "callId": "call-123",
+ "artifact": {
+ "transcript": [
+ {
+ "role": "assistant",
+ "message": "Hello! How can I help you today?",
+ "time": 0.5
+ },
+ {
+ "role": "user",
+ "message": "I need help with my account",
+ "time": 3.2
+ }
+ ],
+ "messagesOpenAIFormatted": [
+ {
+ "role": "assistant",
+ "content": "Hello! How can I help you today?"
+ },
+ {
+ "role": "user",
+ "content": "I need help with my account"
+ }
+ ]
+ }
+}
+```
+
+## Logging Configuration
+
+### Detailed Call Logs
+
+Enable comprehensive logging for debugging and analysis:
+
+```json
+{
+ "artifactPlan": {
+ "loggingEnabled": true,
+ "loggingUseCustomStorageEnabled": true,
+ "loggingPath": "https://your-bucket.s3.amazonaws.com/logs/"
+ }
+}
+```
+
+### Log Features
+
+- **Debug information**: Detailed logs for troubleshooting
+- **Performance metrics**: Call timing and performance data
+- **Error tracking**: Comprehensive error logs and stack traces
+- **Workflow execution**: Step-by-step workflow node execution
+
+### Accessing Logs
+
+Logs are available through:
+- **Dashboard**: View logs in the call details page
+- **API**: Access via `call.artifact.logUrl`
+- **Custom storage**: Store logs in your S3/GCP bucket
+
+## Accessing Artifacts
### Via Dashboard
1. Navigate to **Calls** in your Vapi dashboard
2. Select a specific call from the list
-3. Click on the **Recording** tab to play or download the audio
+3. View artifacts in the **Artifacts** section:
+ - **Recording**: Play or download the audio recording
+ - **Transcript**: View the full conversation transcript
+ - **Logs**: Access detailed call logs for debugging
+ - **PCAP**: Download SIP packet capture (phone calls only)
### Via API
-Retrieve recording URLs programmatically:
+Retrieve artifact URLs programmatically:
```typescript
import { VapiClient } from "@vapi-ai/server-sdk";
const client = new VapiClient({ token: "your-api-key" });
-// Get call details including recording URL
+// Get call details including all artifacts
const call = await client.calls.get("call-id");
-console.log("Recording URL:", call.recordingUrl);
+
+console.log("Recording URL:", call.artifact.recording);
+console.log("Transcript:", call.artifact.transcript);
+console.log("Log URL:", call.artifact.logUrl);
+console.log("PCAP URL:", call.artifact.pcapUrl);
+console.log("Messages:", call.artifact.messages);
+console.log("Nodes:", call.artifact.nodes);
```
## Privacy and Compliance
@@ -128,6 +408,15 @@ console.log("Recording URL:", call.recordingUrl);
- **Data protection** regulations (GDPR, CCPA, etc.)
- **Industry standards** (PCI DSS, HIPAA, etc.)
+### Built-in Consent Management
+
+For Enterprise customers, Vapi provides automated consent management through [recording consent plans](/security-and-privacy/recording-consent-plan). This feature automatically:
+
+- Creates a consent assistant that requests recording permission
+- Handles both verbal and stay-on-line consent types
+- Only begins recording after consent is granted
+- Maintains audit trails of consent decisions
+
### Best Practices
- **Inform callers** about recording at the start of conversations
@@ -141,60 +430,109 @@ console.log("Recording URL:", call.recordingUrl);
## Recording Analysis
-### Transcription
+### Call Artifacts
-Recorded calls are automatically transcribed for analysis:
+Use the comprehensive artifact data for analysis:
```json
{
"callId": "call-123",
- "transcript": [
- {
- "role": "assistant",
- "message": "Hello! How can I help you today?",
- "time": 0.5
- },
- {
- "role": "user",
- "message": "I need help with my account",
- "time": 3.2
+ "artifact": {
+ "recording": "https://api.vapi.ai/recordings/call-123.mp3",
+ "transcript": [
+ {
+ "role": "assistant",
+ "message": "Hello! How can I help you today?",
+ "time": 0.5
+ },
+ {
+ "role": "user",
+ "message": "I need help with my account",
+ "time": 3.2
+ }
+ ],
+ "logUrl": "https://api.vapi.ai/logs/call-123.json",
+ "messages": [
+ {
+ "role": "assistant",
+ "message": "Hello! How can I help you today?"
+ },
+ {
+ "role": "user",
+ "message": "I need help with my account"
+ }
+ ],
+ "nodes": [
+ {
+ "name": "greeting",
+ "messages": ["Hello! How can I help you today?"],
+ "variables": {}
+ }
+ ],
+ "variableValues": {
+ "customerName": "John Doe",
+ "issueType": "account_access"
}
- ],
- "recordingUrl": "https://api.vapi.ai/recordings/call-123.mp3"
+ }
}
```
-### Call Analysis
+### Analysis Use Cases
-Use recorded data for insights:
+Use artifact data for comprehensive insights:
-- **Conversation flow** analysis
-- **Response quality** evaluation
-- **Customer satisfaction** metrics
-- **Assistant performance** tracking
+- **Conversation flow** analysis using `artifact.nodes`
+- **Response quality** evaluation using `artifact.messages`
+- **Customer satisfaction** metrics from transcript analysis
+- **Assistant performance** tracking using logs and recordings
+- **Workflow optimization** using node execution data
+- **Variable tracking** using `artifact.variableValues`
## FAQ
-
- Yes, all recordings are automatically transcribed and available through the API and dashboard.
+
+ Use the `artifactPlan` configuration in your assistant or call settings. Set `recordingEnabled`, `loggingEnabled`, and configure `transcriptPlan.enabled` to `true` to enable all three features.
+
+
+
+ By default, artifacts are stored securely in Vapi's cloud storage. You can configure custom storage using S3 or Google Cloud Storage with the `recordingPath`, `loggingPath`, and storage control options.
+
+
+
+ Artifacts are available through the API via `call.artifact.recording`, `call.artifact.transcript`, `call.artifact.logUrl`, and other properties. You can also view them in the Vapi dashboard.
-
- Default retention is 30 days. You can configure custom retention policies for your account.
+
+ The `transcript` includes timing information and is optimized for conversation analysis. The `messages` property contains the same content but in a simpler format, while `messagesOpenAIFormatted` is structured for OpenAI API compatibility.
+
+
+
+ Use the `transcriptPlan` configuration with `assistantName` and `userName` properties to customize how speakers are identified in the transcript output.
- Yes, you can enable/disable recording at both the assistant level and per individual call.
+ Yes, you can configure artifact plans at both the assistant level and per individual call. This gives you granular control over what artifacts are generated for each conversation.
+
+
+
+ PCAP (Packet Capture) is available for phone calls and contains SIP packet data for debugging telephony issues. It's stored in `call.artifact.pcapUrl` when `pcapEnabled` is set to `true`.
+
+
+
+ Retention periods vary by plan:
+ - **Pay-As-You-Go:** Up to 30 days for chats and 14 days for calls
+ - **Enterprise:** Configurable retention policies
-
- Call recording is available in all supported Vapi regions with local data residency options.
+
+ In squads, each assistant can have different artifact settings. Recording, logging, and transcripts pause when an assistant with disabled artifacts is active, and resume when an assistant with enabled artifacts takes over. This allows for privacy-conscious flows like consent collection.
## Next Steps
- **[Call Analysis](/assistants/call-analysis)** - Analyze recorded conversations for insights
+- **[Recording Consent Plans](/security-and-privacy/recording-consent-plan)** - Automated consent management for Enterprise customers
- **[Privacy Compliance](/security-and-privacy/GDPR)** - Ensure GDPR and privacy compliance
- **[API Reference](/api-reference/calls/create)** - Explore recording configuration options
diff --git a/fern/assistants/dynamic-variables.mdx b/fern/assistants/dynamic-variables.mdx
index 29606af9e..4b38a4a24 100644
--- a/fern/assistants/dynamic-variables.mdx
+++ b/fern/assistants/dynamic-variables.mdx
@@ -79,6 +79,8 @@ These variables are automatically filled based on the current (UTC) time, so you
| `{{year}}` | Current year (UTC) | 2024 |
| `{{customer.number}}` | Customer's phone number | +1xxxxxxxxxx |
| `{{customer.X}}` | Any other customer property | |
+| `{{transport.conversationType}}` | Whether the conversation is over `chat` or `voice` | `chat` |
+| `{{transport.X}}` | Any other transport-related property | |
## Advanced date and time usage
@@ -116,13 +118,13 @@ Outputs: `Monday, January 01, 2024, 03:45 PM`
| `%H:%M` | 15:45 | 24-hour time |
| `%A` | Monday | Day of week |
| `%b %d, %Y` | Jan 01, 2024 | Abbrev. Month Day |
-```
+
## Using dynamic variables in the dashboard
To use dynamic variables in the dashboard, include them in your prompts or messages using double curly braces. For example:
-```
+```liquid
Hello, {{name}}!
```
@@ -131,4 +133,36 @@ When you start a call, you must provide a value for each variable (like `name`)
Always use double curly braces (`{{variableName}}`) to reference dynamic variables in your prompts and messages.
-
\ No newline at end of file
+
+## Setting assistant prompt using conversation type
+
+Using the `transport.conversationType` variable in your assistant's system prompt, you can modify how it behaves in chats or calls:
+
+```liquid
+[Identity]
+You are a helpful assistant, talking with a customer via {{transport.conversationType}}.
+
+[Response Guidelines]
+{% if transport.conversationType == "chat" -%}
+- Format numbers naturally; e.g., a US phone number could be +1 (555) 123-4567
+- Use Markdown formatting for lists (both unordered and ordered)
+{%- elsif transport.conversationType == "voice" -%}
+- Always spell numbers in words; e.g., 256 should be 'two hundred and fifty-six'
+- Only ask one question at a time
+- Never use Markdown format; write everything as if it is being said aloud
+{%- endif -%}
+```
+
+## HIPAA and Zero Data Retention mode
+
+When HIPAA mode or Zero Data Retention is enabled for your organization or assistant, Vapi does not store any call data, including variable values. This means:
+
+- Variable values passed via `variableValues` are processed during the call but **not persisted** after the call ends
+- Call logs, recordings, and transcriptions are not stored
+- Variables are only available in real-time during the active call session
+
+This ensures compliance with privacy requirements while still allowing you to personalize conversations with dynamic variables.
+
+
+For more information on enabling HIPAA compliance and understanding data retention policies, see the [HIPAA Compliance](/security-and-privacy/hipaa) documentation.
+
diff --git a/fern/assistants/examples/appointment-scheduling.mdx b/fern/assistants/examples/appointment-scheduling.mdx
new file mode 100644
index 000000000..c4a433462
--- /dev/null
+++ b/fern/assistants/examples/appointment-scheduling.mdx
@@ -0,0 +1,277 @@
+---
+title: Appointment scheduling assistant
+subtitle: Build an AI receptionist that books, reschedules, and cancels appointments using Assistants and tools
+slug: assistants/examples/appointment-scheduling
+description: Build a voice AI appointment scheduling assistant with Google Calendar integration, availability checking, and automated confirmations using Vapi Assistants.
+---
+
+## Overview
+
+Build an AI-powered appointment scheduling assistant that handles inbound calls for booking, rescheduling, and canceling appointments. This approach uses a single Assistant with tools for calendar availability, customer lookups, and confirmations.
+
+**Assistant Capabilities:**
+* Real-time availability checks and booking
+* Reschedule and cancel with confirmation
+* Customer verification and data lookups
+* SMS/email confirmations via tools
+
+**What You'll Build:**
+* An assistant with a focused prompt for scheduling flows
+* Tools for calendar availability and booking
+* Optional CSV knowledge bases for customers/services
+* A phone number attached to your assistant
+
+## Prerequisites
+
+* A [Vapi account](https://dashboard.vapi.ai/)
+* Google Calendar or a scheduling backend
+
+## 1. Prepare data (optional)
+
+Use sample CSVs for customers, services, and appointments during development.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 1. Open your [Vapi Dashboard](https://dashboard.vapi.ai) → Files
+ 2. Upload the three CSVs and note their file IDs
+
+
+
+
+ ```typescript
+ import { VapiClient } from "@vapi-ai/server-sdk";
+ import fs from "fs";
+
+ const vapi = new VapiClient({ token: process.env.VAPI_API_KEY! });
+
+ async function upload(file: string) {
+ const res = await vapi.files.create({ file: fs.createReadStream(file) });
+ console.log(file, res.id);
+ return res.id;
+ }
+
+ const servicesFileId = await upload("services.csv");
+ const customersFileId = await upload("customers.csv");
+ const appointmentsFileId = await upload("appointments.csv");
+ ```
+
+
+ ```python
+ import requests, os
+
+ def upload(path: str):
+ r = requests.post(
+ "https://api.vapi.ai/file",
+ headers={"Authorization": f"Bearer {os.getenv('VAPI_API_KEY')}"},
+ files={"file": open(path, "rb")},
+ )
+ r.raise_for_status()
+ print(path, r.json()["id"])
+ return r.json()["id"]
+
+ services_file_id = upload("services.csv")
+ customers_file_id = upload("customers.csv")
+ appointments_file_id = upload("appointments.csv")
+ ```
+
+
+
+
+
+---
+
+## 2. Create calendar tools
+
+Use the Google Calendar integration for availability and booking, or your own API via a custom tool.
+
+
+
+ See: [Google Calendar Integration](/tools/google-calendar)
+
+ Configure tools for:
+ - `check_availability(service, date)`
+ - `book_appointment(customer, service, time)`
+ - `reschedule_appointment(appointmentId, time)`
+ - `cancel_appointment(appointmentId)`
+
+
+ See: [Custom Tools](/tools/custom-tools)
+
+ Define function tools that call your scheduling backend. Attach CSV knowledge bases (customers/services) if using the sample data above.
+
+
+
+---
+
+## 3. Create the assistant
+
+
+
+
+
+ - Go to Assistants → Create Assistant → Blank template
+ - Name it `Receptionist`
+
+
+ ```txt title="System Prompt" maxLines=12
+ You are an AI receptionist for a barbershop. Your goals:
+ 1) Verify the customer
+ 2) Offer booking, rescheduling, or cancellation
+ 3) Confirm details and send a confirmation
+
+ When needed, call tools: check_availability, book_appointment, reschedule_appointment, cancel_appointment.
+ Keep replies under 30 words. Confirm date/time clearly.
+ ```
+
+
+ Add your scheduling tools to the assistant and publish.
+
+
+
+
+ ```typescript
+ import { VapiClient } from "@vapi-ai/server-sdk";
+
+ const vapi = new VapiClient({ token: process.env.VAPI_API_KEY! });
+
+ const systemPrompt = `You are an AI receptionist for a barbershop. Verify the customer, then offer booking, rescheduling, or cancellation. Use scheduling tools when needed. Keep replies under 30 words.`;
+
+ const assistant = await vapi.assistants.create({
+ name: "Receptionist",
+ firstMessage: "Welcome to Tony's Barbershop! How can I help you today?",
+ model: {
+ provider: "openai",
+ model: "gpt-4o",
+ messages: [{ role: "system", content: systemPrompt }],
+ // toolIds: [ "CHECK_AVAILABILITY_ID", "BOOK_ID", "RESCHEDULE_ID", "CANCEL_ID" ]
+ }
+ });
+ ```
+
+
+ ```python
+ from vapi import Vapi
+ import os
+
+ client = Vapi(token=os.getenv("VAPI_API_KEY"))
+
+ assistant = client.assistants.create(
+ name="Receptionist",
+ first_message="Welcome to Tony's Barbershop! How can I help you today?",
+ model={
+ "provider": "openai",
+ "model": "gpt-4o",
+ "messages": [{"role": "system", "content": "You are an AI receptionist for a barbershop. Verify the customer, then handle booking/rescheduling/cancel."}]
+ }
+ )
+ ```
+
+
+
+---
+
+## 4. Make calls
+
+
+
+ ```typescript title="create web call"
+ import { VapiClient } from "@vapi-ai/server-sdk";
+
+ const vapi = new VapiClient({ token: process.env.VAPI_API_KEY! });
+
+ await vapi.calls.create({
+ transport: { type: "web" },
+ assistant: { assistantId: "your-assistant-id" }
+ });
+ ```
+
+ ```typescript title="create phone call"
+ await vapi.calls.create({
+ phoneNumberId: "your-phone-number-id",
+ customer: { number: "+15551234567" },
+ assistant: { assistantId: "your-assistant-id" }
+ });
+ ```
+
+
+
+ ```python title="create web call"
+ import os
+ from vapi import Vapi
+
+ client = Vapi(token=os.getenv("VAPI_API_KEY"))
+
+ client.calls.create(
+ transport={"type": "web"},
+ assistant_id="your-assistant-id",
+ )
+ ```
+
+ ```python title="create phone call"
+ client.calls.create(
+ phone_number_id="your-phone-number-id",
+ customer={"number": "+15551234567"},
+ assistant_id="your-assistant-id",
+ )
+ ```
+
+
+
+ ```bash
+ curl -X POST "https://api.vapi.ai/call/web" \
+ -H "Authorization: Bearer $VAPI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "assistant": { "assistantId": "your-assistant-id" }
+ }'
+ ```
+
+
+
+ ```bash
+ curl -X POST "https://api.vapi.ai/call" \
+ -H "Authorization: Bearer $VAPI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "assistant": { "assistantId": "your-assistant-id" },
+ "phoneNumberId": "your-phone-number-id",
+ "customer": { "number": "+15551234567" }
+ }'
+ ```
+
+
+
+## 5. Test and validate
+
+
+
+ Create a phone number and assign your assistant. See [Phone calls quickstart](/quickstart/phone).
+
+
+ - New booking → check availability → book → confirm
+ - Reschedule existing appointment → confirm
+ - Cancel appointment → confirm
+
+
+
+## Next steps
+
+- **Tools**: [Google Calendar](/tools/google-calendar), [Custom Tools](/tools/custom-tools)
+- **Structured outputs**: [Extract structured data](/assistants/structured-outputs-quickstart)
+- **Multichannel**: [Web integration](/quickstart/web)
+
diff --git a/fern/assistants/examples/inbound-support.mdx b/fern/assistants/examples/inbound-support.mdx
index d0b726861..3c10c11c4 100644
--- a/fern/assistants/examples/inbound-support.mdx
+++ b/fern/assistants/examples/inbound-support.mdx
@@ -1146,7 +1146,7 @@ Consider the reading the following guides to further enhance your assistant:
* [**Knowledge Bases**](../knowledge-base/) - Learn more about knowledge bases to build knowledge-based agents.
* [**External Integrations**](../tools/) - Configure integrations with [Google Calendar](../tools/google-calendar), [Google Sheets](../tools/google-sheets), [Slack](../tools/slack), etc.
-* [**Workflows**](../workflows/) - Learn about workflows to build voice agents for more complex use cases.
+* [**Squads**](../squads) - Learn how to compose multiple assistants and transfer seamlessly for complex use cases.
Need help? Chat with the team on our [Discord](https://discord.com/invite/pUFNcf2WmH) or mention us on [X/Twitter](https://x.com/Vapi_AI).
diff --git a/fern/assistants/examples/lead-qualification.mdx b/fern/assistants/examples/lead-qualification.mdx
new file mode 100644
index 000000000..ff761c360
--- /dev/null
+++ b/fern/assistants/examples/lead-qualification.mdx
@@ -0,0 +1,172 @@
+---
+title: Lead qualification assistant
+subtitle: Build an outbound sales assistant that qualifies leads and books meetings using tools
+slug: assistants/examples/lead-qualification
+description: Build a voice AI outbound sales assistant with BANT data capture, CRM integration, objection handling, and automated follow-ups using Vapi Assistants.
+---
+
+## Overview
+
+Create an outbound sales assistant that calls prospects, qualifies them using BANT signals, and books meetings. The assistant uses tools to look up leads, score qualification, update CRM, and schedule on a calendar.
+
+**Assistant Capabilities:**
+* BANT data capture with structured outputs
+* Objection handling and call outcome logging
+* Calendar booking and follow-up creation
+* CRM updates with next steps
+
+**What You'll Build:**
+* Focused sales prompt with clear call flow
+* Tools for lead lookup, scoring, CRM update, and scheduling
+* Optional CSV knowledge bases for demo leads/products
+
+## Prerequisites
+
+* A [Vapi account](https://dashboard.vapi.ai/)
+* CRM or spreadsheet data; Google Calendar or scheduling backend
+
+## 1. Prepare sample data (optional)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Upload `leads.csv`, `products.csv`, and `call_outcomes.csv` and note the file IDs.
+
+
+
+
+ ```typescript
+ import { VapiClient } from "@vapi-ai/server-sdk";
+ import fs from "fs";
+ const vapi = new VapiClient({ token: process.env.VAPI_API_KEY! });
+ async function upload(p: string) { return (await vapi.files.create({ file: fs.createReadStream(p) })).id; }
+ const leadsFileId = await upload("leads.csv");
+ const productsFileId = await upload("products.csv");
+ const outcomesFileId = await upload("call_outcomes.csv");
+ ```
+
+
+
+
+
+---
+
+## 2. Create sales tools
+
+Configure function tools or your CRM API for:
+- `lookup_lead(leadId)`
+- `score_lead(budget, authority, need, timeline)`
+- `update_crm(leadId, callOutcome, nextSteps)`
+- `book_meeting(prospect, time)`
+
+See [Custom Tools](/tools/custom-tools) and [Google Calendar](/tools/google-calendar).
+
+---
+
+## 3. Define the assistant
+
+
+
+
+
+ - Name: `Outbound SDR`
+ - First message: concise opener asking permission to talk
+
+
+ ```txt title="System Prompt" maxLines=12
+ You are an outbound SDR. Goals: get permission, qualify with BANT, schedule a meeting, and log the outcome. Keep answers ≤ 25 words and be respectful. Use tools when needed.
+ ```
+
+
+ Capture: `permission_status`, `bant_budget`, `bant_authority`, `bant_need`, `bant_timeline`, `meeting_time`, `call_outcome`.
+ See [Structured outputs](/assistants/structured-outputs).
+
+
+
+
+ ```typescript
+ import { VapiClient } from "@vapi-ai/server-sdk";
+ const vapi = new VapiClient({ token: process.env.VAPI_API_KEY! });
+ const systemPrompt = `Outbound SDR. Get permission, qualify with BANT, then book. Keep responses short. Use lookup_lead, score_lead, update_crm, book_meeting.`;
+ const assistant = await vapi.assistants.create({
+ name: "Outbound SDR",
+ firstMessage: "Hi, this is Alex from TechFlow. Is now a good time to chat for 2 minutes?",
+ model: { provider: "openai", model: "gpt-4o", messages: [{ role: "system", content: systemPrompt }] }
+ });
+ ```
+
+
+
+---
+
+## 4. Make calls
+
+
+
+ ```typescript title="create web call"
+ import { VapiClient } from "@vapi-ai/server-sdk";
+ const vapi = new VapiClient({ token: process.env.VAPI_API_KEY! });
+ await vapi.calls.create({ transport: { type: "web" }, assistant: { assistantId: "your-assistant-id" } });
+ ```
+
+ ```typescript title="create phone call"
+ await vapi.calls.create({ phoneNumberId: "your-phone-number-id", customer: { number: "+15551234567" }, assistant: { assistantId: "your-assistant-id" } });
+ ```
+
+
+
+ ```python title="create web call"
+ import os
+ from vapi import Vapi
+ client = Vapi(token=os.getenv("VAPI_API_KEY"))
+ client.calls.create(transport={"type": "web"}, assistant_id="your-assistant-id")
+ ```
+
+ ```python title="create phone call"
+ client.calls.create(phone_number_id="your-phone-number-id", customer={"number": "+15551234567"}, assistant_id="your-assistant-id")
+ ```
+
+
+
+ ```bash
+ curl -X POST "https://api.vapi.ai/call/web" \
+ -H "Authorization: Bearer $VAPI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{ "assistant": { "assistantId": "your-assistant-id" } }'
+ ```
+
+
+
+ ```bash
+ curl -X POST "https://api.vapi.ai/call" \
+ -H "Authorization: Bearer $VAPI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{ "assistant": { "assistantId": "your-assistant-id" }, "phoneNumberId": "your-phone-number-id", "customer": { "number": "+15551234567" } }'
+ ```
+
+
+
+## 5. Test outbound calls
+
+Create a phone number or trigger an outbound call. See [Phone calls](/quickstart/phone).
+
+## Next steps
+
+- **CRM integration**: Connect your CRM via [Custom Tools](/tools/custom-tools)
+- **Calendar**: [Google Calendar](/tools/google-calendar)
+- **Escalation**: Use a [Squad](/squads) to hand off to a specialized closer
+
diff --git a/fern/assistants/examples/multilingual-agent.mdx b/fern/assistants/examples/multilingual-agent.mdx
index 0726fffe5..8afe1fc52 100644
--- a/fern/assistants/examples/multilingual-agent.mdx
+++ b/fern/assistants/examples/multilingual-agent.mdx
@@ -18,7 +18,7 @@ Build a dynamic customer support agent for GlobalTech International that automat
* Advanced prompting for cultural context awareness
-**Alternative Approach**: For a more structured multilingual experience with explicit language selection, see our [Workflow-based multilingual support](../../workflows/examples/multilingual-support) that guides customers through language selection and dedicated conversation paths.
+**Alternative Approach**: For a more structured multilingual experience with explicit language selection, see our [Squad-based multilingual support](../../squads/examples/multilingual-support) that guides customers through language selection and dedicated conversation paths.
## Prerequisites
@@ -1082,9 +1082,9 @@ Keep responses concise (under 50 words) while being thorough and helpful."""
---
-## Alternative: Workflow-Based Language Selection
+## Alternative: Squad-Based Language Selection
-For a more structured approach with explicit language selection, see our comprehensive [Workflow-based multilingual support](../../workflows/examples/multilingual-support) guide. This approach lets customers choose their language at the start of the call, then routes them to dedicated conversation paths optimized for each language.
+For a more structured approach with explicit language selection, see our comprehensive [Squad-based multilingual support](../../squads/examples/multilingual-support) guide. This approach lets customers choose their language at the start of the call, then routes them to dedicated conversation paths optimized for each language.
@@ -1155,6 +1155,7 @@ For a more structured approach with explicit language selection, see our compreh
## Provider Support Summary
**Speech-to-Text (Transcription):**
+- **Gladia**: Solaria, automatic language detection and code-switching.
- **Deepgram**: Nova 2, Nova 3 with "Multi" language setting
- **Google**: Latest models with "Multilingual" language setting
- **All other providers**: Single language only, no automatic detection
@@ -1175,7 +1176,7 @@ Just like that, you've built a dynamic multilingual customer support agent that
Consider reading the following guides to further enhance your multilingual implementation:
-* [**Workflow-based Multilingual Support**](../../workflows/examples/multilingual-support) - Compare with structured language selection approach
+* [**Squad-based Multilingual Support**](../../squads/examples/multilingual-support) - Compare with structured language selection approach
* [**Multilingual Configuration Guide**](../../../customization/multilingual) - Learn about all multilingual configuration options
* [**Custom Tools**](../../../tools/custom-tools) - Build advanced multilingual tools and integrations
diff --git a/fern/assistants/flush-syntax.mdx b/fern/assistants/flush-syntax.mdx
index 4ed7c5334..dbdea9579 100644
--- a/fern/assistants/flush-syntax.mdx
+++ b/fern/assistants/flush-syntax.mdx
@@ -53,8 +53,12 @@ VAPI supports three flush formats with case-insensitive matching:
```html title="Self-closing (Recommended)"
- ``` ```html title="Opening tag"
- ``` ```html title="Closing tag"
+ ```
+ ```html title="Opening tag"
+
+ ```
+ ```html title="Closing tag"
+
```
diff --git a/fern/assistants/quickstart.mdx b/fern/assistants/quickstart.mdx
new file mode 100644
index 000000000..e7c42ba04
--- /dev/null
+++ b/fern/assistants/quickstart.mdx
@@ -0,0 +1,97 @@
+---
+title: Assistants quickstart
+subtitle: Build your first assistant and make a phone call in minutes
+slug: assistants/quickstart
+---
+
+## Overview
+
+Create a voice assistant with a simple prompt, attach a phone number, and make your first call. You’ll also learn how to add tools to take real actions.
+
+**In this quickstart, you’ll:**
+- Create an assistant (Dashboard or SDK)
+- Attach a phone number
+- Make inbound and outbound calls
+
+## Prerequisites
+
+- A Vapi account and API key
+
+## 1) Create an assistant
+
+
+
+
+
+ Go to the [Vapi Dashboard](https://dashboard.vapi.ai) → Assistants → Create Assistant.
+
+
+ ```txt title="System Prompt" maxLines=8
+ You are a friendly phone support assistant. Greet the caller and offer help. Keep responses under 30 words. If a transfer is requested, confirm reason first.
+ ```
+
+
+ Click Publish and then “Talk to Assistant” to validate behavior.
+
+
+
+
+ ```typescript
+ import { VapiClient } from "@vapi-ai/server-sdk";
+
+ const vapi = new VapiClient({ token: process.env.VAPI_API_KEY! });
+
+ const assistant = await vapi.assistants.create({
+ name: "Support Assistant",
+ firstMessage: "Hello! How can I help you today?",
+ model: {
+ provider: "openai",
+ model: "gpt-4o",
+ messages: [
+ { role: "system", content: "You are a friendly phone support assistant. Keep responses under 30 words." }
+ ]
+ }
+ });
+ ```
+
+
+
+## 2) Add a phone number
+
+
+
+ In the Dashboard, go to Phone Numbers → Create Phone Number → assign your assistant.
+
+
+ ```typescript
+ const number = await vapi.phoneNumbers.create({
+ name: "Support Line",
+ assistantId: assistant.id
+ });
+ ```
+
+
+
+## 3) Make your first calls
+
+
+
+ Call the phone number you created. Your assistant will answer with the first message.
+
+
+ ```typescript
+ await vapi.calls.create({
+ assistantId: assistant.id,
+ customer: { number: "+1234567890" }
+ });
+ ```
+
+
+
+## Next steps
+
+- **Add tools**: [Custom tools](/tools/custom-tools)
+- **Tune speech**: [Speech configuration](/customization/speech-configuration)
+- **Structure data**: [Structured outputs](/assistants/structured-outputs)
+- **Move to multi-assistant**: [Squads](/squads)
+
diff --git a/fern/assistants/structured-outputs-quickstart.mdx b/fern/assistants/structured-outputs-quickstart.mdx
index 30ae147d1..cf4d7474f 100644
--- a/fern/assistants/structured-outputs-quickstart.mdx
+++ b/fern/assistants/structured-outputs-quickstart.mdx
@@ -8,24 +8,42 @@ slug: assistants/structured-outputs-quickstart
This quickstart guide will help you set up structured outputs to automatically extract customer information from phone calls. In just a few minutes, you'll create a structured output, link it to an assistant, and test data extraction.
+
+
### What are structured outputs?
-Structured outputs are AI-powered data extraction templates that automatically capture and organize information from conversations. They work by:
+Structured outputs are AI-powered analysis and extraction tools that intelligently process conversation data after calls end. They go beyond simple data extraction to provide intelligent analysis and evaluation. They work by:
-1. **Listening to conversations** - As your assistant talks with customers, structured outputs analyze the conversation in real-time
-2. **Extracting key information** - Based on your defined schema, they identify and extract relevant data points like names, emails, preferences, and issues
-3. **Validating and formatting** - The extracted data is validated against your schema rules and formatted into clean, structured JSON
-4. **Delivering results** - The structured data is available immediately after the call ends via API or webhooks
+1. **Processing complete call context** - After the call ends, structured outputs analyze the full transcript, messages, tool call results, and call metadata
+2. **Intelligent extraction & analysis** - Based on your schema, they can extract data, evaluate outcomes, analyze sentiment, determine success criteria, and summarize complex interactions
+3. **Validating and formatting** - Results are validated against your schema rules and formatted into clean, structured JSON
+4. **Delivering insights** - The processed data and insights are available via API or webhooks once analysis is complete
### When are structured outputs generated?
Structured outputs are processed:
-- **During the call** - Data is extracted in real-time as the conversation happens
-- **After call completion** - Final validation and formatting occurs when the call ends
+- **After call completion** - The full conversation is analyzed once the call ends
+- **Processing time** - Typically completes within a few seconds after call termination
- **Available via** - Call artifacts in the API response or webhook events
+### What data do structured outputs have access to?
+
+When processing, structured outputs can analyze:
+- **Complete transcript** - The full conversation between assistant and customer
+- **Messages history** - All messages exchanged during the call
+- **Tool call results** - Outcomes from any tools or functions executed
+- **Assistant context** - System prompts and configuration used during the call
+
### Why use structured outputs?
+**Beyond simple data extraction:**
+- **Call evaluation** - Determine if objectives were met (appointment booked, issue resolved)
+- **Sentiment analysis** - Understand customer satisfaction and emotional state
+- **CSAT scoring** - Extract customer satisfaction scores from feedback
+- **Intelligent summaries** - Generate contextual summaries of complex conversations
+- **Success metrics** - Evaluate agent performance and call outcomes
+
+**Operational benefits:**
- **Automate data entry** - No more manual transcription or form filling
- **Ensure consistency** - Every call captures the same structured information
- **Enable integrations** - Automatically sync data to CRMs, ticketing systems, or databases
@@ -45,79 +63,88 @@ A customer support assistant that automatically extracts:
Sign up at [dashboard.vapi.ai](https://dashboard.vapi.ai)
- Get your API key from the Dashboard settings
+ Get your API key from **API Keys** in the sidebar
## Step 1: Create your structured output
-You can create structured outputs using either the Dashboard UI or the API.
-
-### Option A: Using the Dashboard (Recommended for beginners)
+Define what information you want to extract using a [JSON Schema](https://json-schema.org/learn/getting-started-step-by-step). JSON Schema is a standard for describing data structures - [learn more about JSON Schema here](https://json-schema.org/understanding-json-schema/).
-
-
- 1. Log in to [dashboard.vapi.ai](https://dashboard.vapi.ai)
- 2. Click on **Structured Outputs** in the left sidebar
- 3. Click **Create New Structured Output**
-
-
-
- 1. **Name**: Enter "Support Ticket"
- 2. **Type**: Select "AI" (for automatic extraction)
- 3. **Description**: Add "Extract support ticket information from customer calls"
-
-
-
- Use the visual schema builder or paste this JSON directly:
- ```json
- {
- "type": "object",
- "properties": {
- "customer": {
- "type": "object",
- "properties": {
- "name": {"type": "string", "description": "Customer full name"},
- "email": {"type": "string", "format": "email", "description": "Customer email"},
- "phone": {"type": "string", "description": "Customer phone number"}
- },
- "required": ["name"]
- },
- "issue": {
+
+
+
+
+ 1. Log in to [dashboard.vapi.ai](https://dashboard.vapi.ai)
+ 2. Click on **Structured Outputs** in the left sidebar
+ 3. Click **Create New Structured Output**
+
+
+
+ 1. **Name**: Enter "Support Ticket"
+ 2. **Type**: Select "Object"
+ 3. **Description**: Add "Extract support ticket information from customer calls"
+
+
+
+ Use the visual schema builder:
+ ```json
+ {
"type": "object",
"properties": {
- "description": {"type": "string", "description": "Issue description"},
- "category": {
- "type": "string",
- "enum": ["billing", "technical", "general", "complaint"],
- "description": "Issue category"
+ "customer": {
+ "type": "object",
+ "properties": {
+ "name": {"type": "string", "description": "Customer full name"},
+ "email": {"type": "string", "format": "email", "description": "Customer email"},
+ "phone": {"type": "string", "description": "Customer phone number"}
+ },
+ "required": ["name"]
},
- "priority": {
- "type": "string",
- "enum": ["low", "medium", "high", "urgent"],
- "description": "Priority level"
+ "issue": {
+ "type": "object",
+ "properties": {
+ "description": {"type": "string", "description": "Issue description"},
+ "category": {
+ "type": "string",
+ "enum": ["billing", "technical", "general", "complaint"],
+ "description": "Issue category"
+ },
+ "priority": {
+ "type": "string",
+ "enum": ["low", "medium", "high", "urgent"],
+ "description": "Priority level"
+ }
+ },
+ "required": ["description", "category"]
+ },
+ "followUp": {
+ "type": "object",
+ "properties": {
+ "required": {"type": "boolean", "description": "Whether follow-up is needed"},
+ "method": {
+ "type": "string",
+ "enum": ["email", "phone", "none"],
+ "description": "Preferred follow-up method"
+ },
+ "notes": {"type": "string", "description": "Additional notes for follow-up"}
+ }
}
},
- "required": ["description", "category"]
+ "required": ["customer", "issue"]
}
- },
- "required": ["customer", "issue"]
- }
- ```
-
-
-
- 1. Click **Create Structured Output**
- 2. Copy the generated ID from the details page
- 3. You'll use this ID to link to your assistant
-
-
-
-### Option B: Using the API
-
-Define what information you want to extract using a [JSON Schema](https://json-schema.org/learn/getting-started-step-by-step). JSON Schema is a standard for describing data structures - [learn more about JSON Schema here](https://json-schema.org/understanding-json-schema/).
-
-
+ ```
+
+
+
+ 1. Click **Create Structured Output**
+ 2. In the structured output dialog, you can directly attach it to an assistant or workflow
+ 3. Select an existing assistant to attach this output to that assistant
+
+
+
+
+
```bash title="cURL"
curl -X POST https://api.vapi.ai/structured-output \
-H "Authorization: Bearer $VAPI_API_KEY" \
@@ -191,356 +218,364 @@ curl -X POST https://api.vapi.ai/structured-output \
}
}'
```
-
-```javascript title="Node.js"
-const response = await fetch('https://api.vapi.ai/structured-output', {
- method: 'POST',
- headers: {
- 'Authorization': `Bearer ${process.env.VAPI_API_KEY}`,
- 'Content-Type': 'application/json'
- },
- body: JSON.stringify({
- name: "Support Ticket",
- type: "ai",
- description: "Extract support ticket information from customer calls",
- schema: {
- type: "object",
- properties: {
- customer: {
- type: "object",
- properties: {
- name: {
- type: "string",
- description: "Customer full name"
- },
- email: {
- type: "string",
- format: "email",
- description: "Customer email address"
- },
- phone: {
- type: "string",
- description: "Customer phone number"
- }
+
+
+
+ ```typescript
+import { VapiClient } from "@vapi-ai/server-sdk";
+
+const vapi = new VapiClient({ token: process.env.VAPI_API_KEY! });
+
+const structuredOutput = await vapi.structuredOutputs.create({
+ name: "Support Ticket",
+ type: "ai",
+ description: "Extract support ticket information from customer calls",
+ schema: {
+ type: "object",
+ properties: {
+ customer: {
+ type: "object",
+ properties: {
+ name: {
+ type: "string",
+ description: "Customer full name"
+ },
+ email: {
+ type: "string",
+ format: "email",
+ description: "Customer email address"
},
- required: ["name"]
+ phone: {
+ type: "string",
+ description: "Customer phone number"
+ }
},
- issue: {
- type: "object",
- properties: {
- description: {
- type: "string",
- description: "Description of the customer issue"
- },
- category: {
- type: "string",
- enum: ["billing", "technical", "general", "complaint"],
- description: "Issue category"
- },
- priority: {
- type: "string",
- enum: ["low", "medium", "high", "urgent"],
- description: "Issue priority level"
- }
+ required: ["name"]
+ },
+ issue: {
+ type: "object",
+ properties: {
+ description: {
+ type: "string",
+ description: "Description of the customer issue"
+ },
+ category: {
+ type: "string",
+ enum: ["billing", "technical", "general", "complaint"],
+ description: "Issue category"
},
- required: ["description", "category"]
+ priority: {
+ type: "string",
+ enum: ["low", "medium", "high", "urgent"],
+ description: "Issue priority level"
+ }
},
- followUp: {
- type: "object",
- properties: {
- required: {
- type: "boolean",
- description: "Whether follow-up is needed"
- },
- method: {
- type: "string",
- enum: ["email", "phone", "none"],
- description: "Preferred follow-up method"
- },
- notes: {
- type: "string",
- description: "Additional notes for follow-up"
- }
+ required: ["description", "category"]
+ },
+ followUp: {
+ type: "object",
+ properties: {
+ required: {
+ type: "boolean",
+ description: "Whether follow-up is needed"
+ },
+ method: {
+ type: "string",
+ enum: ["email", "phone", "none"],
+ description: "Preferred follow-up method"
+ },
+ notes: {
+ type: "string",
+ description: "Additional notes for follow-up"
}
}
- },
- required: ["customer", "issue"]
- }
- })
+ }
+ },
+ required: ["customer", "issue"]
+ }
});
-const structuredOutput = await response.json();
console.log('Created structured output:', structuredOutput.id);
// Save this ID - you'll need it in the next step
```
+
-```python title="Python"
-import requests
+
+ ```python
+from vapi import Vapi
import os
-response = requests.post(
- 'https://api.vapi.ai/structured-output',
- headers={
- 'Authorization': f'Bearer {os.environ["VAPI_API_KEY"]}',
- 'Content-Type': 'application/json'
- },
- json={
- "name": "Support Ticket",
- "type": "ai",
- "description": "Extract support ticket information from customer calls",
- "schema": {
- "type": "object",
- "properties": {
- "customer": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "description": "Customer full name"
- },
- "email": {
- "type": "string",
- "format": "email",
- "description": "Customer email address"
- },
- "phone": {
- "type": "string",
- "description": "Customer phone number"
- }
+vapi = Vapi(token=os.environ.get("VAPI_API_KEY"))
+
+structured_output = vapi.structured_outputs.create(
+ name="Support Ticket",
+ type="ai",
+ description="Extract support ticket information from customer calls",
+ schema={
+ "type": "object",
+ "properties": {
+ "customer": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "Customer full name"
},
- "required": ["name"]
+ "email": {
+ "type": "string",
+ "format": "email",
+ "description": "Customer email address"
+ },
+ "phone": {
+ "type": "string",
+ "description": "Customer phone number"
+ }
},
- "issue": {
- "type": "object",
- "properties": {
- "description": {
- "type": "string",
- "description": "Description of the customer issue"
- },
- "category": {
- "type": "string",
- "enum": ["billing", "technical", "general", "complaint"],
- "description": "Issue category"
- },
- "priority": {
- "type": "string",
- "enum": ["low", "medium", "high", "urgent"],
- "description": "Issue priority level"
- }
+ "required": ["name"]
+ },
+ "issue": {
+ "type": "object",
+ "properties": {
+ "description": {
+ "type": "string",
+ "description": "Description of the customer issue"
+ },
+ "category": {
+ "type": "string",
+ "enum": ["billing", "technical", "general", "complaint"],
+ "description": "Issue category"
},
- "required": ["description", "category"]
+ "priority": {
+ "type": "string",
+ "enum": ["low", "medium", "high", "urgent"],
+ "description": "Issue priority level"
+ }
},
- "followUp": {
- "type": "object",
- "properties": {
- "required": {
- "type": "boolean",
- "description": "Whether follow-up is needed"
- },
- "method": {
- "type": "string",
- "enum": ["email", "phone", "none"],
- "description": "Preferred follow-up method"
- },
- "notes": {
- "type": "string",
- "description": "Additional notes for follow-up"
- }
+ "required": ["description", "category"]
+ },
+ "followUp": {
+ "type": "object",
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "description": "Whether follow-up is needed"
+ },
+ "method": {
+ "type": "string",
+ "enum": ["email", "phone", "none"],
+ "description": "Preferred follow-up method"
+ },
+ "notes": {
+ "type": "string",
+ "description": "Additional notes for follow-up"
}
}
- },
- "required": ["customer", "issue"]
- }
+ }
+ },
+ "required": ["customer", "issue"]
}
)
-structured_output = response.json()
-print(f'Created structured output: {structured_output["id"]}')
+print(f'Created structured output: {structured_output.id}')
# Save this ID - you'll need it in the next step
```
-
+
+
-
-Save the returned `id` from the response - you'll need it to link to your assistant.
-
+
+In the API approach, you'll need to save the returned `id` to attach it to an assistant. In the Dashboard, you can attach it directly when creating the structured output.
+
-## Step 2: Create an assistant with structured outputs
+## Step 2: Create and test a call
-Now create an assistant that uses your structured output:
+Now test your structured output by making a call.
-
-```bash title="cURL"
-curl -X POST https://api.vapi.ai/assistant \
- -H "Authorization: Bearer $VAPI_API_KEY" \
- -H "Content-Type: application/json" \
- -d '{
- "name": "Customer Support Agent",
- "firstMessage": "Hello! I'\''m here to help you with your support request. Can you please tell me your name and describe the issue you'\''re experiencing?",
- "model": {
- "provider": "openai",
- "model": "gpt-4-turbo-preview",
- "messages": [
- {
- "role": "system",
- "content": "You are a helpful customer support agent. Gather the customer'\''s information and understand their issue. Be empathetic and professional."
- }
- ]
- },
- "voice": {
- "provider": "vapi",
- "voiceId": "jennifer"
- },
- "artifactPlan": {
- "structuredOutputIds": ["YOUR_STRUCTURED_OUTPUT_ID_HERE"]
- }
- }'
-```
+
+**Prerequisites**: You need an assistant already created with:
+- The structured output from Step 1 attached in `artifactPlan.structuredOutputIds`
+- A model and voice configured
+- System prompt appropriate for your use case
-```javascript title="Node.js"
-const assistant = await fetch('https://api.vapi.ai/assistant', {
- method: 'POST',
- headers: {
- 'Authorization': `Bearer ${process.env.VAPI_API_KEY}`,
- 'Content-Type': 'application/json'
- },
- body: JSON.stringify({
- name: "Customer Support Agent",
- firstMessage: "Hello! I'm here to help you with your support request. Can you please tell me your name and describe the issue you're experiencing?",
- model: {
- provider: "openai",
- model: "gpt-4-turbo-preview",
- messages: [
- {
- role: "system",
- content: "You are a helpful customer support agent. Gather the customer's information and understand their issue. Be empathetic and professional."
- }
- ]
- },
- voice: {
- provider: "vapi",
- voiceId: "jennifer"
- },
- artifactPlan: {
- structuredOutputIds: [structuredOutput.id] // Use the ID from step 1
- }
- })
-}).then(res => res.json());
+You can create an assistant via the Dashboard or API, then use its ID in the examples below.
+
-console.log('Created assistant:', assistant.id);
+
+
+
+
+ 1. Navigate to your assistant (from **Assistants** in the sidebar)
+ 2. Ensure your structured output is attached in the **Artifact Plan** section
+ 3. Click **Talk to Assistant** in the top right corner
+ 4. The assistant will start speaking
+
+
+
+ Try saying: "Hi, my name is John Smith. My email is john@example.com. I'm having trouble logging into my account - it keeps showing an error message. This is pretty urgent for me."
+
+
+
+ Click **End Call** when you're done testing
+
+
+
+
+
+ ```typescript
+import { VapiClient } from "@vapi-ai/server-sdk";
+
+const vapi = new VapiClient({ token: process.env.VAPI_API_KEY! });
+
+// Start a web call with your assistant (replace with your assistant ID)
+const call = await vapi.calls.create({
+ assistantId: "your-assistant-id", // Use an assistant with structured outputs attached
+ type: "webCall"
+});
+
+console.log('Call started:', call.id);
+console.log('Join URL:', call.webCallUrl);
+
+// For phone calls, use:
+// const call = await vapi.calls.create({
+// assistantId: "your-assistant-id",
+// type: "outboundPhoneCall",
+// phoneNumberId: "your-phone-number-id",
+// customer: {
+// number: "+1234567890"
+// }
+// });
```
+
-```python title="Python"
-assistant_response = requests.post(
- 'https://api.vapi.ai/assistant',
- headers={
- 'Authorization': f'Bearer {os.environ["VAPI_API_KEY"]}',
- 'Content-Type': 'application/json'
- },
- json={
- "name": "Customer Support Agent",
- "firstMessage": "Hello! I'm here to help you with your support request. Can you please tell me your name and describe the issue you're experiencing?",
- "model": {
- "provider": "openai",
- "model": "gpt-4-turbo-preview",
- "messages": [
- {
- "role": "system",
- "content": "You are a helpful customer support agent. Gather the customer's information and understand their issue. Be empathetic and professional."
- }
- ]
- },
- "voice": {
- "provider": "vapi",
- "voiceId": "jennifer"
- },
- "artifactPlan": {
- "structuredOutputIds": [structured_output["id"]] # Use the ID from step 1
- }
- }
-)
+
+ ```python
+from vapi import Vapi
+import os
-assistant = assistant_response.json()
-print(f'Created assistant: {assistant["id"]}')
-```
-
+vapi = Vapi(token=os.environ.get("VAPI_API_KEY"))
-## Step 3: Test with a phone call
+# Start a web call with your assistant (replace with your assistant ID)
+call = vapi.calls.create(
+ assistant_id="your-assistant-id", # Use an assistant with structured outputs attached
+ type="webCall"
+)
-Make a test call to your assistant:
+print(f'Call started: {call.id}')
+print(f'Join URL: {call.web_call_url}')
+
+# For phone calls, use:
+# call = vapi.calls.create(
+# assistant_id="your-assistant-id",
+# type="outboundPhoneCall",
+# phone_number_id="your-phone-number-id",
+# customer={
+# "number": "+1234567890"
+# }
+# )
+```
+
-
-```bash title="cURL"
+
+ ```bash
+# Start a web call
curl -X POST https://api.vapi.ai/call \
-H "Authorization: Bearer $VAPI_API_KEY" \
-H "Content-Type: application/json" \
-d '{
- "assistantId": "YOUR_ASSISTANT_ID_HERE",
- "customer": {
- "number": "+1234567890"
- }
+ "assistantId": "your-assistant-id",
+ "type": "webCall"
}'
-```
-```javascript title="Node.js"
-const call = await fetch('https://api.vapi.ai/call', {
- method: 'POST',
- headers: {
- 'Authorization': `Bearer ${process.env.VAPI_API_KEY}`,
- 'Content-Type': 'application/json'
- },
- body: JSON.stringify({
- assistantId: assistant.id,
- customer: {
- number: "+1234567890" // Replace with your phone number
- }
- })
-}).then(res => res.json());
-
-console.log('Call initiated:', call.id);
+# For phone calls:
+# curl -X POST https://api.vapi.ai/call \
+# -H "Authorization: Bearer $VAPI_API_KEY" \
+# -H "Content-Type: application/json" \
+# -d '{
+# "assistantId": "your-assistant-id",
+# "type": "outboundPhoneCall",
+# "phoneNumberId": "your-phone-number-id",
+# "customer": {
+# "number": "+1234567890"
+# }
+# }'
```
-
-```python title="Python"
-call_response = requests.post(
- 'https://api.vapi.ai/call',
- headers={
- 'Authorization': f'Bearer {os.environ["VAPI_API_KEY"]}',
- 'Content-Type': 'application/json'
- },
- json={
- "assistantId": assistant["id"],
- "customer": {
- "number": "+1234567890" # Replace with your phone number
- }
- }
-)
-
-call = call_response.json()
-print(f'Call initiated: {call["id"]}')
-```
-
+
+
During the call, try saying something like: "Hi, my name is John Smith. My email is john@example.com. I'm having trouble logging into my account - it keeps showing an error message. This is pretty urgent for me."
-## Step 4: Retrieve extracted data
+## Step 3: Retrieve extracted data
After the call ends, retrieve the extracted information:
-
-```bash title="cURL"
-curl -X GET "https://api.vapi.ai/call/YOUR_CALL_ID_HERE" \
- -H "Authorization: Bearer $VAPI_API_KEY"
-```
+
+
+
+
+ 1. Navigate to **Call Logs** in the left sidebar
+ 2. Click on your recent call to view details
+
+
+
+ 1. In the call details, find the **Structured Outputs** section
+ 2. View the extracted JSON data for your "Support Ticket" output
+ 3. The data will be displayed in a formatted JSON view showing each output with its ID, name, and result
+
+
+
+ ### How structured outputs appear in Call Logs
+
+ When you view a call in the Call Logs page, structured outputs are displayed in the following format:
+
+ ```json
+ {
+ "550e8400-e29b-41d4-a716-446655440001": {
+ "name": "Support Ticket",
+ "result": {
+ "customer": {
+ "name": "John Smith",
+ "email": "john@example.com",
+ "phone": "+1234567890"
+ },
+ "issue": {
+ "description": "Unable to login to account, receiving error message",
+ "category": "technical",
+ "priority": "urgent"
+ },
+ "followUp": {
+ "required": true,
+ "method": "email",
+ "notes": "Customer needs immediate assistance with login issue"
+ }
+ }
+ }
+ }
+ ```
+
+ **Structure explanation:**
+ - **Root level**: Contains output IDs (UUIDs) as keys
+ - **name**: The name of the structured output configuration
+ - **result**: The actual extracted data based on your schema
+ - For object schemas: Contains the nested structure with all extracted fields
+ - For boolean schemas: Contains `true` or `false`
+ - For string schemas: Contains the extracted text
+ - For number schemas: Contains the numeric value
+
+
+ If you have multiple structured outputs attached to an assistant, each will appear with its own UUID key in the structuredOutputs object.
+
+
+
+
+ ```typescript
+import { VapiClient } from "@vapi-ai/server-sdk";
+
+const vapi = new VapiClient({ token: process.env.VAPI_API_KEY! });
-```javascript title="Node.js"
// Wait a few seconds after call ends for processing
setTimeout(async () => {
- const callData = await fetch(`https://api.vapi.ai/call/${call.id}`, {
- headers: {
- 'Authorization': `Bearer ${process.env.VAPI_API_KEY}`
- }
- }).then(res => res.json());
+ const callData = await vapi.calls.get(call.id);
const outputs = callData.artifact?.structuredOutputs;
@@ -552,32 +587,41 @@ setTimeout(async () => {
}
}, 5000);
```
+
-```python title="Python"
+
+ ```python
+from vapi import Vapi
import time
import json
+import os
+
+vapi = Vapi(token=os.environ.get("VAPI_API_KEY"))
# Wait a few seconds after call ends for processing
time.sleep(5)
-call_data = requests.get(
- f'https://api.vapi.ai/call/{call["id"]}',
- headers={
- 'Authorization': f'Bearer {os.environ["VAPI_API_KEY"]}'
- }
-).json()
+call_data = vapi.calls.get(call.id)
-outputs = call_data.get('artifact', {}).get('structuredOutputs', {})
+outputs = call_data.artifact.get('structuredOutputs', {}) if call_data.artifact else {}
for output_id, data in outputs.items():
print('Extracted Support Ticket:')
print(json.dumps(data['result'], indent=2))
```
-
+
+
+
+ ```bash
+curl -X GET "https://api.vapi.ai/call/YOUR_CALL_ID_HERE" \
+ -H "Authorization: Bearer $VAPI_API_KEY"
+```
+
+
### Expected output
-You should see extracted data like this:
+The extracted data (the `result` field from the API response) will look like this:
```json
{
@@ -599,103 +643,160 @@ You should see extracted data like this:
}
```
-## Step 5: Set up webhook (optional)
+
+When accessing via API, this data is nested inside the structured output object at `call.artifact.structuredOutputs[outputId].result`. The Dashboard shows the complete structure including the output ID and name.
+
-To automatically receive extracted data when calls end, set up a webhook:
+## HIPAA Compliance & Storage Settings
-
-```javascript title="Express.js webhook handler"
-const express = require('express');
-const app = express();
+
+**Important for HIPAA users:** When HIPAA mode is enabled, Vapi does not store structured outputs by default. This protects privacy but limits your ability to view structured outputs in Insights and Call Logs.
+
-app.use(express.json());
+### Understanding the default behavior
-app.post('/vapi/webhook', (req, res) => {
- const { type, call } = req.body;
-
- if (type === 'call.ended') {
- const outputs = call.artifact?.structuredOutputs;
-
- if (outputs) {
- Object.entries(outputs).forEach(([outputId, data]) => {
- if (data.result) {
- // Process the extracted support ticket
- console.log('New support ticket:', data.result);
-
- // Example: Create ticket in your system
- createSupportTicket({
- customer: data.result.customer,
- issue: data.result.issue,
- priority: data.result.issue.priority,
- followUp: data.result.followUp
- });
- }
- });
- }
- }
-
- res.status(200).send('OK');
-});
+When your organization or assistant has HIPAA mode enabled (`hipaaEnabled: true`):
+- **Structured outputs are NOT stored** - Results are generated but not persisted in Vapi's systems
+- **Limited visibility** - You cannot view outputs in the Dashboard's Call Logs or Insights
+- **Privacy first** - This ensures sensitive data is not retained
+- **Webhook access only** - You can still receive outputs via webhooks during the call
-function createSupportTicket(ticketData) {
- // Your ticket creation logic here
- console.log('Creating ticket in system:', ticketData);
-}
+This default behavior protects patient privacy and ensures compliance with HIPAA regulations.
-app.listen(3000, () => {
- console.log('Webhook server running on port 3000');
-});
-```
+### Enabling storage for non-sensitive outputs
-```python title="Flask webhook handler"
-from flask import Flask, request, jsonify
+For structured outputs that extract **non-sensitive, non-PHI information**, you can override this behavior using the `compliancePlan.forceStoreOnHipaaEnabled` setting.
-app = Flask(__name__)
+
+**Your responsibility:** You must ensure that any structured output with storage enabled does NOT extract or generate PHI or sensitive data.
+
-@app.route('/vapi/webhook', methods=['POST'])
-def vapi_webhook():
- data = request.json
-
- if data.get('type') == 'call.ended':
- call = data.get('call', {})
- outputs = call.get('artifact', {}).get('structuredOutputs', {})
-
- for output_id, output_data in outputs.items():
- if output_data.get('result'):
- # Process the extracted support ticket
- print('New support ticket:', output_data['result'])
-
- # Example: Create ticket in your system
- create_support_ticket({
- 'customer': output_data['result']['customer'],
- 'issue': output_data['result']['issue'],
- 'priority': output_data['result']['issue']['priority'],
- 'followUp': output_data['result']['followUp']
- })
-
- return jsonify({'status': 'ok'}), 200
+#### Safe use cases for storage override
-def create_support_ticket(ticket_data):
- # Your ticket creation logic here
- print('Creating ticket in system:', ticket_data)
+Enable storage for these types of non-sensitive outputs:
-if __name__ == '__main__':
- app.run(port=3000)
-```
-
+- **Boolean outcomes**: `appointmentBooked: true/false`, `callSuccessful: true/false`
+- **General categories**: `issueCategory: "billing" | "technical" | "general"`
+- **Satisfaction scores**: `csatScore: 1-10`
+- **Call metrics**: `sentiment: "positive" | "neutral" | "negative"`
+- **Success indicators**: `issueResolved: boolean`, `followUpRequired: boolean`
+
+#### Never enable storage for these
+
+**Do not** enable storage for outputs that extract:
+- Patient names, dates of birth, or contact information
+- Diagnosis, treatment, or medication information
+- Medical record numbers or identifiers
+- Social security numbers
+- Credit card or payment details
-Then update your assistant with the webhook URL:
+### Configuration examples
+
+
+ 1. Navigate to **Structured Outputs** in the left sidebar
+ 2. Create or edit a structured output
+ 3. Expand the **Compliance Settings** section
+ 4. Enable the toggle for "Enable Storage of Structured Outputs while on HIPAA Mode"
+ 5. **Recommendation**: Only enable if your output does not extract sensitive information
+
+
+
```bash
-curl -X PATCH "https://api.vapi.ai/assistant/YOUR_ASSISTANT_ID" \
+# Creating a HIPAA-safe structured output with storage enabled
+curl -X POST https://api.vapi.ai/structured-output \
-H "Authorization: Bearer $VAPI_API_KEY" \
-H "Content-Type: application/json" \
-d '{
- "server": {
- "url": "https://your-domain.com/vapi/webhook"
+ "name": "Appointment Booked",
+ "type": "ai",
+ "description": "Boolean indicator of whether appointment was booked",
+ "schema": {
+ "type": "boolean",
+ "description": "Whether an appointment was successfully booked during the call"
+ },
+ "compliancePlan": {
+ "forceStoreOnHipaaEnabled": true
}
}'
```
+
+
+
+```typescript
+import { VapiClient } from "@vapi-ai/server-sdk";
+
+const vapi = new VapiClient({ token: process.env.VAPI_API_KEY! });
+
+// Safe: Boolean outcome, no PHI
+const structuredOutput = await vapi.structuredOutputs.create({
+ name: "Appointment Booked",
+ type: "ai",
+ description: "Boolean indicator of whether appointment was booked",
+ schema: {
+ type: "boolean",
+ description: "Whether an appointment was successfully booked during the call"
+ },
+ compliancePlan: {
+ forceStoreOnHipaaEnabled: true // Safe because output contains no PHI
+ }
+});
+
+// Update existing structured output to enable storage
+await vapi.structuredOutputs.update(structuredOutput.id, {
+ compliancePlan: {
+ forceStoreOnHipaaEnabled: true
+ }
+});
+```
+
+
+
+```python
+from vapi import Vapi
+import os
+
+vapi = Vapi(token=os.environ.get("VAPI_API_KEY"))
+
+# Safe: Boolean outcome, no PHI
+structured_output = vapi.structured_outputs.create(
+ name="Appointment Booked",
+ type="ai",
+ description="Boolean indicator of whether appointment was booked",
+ schema={
+ "type": "boolean",
+ "description": "Whether an appointment was successfully booked during the call"
+ },
+ compliance_plan={
+ "forceStoreOnHipaaEnabled": True
+ }
+)
+
+# Update existing structured output to enable storage
+vapi.structured_outputs.update(
+ structured_output.id,
+ compliance_plan={
+ "forceStoreOnHipaaEnabled": True
+ }
+)
+```
+
+
+
+
+**IMPORTANT:** Only set `forceStoreOnHipaaEnabled: true` if you are certain your structured output does NOT extract PHI or sensitive data. Review your schema carefully before enabling storage.
+
+
+### Best practices for HIPAA compliance
+
+1. **Default to privacy**: Keep storage disabled for all outputs that might contain PHI
+2. **Review schemas carefully**: Ensure your extraction logic cannot accidentally capture sensitive data
+3. **Use specific schemas**: Design narrow schemas that target only non-sensitive data
+4. **Test thoroughly**: Verify outputs don't contain PHI before enabling storage
+5. **Document decisions**: Maintain records of which outputs have storage enabled and why
+6. **Regular audits**: Periodically review stored outputs to ensure compliance
+
+For more information about HIPAA compliance with Vapi, see our [HIPAA Compliance Guide](/security-and-privacy/hipaa).
## Next steps
@@ -737,23 +838,49 @@ You can attach multiple structured outputs to extract different types of data:
The `structuredOutputIds` are UUIDs returned when you create each structured output configuration.
-### Conditional extraction
+### Example: Intelligent analysis with multiple outputs
-Use conditional logic in your schema to handle different scenarios:
+Structured outputs can perform sophisticated analysis beyond simple data extraction. Here's a real example showing various types of intelligent evaluation:
```json
{
- "if": {
- "properties": {
- "requestType": {"const": "appointment"}
- }
+ "2ca00f20-f2c3-4d74-af2e-52842be5885c": {
+ "name": "informationOnFileIsCorrect",
+ "result": false
},
- "then": {
- "required": ["preferredDate", "preferredTime"]
+ "4748e1aa-6c7a-49e6-bbde-c4365ef69c6e": {
+ "name": "Appointment Rescheduled",
+ "result": false
+ },
+ "4d4bac33-2cea-43d4-a3b3-4554932b8933": {
+ "name": "CSAT",
+ "result": 8
+ },
+ "7898e478-c8dc-4ff8-a3f6-4a46555a957f": {
+ "name": "Appointment Booked",
+ "result": true
+ },
+ "a0ca58b1-c343-4628-b088-bf53aabacab9": {
+ "name": "Call Summary",
+ "result": "The user called to schedule a consultation appointment for next week, specifically on Wednesday afternoon..."
+ },
+ "b5a390d8-87c5-4015-b1ad-ed237201bdf0": {
+ "name": "Success Evaluation - Pass/Fail",
+ "result": true
}
}
```
+This example demonstrates intelligent extraction capabilities:
+- **Call outcome evaluation**: `Appointment Booked` (true) - Analyzed if the call's objective was achieved
+- **Data verification**: `informationOnFileIsCorrect` (false) - Evaluated if customer data needed updates
+- **Success metrics**: `Success Evaluation - Pass/Fail` (true) - Determined overall call success based on multiple criteria
+- **CSAT extraction**: `CSAT` (8) - Extracted satisfaction score from customer feedback
+- **Intelligent summarization**: `Call Summary` - Generated contextual summary of the conversation
+- **Process tracking**: `Appointment Rescheduled` (false) - Tracked specific actions taken during the call
+
+Each output analyzes the complete call context including transcript, tool results, and metadata to provide actionable insights.
+
### Validation patterns
Common validation patterns for reliable extraction:
@@ -801,4 +928,4 @@ Common validation patterns for reliable extraction:
Need assistance? We're here to help:
- [API Documentation](/api-reference)
- [Discord Community](https://discord.gg/pUFNcf2WmH)
-- [Support](mailto:support@vapi.ai)
\ No newline at end of file
+- [Support](mailto:support@vapi.ai)
diff --git a/fern/call-forwarding.mdx b/fern/call-forwarding.mdx
index 58e1c75d8..e607e841e 100644
--- a/fern/call-forwarding.mdx
+++ b/fern/call-forwarding.mdx
@@ -11,6 +11,13 @@ Vapi's call forwarding functionality allows you to redirect calls to different p
- **`transferCall` Tool**: This tool enables call forwarding to predefined phone numbers with specific messages based on the destination.
+
+Looking for dynamic routing decided at runtime? Use a `transferCall` tool with an empty `destinations` array and either:
+- Have the assistant supply a destination parameter (e.g., `phoneNumber`) directly; no webhook is sent.
+- Or respond from your server to the `transfer-destination-request` webhook with a destination.
+See: [Dynamic call transfers](/calls/call-dynamic-transfers).
+
+
### Parameters and Messages
- **Destinations**: A list of phone numbers where the call can be forwarded.
diff --git a/fern/calls/assistant-based-warm-transfer.mdx b/fern/calls/assistant-based-warm-transfer.mdx
index 1e7d3ce33..c530980d3 100644
--- a/fern/calls/assistant-based-warm-transfer.mdx
+++ b/fern/calls/assistant-based-warm-transfer.mdx
@@ -52,7 +52,9 @@ The `function.name` property identifies your transfer tool. Use this name when i
"mode": "warm-transfer-experimental",
"transferAssistant": {
"firstMessage": "Hello, I have a customer on the line. Are you available to take this call?",
+ "firstMessageMode": "assistant-speaks-first", // Default behavior - assistant speaks immediately
"maxDurationSeconds": 120,
+ "silenceTimeoutSeconds": 30,
"model": {
"provider": "openai",
"model": "gpt-4o",
@@ -87,14 +89,60 @@ The `function.name` property identifies your transfer tool. Use this name when i
The initial message spoken by the transfer assistant when the operator answers
+
+ Controls when the transfer assistant delivers the first message:
+ - `assistant-speaks-first`: The assistant immediately speaks the `firstMessage` when the operator answers
+ - `assistant-waits-for-user`: The assistant waits for the operator to speak before responding with the `firstMessage`
+
+
Maximum duration in seconds for the operator call. The transfer is automatically cancelled if this limit is reached.
+
+ Number of seconds to wait during silence before automatically cancelling the transfer. Must be between 10 and 3600 seconds.
+
+
Assistant configuration including provider, model, and system messages that control the transfer assistant's behavior
+### Customizing hold audio
+
+You can customize the audio played to the customer while they're on hold during the transfer process using the `request-complete` message type. If not specified, Vapi's default ringtone will be used.
+
+
+```json title="Custom Hold Music"
+{
+ "type": "transferCall",
+ "function": {
+ "name": "transferCall"
+ },
+ "destinations": [
+ // ... destination configuration
+ ],
+ "messages": [
+ {
+ "type": "request-start",
+ "content": "I'll transfer you now. Please hold."
+ },
+ {
+ "type": "request-complete",
+ "content": "https://your-domain.com/assets/custom-hold-music.mp3" // Custom hold audio URL
+ },
+ {
+ "type": "request-failed",
+ "content": "I couldn't complete the transfer."
+ }
+ ]
+}
+```
+
+
+
+The `request-complete` message with an audio URL will be played as hold music on the customer's side of the call while the transfer assistant connects with the operator. If you don't specify this message, Vapi's default ringtone will play.
+
+
## Built-in tools
The transfer assistant has access to two built-in tools:
@@ -141,6 +189,7 @@ The transfer assistant can be configured to handle various operator responses:
"mode": "warm-transfer-experimental",
"transferAssistant": {
"firstMessage": "Hi, I have a customer on the line who needs help with their recent order. Are you available?",
+ "firstMessageMode": "assistant-waits-for-user",
"maxDurationSeconds": 90,
"model": {
"provider": "openai",
@@ -309,7 +358,7 @@ Configure your transfer assistant to:
- Requires `warm-transfer-experimental` mode
-- Only works with Twilio phone numbers
+- Only works with Twilio, Vapi phone numbers, and SIP trunks. Does not support Telnyx or Vonage
- Calls are limited by `maxDurationSeconds` to prevent indefinite duration
- Built-in tools (`transferSuccessful`, `transferCancel`) are predefined and cannot be removed
- The transfer assistant has access to the previous conversation context
@@ -318,6 +367,6 @@ Configure your transfer assistant to:
## Next steps
Now that you've configured assistant-based warm transfers:
-- **[Call forwarding](mdc:docs/call-forwarding):** Learn about other transfer modes and options
-- **[Assistant configuration](mdc:docs/assistants):** Configure assistant models and prompts
-- **[Custom tools](mdc:docs/tools/custom-tools):** Add custom tools to your assistants
+- **[Call forwarding](../call-forwarding):** Learn about other transfer modes and options
+- **[Assistant configuration](../assistants):** Configure assistant models and prompts
+- **[Custom tools](../tools/custom-tools):** Add custom tools to your assistants
diff --git a/fern/calls/call-concurrency.mdx b/fern/calls/call-concurrency.mdx
new file mode 100644
index 000000000..69c4bbb19
--- /dev/null
+++ b/fern/calls/call-concurrency.mdx
@@ -0,0 +1,153 @@
+---
+title: Understanding Call Concurrency
+subtitle: Plan, monitor, and scale simultaneous Vapi calls
+slug: calls/call-concurrency
+description: Learn how concurrency slots work, how to stay within the default limit, and how to raise capacity for larger campaigns.
+---
+
+## Overview
+
+Call concurrency represents how many Vapi calls can be active at the same time. Each call occupies one slot, similar to using a finite set of phone lines.
+
+**In this guide, you'll learn to:**
+- Understand the default concurrency allocation and when it is usually sufficient
+- Keep outbound and inbound workloads within plan limits
+- Increase reserved capacity directly from the Vapi Dashboard
+- Inspect concurrency data through API responses and analytics queries
+
+## What is concurrency?
+
+Every Vapi account includes **10 concurrent call slots** by default. When all slots are busy, new outbound dials or inbound connections wait until a slot becomes free.
+
+
+
+ Rarely hit concurrency caps unless traffic surges (launches, seasonal spikes).
+
+
+ More likely to reach limits when running large calling batches.
+
+
+
+These limits ensure the underlying compute stays reliable for every customer. Higher concurrency requires reserving additional capacity, which Vapi provides through custom or add-on plans.
+
+## Managing concurrency
+
+### Outbound campaigns
+
+Batch long lead lists into smaller chunks (for example, 50–100 numbers) and run those batches sequentially. This keeps your peak concurrent calls near the default limit while still working through large sets quickly.
+
+### High-volume operations
+
+If you regularly exceed **50,000 minutes per month**, talk with Vapi about:
+
+- **Custom plans** that include higher baked-in concurrency
+- **Add-on bundles** that let you purchase extra call lines only when you need them
+
+
+Use billing reports to pair minute usage with concurrency spikes so you can upgrade before calls are blocked.
+
+
+## Increase your concurrency limit
+
+You can raise or reserve more call lines without contacting support:
+
+1. Open the [Vapi Dashboard](https://dashboard.vapi.ai/settings/billing).
+2. Navigate to **Settings → Billing**.
+3. Find **Reserved Concurrency (Call Lines)**.
+4. Increase the limit or purchase add-on concurrency lines.
+
+Changes apply immediately, so you can scale ahead of known traffic surges.
+
+## View concurrency in call responses
+
+When you create a call with `POST /call`, the response includes a `subscriptionLimits` object that shows the current state of your account.
+
+### Example request
+
+```bash
+curl 'https://api.vapi.ai/call' \
+ -H 'authorization: Bearer {VAPI-PRIVATE-TOKEN}' \
+ -H 'content-type: application/json' \
+ --data-raw '{
+ "assistantId": "4a170597-a0c2-4657-8c32-cb93f080cead",
+ "customer": {"number": "+918936850777"},
+ "phoneNumberId": "c6ea6cb0-0dfb-4a65-918f-6a33abb54b64"
+ }'
+```
+
+### Example response snippet
+
+```json
+{
+ "subscriptionLimits": {
+ "concurrencyBlocked": false,
+ "concurrencyLimit": 10,
+ "remainingConcurrentCalls": 9
+ },
+ "id": "019a9046-121e-766d-bd1f-84f3ccc309c1",
+ "status": "queued"
+}
+```
+
+### Field reference
+
+- **`concurrencyBlocked`** — `true` if the call could not start because all slots were full.
+- **`concurrencyLimit`** — Total concurrent call slots currently available to your org.
+- **`remainingConcurrentCalls`** — How many slots were open at the time you created the call.
+
+Build monitoring around these values to alert when you approach the cap.
+
+## Track concurrency with the Analytics API
+
+Use the `/analytics` endpoint to review historical concurrency usage and spot patterns that justify more capacity.
+
+### Example request
+
+```bash
+curl 'https://api.vapi.ai/analytics' \
+ -H 'authorization: Bearer {VAPI-PRIVATE-TOKEN}' \
+ -H 'content-type: application/json' \
+ --data-raw '{
+ "queries": [{
+ "name": "Number of Concurrent Calls",
+ "table": "subscription",
+ "timeRange": {
+ "start": "2025-10-16T18:30:00.000Z",
+ "end": "2025-11-17T05:31:10.184Z",
+ "step": "day"
+ },
+ "operations": [{
+ "operation": "max",
+ "column": "concurrency",
+ "alias": "concurrency"
+ }]
+ }]
+ }'
+```
+
+### Example response
+
+```json
+[{
+ "name": "Number of Concurrent Calls",
+ "timeRange": {
+ "start": "2025-10-16T18:30:00.000Z",
+ "end": "2025-11-17T05:31:10.184Z",
+ "step": "day",
+ "timezone": "UTC"
+ },
+ "result": [
+ { "date": "2025-11-05T00:00:00.000Z", "concurrency": 0 },
+ { "date": "2025-11-10T00:00:00.000Z", "concurrency": 1 },
+ { "date": "2025-11-17T00:00:00.000Z", "concurrency": 1 }
+ ]
+}]
+```
+
+Adjust the `timeRange.step` to inspect usage by hour, day, or week. Peaks that align with campaign launches, seasonality, or support events highlight when you should reserve additional call lines.
+
+## Next steps
+
+- **[Call queue management](/calls/call-queue-management):** Build a Twilio queue to buffer calls when you hit concurrency caps.
+- **[Outbound campaign planning](/outbound-campaigns/overview):** Design outbound strategies that pair batching with analytics.
+- **[Enterprise plans](/enterprise/plans):** Review larger plans that include higher default concurrency.
diff --git a/fern/calls/call-dynamic-transfers.mdx b/fern/calls/call-dynamic-transfers.mdx
index 81c256ac6..15e62661c 100644
--- a/fern/calls/call-dynamic-transfers.mdx
+++ b/fern/calls/call-dynamic-transfers.mdx
@@ -14,7 +14,8 @@ Dynamic call transfers enable intelligent routing by determining transfer destin
* Integration with CRM systems, databases, and external APIs
* Conditional routing logic for departments, specialists, or geographic regions
* Context-aware transfers with conversation summaries
-* Fallback handling for unavailable destinations
+* Custom business logic execution before completing the transfer
+* Programmatic transfer control via Vapi's Call Control API
## Prerequisites
@@ -24,31 +25,67 @@ Dynamic call transfers enable intelligent routing by determining transfer destin
## How It Works
-Dynamic transfers operate by leaving the destination unspecified initially, then using webhooks to determine the appropriate destination when needed.
+Dynamic transfers with live call control use a server-controlled pattern that gives you maximum flexibility:
-**Transfer flow:**
-1. **Trigger** - Voice agent determines a transfer is needed based on conversation
-2. **Webhook** - Vapi sends `transfer-destination-request` to your server with call context
-3. **Decision** - Your server analyzes context and external data to determine routing
-4. **Response** - Server returns destination details and transfer configuration
-5. **Transfer** - Vapi executes the transfer to the determined destination
+1. **User initiates transfer**: The user requests a transfer in natural language during the conversation
+2. **Vapi triggers custom tool**: Vapi fires your custom tool to your HTTP server
+3. **Server receives control URL**: The tool payload includes `message.call.monitor.controlUrl` for live call control
+4. **Execute business logic**: Your server performs any necessary operations:
+ - Update CRM records with call summaries
+ - Extract and store conversation data
+ - Query databases for routing decisions
+ - Enrich destination systems with context
+5. **Complete transfer**: Your server makes a POST request to the `controlUrl` with the transfer destination
+6. **Call connected**: Vapi transfers the call to the specified SIP or PSTN destination
-**Available context:** Your webhook receives conversation transcript, extracted variables, customer information, function parameters, and call metadata.
+Available context: Your server receives the full conversation transcript, custom parameters, call metadata, and the control URL, allowing you to make informed routing decisions and execute the transfer programmatically.
+
+
+Parameters for custom tools are fully customizable. You can name and structure them however you like to guide routing (for example `department`, `reason`, `urgency`, `customerId`, etc.).
+
+
+Sequence diagram
+
+```mermaid
+sequenceDiagram
+ participant Customer
+ participant Vapi
+ participant Server as HTTP Server
+ participant CRM as CRM (Optional)
+ participant Dest as SIP/PSTN Destination
+
+ Customer->>Vapi: "Can you transfer me to support?"
+
+ Vapi->>Server: Tool call: custom_transfer_call ({ "reason": "escalation" })
+
+ opt Business Logic
+ Server->>CRM: Update customer record
+ CRM-->>Server: Confirm updated
+ end
+
+ Server->>Vapi: POST {controlUrl}/control (transfer destination)
+
+ Vapi->>Dest: Transfer call
+ Dest-->>Customer: Connected to destination
+```
---
## Quick Implementation Guide
-
+
+ Create a custom tool that will receive the transfer request and provide you with the control URL to execute the transfer.
+
- Navigate to **Tools** in your dashboard
- Click **Create Tool**
- - Select **Transfer Call** as the tool type
- - **Important**: Leave the destinations array empty - this creates a dynamic transfer tool
- - Set function name: `dynamicTransfer`
- - Add description explaining when this tool should be used
+ - Select **Custom** as the tool type
+ - Set function name: `transfer_call`
+ - Add a description: "Transfer the call to the appropriate department or agent"
+ - Define custom parameters based on your routing needs (e.g., `department`, `reason`, `urgency`, `customerId`)
+ - Set your server URL to receive the tool call
```typescript
@@ -56,30 +93,38 @@ Dynamic transfers operate by leaving the destination unspecified initially, then
const vapi = new VapiClient({ token: process.env.VAPI_API_KEY });
- const dynamicTool = await vapi.tools.create({
- type: "transferCall",
- // Empty destinations array makes this dynamic
- destinations: [],
+ const transferTool = await vapi.tools.create({
+ type: "function",
+ async: true,
function: {
- name: "dynamicTransfer",
- description: "Transfer call to appropriate destination based on customer needs",
+ name: "transfer_call",
+ description: "Transfer the call to the appropriate department or agent based on customer needs",
parameters: {
type: "object",
properties: {
+ department: {
+ type: "string",
+ description: "Department to transfer to (e.g., 'support', 'sales', 'billing')"
+ },
reason: {
type: "string",
- description: "Reason for transfer"
+ description: "Reason for the transfer"
},
urgency: {
type: "string",
- enum: ["low", "medium", "high", "critical"]
+ enum: ["low", "medium", "high", "critical"],
+ description: "Urgency level of the transfer"
}
- }
+ },
+ required: ["department", "reason"]
}
+ },
+ server: {
+ url: "https://your-server.com/webhook"
}
});
- console.log(`Dynamic transfer tool created: ${dynamicTool.id}`);
+ console.log(`Transfer tool created: ${transferTool.id}`);
```
@@ -87,40 +132,49 @@ Dynamic transfers operate by leaving the destination unspecified initially, then
import requests
import os
- def create_dynamic_transfer_tool():
+ def create_transfer_tool():
url = "https://api.vapi.ai/tool"
headers = {
"Authorization": f"Bearer {os.getenv('VAPI_API_KEY')}",
"Content-Type": "application/json"
}
-
- data = {
- "type": "transferCall",
- "destinations": [], # Empty for dynamic routing
+
+ tool_config = {
+ "type": "function",
+ "async": True,
"function": {
- "name": "dynamicTransfer",
- "description": "Transfer call to appropriate destination based on customer needs",
+ "name": "transfer_call",
+ "description": "Transfer the call to the appropriate department or agent based on customer needs",
"parameters": {
"type": "object",
"properties": {
+ "department": {
+ "type": "string",
+ "description": "Department to transfer to (e.g., 'support', 'sales', 'billing')"
+ },
"reason": {
"type": "string",
- "description": "Reason for transfer"
+ "description": "Reason for the transfer"
},
"urgency": {
"type": "string",
- "enum": ["low", "medium", "high", "critical"]
+ "enum": ["low", "medium", "high", "critical"],
+ "description": "Urgency level of the transfer"
}
- }
+ },
+ "required": ["department", "reason"]
}
+ },
+ "server": {
+ "url": "https://your-server.com/webhook"
}
}
-
- response = requests.post(url, headers=headers, json=data)
+
+ response = requests.post(url, headers=headers, json=tool_config)
return response.json()
- tool = create_dynamic_transfer_tool()
- print(f"Dynamic transfer tool created: {tool['id']}")
+ tool = create_transfer_tool()
+ print(f"Transfer tool created: {tool['id']}")
```
@@ -129,18 +183,33 @@ Dynamic transfers operate by leaving the destination unspecified initially, then
-H "Authorization: Bearer $VAPI_API_KEY" \
-H "Content-Type: application/json" \
-d '{
- "type": "transferCall",
- "destinations": [],
+ "type": "function",
+ "async": true,
"function": {
- "name": "dynamicTransfer",
- "description": "Transfer call to appropriate destination based on customer needs",
+ "name": "transfer_call",
+ "description": "Transfer the call to the appropriate department or agent based on customer needs",
"parameters": {
"type": "object",
"properties": {
- "reason": {"type": "string", "description": "Reason for transfer"},
- "urgency": {"type": "string", "enum": ["low", "medium", "high", "critical"]}
- }
+ "department": {
+ "type": "string",
+ "description": "Department to transfer to (e.g., support, sales, billing)"
+ },
+ "reason": {
+ "type": "string",
+ "description": "Reason for the transfer"
+ },
+ "urgency": {
+ "type": "string",
+ "enum": ["low", "medium", "high", "critical"],
+ "description": "Urgency level of the transfer"
+ }
+ },
+ "required": ["department", "reason"]
}
+ },
+ "server": {
+ "url": "https://your-server.com/webhook"
}
}'
```
@@ -153,9 +222,8 @@ Dynamic transfers operate by leaving the destination unspecified initially, then
- Navigate to **Assistants**
- Create a new assistant or edit an existing one
- - Add your dynamic transfer tool to the assistant
- - Enable the **transfer-destination-request** server event
- - Set your server URL to handle the webhook
+ - Add your custom transfer tool to the assistant
+ - Configure the system prompt to guide when transfers should occur
```typescript
@@ -168,23 +236,23 @@ Dynamic transfers operate by leaving the destination unspecified initially, then
messages: [
{
role: "system",
- content: "You help customers and transfer them when needed using the dynamicTransfer tool. Assess the customer's needs and transfer to the appropriate department."
+ content: "You help customers and can transfer them to the appropriate department when needed. Use the transfer_call tool when a customer requests to speak with someone or when you determine their issue requires specialist assistance. Always gather the reason for transfer before initiating it."
}
],
- toolIds: ["YOUR_DYNAMIC_TOOL_ID"]
+ toolIds: ["YOUR_TRANSFER_TOOL_ID"]
},
voice: {
provider: "11labs",
voiceId: "burt"
- },
- serverUrl: "https://your-server.com/webhook",
- serverUrlSecret: process.env.WEBHOOK_SECRET
+ }
});
+
+ console.log(`Assistant created: ${assistant.id}`);
```
```python
- def create_assistant_with_dynamic_transfer(tool_id):
+ def create_assistant_with_transfer(tool_id):
url = "https://api.vapi.ai/assistant"
headers = {
"Authorization": f"Bearer {os.getenv('VAPI_API_KEY')}",
@@ -199,17 +267,18 @@ Dynamic transfers operate by leaving the destination unspecified initially, then
"model": "gpt-4o",
"messages": [{
"role": "system",
- "content": "You help customers and transfer them when needed using the dynamicTransfer tool. Assess the customer's needs and transfer to the appropriate department."
+ "content": "You help customers and can transfer them to the appropriate department when needed. Use the transfer_call tool when a customer requests to speak with someone or when you determine their issue requires specialist assistance. Always gather the reason for transfer before initiating it."
}],
"toolIds": [tool_id]
},
- "voice": {"provider": "11labs", "voiceId": "burt"},
- "serverUrl": "https://your-server.com/webhook",
- "serverUrlSecret": os.getenv("WEBHOOK_SECRET")
+ "voice": {"provider": "11labs", "voiceId": "burt"}
}
response = requests.post(url, headers=headers, json=data)
return response.json()
+
+ assistant = create_assistant_with_transfer("YOUR_TRANSFER_TOOL_ID")
+ print(f"Assistant created: {assistant['id']}")
```
@@ -225,78 +294,84 @@ Dynamic transfers operate by leaving the destination unspecified initially, then
"model": "gpt-4o",
"messages": [{
"role": "system",
- "content": "You help customers and transfer them when needed using the dynamicTransfer tool."
+ "content": "You help customers and can transfer them to the appropriate department when needed. Use the transfer_call tool when a customer requests to speak with someone or when you determine their issue requires specialist assistance."
}],
- "toolIds": ["YOUR_DYNAMIC_TOOL_ID"]
+ "toolIds": ["YOUR_TRANSFER_TOOL_ID"]
},
- "serverUrl": "https://your-server.com/webhook"
+ "voice": {"provider": "11labs", "voiceId": "burt"}
}'
```
-
+
+ Your server will receive the tool call with `message.call.monitor.controlUrl` and use it to execute the transfer via Live Call Control.
+
```typescript
import express from 'express';
- import crypto from 'crypto';
+ import axios from 'axios';
const app = express();
app.use(express.json());
- function verifyWebhookSignature(payload: string, signature: string) {
- const expectedSignature = crypto
- .createHmac('sha256', process.env.WEBHOOK_SECRET!)
- .update(payload)
- .digest('hex');
- return crypto.timingSafeEqual(
- Buffer.from(signature),
- Buffer.from(expectedSignature)
- );
- }
-
- app.post('/webhook', (req, res) => {
+ app.post('/webhook', async (req, res) => {
try {
- const signature = req.headers['x-vapi-signature'] as string;
- const payload = JSON.stringify(req.body);
-
- if (!verifyWebhookSignature(payload, signature)) {
- return res.status(401).json({ error: 'Invalid signature' });
- }
+ const { message } = req.body;
- const request = req.body;
+ // Extract control URL from the call monitor
+ const controlUrl = message?.call?.monitor?.controlUrl;
+
+ // Extract tool call from toolWithToolCallList
+ const toolWithToolCall = message?.toolWithToolCallList?.[0];
+ const toolCall = toolWithToolCall?.toolCall;
- if (request.type !== 'transfer-destination-request') {
- return res.status(200).json({ received: true });
+ if (!controlUrl || !toolCall) {
+ return res.status(400).json({ error: 'Missing required data' });
}
- // Simple routing logic - customize for your needs
- const { functionCall, customer } = request;
- const urgency = functionCall.parameters?.urgency || 'medium';
+ // Extract parameters from the tool call
+ const { department, reason, urgency } = toolCall.function.arguments;
+ // Execute business logic (optional)
+ console.log(`Transfer request: ${department} - ${reason} (${urgency})`);
+
+ // Determine destination based on department
let destination;
- if (urgency === 'critical') {
+ if (department === 'support') {
+ destination = {
+ type: "number",
+ number: "+1234567890"
+ };
+ } else if (department === 'sales') {
destination = {
type: "number",
- number: "+1-555-EMERGENCY",
- message: "Connecting you to our emergency team."
+ number: "+1987654321"
};
} else {
destination = {
- type: "number",
- number: "+1-555-SUPPORT",
- message: "Transferring you to our support team."
+ type: "number",
+ number: "+1555555555"
};
}
- res.json({ destination });
- } catch (error) {
- console.error('Webhook error:', error);
- res.status(500).json({
- error: 'Transfer routing failed. Please try again.'
+ // Execute transfer via Live Call Control
+ await axios.post(`${controlUrl}/control`, {
+ type: "transfer",
+ destination: destination,
+ content: `Transferring you to ${department} now.`
+ }, {
+ headers: { 'Content-Type': 'application/json' }
});
+
+ // Respond to Vapi (optional acknowledgment)
+ res.json({ success: true });
+
+ } catch (error) {
+ console.error('Transfer error:', error);
+ res.status(500).json({ error: 'Transfer failed' });
}
});
@@ -308,70 +383,96 @@ Dynamic transfers operate by leaving the destination unspecified initially, then
```python
import os
- import hmac
- import hashlib
+ import httpx
from fastapi import FastAPI, HTTPException, Request
app = FastAPI()
- def verify_webhook_signature(payload: bytes, signature: str) -> bool:
- webhook_secret = os.getenv('WEBHOOK_SECRET', '').encode()
- expected_signature = hmac.new(
- webhook_secret, payload, hashlib.sha256
- ).hexdigest()
- return hmac.compare_digest(signature, expected_signature)
-
@app.post("/webhook")
async def handle_webhook(request: Request):
try:
- body = await request.body()
- signature = request.headers.get('x-vapi-signature', '')
+ body = await request.json()
+ message = body.get('message', {})
- if not verify_webhook_signature(body, signature):
- raise HTTPException(status_code=401, detail="Invalid signature")
+ # Extract control URL from the call monitor
+ control_url = message.get('call', {}).get('monitor', {}).get('controlUrl')
- request_data = await request.json()
+ # Extract tool call from toolWithToolCallList
+ tool_with_tool_call = message.get('toolWithToolCallList', [{}])[0]
+ tool_call = tool_with_tool_call.get('toolCall', {})
- if request_data.get('type') != 'transfer-destination-request':
- return {"received": True}
+ if not control_url or not tool_call:
+ raise HTTPException(status_code=400, detail="Missing required data")
- # Simple routing logic - customize for your needs
- function_call = request_data.get('functionCall', {})
- urgency = function_call.get('parameters', {}).get('urgency', 'medium')
+ # Extract parameters from the tool call
+ arguments = tool_call.get('function', {}).get('arguments', {})
+ department = arguments.get('department')
+ reason = arguments.get('reason')
+ urgency = arguments.get('urgency', 'medium')
- if urgency == 'critical':
+ print(f"Transfer request: {department} - {reason} ({urgency})")
+
+ # Determine destination based on department
+ if department == 'support':
+ destination = {
+ "type": "number",
+ "number": "+1234567890"
+ }
+ elif department == 'sales':
destination = {
"type": "number",
- "number": "+1-555-EMERGENCY",
- "message": "Connecting you to our emergency team."
+ "number": "+1987654321"
}
else:
destination = {
"type": "number",
- "number": "+1-555-SUPPORT",
- "message": "Transferring you to our support team."
+ "number": "+1555555555"
}
- return {"destination": destination}
+ # Execute transfer via Live Call Control
+ async with httpx.AsyncClient() as client:
+ await client.post(
+ f"{control_url}/control",
+ json={
+ "type": "transfer",
+ "destination": destination,
+ "content": f"Transferring you to {department} now."
+ },
+ headers={"Content-Type": "application/json"}
+ )
+
+ return {"success": True}
except Exception as error:
- print(f"Webhook error: {error}")
- raise HTTPException(
- status_code=500,
- detail="Transfer routing failed. Please try again."
- )
+ print(f"Transfer error: {error}")
+ raise HTTPException(status_code=500, detail="Transfer failed")
```
+
+
+ **SIP transfers:** To transfer to a SIP endpoint, use `"type": "sip"` with `"sipUri"` instead:
+
+ ```json
+ {
+ "type": "transfer",
+ "destination": {
+ "type": "sip",
+ "sipUri": "sip:+1234567890@sip.telnyx.com"
+ },
+ "content": "Transferring your call now."
+ }
+ ```
+
- Create a phone number and assign your assistant
- - Call the number and test different transfer scenarios
- - Monitor your webhook server logs to see the routing decisions
- - Verify transfers are working to the correct destinations
+ - Call the number and request a transfer to different departments
+ - Monitor your webhook server logs to see the tool calls and control URL
+ - Verify transfers are executing to the correct destinations
```typescript
@@ -385,7 +486,8 @@ Dynamic transfers operate by leaving the destination unspecified initially, then
console.log(`Test call created: ${testCall.id}`);
- // Monitor webhook server logs to see transfer requests
+ // During the call, say "I need to speak with support"
+ // Monitor webhook server logs to see the transfer execution
```
@@ -405,6 +507,9 @@ Dynamic transfers operate by leaving the destination unspecified initially, then
response = requests.post(url, headers=headers, json=data)
call = response.json()
print(f"Test call created: {call['id']}")
+
+ # During the call, say "I need to speak with support"
+ # Monitor webhook server logs to see the transfer execution
return call
```
@@ -414,26 +519,14 @@ Dynamic transfers operate by leaving the destination unspecified initially, then
---
-## Implementation Approaches
-
-**Assistant-based implementation** uses transfer-type tools with conditions interpreted by the assistant through system prompts. The assistant determines when and where to route calls based on clearly defined tool purposes and routing logic in the prompt. Best for quick setup and simpler routing scenarios.
-
-**Workflow-based implementation** uses conditional logic based on outputs from any workflow node - tools, API requests, conversation variables, or other data sources. Conditions evaluate node outputs to determine routing paths within visual workflows. Best for complex business logic, structured decision trees, and team-friendly configuration.
-
-
-
-
**Assistant-based routing**
Route customers to appropriate support tiers based on conversation analysis and customer data
-
-
-
-
- **Workflow-based routing**
+
+ **Squad-based routing**
Direct tenant calls to the right department with automated verification
@@ -465,6 +558,13 @@ Dynamic transfers operate by leaving the destination unspecified initially, then
**Security considerations:** Always verify webhook signatures to ensure requests come from Vapi. Never log sensitive customer data, implement proper access controls, and follow privacy regulations like GDPR and CCPA when handling customer information in routing decisions.
+## Troubleshooting
+
+- **Tool call not received**: Verify your server URL is correctly configured in the custom tool and is publicly accessible. Check your server logs for incoming requests.
+- **Transfer not executing**: Make sure that you are sending a valid destination object (type number or sip). See API reference [here](https://docs.vapi.ai/api-reference/tools/create#request.body.TransferCallTool.destinations).
+- **Invalid destination format**: For phone numbers, use `"type": "number"` with E.164 format. For SIP, use `"type": "sip"` with a valid SIP URI.
+- **Transfer fails silently**: Check your server logs for errors in the axios/httpx request.
+
## Related Documentation
* **[Call Forwarding](/call-forwarding)** - Static transfer options and transfer plans
diff --git a/fern/calls/call-ended-reason.mdx b/fern/calls/call-ended-reason.mdx
index d9f9b081f..4349eb71e 100644
--- a/fern/calls/call-ended-reason.mdx
+++ b/fern/calls/call-ended-reason.mdx
@@ -4,88 +4,219 @@ subtitle: All possible call ended reason codes and what they mean.
slug: calls/call-ended-reason
---
-This guide will discuss all possible `endedReason` codes for a call.
+Every call in Vapi ends with an `endedReason` code that tells you exactly why it ended. You can find this value in the **"Ended Reason"** column of your [call logs](https://dashboard.vapi.ai/calls), or under the `endedReason` field on the [Call object](/api-reference/calls/get-call).
For the full list of possible `endedReason` values, see the [API reference](/api-reference/calls/list#response.body.endedReason).
-You can find these under the **"Ended Reason"** section of your [call logs](https://dashboard.vapi.ai/calls) (or under the `endedReason` field on the [Call Object](/api-reference/calls/get-call)).
-
-#### Assistant-Related
-
-- `assistant-ended-call`: The assistant intentionally ended the call based on the user's response.
-- `assistant-ended-call-after-message-spoken`: The assistant intentionally ended the call after speaking a pre-defined message.
-- `assistant-ended-call-with-hangup-task`: The assistant ended the call using a hangup task.
-- `assistant-error`: This general error occurs within the assistant's logic or processing due to bugs, misconfigurations, or unexpected inputs.
-- `assistant-forwarded-call`: The assistant successfully transferred the call to another number or service.
-- `assistant-join-timed-out`: The assistant failed to join the call within the expected timeframe.
-- `assistant-not-found`: The specified assistant cannot be located or accessed, possibly due to an incorrect assistant ID or configuration issue.
-- `assistant-not-valid`: The assistant ID provided is not valid or recognized by the system.
-- `assistant-not-provided`: No assistant ID was specified in the request, causing the system to fail.
-- `assistant-request-failed`: The request to the assistant failed to complete successfully.
-- `assistant-request-returned-error`: Communicating with the assistant resulted in an error, possibly due to network issues or problems with the assistant itself.
-- `assistant-request-returned-forwarding-phone-number`: The assistant triggered a call forwarding action, ending the current call.
-- `assistant-request-returned-invalid-assistant`: The assistant returned an invalid response or failed to fulfill the request properly.
-- `assistant-request-returned-no-assistant`: The assistant didn't provide any response or action to the request.
-- `assistant-request-returned-unspeakable-error`: The assistant returned an error that cannot be spoken to the user.
-- `assistant-said-end-call-phrase`: The assistant recognized a phrase or keyword triggering call termination.
-
-#### Pipeline and LLM
-
-These relate to issues within the AI processing pipeline or the Large Language Models (LLMs) used for understanding and generating text:
-
-- `call.in-progress.error-vapifault-*`: Various error codes indicate specific failures within the processing pipeline, such as function execution, LLM responses, or external service integration. Examples include OpenAI, Azure OpenAI, Together AI, and several other LLMs or voice providers.
-- `call.in-progress.error-providerfault-*`: Similar to `call.in-progress.error-vapifault-*`. However, these error codes are surfaced when Vapi receives an error that has occured on the provider's side. Examples include internal server errors, or service unavailability.
-- `pipeline-error-*`: Similar to `call.in-progress.error-vapifault-*`. However, these error codes are surfaced when you are using your own provider keys.
-- `pipeline-no-available-llm-model`: No suitable LLM was available to process the request. Previously `pipeline-no-available-model`.
-- `call.in-progress.error-pipeline-no-available-llm-model`: No suitable LLM was available to process the request during the call.
-
-#### Phone Calls and Connectivity
-
-- `customer-busy`: The customer's line was busy.
-- `customer-ended-call`: The customer (end human user) ended the call for both inbound and outbound calls.
-- `customer-did-not-answer`: The customer didn't answer the call. If you're looking to build a use case where you need the bot to talk to automated IVRs, set `assistant.voicemailDetectionEnabled=false`.
-- `customer-did-not-give-microphone-permission`: The user didn't grant the necessary microphone access for the call.
-- `call.in-progress.error-assistant-did-not-receive-customer-audio`: Similar to `customer-did-not-give-microphone-permission`, but more generalized to situations where no customer audio was received.
-- `phone-call-provider-closed-websocket`: The connection with the call provider was unexpectedly closed.
-- `phone-call-provider-bypass-enabled-but-no-call-received`: The phone call provider bypass was enabled but no call was received.
-- `twilio-failed-to-connect-call`: The Twilio service, responsible for managing calls, failed to establish a connection.
-- `twilio-reported-customer-misdialed`: Twilio reported that the customer dialed an invalid or incomplete number.
-- `vonage-disconnected`: The call was disconnected by Vonage, another call management service.
-- `vonage-failed-to-connect-call`: Vonage failed to establish the call connection.
-- `vonage-rejected`: The call was rejected by Vonage due to an issue or configuration problem.
-- `vonage-completed`: The call was completed successfully by Vonage.
-- `call.in-progress.error-sip-telephony-provider-failed-to-connect-call`: The SIP telephony provider failed to establish the call connection.
-
-#### Call Start Errors
-
-- `call-start-error-neither-assistant-nor-server-set`: Neither an assistant nor server was configured for the call.
-- `call.start.error-get-org`: Error retrieving organization information during call start.
-- `call.start.error-get-subscription`: Error retrieving subscription information during call start.
-- `call.start.error-get-assistant`: Error retrieving assistant information during call start.
-- `call.start.error-get-phone-number`: Error retrieving phone number information during call start.
-- `call.start.error-get-customer`: Error retrieving customer information during call start.
-- `call.start.error-get-resources-validation`: Error validating resources during call start.
-- `call.start.error-vapi-number-international`: Error with international Vapi number during call start.
-- `call.start.error-vapi-number-outbound-daily-limit`: Outbound daily limit reached for Vapi number.
-- `call.start.error-get-transport`: Error retrieving transport information during call start.
-
-#### Call Forwarding and Hooks
-
-- `call.forwarding.operator-busy`: The operator was busy during call forwarding.
-- `call.ringing.hook-executed-say`: A say hook was executed during the ringing phase.
-- `call.ringing.hook-executed-transfer`: A transfer hook was executed during the ringing phase.
-
-#### Other Reasons
-
-- `database-error`: A database error occurred during the call.
-- `exceeded-max-duration`: The call reached its maximum allowed duration and was automatically terminated.
-- `manually-canceled`: The call was manually canceled.
-- `silence-timed-out`: The call was ended due to prolonged silence, indicating inactivity.
-- `voicemail`: The call was diverted to voicemail.
-- `worker-shutdown`: The worker handling the call was shut down.
-
-#### Unknown
-
-- `unknown-error`: An unexpected error occurred, and the cause is unknown. For this, please [contact support](/support) with your `call_id` and account email address, & we will investigate.
+## Quick diagnosis
+
+Start here if a call failed and you want to quickly understand what happened:
+
+| The caller experienced... | Look for these errors | Likely cause |
+|---|---|---|
+| Phone never rang | `call.start.error-*`, `assistant-not-found`, `*-transport-never-connected` | Account/billing issue, bad configuration, or Vapi infrastructure error |
+| Phone rang but no answer | `customer-did-not-answer`, `customer-busy`, SIP 408/480 | Normal behavior — callee was unavailable |
+| Call dropped mid-conversation | `*-worker-died`, `phone-call-provider-closed-websocket`, `worker-shutdown` | Network issue or Vapi infrastructure error (usually transient) |
+| Assistant went silent or unresponsive | `*-llm-failed`, `*-voice-failed`, `*-transcriber-failed`, `*-429-*`, `*-500-*` | Provider outage or credential/quota issue — configure fallback providers for the 3 core services (TTS, LLM, STT) |
+| Call worked normally, then ended | `assistant-ended-call`, `customer-ended-call`, `silence-timed-out`, `exceeded-max-duration` | Expected behavior — adjust timeout settings if calls end too early |
+| Transfer failed | `*-transfer-failed`, `*-warm-transfer-*`, SIP 403/503 | Bad transfer destination or SIP configuration |
+
+For a detailed symptom-based walkthrough, see [Troubleshoot call errors](/calls/troubleshoot-call-errors).
+
+## Understanding error prefixes
+
+Many error codes include a prefix that indicates who is responsible for the failure:
+
+| Prefix | Meaning | What to do |
+|---|---|---|
+| `call.in-progress.error-vapifault-*` | Vapi infrastructure or platform credential failure. You are typically **not charged** for these calls. | Contact [Vapi support](/support) if persistent. |
+| `call.in-progress.error-providerfault-*` | A third-party provider (OpenAI, Deepgram, etc.) returned a server error. Outside Vapi's control. | Check the provider's status page. Consider configuring a fallback provider. |
+| `pipeline-error-*` | Legacy error format. When using your own provider keys (BYOK), these typically indicate credential or quota issues on your account with that provider. When using Vapi's platform keys, treat as a `vapifault`. | Verify your API key, billing status, and quota with the provider. |
+
+## Call start errors
+
+These occur before the call connects, during resource setup.
+
+### Account and billing
+
+- `call.start.error-subscription-frozen` — Your subscription is frozen due to a failed payment. Update your payment method in the [dashboard](https://dashboard.vapi.ai/).
+- `call.start.error-subscription-insufficient-credits` — Not enough credits to start the call. Add credits or enable auto-reload.
+- `call.start.error-subscription-wallet-does-not-exist` — No billing wallet found for the subscription. Contact [support](/support).
+- `call.start.error-subscription-upgrade-failed` — An automatic subscription upgrade attempt failed.
+- `call.start.error-subscription-concurrency-limit-reached` — You've hit the maximum number of simultaneous calls for your plan. Upgrade your plan or wait for an active call to end.
+- `call.start.error-fraud-check-failed` — The call was blocked by Vapi's fraud detection system.
+- `call.start.error-enterprise-feature-not-available-recording-consent` — Recording consent requires an enterprise plan.
+
+### Resource resolution
+
+- `call-start-error-neither-assistant-nor-server-set` — Neither an assistant nor a server URL was configured for the call.
+- `call.start.error-get-org` — Error retrieving your organization during call start. Verify your API key.
+- `call.start.error-get-subscription` — Error retrieving subscription information during call start.
+- `call.start.error-get-assistant` — Error retrieving the assistant. Verify the assistant ID exists.
+- `call.start.error-get-phone-number` — Error retrieving the phone number. Verify the number is imported and active.
+- `call.start.error-get-customer` — Error retrieving customer information.
+- `call.start.error-get-resources-validation` — The assistant, tools, or other resources failed validation.
+- `call.start.error-get-transport` — Error setting up the call transport (Twilio, Vonage, etc.).
+- `call.start.error-vapifault-database-error` — Internal database error during call setup. Retry or contact [support](/support).
+
+### Phone number limits
+
+- `call.start.error-vapi-number-international` — International calling is not supported on this Vapi number.
+- `call.start.error-vapi-number-outbound-daily-limit` — The daily outbound call limit for this Vapi number has been reached.
+
+### Assistant resolution (via server URL)
+
+- `assistant-not-found` — The specified assistant ID does not exist.
+- `assistant-not-valid` — The assistant configuration is invalid.
+- `assistant-request-failed` — The request to your server URL to fetch an assistant failed.
+- `assistant-request-returned-error` — Your server URL returned an error response.
+- `assistant-request-returned-unspeakable-error` — Your server URL returned an error that cannot be spoken to the user.
+- `assistant-request-returned-invalid-assistant` — Your server URL returned a response that is not a valid assistant configuration.
+- `assistant-request-returned-no-assistant` — Your server URL returned an empty response with no assistant.
+- `assistant-request-returned-forwarding-phone-number` — Your server URL returned a phone number for forwarding instead of an assistant.
+- `scheduled-call-deleted` — A scheduled call was deleted before it could execute.
+
+## Assistant actions
+
+These indicate the assistant intentionally ended the call — not errors.
+
+- `assistant-ended-call` — The assistant ended the call (via an end-call tool or function).
+- `assistant-ended-call-after-message-spoken` — The assistant ended the call after speaking its final message.
+- `assistant-ended-call-with-hangup-task` — The assistant ended the call using a hangup workflow node.
+- `assistant-said-end-call-phrase` — The assistant said a phrase configured to trigger call termination.
+- `assistant-forwarded-call` — The assistant transferred the call to another number or service.
+- `assistant-join-timed-out` — The assistant failed to join the call within the expected timeframe.
+
+## Customer actions
+
+- `customer-ended-call` — The customer hung up.
+- `customer-busy` — The customer's line was busy (outbound calls).
+- `customer-did-not-answer` — The customer did not answer (outbound calls).
+- `customer-did-not-give-microphone-permission` — The user denied microphone access (web calls).
+- `call.in-progress.error-assistant-did-not-receive-customer-audio` — No audio was received from the customer. This can indicate a network issue, mic problem, or the customer disconnected silently.
+- `customer-ended-call-before-warm-transfer` — The customer hung up before a warm transfer completed.
+- `customer-ended-call-after-warm-transfer-attempt` — The customer hung up after a warm transfer was attempted.
+- `customer-ended-call-during-transfer` — The customer hung up during a transfer.
+
+## Timeouts
+
+- `exceeded-max-duration` — The call reached `maxDurationSeconds` and was automatically terminated.
+- `silence-timed-out` — No speech was detected for the configured silence timeout duration.
+
+## Pipeline errors: LLM
+
+Each LLM provider has error codes that follow a consistent pattern. The status code in the error name tells you what went wrong:
+
+| Status code in error | Meaning | What to do |
+|---|---|---|
+| `400-bad-request-validation-failed` | Invalid request (bad model name, malformed messages, etc.) | Check your assistant's model configuration. |
+| `401-unauthorized` / `401-incorrect-api-key` | Invalid API key. | Verify your API key for this provider. |
+| `403-model-access-denied` | Your API key doesn't have access to the requested model. | Check model permissions in your provider account. |
+| `429-exceeded-quota` / `429-rate-limit-reached` | Rate limit or quota exceeded. | Upgrade your plan with the provider or reduce call volume. |
+| `500-server-error` | Provider internal server error. | Retry. Check the provider's status page. |
+| `503-server-overloaded-error` | Provider temporarily overloaded. | Retry after a brief wait. |
+| `llm-failed` | Generic LLM failure. | Check call logs for details. |
+
+**Supported providers:** OpenAI, Azure OpenAI, Anthropic, Anthropic Bedrock, Anthropic Vertex, Google, Groq, xAI, Mistral, Together AI, Perplexity AI, DeepInfra, DeepSeek, Cerebras, Inflection AI, Anyscale, OpenRouter, Runpod, Baseten, Custom LLM.
+
+Additional model errors:
+
+- `pipeline-no-available-llm-model` / `call.in-progress.error-pipeline-no-available-llm-model` — No suitable LLM model was available. Check your model configuration.
+- `call.in-progress.error-pipeline-ws-model-connection-failed` — Failed to connect to a custom LLM WebSocket endpoint.
+
+## Pipeline errors: voice (TTS)
+
+Each voice provider has specific error codes. Common patterns:
+
+- `*-voice-failed` — Generic voice synthesis failure for that provider.
+- `*-voice-not-found` / `*-invalid-voice` — The configured voice ID does not exist or is invalid.
+- `*-quota-exceeded` / `*-out-of-credits` — Voice provider credits exhausted.
+- `*-unauthorized-access` / `*-invalid-api-key` — Voice provider credential issue.
+- `*-socket-hang-up` / `*-500-server-error` / `*-503-server-error` — Voice provider infrastructure issue.
+
+**Supported providers:** ElevenLabs, Cartesia, Deepgram, PlayHT, Azure, OpenAI, Rime AI, Smallest AI, Neuphonic, Hume, Sesame, Inworld, Minimax, WellSaid, Custom Voice.
+
+## Pipeline errors: transcriber (STT)
+
+Common transcriber error patterns:
+
+- `*-transcriber-failed` — Generic transcriber failure.
+- `*-returning-400-*` — Bad request (invalid model/language combination, invalid config, etc.).
+- `*-returning-401-*` — Invalid transcriber credentials.
+- `*-returning-403-*` — Model access denied on the transcriber.
+- `*-returning-500-*` / `*-returning-502-*` — Transcriber provider server error.
+
+**Supported providers:** Deepgram, AssemblyAI, Gladia, Speechmatics, Talkscriber, Azure Speech, Google, OpenAI, Soniox, ElevenLabs, Custom Transcriber.
+
+## Transfer errors
+
+- `call.in-progress.error-transfer-failed` — A call transfer attempt failed.
+- `call.in-progress.error-warm-transfer-max-duration` — The warm transfer exceeded its maximum duration.
+- `call.in-progress.error-warm-transfer-assistant-cancelled` — The transfer assistant cancelled the warm transfer.
+- `call.in-progress.error-warm-transfer-silence-timeout` — Silence timeout during a warm transfer.
+- `call.in-progress.error-warm-transfer-microphone-timeout` — Microphone timeout during a warm transfer.
+
+For step-by-step transfer debugging, see [Debug forwarding drops](/calls/troubleshoot-call-forwarding-drops).
+
+## Transport and connectivity
+
+- `phone-call-provider-closed-websocket` — The call provider's WebSocket connection closed unexpectedly. The caller experiences an abrupt call drop.
+- `phone-call-provider-bypass-enabled-but-no-call-received` — Phone call provider bypass was enabled but no call arrived.
+- `call.in-progress.error-vapifault-transport-never-connected` — The transport never connected. Vapi infrastructure issue.
+- `call.in-progress.error-providerfault-transport-never-connected` — The transport provider failed to connect. Provider-side issue.
+- `call.in-progress.error-vapifault-transport-connected-but-call-not-active` — Transport connected but the call was no longer active.
+- `call.in-progress.error-vapifault-call-started-but-connection-to-transport-missing` — Call started but the transport connection was lost.
+- `call.in-progress.error-vapifault-worker-not-available` — No call worker was available to process the call.
+- `call.in-progress.error-vapifault-worker-died` — The call worker process crashed during the call.
+- `call.in-progress.error-vapifault-chat-pipeline-failed-to-start` — The chat pipeline failed to initialize.
+
+### Twilio
+
+- `twilio-failed-to-connect-call` — Twilio failed to establish the call.
+- `twilio-reported-customer-misdialed` — Twilio reported the customer dialed an invalid number.
+- `call.in-progress.twilio-completed-call` — Twilio reported the call as completed on their side.
+
+### Vonage
+
+- `vonage-disconnected` — Call disconnected by Vonage.
+- `vonage-failed-to-connect-call` — Vonage failed to connect the call.
+- `vonage-rejected` — Call rejected by Vonage.
+- `vonage-completed` — Call completed by Vonage.
+
+### SIP
+
+- `call.in-progress.error-sip-inbound-call-failed-to-connect` — Inbound SIP call failed to connect.
+- `call.in-progress.error-sip-outbound-call-failed-to-connect` — Outbound SIP call failed to connect.
+- `call.in-progress.error-providerfault-outbound-sip-403-forbidden` — SIP 403: call forbidden by the SIP provider.
+- `call.in-progress.error-providerfault-outbound-sip-407-proxy-authentication-required` — SIP 407: proxy authentication required.
+- `call.in-progress.error-providerfault-outbound-sip-408-request-timeout` — SIP 408: request timed out.
+- `call.in-progress.error-providerfault-outbound-sip-480-temporarily-unavailable` — SIP 480: destination temporarily unavailable.
+- `call.in-progress.error-providerfault-outbound-sip-503-service-unavailable` — SIP 503: service unavailable.
+- `call.ringing.error-sip-inbound-call-failed-to-connect` — SIP inbound call failed during ringing.
+- `call.ringing.sip-inbound-caller-hungup-before-call-connect` — SIP caller hung up before the call connected.
+- `call.in-progress.sip-completed-call` — SIP provider reported the call as completed.
+
+For SIP trunk setup issues, see [Troubleshoot SIP trunk credential errors](/advanced/sip/troubleshoot-sip-trunk-credential-errors).
+
+## Call hooks
+
+- `call.ringing.hook-executed-say` — A say hook executed during ringing ended the call.
+- `call.ringing.hook-executed-transfer` — A transfer hook executed during ringing ended the call.
+- `call.ending.hook-executed-say` — A say hook executed during the ending phase.
+- `call.ending.hook-executed-transfer` — A transfer hook executed during the ending phase.
+- `call.forwarding.operator-busy` — The operator was busy during call forwarding.
+
+## Other reasons
+
+- `manually-canceled` — The call was manually canceled via the API or dashboard.
+- `voicemail` — The call was diverted to or detected as voicemail.
+- `worker-shutdown` — The call worker was shut down (e.g., during a deployment). The call should be retried automatically.
+- `call-deleted` — The call record was deleted.
+
+## Next steps
+
+- **[Troubleshoot call errors](/calls/troubleshoot-call-errors):** Step-by-step diagnosis guide organized by what the caller experienced.
+- **[Debugging voice agents](/debugging):** General debugging workflow using dashboard tools, logs, and test suites.
+- **[How to report issues](/issue-reporting):** Include your `call_id` and account email when contacting support.
diff --git a/fern/calls/call-features.mdx b/fern/calls/call-features.mdx
index 7bc202d07..fed5b8a6a 100644
--- a/fern/calls/call-features.mdx
+++ b/fern/calls/call-features.mdx
@@ -129,6 +129,45 @@ curl -X POST 'https://aws-us-west-2-production1-phone-call-websocket.vapi.ai/742
}'
```
+You can also transfer to a SIP URI:
+
+```bash
+curl -X POST 'https://aws-us-west-2-production1-phone-call-websocket.vapi.ai/7420f27a-30fd-4f49-a995-5549ae7cc00d/control' \
+-H 'content-type: application/json' \
+--data-raw '{
+ "type": "transfer",
+ "destination": {
+ "type": "sip",
+ "sipUri": "sip:+transferPhoneNumber@sip.telnyx.com"
+ },
+ "content": "Testing transfer call."
+}'
+```
+
+### 6. Handoff Call
+Handoff the call to a different assistant.
+
+```bash
+curl -X POST 'https://aws-us-west-2-production1-phone-call-websocket.vapi.ai/7420f27a-30fd-4f49-a995-5549ae7cc00d/control' \
+-H 'content-type: application/json' \
+--data-raw '{
+ "type": "handoff",
+ "destination": {
+ "type": "assistant",
+ "contextEngineeringPlan": "none",
+ "assistant": {
+ "name": "new_assistant",
+ "voice": {
+ "provider": "vapi",
+ "voiceId": "Neha"
+ }
+ }
+ },
+ "content": "Handing off your call now"
+}'
+```
+
+
## Call Listen Feature
The `listenUrl` allows you to connect to a WebSocket and stream the audio data in real-time. You can either process the audio directly or save the binary data to analyze or replay later.
diff --git a/fern/calls/call-queue-management.mdx b/fern/calls/call-queue-management.mdx
new file mode 100644
index 000000000..69b9942e7
--- /dev/null
+++ b/fern/calls/call-queue-management.mdx
@@ -0,0 +1,711 @@
+---
+title: Call queue management with Twilio
+subtitle: Handle high-volume calls with Twilio queues when hitting Vapi concurrency limits
+slug: calls/call-queue-management
+description: Build a call queue system using Twilio to handle large volumes of calls while respecting Vapi concurrency limits, ensuring no calls are dropped.
+---
+
+## Overview
+
+When your application receives more simultaneous calls than your Vapi concurrency limit allows, calls can be rejected. A call queue system using Twilio queues solves this by holding excess calls in a queue and processing them as capacity becomes available.
+
+**In this guide, you'll learn to:**
+- Set up Twilio call queues for high-volume scenarios
+- Implement concurrency tracking to respect Vapi limits
+- Build a queue processing system with JavaScript
+- Handle call dequeuing and Vapi integration seamlessly
+
+
+This approach is ideal for call centers, customer support lines, or any application expecting call volumes that exceed your Vapi concurrency limit.
+
+
+## Prerequisites
+
+Before implementing call queue management, ensure you have:
+
+- **Vapi Account**: Access to the [Vapi Dashboard](https://dashboard.vapi.ai/org/api-keys) with your API key
+- **Twilio Account**: Active Twilio account with Account SID and Auth Token
+- **Twilio CLI**: Install from [twil.io/cli](https://twil.io/cli) for queue management
+- **Phone Number**: Twilio phone number configured for incoming calls
+- **Assistant**: Configured Vapi assistant ID for handling calls
+- **Server Environment**: Node.js server capable of receiving webhooks
+- **Redis Instance**: Redis server for persistent state management (local, cloud, or serverless-compatible)
+
+
+You'll need to know your Vapi account's concurrency limit. Check your plan details in the [Vapi Dashboard](https://dashboard.vapi.ai/settings/billing) under billing settings.
+
+
+
+For production deployments, especially in serverless environments, Redis ensures your call counters persist across server restarts and function invocations.
+
+
+## How it works
+
+The queue management system operates in three phases:
+
+
+
+ Incoming calls are automatically placed in a Twilio queue when received
+
+
+ Server monitors active Vapi calls against your concurrency limit
+
+
+ When capacity is available, calls are dequeued and connected to Vapi
+
+
+
+**Call Flow:**
+
+1. **Incoming call** → Twilio receives call and executes webhook
+2. **Queue placement** → Call is placed in Twilio queue with hold music
+3. **Automatic processing** → Server processes queue immediately when capacity changes
+4. **Capacity check** → Server verifies if Vapi concurrency limit allows new calls using Redis
+5. **Dequeue & connect** → Available calls are dequeued and connected to Vapi assistants
+6. **Persistent tracking** → Redis tracks active calls across server restarts and serverless invocations
+
+---
+
+## Implementation Guide
+
+
+
+ First, create a Twilio queue using the Twilio CLI to hold incoming calls.
+
+ ```bash
+ twilio api:core:queues:create \
+ --friendly-name customer-support
+ ```
+
+ **Expected Response:**
+ ```json
+ {
+ "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "average_wait_time": 0,
+ "current_size": 0,
+ "date_created": "2024-01-15T18:39:09.000Z",
+ "date_updated": "2024-01-15T18:39:09.000Z",
+ "friendly_name": "customer-support",
+ "max_size": 100,
+ "sid": "QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queues/QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
+ }
+ ```
+
+
+ Save the queue `sid` (e.g., `QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`) - you'll need this for queue operations.
+
+
+
+
+ Configure your Twilio phone number to send incoming calls to your queue endpoint.
+
+ 1. Go to [Twilio Console > Phone Numbers](https://console.twilio.com/us1/develop/phone-numbers/manage/incoming)
+ 2. Select your phone number
+ 3. Set **A call comes in** webhook to: `https://your-server.com/incoming`
+ 4. Set HTTP method to `POST`
+ 5. Save configuration
+
+
+
+ Configure Redis for persistent call counter storage. Choose the option that best fits your deployment:
+
+
+
+ **Install Redis locally:**
+ ```bash
+ # macOS (using Homebrew)
+ brew install redis
+ brew services start redis
+
+ # Ubuntu/Debian
+ sudo apt update
+ sudo apt install redis-server
+ sudo systemctl start redis-server
+
+ # Docker
+ docker run -d -p 6379:6379 redis:alpine
+ ```
+
+ **Test connection:**
+ ```bash
+ redis-cli ping
+ # Should return: PONG
+ ```
+
+
+
+ **Popular Redis cloud providers:**
+
+ - **[Redis Cloud](https://redis.com/redis-enterprise-cloud/)**: Free tier available
+ - **[AWS ElastiCache](https://aws.amazon.com/elasticache/)**: Managed Redis on AWS
+ - **[Google Cloud Memorystore](https://cloud.google.com/memorystore)**: Managed Redis on GCP
+ - **[Azure Cache for Redis](https://azure.microsoft.com/services/cache/)**: Managed Redis on Azure
+
+ Get your connection URL from your provider's dashboard.
+
+
+
+ **[Upstash Redis](https://upstash.com/)** is optimized for serverless environments:
+
+ 1. Create free account at [console.upstash.com](https://console.upstash.com)
+ 2. Create new Redis database
+ 3. Copy the REST URL for serverless compatibility
+ 4. Use connection pooling for better performance
+
+ **Upstash offers:**
+ - Pay-per-request pricing
+ - Global edge locations
+ - Built-in connection pooling
+
+
+
+
+
+ Create your Node.js server with the required dependencies and environment variables.
+
+ **Install Dependencies:**
+ ```bash
+ npm install express twilio axios dotenv redis
+ ```
+
+ **Environment Variables (.env):**
+ ```bash
+ # Vapi Configuration
+ VAPI_API_KEY=your_vapi_api_key_here
+ VAPI_PHONE_NUMBER_ID=your_phone_number_id
+ VAPI_ASSISTANT_ID=your_assistant_id
+
+ # Twilio Configuration
+ TWILIO_ACCOUNT_SID=your_twilio_account_sid
+ TWILIO_AUTH_TOKEN=your_twilio_auth_token
+ TWILIO_QUEUE_SID=QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+
+ # Redis Configuration (for persistent state)
+ REDIS_URL=redis://localhost:6379
+ # For Redis Cloud: REDIS_URL=rediss://username:password@host:port
+ # For Upstash (serverless): REDIS_URL=rediss://default:password@host:port
+
+ # Server Configuration
+ PORT=3000
+ MAX_CONCURRENCY=5
+ ```
+
+
+
+ Create the main server file with queue handling, concurrency tracking, and Vapi integration.
+
+ ```javascript title="server.js"
+ const express = require('express');
+ const twilio = require('twilio');
+ const axios = require('axios');
+ const redis = require('redis');
+ require('dotenv').config();
+
+ const app = express();
+ const twilioClient = twilio(process.env.TWILIO_ACCOUNT_SID, process.env.TWILIO_AUTH_TOKEN);
+
+ // Redis client for persistent state management
+ const redisClient = redis.createClient({
+ url: process.env.REDIS_URL || 'redis://localhost:6379'
+ });
+
+ const MAX_CONCURRENCY = parseInt(process.env.MAX_CONCURRENCY) || 5;
+ const REDIS_KEYS = {
+ ACTIVE_CALLS: 'vapi:queue:active_calls',
+ CALLS_IN_QUEUE: 'vapi:queue:calls_in_queue'
+ };
+
+ // Middleware
+ app.use(express.json());
+ app.use(express.urlencoded({ extended: true }));
+
+ // Initialize Redis connection
+ async function initializeRedis() {
+ try {
+ await redisClient.connect();
+ console.log('Connected to Redis');
+
+ // Initialize counters if they don't exist
+ const activeCalls = await redisClient.get(REDIS_KEYS.ACTIVE_CALLS);
+ const callsInQueue = await redisClient.get(REDIS_KEYS.CALLS_IN_QUEUE);
+
+ if (activeCalls === null) {
+ await redisClient.set(REDIS_KEYS.ACTIVE_CALLS, '0');
+ }
+ if (callsInQueue === null) {
+ await redisClient.set(REDIS_KEYS.CALLS_IN_QUEUE, '0');
+ }
+ } catch (error) {
+ console.error('Redis connection failed:', error);
+ process.exit(1);
+ }
+ }
+
+ // Helper functions for Redis operations
+ async function getActiveCalls() {
+ const count = await redisClient.get(REDIS_KEYS.ACTIVE_CALLS);
+ return parseInt(count) || 0;
+ }
+
+ async function getCallsInQueue() {
+ const count = await redisClient.get(REDIS_KEYS.CALLS_IN_QUEUE);
+ return parseInt(count) || 0;
+ }
+
+ async function incrementActiveCalls() {
+ return await redisClient.incr(REDIS_KEYS.ACTIVE_CALLS);
+ }
+
+ async function decrementActiveCalls() {
+ const current = await getActiveCalls();
+ if (current > 0) {
+ return await redisClient.decr(REDIS_KEYS.ACTIVE_CALLS);
+ }
+ return current;
+ }
+
+ async function incrementCallsInQueue() {
+ return await redisClient.incr(REDIS_KEYS.CALLS_IN_QUEUE);
+ }
+
+ async function decrementCallsInQueue() {
+ const current = await getCallsInQueue();
+ if (current > 0) {
+ return await redisClient.decr(REDIS_KEYS.CALLS_IN_QUEUE);
+ }
+ return current;
+ }
+
+ async function syncCallsInQueue() {
+ await redisClient.set(REDIS_KEYS.CALLS_IN_QUEUE, '0');
+ }
+
+ // Incoming call handler - adds calls to queue
+ app.post('/incoming', async (req, res) => {
+ try {
+ const twiml = `<Response>
+ <Enqueue>customer-support</Enqueue>
+ </Response>`;
+
+ res.set('Content-Type', 'application/xml');
+ res.send(twiml);
+
+ // Increment queue counter in Redis
+ const queueCount = await incrementCallsInQueue();
+ console.log(`Call ${req.body.CallSid} added to queue. Calls in queue: ${queueCount}`);
+
+ // Immediately check if we can process this call
+ setImmediate(() => processQueue());
+
+ } catch (error) {
+ console.error('Error handling incoming call:', error);
+ res.status(500).send('Error processing call');
+ }
+ });
+
+ async function processQueue() {
+ try {
+ const activeCalls = await getActiveCalls();
+ const callsInQueue = await getCallsInQueue();
+
+ // Check if we have capacity for more calls
+ if (activeCalls >= MAX_CONCURRENCY) {
+ return;
+ }
+
+ // Check if there are calls in queue
+ if (callsInQueue === 0) {
+ return;
+ }
+
+ // Get next call from queue
+ const members = await twilioClient.queues(process.env.TWILIO_QUEUE_SID)
+ .members
+ .list({ limit: 1 });
+
+ if (members.length === 0) {
+ // No calls in queue - sync our counter
+ await syncCallsInQueue();
+ return;
+ }
+
+ const member = members[0];
+ console.log(`Processing queued call: ${member.callSid}`);
+
+ // Get Vapi TwiML for this call
+ const twiml = await initiateVapiCall(member.callSid, member.phoneNumber);
+
+ if (twiml) {
+ // Update call with Vapi TwiML
+ await twilioClient.calls(member.callSid).update({ twiml });
+
+ // Update counters in Redis
+ const newActiveCalls = await incrementActiveCalls();
+ const newQueueCount = await decrementCallsInQueue();
+
+ console.log(`Call connected to Vapi. Active calls: ${newActiveCalls}/${MAX_CONCURRENCY}, Queue: ${newQueueCount}`);
+
+ // Check if we can process more calls immediately
+ if (newActiveCalls < MAX_CONCURRENCY && newQueueCount > 0) {
+ setImmediate(() => processQueue());
+ }
+ } else {
+ console.error(`Failed to get TwiML for call ${member.callSid}`);
+ }
+ } catch (error) {
+ console.error('Error processing queue:', error);
+ }
+ }
+
+ // Generate Vapi TwiML for a call
+ async function initiateVapiCall(callSid, customerNumber) {
+ const payload = {
+ phoneNumberId: process.env.VAPI_PHONE_NUMBER_ID,
+ phoneCallProviderBypassEnabled: true,
+ customer: { number: customerNumber },
+ assistantId: process.env.VAPI_ASSISTANT_ID,
+ };
+
+ const headers = {
+ 'Authorization': `Bearer ${process.env.VAPI_API_KEY}`,
+ 'Content-Type': 'application/json',
+ };
+
+ try {
+ const response = await axios.post('https://api.vapi.ai/call', payload, { headers });
+
+ if (response.data && response.data.phoneCallProviderDetails) {
+ return response.data.phoneCallProviderDetails.twiml;
+ } else {
+ throw new Error('Invalid response structure from Vapi');
+ }
+ } catch (error) {
+ console.error(`Error initiating Vapi call for ${callSid}:`, error.message);
+ return null;
+ }
+ }
+
+ // Webhook for call completion - triggers immediate queue processing
+ app.post('/call-ended', async (req, res) => {
+ try {
+ // Handle Vapi end-of-call-report webhook
+ const message = req.body.message;
+
+ if (message && message.type === 'end-of-call-report') {
+ const callId = message.call?.id;
+
+ const newActiveCalls = await decrementActiveCalls();
+ console.log(`Vapi call ${callId} ended. Active calls: ${newActiveCalls}/${MAX_CONCURRENCY}`);
+
+ // Immediately process queue when capacity becomes available
+ setImmediate(() => processQueue());
+ }
+
+ res.status(200).send('OK');
+ } catch (error) {
+ console.error('Error handling Vapi webhook:', error);
+ res.status(500).send('Error');
+ }
+ });
+
+ // Manual queue processing endpoint (for testing/monitoring)
+ app.post('/process-queue', async (req, res) => {
+ try {
+ await processQueue();
+ const activeCalls = await getActiveCalls();
+ const callsInQueue = await getCallsInQueue();
+
+ res.json({
+ message: 'Queue processing triggered',
+ activeCalls,
+ callsInQueue,
+ maxConcurrency: MAX_CONCURRENCY
+ });
+ } catch (error) {
+ console.error('Error in manual queue processing:', error);
+ res.status(500).json({ error: 'Failed to process queue' });
+ }
+ });
+
+ // Health check endpoint
+ app.get('/health', async (req, res) => {
+ try {
+ const activeCalls = await getActiveCalls();
+ const callsInQueue = await getCallsInQueue();
+
+ res.json({
+ status: 'healthy',
+ activeCalls,
+ callsInQueue,
+ maxConcurrency: MAX_CONCURRENCY,
+ availableCapacity: MAX_CONCURRENCY - activeCalls,
+ redis: redisClient.isOpen ? 'connected' : 'disconnected'
+ });
+ } catch (error) {
+ console.error('Error in health check:', error);
+ res.status(500).json({
+ status: 'error',
+ error: error.message,
+ redis: redisClient.isOpen ? 'connected' : 'disconnected'
+ });
+ }
+ });
+
+ // Graceful shutdown
+ process.on('SIGINT', async () => {
+ console.log('Shutting down gracefully...');
+ await redisClient.quit();
+ process.exit(0);
+ });
+
+ process.on('SIGTERM', async () => {
+ console.log('Shutting down gracefully...');
+ await redisClient.quit();
+ process.exit(0);
+ });
+
+ // Start server
+ async function startServer() {
+ await initializeRedis();
+
+ const PORT = process.env.PORT || 3000;
+ app.listen(PORT, () => {
+ console.log(`Queue management server running on port ${PORT}`);
+ console.log(`Max concurrency: ${MAX_CONCURRENCY}`);
+ console.log('Using callback-driven queue processing (no timers)');
+ });
+ }
+
+ startServer().catch(console.error);
+
+ module.exports = app;
+ ```
+
+
+
+ Configure your Vapi assistant to send end-of-call-report webhooks for accurate concurrency tracking.
+
+ **Assistant Configuration:**
+ You need to configure your assistant with proper webhook settings to receive call status updates.
+
+ ```javascript title="assistant-configuration.js"
+ const assistantConfig = {
+ name: "Queue Management Assistant",
+ // ... other assistant configuration
+
+ // Configure server URL for webhooks
+ server: {
+ url: "https://your-server.com",
+ timeoutSeconds: 20
+ },
+
+ // Configure which messages to send to your server
+ serverMessages: ["end-of-call-report", "status-update"]
+ };
+ ```
+
+
+ The webhook will be sent to your server URL with the message type `end-of-call-report` when calls end. This allows you to decrement your active call counter accurately. See the [Assistant API reference](https://docs.vapi.ai/api-reference/assistants/create#request.body.serverMessages) for all available server message types.
+
+
+ **Webhook Payload Example:**
+ Your `/call-ended` endpoint will receive a webhook with this structure:
+
+ ```json title="end-of-call-report-payload.json"
+ {
+ "message": {
+ "type": "end-of-call-report",
+ "call": {
+ "id": "73a6da0f-c455-4bb6-bf4a-5f0634871430",
+ "status": "ended",
+ "endedReason": "assistant-ended-call"
+ }
+ }
+ }
+ ```
+
+
+
+ Deploy your server and test the complete queue management flow.
+
+ **Start Your Server:**
+ ```bash
+ node server.js
+ ```
+
+ **Test Scenarios:**
+ 1. **Single call**: Call your Twilio number - should connect immediately
+ 2. **Multiple calls**: Make several simultaneous calls to test queuing
+ 3. **Capacity limit**: Make more calls than your `MAX_CONCURRENCY` setting
+ 4. **Queue processing**: Check that calls are processed as others end
+
+ **Monitor Queue Status:**
+ ```bash
+ # Check server health and capacity
+ curl https://your-server.com/health
+
+ # Manually trigger queue processing
+ curl -X POST https://your-server.com/process-queue
+ ```
+
+
+
+## Callback-Driven Queue Processing
+
+The system uses **event-driven queue processing** that responds immediately to capacity changes, eliminating the need for timers and preventing memory leaks:
+
+### How It Works
+
+- **Event-driven**: Queue processing is triggered by actual events (call start, call end)
+- **Redis persistence**: Call counters are stored in Redis, surviving server restarts and serverless deployments
+- **Immediate processing**: Uses `setImmediate()` to process queue as soon as capacity becomes available
+- **No timers**: Eliminates memory leak risks from long-running intervals
+- **Recursive processing**: Automatically processes multiple queued calls when capacity allows
+
+### Key Improvements
+
+
+
+ Queue processing happens immediately when calls end or arrive
+
+
+ Redis persistence works across serverless function invocations
+
+
+ No timers means no memory leaks from long-running processes
+
+
+ Counters survive server restarts and deployments
+
+
+
+### Architecture Benefits
+
+- **Event-driven triggers**: Processing occurs on actual state changes, not arbitrary intervals
+- **Persistent state**: Redis ensures counters are never lost, even in serverless environments
+- **Efficient resource usage**: No CPU cycles wasted on empty queue checks
+- **Immediate capacity utilization**: New calls are processed instantly when space becomes available
+- **Graceful degradation**: Redis connection failures are handled with proper error logging
+
+### Processing Triggers
+
+Queue processing is automatically triggered when:
+
+1. **New call arrives** → `setImmediate(() => processQueue())` after adding to queue
+2. **Call ends** → `setImmediate(() => processQueue())` after decrementing active count
+3. **Successful processing** → Recursively processes more calls if capacity and queue allow
+
+
+Redis is required for this implementation. Ensure your Redis instance is properly configured and accessible from your deployment environment.
+
+
+## Troubleshooting
+
+
+
+ **Common causes:**
+ - Redis server not running or unreachable
+ - Incorrect `REDIS_URL` configuration
+ - Network connectivity issues in production
+
+ **Solutions:**
+ - Test Redis connection: `redis-cli ping` (should return PONG)
+ - Verify `REDIS_URL` format matches your provider
+ - Check firewall rules and security groups
+ - Monitor Redis logs for authentication errors
+
+ **Health check endpoint shows Redis status:**
+ ```bash
+ curl https://your-server.com/health
+ # Check "redis" field in response
+ ```
+
+
+
+ **Common causes:**
+ - Server not receiving call-ended webhooks (check webhook URLs)
+ - Redis counter desync (rare, but possible)
+ - Vapi API errors (check API key and assistant ID)
+
+ **Solutions:**
+ - Verify webhook URLs are publicly accessible
+ - Check Redis counters: `redis-cli get vapi:queue:active_calls`
+ - Reset counters manually if needed: `redis-cli set vapi:queue:active_calls 0`
+ - Test Vapi API calls independently
+
+ **Debug Redis state:**
+ ```bash
+ # Check current counter values
+ redis-cli mget vapi:queue:active_calls vapi:queue:calls_in_queue
+ ```
+
+
+
+ **Check these items:**
+ - `MAX_CONCURRENCY` setting is appropriate for your Vapi plan
+ - Redis counters are accurate (compare with actual Twilio queue)
+ - No errors in Vapi TwiML generation
+
+ **Debug steps:**
+ - Call `/process-queue` endpoint manually
+ - Check `/health` endpoint for current capacity and Redis status
+ - Review server logs for Redis connection errors
+ - Verify queue processing triggers are firing
+
+
+
+ **Serverless-specific considerations:**
+ - Use connection pooling for Redis (Upstash recommended)
+ - Cold starts may cause initial Redis connection delays
+ - Function timeout limits may interrupt long-running operations
+
+ **Solutions:**
+ - Configure appropriate function timeout (30+ seconds)
+ - Use Redis providers optimized for serverless (Upstash)
+ - Implement connection retry logic
+ - Monitor function execution logs for timeout errors
+
+
+
+ **Potential issues:**
+ - Invalid phone number format (use E.164 format)
+ - Incorrect Vapi configuration (phone number ID, assistant ID)
+ - Network timeouts during TwiML generation
+ - Redis operations timing out
+
+ **Solutions:**
+ - Validate all phone numbers before processing
+ - Add timeout handling to API calls and Redis operations
+ - Implement retry logic for failed Vapi requests
+ - Monitor Redis response times
+
+
+
+ **Production considerations:**
+ - Redis connection pooling for high-traffic scenarios
+ - Monitor Redis memory usage and eviction policies
+ - Consider Redis clustering for extreme scale
+ - Implement circuit breakers for external API calls
+
+ **Monitoring recommendations:**
+ - Track Redis connection health
+ - Monitor queue processing latency
+ - Alert on Redis counter anomalies
+ - Log all state transitions for debugging
+
+
+
+## Next steps
+
+Now that you have a production-ready call queue system with Redis persistence and callback-driven processing:
+
+- **[Advanced Call Features](/calls/call-features):** Explore call recording, analysis, and advanced routing options
+- **[Monitoring & Analytics](/assistants/call-analysis):** Set up comprehensive call analytics and performance monitoring
+- **[Scaling Considerations](/enterprise/plans):** Learn about enterprise features for high-volume deployments
+- **[Assistant Optimization](/assistants/personalization):** Enhance your assistants with personalization and dynamic variables
+
+
+Consider implementing health checks, metrics collection, and alerting around your Redis counters and queue processing latency for production monitoring.
+
\ No newline at end of file
diff --git a/fern/calls/customer-join-timeout.mdx b/fern/calls/customer-join-timeout.mdx
index 486b7b862..4866366fa 100644
--- a/fern/calls/customer-join-timeout.mdx
+++ b/fern/calls/customer-join-timeout.mdx
@@ -1,5 +1,5 @@
---
-title: Customer join timeout
+title: Customer Join Timeout
subtitle: Configure web call join timeout for better success rates
slug: calls/customer-join-timeout
description: Set maximum time for users to join web calls before automatic termination
@@ -7,7 +7,7 @@ description: Set maximum time for users to join web calls before automatic termi
## Overview
-**Customer join timeout** sets the maximum time users have to join a web call before it's automatically terminated. This parameter helps you optimize call success rates by accounting for real-world connection challenges.
+**Customer Join Timeout** sets the maximum time users have to join a web call before it's automatically terminated. This parameter helps you optimize call success rates by accounting for real-world connection challenges.
**You'll learn to:**
@@ -269,7 +269,7 @@ A user attempting to join needs:
Start with 30-60 seconds and adjust based on your success rate analytics.
-### "Meeting has ended" message
+### Meeting has ended message
This message appears when a call ends naturally and is **informational only**—not an error.
diff --git a/fern/calls/troubleshoot-call-errors.mdx b/fern/calls/troubleshoot-call-errors.mdx
new file mode 100644
index 000000000..0c0965adc
--- /dev/null
+++ b/fern/calls/troubleshoot-call-errors.mdx
@@ -0,0 +1,270 @@
+---
+title: Troubleshoot call errors
+subtitle: Learn to diagnose failed calls based on what the caller experienced.
+slug: calls/troubleshoot-call-errors
+---
+
+## Overview
+
+When a call fails, the fastest path to a fix is identifying **what the caller experienced**. This guide organizes errors by symptom so you can jump to the right section and resolve the issue.
+
+**In this guide, you'll learn to:**
+
+- Match caller-reported symptoms to specific error codes
+- Understand the fault classification system (`vapifault` vs `providerfault`)
+- Take the right corrective action for each error category
+
+
+This guide explains errors by symptom. For a complete reference of every `endedReason` code, see [Call end reasons](/calls/call-ended-reason).
+
+
+## Start here: identify the symptom
+
+
+
+ Call failed immediately — no ring on the customer's end
+
+
+ Phone rang but was never picked up, or line was busy
+
+
+ Caller was talking, then the line went dead abruptly
+
+
+ Call connected but the assistant stopped speaking or responding
+
+
+ Assistant attempted a transfer but it didn't go through
+
+
+ Call worked as expected — someone or something decided it should end
+
+
+
+## Phone never rang
+
+**What the caller experiences:** Nothing. The phone never rings. For web calls, the connection fails immediately.
+
+**What you see in the dashboard:** The call object is created with status `ended` almost immediately. Duration is zero or near-zero. No transcript.
+
+
+
+ These are the most common cause of calls failing before they start.
+
+ | Error code | Meaning | Fix |
+ |---|---|---|
+ | `call.start.error-subscription-frozen` | Payment failed, subscription frozen | Update payment method in [dashboard](https://dashboard.vapi.ai/) |
+ | `call.start.error-subscription-insufficient-credits` | Not enough credits | Add credits or enable auto-reload |
+ | `call.start.error-subscription-concurrency-limit-reached` | Too many simultaneous calls | Upgrade plan or wait for active calls to end |
+ | `call.start.error-fraud-check-failed` | Blocked by fraud detection | Contact [support](/support) |
+ | `call.start.error-subscription-wallet-does-not-exist` | No billing wallet found | Contact [support](/support) |
+
+
+
+ The call couldn't start because something is missing or misconfigured.
+
+ | Error code | Meaning | Fix |
+ |---|---|---|
+ | `assistant-not-found` | Assistant ID doesn't exist | Verify the assistant ID in your [dashboard](https://dashboard.vapi.ai/) |
+ | `assistant-not-valid` | Assistant configuration is invalid | Check required fields on the assistant |
+ | `call-start-error-neither-assistant-nor-server-set` | No assistant or server URL configured | Set an `assistantId` or `serverUrl` on the call |
+ | `call.start.error-get-assistant` | Error fetching the assistant | Verify the assistant ID exists and your API key is correct |
+ | `call.start.error-get-phone-number` | Error fetching the phone number | Verify the number is imported and active |
+ | `call.start.error-get-resources-validation` | Resources failed validation | Check assistant, tools, and provider configurations |
+ | `call.start.error-vapi-number-international` | International calling not supported | Use a number that supports international calling |
+ | `call.start.error-vapi-number-outbound-daily-limit` | Daily outbound limit reached | Wait until the limit resets or use a different number |
+
+
+
+ If you use a server URL to dynamically provide an assistant, these errors mean your server didn't respond correctly.
+
+ | Error code | Meaning | Fix |
+ |---|---|---|
+ | `assistant-request-failed` | Request to your server URL failed | Check your server is running and reachable |
+ | `assistant-request-returned-error` | Server returned an error response | Check your server logs for the error |
+ | `assistant-request-returned-invalid-assistant` | Server returned invalid assistant config | Validate the response matches the [assistant schema](/api-reference/assistants/create) |
+ | `assistant-request-returned-no-assistant` | Server returned an empty response | Ensure your server returns an assistant object |
+ | `assistant-request-returned-unspeakable-error` | Server returned a non-speakable error | Return a user-friendly error message |
+
+
+
+ These indicate a problem on Vapi's side. You are typically not charged.
+
+ | Error code | Meaning | Fix |
+ |---|---|---|
+ | `call.in-progress.error-vapifault-transport-never-connected` | Transport never connected | Retry. Contact [support](/support) if persistent. |
+ | `call.in-progress.error-vapifault-worker-not-available` | No call worker available | Retry. This is a transient capacity issue. |
+ | `call.start.error-vapifault-database-error` | Internal database error | Retry. Contact [support](/support) if persistent. |
+ | `call.start.error-get-org` | Error fetching organization | Verify your API key is correct |
+
+
+
+## Phone rang but nobody answered
+
+**What the caller experiences:** The phone rings but nobody picks up, or they hear a busy signal.
+
+**What you see in the dashboard:** Short duration, no transcript, no messages.
+
+| Error code | Meaning | What to do |
+|---|---|---|
+| `customer-did-not-answer` | Callee didn't pick up (outbound) | Normal behavior. For IVR use cases, check your voicemail detection settings. |
+| `customer-busy` | Line was busy (outbound) | Normal behavior. Retry later. |
+| `customer-did-not-give-microphone-permission` | User denied mic access (web calls) | Ensure your UI requests microphone permissions before starting the call. |
+| `call.ringing.sip-inbound-caller-hungup-before-call-connect` | SIP caller hung up during ringing | Normal behavior — caller abandoned before pickup. |
+
+
+For outbound calls where you expect to reach an IVR or automated system, configure your [voicemail detection](/calls/voicemail-detection) settings to prevent the call from ending prematurely.
+
+
+## Call dropped mid-conversation
+
+**What the caller experiences:** They're in the middle of a conversation and the call suddenly cuts off with no warning. The assistant stops speaking and the line goes dead.
+
+**What you see in the dashboard:** Partial transcript, `messages` array that ends abruptly, non-zero duration.
+
+
+
+ These are on Vapi's side. You are typically not charged. Most are transient.
+
+ | Error code | Meaning |
+ |---|---|
+ | `call.in-progress.error-vapifault-worker-died` | The Vapi process handling the call crashed |
+ | `call.in-progress.error-vapifault-transport-connected-but-call-not-active` | Transport connected but call was no longer active |
+ | `call.in-progress.error-vapifault-call-started-but-connection-to-transport-missing` | Transport connection was lost after call started |
+ | `worker-shutdown` | A Vapi deployment occurred while the call was active |
+
+ **What to do:** These are transient issues. If `worker-died` errors are frequent, contact [support](/support) with the affected `call_id` values.
+
+
+
+ The telephony provider (Twilio, Vonage, or your SIP trunk) dropped the connection.
+
+ | Error code | Meaning |
+ |---|---|
+ | `phone-call-provider-closed-websocket` | Audio WebSocket between Vapi and the provider broke |
+ | `call.in-progress.error-providerfault-transport-never-connected` | Provider failed to maintain the connection |
+ | `call.in-progress.twilio-completed-call` | Twilio ended the call from their side |
+ | `call.in-progress.sip-completed-call` | SIP provider ended the call from their side |
+ | `vonage-disconnected` | Vonage disconnected the call |
+
+ **What to do:** Check your telephony provider's dashboard for connection logs. For SIP trunks, verify your network connectivity to Vapi's SBC.
+
+
+
+## Assistant went silent or unresponsive
+
+**What the caller experiences:** The call is connected and the line is open, but the assistant either doesn't speak, speaks with extreme delay, responds once then stops, or produces garbled audio. The call eventually times out or the caller hangs up in frustration.
+
+**What you see in the dashboard:** Partial messages; the `endedReason` points to a specific pipeline component failure.
+
+
+If you've configured **fallback providers**, some transcriber and voice errors will trigger a provider swap instead of ending the call. The caller might hear a brief 1-2 second pause while the fallback initializes, then the conversation continues normally.
+
+
+
+
+ The AI model that generates responses is unreachable or returning errors.
+
+ | Status code pattern | Meaning | Fix |
+ |---|---|---|
+ | `*-401-*` / `*-incorrect-api-key` | Invalid API key | Verify your API key for this provider |
+ | `*-403-*` / `*-model-access-denied` | Model access denied | Check model permissions in your provider account |
+ | `*-429-*` / `*-exceeded-quota` | Rate limit or quota hit | Upgrade your plan with the provider or reduce volume |
+ | `*-500-*` / `*-server-error` | Provider internal error | Retry. Check the provider's [status page](https://status.openai.com/) |
+ | `*-503-*` / `*-server-overloaded` | Provider overloaded | Retry after a brief wait |
+ | `*-llm-failed` | Generic LLM failure | Check call logs for the detailed error message |
+ | `pipeline-no-available-llm-model` | No LLM model available | Check your model configuration |
+
+
+
+ The text-to-speech service can't produce audio. The assistant "thinks" but can't speak.
+
+ | Pattern | Meaning | Fix |
+ |---|---|---|
+ | `*-voice-failed` | Generic synthesis failure | Check call logs. May be a transient provider issue. |
+ | `*-voice-not-found` / `*-invalid-voice` | Voice ID doesn't exist | Verify the voice ID in your provider account |
+ | `*-quota-exceeded` / `*-out-of-credits` | Voice provider credits exhausted | Add credits to your voice provider account |
+ | `*-unauthorized-access` / `*-invalid-api-key` | Bad voice provider credentials | Verify your API key for this provider |
+ | `*-500-*` / `*-503-*` | Provider infrastructure issue | Retry. Check the provider's status page. |
+
+
+
+ The speech-to-text service can't hear the caller. The assistant can speak but can't understand input.
+
+ | Pattern | Meaning | Fix |
+ |---|---|---|
+ | `*-transcriber-failed` | Generic transcriber failure | Check call logs for details |
+ | `*-returning-400-*` | Bad request (invalid model/language) | Check your transcriber model and language configuration |
+ | `*-returning-401-*` | Invalid transcriber credentials | Verify your API key for this provider |
+ | `*-returning-403-*` | Model access denied | Check model permissions in your provider account |
+ | `*-returning-500-*` / `*-returning-502-*` | Provider server error | Retry. Check the provider's status page. |
+
+
+
+
+To prevent provider outages from killing your calls, configure fallback providers for your transcriber, voice, and model. Non-fatal errors will trigger a provider swap instead of ending the call.
+
+
+## Transfer failed
+
+**What the caller experiences:** The assistant says it's transferring the call, but the transfer doesn't go through. The caller may hear silence, get disconnected, or return to the original assistant (for warm transfers).
+
+**What you see in the dashboard:** Transcript shows the transfer attempt, followed by the error.
+
+
+
+ | Error code | Meaning | Fix |
+ |---|---|---|
+ | `call.in-progress.error-transfer-failed` | Transfer attempt failed | Verify the destination number is correct and reachable |
+ | `call.in-progress.error-warm-transfer-max-duration` | Warm transfer exceeded max duration | Increase the warm transfer timeout or check if the destination is answering |
+ | `call.in-progress.error-warm-transfer-assistant-cancelled` | Transfer assistant cancelled | Check the transfer assistant's configuration |
+ | `call.in-progress.error-warm-transfer-silence-timeout` | Silence during warm transfer | Verify the transfer destination is responding with audio |
+ | `call.in-progress.error-warm-transfer-microphone-timeout` | Mic timeout during warm transfer | Check audio connectivity to the transfer destination |
+
+
+
+ | Error code | Meaning | Fix |
+ |---|---|---|
+ | `*-outbound-sip-403-forbidden` | SIP provider rejected the call | Check your SIP trunk credentials and allowed destinations |
+ | `*-outbound-sip-407-proxy-authentication-required` | SIP auth required | Configure proxy authentication on your SIP trunk |
+ | `*-outbound-sip-408-request-timeout` | SIP request timed out | Check network connectivity to the SIP destination |
+ | `*-outbound-sip-480-temporarily-unavailable` | SIP destination unavailable | Verify the destination is online and accepting calls |
+ | `*-outbound-sip-503-service-unavailable` | SIP service unavailable | Check the SIP provider's service status |
+
+
+
+ | Error code | Meaning | Fix |
+ |---|---|---|
+ | `twilio-failed-to-connect-call` | Twilio couldn't connect the transfer | Check the destination number format and Twilio geo permissions |
+ | `vonage-failed-to-connect-call` | Vonage couldn't connect the transfer | Check the destination number and Vonage configuration |
+ | `vonage-rejected` | Vonage rejected the transfer | Check Vonage configuration and allowed destinations |
+
+
+
+For a detailed transfer debugging walkthrough, see [Debug forwarding drops](/calls/troubleshoot-call-forwarding-drops).
+
+## Call ended normally
+
+These are not errors — they indicate the call ended as expected.
+
+| Error code | Meaning | Adjust if needed |
+|---|---|---|
+| `assistant-ended-call` | Assistant ended the call via a tool or function | Expected behavior |
+| `assistant-ended-call-after-message-spoken` | Assistant spoke its final message and ended | Expected behavior |
+| `assistant-ended-call-with-hangup-task` | Assistant used a hangup workflow node | Expected behavior |
+| `assistant-said-end-call-phrase` | Assistant said a configured end-call phrase | Check your end-call phrases if calls end too early |
+| `assistant-forwarded-call` | Assistant transferred the call | Expected behavior |
+| `customer-ended-call` | Customer hung up | Expected behavior |
+| `exceeded-max-duration` | Hit `maxDurationSeconds` | Increase `maxDurationSeconds` if calls are being cut short |
+| `silence-timed-out` | Silence timeout | Increase `silenceTimeoutSeconds` if the timeout is too aggressive |
+| `voicemail` | Call went to voicemail | Configure [voicemail detection](/calls/voicemail-detection) settings |
+| `manually-canceled` | Canceled via API or dashboard | Expected behavior |
+| `vonage-completed` | Vonage reported call completed | Expected behavior |
+
+## Next steps
+
+- **[Call end reasons](/calls/call-ended-reason):** Complete reference of every `endedReason` code.
+- **[Debugging voice agents](/debugging):** General debugging workflow using dashboard tools, logs, and test suites.
+- **[Debug forwarding drops](/calls/troubleshoot-call-forwarding-drops):** Deep dive into transfer failures.
+- **[Troubleshoot SIP trunk errors](/advanced/sip/troubleshoot-sip-trunk-credential-errors):** Resolve SIP credential validation failures.
+- **[How to report issues](/issue-reporting):** Include your `call_id` and account email when contacting support.
diff --git a/fern/calls/websocket-transport.mdx b/fern/calls/websocket-transport.mdx
index c3accb0dc..7ac27279a 100644
--- a/fern/calls/websocket-transport.mdx
+++ b/fern/calls/websocket-transport.mdx
@@ -4,8 +4,6 @@ description: Stream audio directly via WebSockets for real-time, bidirectional c
slug: calls/websocket-transport
---
-# WebSocket Transport
-
Vapi's WebSocket transport enables real-time, bidirectional audio communication directly between your application and Vapi's AI assistants. Unlike traditional phone or web calls, this transport method lets you stream raw audio data instantly with minimal latency.
## Key Benefits
@@ -20,6 +18,8 @@ Vapi's WebSocket transport enables real-time, bidirectional audio communication
To initiate a call using WebSocket transport:
+### PCM Format (16-bit, default)
+
```bash
curl 'https://api.vapi.ai/call' \
-H 'authorization: Bearer YOUR_API_KEY' \
@@ -37,6 +37,25 @@ curl 'https://api.vapi.ai/call' \
}'
```
+### Mu-Law Format
+
+```bash
+curl 'https://api.vapi.ai/call' \
+ -H 'authorization: Bearer YOUR_API_KEY' \
+ -H 'content-type: application/json' \
+ --data-raw '{
+ "assistantId": "YOUR_ASSISTANT_ID",
+ "transport": {
+ "provider": "vapi.websocket",
+ "audioFormat": {
+ "format": "mulaw",
+ "container": "raw",
+ "sampleRate": 8000
+ }
+ }
+ }'
+```
+
### Sample API Response
```json
@@ -63,13 +82,25 @@ When creating a WebSocket call, the audio format can be customized:
| Parameter | Description | Default |
|-------------|-------------------------|---------------------|
| `format` | Audio encoding format | `pcm_s16le` (16-bit PCM) |
-| `container` | Audio container format | `raw` (Raw PCM) |
-| `sampleRate`| Sample rate in Hz | `16000` (16kHz) |
+| `container` | Audio container format | `raw` (Raw audio) |
+| `sampleRate`| Sample rate in Hz | `16000` for PCM, `8000` for Mu-Law |
+
+### Supported Audio Formats
+
+Vapi supports the following audio formats:
+
+- **`pcm_s16le`**: 16-bit PCM, signed little-endian (default)
+- **`mulaw`**: Mu-Law encoded audio (ITU-T G.711 standard)
-Currently, Vapi supports only raw PCM (`pcm_s16le` with `raw` container). Additional formats may be supported in future updates.
+Both formats use the `raw` container format for direct audio streaming.
+
+### Format Selection Guidelines
+
+- **PCM (`pcm_s16le`)**: Higher quality audio, larger bandwidth usage. Ideal for high-quality applications.
+- **Mu-Law (`mulaw`)**: Lower bandwidth, telephony-standard encoding. Ideal for telephony integrations and bandwidth-constrained environments.
-Vapi automatically converts sample rates as needed. You can stream audio at 8kHz, 44.1kHz, etc., and Vapi will handle conversions seamlessly.
+Vapi automatically converts sample rates as needed. You can stream audio at 8kHz, 44.1kHz, etc., and Vapi will handle conversions seamlessly. The system also handles format conversions internally when needed.
## Connecting to the WebSocket
@@ -88,9 +119,16 @@ socket.onerror = (error) => console.error("WebSocket error:", error);
The WebSocket supports two types of messages:
-- **Binary audio data** (PCM, 16-bit signed little-endian)
+- **Binary audio data** (format depends on your configuration: PCM or Mu-Law)
- **Text-based JSON control messages**
+### Audio Data Format
+
+The binary audio data format depends on your `audioFormat` configuration:
+
+- **PCM (`pcm_s16le`)**: 16-bit signed little-endian samples
+- **Mu-Law (`mulaw`)**: 8-bit Mu-Law encoded samples (ITU-T G.711)
+
### Sending Audio Data
```javascript
@@ -158,10 +196,12 @@ function hangupCall() {
## Ending the Call
-To gracefully end the WebSocket call:
+The recommended way to end a call is using [Live Call Control](/calls/call-features#end-call) which provides more control and proper cleanup.
+
+Alternatively, you can end the WebSocket call directly:
```javascript
-sendControlMessage({ type: "hangup" });
+sendControlMessage({ type: "end-call" });
socket.close();
```
diff --git a/fern/changelog/2025-02-27.mdx b/fern/changelog/2025-02-27.mdx
index d8cf366b1..27a6b6c50 100644
--- a/fern/changelog/2025-02-27.mdx
+++ b/fern/changelog/2025-02-27.mdx
@@ -7,7 +7,7 @@ Configuration options:
{
"keypadInputPlan": {
"enabled": true, // Default: false
- "delimiters": "#", // Options: "#", "*", or "" (empty string)
+ "delimiters": ["#"], // Options: ["#"], ["*"], [""]
"timeoutSeconds": 2 // Range: 0.5-10 seconds, Default: 2
}
}
diff --git a/fern/changelog/2025-08-28.mdx b/fern/changelog/2025-08-28.mdx
new file mode 100644
index 000000000..6aeaa9540
--- /dev/null
+++ b/fern/changelog/2025-08-28.mdx
@@ -0,0 +1 @@
+1. **End AI call transfers after set timeout period**: You can now configure [AI-managed transfers](https://docs.vapi.ai/call-forwarding#7-assistant-based-warm-transfer-experimental) with a [Transfer Assistant](https://api.vapi.ai/api#:~:text=TransferAssistant) to automatically end the call after a specified period of silence with `silenceTimeoutSeconds` (default 30 seconds). This helps prevent idle calls from lingering and saves costs.
diff --git a/fern/changelog/2025-08-29.mdx b/fern/changelog/2025-08-29.mdx
new file mode 100644
index 000000000..8341591a7
--- /dev/null
+++ b/fern/changelog/2025-08-29.mdx
@@ -0,0 +1,5 @@
+1. **Per-Artifact Storage Routing in [Artifact Plans](https://api.vapi.ai/api#:~:text=ArtifactPlan)**: You can now override artifact storage behavior per assistant/call for SIP packet capture (PCAP), logging, and call recording artifacts:
+
+- `Assistant.artifactPlan.pcapUseCustomStorageEnabled` (default true): Use custom storage for SIP packet captures, which are stored at `Assistant.artifactPlan.pcapUrl`.
+- `Assistant.artifactPlan.loggingUseCustomStorageEnabled` (default true): Determines whether to use your custom storage (S3 or GCP) for call logs when storage credentials are configured; set to false to store logs on Vapi's storage for this assistant, even if custom storage is set globally.
+- `Assistant.artifactPlan.recordingUseCustomStorageEnabled` (default true): Determines whether to use your custom storage (S3 or GCP) for call recordings when storage credentials are configured; set to false to store recordings on Vapi's storage for this assistant, even if custom storage is set globally.
diff --git a/fern/changelog/2025-08-30.mdx b/fern/changelog/2025-08-30.mdx
new file mode 100644
index 000000000..5fac2a92d
--- /dev/null
+++ b/fern/changelog/2025-08-30.mdx
@@ -0,0 +1,17 @@
+# Enhanced Authentication & Custom Credentials
+
+1. **Custom Credential System**: You can now create and manage custom authentication credentials using the new [`CustomCredential`](https://api.vapi.ai/api#:~:text=CustomCredential) system. This powerful new feature supports multiple authentication methods:
+ - **OAuth2 RFC 6749**: Full OAuth2 implementation for secure third-party integrations
+ - **HMAC Signing**: Cryptographic message authentication for enhanced security
+ - **Bearer Token**: Simple token-based authentication for API access
+
+2. **Bearer Authentication Plans**: Implement secure token-based authentication with [`BearerAuthenticationPlan`](https://api.vapi.ai/api#:~:text=BearerAuthenticationPlan). Key features include:
+ - `token`: Your secure bearer token value
+ - `headerName`: Custom header name (defaults to 'Authorization')
+ - `bearerPrefixEnabled`: Toggle 'Bearer ' prefix inclusion (defaults to true)
+
+3. **Enhanced Webhook Credentials**: Webhook integrations now support advanced authentication through [`WebhookCredential.authenticationPlan`](https://api.vapi.ai/api#:~:text=WebhookCredential.authenticationPlan), enabling secure webhook communications with OAuth2, HMAC, or Bearer authentication.
+
+4. **Server Authentication**: Secure your server endpoints with credential-based authentication using [`Server.credentialId`](https://api.vapi.ai/api#:~:text=Server.credentialId) to link your custom credentials to webhook destinations.
+
+5. **Tool Authentication Integration**: API request tools can now use custom credentials for secure external API calls via [`ApiRequestTool.credentialId`](https://api.vapi.ai/api#:~:text=ApiRequestTool.credentialId), eliminating the need to embed sensitive authentication details directly in tool configurations.
diff --git a/fern/changelog/2025-09-02.mdx b/fern/changelog/2025-09-02.mdx
new file mode 100644
index 000000000..9ebf516cf
--- /dev/null
+++ b/fern/changelog/2025-09-02.mdx
@@ -0,0 +1,36 @@
+# Recording Consent & Compliance Management
+
+1. **Recording Consent Plans**: Ensure legal compliance with call recording regulations using the new [`CompliancePlan.recordingConsentPlan`](https://api.vapi.ai/api#:~:text=CompliancePlan.recordingConsentPlan). This feature helps you meet GDPR, CCPA, and other privacy regulations by properly obtaining user consent before recording calls.
+
+2. **Verbal Consent Collection**: Implement active consent collection with [`RecordingConsentPlanVerbal`](https://api.vapi.ai/api#:~:text=RecordingConsentPlanVerbal) where users explicitly agree or decline recording:
+ - `message`: Custom consent message (e.g., "This call may be recorded for quality purposes. Say 'I agree' to consent.")
+ - `voice`: Optional dedicated voice for consent messages for better user experience
+ - `declineTool`: Execute specific tools when users decline consent
+ - `declineToolId`: Reference existing tools for decline handling
+
+3. **Stay-on-Line Consent**: Use passive consent collection with [`RecordingConsentPlanStayOnLine`](https://api.vapi.ai/api#:~:text=RecordingConsentPlanStayOnLine) where staying on the call implies consent:
+ - `message`: Informational message about recording (e.g., "For quality purposes, this call may be recorded. Please hang up if you do not consent.")
+ - `waitSeconds`: Configurable wait time (1-6 seconds) before proceeding
+ - `voice`: Optional separate voice for consent announcements
+
+4. **Recording Consent Tracking**: Monitor consent status throughout the call lifecycle with [`Call.compliance.recordingConsent`](https://api.vapi.ai/api#:~:text=Call.compliance.recordingConsent):
+ - `type`: The type of consent obtained
+ - `grantedAt`: Timestamp when consent was granted (null if not granted)
+
+5. **Enhanced End-of-Call Reports**: Recording consent information is now included in [`ServerMessageEndOfCallReport.compliance`](https://api.vapi.ai/api#:~:text=ServerMessageEndOfCallReport.compliance), providing complete compliance audit trails for your records.
+
+## Compliance Features
+
+
+ Meet GDPR, CCPA, and other privacy regulations with built-in consent management and audit trails.
+
+
+ Choose between verbal consent requiring explicit agreement or stay-on-line consent with implied agreement.
+
+
+ Customize consent messages to match your brand voice and legal requirements with up to 1000 characters.
+
+
+ Complete compliance records with timestamps and consent status in call artifacts and end-of-call reports.
+
+
\ No newline at end of file
diff --git a/fern/changelog/2025-09-05.mdx b/fern/changelog/2025-09-05.mdx
new file mode 100644
index 000000000..3fca7b352
--- /dev/null
+++ b/fern/changelog/2025-09-05.mdx
@@ -0,0 +1,47 @@
+# Evaluation System Foundation
+
+1. **Evaluation Framework**: You can now systematically test your Vapi voice assistants with the new [`Eval`](https://api.vapi.ai/api#:~:text=Eval) system. Create comprehensive test scenarios to validate assistant behavior, conversation flow, and tool usage through mock conversations.
+
+2. **Mock Conversation Builder**: Design test conversations using [`Eval.messages`](https://api.vapi.ai/api#:~:text=Eval.messages) with support for multiple message types:
+ - [`ChatEvalUserMessageMock`](https://api.vapi.ai/api#:~:text=ChatEvalUserMessageMock): Simulate user inputs and questions
+ - [`ChatEvalSystemMessageMock`](https://api.vapi.ai/api#:~:text=ChatEvalSystemMessageMock): Inject system messages mid-conversation
+ - [`ChatEvalToolResponseMessageMock`](https://api.vapi.ai/api#:~:text=ChatEvalToolResponseMessageMock): Mock tool responses for consistent testing
+ - [`ChatEvalAssistantMessageEvaluation`](https://api.vapi.ai/api#:~:text=ChatEvalAssistantMessageEvaluation): Define evaluation checkpoints
+
+3. **Evaluation Types**: Currently focused on `chat.mockConversation` type evaluations, with the framework designed to support additional evaluation methods in future releases.
+
+4. **Evaluation Management**: Organize your tests with [`CreateEvalDTO`](https://api.vapi.ai/api#:~:text=CreateEvalDTO) and [`UpdateEvalDTO`](https://api.vapi.ai/api#:~:text=UpdateEvalDTO):
+ - `name`: Descriptive names up to 80 characters (e.g., "Customer Support Flow Validation")
+ - `description`: Detailed descriptions up to 500 characters explaining the test purpose
+ - `messages`: The complete mock conversation flow
+
+5. **Evaluation Endpoints**: Access your evaluations through the new [`/eval`](https://api.vapi.ai/api#:~:text=/eval) endpoint family:
+ - `GET /eval`: List all evaluations with pagination support
+ - `POST /eval`: Create new evaluations
+ - `GET /eval/{id}`: Retrieve specific evaluation details
+ - `PUT /eval/{id}`: Update existing evaluations
+
+6. **Judge Plan Architecture**: Define how assistant responses are validated using [`AssistantMessageJudgePlan`](https://api.vapi.ai/api#:~:text=AssistantMessageJudgePlan) with three evaluation methods:
+ - **Exact Match**: [`AssistantMessageJudgePlanExact`](https://api.vapi.ai/api#:~:text=AssistantMessageJudgePlanExact) for precise content and tool call validation
+ - **Regex Pattern**: [`AssistantMessageJudgePlanRegex`](https://api.vapi.ai/api#:~:text=AssistantMessageJudgePlanRegex) for flexible pattern-based evaluation
+ - **AI Judge**: [`AssistantMessageJudgePlanAI`](https://api.vapi.ai/api#:~:text=AssistantMessageJudgePlanAI) for intelligent evaluation using LLM-as-a-judge
+
+
+ This is the foundation release for the evaluation system. Evaluation execution and results processing will be available in upcoming releases. Start designing your test scenarios now to be ready for full evaluation capabilities.
+
+
+## Testing Capabilities
+
+
+ Create realistic test scenarios with user messages, system prompts, and expected assistant responses for comprehensive flow validation.
+
+
+ Validate that your assistant calls the right tools with correct parameters using `ChatEvalAssistantMessageMockToolCall`.
+
+
+ Choose from exact matching, regex patterns, or AI-powered evaluation to suit different testing needs and complexity levels.
+
+
+ Organize tests with descriptive names and detailed documentation to maintain clear testing workflows across your team.
+
+
\ No newline at end of file
diff --git a/fern/changelog/2025-09-08.mdx b/fern/changelog/2025-09-08.mdx
new file mode 100644
index 000000000..5b2441c95
--- /dev/null
+++ b/fern/changelog/2025-09-08.mdx
@@ -0,0 +1,44 @@
+# Enhanced Transcription Features & Speech Processing
+
+1. **Gladia Transcription Enhancements**: Improve transcription accuracy and performance with new [`GladiaTranscriber`](https://api.vapi.ai/api#:~:text=GladiaTranscriber) features:
+ - `region`: Choose between `us-west` and `eu-west` for optimal latency and data residency compliance
+ - `receivePartialTranscripts`: Enable low-latency streaming transcription for real-time conversation flow
+ - Enhanced language detection with support for both single and multiple language modes
+
+2. **Advanced Deepgram Controls**: Fine-tune speech recognition with enhanced [`DeepgramTranscriber`](https://api.vapi.ai/api#:~:text=DeepgramTranscriber) settings:
+ - `eotThreshold`: End-of-turn detection threshold for precise conversation boundaries (e.g., 0.7)
+ - `eotTimeoutMs`: Maximum wait time for end-of-turn detection in milliseconds (e.g., 5000ms)
+ - `eagerEotThreshold`: Early end-of-turn detection for responsive conversations (e.g., 0.3)
+
+3. **AssemblyAI Keyterms Enhancement**: Boost recognition accuracy for critical terms with [`AssemblyAITranscriber.keytermsPrompt`](https://api.vapi.ai/api#:~:text=AssemblyAITranscriber.keytermsPrompt):
+ - Support for up to 100 keyterms, each up to 50 characters
+ - Improved recognition for specific words and phrases
+ - Additional cost: $0.04/hour when enabled
+
+4. **Speechmatics Custom Vocabulary**: Enhance recognition accuracy with [`SpeechmaticsCustomVocabularyItem`](https://api.vapi.ai/api#:~:text=SpeechmaticsCustomVocabularyItem):
+ - `content`: The word or phrase to add (e.g., "Speechmatics")
+ - `soundsLike`: Alternative phonetic representations (e.g., ["speech mattix"]) for better pronunciation handling
+
+5. **Word-Level Confidence**: Access detailed transcription confidence data with [`CustomLLMModel.wordLevelConfidenceEnabled`](https://api.vapi.ai/api#:~:text=CustomLLMModel.wordLevelConfidenceEnabled), providing word-by-word accuracy metrics for quality assessment and debugging.
+
+6. **Enhanced Message Metadata**: Store transcription confidence and other metadata in [`UserMessage.metadata`](https://api.vapi.ai/api#:~:text=UserMessage.metadata), enabling detailed analysis of transcription quality and user speech patterns.
+
+
+ `AssemblyAITranscriber.wordFinalizationMaxWaitTime` is now deprecated. Use the new smart endpointing plans for better speech timing control. The deprecated property will be removed in a future release.
+
+
+## Transcription Improvements
+
+
+ Choose optimal transcription regions with Gladia's us-west and eu-west options for reduced latency and compliance.
+
+
+ Enable partial transcripts for immediate response processing, reducing perceived latency in conversations.
+
+
+ Fine-tune end-of-turn detection with configurable thresholds and timeouts for natural conversation flow.
+
+
+ Improve accuracy for domain-specific terms, company names, and technical jargon with enhanced vocabulary support.
+
+
\ No newline at end of file
diff --git a/fern/changelog/2025-09-11.mdx b/fern/changelog/2025-09-11.mdx
new file mode 100644
index 000000000..4a631ec9c
--- /dev/null
+++ b/fern/changelog/2025-09-11.mdx
@@ -0,0 +1,37 @@
+# Voice Enhancements & Minimax Improvements
+
+1. **Minimax Voice Language Support**: Enhance multilingual conversations with [`MinimaxVoice.languageBoost`](https://api.vapi.ai/api#:~:text=MinimaxVoice.languageBoost). Support for 40+ languages including:
+ - `Chinese` and `Chinese,Yue` for Mandarin and Cantonese
+ - `English`, `Spanish`, `French`, `German`, `Japanese`, `Korean`
+ - Regional variants and specialized languages like `Arabic`, `Hindi`, `Thai`
+ - `auto` mode for automatic language detection
+
+2. **Text Normalization**: Improve number reading and formatting with [`MinimaxVoice.textNormalizationEnabled`](https://api.vapi.ai/api#:~:text=MinimaxVoice.textNormalizationEnabled). When enabled, spoken numbers, dates, and formatted text are properly pronounced for natural-sounding conversations.
+
+3. **Enhanced Voice Caching**: Voice responses are now cached by default with [`MinimaxVoice.cachingEnabled`](https://api.vapi.ai/api#:~:text=MinimaxVoice.cachingEnabled) set to `true`, reducing latency for repeated phrases and improving overall conversation performance.
+
+4. **Fallback Voice Configuration**: Ensure conversation continuity with [`FallbackMinimaxVoice`](https://api.vapi.ai/api#:~:text=FallbackMinimaxVoice) featuring the same language boost and text normalization capabilities as the primary voice configuration.
+
+5. **Speaker Labeling**: Track multiple speakers in conversations with [`BotMessage.speakerLabel`](https://api.vapi.ai/api#:~:text=BotMessage.speakerLabel), providing stable speaker identification (e.g., "Speaker 1") for better conversation analysis and diarization.
+
+6. **Voice Region Support**: Choose optimal performance regions with Minimax's `worldwide` (default) or `china` regional settings for better latency and compliance with local regulations.
+
+
+ Language boost settings help the text-to-speech model better understand context and pronunciation for specific languages, resulting in more natural and accurate voice synthesis.
+
+
+## Voice Quality Features
+
+
+ Support for 40+ languages with automatic detection and language-specific optimizations for natural pronunciation.
+
+
+ Intelligent normalization of numbers, dates, and formatted text for natural-sounding speech synthesis.
+
+
+ Voice caching reduces latency for common phrases, while regional settings optimize for local performance.
+
+
+ Speaker labeling and diarization support for multi-participant conversation analysis and management.
+
+
\ No newline at end of file
diff --git a/fern/changelog/2025-09-14.mdx b/fern/changelog/2025-09-14.mdx
new file mode 100644
index 000000000..2248b3303
--- /dev/null
+++ b/fern/changelog/2025-09-14.mdx
@@ -0,0 +1,35 @@
+# Squad Management & Session Enhancement
+
+1. **Squad-Based Sessions**: Organize your assistants into collaborative teams with [`Session.squad`](https://api.vapi.ai/api#:~:text=Session.squad) and [`Session.squadId`](https://api.vapi.ai/api#:~:text=Session.squadId). Sessions can now be associated with squads for team-based conversation management and coordinated assistant behavior.
+
+2. **Squad Chat Integration**: Enable squad-based chat conversations using [`Chat.squad`](https://api.vapi.ai/api#:~:text=Chat.squad) and [`Chat.squadId`](https://api.vapi.ai/api#:~:text=Chat.squadId). This allows multiple assistants to participate in or be aware of chat contexts for more sophisticated conversation handling.
+
+3. **Enhanced Session Creation**: Create squad-enabled sessions with [`CreateSessionDTO.squad`](https://api.vapi.ai/api#:~:text=CreateSessionDTO.squad) and [`CreateSessionDTO.squadId`](https://api.vapi.ai/api#:~:text=CreateSessionDTO.squadId), enabling persistent conversation contexts across multiple assistants and interaction types.
+
+4. **Chat Management by Squad**: Filter and organize chats by squad membership using [`GetChatPaginatedDTO.squadId`](https://api.vapi.ai/api#:~:text=GetChatPaginatedDTO.squadId) for better conversation management and team-based analytics.
+
+5. **Session Management by Squad**: Query sessions by squad association with [`GetSessionPaginatedDTO.squadId`](https://api.vapi.ai/api#:~:text=GetSessionPaginatedDTO.squadId), providing team-based session organization and management capabilities.
+
+6. **Full Message History**: Control conversation context retention with [`ArtifactPlan.fullMessageHistoryEnabled`](https://api.vapi.ai/api#:~:text=ArtifactPlan.fullMessageHistoryEnabled). When enabled, artifacts contain complete message history even after handoff context engineering, preserving full conversation flow for analysis.
+
+7. **Transfer Records**: Track warm transfer details with [`Artifact.transfers`](https://api.vapi.ai/api#:~:text=Artifact.transfers), providing comprehensive records of transfer destinations, transcripts, and status information for multi-assistant conversations.
+
+
+ Squad management enables sophisticated multi-assistant workflows where different specialists can handle different parts of a conversation while maintaining shared context and coordination.
+
+
+## Team Collaboration Features
+
+
+ Enable multiple assistants to work together within squads for specialized conversation handling and seamless handoffs.
+
+
+ Maintain conversation context across squad members and session boundaries for continuous conversation experiences.
+
+
+ Filter conversations, sessions, and analytics by squad membership for team-based performance insights and management.
+
+
+ Track all transfers and handoffs with detailed records including destinations, transcripts, and status information.
+
+
\ No newline at end of file
diff --git a/fern/changelog/2025-09-17.mdx b/fern/changelog/2025-09-17.mdx
new file mode 100644
index 000000000..f9a8b4d90
--- /dev/null
+++ b/fern/changelog/2025-09-17.mdx
@@ -0,0 +1,36 @@
+# API Versioning & Infrastructure Updates
+
+1. **API Version 2 Introduction**: Access enhanced functionality through new versioned endpoints while maintaining full backward compatibility:
+ - [`/v2/call`](https://api.vapi.ai/api#:~:text=/v2/call): Enhanced call management with new features and improved response formats
+ - [`/v2/phone-number`](https://api.vapi.ai/api#:~:text=/v2/phone-number): Advanced phone number management with extended capabilities
+
+2. **Enhanced Pagination**: Improved pagination controls across all endpoints with [`PaginationMeta`](https://api.vapi.ai/api#:~:text=PaginationMeta) enhancements:
+ - `createdAtGe` and `createdAtLe`: Date range filtering for creation timestamps
+ - Better sorting and filtering options for large datasets
+ - Enhanced metadata for pagination state management
+
+3. **Workflow Message Configuration**: Customize voicemail handling in workflows with [`CreateWorkflowDTO.voicemailMessage`](https://api.vapi.ai/api#:~:text=CreateWorkflowDTO.voicemailMessage) and [`CreateWorkflowDTO.voicemailDetection`](https://api.vapi.ai/api#:~:text=CreateWorkflowDTO.voicemailDetection) for comprehensive call flow management.
+
+4. **Credential Integration**: Seamless credential management across all workflow and assistant configurations with enhanced [`credentials.items.discriminator.mapping.custom-credential`](https://api.vapi.ai/api#:~:text=credentials.items.discriminator.mapping.custom-credential) support.
+
+5. **Transport Infrastructure**: Foundation for advanced communication channels with improved transport configuration and management capabilities.
+
+
+ Version 2 endpoints provide enhanced features while v1 endpoints remain fully functional. Migrate to v2 when you need access to new capabilities or improved performance characteristics.
+
+
+## Infrastructure Improvements
+
+
+ Existing v1 endpoints continue to work unchanged, ensuring smooth transitions and zero downtime for existing integrations.
+
+
+ Improved date range filtering and pagination controls for better data management and API performance.
+
+
+ Enhanced workflow configuration with better voicemail handling and credential management throughout the call flow.
+
+
+ Foundation for advanced features and capabilities that will be built on the v2 API structure.
+
+
\ No newline at end of file
diff --git a/fern/changelog/2025-09-20.mdx b/fern/changelog/2025-09-20.mdx
new file mode 100644
index 000000000..6064b183a
--- /dev/null
+++ b/fern/changelog/2025-09-20.mdx
@@ -0,0 +1,41 @@
+# Chat Transport & SMS Integration
+
+1. **Twilio SMS Transport**: Send chat responses directly via SMS using [`TwilioSMSChatTransport`](https://api.vapi.ai/api#:~:text=TwilioSMSChatTransport) in [`CreateChatDTO.transport`](https://api.vapi.ai/api#:~:text=CreateChatDTO.transport). This enables programmatic SMS conversations with your voice assistants, bridging the gap between voice and text communication.
+
+2. **SMS Session Management**: Create new sessions automatically when using SMS transport by providing:
+ - `customer`: Customer information for SMS delivery
+ - `phoneNumberId`: SMS-enabled phone number from your organization
+ - Automatic session creation when both fields are provided
+
+3. **LLM-Generated vs Direct SMS**: Control message processing with [`TwilioSMSChatTransport.useLLMGeneratedMessageForOutbound`](https://api.vapi.ai/api#:~:text=TwilioSMSChatTransport.useLLMGeneratedMessageForOutbound):
+ - `true` (default): Input processed by assistant for intelligent responses
+ - `false`: Direct message forwarding without LLM processing for notifications and alerts
+
+4. **Enhanced Chat Creation**: [`CreateChatDTO`](https://api.vapi.ai/api#:~:text=CreateChatDTO) now supports sophisticated session management:
+ - `transport`: SMS delivery configuration
+ - `sessionId`: Use existing session data
+ - Mutual exclusivity between `sessionId` and transport fields for clear session boundaries
+
+5. **OpenAI Responses Integration**: Streamlined chat processing with [`OpenAIResponsesRequest`](https://api.vapi.ai/api#:~:text=OpenAIResponsesRequest) supporting the same transport and squad integration features for consistent API experience.
+
+6. **Cross-Platform Continuity**: Seamlessly transition between voice calls and SMS conversations within the same session, maintaining context and conversation history across communication channels.
+
+
+ SMS transport requires SMS-enabled phone numbers in your organization. The phone number must support SMS functionality and belong to your account for successful message delivery.
+
+
+## SMS Communication Features
+
+
+ Send and receive SMS messages through your voice assistant, enabling text-based interactions alongside voice conversations.
+
+
+ Choose between AI-processed responses and direct message forwarding based on your use case requirements.
+
+
+ Maintain conversation context across SMS and voice interactions within unified sessions for seamless user experiences.
+
+
+ Automatic session creation and management when using transport fields, simplifying SMS conversation setup.
+
+
\ No newline at end of file
diff --git a/fern/changelog/2025-09-23.mdx b/fern/changelog/2025-09-23.mdx
new file mode 100644
index 000000000..f2479defe
--- /dev/null
+++ b/fern/changelog/2025-09-23.mdx
@@ -0,0 +1,43 @@
+# Advanced Analytics & Variable Grouping
+
+1. **Variable Value Analytics**: Gain deeper insights into your assistant performance with [`AnalyticsQuery.groupByVariableValue`](https://api.vapi.ai/api#:~:text=AnalyticsQuery.groupByVariableValue). Group analytics data by specific variable values extracted during calls for granular performance analysis.
+
+2. **Enhanced Grouping Options**: Use [`VariableValueGroupBy`](https://api.vapi.ai/api#:~:text=VariableValueGroupBy) to specify custom grouping criteria:
+ - `key`: The variable value key to group by (up to 100 characters)
+ - Combine with existing grouping options like `assistantId`, `endedReason`, and `status`
+
+3. **Multi-Dimensional Analysis**: Create complex analytics queries by combining traditional grouping fields with variable values:
+ - Group by assistant performance AND custom business metrics
+ - Analyze conversation outcomes by extracted data points
+ - Track success rates across different variable value segments
+
+4. **Advanced Query Capabilities**: Enhanced [`AnalyticsQuery`](https://api.vapi.ai/api#:~:text=AnalyticsQuery) functionality enables sophisticated data analysis:
+ - Multiple grouping dimensions for comprehensive insights
+ - Variable-based segmentation for business intelligence
+ - Custom metric tracking through extracted call variables
+
+5. **Business Intelligence Integration**: Connect your call data to business outcomes by grouping analytics on:
+ - Customer satisfaction scores extracted from calls
+ - Product interest levels determined during conversations
+ - Lead qualification status gathered through assistant interactions
+ - Custom KPIs specific to your business logic
+
+
+ Variable values are extracted during calls using tool response schemas and aliases. Set up variable extraction in your tools to enable powerful analytics grouping based on conversation outcomes.
+
+
+## Analytics Enhancements
+
+
+ Group analytics by any variable extracted during calls, enabling business-specific performance insights and KPI tracking.
+
+
+ Combine traditional call metrics with custom variable grouping for comprehensive conversation analysis.
+
+
+ Connect call performance to business outcomes through variable-based analytics and custom grouping options.
+
+
+ Create detailed reports by grouping on extracted conversation data like satisfaction scores, intent categories, or custom business metrics.
+
+
\ No newline at end of file
diff --git a/fern/changelog/2025-09-26.mdx b/fern/changelog/2025-09-26.mdx
new file mode 100644
index 000000000..706f757f6
--- /dev/null
+++ b/fern/changelog/2025-09-26.mdx
@@ -0,0 +1,39 @@
+# Voicemail Detection & Handling Improvements
+
+1. **Enhanced Beep Detection**: Improve voicemail detection accuracy with [`CreateVoicemailToolDTO.beepDetectionEnabled`](https://api.vapi.ai/api#:~:text=CreateVoicemailToolDTO.beepDetectionEnabled) specifically for Twilio-based calls. This feature detects the characteristic beep sound that indicates voicemail recording has started.
+
+2. **Workflow Voicemail Integration**: Configure comprehensive voicemail handling in workflows with enhanced message and detection capabilities:
+ - [`Workflow.voicemailMessage`](https://api.vapi.ai/api#:~:text=Workflow.voicemailMessage): Custom messages for voicemail scenarios (up to 1000 characters)
+ - [`Workflow.voicemailDetection`](https://api.vapi.ai/api#:~:text=Workflow.voicemailDetection): Configurable detection methods for different providers
+
+3. **Assistant Voicemail Enhancement**: Improved voicemail handling in assistant configurations with [`Assistant.voicemailMessage`](https://api.vapi.ai/api#:~:text=Assistant.voicemailMessage) and [`Assistant.voicemailDetection`](https://api.vapi.ai/api#:~:text=Assistant.voicemailDetection) for consistent behavior across all conversation types.
+
+4. **Multiple Detection Methods**: Choose from various voicemail detection providers:
+ - **Google**: [`GoogleVoicemailDetectionPlan`](https://api.vapi.ai/api#:~:text=GoogleVoicemailDetectionPlan) for AI-powered detection
+ - **OpenAI**: [`OpenAIVoicemailDetectionPlan`](https://api.vapi.ai/api#:~:text=OpenAIVoicemailDetectionPlan) for intelligent voicemail recognition
+ - **Twilio**: [`TwilioVoicemailDetectionPlan`](https://api.vapi.ai/api#:~:text=TwilioVoicemailDetectionPlan) for carrier-level detection
+ - **Vapi**: [`VapiVoicemailDetectionPlan`](https://api.vapi.ai/api#:~:text=VapiVoicemailDetectionPlan) for integrated detection
+
+5. **Beep Detection for Call Flows**: The new beep detection capability works specifically with Twilio transport, providing reliable voicemail identification when traditional detection methods may not be sufficient.
+
+6. **Voicemail Tool Configuration**: Enhanced tool rejection and messaging capabilities ensure appropriate handling when voicemail is detected, with configurable responses based on your business requirements.
+
+
+ Beep detection is currently available only for Twilio-based calls. If you're using other providers, consider combining multiple detection methods for better accuracy.
+
+
+## Voicemail Management Features
+
+
+ Support for Google, OpenAI, Twilio, and Vapi detection methods, allowing you to choose the best option for your use case.
+
+
+ Advanced audio analysis to detect voicemail beeps on Twilio calls for more reliable voicemail identification.
+
+
+ Configure personalized voicemail messages up to 1000 characters for better user experience and brand consistency.
+
+
+ Comprehensive voicemail handling throughout workflow nodes with consistent configuration across conversation flows.
+
+
\ No newline at end of file
diff --git a/fern/changelog/2025-09-28.mdx b/fern/changelog/2025-09-28.mdx
new file mode 100644
index 000000000..0d6c91e31
--- /dev/null
+++ b/fern/changelog/2025-09-28.mdx
@@ -0,0 +1,49 @@
+# Evaluation Execution & Results Processing
+
+1. **Evaluation Execution Engine**: Run comprehensive assistant evaluations with [`EvalRun`](https://api.vapi.ai/api#:~:text=EvalRun) and [`CreateEvalRunDTO`](https://api.vapi.ai/api#:~:text=CreateEvalRunDTO). Execute your mock conversations against live assistants and squads to validate performance and behavior in controlled environments.
+
+2. **Multiple Evaluation Models**: Choose from various AI models for LLM-as-a-judge evaluation:
+ - [`EvalOpenAIModel`](https://api.vapi.ai/api#:~:text=EvalOpenAIModel): GPT models including GPT-4.1, o1-mini, o3, and regional variants
+ - [`EvalAnthropicModel`](https://api.vapi.ai/api#:~:text=EvalAnthropicModel): Claude models with optional thinking features for complex evaluations
+ - [`EvalGoogleModel`](https://api.vapi.ai/api#:~:text=EvalGoogleModel): Gemini models from 1.0 Pro to 2.5 Pro for diverse evaluation needs
+ - [`EvalGroqModel`](https://api.vapi.ai/api#:~:text=EvalGroqModel): High-speed inference models including Llama and custom options
+ - [`EvalCustomModel`](https://api.vapi.ai/api#:~:text=EvalCustomModel): Your own evaluation models with custom endpoints
+
+3. **Evaluation Results**: Comprehensive result tracking with [`EvalRunResult`](https://api.vapi.ai/api#:~:text=EvalRunResult):
+ - `status`: Pass/fail evaluation outcomes
+ - `messages`: Complete conversation transcript from the evaluation
+ - `startedAt` and `endedAt`: Precise timing information for performance analysis
+
+4. **Target Flexibility**: Run evaluations against different targets:
+ - [`EvalRunTargetAssistant`](https://api.vapi.ai/api#:~:text=EvalRunTargetAssistant): Test individual assistants with optional overrides
+ - [`EvalRunTargetSquad`](https://api.vapi.ai/api#:~:text=EvalRunTargetSquad): Evaluate entire squad performance and coordination
+
+5. **Evaluation Status Tracking**: Monitor evaluation progress with detailed status information:
+ - `running`: Evaluation in progress
+ - `ended`: Evaluation completed
+ - `queued`: Evaluation waiting to start
+ - Detailed `endedReason` including success, error, timeout, and cancellation states
+
+6. **Judge Configuration**: Optimize evaluation accuracy with model-specific settings:
+ - `maxTokens`: Recommended 50-10000 tokens (1 token for simple pass/fail responses)
+ - `temperature`: 0-0.3 recommended for LLM-as-a-judge to reduce hallucinations
+
+
+ For LLM-as-a-judge evaluations, the judge model must respond with exactly "pass" or "fail". Design your evaluation prompts to ensure clear, deterministic responses.
+
+
+## Evaluation Capabilities
+
+
+ Choose from OpenAI, Anthropic, Google, Groq, or custom models for evaluation, matching your quality and performance requirements.
+
+
+ Detailed pass/fail results with complete conversation transcripts and timing information for thorough analysis.
+
+
+ Test individual assistants or entire squads with optional configuration overrides for comprehensive validation.
+
+
+ Real-time evaluation status tracking with detailed reason codes for failures, timeouts, and cancellations.
+
+
\ No newline at end of file
diff --git a/fern/changelog/2025-09-29.mdx b/fern/changelog/2025-09-29.mdx
new file mode 100644
index 000000000..8de49ca2d
--- /dev/null
+++ b/fern/changelog/2025-09-29.mdx
@@ -0,0 +1,66 @@
+# Breaking Changes & API Cleanup
+
+1. **Legacy Endpoint Removal**: The following deprecated endpoints have been removed as part of our API modernization effort:
+ - `/logs` - Use call artifacts and monitoring instead
+ - `/workflow/{id}` - Access workflows through the main workflow endpoints
+ - `/test-suite` and related paths - Replaced by the new evaluation system
+ - `/knowledge-base` and related paths - Integrated into model configurations
+
+2. **Knowledge Base Architecture Change**: The `knowledgeBaseId` property has been removed from all model configurations. This affects:
+ - [`XaiModel`](https://api.vapi.ai/api#:~:text=XaiModel), [`GroqModel`](https://api.vapi.ai/api#:~:text=GroqModel), [`GoogleModel`](https://api.vapi.ai/api#:~:text=GoogleModel)
+ - [`OpenAIModel`](https://api.vapi.ai/api#:~:text=OpenAIModel), [`AnthropicModel`](https://api.vapi.ai/api#:~:text=AnthropicModel), [`CustomLLMModel`](https://api.vapi.ai/api#:~:text=CustomLLMModel)
+ - All other model provider configurations
+
+3. **Transcriber Property Deprecation**: [`AssemblyAITranscriber.wordFinalizationMaxWaitTime`](https://api.vapi.ai/api#:~:text=AssemblyAITranscriber.wordFinalizationMaxWaitTime) and [`FallbackAssemblyAITranscriber.wordFinalizationMaxWaitTime`](https://api.vapi.ai/api#:~:text=FallbackAssemblyAITranscriber.wordFinalizationMaxWaitTime) are now deprecated:
+ - Use smart endpointing plans for better speech timing control
+ - More precise conversation flow management
+ - Enhanced end-of-turn detection capabilities
+
+4. **Schema Path Cleanup**: Removed numerous unused schema paths from model configurations to simplify the API structure and improve performance. This cleanup affects internal schema references but doesn't impact your existing integrations.
+
+5. **New v2 API**: We are introducing a new API version v2. These changes are part of our ongoing effort to:
+ - Simplify the API structure for better developer experience
+ - Remove redundant and deprecated functionality
+ - Complete the transition to new evaluation and compliance systems
+ - Improve API performance and maintainability
+
+For details on the new features that replace these deprecated endpoints, see our recent changelog entries:
+- [Enhanced Authentication & Custom Credentials (Aug 30)](./2025-08-30.mdx)
+- [Recording Consent & Compliance Management (Sep 2)](./2025-09-02.mdx)
+- [Evaluation System Foundation (Sep 5)](./2025-09-05.mdx)
+- [Evaluation Execution & Results Processing (Sep 28)](./2025-09-28.mdx)
+
+
+ If you're currently using any of the removed endpoints or properties, you must migrate to the new alternatives before this release. Contact support if you need assistance with migration strategies.
+
+
+## Migration Guide
+
+
+ Replace /logs endpoint usage with call artifacts, monitoring plans, and end-of-call reports for comprehensive logging.
+
+
+ Migrate from test-suite endpoints to the new evaluation system with mock conversations and comprehensive result tracking.
+
+
+ Update model configurations to use the integrated knowledge base system instead of separate knowledgeBaseId references.
+
+
+ Replace deprecated transcriber timing properties with smart endpointing plans for better conversation flow control.
+
+
+
+## Removed Endpoints
+The following endpoints are no longer available:
+- `GET /logs` - Use call artifacts instead
+- `GET /workflow/{id}` - Use main workflow endpoints
+- `GET /test-suite`, `POST /test-suite` - Use [evaluation endpoints](./2025-09-05.mdx)
+- `GET /test-suite/{id}`, `PUT /test-suite/{id}`, `DELETE /test-suite/{id}` - Use [evaluation management](./2025-09-28.mdx)
+- `POST /test-suite/{testSuiteId}/run` - Use [evaluation runs](./2025-09-28.mdx)
+- `GET /knowledge-base`, `POST /knowledge-base` - Integrated into model configurations
+- All related nested endpoints and operations
+
+**See Also:**
+- [Authentication System Updates (Aug 30)](./2025-08-30.mdx) - For credential management migration
+- [Recording Consent Features (Sep 2)](./2025-09-02.mdx) - For compliance system details
+- [Enhanced Transcription (Sep 8)](./2025-09-08.mdx) - For AssemblyAI timing alternatives
\ No newline at end of file
diff --git a/fern/changelog/2026-03-31.mdx b/fern/changelog/2026-03-31.mdx
new file mode 100644
index 000000000..8a94351de
--- /dev/null
+++ b/fern/changelog/2026-03-31.mdx
@@ -0,0 +1,79 @@
+# What's New: October 2025 – March 2026
+
+Here's a summary of major items shipped from October 2025 through March 2026.
+
+---
+
+## Platform
+
+1. **Squads v2**: Visual builder to simplify sophisticated multi-assistant orchestration with seamless handoffs between specialized agents.
+
+2. **Composer (Alpha)**: Intelligent assistant inside the dashboard that lets you describe what you need in plain-text prompts to build, adjust, and debug voice agents.
+
+3. **Simulations (Alpha)**: Voice agent testing feature that builds confidence by enabling systematic, AI-powered testing of specific scenarios with evaluation of outcomes.
+
+4. **Monitoring & Issues**: Automated call quality monitoring with trigger-based issue detection, alerting, and resolution suggestions.
+
+5. **HIPAA with Data Retention**: New compliance mode with private storage and in-dashboard toggle/purchase flow — available for additional cost.
+
+6. **Zero Data Retention**: Compliance mode that keeps context data during the call as needed to execute tasks and retains no data afterwards.
+
+7. **Consolidated Logs**: Unified all log viewing into a single page.
+
+8. **Vapi Voices**: 12 new ultra-realistic voices released, optimized for latency and cost with adjustable speed controls exposed. 8 legacy voices deprecated.
+
+---
+
+## New Models & Provider Support
+
+### Transcriber Models (Speech-to-Text)
+
+1. **Deepgram Nova-3 Languages**: Added Hebrew, Urdu, Tagalog, and Arabic bilingual support.
+
+2. **Cartesia Transcriber**: ink-whisper.
+
+3. **Soniox**: stt-rt-v4.
+
+### Intelligence Models (LLM)
+
+1. **GPT-5 Family**: OpenAI's latest intelligence models, including GPT-5, 5-Mini, 5-Nano, 5.1, 5.2, 5.4, 5.4-Mini, 5.4-Nano.
+
+2. **Claude 4.5–4.6**: Anthropic's latest intelligence models Sonnet 4.5, Opus 4.5, Opus 4.6, Sonnet 4.6.
+
+3. **Gemini 3 Flash**: Google's latest intelligence models.
+
+4. **Grok 4 Fast**: Reasoning and non-reasoning variants.
+
+5. **GPT Realtime Mini**: OpenAI's lightweight realtime model.
+
+### Voice Models (Text-to-Speech)
+
+1. **Cartesia**: sonic-3, sonic-3-2026-01-12, sonic-3-2025-10-27.
+
+2. **WellSaid**: Caruso (new), legacy.
+
+3. **Inworld**: inworld-tts-1 (REST, original), inworld-tts-1.5-max (WebSocket, \$10/M chars), inworld-tts-1.5-mini (WebSocket, \$5/M chars).
+
+4. **ElevenLabs Scribe v2**: Latest version of ElevenLabs speech-to-text.
+
+---
+
+## Developer Tools & API
+
+1. **Structured Outputs Improvements**: Updates to our AI-powered analysis and data extraction tool, including transient structured outputs, audio-based extraction, and regex extraction.
+
+2. **SIP Request Tool + DTMF over SIP INFO**: Send SIP requests and DTMF tones via SIP INFO messages during calls.
+
+3. **Variable Passing Between Tool Calls**: Pass output variables from one tool call as input to subsequent tool calls.
+
+4. **Encrypted Tool Arguments**: Encrypt sensitive tool arguments to protect data in transit.
+
+5. **Low Confidence Speech Hook**: Hook that triggers when the transcriber returns low-confidence speech results.
+
+6. **Time Elapsed Hook**: Hook that triggers at specified time intervals during a call.
+
+7. **assistant.speechStarted Event**: New event fired when the assistant begins speaking.
+
+8. **MCP Improvements**: Bearer auth, $ref dereferencing, child tool messages/discovery.
+
+9. **Warm Transfer Improvements**: SIP support, caller ID, context engineering, variable filling.
diff --git a/fern/changelog/overview.mdx b/fern/changelog/overview.mdx
index 6beee95dd..aedfd063f 100644
--- a/fern/changelog/overview.mdx
+++ b/fern/changelog/overview.mdx
@@ -1,14 +1,14 @@
---
-slug: changelog
+slug: whats-new
---
document.querySelector('input[type="email"]').focus()}>Get the (almost) daily changelog}
+ title={
document.querySelector('input[type="email"]').focus()}>Subscribe to the latest product updates
}
icon="envelope"
iconType="solid"
>
@@ -181,12 +150,9 @@ The Vapi CLI brings the full power of the platform to your terminal:
title="Appointment Scheduling"
icon="calendar-check"
iconType="solid"
- href="/workflows/examples/appointment-scheduling"
+ href="/assistants/examples/appointment-scheduling"
>
-
-
-
-
Built with Workflows
+
Built with Assistants
Handle booking requests, check availability, and confirm appointments with conditional routing.
@@ -194,12 +160,9 @@ The Vapi CLI brings the full power of the platform to your terminal:
title="Medical Triage & Scheduling"
icon="stethoscope"
iconType="solid"
- href="/workflows/examples/clinic-triage-scheduling"
+ href="/squads/examples/clinic-triage-scheduling"
>
-
-
-
-
Built with Workflows
+
Built with Squads
Emergency routing and appointment scheduling for healthcare.
@@ -207,12 +170,9 @@ The Vapi CLI brings the full power of the platform to your terminal:
title="E-commerce Order Management"
icon="shopping-cart"
iconType="solid"
- href="/workflows/examples/ecommerce-order-management"
+ href="/squads/examples/ecommerce-order-management"
>
-
-
-
-
Built with Workflows
+
Built with Squads
Order tracking, returns, and customer support workflows.
@@ -222,9 +182,6 @@ The Vapi CLI brings the full power of the platform to your terminal:
iconType="solid"
href="/examples"
>
-
-
-
See our collection of examples covering a wide range of use cases.
diff --git a/fern/quickstart/web.mdx b/fern/quickstart/web.mdx
index c616563af..77f7b8de8 100644
--- a/fern/quickstart/web.mdx
+++ b/fern/quickstart/web.mdx
@@ -1260,7 +1260,7 @@ Now that you understand both client and server SDK capabilities:
- **Explore use cases:** Check out our [examples section](/assistants/examples/inbound-support) for complete implementations
- **Add tools:** Connect your voice agents to external APIs and databases with [custom tools](/tools/custom-tools)
- **Configure models:** Try different [speech and language models](/assistants/speech-configuration) for better performance
-- **Scale with workflows:** Use [Vapi workflows](/workflows/quickstart) for complex multi-step processes
+- **Scale with squads:** Use [Squads](/squads) for multi-assistant setups and complex processes
## Resources
diff --git a/fern/security-and-privacy/PCI.mdx b/fern/security-and-privacy/PCI.mdx
index 34f4ee7ef..3a6ab8726 100644
--- a/fern/security-and-privacy/PCI.mdx
+++ b/fern/security-and-privacy/PCI.mdx
@@ -58,6 +58,175 @@ Example configuration for `PCI compliant` assistant is:
```
Note: The default value for `compliancePlan.pciEnabled` is false. Activating this setting aligns your assistant with PCI DSS standards by ensuring data is securely transmitted without being stored on Vapi’s systems.
+## Selective Recording with Squads
+
+For businesses that need to collect payment information while maintaining compliance, you can use **squads** to selectively disable recording, logging, and transcription only during sensitive payment collection phases. This approach allows you to:
+
+- **Record the beginning and end** of calls for quality assurance
+- **Disable all artifacts** during payment data collection
+- **Stay compliant** while gathering credit card information
+- **Use handoff tools** to seamlessly transfer between assistants
+- **Leverage Vapi's logging and trace recording** for non-sensitive portions while staying compliant
+
+### Payment Collection Squad Example
+
+Here's a complete squad configuration that demonstrates this approach:
+
+```json
+{
+ "name": "Payment Squad Without Recording",
+ "members": [
+ {
+ "assistant": {
+ "name": "Assistant 1",
+ "model": {
+ "model": "gpt-4o",
+ "provider": "openai",
+ "messages": [
+ {
+ "content": "You are a helpful QuickSend assistant. Greet the caller, let them know you'll help them add a new payment method, and then smoothly transfer them to the payment setup assistant.",
+ "role": "system"
+ }
+ ]
+ },
+ "voice": {
+ "voiceId": "Elliot",
+ "provider": "vapi"
+ },
+ "transcriber": {
+ "model": "nova-2",
+ "provider": "deepgram",
+ "language": "en"
+ },
+ "keypadInputPlan": {
+ "enabled": true,
+ "timeoutSeconds": 10,
+ "delimiters": [
+ "#"
+ ]
+ },
+ "firstMessage": "Welcome to QuickSend! I'll help you add a new payment method. Let's get started.",
+ "firstMessageMode": "assistant-speaks-first"
+ },
+ "assistantDestinations": [
+ {
+ "type": "assistant",
+ "assistantName": "Assistant 2",
+ "description": "Transfer the caller to the payment setup assistant."
+ }
+ ]
+ },
+ {
+ "assistant": {
+ "name": "Assistant 2",
+ "model": {
+ "model": "gpt-4o",
+ "provider": "openai",
+ "messages": [
+ {
+ "content": "You are a payment setup assistant. Ask the caller to enter their new card number followed by the pound (#) key. When you receive it, repeat it back clearly for confirmation. Once confirmed, transfer them to the payment confirmation assistant.",
+ "role": "system"
+ }
+ ]
+ },
+ "artifactPlan": {
+ "recordingEnabled": false,
+ "loggingEnabled": false,
+ "transcriptPlan": {
+ "enabled": false
+ }
+ },
+ "voice": {
+ "voiceId": "Elliot",
+ "provider": "vapi"
+ },
+ "transcriber": {
+ "model": "nova-2",
+ "provider": "deepgram",
+ "language": "en"
+ },
+ "firstMessage": "Please enter your new card number followed by the POUND key.",
+ "firstMessageMode": "assistant-speaks-first",
+ "keypadInputPlan": {
+ "enabled": true,
+ "timeoutSeconds": 10,
+ "delimiters": [
+ "#"
+ ]
+ }
+ },
+ "assistantDestinations": [
+ {
+ "type": "assistant",
+ "assistantName": "Assistant 3",
+ "description": "Transfer the caller to the payment confirmation assistant.",
+ "contextEngineeringPlan": {
+ "type": "none"
+ },
+ "variableExtractionPlan": {
+ "schema": {
+ "type": "object",
+ "properties": {
+ "lastFourDigits": {
+ "type": "string",
+ "description": "last four digits of the card the user gave as input"
+ }
+ }
+ }
+ }
+ }
+ ]
+ },
+ {
+ "assistant": {
+ "name": "Assistant 3",
+ "model": {
+ "model": "gpt-4o",
+ "provider": "openai",
+ "messages": [
+ {
+ "content": "You are a payment confirmation assistant. Thank the caller for providing their card number. Read out the last four digits to them and confirm that they are correct. Once confirmed, let the caller know the payment method has been added successfully and close the conversation politely. Last four digits are {{lastFourDigits}}",
+ "role": "system"
+ }
+ ]
+ },
+ "voice": {
+ "voiceId": "Elliot",
+ "provider": "vapi"
+ },
+ "transcriber": {
+ "model": "nova-2",
+ "provider": "deepgram",
+ "language": "en"
+ },
+ "firstMessage": "Thanks for providing your card number. Do you want to proceed with your payment?",
+ "firstMessageMode": "assistant-speaks-first"
+ }
+ }
+ ]
+}
+```
+
+### How This Squad Works
+
+1. **Assistant 1** (Greeting): Records and logs the initial conversation
+2. **Assistant 2** (Payment Collection): **Disables all artifacts** using `artifactPlan` while collecting credit card data via keypad input
+3. **Assistant 3** (Confirmation): Records and logs the final confirmation
+
+The key component is the `artifactPlan` in Assistant 2:
+
+```json
+"artifactPlan": {
+ "recordingEnabled": false,
+ "loggingEnabled": false,
+ "transcriptPlan": {
+ "enabled": false
+ }
+}
+```
+
+This ensures that sensitive payment information is never recorded, logged, or transcribed, while still allowing you to maintain call quality data for the non-sensitive portions of the conversation.
+
## Can PCI be used alongside HIPAA?
Yes, you can enable both HIPAA and PCI compliance for an assistant. In this case, the restrictions from both compliances will apply, meaning that no recordings or transcripts will be stored or transmitted, even if you have specified cloud storage endpoints or webhooks for storing transcripts.
diff --git a/fern/security-and-privacy/data-flow.mdx b/fern/security-and-privacy/data-flow.mdx
new file mode 100644
index 000000000..444af65ce
--- /dev/null
+++ b/fern/security-and-privacy/data-flow.mdx
@@ -0,0 +1,425 @@
+---
+title: Data Flow
+subtitle: Understand how data flows through Vapi when using custom storage and custom models
+slug: security-and-privacy/data-flow
+---
+
+## Overview
+
+When using Vapi, data flows through multiple components during a voice conversation. Understanding this flow is essential for security-conscious organizations, especially when integrating custom bucket storage or custom model providers.
+
+**This guide explains:**
+- The complete voice pipeline architecture
+- What data passes through each component
+- What data is stored on Vapi's infrastructure vs your own
+- Which components support "bring your own" infrastructure
+
+---
+
+## Understanding Log Types
+
+Vapi generates two distinct types of logs during calls:
+
+| Log Type | Description | Visibility | Custom Storage |
+|----------|-------------|------------|----------------|
+| **System Logs** | Internal operational logs used by Vapi for debugging, monitoring, and system health | Vapi internal only | ❌ Never uploaded to custom bucket |
+| **Call Logs** | Conversation data including transcripts, recordings, and call metadata | Available to customers via API/Dashboard | ✅ Can be uploaded to custom bucket |
+
+
+**System Logs** are strictly internal to Vapi and are never shared with customers or uploaded to custom storage buckets. They contain infrastructure-level data used for Vapi's operational purposes only.
+
+
+---
+
+## Voice Pipeline Architecture
+
+Vapi orchestrates a sophisticated voice pipeline with multiple modular components. Each component can be configured to use Vapi's default providers, your own API keys, or your own custom servers.
+
+### Complete Pipeline Flow
+
+```mermaid
+flowchart TB
+ subgraph Transport["Transport Layer"]
+ T1[SIP / Telephony / WebSocket / WebRTC]
+ end
+
+ subgraph Transcriber["Speech-to-Text"]
+ STT[Transcriber]
+ end
+
+ subgraph Orchestration["Orchestration Layer"]
+ O1[Endpointing]
+ O2[Interruption Detection]
+ O3[Emotion Detection]
+ O4[Backchanneling]
+ end
+
+ subgraph Model["Language Model"]
+ LLM[LLM]
+ end
+
+ subgraph Voice["Text-to-Speech"]
+ TTS[Voice]
+ end
+
+ subgraph Artifacts["Artifacts"]
+ A1[(Call Recordings)]
+ A2[(Transcripts)]
+ A3[(Call Logs)]
+ A4[(Usage Metrics)]
+ end
+
+ Transport --> Transcriber
+ Transcriber --> Orchestration
+ Orchestration --> Model
+ Model --> Voice
+ Voice --> Transport
+
+ Transcriber -.-> A2
+ Orchestration -.-> A3
+ Model -.-> A3
+ Voice -.-> A1
+```
+
+---
+
+## Pipeline Components
+
+### 1. Transport Layer
+
+The transport layer handles real-time audio streaming between users and Vapi.
+
+| Transport Type | Description | Use Case |
+|---------------|-------------|----------|
+| **SIP** | Session Initiation Protocol | Traditional phone systems, PBX integration |
+| **Telephony** | Twilio, Telnyx, Plivo integrations | PSTN calls, phone numbers |
+| **WebSocket** | Direct bidirectional audio streaming | Web applications, custom integrations |
+| **WebRTC** | Browser-based real-time communication | Web and mobile apps via LiveKit/Daily |
+
+**Audio Formats:**
+- PCM: 16-bit, 16kHz (highest quality)
+- Mu-Law: 8-bit, 8kHz (telephony standard)
+
+### 2. Speech-to-Text (Transcriber)
+
+Converts user audio into text in real-time using streaming recognition.
+
+
+**Custom Transcriber:** Vapi supports custom transcriber integration via WebSocket. See [Custom Transcriber](/customization/custom-transcriber).
+
+
+**Bring Your Own API Key:**
+- ✅ Supported: Deepgram, Gladia, AssemblyAI, Speechmatics, Google, Azure
+- ❌ Not supported: Talkscriber
+
+### 3. Orchestration Layer (Vapi Proprietary)
+
+Vapi runs proprietary real-time models that make conversations feel natural. These models are **not customizable** and run on Vapi's infrastructure.
+
+| Model | Purpose |
+|-------|---------|
+| **Endpointing** | Detects when user finishes speaking using audio-text fusion |
+| **Interruption Detection** | Distinguishes barge-in from affirmations like "uh-huh" |
+| **Background Noise Filtering** | Removes ambient sounds in real-time |
+| **Background Voice Filtering** | Isolates primary speaker from TVs, echoes, others |
+| **Backchanneling** | Adds natural affirmations ("uh-huh", "yeah", "got it") |
+| **Emotion Detection** | Analyzes emotional tone and passes to LLM |
+| **Filler Injection** | Adds natural speech patterns ("um", "like", "so") |
+
+
+Orchestration models process data in real-time but do **not persist** the audio or intermediate results. All processing is **ephemeral**. Only final transcripts and call logs are stored (unless HIPAA mode is enabled).
+
+
+### 4. Language Model (LLM)
+
+Generates conversational responses based on transcribed user input.
+
+
+**Custom LLM:** Vapi supports custom LLM integration via OpenAI-compatible endpoints. See [Custom LLM](/customization/custom-llm/using-your-server).
+
+
+**Bring Your Own API Key:**
+- ✅ Supported: OpenAI, Anthropic, Azure OpenAI, Google Gemini, Groq, DeepSeek, OpenRouter, Together AI, Cerebras, DeepInfra, Perplexity, Anyscale, xAI
+
+### 5. Text-to-Speech (Voice)
+
+Converts LLM responses into spoken audio.
+
+
+**Custom Voice:** Vapi supports custom TTS integration via audio streaming endpoints. See [Custom TTS](/customization/custom-voices/custom-tts).
+
+
+**Bring Your Own API Key:**
+- ✅ Supported: ElevenLabs, PlayHT, Cartesia, Deepgram, OpenAI TTS, Azure, LMNT, Rime AI, Smallest AI, Neuphonic, WellSaid, Hume
+
+---
+
+## Default Data Flow
+
+In the default configuration, Vapi handles all pipeline components and stores artifacts on Vapi's infrastructure.
+
+```mermaid
+flowchart LR
+ subgraph User["User"]
+ A[Voice]
+ end
+
+ subgraph Vapi["Vapi Infrastructure"]
+ T[Transport]
+ STT[Transcriber]
+ O[Orchestration]
+ LLM[LLM]
+ TTS[Voice]
+ end
+
+ subgraph VapiStorage["Vapi Storage"]
+ subgraph CallData["Call Logs (Customer-Accessible)"]
+ R[(Recordings)]
+ TR[(Transcripts)]
+ L[(Call Logs)]
+ end
+ subgraph InternalData["Internal (Vapi Only)"]
+ M[(Product Usage Metrics)]
+ SL[(System Logs)]
+ end
+ end
+
+ A --> T --> STT --> O --> LLM --> TTS --> T --> A
+ STT -.-> TR
+ O -.-> L
+ LLM -.-> L
+ TTS -.-> R
+ Vapi -.-> InternalData
+```
+
+**Default storage on Vapi:**
+- **Call Logs (Customer-Accessible):**
+ - Call recordings (configurable retention)
+ - Full transcripts with timestamps
+ - Call logs with component-level detail
+ - Structured outputs from call analysis
+- **Internal (Vapi Only):**
+ - Product usage metrics and analytics
+ - System logs for operational monitoring
+
+---
+
+## Custom Storage Data Flow
+
+When you configure custom bucket storage, call recordings and call logs are uploaded to your infrastructure. System logs and product usage metrics remain on Vapi's infrastructure.
+
+```mermaid
+flowchart LR
+ subgraph User["User"]
+ A[Voice]
+ end
+
+ subgraph Vapi["Vapi Infrastructure"]
+ T[Transport]
+ STT[Transcriber]
+ O[Orchestration]
+ LLM[LLM]
+ TTS[Voice]
+ end
+
+ subgraph Customer["Your Cloud Storage"]
+ subgraph CustomerCallData["Call Logs (Customer Data)"]
+ R[(Recordings)]
+ TR[(Transcripts)]
+ L[(Call Logs)]
+ end
+ end
+
+ subgraph VapiStorage["Vapi Storage"]
+ subgraph InternalData["Internal (Vapi Only)"]
+ M[(Product Usage Metrics)]
+ SL[(System Logs)]
+ end
+ end
+
+ A --> T --> STT --> O --> LLM --> TTS --> T --> A
+ STT -.-> TR
+ TTS -.-> R
+ O -.-> L
+ Vapi -.-> InternalData
+```
+
+**Supported storage providers:**
+- AWS S3
+- GCP Cloud Storage
+- Cloudflare R2
+- Supabase Storage
+- Azure Blob Storage
+
+
+**System Logs** and **Product Usage Metrics** are always stored on Vapi's infrastructure and are never uploaded to custom storage buckets. These are internal operational data used by Vapi only.
+
+
+---
+
+## Custom Models Data Flow
+
+When using custom transcriber, LLM, or voice servers, data flows to your infrastructure for processing.
+
+```mermaid
+flowchart TB
+ subgraph User["User"]
+ A[Voice]
+ end
+
+ subgraph Vapi["Vapi Infrastructure"]
+ T[Transport]
+ O[Orchestration]
+ end
+
+ subgraph YourSTT["Your Transcriber Server"]
+ STT[Transcriber]
+ end
+
+ subgraph YourLLM["Your LLM Server"]
+ LLM[LLM]
+ end
+
+ subgraph YourTTS["Your Voice Server"]
+ TTS[Voice]
+ end
+
+ subgraph YourStorage["Your Cloud Storage"]
+ subgraph CustomerCallData["Call Logs (Customer Data)"]
+ S[(Recordings)]
+ S2[(Transcripts)]
+ S3[(Call Logs)]
+ end
+ end
+
+ subgraph VapiStorage["Vapi Storage"]
+ subgraph InternalData["Internal (Vapi Only)"]
+ M[(Product Usage Metrics)]
+ SL[(System Logs)]
+ end
+ end
+
+ A --> T
+ T --> O
+ O <--> STT
+ O <--> LLM
+ O <--> TTS
+ TTS --> T --> A
+ O -.-> CustomerCallData
+ Vapi -.-> InternalData
+```
+
+**With full custom configuration:**
+- **Your servers process:** Audio transcription, LLM inference, speech synthesis
+- **Vapi handles:** Orchestration (endpointing, interruptions, etc.), transport routing
+- **Your storage receives:** Recordings, transcripts, call logs
+- **Vapi storage retains:** Product usage metrics, system logs (internal only)
+
+---
+
+## Bring Your Own Infrastructure Summary
+
+| Component | Bring Your Own Key (BYOK) | Custom Server |
+|-----------|---------------------------|---------------|
+| **Transport** | ✅ Twilio, Telnyx, Vonage, etc. | ✅ WebSocket/SIP |
+| **Transcriber** | ✅ Most providers | ✅ [Custom Transcriber](/customization/custom-transcriber) |
+| **Orchestration** | ❌ Vapi only | ❌ Vapi only |
+| **LLM** | ✅ All providers | ✅ [Custom LLM](/customization/custom-llm/using-your-server) |
+| **Voice** | ✅ All providers | ✅ [Custom TTS](/customization/custom-voices/custom-tts) |
+| **Storage** | ✅ S3/GCP/R2/Azure | ✅ S3/GCP/R2/Azure |
+
+
+The **Orchestration Layer** (endpointing, interruption detection, emotion detection, backchanneling, filler injection) is Vapi's core value proposition and runs exclusively on Vapi infrastructure. Audio processed by these models is **ephemeral** and not stored.
+
+
+---
+
+## Artifacts Storage Summary
+
+| Artifact | Default Location | Custom Storage Supported | HIPAA Mode |
+|----------|-----------------|--------------------------|------------|
+| **Call Recordings** | Vapi | ✅ Yes | Not stored on Vapi |
+| **Transcripts** | Vapi | ✅ Yes | Not stored on Vapi |
+| **Call Logs** | Vapi | ✅ Yes | Not stored on Vapi |
+| **Product Usage Metrics** | Vapi | ❌ No | Vapi only |
+| **System Logs** | Vapi | ❌ No | Vapi only |
+| **Structured Outputs** | Vapi | ✅ Yes (via webhook) | Configurable |
+
+
+**HIPAA Mode Important Notice:** When HIPAA mode is enabled (`hipaaEnabled: true`) and no custom storage is configured, Vapi will **not store** call recordings or transcripts. This data will be lost after the call ends. To retain call data in HIPAA mode, you **must** configure a custom storage bucket.
+
+
+---
+
+## What Data Passes Through Vapi
+
+Even with maximum custom configuration, certain data passes through Vapi's orchestration:
+
+| Data Type | Processing | Retention |
+|-----------|-----------|-----------|
+| Raw audio streams | Real-time routing to Transcriber/Voice | **Ephemeral** (not stored) |
+| Transcribed text | Orchestration analysis, LLM routing | Call logs (unless HIPAA) |
+| LLM responses | Filler injection, Voice routing | Call logs (unless HIPAA) |
+| Emotion metadata | Passed to LLM context | **Ephemeral** |
+| Call signaling | SIP/WebSocket management | Metadata only |
+
+---
+
+## Recommendations by Use Case
+
+
+
+ Configure:
+ - **Custom Transcriber** via WebSocket endpoint
+ - **Custom LLM** via OpenAI-compatible server
+ - **Custom Voice** via audio streaming endpoint
+ - **Custom bucket storage** for all call logs
+ - **HIPAA mode** to prevent Vapi call log storage
+
+ Result: Only orchestration signals (ephemeral) pass through Vapi. System logs remain on Vapi infrastructure (never shared).
+
+
+
+ - Use **custom bucket storage** in your required region
+ - Use **custom LLM** hosted in-region OR provider with regional endpoints
+ - Use **custom Voice** hosted in-region if needed
+
+ Note: Orchestration models run on Vapi's US/EU infrastructure (data is ephemeral). System logs remain on Vapi infrastructure.
+
+
+
+ - Enable **Provider Keys** for Transcriber, LLM, and Voice
+ - Vapi uses your API keys, you're billed directly by providers
+ - No custom server setup required
+
+
+
+ - Enable `hipaaEnabled: true`
+ - **Important:** Configure custom storage to retain call recordings and transcripts
+ - Use only HIPAA-compliant providers (Deepgram, Azure, OpenAI, Anthropic, ElevenLabs)
+ - See [HIPAA Compliance](/security-and-privacy/hipaa)
+
+
+ Without custom storage configured, HIPAA mode will result in **no call recordings or transcripts being stored**. Data will be lost after call completion.
+
+
+
+
+---
+
+## Next Steps
+
+### Custom Integration Guides
+- [Custom Transcriber](/customization/custom-transcriber) - Bring your own speech-to-text
+- [Custom LLM](/customization/custom-llm/using-your-server) - Bring your own language model
+- [Custom TTS](/customization/custom-voices/custom-tts) - Bring your own voice synthesis
+
+### Storage Configuration
+- [AWS S3](/providers/cloud/s3) - S3 bucket setup
+- [GCP Cloud Storage](/providers/cloud/gcp) - GCP bucket setup
+- [Cloudflare R2](/providers/cloud/cloudflare) - R2 setup
+
+### Compliance
+- [HIPAA Compliance](/security-and-privacy/hipaa) - Healthcare data handling
+- [PCI Compliance](/security-and-privacy/PCI) - Payment data handling
+- [GDPR Compliance](/security-and-privacy/GDPR) - EU data protection
diff --git a/fern/security-and-privacy/hipaa.mdx b/fern/security-and-privacy/hipaa.mdx
index fe0c70ffc..6881fcd47 100644
--- a/fern/security-and-privacy/hipaa.mdx
+++ b/fern/security-and-privacy/hipaa.mdx
@@ -35,6 +35,32 @@ To enable HIPAA compliance, set hipaaEnabled to true within your assistant's con
Note: The default value for hipaaEnabled is false. Activating this setting is a proactive measure to align with HIPAA standards, requiring manual configuration adjustment.
+## HIPAA Compliant providers
+
+When enabling HIPAA compliance, only HIPAA-compliant providers may be chosen.
+
+### Model Providers (LLM)
+
+- **OpenAI**
+- **Azure OpenAI**
+- **Anthropic**
+- **Google**
+- **Together AI**
+
+### Voice Providers (TTS)
+
+- **Vapi**
+- **ElevenLabs**
+- **Cartesia**
+- **Rime AI**
+- **Deepgram**
+- **Azure**
+
+### Transcription Providers (STT)
+
+- **Azure**
+- **Deepgram**
+
# FAQs
@@ -71,6 +97,80 @@ Note: The default value for hipaaEnabled is false. Activating this setting is a
+## Structured Outputs with HIPAA Mode
+
+When HIPAA mode is enabled, Vapi does not store structured outputs by default. This protects privacy but limits your ability to use structured outputs in Insights and Call Logs. For non-sensitive outputs, you can override this behavior.
+
+
+
+ By default, when HIPAA mode is on, Vapi doesn't store structured outputs. This keeps data private but limits your ability to use structured outputs in Insights and Call Logs.
+
+ You can enable storage for specific structured outputs using the `compliancePlan.forceStoreOnHipaaEnabled` setting. This allows you to store non-sensitive outputs even when HIPAA mode is active.
+
+ **Important:** Your organization is responsible for ensuring that any structured output with storage enabled does NOT extract or generate PHI or sensitive data. Only use this for non-sensitive information.
+
+
+
+ Enable storage ONLY for structured outputs that extract non-sensitive, non-PHI information.
+
+ **Safe use cases:**
+ - Boolean outcomes: `appointmentBooked: true/false`
+ - Call success indicators: `issueResolved: true/false`
+ - General categories: `issueCategory: "billing" | "technical" | "general"`
+ - Satisfaction scores: `csatScore: 1-10`
+ - Call sentiment: `sentiment: "positive" | "neutral" | "negative"`
+
+ **Never enable storage for:**
+ - Patient diagnosis information
+ - Medical record numbers
+ - Social security numbers
+ - Credit card details
+ - Patient names, dates of birth, or contact information
+ - Treatment plans or medication information
+
+ **Warning:** Enabling storage for outputs containing PHI violates HIPAA compliance and your BAA with Vapi.
+
+
+
+ You can enable storage for specific structured outputs via the Dashboard or API.
+
+ **Via Dashboard:**
+ 1. Navigate to **Structured Outputs** in the left sidebar
+ 2. Create or edit a structured output
+ 3. Expand the **Compliance Settings** section
+ 4. Enable the toggle for "Enable Storage of Structured Outputs while on HIPAA Mode"
+ 5. Only enable if your output does not extract sensitive information
+
+ **Via API:**
+
+ When creating a structured output:
+ ```json
+ {
+ "name": "Appointment Booked",
+ "type": "ai",
+ "schema": {
+ "type": "boolean",
+ "description": "Whether an appointment was successfully booked"
+ },
+ "compliancePlan": {
+ "forceStoreOnHipaaEnabled": true
+ }
+ }
+ ```
+
+ When updating a structured output:
+ ```json
+ {
+ "compliancePlan": {
+ "forceStoreOnHipaaEnabled": true
+ }
+ }
+ ```
+
+ **IMPORTANT:** Only set `forceStoreOnHipaaEnabled: true` if you are certain your structured output does NOT extract PHI or sensitive data. Your organization is responsible for ensuring compliance. Misuse could result in BAA violations.
+
+
+
## Best Practices
diff --git a/fern/security-and-privacy/recording-consent-plan.mdx b/fern/security-and-privacy/recording-consent-plan.mdx
new file mode 100644
index 000000000..dff59c8f6
--- /dev/null
+++ b/fern/security-and-privacy/recording-consent-plan.mdx
@@ -0,0 +1,327 @@
+---
+title: Recording consent plan
+subtitle: Configure consent management for call recording compliance
+slug: security-and-privacy/recording-consent-plan
+description: Learn how to configure recording consent plans to ensure compliance with privacy laws and regulations
+---
+
+
+ **Enterprise Feature**: Recording consent plans are only available for
+ Enterprise customers. Contact your account manager or [sales
+ team](https://form.typeform.com/to/iOcCsqVP?typeform-source=vapi.ai) to enable
+ this feature.
+
+
+## Overview
+
+The recording consent plan feature automatically creates a consent assistant that asks users for permission to record calls before transferring them to your main assistant. Call recording only begins after consent is granted, ensuring compliance with privacy laws and regulations across different jurisdictions.
+
+**Recording consent plans enable you to:**
+
+- Automatically request recording consent before each call
+- Handle consent through two different interaction patterns
+- Ensure compliance with privacy regulations (GDPR, CCPA, etc.)
+- Maintain audit trails of consent decisions while ensuring privacy during the consent process
+
+**How it works:**
+
+1. A consent assistant is automatically created and placed first in the call flow
+2. Users interact with the consent assistant to grant or deny recording permission
+3. If consent is granted, the call transfers to your original assistant
+4. If consent is denied, the call ends or transfers based on your configuration
+
+## Consent Types
+
+Vapi supports two types of recording consent plans, each designed for different use cases and legal requirements.
+
+### Stay-on-Line Consent
+
+This type assumes consent is granted if the user remains on the call after hearing the consent message. It's commonly used for customer service scenarios where staying on the line implies agreement.
+
+**Best practices for stay-on-line messages:**
+
+- Clearly state that staying on the line implies consent
+- Mention the purpose of recording (quality, training, etc.)
+- Provide clear instructions to hang up if they don't consent
+
+**Example message:**
+
+```
+"For quality and training purposes, this call may be recorded. Please stay on the line if you agree to being recorded, or hang up if you do not consent."
+```
+
+### Verbal Consent
+
+This type requires explicit verbal consent from the user. The AI assistant will ask for clear confirmation and continue asking until the user provides explicit consent or declines.
+
+**Best practices for verbal consent messages:**
+
+- Ask for explicit verbal confirmation
+- Use clear yes/no language
+- Explain what happens if they decline
+
+**Example message:**
+
+```
+"This call may be recorded for quality and training purposes. Do you agree to being recorded? Please say 'yes' if you agree or 'no' if you decline."
+```
+
+## Configuration
+
+Add the recording consent plan to your assistant's `compliancePlan`:
+
+### Basic Stay-on-Line Configuration
+
+```json
+{
+ "compliancePlan": {
+ "recordingConsentPlan": {
+ "type": "stay-on-line",
+ "message": "For quality and training purposes, this call may be recorded. Please stay on the line if you agree to being recorded, or hang up if you do not consent.",
+ "voice": {
+ "voiceId": "Neha",
+ "provider": "vapi"
+ },
+ "waitSeconds": 3
+ }
+ }
+}
+```
+
+### Basic Verbal Consent Configuration
+
+```json
+{
+ "compliancePlan": {
+ "recordingConsentPlan": {
+ "type": "verbal",
+ "message": "This call may be recorded for quality and training purposes. Do you agree to being recorded? Please say 'yes' if you agree or 'no' if you decline.",
+ "voice": {
+ "voiceId": "Neha",
+ "provider": "vapi"
+ },
+ "declineToolId": "09dd39cc-75f0-45eb-ace3-796ee3aa9c1e"
+ }
+ }
+}
+```
+
+## End-of-Call Report Structure
+
+When you add a recording consent plan to your assistant, the compliance data will be included in the end-of-call-report webhook. Here's what the compliance section looks like in the webhook payload:
+
+### Successful Consent Webhook
+
+```json
+{
+ "message": {
+ "type": "end-of-call-report",
+ "analysis": {
+ /* call analysis data */
+ },
+ "artifact": {
+ /* call artifacts */
+ },
+ "startedAt": "2024-01-15T10:25:00Z",
+ "endedAt": "2024-01-15T10:35:00Z",
+ "endedReason": "assistantEndedCall",
+ "cost": 0.15,
+ "compliance": {
+ "recordingConsent": {
+ "type": "verbal",
+ "grantedAt": "2024-01-15T10:30:00Z"
+ }
+ },
+ "transcript": "/* call transcript */",
+ "recordingUrl": "https://...",
+ "stereoRecordingUrl": "https://..."
+ }
+}
+```
+
+### No Consent Webhook
+
+```json
+{
+ "message": {
+ "type": "end-of-call-report",
+ "analysis": {
+ /* call analysis data */
+ },
+ "artifact": {
+ /* call artifacts */
+ },
+ "startedAt": "2024-01-15T10:25:00Z",
+ "endedAt": "2024-01-15T10:30:00Z",
+ "endedReason": "assistantEndedCall",
+ "cost": 0.1,
+ "compliance": {
+ "recordingConsent": {
+ "type": "verbal"
+ }
+ },
+ "transcript": "/* call transcript */"
+ }
+}
+```
+
+**Key points about the webhook structure:**
+
+- **`recordingConsent` field**: Always present in the `compliance` object when a consent plan is configured
+- **`type`**: Shows which consent type was used (`"verbal"` or `"stay-on-line"`)
+- **`grantedAt`**: Only set when the user explicitly grants permission
+- **Missing `grantedAt`**: Indicates the user declined consent or hung up before granting permission
+
+This webhook structure allows you to easily determine whether recording consent was granted and audit compliance decisions for each call. The compliance data is sent as part of the end-of-call-report webhook, which includes all call details, analysis, and compliance information.
+
+## Implementation
+
+
+
+ Add the recording consent plan to your assistant's compliance plan configuration.
+
+
+
+ 1. Navigate to **Assistants** in your Vapi dashboard
+ 2. Create a new assistant or edit an existing one
+ 3. Go to the **Compliance** section
+ 4. Enable **Recording Consent Plan**
+ 5. Choose your consent type (Stay-on-Line or Verbal)
+ 6. Enter your consent message
+ 7. Configure additional options (voice, decline tool, etc.)
+ 8. Save your assistant
+
+
+ ```typescript
+ import { VapiClient } from "@vapi-ai/server-sdk";
+
+ const client = new VapiClient({ token: process.env.VAPI_API_KEY });
+
+ const assistant = await client.assistants.create({
+ name: "Customer Support Assistant",
+ model: {
+ provider: "openai",
+ model: "gpt-4o"
+ },
+ voice: {
+ provider: "11labs",
+ voiceId: "sarah"
+ },
+ compliancePlan: {
+ recordingConsentPlan: {
+ type: "verbal",
+ message: "This call may be recorded for quality and training purposes. Do you agree to being recorded? Please say 'yes' if you agree or 'no' if you decline.",
+ declineTool: {
+ type: "endCall"
+ }
+ }
+ }
+ });
+ ```
+
+
+
+
+
+
+ Create a test call to verify your consent flow works correctly.
+
+
+
+ 1. Go to your assistant's page
+ 2. Click **Test Call**
+ 3. Verify the consent message plays correctly
+ 4. Test both consent and decline scenarios
+ 5. Check that the call transfers properly after consent
+
+
+ ```typescript
+ // Create a test call
+ const call = await client.calls.create({
+ assistantId: assistant.id,
+ phoneNumberId: "your-phone-number-id"
+ });
+
+ // Monitor the call to verify consent flow
+ console.log("Call created:", call.id);
+ ```
+
+
+
+
+
+
+ Check your call logs to verify consent decisions are being recorded correctly.
+
+ ```typescript
+ // Get call details to check consent status
+ const call = await client.calls.get(callId);
+
+ if (call.compliance?.recordingConsent?.grantedAt) {
+ console.log("Consent granted at:", call.compliance.recordingConsent.grantedAt);
+ console.log("Consent type:", call.compliance.recordingConsent.type);
+ } else {
+ console.log("No consent was granted for this call");
+ }
+ ```
+
+
+
+
+## Decline Tool Options
+
+When users decline recording consent, you can configure different actions using decline tools:
+
+For detailed information about these tools, see:
+
+- **[Handoff Tool](/tools/handoff)** - Transfer to other assistants
+- **[Default Tools](/tools/default-tools)** - Transfer calls and end calls
+
+## Best Practices
+
+### Message Design
+
+- **Stay-on-Line**: Clearly state that staying implies consent
+- **Verbal**: Ask for explicit confirmation with clear yes/no options
+- **Length**: Keep messages concise but comprehensive
+- **Tone**: Use professional, clear language appropriate for your industry
+
+### Voice Selection
+
+- Use a different voice for consent messages to create distinction
+- Choose voices that match your brand and industry requirements
+- Consider using more formal voices for compliance scenarios
+
+### Decline Handling
+
+- **End Call**: Use when you cannot provide service without recording
+- **Transfer**: Use when you have alternative service options
+- **Handoff**: Use when you have non-recording assistants available
+
+### Testing
+
+- Test both consent and decline scenarios thoroughly
+- Verify timing works correctly for your use case
+- Check that decline tools execute properly
+- Monitor call logs to ensure compliance data is recorded
+
+## Troubleshooting
+
+**Users not hearing consent message**
+
+- Verify the consent message is not empty
+- Check that the voice configuration is valid
+- Test with different voice providers if needed
+
+**Decline tool not executing**
+
+- Verify the decline tool configuration is valid
+- Check that referenced assistants or phone numbers exist
+- Ensure the tool type matches your intended action
+
+## Next Steps
+
+- **[Call Recording](/assistants/call-recording)** - Learn about technical recording implementation
+- **[Handoff Tool](/tools/handoff)** - Transfer between assistants
+- **[Default Tools](/tools/default-tools)** - Transfer calls and end calls
+- **[API Reference](/api-reference/assistants/create)** - Explore assistant configuration options
diff --git a/fern/security-and-privacy/static-ip-addresses.mdx b/fern/security-and-privacy/static-ip-addresses.mdx
new file mode 100644
index 000000000..16731e5ce
--- /dev/null
+++ b/fern/security-and-privacy/static-ip-addresses.mdx
@@ -0,0 +1,46 @@
+---
+title: Static IP addresses
+subtitle: Whitelist Vapi IP addresses
+slug: security-and-privacy/static-ip-addresses
+---
+
+
+## Introduction to Vapi static IP addresses
+
+Vapi supports static IP addresses for outbound HTTP requests. When enabled, all HTTP requests from Vapi to your server will originate from a fixed set of IP addresses, allowing you to configure strict firewall rules and network security policies.
+
+## Why use static IP addresses
+
+Static IP addresses provide an additional layer of security for your infrastructure by allowing you to:
+
+- **Control network access** - Restrict incoming traffic to only trusted sources
+- **Simplify firewall rules** - Define precise IP-based access controls
+- **Meet compliance requirements** - Satisfy security policies that mandate IP whitelisting
+- **Audit traffic sources** - Verify that requests are genuinely from Vapi's infrastructure
+
+## Vapi's static IP addresses
+
+When static IP addressing is enabled, all webhook requests from Vapi will originate from the following CIDR block:
+
+- `167.150.224.0/23`
+
+## Enabling static IP addresses
+
+You can enable static IP addressing through the `server` object.
+
+### Example
+
+```json
+{
+ "serverUrl": "https://your-server.example.com/webhook",
+ "staticIpAddressesEnabled": true
+}
+```
+
+
+Always test static IP configuration in a staging environment before deploying to production to avoid service disruptions.
+
+
+## Need help?
+
+If you have questions about static IP addressing, contact our support team at support@vapi.ai.
\ No newline at end of file
diff --git a/fern/server-url/events.mdx b/fern/server-url/events.mdx
index 98761bdfb..f6a9178b3 100644
--- a/fern/server-url/events.mdx
+++ b/fern/server-url/events.mdx
@@ -113,6 +113,14 @@ For inbound phone calls, you can specify the assistant dynamically. If a PhoneNu
}
```
+
+ You must respond to the `assistant-request` webhook within 7.5 seconds end-to-end. This limit is fixed and not configurable: the telephony provider enforces a 15-second cap, and Vapi reserves ~7.5 seconds for call setup. The timeout value shown elsewhere in the dashboard does not apply to this webhook.
+
+ To avoid timeouts:
+ - Return quickly with an existing assistantId or a minimal assistant, then enrich context asynchronously after the call starts using Live Call Control.
+ - Host your webhook close to us-west-2 to reduce latency, and target < ~6s to allow for network jitter.
+
+
Respond with either an existing assistant ID, a transient assistant, or transfer destination:
```json
@@ -135,9 +143,43 @@ Respond with either an existing assistant ID, a transient assistant, or transfer
```
```json
-{ "destination": { "type": "number", "phoneNumber": "+11234567890" } }
+{ "destination": { "type": "number", "number": "+11234567890" } }
+```
+
+#### Transfer only (skip AI)
+
+If you want to immediately transfer the call without using an assistant, return a `destination` in your `assistant-request` response. This bypasses AI handling.
+
+```json
+{
+ "destination": {
+ "type": "number",
+ "number": "+14155552671",
+ "callerId": "{{phoneNumber.number}}",
+ "extension": "101",
+ "message": "Connecting you to support."
+ }
+}
+```
+
+```json
+{
+ "destination": {
+ "type": "sip",
+ "sipUri": "sip:support@example.com",
+ "sipHeaders": { "X-Account": "gold" },
+ "message": "Transferring you now."
+ }
+}
```
+
+ When `destination` is present in the `assistant-request` response, the call forwards immediately and assistantId, assistant, squadId, and squad are ignored.
+ You must still respond within 7.5 seconds.
+ To transfer silently, set destination.message to an empty string.
+ For caller ID behavior, see Call features.
+
+
Or return an error message to be spoken to the caller:
```json
@@ -247,20 +289,21 @@ For final-only events, you may receive `type: "transcript[transcriptType=\"final
### Model Output
-Tokens or tool-call outputs as the model generates.
+Tokens or tool-call outputs as the model generates. The optional `turnId` groups all tokens from the same LLM response, so you can correlate output with a specific turn.
```json
{
"message": {
"type": "model-output",
- "output": { /* token or tool call */ }
+ "output": { /* token or tool call */ },
+ "turnId": "abc-123"
}
}
```
### Transfer Destination Request
-Requested when the model wants to transfer but the destination is not yet known.
+Requested when the model wants to transfer but the destination is not yet known and must be provided by your server.
```json
{
@@ -271,11 +314,13 @@ Requested when the model wants to transfer but the destination is not yet known.
}
```
+This event is emitted only if the assistant did not supply a destination when calling a `transferCall` tool (for example, it did not include a custom parameter like `phoneNumber`). If the assistant includes the destination directly, Vapi will transfer immediately and will not send this webhook.
+
Respond with a destination and optionally a message:
```json
{
- "destination": { "type": "number", "phoneNumber": "+11234567890" },
+ "destination": { "type": "number", "number": "+11234567890" },
"message": { "type": "request-start", "message": "Transferring you now" }
}
```
@@ -295,10 +340,13 @@ Fires whenever a transfer occurs.
### User Interrupted
+Sent when the user interrupts the assistant. The optional `turnId` identifies the LLM turn that was interrupted, matching the `turnId` on `model-output` messages so you can discard that turn's tokens.
+
```json
{
"message": {
- "type": "user-interrupted"
+ "type": "user-interrupted",
+ "turnId": "abc-123"
}
}
```
diff --git a/fern/server-url/server-authentication.mdx b/fern/server-url/server-authentication.mdx
index 16aa9e994..03df2eb89 100644
--- a/fern/server-url/server-authentication.mdx
+++ b/fern/server-url/server-authentication.mdx
@@ -3,131 +3,553 @@ title: Server authentication
slug: server-url/server-authentication
---
-When configuring webhooks for your assistant, you can authenticate your server endpoints using either a secret token, custom headers, or OAuth2. This ensures that only authorized requests from Vapi are processed by your server.
+When configuring webhooks for your assistant, you can authenticate your server endpoints by creating **Custom Credentials** and referencing them using a `credentialId`. This approach provides better security, reusability, and centralized management of your authentication credentials.
-## Credential Configuration
+## Overview
-Credentials can be configured at multiple levels:
+Vapi now uses a **credential-based authentication system** where you:
-1. **Tool Call Level**: Create individual credentials for each tool call
-2. **Assistant Level**: Set credentials directly in the assistant configuration
-3. **Phone Number Level**: Configure credentials for specific phone numbers
-4. **Organization Level**: Manage credentials in the [API Keys page](https://dashboard.vapi.ai/keys)
+1. **Create Custom Credentials** through the dashboard
+2. **Reference credentials by ID** in your server configurations
+3. **Reuse credentials** across multiple assistants, phone numbers, and tools
-The order of precedence is:
-1. Tool call-level credentials
-2. Assistant-level credentials
-3. Phone number-level credentials
-4. Organization-level credentials from the API Keys page
+This replaces the previous inline authentication approach and provides better security and management capabilities.
-## Authentication Methods
+## Quick start
-### Secret Token Authentication
+
+
+ In the Vapi dashboard, navigate to Custom Credentials and create a new credential:
+
+ - Choose **Bearer Token** for simple API key authentication
+ - Enter a descriptive name like "Production API Auth"
+ - Add your API token
+ - Save the credential and note the generated ID (e.g., `cred_abc123`)
+
+
+
+ Reference the credential when configuring server webhooks:
+
+ ```json
+ {
+ "name": "Support Assistant",
+ "server": {
+ "url": "https://api.yourcompany.com/webhook",
+ "credentialId": "cred_abc123"
+ },
+ "model": {
+ "provider": "openai",
+ "model": "gpt-4"
+ }
+ }
+ ```
+
+
+
+ Make a test call - Vapi will now authenticate requests to your webhook using the configured credential.
+
+
+
+## Creating Custom Credentials
+
+### Dashboard Management
+
+Custom Credentials are managed through the Vapi dashboard. Navigate to your organization settings to create and manage authentication credentials.
+
+
+
+
+
+You can create different types of authentication credentials:
+
+- **Bearer Token**: Simple token-based authentication
+- **OAuth 2.0**: OAuth 2.0 client credentials flow
+- **HMAC**: HMAC signature-based authentication
+
+## Authentication Types
+
+### Bearer Token Authentication
+
+The most common authentication method using a bearer token in the Authorization header.
+
+
+
+ In the dashboard, select "Bearer Token" as the authentication type and configure:
+
+ - **Credential Name**: A descriptive name for the credential
+ - **Token**: Your API token or secret
+ - **Header Name**: The header to send the token in (default: `Authorization`)
+ - **Include Bearer Prefix**: Whether to prefix the token with "Bearer "
+
+
+
+ Reference the credential by its ID in your server configuration:
+
+ ```json
+ {
+ "server": {
+ "url": "https://your-server.com/webhook",
+ "credentialId": "cred_abc123"
+ }
+ }
+ ```
+
+
+
+
+
+
+
+#### Standard Authorization Header
+
+The most common Bearer Token configuration uses the standard `Authorization` header with the Bearer prefix:
+
+
+
+ Configure a Bearer Token credential with:
+
+ - **Header Name**: `Authorization` (default)
+ - **Include Bearer Prefix**: Enabled (toggle on)
+ - **Token**: Your API token or secret key
+
+
+
+ Reference this credential in your server setup - Vapi will send your token as `Authorization: Bearer your-token`.
+
+ ```json
+ {
+ "server": {
+ "url": "https://api.example.com/webhook",
+ "credentialId": "cred_bearer_standard_123"
+ }
+ }
+ ```
+
+
+
+ Your server will receive the standard Authorization header:
+
+ ```http
+ POST /webhook HTTP/1.1
+ Host: api.example.com
+ Authorization: Bearer your-api-token-here
+ Content-Type: application/json
+ ```
+
+
+
+This is the recommended approach for modern API authentication and works with most authentication frameworks and libraries.
+
+#### Legacy X-Vapi-Secret Support
+
+For backward compatibility with existing implementations, you can configure a Bearer Token credential to use the `X-Vapi-Secret` header (matching the previous inline `secret` field behavior):
+
+
+
+ Configure a Bearer Token credential with:
+
+ - **Header Name**: `X-Vapi-Secret` (instead of `Authorization`)
+ - **Include Bearer Prefix**: Disabled (toggle off)
+ - **Token**: Your secret token value
+
+
+
+ Reference this credential in your server setup - Vapi will send your token in the `X-Vapi-Secret` header exactly like the previous inline behavior.
+
+
+
+
+
+
+
+### OAuth 2.0 Authentication
+
+For OAuth 2.0 protected endpoints, configure client credentials flow with automatic token refresh.
+
+
+
+ Select "OAuth 2.0" as the authentication type and configure:
+
+ - **Credential Name**: A descriptive name for the credential
+ - **Token URL**: Your OAuth token endpoint
+ - **Client ID**: OAuth client identifier
+ - **Client Secret**: OAuth client secret
+ - **Scope**: Optional scopes to request
+
+
+
+ Use the credential ID in your server setup:
+
+ ```json
+ {
+ "server": {
+ "url": "https://your-server.com/webhook",
+ "credentialId": "cred_oauth_xyz789"
+ }
+ }
+ ```
+
+
+
+
+
+
+
+#### OAuth 2.0 Flow
+
+1. Vapi makes a token request to your OAuth endpoint with client credentials
+2. Your server validates the credentials and returns an access token
+3. Vapi includes the access token in the Authorization header for webhook requests
+4. When tokens expire, Vapi automatically requests new ones
-The simplest way to authenticate webhook requests is using a secret token. Vapi will include this token in the `X-Vapi-Signature` header of each request.
+#### Token Response Format
-#### Configuration
+Your OAuth server should return:
```json
+{
+ "access_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...",
+ "token_type": "Bearer",
+ "expires_in": 3600
+}
+```
+
+### HMAC Authentication
+
+For maximum security, use HMAC signature-based authentication to verify request integrity.
+
+
+
+ Select "HMAC" as the authentication type and configure:
+
+ - **Credential Name**: A descriptive name for the credential
+ - **Secret Key**: Your HMAC secret key
+ - **Algorithm**: Hash algorithm (SHA256, SHA1, etc.)
+ - **Signature Header**: Header name for the signature (e.g., `x-signature`)
+ - **Timestamp Header**: Optional timestamp header for replay protection
+ - **Payload Format**: How to format the payload for signing
+
+
+
+ Reference the HMAC credential:
+
+ ```json
+ {
+ "server": {
+ "url": "https://your-server.com/webhook",
+ "credentialId": "cred_hmac_456"
+ }
+ }
+ ```
+
+
+
+
+
+
+
+## Using Credentials
+
+### In Assistant Configuration
+
+Reference credentials in your assistant's server configuration:
+
+
+```json title="API Request"
{
"server": {
- "url": "https://your-server.com/webhook",
- "secret": "your-secret-token"
+ "url": "https://api.example.com/webhook",
+ "credentialId": "cred_bearer_auth_123"
}
}
```
-### Custom Headers Authentication
+```typescript title="TypeScript SDK"
+import { VapiClient } from "@vapi-ai/server-sdk";
-For more complex authentication scenarios, you can configure custom headers that Vapi will include with each webhook request.
+const client = new VapiClient({ token: process.env.VAPI_API_KEY });
-This could include short lived JWTs/API Keys passed along via the Authorization header, or any other header that your server checks for.
+const assistant = await client.assistants.create({
+ server: {
+ url: "https://api.example.com/webhook",
+ credentialId: "cred_bearer_auth_123"
+ },
+ // ... other assistant config
+});
+```
-#### Configuration
+```python title="Python SDK"
+from vapi import Vapi
-```json
+client = Vapi(token=os.getenv("VAPI_API_KEY"))
+
+assistant = client.assistants.create(
+ server={
+ "url": "https://api.example.com/webhook",
+ "credentialId": "cred_bearer_auth_123"
+ }
+ # ... other assistant config
+)
+```
+
+
+
+
+
+
+### In Phone Number Configuration
+
+Assign credentials to phone numbers for incoming call authentication:
+
+
+```json title="API Request"
{
+ "phoneNumber": "+1234567890",
"server": {
- "url": "https://your-server.com/webhook",
- "headers": {
- "Authorization": "Bearer your-api-key",
- "Custom-Header": "custom-value"
- }
+ "url": "https://api.example.com/calls",
+ "credentialId": "cred_oauth_456"
}
}
```
-### OAuth2 Authentication
+```typescript title="TypeScript SDK"
+const phoneNumber = await client.phoneNumbers.create({
+ phoneNumber: "+1234567890",
+ server: {
+ url: "https://api.example.com/calls",
+ credentialId: "cred_oauth_456"
+ }
+});
+```
+
+
+
+
+
-For OAuth2-protected webhook endpoints, you can configure OAuth2 credentials that Vapi will use to obtain and refresh access tokens.
+### In Tool Configuration
-#### Configuration (at the assistant-level)
+Secure your function tool endpoints with credentials:
-```json
+
+```json title="API Request"
{
- "server": {
- "url": "https://your-server.com/webhook"
- },
- "credentials": [
- {
- "provider": "webhook",
- "authenticationPlan": {
- "type": "oauth2",
- "url": "https://your-server.com/oauth/token",
- "clientId": "your-client-id",
- "clientSecret": "your-client-secret",
- "scope": "optional, only needed to specify which scopes to request access for"
+ "type": "function",
+ "function": {
+ "name": "get_weather",
+ "description": "Get current weather",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": { "type": "string" }
}
}
- ]
+ },
+ "server": {
+ "url": "https://api.example.com/weather",
+ "credentialId": "cred_hmac_789"
+ }
}
```
-#### Configuration (via our Dashboard)
+```typescript title="TypeScript SDK"
+const tool = await client.tools.create({
+ type: "function",
+ function: {
+ name: "get_weather",
+ description: "Get current weather",
+ parameters: {
+ type: "object",
+ properties: {
+ location: { type: "string" }
+ }
+ }
+ },
+ server: {
+ url: "https://api.example.com/weather",
+ credentialId: "cred_hmac_789"
+ }
+});
+```
+
+
+## Credential Management
+
+### Dashboard Features
+
+The Custom Credentials dashboard provides:
+
+- **Credential Creation**: Create new authentication credentials
+- **Credential Editing**: Modify existing credential configurations
+- **Credential Deletion**: Remove unused credentials
+- **Usage Tracking**: See where credentials are being used
+
+
+
+
+
+### Best Practices
+
+
+**Credential Naming**: Use descriptive names like "Production API Key" or "Staging OAuth" to easily identify credentials.
+
+
+
+**Credential Rotation**: Regularly rotate credentials for enhanced security. Update the credential in the dashboard without changing your configurations.
+
+
+
+**Credential Security**: Store credential secrets securely. Once created, secrets are encrypted and cannot be viewed in the dashboard.
+
+
+### Migration from Inline Authentication
+
+If you're currently using inline authentication, migrate to the credential system:
-
- Go to [https://dashboard.vapi.ai/keys](https://dashboard.vapi.ai/keys) to manage your OAuth2 credentials.
+
+ For each inline authentication configuration, create a matching Custom Credential in the dashboard:
+
+ - **For `secret` field**: Create a Bearer Token credential with header `X-Vapi-Secret` and no Bearer prefix (see [Legacy X-Vapi-Secret Support](#legacy-x-vapi-secret-support))
+ - **For `headers` field**: Create a Bearer Token credential with the appropriate header name
+ - **For OAuth configurations**: Create an OAuth 2.0 credential
+
+
+
+ Replace inline authentication with `credentialId` references:
+
+ **Before (inline secret):**
+ ```json
+ {
+ "server": {
+ "url": "https://api.example.com/webhook",
+ "secret": "your-secret-token"
+ }
+ }
+ ```
+
+ **After (credential reference):**
+ ```json
+ {
+ "server": {
+ "url": "https://api.example.com/webhook",
+ "credentialId": "cred_x_vapi_secret_123"
+ }
+ }
+ ```
+
+ Your server will continue receiving the same `X-Vapi-Secret` header with identical behavior.
+
+
+
+ Verify that your webhooks continue working with the new credential system. The authentication behavior should be identical to your previous inline configuration.
-
-
-
+## Common Use Cases
-#### OAuth2 Flow
+### Single Credential for Multiple Resources
-1. Vapi makes a request to your token endpoint with client credentials (Content-Type `application/x-www-form-urlencoded`)
-2. Your server validates the credentials and returns an access token
-3. Vapi includes the access token in the Authorization header for webhook requests
-4. Your server validates the access token before processing the webhook
-5. When the token expires, Vapi automatically requests a new one
+Reuse the same credential across different components:
+
+
+```json title="Shared Credential Usage"
+{
+ "assistant": {
+ "server": {
+ "url": "https://api.yourcompany.com/assistant-webhook",
+ "credentialId": "cred_production_api_123"
+ }
+ },
+ "phoneNumber": {
+ "server": {
+ "url": "https://api.yourcompany.com/call-webhook",
+ "credentialId": "cred_production_api_123"
+ }
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_info"
+ },
+ "server": {
+ "url": "https://api.yourcompany.com/user-info",
+ "credentialId": "cred_production_api_123"
+ }
+ }
+ ]
+}
+```
+
-#### OAuth2 Token Response Format
+### Environment-Specific Credentials
-Your server should return a JSON response with the following format:
+Use different credentials for staging and production:
-```json
+
+```json title="Staging Environment"
{
- "access_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...",
- "token_type": "Bearer",
- "expires_in": 3600
+ "server": {
+ "url": "https://staging-api.yourcompany.com/webhook",
+ "credentialId": "cred_staging_api_456"
+ }
}
```
-Example error response:
+```json title="Production Environment"
+{
+ "server": {
+ "url": "https://api.yourcompany.com/webhook",
+ "credentialId": "cred_production_api_123"
+ }
+}
+```
+
-```json
+### Service-Specific Credentials
+
+Use different credentials for different services:
+
+
+```json title="Multiple Service Credentials"
{
- "error": "invalid_client",
- "error_description": "Invalid client credentials"
+ "assistant": {
+ "server": {
+ "url": "https://auth.yourcompany.com/webhook",
+ "credentialId": "cred_auth_service_789"
+ }
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "payment_processing"
+ },
+ "server": {
+ "url": "https://payments.yourcompany.com/process",
+ "credentialId": "cred_payment_service_321"
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "user_management"
+ },
+ "server": {
+ "url": "https://users.yourcompany.com/manage",
+ "credentialId": "cred_user_service_654"
+ }
+ }
+ ]
}
```
+
+
+## Next steps
-Common error types:
-- `invalid_client`: Invalid client credentials
-- `invalid_grant`: Invalid or expired refresh token
-- `invalid_scope`: Invalid scope requested
-- `unauthorized_client`: Client not authorized for this grant type
+Now that you have authentication configured:
- If using the OAuth2 flow for authenticating tool calls, make sure the server for the tool is the URL that should be hit *after* we have completed the token exchange.
\ No newline at end of file
+- **[Setting server URLs](./setting-server-urls):** Learn where server URLs can be configured
+- **[Server events](./events):** Understand what webhook events Vapi sends
+- **[Local development](./developing-locally):** Set up local webhook testing
\ No newline at end of file
diff --git a/fern/server-url/setting-server-urls.mdx b/fern/server-url/setting-server-urls.mdx
index d450d062b..80f96b08c 100644
--- a/fern/server-url/setting-server-urls.mdx
+++ b/fern/server-url/setting-server-urls.mdx
@@ -43,7 +43,13 @@ Here's a breakdown of where you can set server URLs in Vapi:
- **At Import:** when you [import from Twilio](/api-reference/phone-numbers/import-twilio-number) or [Vonage](/api-reference/phone-numbers/import-vonage-number)
- **Via Update:** you can [update a number](/api-reference/phone-numbers/update-phone-number) already in your account
- The field `phoneNumber.serverUrl` will contain the server URL for the phone number.
+ The phone number's server configuration includes both the URL and optional authentication:
+ - `phoneNumber.server.url`: The webhook endpoint URL
+ - `phoneNumber.server.credentialId`: Authentication credential ID (optional)
+
+
+ For secured webhooks, create [Custom Credentials](./server-authentication) and reference them using `credentialId`.
+
@@ -60,9 +66,15 @@ Here's a breakdown of where you can set server URLs in Vapi:
- At [assistant creation](/api-reference/assistants/create-assistant) (or via an [update](/api-reference/assistants/update-assistant)) you can set the assistant's server URL.
-
- The server URL for an assistant is stored in the `assistant.serverUrl` field.
+ At [assistant creation](/api-reference/assistants/create-assistant) (or via an [update](/api-reference/assistants/update-assistant)) you can set the assistant's server configuration.
+
+ The assistant's server configuration includes:
+ - `assistant.server.url`: The webhook endpoint URL
+ - `assistant.server.credentialId`: Authentication credential ID (optional)
+
+
+ For secured webhooks, use [Custom Credentials](./server-authentication) with `credentialId` instead of inline authentication.
+
@@ -79,9 +91,17 @@ Here's a breakdown of where you can set server URLs in Vapi:
- The server URL for a function call can be found on an assistant at `assistant.model.functions[].serverUrl`.
+ Function tools can be configured with server endpoints via the [tools API](/api-reference/tools) or within assistant configurations.
+
+ The server configuration for function tools includes:
+ - `tool.server.url`: The function endpoint URL
+ - `tool.server.credentialId`: Authentication credential ID (optional)
- You can either set the URL for a function call at [assistant creation](/api-reference/assistants/create-assistant), or in an [assistant update](/api-reference/assistants/update-assistant).
+ You can configure function tool servers at [tool creation](/api-reference/tools/create), [assistant creation](/api-reference/assistants/create-assistant), or in updates.
+
+
+ Use [Custom Credentials](./server-authentication) to secure your function endpoints with `credentialId`.
+
diff --git a/fern/snippets/faq-snippet.mdx b/fern/snippets/faq-snippet.mdx
index 95bad3b5c..0f64b66d3 100644
--- a/fern/snippets/faq-snippet.mdx
+++ b/fern/snippets/faq-snippet.mdx
@@ -20,26 +20,21 @@ You can bring your own custom models for any part of the pipeline.
Everything is interchangeable, mix & match to suit your usecase.
-
+
-You could (and the person writing this right now did, from scratch) — but there are good reasons for not doing so.
+Vapi solves problems of orchestration and call quality that dramatically improve the experience of your users. We've built extremely low-latency, highly intelligent steps that work with multiple model providers.
-Writing a great realtime voice AI application from scratch is a fairly challenging task (more on those challenges [here](/challenges-of-realtime-conversation)). Most of these challenges are not apparent until you face them, then you realize you are 3 weeks into a rabbit hole that may take months to properly solve out of.
-
-Think of Vapi as hiring a software engineering team for this hard problem, while you focus on what uniquely generates value for your voice AI application.
-
----
-
-But to address cost, the vast majority of cost in running your application will come from provider cost (Speect-to-text, LLM, Text-to-speech) direct with vendors (Deepgram, OpenAI, ElevenLabs, etc) — where we add no fee (vendor cost passes-through). These would have to be incurred anyway.
+The vast majority of cost in running your application will come from provider cost (Speech-to-text, LLM, Text-to-speech) direct with vendors (Deepgram, OpenAI, ElevenLabs, etc) — where we add no fee (vendor cost passes-through). These would have to be incurred anyway.
Vapi only charges its small fee on top of these for the continuous maintenance & improvement of these hardest components of your system (which would have costed you time to write/maintain).
No matter what, some cost is inescapable (in money, time, etc) to solve this challenging technical problem.
-Our focus is solely on foundational Voice AI orchestration, & it’s what we put our full time and resources into.
+
To learn more about Vapi’s pricing, you can visit our [pricing page](/pricing).
+
diff --git a/fern/snippets/quickstart/dashboard/assistant-setup-inbound.mdx b/fern/snippets/quickstart/dashboard/assistant-setup-inbound.mdx
index 81018e2e6..1b73615db 100644
--- a/fern/snippets/quickstart/dashboard/assistant-setup-inbound.mdx
+++ b/fern/snippets/quickstart/dashboard/assistant-setup-inbound.mdx
@@ -58,8 +58,8 @@
Before we proceed, we can set our [provider key](/customization/provider-keys) for OpenAI (this is just your OpenAI secret key).
- You can see all of your provider keys in the "Provider Keys" dashboard tab. You can also go
- directly to [dashboard.vapi.ai/keys](https://dashboard.vapi.ai/keys).
+ You can see all of your provider keys in the "Integrations" dashboard tab. You can also go
+ directly to [dashboard.vapi.ai/settings/integrations](https://dashboard.vapi.ai/settings/integrations).
Vapi uses [provider keys](/customization/provider-keys) you provide to communicate with LLM, TTS, & STT vendors on your behalf. It is most ideal that we set keys for the vendors we intend to use ahead of time.
diff --git a/fern/squads.mdx b/fern/squads.mdx
index c8e6eb7f2..af316eb7c 100644
--- a/fern/squads.mdx
+++ b/fern/squads.mdx
@@ -4,12 +4,14 @@ subtitle: Use Squads to handle complex workflows and tasks.
slug: squads
---
-Sometimes, complex workflows are easier to manage with multiple assistants.
-You can think of each assistant in a Squad as a leg of a conversation tree.
-For example, you might have one assistant for lead qualification, which transfers to another for booking an appointment if they’re qualified.
+Squads let you break complex workflows into multiple specialized assistants that hand off to each other during a conversation. Each assistant in a Squad handles a specific part of your workflow; for example, one assistant for lead qualification that transfers to another for appointment booking.
-Prior to Squads you would put all functionality in one assistant, but Squads were added to break up the complexity of larger prompts into smaller specialized assistants with specific tools and fewer goals.
-Squads enable calls to transfer assistants mid-conversation, while maintaining full conversation context.
+**Why use Squads?** Large, all-in-one assistants with lengthy prompts and extensive context lead to:
+- **Higher hallucination rates** - Models lose focus with too many and potentially conflicting instructions
+- **Increased costs** - Longer prompts consume more tokens per request
+- **Greater latency** - Processing large contexts takes more time, slowing your assistant's responses.
+
+Squads solve this by splitting complex prompts into focused assistants with specific tools and clear goals, while maintaining full conversation context across handoffs.
View all configurable properties in the [API Reference](/api-reference/squads/create-squad).
@@ -17,11 +19,14 @@ Squads enable calls to transfer assistants mid-conversation, while maintaining f
## Usage
-To use Squads, you can create a `squad` when starting a call and specify `members` as a list of assistants and destinations.
-The first member is the assistant that will start the call, and assistants can be either persistent or transient.
+To use Squads, you can create a `squad` when starting a call and specify `members` as a list of assistants and destinations. Assistants can be either persistent or transient.
+
+
+The first member is the assistant that will start the call.
+
+
+We recommend using [Handoff Tools](/tools/handoff) to specify which destinations the current assistant can hand off to, and when to hand off to each assistant. Each assistant within the squad can use its saved handoff tools as well as handoff tools from Assistant Overrides (see below).
-Each assistant should be assigned the relevant assistant transfer destinations.
-Transfers are specified by assistant name and are used when the model recognizes a specific trigger.
```json
{
@@ -29,17 +34,30 @@ Transfers are specified by assistant name and are used when the model recognizes
"members": [
{
"assistantId": "information-gathering-assistant-id",
- "assistantDestinations": [{
- "type": "assistant",
- "assistantName": "Appointment Booking",
- "message": "Please hold on while I transfer you to our appointment booking assistant.",
- "description": "Transfer the user to the appointment booking assistant after they say their name."
- }],
},
{
"assistant": {
"name": "Appointment Booking",
- ...
+ "model": {
+ "provider": "openai",
+ "model": "gpt-4o",
+ "toolIds": ["handoff-tool-id"],
+ "tools": [
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantId": "assistant-123",
+ "description": "Call this tool when the customer wants to talk about pricing"
+ }
+ ],
+ "function": {
+ "name": "handoff_to_assistant_123"
+ }
+ }
+ ]
+ },
},
}
]
@@ -47,12 +65,120 @@ Transfers are specified by assistant name and are used when the model recognizes
}
```
+## Overrides
+
+### Assistant Overrides
+To override the configuration of a saved assistant without modifying the underlying assistant, use `assistantOverrides` to alter individual assistants. For example, if you have assistants in a squad with different voices, you can use `assistantOverrides` to make sure all of the assistants are using the same voice without changing the assistant (in case it's being used in another squad).
+
+```json
+{
+ "squad": {
+ "members": [
+ {
+ "assistant": {
+ "name": "Appointment Booking",
+ "voice": {
+ "provider": "vapi",
+ "voiceId": "Elliot",
+ },
+ },
+ },
+ {
+ "assistantId": "saved-assistant-id",
+ "assistantOverrides": {
+ "voice": {
+ "provider": "vapi",
+ "voiceId": "Elliot",
+ },
+ }
+ },
+ ]
+ }
+}
+```
+
+You may also define inline tools via `assistantOverrides` through the `tools:append` array, so that the assistant will only hand off if it is a part of this squad.
+```json
+{
+ "squad": {
+ "members": [
+ {
+ "assistant": {
+ "name": "Appointment Booking",
+ "voice": {
+ "provider": "vapi",
+ "voiceId": "Elliot",
+ },
+ },
+ },
+ {
+ "assistantId": "saved-assistant-id",
+ "assistantOverrides": {
+ "tools:append": [
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantId": "assistant-123",
+ "description": "Call this tool when the customer wants to talk about pricing"
+ }
+ ],
+ "function": {
+ "name": "handoff_to_assistant_123"
+ }
+ }
+ ]
+ }
+ },
+ ]
+ }
+}
+```
+
+
+### Member Overrides
+To override the configuration of _all_ assistants in a squad without modifying the underlying assistants, use the `memberOverrides`.
+
+Note: This is `squadOverrides` for the [`assistant-request`](/api-reference/webhooks/server-message#response.body.messageResponse.AssistantRequest.squadOverrides) webhook response.
+
+
+```json
+{
+ "squad": {
+ "members": [
+ {
+ "assistant": {
+ "name": "Appointment Booking",
+ "voice": {
+ "provider": "vapi",
+ "voiceId": "Elliot",
+ },
+ },
+ },
+ {
+ "assistantId": "saved-assistant-id",
+ },
+ ],
+ "memberOverrides": {
+ "voice": {
+ "provider": "vapi",
+ "voiceId": "Elliot",
+ },
+ }
+ }
+}
+```
## Best Practices
-The following are some best practices for using Squads to reduce errors:
+**Keep assistants focused** - Each assistant should have a single, well-defined responsibility with 1-3 goals maximum. Assign only the tools needed for that specific task.
+
+**Minimize squad size** - Try to reduce the number of squad members. Only split into separate assistants when there's a clear functional boundary (lead qualification → sales → booking).
+
+**Define clear handoff conditions** - Write specific handoff descriptions that state exact trigger conditions and what information to collect before transferring. Make sure to specify this in the assistant's prompt and/or tool description.
+
+**Engineer context carefully** - Use [context engineering](/tools/handoff#context-engineering) to control what conversation history is passed between assistants. As the context grows throughout the call, you may want to limit message history to reduce tokens, improve performance, and prevent context poisoning. Utilize [variable extraction](/tools/handoff#variable-extraction) to save information and generate summaries during a handoff to pass to other assistants.
+
-- Group assistants by closely related tasks
-- Create as few assistants as possible to reduce complexity
-- Make sure descriptions for transfers are clear and concise
diff --git a/fern/squads/examples/clinic-triage-scheduling-handoff-tool.mdx b/fern/squads/examples/clinic-triage-scheduling-handoff-tool.mdx
new file mode 100644
index 000000000..870b4efae
--- /dev/null
+++ b/fern/squads/examples/clinic-triage-scheduling-handoff-tool.mdx
@@ -0,0 +1,209 @@
+---
+title: Clinic triage with handoff tools
+subtitle: Route patients between triage, emergency, and scheduler assistants using handoff tools for seamless transfers
+slug: squads/examples/clinic-triage-scheduling-handoff-tool
+description: Build a multi-assistant clinic experience with specialized assistants for triage, emergency handling, and scheduling using handoff tools.
+---
+
+## Overview
+
+Build a multi-assistant clinic system using handoff tools for seamless patient routing: a triage assistant assesses symptoms and routes patients to either emergency care or appointment scheduling based on their needs.
+
+**System Capabilities:**
+* Intelligent triage assessment with handoff routing
+* Emergency detection → immediate handoff to emergency assistant
+* Appointment scheduling with provider matching
+* Context-preserving transfers between specialized assistants
+
+**What You'll Build:**
+* Triage assistant with handoff tools for routing decisions
+* Emergency assistant for urgent medical situations
+* Scheduler assistant for appointment management
+
+## 1. Create individual assistants
+
+Create three specialized assistants that work together using handoff tools for seamless patient routing.
+
+### Triage Assistant
+
+The main entry point that assesses patient needs and routes to appropriate care:
+
+
+```json title="Triage Assistant"
+{
+ "name": "Triage",
+ "firstMessage": "Hello, how can I help you today?",
+ "model": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "system",
+ "content": "[System context]\nYou are part of a multi-agent system designed to make agent coordination and execution easy. \nAgents uses two primary abstraction: **Agents** and **Handoffs**. An agent encompasses \ninstructions and tools and can hand off a conversation to another agent when appropriate. \nHandoffs are achieved by calling a handoff function, generally named `handoff_to_`. \nHandoffs between agents are handled seamlessly in the background; do not mention or draw \nattention to these handoffs in your conversation with the user.\n\n\n[Identity] \nYou are a simple and efficient medical triage assistant. Your role is to ask the patient for their name and then determine if they need to be connected to a Scheduler for setting up an appointment or an Emergency line for urgent assistance.\n\n[Style] \n- Use a calm and professional tone. \n- Be concise and clear in communication. \n- Display empathy where needed.\n\n[Response Guidelines] \n- Begin with a polite greeting. \n- Request the patient's full name. \n- Confirm the appointment scheduling or emergency request. \n- Use simple language for clarity.\n\n[Task & Goals] \n1. Greet the user politely. \n2. Ask the user, \"Can I please have your full name?\" \n3. Ask if they want to manage an appointment or if they are experiencing an emergency.\n4. Determine user intent based on their need: \n - If the user expresses the need to schedule an appointment, use handoff_to_Scheduler tool\n - If the user indicates it's an emergency, use handoff_to_Emergency tool\n\n[Error Handling / Fallback] \n- If user input is unclear regarding their intent, ask, \"Could you please specify if you need to schedule an appointment or if this is an emergency?\" \n- If the system experiences an error, apologize and say, \"I am experiencing some technical issues. Please try again or contact support.\""
+ }
+ ],
+ "provider": "openai",
+ "tools": [
+ {
+ "type": "handoff",
+ "function": {
+ "name": "handoff_to_Scheduler",
+ "description": "Use it when user needs help managing their appointment.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "destination": {
+ "type": "string",
+ "enum": [""]
+ },
+ "patientName": {
+ "type": "string",
+ "description": "Full name of the patient"
+ }
+ },
+ "required": ["destination", "patientName"]
+ }
+ },
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantId": "",
+ "contextEngineeringPlan": {
+ "type": "all"
+ }
+ }
+ ]
+ },
+ {
+ "type": "handoff",
+ "function": {
+ "name": "handoff_to_Emergency",
+ "description": "Use it when user is having an emergency.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "destination": {
+ "type": "string",
+ "enum": [""]
+ },
+ "patientName": {
+ "type": "string",
+ "description": "Full name of the patient"
+ }
+ },
+ "required": ["destination", "patientName"]
+ }
+ },
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantId": "",
+ "contextEngineeringPlan": {
+ "type": "all"
+ }
+ }
+ ]
+ }
+ ]
+ }
+}
+```
+
+
+### Emergency Assistant
+
+Handles urgent medical situations with immediate care protocols. Uses the `{{patientName}}` variable passed from the triage handoff to provide personalized emergency care:
+
+
+```json title="Emergency Assistant"
+{
+ "name": "Emergency",
+ "firstMessage": "Hello {{patientName}}, I understand this is an emergency. I'm connecting you with our emergency protocol immediately.",
+ "model": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "system",
+ "content": "[Identity]\nYou are an emergency medical protocol assistant for a healthcare clinic. Your role is to handle urgent medical situations with immediate care and clear instructions.\n\n[Patient Information]\n- Patient Name: {{patientName}}\n- Use the patient's name to personalize your responses and maintain continuity from the triage conversation\n\n[Style]\n- Use a calm but urgent tone\n- Be direct and clear\n- Provide immediate actionable guidance\n- Show empathy while maintaining efficiency\n- Address the patient by name when appropriate\n\n[Response Guidelines]\n- Acknowledge the emergency immediately and use the patient's name\n- Gather essential information quickly (location, nature of emergency)\n- Provide immediate safety instructions if applicable\n- Connect to emergency services or on-call medical staff\n- Keep interactions brief but thorough\n\n[Task & Goals]\n1. Acknowledge the emergency situation and greet the patient by name\n2. Quickly assess the severity and type of emergency\n3. Provide immediate safety instructions if needed\n4. Connect the patient to appropriate emergency resources\n5. Document the emergency for follow-up using the patient's name\n\n[Safety Protocols]\n- For life-threatening emergencies: Direct to call 911 immediately\n- For urgent but non-life-threatening: Connect to on-call medical staff\n- Always prioritize patient safety over administrative procedures"
+ }
+ ],
+ "provider": "openai"
+ }
+}
+```
+
+
+### Scheduler Assistant
+
+Manages appointment booking and provider coordination. Uses the `{{patientName}}` variable passed from the triage handoff to provide personalized scheduling service:
+
+
+```json title="Scheduler Assistant"
+{
+ "name": "Scheduler",
+ "firstMessage": "Hello {{patientName}}, I'll help you schedule your appointment. Let me check our available times.",
+ "model": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "system",
+ "content": "[Identity]\nYou are a clinic scheduling assistant. Your role is to help patients book, modify, or cancel appointments with healthcare providers.\n\n[Patient Information]\n- Patient Name: {{patientName}}\n- Use the patient's name throughout the conversation to provide personalized service\n- The patient has already been triaged and is ready for appointment scheduling\n\n[Style]\n- Use a friendly and professional tone\n- Be patient and accommodating\n- Provide clear options and confirmations\n- Show flexibility with scheduling needs\n- Address the patient by name to create a welcoming experience\n\n[Response Guidelines]\n- Greet the patient warmly using their name\n- Understand their scheduling needs (new appointment, reschedule, cancel)\n- Offer available time slots\n- Confirm appointment details using the patient's name\n- Provide preparation instructions if needed\n\n[Task & Goals]\n1. Greet the patient by name and determine the type of appointment needed\n2. Check provider availability\n3. Offer suitable time slots\n4. Collect any additional patient information needed\n5. Confirm appointment details with the patient's name\n6. Send confirmation and preparation instructions\n\n[Scheduling Rules]\n- Patient name is already known ({{patientName}}) - confirm contact information\n- Verify insurance information if required\n- Provide clear appointment details (date, time, provider, location)\n- Offer reminder preferences (call, text, email)\n- Use the patient's name in all confirmations and communications"
+ }
+ ],
+ "provider": "openai",
+ "toolIds": ["BOOKING_TOOL_ID", "PROVIDER_LOOKUP_ID", "CALENDAR_INTEGRATION_ID"]
+ }
+}
+```
+
+
+## 2. Configure handoff tools
+
+The Triage assistant uses handoff tools to route patients to the appropriate specialist:
+
+**Handoff Configuration:**
+- **`handoff_to_Emergency`**: Triggered when patient indicates emergency or urgent symptoms
+- **`handoff_to_Scheduler`**: Used for routine appointment scheduling needs
+- **Context preservation**: Full conversation history transfers with `"type": "all"`
+
+**Key Features:**
+- Seamless background transfers (users don't see handoff mechanics)
+- Context-aware routing based on patient responses
+- Specialized assistant UUIDs for precise routing
+- Automatic conversation continuity
+- **Variable extraction**: Patient name is automatically extracted and passed to receiving assistants
+- **Personalized experience**: Receiving assistants greet patients by name using `{{patientName}}` variable
+
+
+Replace the empty `assistantId` values in the handoff tool destinations with the actual assistant IDs after creating each assistant.
+
+
+## 3. Test
+
+Test the handoff system with different patient scenarios:
+
+**Routine Appointment Scenarios:**
+- "I need to schedule a checkup"
+- "Can I reschedule my appointment?"
+- "I'd like to see Dr. Smith next week"
+
+**Emergency Scenarios:**
+- "I'm having chest pain"
+- "My child has a high fever"
+- "I think I broke my arm"
+
+**Expected Flow:**
+1. Patient calls → Triage assistant answers
+2. Triage collects name and assesses need
+3. Handoff triggers automatically based on patient response
+4. **Patient name is automatically extracted and passed as a variable**
+5. Emergency or Scheduler assistant takes over with full context and patient name
+
+## 4. Next steps
+
+Now that you have a working handoff-based clinic system:
+
+- **Advanced Handoffs**: [Handoff Tools](/tools/handoff)
+- **Custom Tools**: [Scheduling Tools](/tools/custom-tools)
+- **Calendar Integration**: [Google Calendar](/tools/google-calendar)
+- **Emergency Protocols**: [Call Forwarding](/call-forwarding)
+
diff --git a/fern/squads/examples/clinic-triage-scheduling.mdx b/fern/squads/examples/clinic-triage-scheduling.mdx
new file mode 100644
index 000000000..42cff12a6
--- /dev/null
+++ b/fern/squads/examples/clinic-triage-scheduling.mdx
@@ -0,0 +1,181 @@
+---
+title: Clinic triage and scheduling squad
+subtitle: Route patients between triage, emergency, and scheduler assistants with context-preserving transfers
+slug: squads/examples/clinic-triage-scheduling
+description: Build a multi-assistant clinic experience with specialized assistants for triage, emergency handling, and scheduling using Squads.
+---
+
+## Overview
+
+Compose multiple assistants into a Squad for safe, specialized healthcare flows: a triage assistant assesses symptoms, an emergency assistant handles urgent cases, and a scheduler books appointments.
+
+**Squad Capabilities:**
+* Structured triage evaluation and safety gates
+* Emergency detection → immediate handoff
+* Provider matching and scheduling tools
+* Transfers preserve full conversation context
+
+## 1. Define members
+
+
+```json title="Example squad payload"
+{
+ "members": [
+ { "assistant": { "name": "Triage", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Medical triage assistant. Identify red flags."}] }, "firstMessage": "Hello, how can I help you today?", "firstMessageMode": "assistant-speaks-first" } },
+ { "assistant": { "name": "Emergency", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Emergency protocol assistant. Keep interaction brief and connect immediately."}] } } },
+ { "assistant": { "name": "Scheduler", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Clinic scheduler. Offer next available slots, then confirm."}] }, "toolIds": ["BOOK_TOOL_ID", "PROVIDER_LOOKUP_ID"] } }
+ ]
+}
+```
+
+
+## 2. Configure transfers
+
+- From Triage → Emergency when red flags detected
+- From Triage → Scheduler for routine care
+- Warm-transfer with a short summary for human escalation
+
+## 3. Implement
+
+
+
+ ```typescript title="create web call with transient squad"
+ import { VapiClient } from "@vapi-ai/server-sdk";
+
+ const vapi = new VapiClient({ token: process.env.VAPI_API_KEY! });
+
+ await vapi.calls.create({
+ transport: { type: "web" },
+ squad: {
+ members: [
+ {
+ assistant: {
+ name: "Triage",
+ model: {
+ provider: "openai",
+ model: "gpt-4o",
+ messages: [{ role: "system", content: "Medical triage assistant. Identify red flags." }],
+ },
+ firstMessage: "Hello, how can I help you today?",
+ firstMessageMode: "assistant-speaks-first",
+ },
+ },
+ {
+ assistant: {
+ name: "Emergency",
+ model: {
+ provider: "openai",
+ model: "gpt-4o",
+ messages: [{ role: "system", content: "Emergency protocol assistant. Keep interaction brief and connect immediately." }],
+ },
+ },
+ },
+ {
+ assistant: {
+ name: "Scheduler",
+ model: {
+ provider: "openai",
+ model: "gpt-4o",
+ messages: [{ role: "system", content: "Clinic scheduler. Offer next available slots, then confirm." }],
+ },
+ },
+ },
+ ],
+ },
+ });
+ ```
+
+ ```typescript title="create phone call with transient squad"
+ await vapi.calls.create({
+ phoneNumberId: "YOUR_PHONE_NUMBER_ID",
+ customer: { number: "+15551234567" },
+ squad: { /* same squad as above */ members: [] },
+ });
+ ```
+
+ ```typescript title="create and reuse a squad (optional)"
+ const squad = await vapi.squads.create({
+ name: "Clinic Triage",
+ members: [ /* same members as above */ ],
+ });
+
+ await vapi.calls.create({ transport: { type: "web" }, squadId: squad.id });
+ ```
+
+
+
+ ```python title="create web call with transient squad"
+ import os
+ from vapi import Vapi
+
+ client = Vapi(token=os.getenv("VAPI_API_KEY"))
+
+ client.calls.create(
+ transport={"type": "web"},
+ squad={
+ "members": [
+ {"assistant": {"name": "Triage", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Medical triage assistant. Identify red flags."}]}, "first_message": "Hello, how can I help you today?", "first_message_mode": "assistant-speaks-first"}},
+ {"assistant": {"name": "Emergency", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Emergency protocol assistant. Keep interaction brief and connect immediately."}]}}},
+ {"assistant": {"name": "Scheduler", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Clinic scheduler. Offer next available slots, then confirm."}]}}},
+ ]
+ },
+ )
+ ```
+
+ ```python title="create phone call with transient squad"
+ client.calls.create(
+ phone_number_id="YOUR_PHONE_NUMBER_ID",
+ customer={"number": "+15551234567"},
+ squad={"members": []}, # same members as above
+ )
+ ```
+
+ ```python title="create and reuse a squad (optional)"
+ squad = client.squads.create(
+ name="Clinic Triage",
+ members=[],  # same members as above
+ )
+ client.calls.create(transport={"type": "web"}, squad_id=squad.id)
+ ```
+
+
+
+ ```bash title="create web call"
+ curl -X POST "https://api.vapi.ai/call/web" \
+ -H "Authorization: Bearer $VAPI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "squad": {
+ "members": [
+ { "assistant": { "name": "Triage", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Medical triage assistant. Identify red flags."}] }, "firstMessage": "Hello, how can I help you today?", "firstMessageMode": "assistant-speaks-first" } },
+ { "assistant": { "name": "Emergency", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Emergency protocol assistant. Keep interaction brief and connect immediately."}] } } },
+ { "assistant": { "name": "Scheduler", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Clinic scheduler. Offer next available slots, then confirm."}] } } }
+ ]
+ }
+ }'
+ ```
+
+
+
+ ```bash title="create phone call"
+ curl -X POST "https://api.vapi.ai/call" \
+ -H "Authorization: Bearer $VAPI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "phoneNumberId": "YOUR_PHONE_NUMBER_ID",
+ "customer": { "number": "+15551234567" },
+ "squad": { "members": [] }
+ }'
+ ```
+
+
+
+## 4. Test
+
+Attach a phone number to the Squad (or start with a Squad when creating a call) and test urgent vs routine scenarios.
+
+## Next steps
+
+- **Tools**: [Custom Tools](/tools/custom-tools)
+- **Scheduling**: [Google Calendar](/tools/google-calendar)
+
diff --git a/fern/squads/examples/ecommerce-order-management.mdx b/fern/squads/examples/ecommerce-order-management.mdx
new file mode 100644
index 000000000..eff0f659b
--- /dev/null
+++ b/fern/squads/examples/ecommerce-order-management.mdx
@@ -0,0 +1,103 @@
+---
+title: E‑commerce order management squad
+subtitle: Separate assistants for orders, returns, and VIP concierge with intelligent transfers
+slug: squads/examples/ecommerce-order-management
+description: Build a multi-assistant experience for order tracking, returns processing, and VIP handling using Squads.
+---
+
+## Overview
+
+Use Squads to split responsibilities: an Orders assistant handles tracking/status, a Returns assistant manages eligibility and labels, and a VIP assistant provides white‑glove support.
+
+**Squad Capabilities:**
+* Order lookup and status updates
+* Return eligibility and label creation
+* VIP routing and concierge service
+* Context‑preserving warm transfers
+
+## 1. Define members
+
+
+```json title="Example squad payload"
+{
+ "members": [
+ { "assistant": { "name": "Orders", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Orders specialist. Handle tracking and delivery questions."}] }, "toolIds": ["ORDER_LOOKUP_ID"], "firstMessage": "Hello, how can I help with your order?", "firstMessageMode": "assistant-speaks-first" } },
+ { "assistant": { "name": "Returns", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Returns specialist. Check eligibility and generate labels."}] }, "toolIds": ["RETURNS_TOOL_ID"] } },
+ { "assistant": { "name": "VIP", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "VIP concierge. Prioritize premium customers and coordinate resolutions."}] } } }
+ ]
+}
+```
+
+
+## 2. Configure transfer rules
+
+- Orders → Returns for return requests
+- Any → VIP for high‑value customers or sentiment issues
+- Warm-transfer summary for human agents if needed
+
+## 3. Implement
+
+
+
+ ```typescript
+ import { VapiClient } from "@vapi-ai/server-sdk";
+ const vapi = new VapiClient({ token: process.env.VAPI_API_KEY! });
+
+ await vapi.calls.create({
+ transport: { type: "web" },
+ squad: {
+ members: [
+ { assistant: { name: "Orders", model: { provider: "openai", model: "gpt-4o", messages: [{ role: "system", content: "Orders specialist. Handle tracking and delivery questions." }] }, firstMessage: "Hello, how can I help with your order?", firstMessageMode: "assistant-speaks-first" } },
+ { assistant: { name: "Returns", model: { provider: "openai", model: "gpt-4o", messages: [{ role: "system", content: "Returns specialist. Check eligibility and generate labels." }] } } },
+ { assistant: { name: "VIP", model: { provider: "openai", model: "gpt-4o", messages: [{ role: "system", content: "VIP concierge. Prioritize premium customers and coordinate resolutions." }] } } }
+ ],
+ },
+ });
+ ```
+
+
+
+ ```python
+ import os
+ from vapi import Vapi
+
+ client = Vapi(token=os.getenv("VAPI_API_KEY"))
+ client.calls.create(
+ transport={"type": "web"},
+ squad={
+ "members": [
+ {"assistant": {"name": "Orders", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Orders specialist. Handle tracking and delivery questions."}]}, "first_message": "Hello, how can I help with your order?", "first_message_mode": "assistant-speaks-first"}},
+ {"assistant": {"name": "Returns", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Returns specialist. Check eligibility and generate labels."}]}}},
+ {"assistant": {"name": "VIP", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "VIP concierge. Prioritize premium customers and coordinate resolutions."}]}}},
+ ]
+ },
+ )
+ ```
+
+
+
+ ```bash
+ curl -X POST "https://api.vapi.ai/call/web" \
+ -H "Authorization: Bearer $VAPI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "squad": {
+ "members": [
+ { "assistant": { "name": "Orders", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Orders specialist. Handle tracking and delivery questions."}] }, "firstMessage": "Hello, how can I help with your order?", "firstMessageMode": "assistant-speaks-first" } },
+ { "assistant": { "name": "Returns", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Returns specialist. Check eligibility and generate labels."}] } } },
+ { "assistant": { "name": "VIP", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "VIP concierge. Prioritize premium customers and coordinate resolutions."}] } } }
+ ]
+ }
+ }'
+ ```
+
+
+
+## 4. Test and validate
+
+Attach a phone number to the Squad and simulate order, return, and VIP scenarios.
+
+## Next steps
+
+- **Custom Tools**: [Build tools](/tools/custom-tools)
+
diff --git a/fern/squads/examples/multilingual-support.mdx b/fern/squads/examples/multilingual-support.mdx
new file mode 100644
index 000000000..f10a670d9
--- /dev/null
+++ b/fern/squads/examples/multilingual-support.mdx
@@ -0,0 +1,100 @@
+---
+title: Multilingual support squad
+subtitle: Use language‑specific assistants with selection and seamless context handoff
+slug: squads/examples/multilingual-support
+description: Build a Squad with dedicated English, Spanish, and French assistants and a language selection entrance flow.
+---
+
+## Overview
+
+Provide structured multilingual support using a Squad: present a short language selection, then route to dedicated EN/ES/FR assistants with tuned prompts and voices.
+
+**Squad Capabilities:**
+* Explicit language choice for clarity
+* Language‑specific prompts and voices
+* Seamless handoffs while preserving context
+
+## 1. Define members
+
+
+```json title="Example squad payload"
+{
+ "members": [
+ { "assistant": { "name": "English Support", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "English support. Direct, friendly, professional."}] }, "voice": {"provider": "azure", "voiceId": "en-US-AriaNeural"}, "firstMessage": "Hello! How can I help you today?", "firstMessageMode": "assistant-speaks-first" } },
+ { "assistant": { "name": "Soporte Español", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Soporte en español. Cálido y respetuoso; usa 'usted' inicialmente."}] }, "voice": {"provider": "azure", "voiceId": "es-ES-ElviraNeural"} } },
+ { "assistant": { "name": "Support Français", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Support français. Poli, courtois et formel."}] }, "voice": {"provider": "azure", "voiceId": "fr-FR-DeniseNeural"} } }
+ ]
+}
+```
+
+
+## 2. Entrance flow
+
+Start with a brief selection (EN/ES/FR). Route to the matching assistant. Optionally auto‑detect and confirm.
+
+## 3. Implement
+
+
+
+ ```typescript
+ import { VapiClient } from "@vapi-ai/server-sdk";
+ const vapi = new VapiClient({ token: process.env.VAPI_API_KEY! });
+
+ await vapi.calls.create({
+ transport: { type: "web" },
+ squad: {
+ members: [
+ { assistant: { name: "English Support", model: { provider: "openai", model: "gpt-4o", messages: [{ role: "system", content: "English support. Direct, friendly, professional." }] }, voice: { provider: "azure", voiceId: "en-US-AriaNeural" }, firstMessage: "Hello! How can I help you today?", firstMessageMode: "assistant-speaks-first" } },
+ { assistant: { name: "Soporte Español", model: { provider: "openai", model: "gpt-4o", messages: [{ role: "system", content: "Soporte en español. Cálido y respetuoso; usa 'usted' inicialmente." }] }, voice: { provider: "azure", voiceId: "es-ES-ElviraNeural" } } },
+ { assistant: { name: "Support Français", model: { provider: "openai", model: "gpt-4o", messages: [{ role: "system", content: "Support français. Poli, courtois et formel." }] }, voice: { provider: "azure", voiceId: "fr-FR-DeniseNeural" } } }
+ ],
+ },
+ });
+ ```
+
+
+
+ ```python
+ import os
+ from vapi import Vapi
+
+ client = Vapi(token=os.getenv("VAPI_API_KEY"))
+ client.calls.create(
+ transport={"type": "web"},
+ squad={
+ "members": [
+ {"assistant": {"name": "English Support", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "English support. Direct, friendly, professional."}]}, "voice": {"provider": "azure", "voiceId": "en-US-AriaNeural"}, "first_message": "Hello! How can I help you today?", "first_message_mode": "assistant-speaks-first"}},
+ {"assistant": {"name": "Soporte Español", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Soporte en español. Cálido y respetuoso; usa 'usted' inicialmente."}]}, "voice": {"provider": "azure", "voiceId": "es-ES-ElviraNeural"}}},
+ {"assistant": {"name": "Support Français", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Support français. Poli, courtois et formel."}]}, "voice": {"provider": "azure", "voiceId": "fr-FR-DeniseNeural"}}},
+ ]
+ },
+ )
+ ```
+
+
+
+ ```bash
+ curl -X POST "https://api.vapi.ai/call/web" \
+ -H "Authorization: Bearer $VAPI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "squad": {
+ "members": [
+ { "assistant": { "name": "English Support", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "English support. Direct, friendly, professional."}] }, "voice": {"provider": "azure", "voiceId": "en-US-AriaNeural"}, "firstMessage": "Hello! How can I help you today?", "firstMessageMode": "assistant-speaks-first" } },
+ { "assistant": { "name": "Soporte Español", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Soporte en español. Cálido y respetuoso; usa 'usted' inicialmente."}] }, "voice": {"provider": "azure", "voiceId": "es-ES-ElviraNeural" } } },
+ { "assistant": { "name": "Support Français", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Support français. Poli, courtois et formel."}] }, "voice": {"provider": "azure", "voiceId": "fr-FR-DeniseNeural" } } }
+ ]
+ }
+ }'
+ ```
+
+
+
+## 4. Test
+
+Create a phone number for the Squad and test each language path.
+
+## Next steps
+
+- **Assistant alternative**: [Multilingual agent](/assistants/examples/multilingual-agent)
+
diff --git a/fern/squads/examples/property-management.mdx b/fern/squads/examples/property-management.mdx
new file mode 100644
index 000000000..3e6b93e30
--- /dev/null
+++ b/fern/squads/examples/property-management.mdx
@@ -0,0 +1,102 @@
+---
+title: Property management routing squad
+subtitle: Route maintenance vs leasing with a router assistant and domain specialists
+slug: squads/examples/property-management
+description: Build a property management Squad with a router assistant plus maintenance and leasing specialists for accurate transfers.
+---
+
+## Overview
+
+Replace visual flows with a Squad: a Router assistant classifies the inquiry and transfers to Maintenance or Leasing specialists. Use a dynamic transfer tool for human escalation.
+
+**Squad Capabilities:**
+* Tenant verification and inquiry classification
+* Maintenance vs leasing specialist assistants
+* Human transfer with warm summary when needed
+
+## 1. Define members
+
+
+```json title="Example squad payload"
+{
+ "members": [
+ { "assistant": { "name": "Router", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Classify tenant inquiries: emergency, maintenance, leasing, rent, general. Transfer accordingly."}] }, "toolIds": ["TENANT_LOOKUP_ID"], "firstMessage": "Thanks for calling. How can I help?", "firstMessageMode": "assistant-speaks-first" } },
+ { "assistant": { "name": "Maintenance", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Maintenance specialist. Collect details and prioritize emergencies."}] } } },
+ { "assistant": { "name": "Leasing", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Leasing specialist. Answer leasing and rent questions."}] } } }
+ ]
+}
+```
+
+
+## 2. Transfers and escalation
+
+- Router → Maintenance for emergency/maintenance
+- Router → Leasing for leasing/rent/general
+- Dynamic transfer tool for human dispatch
+
+## 3. Implement
+
+
+
+ ```typescript
+ import { VapiClient } from "@vapi-ai/server-sdk";
+ const vapi = new VapiClient({ token: process.env.VAPI_API_KEY! });
+
+ await vapi.calls.create({
+ transport: { type: "web" },
+ squad: {
+ members: [
+ { assistant: { name: "Router", model: { provider: "openai", model: "gpt-4o", messages: [{ role: "system", content: "Classify tenant inquiries: emergency, maintenance, leasing, rent, general. Transfer accordingly." }] }, firstMessage: "Thanks for calling. How can I help?", firstMessageMode: "assistant-speaks-first" } },
+ { assistant: { name: "Maintenance", model: { provider: "openai", model: "gpt-4o", messages: [{ role: "system", content: "Maintenance specialist. Collect details and prioritize emergencies." }] } } },
+ { assistant: { name: "Leasing", model: { provider: "openai", model: "gpt-4o", messages: [{ role: "system", content: "Leasing specialist. Answer leasing and rent questions." }] } } }
+ ],
+ },
+ });
+ ```
+
+
+
+ ```python
+ import os
+ from vapi import Vapi
+
+ client = Vapi(token=os.getenv("VAPI_API_KEY"))
+ client.calls.create(
+ transport={"type": "web"},
+ squad={
+ "members": [
+ {"assistant": {"name": "Router", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Classify tenant inquiries: emergency, maintenance, leasing, rent, general. Transfer accordingly."}]}, "first_message": "Thanks for calling. How can I help?", "first_message_mode": "assistant-speaks-first"}},
+ {"assistant": {"name": "Maintenance", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Maintenance specialist. Collect details and prioritize emergencies."}]}}},
+ {"assistant": {"name": "Leasing", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Leasing specialist. Answer leasing and rent questions."}]}}},
+ ]
+ },
+ )
+ ```
+
+
+
+ ```bash
+ curl -X POST "https://api.vapi.ai/call/web" \
+ -H "Authorization: Bearer $VAPI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "squad": {
+ "members": [
+ { "assistant": { "name": "Router", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Classify tenant inquiries: emergency, maintenance, leasing, rent, general. Transfer accordingly."}] }, "firstMessage": "Thanks for calling. How can I help?", "firstMessageMode": "assistant-speaks-first" } },
+ { "assistant": { "name": "Maintenance", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Maintenance specialist. Collect details and prioritize emergencies."}] } } },
+ { "assistant": { "name": "Leasing", "model": {"provider": "openai", "model": "gpt-4o", "messages": [{"role": "system", "content": "Leasing specialist. Answer leasing and rent questions."}] } } }
+ ]
+ }
+ }'
+ ```
+
+
+
+## 4. Test
+
+Assign the Squad to a phone number and test each path.
+
+## Next steps
+
+- **Dynamic transfers**: [How to build](/calls/call-dynamic-transfers)
+
diff --git a/fern/squads/handoff.mdx b/fern/squads/handoff.mdx
new file mode 100644
index 000000000..fcc763470
--- /dev/null
+++ b/fern/squads/handoff.mdx
@@ -0,0 +1,879 @@
+---
+title: Handoff tool
+subtitle: 'Transfer the call to another assistant, squad, or dynamically determined destination.'
+slug: squads/handoff
+---
+
+The handoff tool enables seamless call transfers between assistants in a multi-agent system. This guide covers all configuration patterns, destination types, context management, and advanced features.
+
+## Table of contents
+
+- [Overview](#overview)
+- [System prompt best practices](#system-prompt-best-practices)
+- [Basic configuration](#basic-configuration)
+- [Multiple destinations](#multiple-destinations)
+- [Dynamic handoffs](#dynamic-handoffs)
+- [Squad destinations](#squad-destinations)
+- [Context engineering](#context-engineering)
+- [Variable extraction](#variable-extraction)
+- [Tool messages](#tool-messages)
+- [Rejection plan](#rejection-plan)
+- [Custom function definitions](#custom-function-definitions)
+
+## Overview
+
+The handoff tool transfers calls between assistants during a conversation. You can:
+
+- Transfer to a specific assistant by ID or by name (within a squad)
+- Transfer to an entire squad with a designated entry assistant
+- Support multiple destination options for the AI to choose from
+- Determine the destination dynamically at runtime via a webhook
+- Control what conversation history the next assistant receives
+- Extract structured variables from the conversation for downstream use
+- Configure spoken messages for each phase of the handoff
+- Reject handoff attempts based on conversation state
+
+## System prompt best practices
+
+When using the handoff tool, add this to your system prompt for optimal agent coordination (adapted from the [OpenAI Agents Handoff Prompt](https://openai.github.io/openai-agents-python/ref/extensions/handoff_prompt/)):
+
+```markdown
+# System context
+
+You are part of a multi-agent system designed to make agent coordination and execution easy.
+The system uses two primary abstractions: **Agents** and **Handoffs**. An agent encompasses
+instructions and tools and can hand off a conversation to another agent when appropriate.
+Handoffs are achieved by calling a handoff function, generally named `handoff_to_<assistant_name>`.
+Handoffs between agents are handled seamlessly in the background; do not mention or draw
+attention to these handoffs in your conversation with the user.
+
+# Agent context
+
+{put your agent system prompt here}
+```
+
+## Basic configuration
+
+### Single destination handoff
+
+#### Using assistant ID
+
+```json
+{
+ "tools": [
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantId": "03e11cfe-4528-4243-a43d-6aded66ab7ba",
+ "description": "customer wants to speak with technical support",
+ "contextEngineeringPlan": {
+ "type": "all"
+ }
+ }
+ ]
+ }
+ ]
+}
+```
+
+#### Using assistant name (for squad members)
+
+```json
+{
+ "tools": [
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantName": "TechnicalSupportAgent",
+ "description": "customer needs technical assistance",
+ "contextEngineeringPlan": {
+ "type": "all"
+ }
+ }
+ ]
+ }
+ ]
+}
+```
+
+
+Each assistant destination also supports **assistantOverrides** to override settings on the destination assistant, and an inline **assistant** property to create a transient assistant without saving it first. See the [API reference](/api-reference/tools/create#request.body.HandoffTool.destinations.HandoffDestinationAssistant) for all available properties.
+
+
+## Multiple destinations
+
+### Multiple tools pattern (OpenAI recommended)
+
+Best for OpenAI models -- creates separate tool definitions for each destination:
+
+```json
+{
+ "tools": [
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantId": "sales-assistant-123",
+ "description": "customer wants to learn about pricing or make a purchase",
+ "contextEngineeringPlan": {
+ "type": "all"
+ }
+ }
+ ]
+ },
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantId": "support-assistant-456",
+ "description": "customer needs help with an existing product or service",
+ "contextEngineeringPlan": {
+ "type": "all"
+ }
+ }
+ ]
+ },
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantId": "billing-assistant-789",
+ "description": "customer has questions about invoices, payments, or refunds",
+ "contextEngineeringPlan": {
+ "type": "lastNMessages",
+ "maxMessages": 5
+ }
+ }
+ ]
+ }
+ ]
+}
+```
+
+### Single tool pattern (Anthropic recommended)
+
+Best for Anthropic models -- single tool with multiple destination options:
+
+```json
+{
+ "tools": [
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantId": "03e11cfe-4528-4243-a43d-6aded66ab7ba",
+ "description": "customer wants to learn about pricing or make a purchase"
+ },
+ {
+ "type": "assistant",
+ "assistantName": "support-assistant",
+ "description": "customer needs help with an existing product or service"
+ },
+ {
+ "type": "assistant",
+ "assistantName": "billing-assistant",
+ "description": "customer has questions about invoices, payments, or refunds"
+ }
+ ]
+ }
+ ]
+}
+```
+
+## Dynamic handoffs
+
+### Basic dynamic handoff
+
+The destination is determined at runtime via the `handoff-destination-request` webhook:
+
+```json
+{
+ "tools": [
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "dynamic",
+ "server": {
+ "url": "https://api.example.com/determine-handoff-destination",
+ "headers": {
+ "Authorization": "Bearer YOUR_API_KEY"
+ }
+ }
+ }
+ ]
+ }
+ ]
+}
+```
+
+Your server must respond with a single destination. You can return an `assistantId`, `assistantName` (if using squads), or a transient `assistant`. For example:
+
+```json
+{
+ "destination": {
+ "type": "assistant",
+ "assistantId": "assistant-id",
+ "variableExtractionPlan": {
+ "schema": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "Name of the customer"
+ }
+ },
+ "required": ["name"]
+ }
+ },
+ "contextEngineeringPlan": {
+ "type": "none"
+ }
+ }
+}
+```
+
+If the handoff should not execute, either respond with an empty destination, or provide a custom error. The custom error is added to the message history.
+
+```json
+{
+ "error": "Example custom error message"
+}
+```
+
+### Dynamic handoff with custom parameters
+
+Pass additional context to your webhook for intelligent routing:
+
+```json
+{
+ "tools": [
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "dynamic",
+ "server": {
+ "url": "https://api.example.com/intelligent-routing"
+ }
+ }
+ ],
+ "function": {
+ "name": "handoff_with_context",
+ "description": "Transfer the call to the most appropriate specialist",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "destination": {
+ "type": "string",
+ "description": "Use 'dynamic' to route to the best available agent",
+ "enum": ["dynamic"]
+ },
+ "customerAreaCode": {
+ "type": "number",
+ "description": "Customer's area code for regional routing"
+ },
+ "customerIntent": {
+ "type": "string",
+ "enum": ["new-customer", "existing-customer", "partner"],
+ "description": "Customer type for proper routing"
+ },
+ "customerSentiment": {
+ "type": "string",
+ "enum": ["positive", "negative", "neutral", "escalated"],
+ "description": "Current emotional state of the customer"
+ },
+ "issueCategory": {
+ "type": "string",
+ "enum": ["technical", "billing", "sales", "general"],
+ "description": "Primary category of the customer's issue"
+ },
+ "priority": {
+ "type": "string",
+ "enum": ["low", "medium", "high", "urgent"],
+ "description": "Urgency level of the request"
+ }
+ },
+ "required": ["destination", "customerIntent", "issueCategory"]
+ }
+ }
+ }
+ ]
+}
+```
+
+## Squad destinations
+
+In addition to assistant and dynamic destinations, you can hand off a call to an entire squad. This transfers the caller into a new multi-agent system where the squad's own routing logic takes over.
+
+### Using squad ID
+
+Reference a saved squad by its ID:
+
+```json
+{
+ "tools": [
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "squad",
+ "squadId": "your-squad-id",
+ "description": "customer needs specialized support from the enterprise team",
+ "entryAssistantName": "EnterpriseGreeter",
+ "contextEngineeringPlan": {
+ "type": "userAndAssistantMessages"
+ }
+ }
+ ]
+ }
+ ]
+}
+```
+
+### Using a transient squad
+
+Define the squad inline without saving it first:
+
+```json
+{
+ "tools": [
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "squad",
+ "squad": {
+ "members": [
+ {
+ "assistantId": "greeter-assistant-id",
+ "assistantDestinations": [
+ {
+ "type": "assistant",
+ "assistantName": "SalesSpecialist",
+ "description": "customer is interested in purchasing"
+ }
+ ]
+ },
+ {
+ "assistantId": "sales-assistant-id"
+ }
+ ]
+ },
+ "entryAssistantName": "GreeterAssistant",
+ "description": "route customer to the sales squad"
+ }
+ ]
+ }
+ ]
+}
+```
+
+### Squad destination properties
+
+| Property | Type | Description |
+|----------|------|-------------|
+| **type** | `"squad"` | Required. Identifies this as a squad destination. |
+| **squadId** | string | The ID of a saved squad. Provide either `squadId` or `squad`. |
+| **squad** | object | A transient squad definition. Provide either `squadId` or `squad`. |
+| **entryAssistantName** | string | The name of the assistant to start with. If not provided, the first squad member is used. |
+| **description** | string | Describes when the AI should choose this destination. |
+| **contextEngineeringPlan** | object | Controls what conversation history transfers to the squad. |
+| **variableExtractionPlan** | object | Extracts structured data from the conversation before handoff. |
+| **squadOverrides** | object | Overrides applied to the squad configuration (maps to squad-level `membersOverrides`). |
+
+For the full schema, see the [API reference](/api-reference/tools/create#request.body.HandoffTool.destinations.HandoffDestinationSquad).
+
+## Context engineering
+
+Control what conversation history transfers to the next assistant or squad. Set **contextEngineeringPlan** on any destination.
+
+### All messages (default)
+
+Transfers the entire conversation history:
+
+```json
+{
+ "contextEngineeringPlan": {
+ "type": "all"
+ }
+}
+```
+
+### Last N messages
+
+Transfers only the most recent N messages. Use this to limit context size for performance:
+
+```json
+{
+ "contextEngineeringPlan": {
+ "type": "lastNMessages",
+ "maxMessages": 10
+ }
+}
+```
+
+### User and assistant messages only
+
+Transfers only user and assistant messages, filtering out system messages, tool calls, and tool results. This gives the next assistant a clean view of the conversation without internal implementation details:
+
+```json
+{
+ "contextEngineeringPlan": {
+ "type": "userAndAssistantMessages"
+ }
+}
+```
+
+
+Use `userAndAssistantMessages` when the destination assistant does not need to see tool call history or system prompts from the previous assistant. This produces a cleaner context and reduces token usage.
+
+
+### No context
+
+Starts the next assistant with a blank conversation:
+
+```json
+{
+ "contextEngineeringPlan": {
+ "type": "none"
+ }
+}
+```
+
+## Variable extraction
+
+Extract and pass structured data during handoff. Variables extracted by the handoff tool are available to all subsequent assistants in the conversation chain. When a handoff extracts a variable with the same name as an existing one, the new value replaces the previous value.
+
+### Extraction via `variableExtractionPlan` in destinations
+
+This extraction method makes an OpenAI structured output request to extract variables. Use this when you have multiple destinations, each with different variables that need to be extracted.
+
+```json
+{
+ "tools": [
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantName": "order-processing-assistant",
+ "description": "customer is ready to place an order",
+ "variableExtractionPlan": {
+ "schema": {
+ "type": "object",
+ "properties": {
+ "customerName": {
+ "type": "string",
+ "description": "Full name of the customer"
+ },
+ "email": {
+ "type": "string",
+ "format": "email",
+ "description": "Customer's email address"
+ },
+ "productIds": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "List of product IDs customer wants to order"
+ },
+ "shippingAddress": {
+ "type": "object",
+ "properties": {
+ "street": { "type": "string" },
+ "city": { "type": "string" },
+ "state": { "type": "string" },
+ "zipCode": { "type": "string" }
+ }
+ }
+ },
+ "required": ["customerName", "productIds"]
+ }
+ }
+ }
+ ]
+ }
+ ]
+}
+```
+
+### Variable access patterns
+
+Once extracted, variables are accessible using Liquid template syntax (`{{variableName}}`). The access pattern depends on the schema structure:
+
+| Schema type | Access pattern | Example |
+|-------------|---------------|---------|
+| Simple property | `{{propertyName}}` | `{{customerName}}` |
+| Nested object | `{{object.property}}` | `{{name.first}}`, `{{name.last}}` |
+| Array item | `{{array[index]}}` | `{{zipCodes[0]}}`, `{{zipCodes[1]}}` |
+| Array of objects | `{{array[index].property}}` | `{{people[0].name}}`, `{{people[0].age}}` |
+| Nested array | `{{array[index].nestedArray[index]}}` | `{{people[0].zipCodes[1]}}` |
+
+
+Top-level object properties are extracted as direct global variables. For example, a schema with properties `name` and `age` produces `{{name}}` and `{{age}}` -- not `{{root.name}}`.
+
+
+### Variable aliases
+
+Use **aliases** to create additional variables derived from extracted values. Aliases support Liquid template syntax for transformations and compositions.
+
+```json
+{
+ "variableExtractionPlan": {
+ "schema": {
+ "type": "object",
+ "properties": {
+ "firstName": {
+ "type": "string",
+ "description": "Customer's first name"
+ },
+ "lastName": {
+ "type": "string",
+ "description": "Customer's last name"
+ },
+ "company": {
+ "type": "string",
+ "description": "Customer's company name"
+ }
+ }
+ },
+ "aliases": [
+ {
+ "key": "fullName",
+ "value": "{{firstName}} {{lastName}}"
+ },
+ {
+ "key": "greeting",
+ "value": "Hello {{firstName}}, welcome to {{company}}!"
+ },
+ {
+ "key": "customerCity",
+ "value": "{{addresses[0].city}}"
+ }
+ ]
+ }
+}
+```
+
+Each alias creates a new variable accessible as `{{key}}` during the call and stored in `call.artifact.variableValues` after the call. Alias keys must start with a letter and contain only letters, numbers, or underscores (max 40 characters).
+
+### Extraction via `tool.function`
+
+You can also extract variables through the LLM tool call parameters (in addition to sending these parameters to your server in a `handoff-destination-request` for dynamic handoffs). Include the **destination** parameter with the assistant names or IDs in `enum` -- Vapi uses this to determine where to hand off the call. The `destination` parameter itself is not extracted as a variable. Add `destination` and all other required variables to the schema's `required` array.
+
+```json
+{
+ "tools": [
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantName": "order-processing-assistant",
+ "description": "customer is ready to place an order"
+ }
+ ],
+ "function": {
+ "name": "handoff_to_order_processing_assistant",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "destination": {
+ "type": "string",
+ "description": "The destination to handoff the call to.",
+ "enum": ["order-processing-assistant"]
+ },
+ "customerName": {
+ "type": "string",
+ "description": "Full name of the customer"
+ },
+ "email": {
+ "type": "string",
+ "format": "email",
+ "description": "Customer's email address"
+ },
+ "productIds": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "List of product IDs customer wants to order"
+ },
+ "shippingAddress": {
+ "type": "object",
+ "properties": {
+ "street": { "type": "string" },
+ "city": { "type": "string" },
+ "state": { "type": "string" },
+ "zipCode": { "type": "string" }
+ }
+ }
+ },
+ "required": ["destination", "customerName", "email"]
+ }
+ }
+ }
+ ]
+}
+```
+
+## Tool messages
+
+Configure what the assistant says during each phase of the handoff. Add a **messages** array to the handoff tool to control the spoken responses.
+
+### Message types
+
+| Type | Trigger | Default behavior |
+|------|---------|-----------------|
+| `request-start` | Handoff begins executing | Says a random filler: "Hold on a sec", "One moment", etc. |
+| `request-complete` | Handoff completes successfully | Model generates a response |
+| `request-failed` | Handoff fails | Model generates a response |
+| `request-response-delayed` | Server is slow or user speaks during processing | Says "Sorry, a few more seconds." |
+
+### Example configuration
+
+```json
+{
+ "tools": [
+ {
+ "type": "handoff",
+ "messages": [
+ {
+ "type": "request-start",
+ "content": "Let me transfer you now. One moment please."
+ },
+ {
+ "type": "request-complete",
+ "content": "You're now connected. How can the next specialist help you?"
+ },
+ {
+ "type": "request-failed",
+ "content": "I'm sorry, I wasn't able to complete the transfer. Let me try to help you directly."
+ },
+ {
+ "type": "request-response-delayed",
+ "content": "Still working on the transfer, thank you for your patience.",
+ "timingMilliseconds": 3000
+ }
+ ],
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantId": "your-assistant-id",
+ "description": "transfer to specialist"
+ }
+ ]
+ }
+ ]
+}
+```
+
+### Message properties
+
+**request-start**
+- **content** (string) -- The text the assistant speaks when the handoff begins.
+- **blocking** (boolean, default: `false`) -- When `true`, the tool call waits until the message finishes speaking before executing.
+- **conditions** (array) -- Optional conditions that must match for this message to trigger.
+- **contents** (array) -- Multilingual variants of the content. Overrides `content` when provided.
+
+**request-complete**
+- **content** (string) -- The text the assistant speaks when the handoff completes.
+- **role** (`"assistant"` | `"system"`, default: `"assistant"`) -- When `"assistant"`, the content is spoken aloud. When `"system"`, the content is passed as a system message hint to the model.
+- **endCallAfterSpokenEnabled** (boolean, default: `false`) -- When `true`, the call ends after this message is spoken.
+- **conditions** (array) -- Optional conditions for triggering this message.
+- **contents** (array) -- Multilingual variants.
+
+**request-failed**
+- **content** (string) -- The text the assistant speaks when the handoff fails.
+- **endCallAfterSpokenEnabled** (boolean, default: `false`) -- When `true`, the call ends after this message.
+- **conditions** (array) -- Optional conditions for triggering.
+- **contents** (array) -- Multilingual variants.
+
+**request-response-delayed**
+- **content** (string) -- The text the assistant speaks when the handoff is taking longer than expected.
+- **timingMilliseconds** (number, 100-120000) -- Milliseconds to wait before triggering this message.
+- **conditions** (array) -- Optional conditions for triggering.
+- **contents** (array) -- Multilingual variants.
+
+For the full schema, see the [API reference](/api-reference/tools/create#request.body.HandoffTool.messages).
+
+## Rejection plan
+
+Use **rejectionPlan** to prevent a handoff from executing based on conversation state. When all conditions in the plan match, the tool call is rejected and the rejection message is added to the conversation.
+
+### Regex condition
+
+Match against message content using regular expressions:
+
+```json
+{
+ "tools": [
+ {
+ "type": "handoff",
+ "rejectionPlan": {
+ "conditions": [
+ {
+ "type": "regex",
+ "regex": "(?i)\\b(cancel|stop|nevermind)\\b",
+ "target": {
+ "role": "user",
+ "position": -1
+ }
+ }
+ ]
+ },
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantId": "your-assistant-id",
+ "description": "transfer to billing"
+ }
+ ]
+ }
+ ]
+}
+```
+
+This rejects the handoff if the user's most recent message contains "cancel", "stop", or "nevermind" (case-insensitive).
+
+### Liquid condition
+
+Use Liquid templates for more complex logic. The template must return exactly `"true"` or `"false"`:
+
+```json
+{
+ "rejectionPlan": {
+ "conditions": [
+ {
+ "type": "liquid",
+ "liquid": "{% assign userMsgs = messages | where: 'role', 'user' %}{% if userMsgs.size < 3 %}true{% else %}false{% endif %}"
+ }
+ ]
+ }
+}
+```
+
+This rejects the handoff if fewer than 3 user messages exist in the conversation. Available Liquid variables include `messages` (array of recent messages), `now` (current timestamp), and any assistant variable values.
+
+### Group condition
+
+Combine multiple conditions with `AND` or `OR` logic:
+
+```json
+{
+ "rejectionPlan": {
+ "conditions": [
+ {
+ "type": "group",
+ "operator": "OR",
+ "conditions": [
+ {
+ "type": "regex",
+ "regex": "(?i)\\b(cancel|stop)\\b",
+ "target": { "role": "user" }
+ },
+ {
+ "type": "liquid",
+ "liquid": "{% assign userMsgs = messages | where: 'role', 'user' %}{% if userMsgs.size < 2 %}true{% else %}false{% endif %}"
+ }
+ ]
+ }
+ ]
+ }
+}
+```
+
+
+By default, all top-level conditions in the `conditions` array use AND logic -- all must match for the rejection to trigger. Use a group condition with `operator: "OR"` to reject when any single condition matches.
+
+
+For the full schema, see the [API reference](/api-reference/tools/create#request.body.HandoffTool.rejectionPlan).
+
+## Custom function definitions
+
+Override the default function definition for more control. You can overwrite the function name for each tool to reference in the system prompt, or pass custom parameters in a dynamic handoff request.
+
+```json
+{
+ "tools": [
+ {
+ "type": "handoff",
+ "function": {
+ "name": "handoff_to_department",
+ "description": "Transfer the customer to the appropriate department based on their needs. Only use when explicitly requested or when the current assistant cannot help.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "destination": {
+ "type": "string",
+ "description": "Department to transfer to",
+ "enum": ["sales-team", "technical-support", "billing-department", "management"]
+ },
+ "reason": {
+ "type": "string",
+ "description": "Brief reason for the transfer"
+ },
+ "urgency": {
+ "type": "boolean",
+ "description": "Whether this is an urgent transfer"
+ }
+ },
+ "required": ["destination", "reason"]
+ }
+ },
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantId": "sales-team",
+ "description": "Sales inquiries and purchases"
+ },
+ {
+ "type": "assistant",
+ "assistantId": "technical-support",
+ "description": "Technical issues and support"
+ },
+ {
+ "type": "assistant",
+ "assistantId": "billing-department",
+ "description": "Billing and payment issues"
+ },
+ {
+ "type": "assistant",
+ "assistantId": "management",
+ "description": "Escalations and complaints"
+ }
+ ]
+ }
+ ]
+}
+```
+
+## Best practices
+
+1. **Clear descriptions**: Write specific, actionable descriptions for each destination in your system prompt. Use `tool.function.name` to customize the name of the function to reference in your prompt.
+2. **Context management**: Use `lastNMessages` or `userAndAssistantMessages` to limit context size for performance.
+3. **Model optimization**: Use multiple tools for OpenAI, single tool for Anthropic.
+4. **Variable extraction**: Extract key data before handoff to maintain context across assistants.
+5. **Tool messages**: Add custom `request-start` messages to set caller expectations during transfers.
+6. **Testing**: Test handoff scenarios thoroughly, including edge cases and rejection conditions.
+7. **Monitoring and analysis**: Enable [`artifactPlan.fullMessageHistoryEnabled`](/api-reference/assistants/create#response.body.artifactPlan.fullMessageHistoryEnabled) to capture the complete message history across all handoffs in your artifacts. See [squad artifact behavior](/assistants/call-recording#squad-and-transfer-behavior) for details.
+
+## Troubleshooting
+
+- Ensure assistant IDs are valid and accessible
+- Verify webhook server URLs are reachable and return the proper format
+- Check that required parameters in custom functions match destinations
+- Monitor context size to avoid token limits
+- Test variable extraction schemas with sample data
+- Validate that assistant names exist in the same squad
+- Verify rejection plan conditions use correct regex syntax (remember to double-escape `\\` in JSON)
diff --git a/fern/squads/silent-transfers.mdx b/fern/squads/silent-handoffs.mdx
similarity index 77%
rename from fern/squads/silent-transfers.mdx
rename to fern/squads/silent-handoffs.mdx
index e589f2980..637db598c 100644
--- a/fern/squads/silent-transfers.mdx
+++ b/fern/squads/silent-handoffs.mdx
@@ -1,9 +1,9 @@
---
-title: Silent Transfers
-slug: squads/silent-transfers
+title: Silent Handoffs
+slug: squads/silent-handoffs
---
-- **The Problem**: In traditional AI call flows, when transferring from one agent to another, announcing the transfer verbally can confuse or annoy callers and disrupt the conversation's flow.
-- **The Solution**: Silent transfers keep the call experience _uninterrupted_, so the user doesn’t know multiple assistants are involved. The conversation flows more naturally, boosting customer satisfaction.
+- **The Problem**: In traditional AI call flows, when handing off from one agent to another, announcing the handoff verbally can confuse or annoy callers and disrupt the conversation's flow.
+- **The Solution**: Silent handoffs keep the call experience _uninterrupted_, so the user doesn’t know multiple assistants are involved. The conversation flows more naturally, boosting customer satisfaction.
If you want to allow your call flow to move seamlessly from one assistant to another _without_ the caller hearing `Please hold while we transfer you` here’s what to do:
@@ -11,15 +11,15 @@ If you want to allow your call flow to move seamlessly from one assistant to ano
- Set the assistant's `firstMessage` to an _empty string_.
- Set the assistant's `firstMessageMode` to `assistant-speaks-first-with-model-generated-message`.
-2. **Update the Squad's assistant destinations messages**
- - For every `members[*].assistantDestinations[*]`, set the `message` property to an _empty string_.
+2. **Update the Squad's handoff messages**
+ - For every `members[*].model.tools/toolIds`, unset the `messages` property.
3. **Trigger the Transfer from the Source Assistant**
- - In that assistant’s prompt, include a line instructing it to transfer to the desired assistant:
+ - In that assistant’s prompt, include a line instructing it to hand off to the desired assistant:
```json
- trigger the transferCall tool with 'assistantName' Assistant.
+ trigger the 'handoff' tool with 'assistantName' Assistant.
```
- Replace `'assistantName'` with the exact name of the next assistant.
@@ -32,49 +32,65 @@ If you want to allow your call flow to move seamlessly from one assistant to ano
- **HPMA (Main Assistant)** is talking to the customer. They confirm the order details and then quietly passes the conversation to **HPPA (Payment Assistant)**.
- **HPPA** collects payment details without the customer ever hearing, `We’re now transferring you to the Payment Assistant.` It feels like one continuous conversation.
-- Once payment is done, **HPPA** transfers the call again—this time to **HPMA-SA (Main Sub Assistant)**—which takes over final shipping arrangements.
+- Once payment is done, **HPPA** hands off the call again—this time to **HPMA-SA (Main Sub Assistant)**—which takes over final shipping arrangements.
Everything happens smoothly behind the scenes!
## **Squad and Assistant Configurations**
-Below are the key JSON examples you’ll need. These show how to structure your assistants and squads so they work together for silent transfers.
+Below are the key JSON examples you’ll need. These show how to structure your assistants and squads so they work together for silent handoffs.
### **HP Payment Squad With SubAgent**
- Make sure the `members[*].assistantDestinations[*].message` properties are set to an _empty string_.
+ Make sure the `members[*].model.tools[*].messages` properties are set to `null` or an empty array.
```json
{
"members": [
{
- "assistantId": "2d8e0d13-1b3c-4358-aa72-cf6204d6244e",
- "assistantDestinations": [
- {
- "message": " ",
- "description": "Transfer call to the payment agent",
- "type": "assistant",
- "assistantName": "HPPA"
- }
- ]
+ "name": "HPMA (Main Assistant)",
+ "model": {
+ "provider": "openai",
+ "model": "gpt-4o",
+ "tools": [
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantName": "HPPA"
+ }
+ ],
+ "messages": []
+ }
+ ]
+ },
+ ...
},
{
- "assistantId": "ad1c5347-bc32-4b31-8bb7-6ff5fcb131f4",
- "assistantDestinations": [
- {
- "message": " ",
- "description": "Transfer call to the main sub agent",
- "type": "assistant",
- "assistantName": "HPMA-SA"
- }
- ]
+ "name": "HPPA",
+ "model": {
+ "tools": [
+ {
+ "type": "handoff",
+ "destinations": [
+ {
+ "type": "assistant",
+ "assistantName": "HPMA-SA"
+ }
+ ],
+ "messages": null
+ }
+ ]
+ },
+ ...
},
{
- "assistantId": "f1c258bc-4c8b-4c51-9b44-883ab5e40b2f",
- "assistantDestinations": []
- }
+ "name": "HPMA-SA",
+ ...
+ },
],
"name": "HP Payment Squad With SubAgent"
}
@@ -160,7 +176,7 @@ Answer only questions related to the context.
3.Ask how many widgets the customer would like to purchase.
- Wait for the customer's response.
4.Confirm the order details with the customer.
- - trigger the transferCall tool with Payment `HPPA` Assistant.
+ - trigger the handoff tool with Payment `HPPA` Assistant.
```
### **HPPA (Payment Assistant Prompt)**
@@ -200,7 +216,7 @@ any greetings or small talk.
3. Ask for the CVV number.
- Wait for the customer's response.
4. Confirm that the payment has been processed successfully.
- - trigger the transferCall tool with Payment `HPMA-SA` Assistant.
+ - trigger the handoff tool with Payment `HPMA-SA` Assistant.
```
### **HPMA-SA (Main Sub Assistant Prompt)**
@@ -245,6 +261,6 @@ or small talk.
## **Conclusion**
-By following these steps and examples, you can configure your call system to conduct **silent transfers** ensuring that callers experience a single, uninterrupted conversation. Each assistant does its job smoothly, whether it’s capturing payment, finalizing a shipping address, or collecting basic info.
+By following these steps and examples, you can configure your call system to conduct **silent handoffs**, ensuring that callers experience a single, uninterrupted conversation. Each assistant does its job smoothly, whether it’s capturing payment, finalizing a shipping address, or collecting basic info.
-Enjoy setting up your silent transfers!
+Enjoy setting up your silent handoffs!
diff --git a/fern/static/images/call-queue-management.png b/fern/static/images/call-queue-management.png
new file mode 100644
index 000000000..66dae1f93
Binary files /dev/null and b/fern/static/images/call-queue-management.png differ
diff --git a/fern/static/images/encryption/custom-credential-pem-key.png b/fern/static/images/encryption/custom-credential-pem-key.png
new file mode 100644
index 000000000..ea44b7f69
Binary files /dev/null and b/fern/static/images/encryption/custom-credential-pem-key.png differ
diff --git a/fern/static/images/encryption/custom-credential.png b/fern/static/images/encryption/custom-credential.png
new file mode 100644
index 000000000..9306c5e3b
Binary files /dev/null and b/fern/static/images/encryption/custom-credential.png differ
diff --git a/fern/static/images/encryption/tool-with-credential.png b/fern/static/images/encryption/tool-with-credential.png
new file mode 100644
index 000000000..069afee67
Binary files /dev/null and b/fern/static/images/encryption/tool-with-credential.png differ
diff --git a/fern/static/images/encryption/tool-with-encryption.png b/fern/static/images/encryption/tool-with-encryption.png
new file mode 100644
index 000000000..e9a6b0c29
Binary files /dev/null and b/fern/static/images/encryption/tool-with-encryption.png differ
diff --git a/fern/static/images/providers/speechmatics/VapiSpeechmatics.png b/fern/static/images/providers/speechmatics/VapiSpeechmatics.png
new file mode 100644
index 000000000..32001128b
Binary files /dev/null and b/fern/static/images/providers/speechmatics/VapiSpeechmatics.png differ
diff --git a/fern/static/images/server-url/authentication/bearer-token-credential.png b/fern/static/images/server-url/authentication/bearer-token-credential.png
new file mode 100644
index 000000000..3cbcc39fa
Binary files /dev/null and b/fern/static/images/server-url/authentication/bearer-token-credential.png differ
diff --git a/fern/static/images/server-url/authentication/credential-selection-assistant-server.png b/fern/static/images/server-url/authentication/credential-selection-assistant-server.png
new file mode 100644
index 000000000..8136878d9
Binary files /dev/null and b/fern/static/images/server-url/authentication/credential-selection-assistant-server.png differ
diff --git a/fern/static/images/server-url/authentication/credential-selection-phone-number-server.png b/fern/static/images/server-url/authentication/credential-selection-phone-number-server.png
new file mode 100644
index 000000000..b6e2f3a40
Binary files /dev/null and b/fern/static/images/server-url/authentication/credential-selection-phone-number-server.png differ
diff --git a/fern/static/images/server-url/authentication/credential-selection.png b/fern/static/images/server-url/authentication/credential-selection.png
new file mode 100644
index 000000000..f146b2da1
Binary files /dev/null and b/fern/static/images/server-url/authentication/credential-selection.png differ
diff --git a/fern/static/images/server-url/authentication/custom-credentials-dashboard.png b/fern/static/images/server-url/authentication/custom-credentials-dashboard.png
new file mode 100644
index 000000000..df6e1d532
Binary files /dev/null and b/fern/static/images/server-url/authentication/custom-credentials-dashboard.png differ
diff --git a/fern/static/images/server-url/authentication/hmac-credential.png b/fern/static/images/server-url/authentication/hmac-credential.png
new file mode 100644
index 000000000..7baf82e1e
Binary files /dev/null and b/fern/static/images/server-url/authentication/hmac-credential.png differ
diff --git a/fern/static/images/server-url/authentication/oauth2-credential.png b/fern/static/images/server-url/authentication/oauth2-credential.png
new file mode 100644
index 000000000..419ab0860
Binary files /dev/null and b/fern/static/images/server-url/authentication/oauth2-credential.png differ
diff --git a/fern/static/images/server-url/authentication/x-vapi-secret-credential.png b/fern/static/images/server-url/authentication/x-vapi-secret-credential.png
new file mode 100644
index 000000000..3d754d39a
Binary files /dev/null and b/fern/static/images/server-url/authentication/x-vapi-secret-credential.png differ
diff --git a/fern/static/images/sip/sip-chime-assign-phone-number.png b/fern/static/images/sip/sip-chime-assign-phone-number.png
new file mode 100644
index 000000000..196e3936e
Binary files /dev/null and b/fern/static/images/sip/sip-chime-assign-phone-number.png differ
diff --git a/fern/static/images/sip/sip-chime-create-credential.png b/fern/static/images/sip/sip-chime-create-credential.png
new file mode 100644
index 000000000..3487f4f8a
Binary files /dev/null and b/fern/static/images/sip/sip-chime-create-credential.png differ
diff --git a/fern/static/images/sip/sip-chime-create-inbound-route.png b/fern/static/images/sip/sip-chime-create-inbound-route.png
new file mode 100644
index 000000000..a6123c0d5
Binary files /dev/null and b/fern/static/images/sip/sip-chime-create-inbound-route.png differ
diff --git a/fern/static/images/sip/sip-chime-create-sip-media-application.png b/fern/static/images/sip/sip-chime-create-sip-media-application.png
new file mode 100644
index 000000000..bff43d339
Binary files /dev/null and b/fern/static/images/sip/sip-chime-create-sip-media-application.png differ
diff --git a/fern/static/images/sip/sip-chime-create-sip-rule.png b/fern/static/images/sip/sip-chime-create-sip-rule.png
new file mode 100644
index 000000000..85ce0700d
Binary files /dev/null and b/fern/static/images/sip/sip-chime-create-sip-rule.png differ
diff --git a/fern/static/images/sip/sip-chime-create-voice-connector.png b/fern/static/images/sip/sip-chime-create-voice-connector.png
new file mode 100644
index 000000000..2c34d18ba
Binary files /dev/null and b/fern/static/images/sip/sip-chime-create-voice-connector.png differ
diff --git a/fern/static/images/sip/sip-chime-enable-origination.png b/fern/static/images/sip/sip-chime-enable-origination.png
new file mode 100644
index 000000000..f33a18dd1
Binary files /dev/null and b/fern/static/images/sip/sip-chime-enable-origination.png differ
diff --git a/fern/static/images/sip/sip-chime-ip-1.png b/fern/static/images/sip/sip-chime-ip-1.png
new file mode 100644
index 000000000..b4e80c509
Binary files /dev/null and b/fern/static/images/sip/sip-chime-ip-1.png differ
diff --git a/fern/static/images/sip/sip-chime-ip-2.png b/fern/static/images/sip/sip-chime-ip-2.png
new file mode 100644
index 000000000..3cd8f54ae
Binary files /dev/null and b/fern/static/images/sip/sip-chime-ip-2.png differ
diff --git a/fern/static/images/sip/sip-chime-phone-number.png b/fern/static/images/sip/sip-chime-phone-number.png
new file mode 100644
index 000000000..eea09d3e1
Binary files /dev/null and b/fern/static/images/sip/sip-chime-phone-number.png differ
diff --git a/fern/static/images/sip/sip-chime-update-phone-number.png b/fern/static/images/sip/sip-chime-update-phone-number.png
new file mode 100644
index 000000000..c0fdb6a19
Binary files /dev/null and b/fern/static/images/sip/sip-chime-update-phone-number.png differ
diff --git a/fern/test/test-suites.mdx b/fern/test/test-suites.mdx
index e5e0a9811..2787d214f 100644
--- a/fern/test/test-suites.mdx
+++ b/fern/test/test-suites.mdx
@@ -4,6 +4,10 @@ subtitle: End-to-end test automation for AI voice agents
slug: /test/test-suites
---
+
+Test Suites is being deprecated. It will be replaced by Simulations, a more powerful way to test your voice agents. You can keep using Test Suites in the meantime, and we'll share a migration guide once Simulations is ready.
+
+
## Overview
**Test Suite** is an end-to-end feature that automates testing of your AI voice agents. Our platform simulates an AI tester that interacts with your voice agent by following a pre-defined script. After the interaction, the transcript is sent to a language model (LLM) along with your evaluation rubric. The LLM then determines if the interaction met the defined objectives.
diff --git a/fern/tools/arguments-encryption.mdx b/fern/tools/arguments-encryption.mdx
new file mode 100644
index 000000000..ffe402ca9
--- /dev/null
+++ b/fern/tools/arguments-encryption.mdx
@@ -0,0 +1,218 @@
+---
+title: Tool Arguments Encryption
+subtitle: Learn to encrypt tool arguments and protect sensitive data
+slug: tools/encryption
+---
+
+## Overview
+
+Tool argument encryption protects sensitive data like Social Security Numbers, Credit Card Numbers, and other PII by encrypting specific fields before they're sent to your server.
+
+**In this guide, you'll learn to:**
+- Create and configure a custom credential with encryption enabled
+- Generate RSA public/private key pairs
+- Configure tools to encrypt specific argument fields
+- Decrypt encrypted data on your server
+
+## Prerequisites
+
+- A Vapi account with access to the dashboard
+- OpenSSL or a similar tool for generating RSA keys
+- A server endpoint that can receive and decrypt encrypted data
+
+
+
+ Navigate to the custom credentials page and enable encryption settings.
+
+ 1. Go to [https://dashboard.vapi.ai/settings/integrations/custom-credential](https://dashboard.vapi.ai/settings/integrations/custom-credential) and click "Add Custom Credential"
+ 2. Check **Enable Encryption**
+ 3. Select **RSA-OAEP-256** as the algorithm
+ 4. Select **SPKI-PEM** as the format
+
+
+
+
+
+
+
+ Use OpenSSL to generate a public/private key pair in PEM format.
+
+ Run this command in your terminal to generate both keys:
+
+```bash
+# Generate a 2048-bit RSA private key
+openssl genrsa -out private-key.pem 2048
+
+# Extract the public key in SPKI format
+openssl rsa -in private-key.pem -pubout -out public-key.pem
+```
+
+ This creates two files:
+ - `private-key.pem` - Keep this secure on your server for decryption
+ - `public-key.pem` - Copy this to Vapi for encryption
+
+
+ Never share or commit your private key. Store it securely in your server's environment variables.
+
+
+
+
+ Copy and paste your public key into the Vapi dashboard.
+
+ 1. Open `public-key.pem` and copy the entire contents
+ 2. Paste the public key PEM into the **Public Key PEM** field
+ 3. Click **Save**
+
+
+
+
+
+ Your credential is now ready to use with encrypted tool arguments.
+
+
+
+ Navigate to your tools and choose which tool should use encryption.
+
+ 1. Go to the [Tools page](https://dashboard.vapi.ai/tools)
+ 2. Select an existing **Custom Tool** or **API Request Tool**
+ 3. Alternatively, create a new tool if needed
+
+
+
+ Link your encryption credential and specify which fields to encrypt.
+
+ 1. In the tool settings, find the **Credential** dropdown
+ 2. Select the credential you created in Step 1
+ 3. Scroll to **Encryption Settings**
+ 4. Add the exact JSON paths to the arguments you want encrypted
+
+ **Example JSON paths:**
+ - `ssn` - Encrypts the `ssn` field
+ - `payment.cardNumber` - Encrypts nested fields
+
+
+ JSON paths are relative to the tool's argument structure. Only specified fields will be encrypted.
+
+
+
+
+
+
+
+
+
+
+
+
+ Save your tool configuration and verify encryption works with a test call.
+
+ 1. Click **Save** to apply your changes
+ 2. Make a test call using an assistant with this tool
+ 3. Trigger the tool during the call
+ 4. Check your server logs to confirm encrypted data arrives
+
+ When your server receives the webhook, encrypted fields will appear as base64-encoded strings:
+
+```json
+{
+ "fullName": "John Doe",
+ "dateOfBirth": "ZCT0EvFkJRHShBd06Ldu7ImHgl7YCuX8l8IF/7xuQSydafVWRR2eCGqTeXK7HyMaXyDc3hHyaTwTKyd0kJH0TCgQEJwviTLSlt7IzH4BIVXIadYcmCUbcSN77R6HoYtGE/De8hEYZ0t+bfuKnDY1IyiQXViI1oE+A2hiscrl4x9Or+n3CUSvxXQ3fJREsCHVN4Y4jbLtQOh0bhlsKLol7GEXBGnOG+oBlXvIzEgyco/peusg7Vzeq42F9odQyZZop9u8+ynwz3DOCm9JBZdOuf7iCKKos0NU+VeWanUHvJ2aJfGPck7qleFWDFsCb+F6QcIcn3fkiKTqoYa44vQ+NA=="
+}
+```
+
+
+
+ Use your private key to decrypt the base64-encoded encrypted values.
+
+ Here's how to decrypt the data in your server code:
+
+
+```typescript title="TypeScript (Node.js)"
+import crypto from 'crypto';
+import fs from 'fs';
+
+function decryptToolArgument(encryptedBase64: string): string {
+ // Load your private key
+ const privateKey = fs.readFileSync('private-key.pem', 'utf8');
+
+ // Decode from base64
+ const encryptedBuffer = Buffer.from(encryptedBase64, 'base64');
+
+ // Decrypt using RSA-OAEP with SHA-256
+ const decrypted = crypto.privateDecrypt(
+ {
+ key: privateKey,
+ padding: crypto.constants.RSA_PKCS1_OAEP_PADDING,
+ oaepHash: 'sha256',
+ },
+ encryptedBuffer
+ );
+
+ return decrypted.toString('utf8');
+}
+
+// Example usage
+const encryptedDateOfBirth = "ZW5jcnlwdGVkX2RhdGFfaGVyZQ==...";
+const decryptedDateOfBirth = decryptToolArgument(encryptedDateOfBirth);
+console.log(decryptedDateOfBirth); // Original value
+```
+```python title="Python"
+from cryptography.hazmat.primitives import serialization, hashes
+from cryptography.hazmat.primitives.asymmetric import padding
+from cryptography.hazmat.backends import default_backend
+import base64
+
+def decrypt_tool_argument(encrypted_base64: str) -> str:
+ # Load your private key
+ with open('private-key.pem', 'rb') as key_file:
+ private_key = serialization.load_pem_private_key(
+ key_file.read(),
+ password=None,
+ backend=default_backend()
+ )
+
+ # Decode from base64
+ encrypted_data = base64.b64decode(encrypted_base64)
+
+ # Decrypt using RSA-OAEP with SHA-256
+ decrypted = private_key.decrypt(
+ encrypted_data,
+ padding.OAEP(
+ mgf=padding.MGF1(algorithm=hashes.SHA256()),
+ algorithm=hashes.SHA256(),
+ label=None
+ )
+ )
+
+ return decrypted.decode('utf-8')
+
+# Example usage
+encrypted_date_of_birth = "ZW5jcnlwdGVkX2RhdGFfaGVyZQ==..."
+decrypted_date_of_birth = decrypt_tool_argument(encrypted_date_of_birth)
+print(decrypted_date_of_birth) # Original value
+```
+
+
+
+ Store your private key in environment variables rather than hardcoding the file path. Use `process.env.PRIVATE_KEY` or `os.getenv("PRIVATE_KEY")`.
+
+
+
+
+## Security best practices
+
+Follow these guidelines to maintain secure encryption:
+
+- **Never commit private keys** - Use environment variables or secret management systems
+- **Rotate keys periodically** - Generate new key pairs and update credentials regularly
+- **Encrypt selectively** - Only encrypt fields that contain sensitive data to minimize overhead
+- **Validate decrypted data** - Always validate and sanitize decrypted values before use
+- **Use HTTPS** - Ensure your server endpoint uses HTTPS for transport security
+
+## Next steps
+
+Now that you have tool argument encryption configured:
+
+- **[Custom tools](/tools/custom-tools):** Learn more about creating custom tools
+- **[API request tools](/tools/api-request):** Configure API request tools with encrypted arguments
+- **[Server URLs](/server-url):** Set up secure server endpoints for receiving encrypted data
diff --git a/fern/tools/client-side-websdk.mdx b/fern/tools/client-side-websdk.mdx
new file mode 100644
index 000000000..cca52c8e5
--- /dev/null
+++ b/fern/tools/client-side-websdk.mdx
@@ -0,0 +1,296 @@
+---
+title: Client-side Tools (Web SDK)
+subtitle: Handle tool-calls in the browser without a server URL
+slug: tools/client-side-websdk
+---
+
+## Overview
+
+Use the Web SDK to handle tool-calls entirely on the client. This lets your assistant trigger UI-side effects (like showing notifications or changing state) directly in the browser.
+
+**In this guide, you'll learn to:**
+- Define a client-side tool with the Web SDK
+- Receive and handle `tool-calls` events on the client
+- Inject extra context during a call with `addMessage`
+
+
+Client-side tools cannot send a tool "result" back to the model. If the model must use the output of a tool to continue reasoning, implement a server-based tool instead. See: Server-based Custom Tools.
+
+
+
+To make a tool client-side, simply do not provide a server URL. The tool specification is delivered to the browser, and the Web SDK emits tool-calls messages that your frontend can handle.
+
+
+## Quickstart
+
+1. Install the Web SDK:
+
+```bash
+npm install @vapi-ai/web
+```
+
+2. Start a call with your tool defined in the `model.tools` array and subscribe to `clientMessages: ['tool-calls']`.
+3. Listen for `message.type === 'tool-calls'` and perform the desired UI update. No response is sent back to the model.
+4. (Optional) Inject context mid-call using `vapi.addMessage(...)`.
+
+## Complete example (React + Web SDK)
+
+```tsx
+import Vapi from '@vapi-ai/web';
+import { useCallback, useState } from 'react';
+
+const vapi = new Vapi('YOUR_PUBLIC_API_KEY');
+
+function App() {
+ const [notification, setNotification] = useState<string | null>(null);
+
+ const handleUIUpdate = useCallback((message?: string) => {
+ setNotification(message || 'UI Update Triggered!');
+ setTimeout(() => setNotification(null), 3000);
+ }, []);
+
+ // 1) Listen for client tool-calls and update the UI
+ vapi.on('message', (message) => {
+ console.log('Message:', message);
+
+ if (message.type === 'tool-calls') {
+ const toolCalls = message.toolCallList;
+
+ toolCalls.forEach((toolCall) => {
+ const functionName = toolCall.function?.name;
+ let parameters: Record<string, unknown> = {};
+
+ try {
+ const args = toolCall.function?.arguments;
+ if (typeof args === 'string') {
+ parameters = JSON.parse(args || '{}');
+ } else if (typeof args === 'object' && args !== null) {
+ parameters = args as Record<string, unknown>;
+ } else {
+ parameters = {};
+ }
+ } catch (err) {
+ console.error('Failed to parse toolCall arguments:', err);
+ return;
+ }
+
+ if (functionName === 'updateUI') {
+ handleUIUpdate((parameters as any).message);
+ }
+ });
+ }
+ });
+
+ // 2) Start the call with a client-side tool (no server URL)
+ const startCall = useCallback(() => {
+ vapi.start({
+ model: {
+ provider: 'openai',
+ model: 'gpt-4.1',
+ messages: [
+ {
+ role: 'system',
+ content:
+ "You are an attentive assistant who can interact with the application's user interface by calling available tools. Whenever the user asks to update, refresh, change, or otherwise modify the UI, or hints that some UI update should occur, always use the 'updateUI' tool call with the requested action and relevant data. Use tool calls proactively if you determine that a UI update would be helpful.",
+ },
+ ],
+ tools: [
+ {
+ type: 'function',
+ async: true,
+ function: {
+ name: 'updateUI',
+ description:
+ 'Call this function to initiate any UI update whenever the user requests or implies they want the user interface to change (for example: show a message, highlight something, trigger an animation, etc). Provide an \'action\' describing the update and an optional \'data\' object with specifics.',
+ parameters: {
+ type: 'object',
+ properties: {
+ message: {
+ description:
+ 'Feel free to start with any brief introduction message in 10 words.',
+ type: 'string',
+ default: '',
+ },
+ },
+ required: ['message'],
+ },
+ },
+ messages: [
+ {
+ type: 'request-start',
+ content: 'Updating UI...',
+ blocking: false,
+ },
+ ],
+ },
+ ],
+ },
+ voice: { provider: 'vapi', voiceId: 'Elliot' },
+ transcriber: { provider: 'deepgram', model: 'nova-2', language: 'en' },
+ name: 'Alex - Test',
+ firstMessage: 'Hello.',
+ voicemailMessage: "Please call back when you're available.",
+ endCallMessage: 'Goodbye.',
+ clientMessages: ['tool-calls'], // subscribe to client-side tool calls
+ });
+ }, []);
+
+ const stopCall = useCallback(() => {
+ vapi.stop();
+ }, []);
+
+ return (
+
+ {notification && (
+
+ {notification}
+
+ )}
+
+
+
+ Vapi Client Tool Calls
+
+
+
+ Start a call and ask the assistant to trigger UI updates
+
+
+
+
+
+
+
+
+
+ );
+}
+
+export default App;
+```
+
+## Inject data during the call
+
+Use `addMessage` to provide extra context mid-call. This does not return results for a tool; it adds messages the model can see.
+
+```ts
+// Inject system-level context
+vapi.addMessage({
+ role: 'system',
+ content: 'Context: userId=123, plan=premium, theme=dark',
+});
+
+// Inject a user message
+vapi.addMessage({
+ role: 'user',
+ content: 'FYI: I switched to the settings tab.',
+});
+```
+
+
+If you need the model to consume tool outputs (e.g., fetch data and continue reasoning with it), implement a server-based tool. See Custom Tools.
+
+
+## Key points
+
+- **Client-only execution**: Omit the server URL to run tools on the client.
+- **One-way side effects**: Client tools do not send results back to the model.
+- **Subscribe to events**: Use clientMessages: ['tool-calls'] and handle message.type === 'tool-calls'.
+- **Add context**: Use vapi.addMessage to inject data mid-call.
+
+## Next steps
+
+- **Server-based tools**: Learn how to return results back to the model with Custom Tools.
+- **API reference**: See Tools API for full configuration options.
diff --git a/fern/tools/code-tool.mdx b/fern/tools/code-tool.mdx
new file mode 100644
index 000000000..5bc4c397f
--- /dev/null
+++ b/fern/tools/code-tool.mdx
@@ -0,0 +1,281 @@
+---
+title: Code Tool
+subtitle: Execute custom TypeScript code directly within your assistant without setting up a server.
+slug: tools/code-tool
+---
+
+The Code Tool allows you to write and execute custom TypeScript code that runs when your assistant needs to perform a specific action. Unlike custom function tools that require you to host a server, code tools run directly on Vapi's infrastructure.
+
+## When to Use Code Tools
+
+Code tools are ideal when you need to:
+- Transform or process data during a conversation
+- Make HTTP requests to external APIs
+- Perform calculations or business logic
+- Avoid the overhead of setting up and maintaining a webhook server
+
+## Creating a Code Tool
+
+Create code tools using the [Vapi API](/api-reference/tools/create). Each code tool requires:
+
+- **Tool Name**: A descriptive identifier (e.g., `get_customer_data`)
+- **Description**: Explain what your tool does - this helps the AI understand when to use it
+- **TypeScript Code**: Write the code that will execute when the tool is called
+- **Parameters**: Define the input parameters your code expects
+- **Environment Variables**: Store sensitive values like API keys securely
+
+### Writing Your Code
+
+Your code has access to two objects:
+- **`args`**: Contains the parameters passed by the assistant
+- **`env`**: Contains your environment variables
+
+```typescript
+// Access parameters from the assistant
+const { customerId, orderType } = args;
+
+// Access secure environment variables
+const { API_KEY, API_URL } = env;
+
+// Make HTTP requests to external services
+const response = await fetch(`${API_URL}/customers/${customerId}`, {
+ headers: {
+ 'Authorization': `Bearer ${API_KEY}`,
+ 'Content-Type': 'application/json'
+ }
+});
+
+const customer = await response.json();
+
+// Return data to the assistant
+return {
+ name: customer.name,
+ email: customer.email,
+ memberSince: customer.createdAt
+};
+```
+
+
+Your code runs in an isolated environment with a configurable timeout (default: 10 seconds, max: 60 seconds).
+
+
+## Example: Customer Lookup Tool
+
+Let's create a tool that looks up customer information:
+
+### Configuration
+
+| Field | Value |
+|-------|-------|
+| Tool Name | `get_customer` |
+| Description | Retrieves customer information by their ID |
+
+### Parameters
+
+| Name | Type | Required | Description |
+|------|------|----------|-------------|
+| customerId | string | Yes | The unique customer identifier |
+
+### Environment Variables
+
+| Name | Value |
+|------|-------|
+| API_KEY | Your API key |
+| API_BASE_URL | https://api.yourservice.com |
+
+### Code
+
+```typescript
+const { customerId } = args;
+const { API_KEY, API_BASE_URL } = env;
+
+const response = await fetch(`${API_BASE_URL}/customers/${customerId}`, {
+ headers: {
+ 'Authorization': `Bearer ${API_KEY}`
+ }
+});
+
+if (!response.ok) {
+ return { error: 'Customer not found' };
+}
+
+const customer = await response.json();
+
+return {
+ name: customer.name,
+ email: customer.email,
+ plan: customer.subscription.plan,
+ status: customer.status
+};
+```
+
+## Example: Order Processing Tool
+
+A more complex example that processes an order:
+
+### Parameters
+
+| Name | Type | Required | Description |
+|------|------|----------|-------------|
+| items | array | Yes | Array of item objects with id and quantity |
+| customerId | string | Yes | The customer placing the order |
+| shippingAddress | string | No | Delivery address |
+
+### Code
+
+```typescript
+const { items, customerId, shippingAddress } = args;
+const { ORDER_API_KEY, ORDER_API_URL } = env;
+
+// Calculate total
+let total = 0;
+const itemDetails = [];
+
+for (const item of items) {
+ const priceResponse = await fetch(`${ORDER_API_URL}/products/${item.id}`);
+ const product = await priceResponse.json();
+
+ const itemTotal = product.price * item.quantity;
+ total += itemTotal;
+
+ itemDetails.push({
+ name: product.name,
+ quantity: item.quantity,
+ price: product.price,
+ subtotal: itemTotal
+ });
+}
+
+// Create the order
+const orderResponse = await fetch(`${ORDER_API_URL}/orders`, {
+ method: 'POST',
+ headers: {
+ 'Authorization': `Bearer ${ORDER_API_KEY}`,
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify({
+ customerId,
+ items: itemDetails,
+ total,
+ shippingAddress
+ })
+});
+
+const order = await orderResponse.json();
+
+return {
+ orderId: order.id,
+ total: `$${total.toFixed(2)}`,
+ estimatedDelivery: order.estimatedDelivery,
+ items: itemDetails.map(i => `${i.quantity}x ${i.name}`)
+};
+```
+
+## Using Code Tools in Assistants
+
+Once created, add your code tool to any assistant by updating the assistant configuration via API:
+
+```bash
+curl --location --request PATCH 'https://api.vapi.ai/assistant/ASSISTANT_ID' \
+--header 'Authorization: Bearer YOUR_API_KEY' \
+--header 'Content-Type: application/json' \
+--data '{
+ "model": {
+ "toolIds": ["your-code-tool-id"]
+ }
+}'
+```
+
+## Creating Code Tools via API
+
+Create code tools programmatically with the following request:
+
+```bash
+curl --location 'https://api.vapi.ai/tool' \
+--header 'Content-Type: application/json' \
+--header 'Authorization: Bearer YOUR_API_KEY' \
+--data '{
+ "type": "code",
+ "name": "get_customer",
+ "description": "Retrieves customer information by their ID",
+ "code": "const { customerId } = args;\nconst { API_KEY } = env;\n\nconst response = await fetch(`https://api.example.com/customers/${customerId}`, {\n headers: { \"Authorization\": `Bearer ${API_KEY}` }\n});\n\nreturn await response.json();",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "customerId": {
+ "type": "string",
+ "description": "The unique customer identifier"
+ }
+ },
+ "required": ["customerId"]
+ },
+ "environmentVariables": [
+ {
+ "name": "API_KEY",
+ "value": "your-api-key-here"
+ }
+ ]
+}'
+```
+
+## Best Practices
+
+### Security
+- Store sensitive values (API keys, secrets) in **Environment Variables**, not in your code
+- Environment variable values support Liquid templates to reference call variables
+
+### Performance
+- Keep code execution under the timeout limit
+- Use efficient API calls and avoid unnecessary loops
+- Consider caching strategies for repeated lookups
+
+### Error Handling
+- Always handle potential errors from API calls
+- Return meaningful error messages that help the assistant respond appropriately
+
+```typescript
+const { customerId } = args;
+
+try {
+ const response = await fetch(`${env.API_URL}/customers/${customerId}`);
+
+ if (!response.ok) {
+ return {
+ error: true,
+ message: `Customer ${customerId} not found`
+ };
+ }
+
+ return await response.json();
+} catch (error) {
+ return {
+ error: true,
+ message: 'Unable to reach customer service'
+ };
+}
+```
+
+### Return Values
+- Return structured data that the assistant can easily interpret
+- Include relevant information the assistant needs to continue the conversation
+
+## Limitations
+
+- **Timeout**: Maximum execution time is 60 seconds (default: 10 seconds)
+- **No file system access**: Code runs in an isolated environment without file access
+- **Memory**: Code runs with limited memory allocation
+- **Network**: Only outbound HTTP/HTTPS requests are supported
+
+## Code Tool vs Custom Function Tool
+
+| Feature | Code Tool | Custom Function Tool |
+|---------|-----------|---------------------|
+| Server Required | No | Yes |
+| Language | TypeScript | Any |
+| Setup Complexity | Low | Higher |
+| Customization | Moderate | Full control |
+| Secrets Management | Environment Variables | Your server |
+| Best For | Quick integrations, API calls | Complex logic, existing infrastructure |
+
+Choose **Code Tools** when you want to quickly add functionality without managing infrastructure. Choose **Custom Function Tools** when you need full control over the execution environment or have existing server infrastructure.
+
diff --git a/fern/tools/default-tools.mdx b/fern/tools/default-tools.mdx
index a3741a768..9f6372cc4 100644
--- a/fern/tools/default-tools.mdx
+++ b/fern/tools/default-tools.mdx
@@ -124,7 +124,7 @@ There are three methods for sending DTMF in a phone call:
Vapi's DTMF tool integrates with telephony provider APIs to send DTMF tones using the out-of-band RFC 2833 method. This approach is widely supported and more reliable for transmitting the signals, especially in VoIP environments.
-Note, the tool's effectiveness depends on the IVR system's configuration and their capturing method. See our [IVR navigation guide](https://docs.vapi.ai/tools/ivr-navigation) for best practices.
+Note, the tool's effectiveness depends on the IVR system's configuration and their capturing method. See our [IVR navigation guide](https://docs.vapi.ai/ivr-navigation) for best practices.
#### API Request
diff --git a/fern/tools/google-calendar.mdx b/fern/tools/google-calendar.mdx
index e0806fe25..fba2cb110 100644
--- a/fern/tools/google-calendar.mdx
+++ b/fern/tools/google-calendar.mdx
@@ -24,7 +24,7 @@ Before you can use the Google Calendar integration, you need to:
First, you need to connect your Google Calendar account to Vapi:
1. Navigate to the Vapi Dashboard
-2. Go to **Providers Keys** > **Tools Provider** > **Google Calendar**
+2. Go to **Integrations** > **Tools Provider** > **Google Calendar**
3. Click the **Connect** button
4. A Google authorization popup will appear
5. Follow the prompts to authorize Vapi to access your Google Calendar
diff --git a/fern/tools/handoff.mdx b/fern/tools/handoff.mdx
deleted file mode 100644
index 691d19fb2..000000000
--- a/fern/tools/handoff.mdx
+++ /dev/null
@@ -1,460 +0,0 @@
----
-title: Handoff Tool
-subtitle: 'Transfer the call to another assistant.'
-slug: tools/handoff
----
-
-The handoff tool enables seamless call transfers between assistants in a multi-agent system. This guide covers all configuration patterns and use cases.
-
-## Table of Contents
-- [Overview](#overview)
-- [System Prompt Best Practices](#system-prompt-best-practices)
-- [Basic Configuration](#basic-configuration)
-- [Multiple Destinations](#multiple-destinations)
-- [Dynamic Handoffs](#dynamic-handoffs)
-- [Context Engineering](#context-engineering)
-- [Variable Extraction](#variable-extraction)
-- [Custom Function Definitions](#custom-function-definitions)
-
-## Overview
-
-The handoff tool allows assistants to transfer calls to other assistants. Key features:
-- Transfer to specific assistants by ID or name (in a Squad)
-- Support for multiple destination options
-- Dynamic destination determination via webhook
-- Context manipulation during handoff
-- Variable extraction from conversations for subsequent assistants to use
-
-## System Prompt Best Practices
-
-When using the handoff tool, add this to your system prompt for optimal agent coordination:
-https://openai.github.io/openai-agents-python/ref/extensions/handoff_prompt/
-
-```markdown
-# System context
-
-You are part of a multi-agent system designed to make agent coordination and execution easy.
-Agents uses two primary abstraction: **Agents** and **Handoffs**. An agent encompasses
-instructions and tools and can hand off a conversation to another agent when appropriate.
-Handoffs are achieved by calling a handoff function, generally named `handoff_to_`.
-Handoffs between agents are handled seamlessly in the background; do not mention or draw
-attention to these handoffs in your conversation with the user.
-
-# Agent context
-
-{put your agent system prompt here}
-```
-
-## Basic Configuration
-
-### 1. Single Destination Handoff
-
-#### Using Assistant ID
-```json
-{
- "tools": [
- {
- "type": "handoff",
- "destinations": [
- {
- "type": "assistant",
- "assistantId": "03e11cfe-4528-4243-a43d-6aded66ab7ba",
- "description": "customer wants to speak with technical support",
- "contextEngineeringPlan": {
- "type": "all"
- }
- }
- ]
- }
- ]
-}
-```
-
-#### Using Assistant Name (for Squad Members)
-```json
-{
- "tools": [
- {
- "type": "handoff",
- "destinations": [
- {
- "type": "assistant",
- "assistantName": "TechnicalSupportAgent",
- "description": "customer needs technical assistance",
- "contextEngineeringPlan": {
- "type": "all"
- }
- }
- ]
- }
- ]
-}
-```
-
-## Multiple Destinations
-
-### 2.1 Multiple Tools Pattern (OpenAI Recommended)
-
-Best for OpenAI models - creates separate tool definitions for each destination:
-
-```json
-{
- "tools": [
- {
- "type": "handoff",
- "destinations": [
- {
- "type": "assistant",
- "assistantId": "sales-assistant-123",
- "description": "customer wants to learn about pricing or make a purchase",
- "contextEngineeringPlan": {
- "type": "all"
- }
- }
- ]
- },
- {
- "type": "handoff",
- "destinations": [
- {
- "type": "assistant",
- "assistantId": "support-assistant-456",
- "description": "customer needs help with an existing product or service",
- "contextEngineeringPlan": {
- "type": "all"
- }
- }
- ]
- },
- {
- "type": "handoff",
- "destinations": [
- {
- "type": "assistant",
- "assistantId": "billing-assistant-789",
- "description": "customer has questions about invoices, payments, or refunds",
- "contextEngineeringPlan": {
- "type": "lastNMessages",
- "maxMessages": 5 // Only keeps the last 5 messages
- }
- }
- ]
- }
- ]
-}
-```
-
-### 2.2 Single Tool Pattern (Anthropic Recommended)
-
-Best for Anthropic models - single tool with multiple destination options:
-
-```json
-{
- "tools": [
- {
- "type": "handoff",
- "destinations": [
- {
- "type": "assistant",
- "assistantId": "03e11cfe-4528-4243-a43d-6aded66ab7ba",
- "description": "customer wants to learn about pricing or make a purchase"
- },
- {
- "type": "assistant",
- "assistantName": "support-assistant",
- "description": "customer needs help with an existing product or service"
- },
- {
- "type": "assistant",
- "assistantName": "billing-assistant",
- "description": "customer has questions about invoices, payments, or refunds"
- }
- ]
- }
- ]
-}
-```
-
-## Dynamic Handoffs
-
-### 3.1 Basic Dynamic Handoff
-
-The destination is determined at runtime via `handoff-destination-request` webhook:
-
-```json
-{
- "tools": [
- {
- "type": "handoff",
- "destinations": [
- {
- "type": "dynamic",
- "server": {
- "url": "https://api.example.com/determine-handoff-destination",
- "headers": {
- "Authorization": "Bearer YOUR_API_KEY"
- }
- }
- }
- ]
- }
- ]
-}
-```
-
-Your server must respond to this request with a single destination. You may pass `assistantId`, `assistantName` (if using squads), or a transient `assistant`. For example:
-
-```json
-destination: {
- "type": "assistant",
- "assistantId": "assistant-id",
- "variableExtractionPlan": {
- "schema": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "description": "Name of the customer",
- },
- },
- "required": ["name"],
- },
- },
- "contextEngineeringPlan": {
- "type": "none",
- },
-},
-```
-
-### 3.2 Dynamic Handoff with Custom Parameters
-
-Pass additional context to your webhook for intelligent routing:
-
-```json
-{
- "tools": [
- {
- "type": "handoff",
- "destinations": [
- {
- "type": "dynamic",
- "server": {
- "url": "https://api.example.com/intelligent-routing"
- }
- }
- ],
- "function": {
- "name": "handoff_with_context",
- "description": "Transfer the call to the most appropriate specialist",
- "parameters": {
- "type": "object",
- "properties": {
- "destination": {
- "type": "string",
- "description": "Use 'dynamic' to route to the best available agent",
- "enum": ["dynamic"]
- },
- "customerAreaCode": {
- "type": "number",
- "description": "Customer's area code for regional routing"
- },
- "customerIntent": {
- "type": "string",
- "enum": ["new-customer", "existing-customer", "partner"],
- "description": "Customer type for proper routing"
- },
- "customerSentiment": {
- "type": "string",
- "enum": ["positive", "negative", "neutral", "escalated"],
- "description": "Current emotional state of the customer"
- },
- "issueCategory": {
- "type": "string",
- "enum": ["technical", "billing", "sales", "general"],
- "description": "Primary category of the customer's issue"
- },
- "priority": {
- "type": "string",
- "enum": ["low", "medium", "high", "urgent"],
- "description": "Urgency level of the request"
- }
- },
- "required": ["destination", "customerIntent", "issueCategory"]
- }
- }
- }
- ]
-}
-```
-
-## Context Engineering
-
-Control what conversation history is passed to the next assistant:
-
-### All Messages (Default)
-```json
-{
- "contextEngineeringPlan": {
- "type": "all"
- }
-}
-```
-
-### Last N Messages
-```json
-{
- "contextEngineeringPlan": {
- "type": "lastNMessages",
- "maxMessages": 10
- }
-}
-```
-
-### No Context
-```json
-{
- "contextEngineeringPlan": {
- "type": "none"
- }
-}
-```
-
-## Variable Extraction
-
-Extract and pass structured data during handoff:
-
-```json
-{
- "tools": [
- {
- "type": "handoff",
- "destinations": [
- {
- "type": "assistant",
- "assistantId": "order-processing-assistant",
- "description": "customer is ready to place an order",
- "contextEngineeringPlan": {
- "type": "lastNMessages",
- "maxMessages": 5
- },
- "variableExtractionPlan": {
- "schema": {
- "type": "object",
- "properties": {
- "customerName": {
- "type": "string",
- "description": "Full name of the customer"
- },
- "email": {
- "type": "string",
- "format": "email",
- "description": "Customer's email address"
- },
- "productIds": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "description": "List of product IDs customer wants to order"
- },
- "shippingAddress": {
- "type": "object",
- "properties": {
- "street": { "type": "string" },
- "city": { "type": "string" },
- "state": { "type": "string" },
- "zipCode": { "type": "string" }
- }
- }
- },
- "required": ["customerName", "productIds"]
- }
- }
- }
- ]
- }
- ]
-}
-```
-
-## Custom Function Definitions
-
-Override the default function definition for more control. You can overwrite the function name for each tool to put into the system prompt or pass custom parameters in a dynamic handoff request.
-
-```json
-{
- "tools": [
- {
- "type": "handoff",
- "function": {
- "name": "transfer_to_department",
- "description": "Transfer the customer to the appropriate department based on their needs. Only use when explicitly requested or when the current assistant cannot help.",
- "parameters": {
- "type": "object",
- "properties": {
- "destination": {
- "type": "string",
- "description": "Department to transfer to",
- "enum": ["sales-team", "technical-support", "billing-department", "management"]
- },
- "reason": {
- "type": "string",
- "description": "Brief reason for the transfer"
- },
- "urgency": {
- "type": "boolean",
- "description": "Whether this is an urgent transfer"
- }
- },
- "required": ["destination", "reason"]
- }
- },
- "destinations": [
- {
- "type": "assistant",
- "assistantId": "sales-team",
- "description": "Sales inquiries and purchases"
- },
- {
- "type": "assistant",
- "assistantId": "technical-support",
- "description": "Technical issues and support"
- },
- {
- "type": "assistant",
- "assistantId": "billing-department",
- "description": "Billing and payment issues"
- },
- {
- "type": "assistant",
- "assistantId": "management",
- "description": "Escalations and complaints"
- }
- ]
- }
- ]
-}
-```
-
-## Best Practices
-
-1. **Clear Descriptions**: Write specific, actionable descriptions for each destination in your sytem prompt. Use `tool.function.name` to customize the name of the function to put into your prompt.
-2. **Context Management**: Use `lastNMessages` to limit context size for performance
-3. **Model Optimization**: Use multiple tools for OpenAI, single tool for Anthropic
-4. **Variable Extraction**: Extract key data before handoff to maintain context
-5. **Testing**: Test handoff scenarios thoroughly, including edge cases
-
-
-## Troubleshooting
-
-- Ensure assistant IDs are valid and accessible
-- Verify webhook server URLs are reachable and return proper format
-- Check that required parameters in custom functions match destinations
-- Monitor context size to avoid token limits
-- Test variable extraction schemas with sample data
-- Validate that assistant names exist in the same squad
-
----
-
-*Last updated: August 2025*
-*VAPI Documentation - Handoff Tool*
\ No newline at end of file
diff --git a/fern/tools/introduction.mdx b/fern/tools/introduction.mdx
index abcd8eb66..42e7af35f 100644
--- a/fern/tools/introduction.mdx
+++ b/fern/tools/introduction.mdx
@@ -6,11 +6,12 @@ slug: tools
[**Tools**](/api-reference/tools/create) allow your assistant to take actions beyond just conversation. They enable your assistant to perform tasks like transferring calls, accessing external data, or triggering actions in your application. Tools can be either built-in default tools provided by Vapi or custom tools that you create.
-There are three types of tools available:
+There are four types of tools available:
1. **Default Tools**: Built-in functions provided by Vapi for common operations like call transfers and control.
-2. **Custom Tools**: Your own functions that can be called by the assistant to interact with your systems.
-3. **Integration Tools**: Pre-built integrations with platforms like [Make](https://www.make.com/en/integrations/vapi) and GoHighLevel (GHL) that let you trigger automated workflows via voice.
+2. **Custom Tools**: Your own functions that can be called by the assistant to interact with your systems via webhooks.
+3. **Code Tools**: Write TypeScript code that executes directly on Vapi's infrastructure without setting up a server.
+4. **Integration Tools**: Pre-built integrations with platforms like [Make](https://www.make.com/en/integrations/vapi) and GoHighLevel (GHL) that let you trigger automated workflows via voice.
Tools are configured as part of your assistant's model configuration. You can find the complete API reference [here](/api-reference/tools/create-tool).
@@ -18,7 +19,7 @@ There are three types of tools available:
## Available Tools
-
+
- Create your own tools to extend assistant capabilities
+ Create your own webhook-based tools to extend assistant capabilities
+
+
+ Write TypeScript code that runs directly without a server
For Zapier MCP, visit https://mcp.zapier.com/mcp/?client=vapi? to generate your MCP server URL. This URL should be treated as a credential and kept secure.
+
+ To generate your Make MCP Server URL (also known as MCP Token), navigate to your Make profile > API Access tab > Tokens > Add token. See [Obtaining MCP Token documentation](https://developers.make.com/mcp-server/make-cloud-mcp-server/obtaining-mcp-token) for detailed instructions. This URL should be treated as a credential and kept secure.
### 2. Create and Configure MCP Tool
@@ -64,14 +66,17 @@ Now, add the MCP tool to your assistant:
## How MCP Works
-The MCP integration follows these steps during a call:
+The MCP integration follows these steps during a call or chat session:
-1. When a call starts, Vapi connects to your configured MCP server using **Streamable HTTP** protocol by default
-2. The MCP server returns a list of available tools and their capabilities
-3. These tools are dynamically added to your assistant's available tools
-4. The assistant can then use these tools during the call
-5. When a tool is invoked, Vapi sends the request to the MCP server
-6. The MCP server executes the action and returns the result
+1. When a call or chat starts, Vapi connects to your configured MCP server using **Streamable HTTP** protocol by default, fetches the list of available tools, and dynamically adds them to your assistant's available tools
+2. The assistant can then use these tools during the interaction
+3. **Each time the model invokes a specific MCP tool**, Vapi creates a new connection to the MCP server and sends the request with the `X-Call-Id`/`X-Chat-Id` header to identify the call or chat
+4. The MCP server executes the action and returns the result
+5. This process repeats for every tool invocation, meaning **multiple MCP sessions are created per call or chat**
+
+
+ Vapi uses multiple MCP sessions throughout a single conversation to ensure consistent behavior across both calls and chat interactions. Each tool execution creates a separate connection to the MCP server, allowing for isolated and reliable tool execution. All tool invocations include the `X-Call-Id`/`X-Chat-Id` header to identify the specific call or chat.
+
The MCP tool itself is not meant to be invoked by the model. It serves as a configuration mechanism for Vapi to fetch and inject the specific tool definitions from the MCP server into the model's context.
@@ -189,6 +194,20 @@ If you need to use Server-Sent Events protocol instead:
## Example MCP Providers
+### Make MCP
+
+The Make MCP Server provides access to the Make scenarios you select, allowing you to provision them as Custom Tools through MCP.
+
+1. Define your Make scenarios, configuring scenario [inputs and outputs](https://help.make.com/scenario-inputs-and-outputs) and setting them to be [scheduled on demand](https://help.make.com/schedule-a-scenario#30pY_)
+2. Get your [Make MCP Token](https://developers.make.com/mcp-server/make-cloud-mcp-server/obtaining-mcp-token)
+3. Choose your MCP [Tool Access Control](https://developers.make.com/mcp-server/make-cloud-mcp-server/tool-access-control) mechanism and define your MCP URL
+4. Add the URL to your Vapi MCP tool configuration
+5. Your assistant will now have access to your chosen Make scenarios
+
+
+ Make Cloud MCP allows you to build simple or complex Custom Tools using business logic to access the most important apps in your business tech stack. Check the full list in the Make app gallery.
+
+
### Zapier MCP
Zapier offers an MCP server that provides access to thousands of app integrations:
@@ -227,6 +246,7 @@ Composio also offers an MCP server for integration:
- [Model Context Protocol Introduction](https://modelcontextprotocol.io/introduction)
- [Zapier MCP](https://zapier.com/mcp)
+- [Make MCP](https://developers.make.com/mcp-server)
+Static parameters override LLM-generated arguments with the same key. If the LLM generates `"source": "chat"` and your static parameters include `"source": "vapi-call"`, the webhook receives `"source": "vapi-call"`.
+
+
+### Liquid template variables
+
+String values in static parameters can reference any variable available in the call context:
+
+| Variable | Example | Description |
+|----------|---------|-------------|
+| `customer.number` | `{{ customer.number }}` | The customer's phone number |
+| `transport.callSid` | `{{ transport.callSid }}` | The transport call session ID |
+| `now` | `{{ now }}` | Current timestamp |
+| `date` | `{{ date }}` | Current date |
+| Previously extracted variables | `{{ userId }}` | Variables extracted by earlier tools via aliases |
+
+## Variable extraction plan (aliases)
+
+The `variableExtractionPlan` field lets you extract specific values from a tool's JSON response and store them as named variables. These variables become available to all subsequent tool calls in the same conversation.
+
+### How it works
+
+- `variableExtractionPlan` is an object with an `aliases` array.
+- Each alias has `{ key, value }` where `key` is the variable name to store and `value` is a Liquid template expression.
+- The parsed JSON response body is available as **`$`** (dollar sign). Reference nested fields with dot notation: `{{ $.data.id }}`.
+- Top-level response properties are also spread at the root level, so `{{ name }}` works for a top-level `name` field.
+- Liquid filters are supported: `{{ $.email | downcase }}`, `{{ $.name | upcase }}`.
+- Extracted variables are stored in the call's artifact and are available in subsequent tool calls via Liquid templates.
+
+### Supported tool types
+
+| Tool type | Variable extraction supported |
+|-----------|------------------------------|
+| `apiRequest` | Yes |
+| `function` | Yes |
+| `code` | Yes |
+| `handoff` | Yes |
+
+### Example: extract fields from an API response
+
+Suppose your API returns:
+
+```json title="API response"
+{
+ "data": {
+ "id": "usr_abc123",
+ "name": "Jane Smith",
+ "email": "Jane.Smith@example.com"
+ },
+ "status": "active"
+}
+```
+
+Configure aliases to extract the fields you need:
+
+```json title="API request tool with variable extraction"
+{
+ "type": "apiRequest",
+ "method": "GET",
+ "url": "https://api.example.com/users/{{ customer.number }}",
+ "variableExtractionPlan": {
+ "aliases": [
+ { "key": "userId", "value": "{{ $.data.id }}" },
+ { "key": "userName", "value": "{{ $.data.name }}" },
+ { "key": "userEmail", "value": "{{ $.data.email | downcase }}" },
+ { "key": "accountStatus", "value": "{{ $.status }}" }
+ ]
+ }
+}
+```
+
+After this tool executes, the variables `userId`, `userName`, `userEmail`, and `accountStatus` are available for use in any subsequent tool call.
+
+
+Use the `$` reference for clarity when accessing nested fields (`{{ $.data.id }}`). For top-level fields, you can reference them directly (`{{ status }}`), but using `$` is more explicit.
+
+
+### Using extracted variables in subsequent tools
+
+Once variables are extracted, reference them by name in any Liquid template context -- URLs, headers, request bodies, or static parameters:
+
+```json title="Subsequent tool using extracted variables in the URL and body"
+{
+ "type": "apiRequest",
+ "method": "POST",
+ "url": "https://api.example.com/orders",
+ "body": {
+ "type": "json",
+ "value": "{ \"user_id\": \"{{ userId }}\", \"user_name\": \"{{ userName }}\" }"
+ }
+}
+```
+
+Or via static parameters on a function tool:
+
+```json title="Function tool using extracted variables in static parameters"
+{
+ "type": "function",
+ "function": {
+ "name": "create_order",
+ "description": "Create an order for a user",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "items": {
+ "type": "array",
+ "description": "Items to order"
+ }
+ },
+ "required": ["items"]
+ }
+ },
+ "server": {
+ "url": "https://my-server.com/webhook"
+ },
+ "parameters": [
+ { "key": "user_id", "value": "{{ userId }}" },
+ { "key": "user_email", "value": "{{ userEmail }}" }
+ ]
+}
+```
+
+## Deterministic tool chaining
+
+By combining static parameters and variable extraction, you can build tool chains where data flows from one tool's response to the next tool's request -- all without LLM involvement in the data transfer.
+
+### Example: look up a user, then create an order
+
+**Tool A** calls an external API to look up a user and extracts the user's ID and name:
+
+```json title="Tool A: User lookup with variable extraction"
+{
+ "type": "apiRequest",
+ "method": "GET",
+ "url": "https://api.example.com/users/{{ customer.number }}",
+ "variableExtractionPlan": {
+ "aliases": [
+ { "key": "userId", "value": "{{ $.data.id }}" },
+ { "key": "userName", "value": "{{ $.data.name }}" }
+ ]
+ }
+}
+```
+
+**Tool B** uses the extracted `userId` as a static parameter, ensuring the correct user ID reaches your webhook without the LLM needing to parse or forward it:
+
+```json title="Tool B: Create order with extracted user ID"
+{
+ "type": "function",
+ "function": {
+ "name": "create_order",
+ "description": "Create an order for the current user",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "items": {
+ "type": "array",
+ "description": "The items to include in the order"
+ }
+ },
+ "required": ["items"]
+ }
+ },
+ "server": {
+ "url": "https://my-server.com/webhook"
+ },
+ "parameters": [
+ { "key": "user_id", "value": "{{ userId }}" },
+ { "key": "user_name", "value": "{{ userName }}" }
+ ]
+}
+```
+
+The LLM decides *when* to call each tool based on the conversation, but the `user_id` and `user_name` values flow directly from Tool A's response to Tool B's request through the variable system.
+
+
+Variable extraction depends on the tool response being valid JSON. If the response cannot be parsed as JSON, no variables are extracted. Make sure the APIs you call return JSON responses.
+
+
+## Full API example
+
+Create an assistant with two chained tools using cURL:
+
+```bash title="Create tools and assistant with tool chaining"
+# Step 1: Create the user lookup tool (Tool A)
+curl -X POST "https://api.vapi.ai/tool" \
+ -H "Authorization: Bearer $VAPI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "type": "apiRequest",
+ "name": "User Lookup",
+ "method": "GET",
+ "url": "https://api.example.com/users/{{ customer.number }}",
+ "variableExtractionPlan": {
+ "aliases": [
+ { "key": "userId", "value": "{{ $.data.id }}" },
+ { "key": "userName", "value": "{{ $.data.name }}" },
+ { "key": "userEmail", "value": "{{ $.data.email | downcase }}" }
+ ]
+ }
+ }'
+
+# Step 2: Create the order tool (Tool B)
+curl -X POST "https://api.vapi.ai/tool" \
+ -H "Authorization: Bearer $VAPI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "type": "function",
+ "function": {
+ "name": "create_order",
+ "description": "Create an order for the current user",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "items": {
+ "type": "array",
+ "description": "The items to include in the order"
+ }
+ },
+ "required": ["items"]
+ }
+ },
+ "server": {
+ "url": "https://my-server.com/webhook"
+ },
+ "parameters": [
+ { "key": "user_id", "value": "{{ userId }}" },
+ { "key": "user_name", "value": "{{ userName }}" },
+ { "key": "user_email", "value": "{{ userEmail }}" }
+ ]
+ }'
+
+# Step 3: Attach both tools to your assistant
+curl -X PATCH "https://api.vapi.ai/assistant/YOUR_ASSISTANT_ID" \
+ -H "Authorization: Bearer $VAPI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "model": {
+ "provider": "openai",
+ "model": "gpt-4o",
+ "toolIds": ["TOOL_A_ID", "TOOL_B_ID"]
+ }
+ }'
+```
+
+## Tips
+
+- **Static parameters are invisible to the LLM.** The model does not see them in the tool schema and cannot override them (they are merged last).
+- **Aliases extract from JSON only.** The tool response must be parseable as JSON. Non-JSON responses (plain text, HTML) do not support variable extraction.
+- **Variable names are global to the call.** Extracted variables persist for the entire call and can be referenced by any subsequent tool. Choose unique, descriptive key names to avoid collisions.
+- **Liquid templates resolve at execution time.** Template expressions in static parameters and aliases are evaluated when the tool runs, not when the tool is created.
+- **Combine with Liquid filters.** Use Liquid filters in aliases for transformations: `{{ $.name | upcase }}`, `{{ $.price | divided_by: 100 }}`, `{{ $.email | downcase }}`.
+
+## Next steps
+
+Now that you understand static variables and aliases:
+
+- **[Custom tools](/tools/custom-tools):** Learn how to create and configure custom function tools.
+- **[Code tool](/tools/code-tool):** Run TypeScript code directly on Vapi's infrastructure without a server.
+- **[Tool rejection plan](/tools/tool-rejection-plan):** Add conditions to prevent unintended tool calls.
+- **[API reference](/api-reference/tools/create):** See the complete tool creation API reference.
diff --git a/fern/tools/voicemail-tool.mdx b/fern/tools/voicemail-tool.mdx
index 465b1f999..6dd17239f 100644
--- a/fern/tools/voicemail-tool.mdx
+++ b/fern/tools/voicemail-tool.mdx
@@ -4,15 +4,12 @@ subtitle: Learn how to use the assistant-controlled voicemail tool for flexible
slug: tools/voicemail-tool
---
-
-**Beta Feature**: The voicemail tool is currently in beta. Features and behavior may change as we continue to improve this functionality based on user feedback.
-
-
## Overview
The voicemail tool gives your assistant direct control over when and how to leave voicemail messages. Unlike [automatic voicemail detection](/calls/voicemail-detection), which operates independently of your assistant, this tool allows your assistant to decide when it's reached a voicemail system and leave a configured message.
**Key benefits:**
+
- **Maximum flexibility** - Assistant decides when and what to say
- **Cost-effective** - Only triggers when needed
- **Context-aware** - Messages can be customized based on conversation
@@ -28,7 +25,10 @@ When you add the voicemail tool to your assistant:
4. The call ends automatically after message delivery
-This approach differs from [automatic voicemail detection](/calls/voicemail-detection), which detects voicemail at the system level. The voicemail tool puts detection and response entirely in the assistant's hands.
+ This approach differs from [automatic voicemail
+ detection](/calls/voicemail-detection), which detects voicemail at the system
+ level. The voicemail tool puts detection and response entirely in the
+ assistant's hands.
## Configuration
@@ -71,30 +71,31 @@ import { VapiClient } from "@vapi-ai/server-sdk";
const vapi = new VapiClient({ token: process.env.VAPI_API_KEY });
const assistant = await vapi.assistants.create({
- model: {
- provider: "openai",
- model: "gpt-4o",
- messages: [{
- type: "system",
- content: `You are a sales representative for Acme Corp.
- If at any point you determine you're speaking to a voicemail system
- (greeting mentions 'unavailable', 'leave a message', 'voicemail', etc.),
- immediately use the leave_voicemail tool.`
- }],
- tools: [{
- type: "voicemail",
- function: {
- name: "leave_voicemail",
- description: "Leave a voicemail message when you detect you've reached a voicemail system"
- },
- messages: [{
- type: "request-start",
- content: "Hi, this is {{company}}. {{message}}. Please call us back at {{phone}}."
- }]
- }]
- }
+model: {
+provider: "openai",
+model: "gpt-4o",
+messages: [{
+type: "system",
+content: `You are a sales representative for Acme Corp.
+If at any point you determine you're speaking to a voicemail system
+(greeting mentions 'unavailable', 'leave a message', 'voicemail', etc.),
+immediately use the leave_voicemail tool.`
+}],
+tools: [{
+type: "voicemail",
+function: {
+name: "leave_voicemail",
+description: "Leave a voicemail message when you detect you've reached a voicemail system"
+},
+messages: [{
+type: "request-start",
+content: "Hi, this is {{company}}. {{message}}. Please call us back at {{phone}}."
+}]
+}]
+}
});
-```
+
+````
```python title="Python SDK"
from vapi import Vapi
@@ -106,9 +107,9 @@ assistant = client.assistants.create(
"model": "gpt-4o",
"messages": [{
"type": "system",
- "content": """You are a sales representative for Acme Corp.
- If at any point you determine you're speaking to a voicemail system
- (greeting mentions 'unavailable', 'leave a message', 'voicemail', etc.),
+ "content": """You are a sales representative for Acme Corp.
+ If at any point you determine you're speaking to a voicemail system
+ (greeting mentions 'unavailable', 'leave a message', 'voicemail', etc.),
immediately use the leave_voicemail tool."""
}],
"tools": [{
@@ -124,7 +125,8 @@ assistant = client.assistants.create(
}]
}
)
-```
+````
+
## Message Configuration
@@ -145,7 +147,8 @@ Define the voicemail message in the tool configuration:
```
-Use template variables like `{{company}}`, `{{message}}`, and `{{phone}}` to make your voicemail messages dynamic while keeping them consistent.
+ Use template variables like `{{ company }}`, `{{ message }}`, and `{{ phone }}`
+ to make your voicemail messages dynamic while keeping them consistent.
### **Pre-recorded Audio Messages**
@@ -166,7 +169,8 @@ For consistent quality and pronunciation, use pre-recorded audio files by provid
**Supported formats**: `.wav` and `.mp3` files
-Pre-recorded audio messages are ideal for brand-specific messaging or when you need precise pronunciation of phone numbers, website URLs, or company names.
+ Pre-recorded audio messages are ideal for brand-specific messaging or when you
+ need precise pronunciation of phone numbers, website URLs, or company names.
## Advanced Examples
@@ -237,12 +241,12 @@ Using pre-recorded audio for professional voicemail messages:
}
```
-
-
## Best Practices
### Detection prompting
+
Be specific about voicemail indicators in your system prompt:
+
- "unavailable"
- "leave a message"
- "voicemail"
@@ -250,31 +254,42 @@ Be specific about voicemail indicators in your system prompt:
- "beep"
### Message structure
+
Keep voicemail messages:
+
- **Brief** - Under 30 seconds
- **Clear** - State name, company, and purpose
- **Actionable** - Include callback number or next steps
- **Professional** - Match your brand voice
### Error handling
+
Consider edge cases:
+
- Long voicemail greetings
- Voicemail box full scenarios
- Systems requiring keypad input
## Voicemail Tool vs. Automatic Detection
-| Feature | Voicemail Tool | [Automatic Detection](/calls/voicemail-detection) |
-|---------|----------------|---------------------------------------------------|
-| Control | Assistant-driven | System-driven |
-| Flexibility | High - custom logic | Medium - predefined behavior |
-| Cost | Lower - only when used | Higher - continuous monitoring |
-| Setup complexity | Simple - just add tool | Moderate - configure detection |
-| Message customization | Full control | Limited to configured message |
-| Detection accuracy | Depends on prompt | Provider-specific (Vapi, Google, etc.) |
+| Feature | Voicemail Tool | [Automatic Detection](/calls/voicemail-detection) |
+| --------------------- | ---------------------- | ------------------------------------------------- |
+| Control | Assistant-driven | System-driven |
+| Flexibility | High - custom logic | Medium - predefined behavior |
+| Cost | Lower - only when used | Higher - continuous monitoring |
+| Setup complexity | Simple - just add tool | Moderate - configure detection |
+| Message customization | Full control | Limited to configured message |
+| Detection accuracy | Depends on prompt | Provider-specific (Vapi, Google, etc.) |
+
+
+ Avoid combining the **voicemail tool** with **automatic detection**, as this
+ could result in false positives and other complications.
+
-Choose the **voicemail tool** when you need maximum flexibility and cost efficiency. Choose **automatic detection** when you need guaranteed system-level detection without relying on assistant prompting.
+ Choose the **voicemail tool** when you need maximum flexibility and cost
+ efficiency. Choose **automatic detection** when you need guaranteed
+ system-level detection without relying on assistant prompting.
## Common Use Cases
@@ -289,4 +304,3 @@ Choose the **voicemail tool** when you need maximum flexibility and cost efficie
- Learn about other [default tools](/tools/default-tools)
- Explore [automatic voicemail detection](/calls/voicemail-detection) for system-level handling
- See how to create [custom tools](/tools/custom-tools) for your specific needs
-
diff --git a/fern/voice-fallback-plan.mdx b/fern/voice-fallback-plan.mdx
index 7b8836e46..c735359d4 100644
--- a/fern/voice-fallback-plan.mdx
+++ b/fern/voice-fallback-plan.mdx
@@ -1,35 +1,56 @@
---
-title: Voice Fallback Plan
+title: Voice fallback configuration
subtitle: Configure fallback voices that activate automatically if your primary voice fails.
slug: voice-fallback-plan
---
-
- Voice fallback plans can currently only be configured through the API. We are working on making this available through our dashboard.
-
-
-## Introduction
+## Overview
-Voice fallback plans give you the ability to continue your call in the event that your primary voice fails. Your assistant will sequentially fallback to only the voices you configure within your plan, in the exact order you specify.
+Voice fallback configuration gives you the ability to continue your call in the event that your primary voice fails. Your assistant will sequentially fall back to only the voices you configure within your plan, in the exact order you specify.
Without a fallback plan configured, your call will end with an error in the event that your chosen voice provider fails.
-## How It Works
+## How it works
When a voice failure occurs, Vapi will:
1. Detect the failure of the primary voice
2. If a custom fallback plan exists:
- - Switch to the first fallback voice in your plan
- - Continue through your specified list if subsequent failures occur
- - Terminate only if all voices in your plan have failed
+ - Switch to the first fallback voice in your plan
+ - Continue through your specified list if subsequent failures occur
+ - Terminate only if all voices in your plan have failed
-## Configuration
+## Configure via Dashboard
+
+
+
+ Navigate to your assistant and select the **Voice** tab.
+
+
+ Scroll down to find the **Fallback Voices** collapsible section. A warning indicator appears if no fallback voices are configured.
+
+
+ Click **Add Fallback Voice** to configure your first fallback:
+ - Select a **provider** from the dropdown (supports 20+ voice providers)
+ - Choose a **voice** from the searchable popover (shows gender, language, and deprecated status)
+ - The **model** is automatically selected based on your voice choice
+
+
+ Expand **Additional Configuration** to access provider-specific settings like stability, speed, and emotion controls.
+
+
+ Repeat to add additional fallback voices. Order matters—the first fallback in your list is tried first.
+
+
+
+## Configure via API
Add the `fallbackPlan` property to your assistant's voice configuration, and specify the fallback voices within the `voices` property.
-- Please note that fallback voices must be valid JSON configurations, and not strings.
-- The order matters. Vapi will choose fallback voices starting from the beginning of the list.
+
+
+ Fallback voices must be valid JSON configurations, not strings. The order matters—Vapi will choose fallback voices starting from the beginning of the list.
+
```json
{
@@ -37,26 +58,125 @@ Add the `fallbackPlan` property to your assistant's voice configuration, and spe
"provider": "openai",
"voiceId": "shimmer",
"fallbackPlan": {
- "voices": [
- {
- "provider": "cartesia",
- "voiceId": "248be419-c632-4f23-adf1-5324ed7dbf1d"
- },
- {
- "provider": "11labs",
- "voiceId": "cgSgspJ2msm6clMCkdW9"
- }
- ]
+ "voices": [
+ {
+ "provider": "cartesia",
+ "voiceId": "248be419-c632-4f23-adf1-5324ed7dbf1d"
+ },
+ {
+ "provider": "11labs",
+ "voiceId": "cgSgspJ2msm6clMCkdW9",
+ "stability": 0.5,
+ "similarityBoost": 0.75
+ }
+ ]
}
}
}
```
+## Provider-specific settings
+
+Each voice provider supports different configuration options. Expand the accordions below to see the available settings for each provider.
+
+
+
+ - **stability** (0-1): Controls voice consistency. Lower values allow more emotional range; higher values produce more stable output.
+ - **similarityBoost** (0-1): Enhances similarity to the original voice. Higher values make the voice more similar to the reference.
+ - **style** (0-1): Voice style intensity. Higher values amplify the speaker's style.
+ - **useSpeakerBoost** (boolean): Enable to boost similarity to the original speaker.
+ - **speed** (0.7-1.2): Speech speed multiplier. Default is 1.0.
+ - **optimizeStreamingLatency** (0-4): Controls streaming latency optimization. Default is 3.
+ - **enableSsmlParsing** (boolean): Enable SSML pronunciation support.
+ - **model**: Select from `eleven_multilingual_v2`, `eleven_turbo_v2`, `eleven_turbo_v2_5`, `eleven_flash_v2`, `eleven_flash_v2_5`, or `eleven_monolingual_v1`.
+
+
+ - **model**: Model selection (`sonic-english`, `sonic-3`, etc.).
+ - **language**: Language code for the voice.
+ - **experimentalControls.speed**: Speech speed adjustment (-1 to 1). Negative values slow down; positive values speed up.
+ - **experimentalControls.emotion**: Array of emotion configurations (e.g., `["happiness:high", "curiosity:medium"]`).
+ - **generationConfig** (sonic-3 only):
+ - **speed** (0.6-1.5): Fine-grained speed control.
+ - **volume** (0.5-2.0): Volume adjustment.
+ - **experimental.accentLocalization** (0 or 1): Toggle accent localization.
+
+
+ - **speed** (0.5-2): Speech rate multiplier. Default is 1.0.
+
+
+ - **speed** (0.25-4): Speech speed multiplier. Default is 1.0.
+ - **model**: Select from `tts-1`, `tts-1-hd`, or realtime models.
+ - **instructions**: Voice prompt to control the generated audio style. Does not work with `tts-1` or `tts-1-hd` models.
+
+
+ - **speed** (0.25-2): Speech rate multiplier. Default is 1.0.
+ - **language**: Two-letter ISO 639-1 language code, or `auto` for auto-detection.
+
+
+ - **model**: Select from `arcana`, `mistv2`, or `mist`. Defaults to `arcana`.
+ - **speed** (0.1+): Speech speed multiplier.
+ - **pauseBetweenBrackets** (boolean): Enable pause control using angle brackets (e.g., `<200>` for 200ms pause).
+ - **phonemizeBetweenBrackets** (boolean): Enable phonemization using curly brackets (e.g., `{h'El.o}`).
+ - **reduceLatency** (boolean): Optimize for reduced streaming latency.
+ - **inlineSpeedAlpha**: Inline speed control using alpha notation.
+
+
+ - **speed** (0.1-5): Speech rate multiplier.
+ - **temperature** (0.1-2): Controls voice variance. Lower values are more predictable; higher values allow more variation.
+ - **emotion**: Emotion preset (e.g., `female_happy`, `male_sad`, `female_angry`, `male_surprised`).
+ - **voiceGuidance** (1-6): Controls voice uniqueness. Lower values reduce uniqueness.
+ - **styleGuidance** (1-30): Controls emotion intensity. Higher values create more emotional performance.
+ - **textGuidance** (1-2): Controls text adherence. Higher values are more accurate to input text.
+ - **model**: Select from `PlayHT2.0`, `PlayHT2.0-turbo`, `Play3.0-mini`, or `PlayDialog`.
+
+
+ - **model**: Select from `aura` or `aura-2`. Defaults to `aura-2`.
+ - **mipOptOut** (boolean): Opt out of the Deepgram Model Improvement Partnership program.
+
+
+ - **model**: Model selection (e.g., `octave2`).
+ - **description**: Natural language instructions describing how the speech should sound (tone, intonation, pacing, accent).
+ - **isCustomHumeVoice** (boolean): Indicates whether using a custom Hume voice.
+
+
+ - **model**: Select from `speech-02-hd` (high-fidelity) or `speech-02-turbo` (low latency). Defaults to `speech-02-turbo`.
+ - **emotion**: Emotion preset (`happy`, `sad`, `angry`, `fearful`, `surprised`, `disgusted`, `neutral`).
+ - **pitch** (-12 to 12): Voice pitch adjustment in semitones.
+ - **speed** (0.5-2): Speech speed adjustment.
+ - **volume** (0.5-2): Volume adjustment.
+
+
+ - **model**: Model selection.
+ - **enableSsml** (boolean): Enable limited SSML translation for input text.
+ - **libraryIds**: Array of library IDs to use for voice synthesis.
+
+
+ - **model**: Model selection (e.g., `neu_fast`).
+ - **language**: Language code (required).
+ - **speed** (0.25-2): Speech speed multiplier.
+
+
+ - **model**: Model selection (e.g., `lightning`).
+ - **speed**: Speech speed multiplier.
+
+
+
## Best practices
-- Use different providers for your fallback voices to protect against provider-wide outages.
+- Use **different providers** for your fallback voices to protect against provider-wide outages.
- Select voices with **similar characteristics** (tone, accent, gender) to maintain consistency in the user experience.
+- Test your fallback configuration to ensure smooth transitions between voices.
-## How will pricing work?
+## FAQ
-There is no change to the pricing of the voices. Your call will not incur any extra fees while using fallback voices, and you will be able to see the cost for each voice in your end-of-call report.
+
+
+ There is no change to the pricing of the voices. Your call will not incur any extra fees while using fallback voices, and you will be able to see the cost for each voice in your end-of-call report.
+
+
+ You can configure as many fallback voices as you need. However, we recommend 2-3 fallbacks from different providers for optimal reliability.
+
+
+ Users may notice a brief pause and a change in voice characteristics when switching to a fallback voice. Selecting voices with similar properties helps minimize this disruption.
+
+
diff --git a/fern/workflows/examples/appointment-scheduling.mdx b/fern/workflows/examples/appointment-scheduling.mdx
index 5d7459034..f5631c476 100644
--- a/fern/workflows/examples/appointment-scheduling.mdx
+++ b/fern/workflows/examples/appointment-scheduling.mdx
@@ -5,6 +5,10 @@ slug: workflows/examples/appointment-scheduling
description: Build a voice AI appointment scheduling workflow with calendar integration, availability checking, and automated confirmations using Vapi's workflow builder.
---
+
+This example uses Workflows. For new builds, use **Assistants** or **Squads**. See the updated guides: [Assistant - Appointment Scheduling](/assistants/examples/appointment-scheduling) or [Squads](/squads).
+
+
## Overview
Build an AI-powered appointment scheduling workflow that handles inbound calls for booking, rescheduling, and canceling appointments. The workflow uses visual nodes to create branching logic, integrates with calendar systems, checks availability in real-time, and sends confirmation messages.
diff --git a/fern/workflows/examples/clinic-triage-scheduling.mdx b/fern/workflows/examples/clinic-triage-scheduling.mdx
index 6d327871e..18004c1c8 100644
--- a/fern/workflows/examples/clinic-triage-scheduling.mdx
+++ b/fern/workflows/examples/clinic-triage-scheduling.mdx
@@ -5,6 +5,10 @@ slug: workflows/examples/clinic-triage-scheduling
description: Build a voice AI clinic workflow with medical triage protocols, appointment booking, and emergency routing using Vapi's visual workflow builder.
---
+
+This example uses Workflows. For new builds, use **Squads** for multi-assistant triage and scheduling. See: [Squad - Clinic Triage & Scheduling](/squads/examples/clinic-triage-scheduling).
+
+
## Overview
Build an AI-powered clinic receptionist workflow that handles patient triage, appointment scheduling, and emergency routing using Vapi workflows with medical protocol compliance and safety monitoring.
diff --git a/fern/workflows/examples/ecommerce-order-management.mdx b/fern/workflows/examples/ecommerce-order-management.mdx
index b48d2a86a..20cc50d4d 100644
--- a/fern/workflows/examples/ecommerce-order-management.mdx
+++ b/fern/workflows/examples/ecommerce-order-management.mdx
@@ -5,6 +5,10 @@ slug: workflows/examples/ecommerce-order-management
description: Build a voice AI e-commerce workflow with order tracking, return processing, and customer support automation using Vapi's visual workflow builder.
---
+
+This example uses Workflows. For new builds, use **Squads** with specialized assistants for orders, returns, and VIP support. See: [Squad - E‑commerce Order Management](/squads/examples/ecommerce-order-management).
+
+
## Overview
Build an AI-powered e-commerce customer service workflow that handles order inquiries, returns, and customer support using Vapi workflows with tier-based routing and global monitoring for comprehensive automation.
diff --git a/fern/workflows/examples/lead-qualification.mdx b/fern/workflows/examples/lead-qualification.mdx
index e7ad00a99..96cbd96d4 100644
--- a/fern/workflows/examples/lead-qualification.mdx
+++ b/fern/workflows/examples/lead-qualification.mdx
@@ -5,6 +5,10 @@ slug: workflows/examples/lead-qualification
description: Build a voice AI outbound sales workflow with lead qualification, CRM integration, and automated follow-up using Vapi's visual workflow builder.
---
+
+This example uses Workflows. For new builds, use **Assistants** or **Squads**. See: [Assistant - Lead Qualification](/assistants/examples/lead-qualification) and [Squads](/squads).
+
+
## Overview
Build an AI-powered outbound sales workflow that qualifies leads, handles objections, and schedules appointments using Vapi workflows with sophisticated branching logic and CRM integration.
diff --git a/fern/workflows/examples/multilingual-support.mdx b/fern/workflows/examples/multilingual-support.mdx
index fc31642bf..c30bab4b7 100644
--- a/fern/workflows/examples/multilingual-support.mdx
+++ b/fern/workflows/examples/multilingual-support.mdx
@@ -5,6 +5,10 @@ slug: workflows/examples/multilingual-support
description: Build a multilingual voice AI customer support workflow with language selection, dedicated conversation nodes, and cultural context using Vapi's workflow builder.
---
+
+This example uses Workflows. For new builds, use a **Squad** with language‑specific assistants. See: [Squad - Multilingual Support](/squads/examples/multilingual-support).
+
+
## Overview
Build a structured multilingual customer support workflow that guides customers through language selection at the start of the call, then routes them to dedicated conversation paths optimized for English, Spanish, and French support.
diff --git a/fern/workflows/examples/property-management.mdx b/fern/workflows/examples/property-management.mdx
index d93b84371..6b356b238 100644
--- a/fern/workflows/examples/property-management.mdx
+++ b/fern/workflows/examples/property-management.mdx
@@ -9,6 +9,10 @@ description: Build a voice AI property management system with dynamic call routi
+
+This example uses Workflows. For new builds, use **Squads** with a router assistant and domain specialists. See: [Squad - Property Management Routing](/squads/examples/property-management).
+
+
## Overview
Build a property management call routing workflow that determines transfer destinations dynamically using tenant verification, inquiry type analysis, and real-time agent availability. This approach uses visual workflow nodes with API Request nodes for maximum routing flexibility.
diff --git a/fern/workflows/overview.mdx b/fern/workflows/overview.mdx
index c1263e763..e4bb2b8a2 100644
--- a/fern/workflows/overview.mdx
+++ b/fern/workflows/overview.mdx
@@ -4,6 +4,20 @@ subtitle: Learn to create robust, deterministic conversation flows with a visual
slug: workflows/overview
---
+
+We no longer recommend Workflows for new builds. Prefer **Assistants** or **Squads** depending on complexity. See [Assistants](/assistants/dynamic-variables) and [Squads](/squads). This page is retained for legacy reference.
+
+Why We’ve Moved Away from Workflows
+
+While the Workflows product is fully built, we’ve found that current AI systems aren’t yet capable of acting as truly autonomous agents that can:
+
+1. Maintain awareness of the current node’s instructions
+2. Understand all possible next steps and the conditions required to reach them
+
+In contrast, the Squads pattern has consistently led to better results for customers. We’re now focusing on improving the ergonomics and developer experience around Squads to make this approach even more effective.
+
+
+
## Introduction
Workflows is a visual builder designed for creating robust, deterministic conversation flows. It empowers developers and low-code builders to design agents through an intuitive interface representing interactions via nodes and edges.
diff --git a/fern/workflows/quickstart.mdx b/fern/workflows/quickstart.mdx
index 7bea279b5..8183f0dcb 100644
--- a/fern/workflows/quickstart.mdx
+++ b/fern/workflows/quickstart.mdx
@@ -5,6 +5,10 @@ slug: workflows/quickstart
description: Build a simple agent that greets users and gathers basic information using Vapi workflows.
---
+
+We no longer recommend Workflows for new builds. Use **Assistants** for most cases or **Squads** for multi-assistant setups. See [Assistants](/assistants/dynamic-variables) and [Squads](/squads). Existing workflow content remains for reference.
+
+
## Overview
Build a simple voice agent using Vapi's visual workflow builder that greets users, collects their information, and demonstrates core workflow concepts like variable extraction, conditional routing, and global nodes.