Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
fix(blocks): resolve Ollama models incorrectly requiring API key in Docker

Server-side validation failed for Ollama models like mistral:latest because
the Zustand providers store is empty on the server and getProviderFromModel
misidentified them via regex pattern matching (e.g. mistral:latest matched
Mistral AI's /^mistral/ pattern).

Replace the hardcoded CLOUD_PROVIDER_PREFIXES list with existing data sources:
- Provider store (definitive on client, checks all provider buckets)
- getBaseModelProviders() from PROVIDER_DEFINITIONS (server-side static cloud model lookup)
- Slash convention for dynamic cloud providers (fireworks/, openrouter/, etc.)
- isOllamaConfigured feature flag using existing OLLAMA_URL env var

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
  • Loading branch information
waleedlatif1 and claude committed Apr 5, 2026
commit 3f855a1b8ebd53d605a465c325e908876249c09f
303 changes: 303 additions & 0 deletions apps/sim/blocks/utils.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,303 @@
/**
* @vitest-environment node
*/
import { beforeEach, describe, expect, it, vi } from 'vitest'

// Mutable feature-flag holders, hoisted so the vi.mock factories below can
// close over them; tests flip `.value` to simulate different deployment
// configurations (hosted platform, Azure configured, OLLAMA_URL set).
const { mockIsHosted, mockIsAzureConfigured, mockIsOllamaConfigured } = vi.hoisted(() => ({
  mockIsHosted: { value: false },
  mockIsAzureConfigured: { value: false },
  mockIsOllamaConfigured: { value: false },
}))

// Hoisted mock functions backing the @/providers/models module mock below.
// Return and parameter types are annotated explicitly: `vi.fn(() => [])`
// would infer a `never[]` return and a zero-arg signature, which makes the
// later `mockReturnValue(['gpt-4o'])` and
// `mockImplementation(simulateGetProviderFromModel)` calls type errors
// under strict TypeScript.
const {
  mockGetHostedModels,
  mockGetProviderModels,
  mockGetProviderIcon,
  mockGetProviderFromModel,
  mockGetBaseModelProviders,
} = vi.hoisted(() => ({
  mockGetHostedModels: vi.fn((): string[] => []),
  mockGetProviderModels: vi.fn((): string[] => []),
  mockGetProviderIcon: vi.fn(() => null),
  mockGetProviderFromModel: vi.fn((_model: string): string => 'ollama'),
  mockGetBaseModelProviders: vi.fn((): Record<string, string> => ({})),
}))

// Hoisted, mutable snapshot of the Zustand providers-store state. Tests
// populate the per-provider `models` buckets to simulate what the client
// has discovered; beforeEach resets all buckets to empty.
const { mockProviders } = vi.hoisted(() => ({
  mockProviders: {
    value: {
      base: { models: [] as string[], isLoading: false },
      ollama: { models: [] as string[], isLoading: false },
      vllm: { models: [] as string[], isLoading: false },
      openrouter: { models: [] as string[], isLoading: false },
      fireworks: { models: [] as string[], isLoading: false },
    },
  },
}))

// Feature flags are exposed through getters so each access re-reads the
// hoisted `.value` holders — tests can toggle them at runtime.
vi.mock('@/lib/core/config/feature-flags', () => ({
  get isHosted() {
    return mockIsHosted.value
  },
  get isAzureConfigured() {
    return mockIsAzureConfigured.value
  },
  get isOllamaConfigured() {
    return mockIsOllamaConfigured.value
  },
}))

// Route all provider-model helpers to the hoisted mock functions above.
vi.mock('@/providers/models', () => ({
  getHostedModels: mockGetHostedModels,
  getProviderModels: mockGetProviderModels,
  getProviderIcon: mockGetProviderIcon,
  getProviderFromModel: mockGetProviderFromModel,
  getBaseModelProviders: mockGetBaseModelProviders,
}))

// The providers store hands back the hoisted mutable state via a getter so
// per-test mutations of mockProviders.value are visible to the code under test.
vi.mock('@/stores/providers/store', () => ({
  useProvidersStore: {
    getState: () => ({
      get providers() {
        return mockProviders.value
      },
    }),
  },
}))

// OAuth scope resolution is irrelevant to API-key gating; stub it out.
vi.mock('@/lib/oauth/utils', () => ({
  getScopesForService: vi.fn(() => []),
}))

import { getApiKeyCondition } from '@/blocks/utils'

/**
 * Simulates getProviderFromModel behavior: resolves explicit `provider/`
 * prefixes first, then falls back to name-pattern matching, and defaults
 * to 'ollama' for anything unrecognized (matching the real implementation).
 */
function simulateGetProviderFromModel(model: string): string {
  const lowered = model.toLowerCase()

  // Explicit provider prefixes take priority over name patterns.
  const prefixRules: Array<[string, string]> = [
    ['fireworks/', 'fireworks'],
    ['openrouter/', 'openrouter'],
    ['vllm/', 'vllm'],
    ['vertex/', 'vertex'],
    ['bedrock/', 'bedrock'],
    ['azure/', 'azure-openai'],
    ['azure-openai/', 'azure-openai'],
    ['azure-anthropic/', 'azure-anthropic'],
    ['groq/', 'groq'],
    ['cerebras/', 'cerebras'],
  ]
  for (const [prefix, provider] of prefixRules) {
    if (lowered.startsWith(prefix)) return provider
  }

  // Bare model names are matched by their conventional naming patterns.
  const patternRules: Array<[RegExp, string]> = [
    [/^gpt|^o\d/, 'openai'],
    [/^claude/, 'anthropic'],
    [/^gemini/, 'google'],
    [/^grok/, 'xai'],
    [/^mistral|^magistral/, 'mistral'],
  ]
  for (const [pattern, provider] of patternRules) {
    if (pattern.test(lowered)) return provider
  }

  return 'ollama'
}

// Static cloud model → provider map used to stub getBaseModelProviders()
// in tests that need "known cloud model" lookups to succeed.
const BASE_CLOUD_MODELS: Record<string, string> = {
  'gpt-4o': 'openai',
  'claude-sonnet-4-5': 'anthropic',
  'gemini-2.5-pro': 'google',
  'mistral-large-latest': 'mistral',
}

describe('getApiKeyCondition / shouldRequireApiKeyForModel', () => {
  /**
   * Evaluates the API-key visibility condition for a single model value and
   * collapses it to "does this model require an API key?". A `not` condition
   * or the `__no_model_selected__` sentinel both mean the API-key field is
   * hidden, i.e. no key is required.
   */
  const evaluateCondition = (model: string): boolean => {
    const conditionFn = getApiKeyCondition()
    const condition = conditionFn({ model })
    if ('not' in condition && condition.not) return false
    if (condition.value === '__no_model_selected__') return false
    return true
  }

  beforeEach(() => {
    // Reset to a self-hosted baseline: all feature flags off, every store
    // bucket empty, and the realistic provider-resolution simulation wired
    // in for the getProviderFromModel fallback path.
    vi.clearAllMocks()
    mockIsHosted.value = false
    mockIsAzureConfigured.value = false
    mockIsOllamaConfigured.value = false
    mockProviders.value = {
      base: { models: [], isLoading: false },
      ollama: { models: [], isLoading: false },
      vllm: { models: [], isLoading: false },
      openrouter: { models: [], isLoading: false },
      fireworks: { models: [], isLoading: false },
    }
    mockGetHostedModels.mockReturnValue([])
    mockGetProviderModels.mockReturnValue([])
    mockGetProviderFromModel.mockImplementation(simulateGetProviderFromModel)
    mockGetBaseModelProviders.mockReturnValue({})
  })

  describe('empty or missing model', () => {
    it('does not require API key when model is empty', () => {
      expect(evaluateCondition('')).toBe(false)
    })

    it('does not require API key when model is whitespace', () => {
      expect(evaluateCondition('   ')).toBe(false)
    })
  })

  describe('hosted models', () => {
    it('does not require API key for hosted models on hosted platform', () => {
      mockIsHosted.value = true
      mockGetHostedModels.mockReturnValue(['gpt-4o', 'claude-sonnet-4-5'])
      expect(evaluateCondition('gpt-4o')).toBe(false)
      expect(evaluateCondition('claude-sonnet-4-5')).toBe(false)
    })

    it('requires API key for non-hosted models on hosted platform', () => {
      mockIsHosted.value = true
      mockGetHostedModels.mockReturnValue(['gpt-4o'])
      expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
    })
  })

  // Vertex and Bedrock use platform credentials, never a per-model API key.
  describe('Vertex AI models', () => {
    it('does not require API key for vertex/ prefixed models', () => {
      expect(evaluateCondition('vertex/gemini-2.5-pro')).toBe(false)
    })
  })

  describe('Bedrock models', () => {
    it('does not require API key for bedrock/ prefixed models', () => {
      expect(evaluateCondition('bedrock/anthropic.claude-v2')).toBe(false)
    })
  })

  // Azure models are exempt only when Azure credentials are configured
  // via environment (isAzureConfigured flag).
  describe('Azure models', () => {
    it('does not require API key for azure/ models when Azure is configured', () => {
      mockIsAzureConfigured.value = true
      expect(evaluateCondition('azure/gpt-4o')).toBe(false)
      expect(evaluateCondition('azure-openai/gpt-4o')).toBe(false)
      expect(evaluateCondition('azure-anthropic/claude-sonnet-4-5')).toBe(false)
    })

    it('requires API key for azure/ models when Azure is not configured', () => {
      mockIsAzureConfigured.value = false
      expect(evaluateCondition('azure/gpt-4o')).toBe(true)
    })
  })

  describe('vLLM models', () => {
    it('does not require API key for vllm/ prefixed models', () => {
      expect(evaluateCondition('vllm/my-model')).toBe(false)
      expect(evaluateCondition('vllm/llama-3-70b')).toBe(false)
    })
  })

  // Client-side path: when the Zustand providers store knows the model, the
  // bucket it sits in decides the outcome (ollama → no key, cloud → key).
  describe('provider store lookup (client-side)', () => {
    it('does not require API key when model is in the Ollama store bucket', () => {
      mockProviders.value.ollama.models = ['llama3:latest', 'mistral:latest']
      expect(evaluateCondition('llama3:latest')).toBe(false)
      expect(evaluateCondition('mistral:latest')).toBe(false)
    })

    it('requires API key when model is in the base store bucket', () => {
      mockProviders.value.base.models = ['gpt-4o', 'claude-sonnet-4-5']
      expect(evaluateCondition('gpt-4o')).toBe(true)
      expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
    })

    it('requires API key when model is in the fireworks store bucket', () => {
      mockProviders.value.fireworks.models = ['fireworks/llama-3']
      expect(evaluateCondition('fireworks/llama-3')).toBe(true)
    })

    it('requires API key when model is in the openrouter store bucket', () => {
      mockProviders.value.openrouter.models = ['openrouter/anthropic/claude']
      expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
    })

    it('is case-insensitive for store lookup', () => {
      mockProviders.value.ollama.models = ['Llama3:Latest']
      expect(evaluateCondition('llama3:latest')).toBe(false)
    })
  })

  // Server-safe path: the store is empty on the server, so the OLLAMA_URL
  // feature flag must be enough to exempt locally-served models.
  describe('Ollama — OLLAMA_URL env var (server-safe)', () => {
    it('does not require API key for unknown models when OLLAMA_URL is set', () => {
      mockIsOllamaConfigured.value = true
      expect(evaluateCondition('llama3:latest')).toBe(false)
      expect(evaluateCondition('phi3:latest')).toBe(false)
      expect(evaluateCondition('gemma2:latest')).toBe(false)
      expect(evaluateCondition('deepseek-coder:latest')).toBe(false)
    })

    it('does not require API key for Ollama models that match cloud provider regex patterns', () => {
      // Regression for the original bug: these names collide with cloud
      // provider regexes (e.g. mistral:latest vs Mistral AI's /^mistral/),
      // but the OLLAMA_URL flag must win for unprefixed, unknown models.
      mockIsOllamaConfigured.value = true
      expect(evaluateCondition('mistral:latest')).toBe(false)
      expect(evaluateCondition('mistral')).toBe(false)
      expect(evaluateCondition('mistral-nemo')).toBe(false)
      expect(evaluateCondition('gpt2')).toBe(false)
    })

    it('requires API key for known cloud models even when OLLAMA_URL is set', () => {
      mockIsOllamaConfigured.value = true
      mockGetBaseModelProviders.mockReturnValue(BASE_CLOUD_MODELS)
      expect(evaluateCondition('gpt-4o')).toBe(true)
      expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
      expect(evaluateCondition('gemini-2.5-pro')).toBe(true)
      expect(evaluateCondition('mistral-large-latest')).toBe(true)
    })

    it('requires API key for slash-prefixed cloud models when OLLAMA_URL is set', () => {
      mockIsOllamaConfigured.value = true
      expect(evaluateCondition('azure/gpt-4o')).toBe(true)
      expect(evaluateCondition('fireworks/llama-3')).toBe(true)
      expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
      expect(evaluateCondition('groq/llama-3')).toBe(true)
    })
  })

  describe('cloud provider models that need API key', () => {
    it('requires API key for standard cloud models on hosted platform', () => {
      mockIsHosted.value = true
      mockGetHostedModels.mockReturnValue([])
      expect(evaluateCondition('gpt-4o')).toBe(true)
      expect(evaluateCondition('claude-sonnet-4-5')).toBe(true)
      expect(evaluateCondition('gemini-2.5-pro')).toBe(true)
      expect(evaluateCondition('mistral-large-latest')).toBe(true)
    })

    it('requires API key for prefixed cloud models on hosted platform', () => {
      mockIsHosted.value = true
      expect(evaluateCondition('fireworks/llama-3')).toBe(true)
      expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
      expect(evaluateCondition('groq/llama-3')).toBe(true)
      expect(evaluateCondition('cerebras/gpt-oss-120b')).toBe(true)
    })

    it('requires API key for prefixed cloud models on self-hosted', () => {
      mockIsHosted.value = false
      expect(evaluateCondition('fireworks/llama-3')).toBe(true)
      expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true)
      expect(evaluateCondition('groq/llama-3')).toBe(true)
      expect(evaluateCondition('cerebras/gpt-oss-120b')).toBe(true)
    })
  })

  // Last-resort path on self-hosted: defer to getProviderFromModel, which
  // defaults unrecognized names to 'ollama'.
  describe('self-hosted getProviderFromModel fallback', () => {
    it('does not require API key when getProviderFromModel defaults to ollama', () => {
      mockIsHosted.value = false
      mockIsOllamaConfigured.value = false
      expect(evaluateCondition('llama3:latest')).toBe(false)
      expect(evaluateCondition('phi3:latest')).toBe(false)
    })

    it('requires API key when getProviderFromModel returns a cloud provider', () => {
      // Without OLLAMA_URL, regex collisions resolve to the cloud provider.
      mockIsHosted.value = false
      mockIsOllamaConfigured.value = false
      expect(evaluateCondition('mistral:latest')).toBe(true)
      expect(evaluateCondition('gpt2')).toBe(true)
    })

    it('does not run getProviderFromModel fallback on hosted platform', () => {
      mockIsHosted.value = true
      mockGetHostedModels.mockReturnValue([])
      expect(evaluateCondition('llama3:latest')).toBe(true)
      expect(mockGetProviderFromModel).not.toHaveBeenCalled()
    })
  })
})
50 changes: 24 additions & 26 deletions apps/sim/blocks/utils.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
import { isAzureConfigured, isHosted } from '@/lib/core/config/feature-flags'
import { isAzureConfigured, isHosted, isOllamaConfigured } from '@/lib/core/config/feature-flags'
import { getScopesForService } from '@/lib/oauth/utils'
import type { BlockOutput, OutputFieldDefinition, SubBlockConfig } from '@/blocks/types'
import {
getBaseModelProviders,
getHostedModels,
getProviderFromModel,
getProviderIcon,
Expand Down Expand Up @@ -100,11 +101,15 @@ export function resolveOutputType(
return resolvedOutputs
}

/**
* Helper to get current Ollama models from store
*/
const getCurrentOllamaModels = () => {
return useProvidersStore.getState().providers.ollama.models
/**
 * Looks up which provider bucket in the client-side providers store, if any,
 * lists the given model. Comparison is case-insensitive across every bucket.
 * Returns the provider key, or null when no bucket contains the model.
 */
function getProviderFromStore(model: string): string | null {
  const target = model.toLowerCase()
  const { providers } = useProvidersStore.getState()
  const match = Object.entries(providers).find(([, bucket]) =>
    bucket.models.some((candidate: string) => candidate.toLowerCase() === target)
  )
  return match ? match[0] : null
}
Comment thread
waleedlatif1 marked this conversation as resolved.

function buildModelVisibilityCondition(model: string, shouldShow: boolean) {
Expand All @@ -119,16 +124,14 @@ function shouldRequireApiKeyForModel(model: string): boolean {
const normalizedModel = model.trim().toLowerCase()
if (!normalizedModel) return false

const hostedModels = getHostedModels()
const isHostedModel = hostedModels.some(
(hostedModel) => hostedModel.toLowerCase() === normalizedModel
)
if (isHosted && isHostedModel) return false
if (isHosted) {
const hostedModels = getHostedModels()
if (hostedModels.some((m) => m.toLowerCase() === normalizedModel)) return false
}

if (normalizedModel.startsWith('vertex/') || normalizedModel.startsWith('bedrock/')) {
return false
}

if (
isAzureConfigured &&
(normalizedModel.startsWith('azure/') ||
Expand All @@ -138,30 +141,25 @@ function shouldRequireApiKeyForModel(model: string): boolean {
) {
return false
}

if (normalizedModel.startsWith('vllm/')) {
return false
}

const currentOllamaModels = getCurrentOllamaModels()
if (currentOllamaModels.some((ollamaModel) => ollamaModel.toLowerCase() === normalizedModel)) {
const storeProvider = getProviderFromStore(normalizedModel)
if (storeProvider === 'ollama') return false
if (storeProvider) return true

if (isOllamaConfigured) {
if (normalizedModel.includes('/')) return true
if (normalizedModel in getBaseModelProviders()) return true
return false
}
Comment thread
waleedlatif1 marked this conversation as resolved.

if (!isHosted) {
try {
const providerId = getProviderFromModel(model)
if (
providerId === 'ollama' ||
providerId === 'vllm' ||
providerId === 'vertex' ||
providerId === 'bedrock'
) {
return false
}
} catch {
// If model resolution fails, fall through and require an API key.
}
if (['ollama', 'vllm', 'vertex', 'bedrock'].includes(providerId)) return false
} catch {}
}

return true
Expand Down
Loading
Loading