-
Notifications
You must be signed in to change notification settings - Fork 3.5k
fix(blocks): resolve Ollama models incorrectly requiring API key in Docker #3976
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
Merged
Changes from 1 commit
Commits
Show all changes
4 commits
Select commit
Hold shift + click to select a range
3f855a1
fix(blocks): resolve Ollama models incorrectly requiring API key in D…
waleedlatif1 28f8186
refactor: remove getProviderFromModel regex fallback from API key val…
waleedlatif1 adfdf8f
lint
waleedlatif1 8225115
fix: handle vLLM models in store provider check
waleedlatif1 File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next
Next commit
fix(blocks): resolve Ollama models incorrectly requiring API key in Docker

Server-side validation failed for Ollama models like mistral:latest because the Zustand providers store is empty on the server and getProviderFromModel misidentified them via regex pattern matching (e.g. mistral:latest matched Mistral AI's /^mistral/ pattern). Replace the hardcoded CLOUD_PROVIDER_PREFIXES list with existing data sources:

- Provider store (definitive on client, checks all provider buckets)
- getBaseModelProviders() from PROVIDER_DEFINITIONS (server-side static cloud model lookup)
- Slash convention for dynamic cloud providers (fireworks/, openrouter/, etc.)
- isOllamaConfigured feature flag using existing OLLAMA_URL env var

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
- Loading branch information
commit 3f855a1b8ebd53d605a465c325e908876249c09f
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,303 @@ | ||
| /** | ||
| * @vitest-environment node | ||
| */ | ||
| import { beforeEach, describe, expect, it, vi } from 'vitest' | ||
|
|
||
| const { mockIsHosted, mockIsAzureConfigured, mockIsOllamaConfigured } = vi.hoisted(() => ({ | ||
| mockIsHosted: { value: false }, | ||
| mockIsAzureConfigured: { value: false }, | ||
| mockIsOllamaConfigured: { value: false }, | ||
| })) | ||
|
|
||
| const { | ||
| mockGetHostedModels, | ||
| mockGetProviderModels, | ||
| mockGetProviderIcon, | ||
| mockGetProviderFromModel, | ||
| mockGetBaseModelProviders, | ||
| } = vi.hoisted(() => ({ | ||
| mockGetHostedModels: vi.fn(() => []), | ||
| mockGetProviderModels: vi.fn(() => []), | ||
| mockGetProviderIcon: vi.fn(() => null), | ||
| mockGetProviderFromModel: vi.fn(() => 'ollama'), | ||
| mockGetBaseModelProviders: vi.fn(() => ({})), | ||
| })) | ||
|
|
||
| const { mockProviders } = vi.hoisted(() => ({ | ||
| mockProviders: { | ||
| value: { | ||
| base: { models: [] as string[], isLoading: false }, | ||
| ollama: { models: [] as string[], isLoading: false }, | ||
| vllm: { models: [] as string[], isLoading: false }, | ||
| openrouter: { models: [] as string[], isLoading: false }, | ||
| fireworks: { models: [] as string[], isLoading: false }, | ||
| }, | ||
| }, | ||
| })) | ||
|
|
||
| vi.mock('@/lib/core/config/feature-flags', () => ({ | ||
| get isHosted() { | ||
| return mockIsHosted.value | ||
| }, | ||
| get isAzureConfigured() { | ||
| return mockIsAzureConfigured.value | ||
| }, | ||
| get isOllamaConfigured() { | ||
| return mockIsOllamaConfigured.value | ||
| }, | ||
| })) | ||
|
|
||
| vi.mock('@/providers/models', () => ({ | ||
| getHostedModels: mockGetHostedModels, | ||
| getProviderModels: mockGetProviderModels, | ||
| getProviderIcon: mockGetProviderIcon, | ||
| getProviderFromModel: mockGetProviderFromModel, | ||
| getBaseModelProviders: mockGetBaseModelProviders, | ||
| })) | ||
|
|
||
| vi.mock('@/stores/providers/store', () => ({ | ||
| useProvidersStore: { | ||
| getState: () => ({ | ||
| get providers() { | ||
| return mockProviders.value | ||
| }, | ||
| }), | ||
| }, | ||
| })) | ||
|
|
||
| vi.mock('@/lib/oauth/utils', () => ({ | ||
| getScopesForService: vi.fn(() => []), | ||
| })) | ||
|
|
||
| import { getApiKeyCondition } from '@/blocks/utils' | ||
|
|
||
| /** | ||
| * Simulates getProviderFromModel behavior: checks known prefix patterns, | ||
| * defaults to 'ollama' for unrecognized models (matching real implementation). | ||
| */ | ||
| function simulateGetProviderFromModel(model: string): string { | ||
| const m = model.toLowerCase() | ||
| if (m.startsWith('fireworks/')) return 'fireworks' | ||
| if (m.startsWith('openrouter/')) return 'openrouter' | ||
| if (m.startsWith('vllm/')) return 'vllm' | ||
| if (m.startsWith('vertex/')) return 'vertex' | ||
| if (m.startsWith('bedrock/')) return 'bedrock' | ||
| if (m.startsWith('azure/')) return 'azure-openai' | ||
| if (m.startsWith('azure-openai/')) return 'azure-openai' | ||
| if (m.startsWith('azure-anthropic/')) return 'azure-anthropic' | ||
| if (m.startsWith('groq/')) return 'groq' | ||
| if (m.startsWith('cerebras/')) return 'cerebras' | ||
| if (/^gpt/.test(m) || /^o\d/.test(m)) return 'openai' | ||
| if (/^claude/.test(m)) return 'anthropic' | ||
| if (/^gemini/.test(m)) return 'google' | ||
| if (/^grok/.test(m)) return 'xai' | ||
| if (/^mistral/.test(m) || /^magistral/.test(m)) return 'mistral' | ||
| return 'ollama' | ||
| } | ||
|
|
||
| const BASE_CLOUD_MODELS: Record<string, string> = { | ||
| 'gpt-4o': 'openai', | ||
| 'claude-sonnet-4-5': 'anthropic', | ||
| 'gemini-2.5-pro': 'google', | ||
| 'mistral-large-latest': 'mistral', | ||
| } | ||
|
|
||
| describe('getApiKeyCondition / shouldRequireApiKeyForModel', () => { | ||
| const evaluateCondition = (model: string): boolean => { | ||
| const conditionFn = getApiKeyCondition() | ||
| const condition = conditionFn({ model }) | ||
| if ('not' in condition && condition.not) return false | ||
| if (condition.value === '__no_model_selected__') return false | ||
| return true | ||
| } | ||
|
|
||
| beforeEach(() => { | ||
| vi.clearAllMocks() | ||
| mockIsHosted.value = false | ||
| mockIsAzureConfigured.value = false | ||
| mockIsOllamaConfigured.value = false | ||
| mockProviders.value = { | ||
| base: { models: [], isLoading: false }, | ||
| ollama: { models: [], isLoading: false }, | ||
| vllm: { models: [], isLoading: false }, | ||
| openrouter: { models: [], isLoading: false }, | ||
| fireworks: { models: [], isLoading: false }, | ||
| } | ||
| mockGetHostedModels.mockReturnValue([]) | ||
| mockGetProviderModels.mockReturnValue([]) | ||
| mockGetProviderFromModel.mockImplementation(simulateGetProviderFromModel) | ||
| mockGetBaseModelProviders.mockReturnValue({}) | ||
| }) | ||
|
|
||
| describe('empty or missing model', () => { | ||
| it('does not require API key when model is empty', () => { | ||
| expect(evaluateCondition('')).toBe(false) | ||
| }) | ||
|
|
||
| it('does not require API key when model is whitespace', () => { | ||
| expect(evaluateCondition(' ')).toBe(false) | ||
| }) | ||
| }) | ||
|
|
||
| describe('hosted models', () => { | ||
| it('does not require API key for hosted models on hosted platform', () => { | ||
| mockIsHosted.value = true | ||
| mockGetHostedModels.mockReturnValue(['gpt-4o', 'claude-sonnet-4-5']) | ||
| expect(evaluateCondition('gpt-4o')).toBe(false) | ||
| expect(evaluateCondition('claude-sonnet-4-5')).toBe(false) | ||
| }) | ||
|
|
||
| it('requires API key for non-hosted models on hosted platform', () => { | ||
| mockIsHosted.value = true | ||
| mockGetHostedModels.mockReturnValue(['gpt-4o']) | ||
| expect(evaluateCondition('claude-sonnet-4-5')).toBe(true) | ||
| }) | ||
| }) | ||
|
|
||
| describe('Vertex AI models', () => { | ||
| it('does not require API key for vertex/ prefixed models', () => { | ||
| expect(evaluateCondition('vertex/gemini-2.5-pro')).toBe(false) | ||
| }) | ||
| }) | ||
|
|
||
| describe('Bedrock models', () => { | ||
| it('does not require API key for bedrock/ prefixed models', () => { | ||
| expect(evaluateCondition('bedrock/anthropic.claude-v2')).toBe(false) | ||
| }) | ||
| }) | ||
|
|
||
| describe('Azure models', () => { | ||
| it('does not require API key for azure/ models when Azure is configured', () => { | ||
| mockIsAzureConfigured.value = true | ||
| expect(evaluateCondition('azure/gpt-4o')).toBe(false) | ||
| expect(evaluateCondition('azure-openai/gpt-4o')).toBe(false) | ||
| expect(evaluateCondition('azure-anthropic/claude-sonnet-4-5')).toBe(false) | ||
| }) | ||
|
|
||
| it('requires API key for azure/ models when Azure is not configured', () => { | ||
| mockIsAzureConfigured.value = false | ||
| expect(evaluateCondition('azure/gpt-4o')).toBe(true) | ||
| }) | ||
| }) | ||
|
|
||
| describe('vLLM models', () => { | ||
| it('does not require API key for vllm/ prefixed models', () => { | ||
| expect(evaluateCondition('vllm/my-model')).toBe(false) | ||
| expect(evaluateCondition('vllm/llama-3-70b')).toBe(false) | ||
| }) | ||
| }) | ||
|
|
||
| describe('provider store lookup (client-side)', () => { | ||
| it('does not require API key when model is in the Ollama store bucket', () => { | ||
| mockProviders.value.ollama.models = ['llama3:latest', 'mistral:latest'] | ||
| expect(evaluateCondition('llama3:latest')).toBe(false) | ||
| expect(evaluateCondition('mistral:latest')).toBe(false) | ||
| }) | ||
|
|
||
| it('requires API key when model is in the base store bucket', () => { | ||
| mockProviders.value.base.models = ['gpt-4o', 'claude-sonnet-4-5'] | ||
| expect(evaluateCondition('gpt-4o')).toBe(true) | ||
| expect(evaluateCondition('claude-sonnet-4-5')).toBe(true) | ||
| }) | ||
|
|
||
| it('requires API key when model is in the fireworks store bucket', () => { | ||
| mockProviders.value.fireworks.models = ['fireworks/llama-3'] | ||
| expect(evaluateCondition('fireworks/llama-3')).toBe(true) | ||
| }) | ||
|
|
||
| it('requires API key when model is in the openrouter store bucket', () => { | ||
| mockProviders.value.openrouter.models = ['openrouter/anthropic/claude'] | ||
| expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true) | ||
| }) | ||
|
|
||
| it('is case-insensitive for store lookup', () => { | ||
| mockProviders.value.ollama.models = ['Llama3:Latest'] | ||
| expect(evaluateCondition('llama3:latest')).toBe(false) | ||
| }) | ||
| }) | ||
|
|
||
| describe('Ollama — OLLAMA_URL env var (server-safe)', () => { | ||
| it('does not require API key for unknown models when OLLAMA_URL is set', () => { | ||
| mockIsOllamaConfigured.value = true | ||
| expect(evaluateCondition('llama3:latest')).toBe(false) | ||
| expect(evaluateCondition('phi3:latest')).toBe(false) | ||
| expect(evaluateCondition('gemma2:latest')).toBe(false) | ||
| expect(evaluateCondition('deepseek-coder:latest')).toBe(false) | ||
| }) | ||
|
|
||
| it('does not require API key for Ollama models that match cloud provider regex patterns', () => { | ||
| mockIsOllamaConfigured.value = true | ||
| expect(evaluateCondition('mistral:latest')).toBe(false) | ||
| expect(evaluateCondition('mistral')).toBe(false) | ||
| expect(evaluateCondition('mistral-nemo')).toBe(false) | ||
| expect(evaluateCondition('gpt2')).toBe(false) | ||
| }) | ||
|
|
||
| it('requires API key for known cloud models even when OLLAMA_URL is set', () => { | ||
| mockIsOllamaConfigured.value = true | ||
| mockGetBaseModelProviders.mockReturnValue(BASE_CLOUD_MODELS) | ||
| expect(evaluateCondition('gpt-4o')).toBe(true) | ||
| expect(evaluateCondition('claude-sonnet-4-5')).toBe(true) | ||
| expect(evaluateCondition('gemini-2.5-pro')).toBe(true) | ||
| expect(evaluateCondition('mistral-large-latest')).toBe(true) | ||
| }) | ||
|
|
||
| it('requires API key for slash-prefixed cloud models when OLLAMA_URL is set', () => { | ||
| mockIsOllamaConfigured.value = true | ||
| expect(evaluateCondition('azure/gpt-4o')).toBe(true) | ||
| expect(evaluateCondition('fireworks/llama-3')).toBe(true) | ||
| expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true) | ||
| expect(evaluateCondition('groq/llama-3')).toBe(true) | ||
| }) | ||
| }) | ||
|
|
||
| describe('cloud provider models that need API key', () => { | ||
| it('requires API key for standard cloud models on hosted platform', () => { | ||
| mockIsHosted.value = true | ||
| mockGetHostedModels.mockReturnValue([]) | ||
| expect(evaluateCondition('gpt-4o')).toBe(true) | ||
| expect(evaluateCondition('claude-sonnet-4-5')).toBe(true) | ||
| expect(evaluateCondition('gemini-2.5-pro')).toBe(true) | ||
| expect(evaluateCondition('mistral-large-latest')).toBe(true) | ||
| }) | ||
|
|
||
| it('requires API key for prefixed cloud models on hosted platform', () => { | ||
| mockIsHosted.value = true | ||
| expect(evaluateCondition('fireworks/llama-3')).toBe(true) | ||
| expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true) | ||
| expect(evaluateCondition('groq/llama-3')).toBe(true) | ||
| expect(evaluateCondition('cerebras/gpt-oss-120b')).toBe(true) | ||
| }) | ||
|
|
||
| it('requires API key for prefixed cloud models on self-hosted', () => { | ||
| mockIsHosted.value = false | ||
| expect(evaluateCondition('fireworks/llama-3')).toBe(true) | ||
| expect(evaluateCondition('openrouter/anthropic/claude')).toBe(true) | ||
| expect(evaluateCondition('groq/llama-3')).toBe(true) | ||
| expect(evaluateCondition('cerebras/gpt-oss-120b')).toBe(true) | ||
| }) | ||
| }) | ||
|
|
||
| describe('self-hosted getProviderFromModel fallback', () => { | ||
| it('does not require API key when getProviderFromModel defaults to ollama', () => { | ||
| mockIsHosted.value = false | ||
| mockIsOllamaConfigured.value = false | ||
| expect(evaluateCondition('llama3:latest')).toBe(false) | ||
| expect(evaluateCondition('phi3:latest')).toBe(false) | ||
| }) | ||
|
|
||
| it('requires API key when getProviderFromModel returns a cloud provider', () => { | ||
| mockIsHosted.value = false | ||
| mockIsOllamaConfigured.value = false | ||
| expect(evaluateCondition('mistral:latest')).toBe(true) | ||
| expect(evaluateCondition('gpt2')).toBe(true) | ||
| }) | ||
|
|
||
| it('does not run getProviderFromModel fallback on hosted platform', () => { | ||
| mockIsHosted.value = true | ||
| mockGetHostedModels.mockReturnValue([]) | ||
| expect(evaluateCondition('llama3:latest')).toBe(true) | ||
| expect(mockGetProviderFromModel).not.toHaveBeenCalled() | ||
| }) | ||
| }) | ||
| }) |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.