diff --git a/src/renderer/src/config/models/__tests__/utils.test.ts b/src/renderer/src/config/models/__tests__/utils.test.ts
index f3f4d402a..a163061ea 100644
--- a/src/renderer/src/config/models/__tests__/utils.test.ts
+++ b/src/renderer/src/config/models/__tests__/utils.test.ts
@@ -125,195 +125,371 @@ describe('model utils', () => {
     openAIWebSearchOnlyMock.mockReturnValue(false)
   })
 
-  it('detects OpenAI LLM models through reasoning and GPT prefix', () => {
-    expect(isOpenAILLMModel(undefined as unknown as Model)).toBe(false)
-    expect(isOpenAILLMModel(createModel({ id: 'gpt-4o-image' }))).toBe(false)
+  describe('OpenAI model detection', () => {
+    describe('isOpenAILLMModel', () => {
+      it('returns false for undefined model', () => {
+        expect(isOpenAILLMModel(undefined as unknown as Model)).toBe(false)
+      })
 
-    reasoningMock.mockReturnValueOnce(true)
-    expect(isOpenAILLMModel(createModel({ id: 'o1-preview' }))).toBe(true)
+      it('returns false for image generation models', () => {
+        expect(isOpenAILLMModel(createModel({ id: 'gpt-4o-image' }))).toBe(false)
+      })
 
-    expect(isOpenAILLMModel(createModel({ id: 'GPT-5-turbo' }))).toBe(true)
-  })
+      it('returns true for reasoning models', () => {
+        reasoningMock.mockReturnValueOnce(true)
+        expect(isOpenAILLMModel(createModel({ id: 'o1-preview' }))).toBe(true)
+      })
 
-  it('detects OpenAI models via GPT prefix or reasoning support', () => {
-    expect(isOpenAIModel(createModel({ id: 'gpt-4.1' }))).toBe(true)
-    reasoningMock.mockReturnValueOnce(true)
-    expect(isOpenAIModel(createModel({ id: 'o3' }))).toBe(true)
-  })
-
-  it('evaluates support for flex service tier and alias helper', () => {
-    expect(isSupportFlexServiceTierModel(createModel({ id: 'o3' }))).toBe(true)
-    expect(isSupportFlexServiceTierModel(createModel({ id: 'o3-mini' }))).toBe(false)
-    expect(isSupportFlexServiceTierModel(createModel({ id: 'o4-mini' }))).toBe(true)
-    expect(isSupportFlexServiceTierModel(createModel({ id: 'gpt-5-preview' }))).toBe(true)
-    expect(isSupportedFlexServiceTier(createModel({ id: 'gpt-4o' }))).toBe(false)
-  })
-
-  it('detects verbosity support for GPT-5+ families', () => {
-    expect(isSupportVerbosityModel(createModel({ id: 'gpt-5' }))).toBe(true)
-    expect(isSupportVerbosityModel(createModel({ id: 'gpt-5-chat' }))).toBe(false)
-    expect(isSupportVerbosityModel(createModel({ id: 'gpt-5.1-preview' }))).toBe(true)
-  })
-
-  it('limits verbosity controls for GPT-5 Pro models', () => {
-    const proModel = createModel({ id: 'gpt-5-pro' })
-    const previewModel = createModel({ id: 'gpt-5-preview' })
-    expect(getModelSupportedVerbosity(proModel)).toEqual([undefined, 'high'])
-    expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, 'low', 'medium', 'high'])
-    expect(isGPT5ProModel(proModel)).toBe(true)
-    expect(isGPT5ProModel(previewModel)).toBe(false)
-  })
-
-  it('identifies OpenAI chat-completion-only models', () => {
-    expect(isOpenAIChatCompletionOnlyModel(createModel({ id: 'gpt-4o-search-preview' }))).toBe(true)
-    expect(isOpenAIChatCompletionOnlyModel(createModel({ id: 'o1-mini' }))).toBe(true)
-    expect(isOpenAIChatCompletionOnlyModel(createModel({ id: 'gpt-4o' }))).toBe(false)
-  })
-
-  it('filters unsupported OpenAI catalog entries', () => {
-    expect(isSupportedModel({ id: 'gpt-4', object: 'model' } as any)).toBe(true)
-    expect(isSupportedModel({ id: 'tts-1', object: 'model' } as any)).toBe(false)
-  })
-
-  it('calculates temperature/top-p support correctly', () => {
-    const model = createModel({ id: 'o1' })
-    reasoningMock.mockReturnValue(true)
-    expect(isNotSupportTemperatureAndTopP(model)).toBe(true)
-
-    const openWeight = createModel({ id: 'gpt-oss-debug' })
-    expect(isNotSupportTemperatureAndTopP(openWeight)).toBe(false)
-
-    const chatOnly = createModel({ id: 'o1-preview' })
-    reasoningMock.mockReturnValue(false)
-    expect(isNotSupportTemperatureAndTopP(chatOnly)).toBe(true)
-
-    const qwenMt = createModel({ id: 'qwen-mt-large', provider: 'aliyun' })
-    expect(isNotSupportTemperatureAndTopP(qwenMt)).toBe(true)
-  })
-
-  it('handles gemma and gemini detections plus zhipu tagging', () => {
-    expect(isGemmaModel(createModel({ id: 'Gemma-3-27B' }))).toBe(true)
-    expect(isGemmaModel(createModel({ group: 'Gemma' }))).toBe(true)
-    expect(isGemmaModel(createModel({ id: 'gpt-4o' }))).toBe(false)
-
-    expect(isGeminiModel(createModel({ id: 'Gemini-2.0' }))).toBe(true)
-
-    expect(isZhipuModel(createModel({ provider: 'zhipu' }))).toBe(true)
-    expect(isZhipuModel(createModel({ provider: 'openai' }))).toBe(false)
-  })
-
-  it('groups qwen models by prefix', () => {
-    const qwen = createModel({ id: 'Qwen-7B', provider: 'qwen', name: 'Qwen-7B' })
-    const qwenOmni = createModel({ id: 'qwen2.5-omni', name: 'qwen2.5-omni' })
-    const other = createModel({ id: 'deepseek-v3', group: 'DeepSeek' })
-
-    const grouped = groupQwenModels([qwen, qwenOmni, other])
-    expect(Object.keys(grouped)).toContain('qwen-7b')
-    expect(Object.keys(grouped)).toContain('qwen2.5')
-    expect(grouped.DeepSeek).toContain(other)
-  })
-
-  it('aggregates boolean helpers based on regex rules', () => {
-    expect(isAnthropicModel(createModel({ id: 'claude-3.5' }))).toBe(true)
-    expect(isQwenMTModel(createModel({ id: 'qwen-mt-plus' }))).toBe(true)
-    expect(isNotSupportSystemMessageModel(createModel({ id: 'gemma-moe' }))).toBe(true)
-    expect(isOpenAIOpenWeightModel(createModel({ id: 'gpt-oss-free' }))).toBe(true)
-  })
-
-  describe('isNotSupportedTextDelta', () => {
-    it('returns true for qwen-mt-turbo and qwen-mt-plus models', () => {
-      // qwen-mt series that don't support text delta
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-turbo' }))).toBe(true)
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-plus' }))).toBe(true)
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'Qwen-MT-Turbo' }))).toBe(true)
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'QWEN-MT-PLUS' }))).toBe(true)
+      it('returns true for GPT-prefixed models', () => {
+        expect(isOpenAILLMModel(createModel({ id: 'GPT-5-turbo' }))).toBe(true)
+      })
     })
 
-    it('returns false for qwen-mt-flash and other models', () => {
-      // qwen-mt-flash supports text delta
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-flash' }))).toBe(false)
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'Qwen-MT-Flash' }))).toBe(false)
+    describe('isOpenAIModel', () => {
+      it('detects models via GPT prefix', () => {
+        expect(isOpenAIModel(createModel({ id: 'gpt-4.1' }))).toBe(true)
+      })
 
-      // Legacy qwen models without mt prefix (support text delta)
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-turbo' }))).toBe(false)
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-plus' }))).toBe(false)
-
-      // Other qwen models
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-max' }))).toBe(false)
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen2.5-72b' }))).toBe(false)
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-vl-plus' }))).toBe(false)
-
-      // Non-qwen models
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'gpt-4o' }))).toBe(false)
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'claude-3.5' }))).toBe(false)
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'glm-4-plus' }))).toBe(false)
+      it('detects models via reasoning support', () => {
+        reasoningMock.mockReturnValueOnce(true)
+        expect(isOpenAIModel(createModel({ id: 'o3' }))).toBe(true)
+      })
     })
 
-    it('handles models with version suffixes', () => {
-      // qwen-mt models with version suffixes
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-turbo-1201' }))).toBe(true)
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-plus-0828' }))).toBe(true)
+    describe('isOpenAIChatCompletionOnlyModel', () => {
+      it('identifies chat-completion-only models', () => {
+        expect(isOpenAIChatCompletionOnlyModel(createModel({ id: 'gpt-4o-search-preview' }))).toBe(true)
+        expect(isOpenAIChatCompletionOnlyModel(createModel({ id: 'o1-mini' }))).toBe(true)
+      })
 
-      // Legacy qwen models with version suffixes (support text delta)
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-turbo-0828' }))).toBe(false)
-      expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-plus-latest' }))).toBe(false)
+      it('returns false for general models', () => {
+        expect(isOpenAIChatCompletionOnlyModel(createModel({ id: 'gpt-4o' }))).toBe(false)
+      })
     })
   })
 
-  it('evaluates GPT-5 family helpers', () => {
-    expect(isGPT5SeriesModel(createModel({ id: 'gpt-5-preview' }))).toBe(true)
-    expect(isGPT5SeriesModel(createModel({ id: 'gpt-5.1-preview' }))).toBe(false)
-    expect(isGPT51SeriesModel(createModel({ id: 'gpt-5.1-mini' }))).toBe(true)
-    expect(isGPT5SeriesReasoningModel(createModel({ id: 'gpt-5-prompt' }))).toBe(true)
-    expect(isSupportVerbosityModel(createModel({ id: 'gpt-5-chat' }))).toBe(false)
+  describe('GPT-5 family detection', () => {
+    describe('isGPT5SeriesModel', () => {
+      it('returns true for GPT-5 models', () => {
+        expect(isGPT5SeriesModel(createModel({ id: 'gpt-5-preview' }))).toBe(true)
+      })
+
+      it('returns false for GPT-5.1 models', () => {
+        expect(isGPT5SeriesModel(createModel({ id: 'gpt-5.1-preview' }))).toBe(false)
+      })
+    })
+
+    describe('isGPT51SeriesModel', () => {
+      it('returns true for GPT-5.1 models', () => {
+        expect(isGPT51SeriesModel(createModel({ id: 'gpt-5.1-mini' }))).toBe(true)
+      })
+    })
+
+    describe('isGPT5SeriesReasoningModel', () => {
+      it('returns true for GPT-5 reasoning models', () => {
+        expect(isGPT5SeriesReasoningModel(createModel({ id: 'gpt-5' }))).toBe(true)
+      })
+
+      it('returns false for gpt-5-chat', () => {
+        expect(isGPT5SeriesReasoningModel(createModel({ id: 'gpt-5-chat' }))).toBe(false)
+      })
+    })
+
+    describe('isGPT5ProModel', () => {
+      it('returns true for GPT-5 Pro models', () => {
+        expect(isGPT5ProModel(createModel({ id: 'gpt-5-pro' }))).toBe(true)
+      })
+
+      it('returns false for non-Pro GPT-5 models', () => {
+        expect(isGPT5ProModel(createModel({ id: 'gpt-5-preview' }))).toBe(false)
+      })
+    })
   })
 
-  it('wraps generate/vision helpers that operate on arrays', () => {
-    const models = [createModel({ id: 'gpt-4o' }), createModel({ id: 'gpt-4o-mini' })]
-    expect(isVisionModels(models)).toBe(true)
-    visionMock.mockReturnValueOnce(true).mockReturnValueOnce(false)
-    expect(isVisionModels(models)).toBe(false)
+  describe('Verbosity support', () => {
+    describe('isSupportVerbosityModel', () => {
+      it('returns true for GPT-5 models', () => {
+        expect(isSupportVerbosityModel(createModel({ id: 'gpt-5' }))).toBe(true)
+      })
 
-    expect(isGenerateImageModels(models)).toBe(true)
-    generateImageMock.mockReturnValueOnce(true).mockReturnValueOnce(false)
-    expect(isGenerateImageModels(models)).toBe(false)
+      it('returns false for GPT-5 chat models', () => {
+        expect(isSupportVerbosityModel(createModel({ id: 'gpt-5-chat' }))).toBe(false)
+      })
+
+      it('returns true for GPT-5.1 models', () => {
+        expect(isSupportVerbosityModel(createModel({ id: 'gpt-5.1-preview' }))).toBe(true)
+      })
+    })
+
+    describe('getModelSupportedVerbosity', () => {
+      it('returns only "high" for GPT-5 Pro models', () => {
+        expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, 'high'])
+        expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([undefined, 'high'])
+      })
+
+      it('returns all levels for non-Pro GPT-5 models', () => {
+        const previewModel = createModel({ id: 'gpt-5-preview' })
+        expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, 'low', 'medium', 'high'])
+      })
+
+      it('returns all levels for GPT-5.1 models', () => {
+        const gpt51Model = createModel({ id: 'gpt-5.1-preview' })
+        expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, 'low', 'medium', 'high'])
+      })
+
+      it('returns only undefined for non-GPT-5 models', () => {
+        expect(getModelSupportedVerbosity(createModel({ id: 'gpt-4o' }))).toEqual([undefined])
+        expect(getModelSupportedVerbosity(createModel({ id: 'claude-3.5' }))).toEqual([undefined])
+      })
+
+      it('returns only undefined for undefined/null input', () => {
+        expect(getModelSupportedVerbosity(undefined)).toEqual([undefined])
+        expect(getModelSupportedVerbosity(null)).toEqual([undefined])
+      })
+    })
   })
 
-  it('filters models for agent usage', () => {
-    expect(agentModelFilter(createModel())).toBe(true)
+  describe('Flex service tier support', () => {
+    describe('isSupportFlexServiceTierModel', () => {
+      it('returns true for supported models', () => {
+        expect(isSupportFlexServiceTierModel(createModel({ id: 'o3' }))).toBe(true)
+        expect(isSupportFlexServiceTierModel(createModel({ id: 'o4-mini' }))).toBe(true)
+        expect(isSupportFlexServiceTierModel(createModel({ id: 'gpt-5-preview' }))).toBe(true)
+      })
 
-    embeddingMock.mockReturnValueOnce(true)
-    expect(agentModelFilter(createModel({ id: 'text-embedding' }))).toBe(false)
+      it('returns false for unsupported models', () => {
+        expect(isSupportFlexServiceTierModel(createModel({ id: 'o3-mini' }))).toBe(false)
+      })
+    })
 
-    embeddingMock.mockReturnValue(false)
-    rerankMock.mockReturnValueOnce(true)
-    expect(agentModelFilter(createModel({ id: 'rerank' }))).toBe(false)
-
-    rerankMock.mockReturnValue(false)
-    textToImageMock.mockReturnValueOnce(true)
-    expect(agentModelFilter(createModel({ id: 'gpt-image-1' }))).toBe(false)
+    describe('isSupportedFlexServiceTier', () => {
+      it('returns false for non-flex models', () => {
+        expect(isSupportedFlexServiceTier(createModel({ id: 'gpt-4o' }))).toBe(false)
+      })
+    })
   })
 
-  it('identifies models with maximum temperature of 1.0', () => {
-    // Zhipu models should have max temperature of 1.0
-    expect(isMaxTemperatureOneModel(createModel({ id: 'glm-4' }))).toBe(true)
-    expect(isMaxTemperatureOneModel(createModel({ id: 'GLM-4-Plus' }))).toBe(true)
-    expect(isMaxTemperatureOneModel(createModel({ id: 'glm-3-turbo' }))).toBe(true)
+  describe('Temperature and top-p support', () => {
+    describe('isNotSupportTemperatureAndTopP', () => {
+      it('returns true for reasoning models', () => {
+        const model = createModel({ id: 'o1' })
+        reasoningMock.mockReturnValue(true)
+        expect(isNotSupportTemperatureAndTopP(model)).toBe(true)
+      })
 
-    // Anthropic models should have max temperature of 1.0
-    expect(isMaxTemperatureOneModel(createModel({ id: 'claude-3.5-sonnet' }))).toBe(true)
-    expect(isMaxTemperatureOneModel(createModel({ id: 'Claude-3-opus' }))).toBe(true)
-    expect(isMaxTemperatureOneModel(createModel({ id: 'claude-2.1' }))).toBe(true)
+      it('returns false for open weight models', () => {
+        const openWeight = createModel({ id: 'gpt-oss-debug' })
+        expect(isNotSupportTemperatureAndTopP(openWeight)).toBe(false)
+      })
 
-    // Moonshot models should have max temperature of 1.0
-    expect(isMaxTemperatureOneModel(createModel({ id: 'moonshot-1.0' }))).toBe(true)
-    expect(isMaxTemperatureOneModel(createModel({ id: 'kimi-k2-thinking' }))).toBe(true)
-    expect(isMaxTemperatureOneModel(createModel({ id: 'Moonshot-Pro' }))).toBe(true)
+      it('returns true for chat-only models without reasoning', () => {
+        const chatOnly = createModel({ id: 'o1-preview' })
+        reasoningMock.mockReturnValue(false)
+        expect(isNotSupportTemperatureAndTopP(chatOnly)).toBe(true)
+      })
 
-    // Other models should return false
-    expect(isMaxTemperatureOneModel(createModel({ id: 'gpt-4o' }))).toBe(false)
-    expect(isMaxTemperatureOneModel(createModel({ id: 'gpt-4-turbo' }))).toBe(false)
-    expect(isMaxTemperatureOneModel(createModel({ id: 'qwen-max' }))).toBe(false)
-    expect(isMaxTemperatureOneModel(createModel({ id: 'gemini-pro' }))).toBe(false)
+      it('returns true for Qwen MT models', () => {
+        const qwenMt = createModel({ id: 'qwen-mt-large', provider: 'aliyun' })
+        expect(isNotSupportTemperatureAndTopP(qwenMt)).toBe(true)
+      })
+    })
+  })
+
+  describe('Text delta support', () => {
+    describe('isNotSupportTextDeltaModel', () => {
+      it('returns true for qwen-mt-turbo and qwen-mt-plus models', () => {
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-turbo' }))).toBe(true)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-plus' }))).toBe(true)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'Qwen-MT-Turbo' }))).toBe(true)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'QWEN-MT-PLUS' }))).toBe(true)
+      })
+
+      it('returns false for qwen-mt-flash and other models', () => {
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-flash' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'Qwen-MT-Flash' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-turbo' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-plus' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-max' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen2.5-72b' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-vl-plus' }))).toBe(false)
+      })
+
+      it('returns false for non-qwen models', () => {
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'gpt-4o' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'claude-3.5' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'glm-4-plus' }))).toBe(false)
+      })
+
+      it('handles models with version suffixes', () => {
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-turbo-1201' }))).toBe(true)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-plus-0828' }))).toBe(true)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-turbo-0828' }))).toBe(false)
+        expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-plus-latest' }))).toBe(false)
+      })
+    })
+  })
+
+  describe('Model provider detection', () => {
+    describe('isGemmaModel', () => {
+      it('detects Gemma models by ID', () => {
+        expect(isGemmaModel(createModel({ id: 'Gemma-3-27B' }))).toBe(true)
+      })
+
+      it('detects Gemma models by group', () => {
+        expect(isGemmaModel(createModel({ group: 'Gemma' }))).toBe(true)
+      })
+
+      it('returns false for non-Gemma models', () => {
+        expect(isGemmaModel(createModel({ id: 'gpt-4o' }))).toBe(false)
+      })
+    })
+
+    describe('isGeminiModel', () => {
+      it('detects Gemini models', () => {
+        expect(isGeminiModel(createModel({ id: 'Gemini-2.0' }))).toBe(true)
+      })
+    })
+
+    describe('isZhipuModel', () => {
+      it('detects Zhipu models by provider', () => {
+        expect(isZhipuModel(createModel({ provider: 'zhipu' }))).toBe(true)
+      })
+
+      it('returns false for non-Zhipu models', () => {
+        expect(isZhipuModel(createModel({ provider: 'openai' }))).toBe(false)
+      })
+    })
+
+    describe('isAnthropicModel', () => {
+      it('detects Anthropic models', () => {
+        expect(isAnthropicModel(createModel({ id: 'claude-3.5' }))).toBe(true)
+      })
+    })
+
+    describe('isQwenMTModel', () => {
+      it('detects Qwen MT models', () => {
+        expect(isQwenMTModel(createModel({ id: 'qwen-mt-plus' }))).toBe(true)
+      })
+    })
+
+    describe('isOpenAIOpenWeightModel', () => {
+      it('detects OpenAI open weight models', () => {
+        expect(isOpenAIOpenWeightModel(createModel({ id: 'gpt-oss-free' }))).toBe(true)
+      })
+    })
+  })
+
+  describe('System message support', () => {
+    describe('isNotSupportSystemMessageModel', () => {
+      it('returns true for models that do not support system messages', () => {
+        expect(isNotSupportSystemMessageModel(createModel({ id: 'gemma-moe' }))).toBe(true)
+      })
+    })
+  })
+
+  describe('Model grouping', () => {
+    describe('groupQwenModels', () => {
+      it('groups qwen models by prefix', () => {
+        const qwen = createModel({ id: 'Qwen-7B', provider: 'qwen', name: 'Qwen-7B' })
+        const qwenOmni = createModel({ id: 'qwen2.5-omni', name: 'qwen2.5-omni' })
+        const other = createModel({ id: 'deepseek-v3', group: 'DeepSeek' })
+
+        const grouped = groupQwenModels([qwen, qwenOmni, other])
+        expect(Object.keys(grouped)).toContain('qwen-7b')
+        expect(Object.keys(grouped)).toContain('qwen2.5')
+        expect(grouped.DeepSeek).toContain(other)
+      })
+    })
+  })
+
+  describe('Vision and image generation', () => {
+    describe('isVisionModels', () => {
+      it('returns true when all models support vision', () => {
+        const models = [createModel({ id: 'gpt-4o' }), createModel({ id: 'gpt-4o-mini' })]
+        expect(isVisionModels(models)).toBe(true)
+      })
+
+      it('returns false when some models do not support vision', () => {
+        const models = [createModel({ id: 'gpt-4o' }), createModel({ id: 'gpt-4o-mini' })]
+        visionMock.mockReturnValueOnce(true).mockReturnValueOnce(false)
+        expect(isVisionModels(models)).toBe(false)
+      })
+    })
+
+    describe('isGenerateImageModels', () => {
+      it('returns true when all models support image generation', () => {
+        const models = [createModel({ id: 'gpt-4o' }), createModel({ id: 'gpt-4o-mini' })]
+        expect(isGenerateImageModels(models)).toBe(true)
+      })
+
+      it('returns false when some models do not support image generation', () => {
+        const models = [createModel({ id: 'gpt-4o' }), createModel({ id: 'gpt-4o-mini' })]
+        generateImageMock.mockReturnValueOnce(true).mockReturnValueOnce(false)
+        expect(isGenerateImageModels(models)).toBe(false)
+      })
+    })
+  })
+
+  describe('Model filtering', () => {
+    describe('isSupportedModel', () => {
+      it('filters supported OpenAI catalog entries', () => {
+        expect(isSupportedModel({ id: 'gpt-4', object: 'model' } as any)).toBe(true)
+      })
+
+      it('filters unsupported OpenAI catalog entries', () => {
+        expect(isSupportedModel({ id: 'tts-1', object: 'model' } as any)).toBe(false)
+      })
+    })
+
+    describe('agentModelFilter', () => {
+      it('returns true for regular models', () => {
+        expect(agentModelFilter(createModel())).toBe(true)
+      })
+
+      it('filters out embedding models', () => {
+        embeddingMock.mockReturnValueOnce(true)
+        expect(agentModelFilter(createModel({ id: 'text-embedding' }))).toBe(false)
+      })
+
+      it('filters out rerank models', () => {
+        embeddingMock.mockReturnValue(false)
+        rerankMock.mockReturnValueOnce(true)
+        expect(agentModelFilter(createModel({ id: 'rerank' }))).toBe(false)
+      })
+
+      it('filters out text-to-image models', () => {
+        rerankMock.mockReturnValue(false)
+        textToImageMock.mockReturnValueOnce(true)
+        expect(agentModelFilter(createModel({ id: 'gpt-image-1' }))).toBe(false)
+      })
+    })
+  })
+
+  describe('Temperature limits', () => {
+    describe('isMaxTemperatureOneModel', () => {
+      it('returns true for Zhipu models', () => {
+        expect(isMaxTemperatureOneModel(createModel({ id: 'glm-4' }))).toBe(true)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'GLM-4-Plus' }))).toBe(true)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'glm-3-turbo' }))).toBe(true)
+      })
+
+      it('returns true for Anthropic models', () => {
+        expect(isMaxTemperatureOneModel(createModel({ id: 'claude-3.5-sonnet' }))).toBe(true)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'Claude-3-opus' }))).toBe(true)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'claude-2.1' }))).toBe(true)
+      })
+
+      it('returns true for Moonshot models', () => {
+        expect(isMaxTemperatureOneModel(createModel({ id: 'moonshot-1.0' }))).toBe(true)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'kimi-k2-thinking' }))).toBe(true)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'Moonshot-Pro' }))).toBe(true)
+      })
+
+      it('returns false for other models', () => {
+        expect(isMaxTemperatureOneModel(createModel({ id: 'gpt-4o' }))).toBe(false)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'gpt-4-turbo' }))).toBe(false)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'qwen-max' }))).toBe(false)
+        expect(isMaxTemperatureOneModel(createModel({ id: 'gemini-pro' }))).toBe(false)
+      })
+    })
   })
 })
diff --git a/src/renderer/src/config/models/utils.ts b/src/renderer/src/config/models/utils.ts
index 1d5c9a644..25e802b25 100644
--- a/src/renderer/src/config/models/utils.ts
+++ b/src/renderer/src/config/models/utils.ts
@@ -4,7 +4,14 @@ import { type Model, SystemProviderIds } from '@renderer/types'
 import type { OpenAIVerbosity, ValidOpenAIVerbosity } from '@renderer/types/aiCoreTypes'
 import { getLowerBaseModelName } from '@renderer/utils'
-import { isOpenAIChatCompletionOnlyModel, isOpenAIOpenWeightModel, isOpenAIReasoningModel } from './openai'
+import {
+  isGPT5ProModel,
+  isGPT5SeriesModel,
+  isGPT51SeriesModel,
+  isOpenAIChatCompletionOnlyModel,
+  isOpenAIOpenWeightModel,
+  isOpenAIReasoningModel
+} from './openai'
 import { isQwenMTModel } from './qwen'
 import { isGenerateImageModel, isTextToImageModel, isVisionModel } from './vision'
 
 export const NOT_SUPPORTED_REGEX = /(?:^tts|whisper|speech)/i
@@ -123,21 +130,46 @@ export const isNotSupportSystemMessageModel = (model: Model): boolean => {
   return isQwenMTModel(model) || isGemmaModel(model)
 }
 
-// GPT-5 verbosity configuration
+// Verbosity settings are only supported by GPT-5 and newer models
+// Specifically, GPT-5 and GPT-5.1 for now
 // gpt-5-pro only supports 'high', other GPT-5 models support all levels
-export const MODEL_SUPPORTED_VERBOSITY: Record<string, ValidOpenAIVerbosity[]> = {
-  'gpt-5-pro': ['high'],
-  default: ['low', 'medium', 'high']
-} as const
+const MODEL_SUPPORTED_VERBOSITY: readonly {
+  readonly validator: (model: Model) => boolean
+  readonly values: readonly ValidOpenAIVerbosity[]
+}[] = [
+  // gpt-5-pro
+  { validator: isGPT5ProModel, values: ['high'] },
+  // gpt-5 except gpt-5-pro
+  {
+    validator: (model: Model) => isGPT5SeriesModel(model) && !isGPT5ProModel(model),
+    values: ['low', 'medium', 'high']
+  },
+  // gpt-5.1
+  { validator: isGPT51SeriesModel, values: ['low', 'medium', 'high'] }
+]
 
-export const getModelSupportedVerbosity = (model: Model): OpenAIVerbosity[] => {
-  const modelId = getLowerBaseModelName(model.id)
-  let supportedValues: ValidOpenAIVerbosity[]
-  if (modelId.includes('gpt-5-pro')) {
-    supportedValues = MODEL_SUPPORTED_VERBOSITY['gpt-5-pro']
-  } else {
-    supportedValues = MODEL_SUPPORTED_VERBOSITY.default
+/**
+ * Returns the list of supported verbosity levels for the given model.
+ * If the model is not recognized as a GPT-5 series model, only `undefined` is returned.
+ * For GPT-5-pro, only 'high' is supported; for other GPT-5 models, 'low', 'medium', and 'high' are supported.
+ * For GPT-5.1 series models, 'low', 'medium', and 'high' are supported.
+ * @param model - The model to check
+ * @returns An array of supported verbosity levels, always including `undefined` as the first element
+ */
+export const getModelSupportedVerbosity = (model: Model | undefined | null): OpenAIVerbosity[] => {
+  if (!model) {
+    return [undefined]
   }
+
+  let supportedValues: ValidOpenAIVerbosity[] = []
+
+  for (const { validator, values } of MODEL_SUPPORTED_VERBOSITY) {
+    if (validator(model)) {
+      supportedValues = [...values]
+      break
+    }
+  }
+
   return [undefined, ...supportedValues]
 }
 
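Usage sketch (not part of the patch): a minimal illustration of how a caller could drive the new getModelSupportedVerbosity signature, for example to populate a verbosity picker. The import path and the makeModel stub are assumptions added for illustration only — the app builds Model objects elsewhere and the tests use a createModel helper that is not shown here. The expected results in the comments mirror the test cases above.

import type { Model } from '@renderer/types'
import { getModelSupportedVerbosity } from '@renderer/config/models/utils'

// Hypothetical helper for this sketch only: fabricates a bare Model-shaped object.
const makeModel = (id: string): Model =>
  ({ id, name: id, provider: 'openai', group: 'OpenAI' }) as unknown as Model

getModelSupportedVerbosity(makeModel('gpt-5-pro'))     // [undefined, 'high']
getModelSupportedVerbosity(makeModel('gpt-5-preview')) // [undefined, 'low', 'medium', 'high']
getModelSupportedVerbosity(makeModel('gpt-4o'))        // [undefined]
getModelSupportedVerbosity(undefined)                  // [undefined]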