refactor: improve verbosity configuration with type-safe validators (#11463)

* refactor(models): improve verbosity level handling for GPT-5 models

Replace hardcoded verbosity configuration with validator functions
Add support for GPT-5.1 series models

* test(models): restructure model utility tests into logical groups

Improve test organization by grouping related test cases under descriptive describe blocks for better maintainability and readability. Each model utility function now has its own dedicated test section with clear subcategories for different behaviors.

* fix: add null check for model in getModelSupportedVerbosity

Handle null model case defensively by returning default verbosity

* refactor(config): remove redundant as const from MODEL_SUPPORTED_VERBOSITY array

* refactor(models): simplify validator function in MODEL_SUPPORTED_VERBOSITY

* test(model utils): add tests for undefined/null input handling

* fix(models): handle undefined/null input in getModelSupportedVerbosity

Remove ts-expect-error comments and update type signature to explicitly handle undefined/null inputs. Also add support for GPT-5.1 series models.

* test(models): add test case for gpt-5-pro variant model
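A minimal sketch (not part of the commit) of the validator-based lookup pattern these commits describe, using simplified stand-in predicates rather than the real isGPT5ProModel/isGPT5SeriesModel helpers shown in the diff below. Each entry pairs a predicate with its allowed verbosity levels, the first matching entry wins, and a missing or unrecognized model falls back to `undefined` only:

// Sketch only: Model and ValidOpenAIVerbosity stand in for the real renderer types.
type Model = { id: string }
type ValidOpenAIVerbosity = 'low' | 'medium' | 'high'

const VERBOSITY_RULES: { validator: (m: Model) => boolean; values: ValidOpenAIVerbosity[] }[] = [
  // gpt-5-pro is listed first so it is not caught by the broader gpt-5 rule below
  { validator: (m) => m.id.includes('gpt-5-pro'), values: ['high'] },
  { validator: (m) => m.id.startsWith('gpt-5'), values: ['low', 'medium', 'high'] }
]

const supportedVerbosity = (model?: Model | null): (ValidOpenAIVerbosity | undefined)[] => {
  if (!model) return [undefined] // defensive null/undefined handling from the fix commit
  const match = VERBOSITY_RULES.find(({ validator }) => validator(model))
  return [undefined, ...(match?.values ?? [])]
}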
2 changed files with 387 additions and 179 deletions


@@ -125,195 +125,371 @@ describe('model utils', () => {
openAIWebSearchOnlyMock.mockReturnValue(false)
})
describe('OpenAI model detection', () => {
describe('isOpenAILLMModel', () => {
it('returns false for undefined model', () => {
expect(isOpenAILLMModel(undefined as unknown as Model)).toBe(false)
})
it('returns false for image generation models', () => {
expect(isOpenAILLMModel(createModel({ id: 'gpt-4o-image' }))).toBe(false)
})
it('returns true for reasoning models', () => {
reasoningMock.mockReturnValueOnce(true)
expect(isOpenAILLMModel(createModel({ id: 'o1-preview' }))).toBe(true)
})
it('returns true for GPT-prefixed models', () => {
expect(isOpenAILLMModel(createModel({ id: 'GPT-5-turbo' }))).toBe(true)
})
})
describe('isOpenAIModel', () => {
it('detects models via GPT prefix', () => {
expect(isOpenAIModel(createModel({ id: 'gpt-4.1' }))).toBe(true)
})
it('detects models via reasoning support', () => {
reasoningMock.mockReturnValueOnce(true)
expect(isOpenAIModel(createModel({ id: 'o3' }))).toBe(true)
})
})
describe('isOpenAIChatCompletionOnlyModel', () => {
it('identifies chat-completion-only models', () => {
expect(isOpenAIChatCompletionOnlyModel(createModel({ id: 'gpt-4o-search-preview' }))).toBe(true)
expect(isOpenAIChatCompletionOnlyModel(createModel({ id: 'o1-mini' }))).toBe(true)
})
it('returns false for general models', () => {
expect(isOpenAIChatCompletionOnlyModel(createModel({ id: 'gpt-4o' }))).toBe(false)
})
})
})
describe('GPT-5 family detection', () => {
describe('isGPT5SeriesModel', () => {
it('returns true for GPT-5 models', () => {
expect(isGPT5SeriesModel(createModel({ id: 'gpt-5-preview' }))).toBe(true)
})
it('returns false for GPT-5.1 models', () => {
expect(isGPT5SeriesModel(createModel({ id: 'gpt-5.1-preview' }))).toBe(false)
})
})
describe('isGPT51SeriesModel', () => {
it('returns true for GPT-5.1 models', () => {
expect(isGPT51SeriesModel(createModel({ id: 'gpt-5.1-mini' }))).toBe(true)
})
})
describe('isGPT5SeriesReasoningModel', () => {
it('returns true for GPT-5 reasoning models', () => {
expect(isGPT5SeriesReasoningModel(createModel({ id: 'gpt-5' }))).toBe(true)
})
it('returns false for gpt-5-chat', () => {
expect(isGPT5SeriesReasoningModel(createModel({ id: 'gpt-5-chat' }))).toBe(false)
})
})
describe('isGPT5ProModel', () => {
it('returns true for GPT-5 Pro models', () => {
expect(isGPT5ProModel(createModel({ id: 'gpt-5-pro' }))).toBe(true)
})
it('returns false for non-Pro GPT-5 models', () => {
expect(isGPT5ProModel(createModel({ id: 'gpt-5-preview' }))).toBe(false)
})
})
})
describe('Verbosity support', () => {
describe('isSupportVerbosityModel', () => {
it('returns true for GPT-5 models', () => {
expect(isSupportVerbosityModel(createModel({ id: 'gpt-5' }))).toBe(true)
})
it('returns false for GPT-5 chat models', () => {
expect(isSupportVerbosityModel(createModel({ id: 'gpt-5-chat' }))).toBe(false)
})
it('returns true for GPT-5.1 models', () => {
expect(isSupportVerbosityModel(createModel({ id: 'gpt-5.1-preview' }))).toBe(true)
})
})
describe('getModelSupportedVerbosity', () => {
it('returns only "high" for GPT-5 Pro models', () => {
expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, 'high'])
expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([undefined, 'high'])
})
it('returns all levels for non-Pro GPT-5 models', () => {
const previewModel = createModel({ id: 'gpt-5-preview' })
expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, 'low', 'medium', 'high'])
})
it('returns all levels for GPT-5.1 models', () => {
const gpt51Model = createModel({ id: 'gpt-5.1-preview' })
expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, 'low', 'medium', 'high'])
})
it('returns only undefined for non-GPT-5 models', () => {
expect(getModelSupportedVerbosity(createModel({ id: 'gpt-4o' }))).toEqual([undefined])
expect(getModelSupportedVerbosity(createModel({ id: 'claude-3.5' }))).toEqual([undefined])
})
it('returns only undefined for undefined/null input', () => {
expect(getModelSupportedVerbosity(undefined)).toEqual([undefined])
expect(getModelSupportedVerbosity(null)).toEqual([undefined])
})
})
})
describe('Flex service tier support', () => {
describe('isSupportFlexServiceTierModel', () => {
it('returns true for supported models', () => {
expect(isSupportFlexServiceTierModel(createModel({ id: 'o3' }))).toBe(true)
expect(isSupportFlexServiceTierModel(createModel({ id: 'o4-mini' }))).toBe(true)
expect(isSupportFlexServiceTierModel(createModel({ id: 'gpt-5-preview' }))).toBe(true)
})
it('returns false for unsupported models', () => {
expect(isSupportFlexServiceTierModel(createModel({ id: 'o3-mini' }))).toBe(false)
})
})
describe('isSupportedFlexServiceTier', () => {
it('returns false for non-flex models', () => {
expect(isSupportedFlexServiceTier(createModel({ id: 'gpt-4o' }))).toBe(false)
})
})
})
describe('Temperature and top-p support', () => {
describe('isNotSupportTemperatureAndTopP', () => {
it('returns true for reasoning models', () => {
const model = createModel({ id: 'o1' })
reasoningMock.mockReturnValue(true)
expect(isNotSupportTemperatureAndTopP(model)).toBe(true)
})
it('returns false for open weight models', () => {
const openWeight = createModel({ id: 'gpt-oss-debug' })
expect(isNotSupportTemperatureAndTopP(openWeight)).toBe(false)
})
it('returns true for chat-only models without reasoning', () => {
const chatOnly = createModel({ id: 'o1-preview' })
reasoningMock.mockReturnValue(false)
expect(isNotSupportTemperatureAndTopP(chatOnly)).toBe(true)
})
it('returns true for Qwen MT models', () => {
const qwenMt = createModel({ id: 'qwen-mt-large', provider: 'aliyun' })
expect(isNotSupportTemperatureAndTopP(qwenMt)).toBe(true)
})
})
})
describe('Text delta support', () => {
describe('isNotSupportTextDeltaModel', () => {
it('returns true for qwen-mt-turbo and qwen-mt-plus models', () => {
expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-turbo' }))).toBe(true)
expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-plus' }))).toBe(true)
expect(isNotSupportTextDeltaModel(createModel({ id: 'Qwen-MT-Turbo' }))).toBe(true)
expect(isNotSupportTextDeltaModel(createModel({ id: 'QWEN-MT-PLUS' }))).toBe(true)
})
it('returns false for qwen-mt-flash and other models', () => {
expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-flash' }))).toBe(false)
expect(isNotSupportTextDeltaModel(createModel({ id: 'Qwen-MT-Flash' }))).toBe(false)
expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-turbo' }))).toBe(false)
expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-plus' }))).toBe(false)
expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-max' }))).toBe(false)
expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen2.5-72b' }))).toBe(false)
expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-vl-plus' }))).toBe(false)
})
it('returns false for non-qwen models', () => {
expect(isNotSupportTextDeltaModel(createModel({ id: 'gpt-4o' }))).toBe(false)
expect(isNotSupportTextDeltaModel(createModel({ id: 'claude-3.5' }))).toBe(false)
expect(isNotSupportTextDeltaModel(createModel({ id: 'glm-4-plus' }))).toBe(false)
})
it('handles models with version suffixes', () => {
expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-turbo-1201' }))).toBe(true)
expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-mt-plus-0828' }))).toBe(true)
expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-turbo-0828' }))).toBe(false)
expect(isNotSupportTextDeltaModel(createModel({ id: 'qwen-plus-latest' }))).toBe(false)
})
})
})
describe('Model provider detection', () => {
describe('isGemmaModel', () => {
it('detects Gemma models by ID', () => {
expect(isGemmaModel(createModel({ id: 'Gemma-3-27B' }))).toBe(true)
})
it('detects Gemma models by group', () => {
expect(isGemmaModel(createModel({ group: 'Gemma' }))).toBe(true)
})
it('returns false for non-Gemma models', () => {
expect(isGemmaModel(createModel({ id: 'gpt-4o' }))).toBe(false)
})
})
describe('isGeminiModel', () => {
it('detects Gemini models', () => {
expect(isGeminiModel(createModel({ id: 'Gemini-2.0' }))).toBe(true)
})
})
describe('isZhipuModel', () => {
it('detects Zhipu models by provider', () => {
expect(isZhipuModel(createModel({ provider: 'zhipu' }))).toBe(true)
})
it('returns false for non-Zhipu models', () => {
expect(isZhipuModel(createModel({ provider: 'openai' }))).toBe(false)
})
})
describe('isAnthropicModel', () => {
it('detects Anthropic models', () => {
expect(isAnthropicModel(createModel({ id: 'claude-3.5' }))).toBe(true)
})
})
describe('isQwenMTModel', () => {
it('detects Qwen MT models', () => {
expect(isQwenMTModel(createModel({ id: 'qwen-mt-plus' }))).toBe(true)
})
})
describe('isOpenAIOpenWeightModel', () => {
it('detects OpenAI open weight models', () => {
expect(isOpenAIOpenWeightModel(createModel({ id: 'gpt-oss-free' }))).toBe(true)
})
})
})
describe('System message support', () => {
describe('isNotSupportSystemMessageModel', () => {
it('returns true for models that do not support system messages', () => {
expect(isNotSupportSystemMessageModel(createModel({ id: 'gemma-moe' }))).toBe(true)
})
})
})
describe('Model grouping', () => {
describe('groupQwenModels', () => {
it('groups qwen models by prefix', () => {
const qwen = createModel({ id: 'Qwen-7B', provider: 'qwen', name: 'Qwen-7B' })
const qwenOmni = createModel({ id: 'qwen2.5-omni', name: 'qwen2.5-omni' })
const other = createModel({ id: 'deepseek-v3', group: 'DeepSeek' })
const grouped = groupQwenModels([qwen, qwenOmni, other])
expect(Object.keys(grouped)).toContain('qwen-7b')
expect(Object.keys(grouped)).toContain('qwen2.5')
expect(grouped.DeepSeek).toContain(other)
})
})
})
describe('Vision and image generation', () => {
describe('isVisionModels', () => {
it('returns true when all models support vision', () => {
const models = [createModel({ id: 'gpt-4o' }), createModel({ id: 'gpt-4o-mini' })]
expect(isVisionModels(models)).toBe(true)
})
it('returns false when some models do not support vision', () => {
const models = [createModel({ id: 'gpt-4o' }), createModel({ id: 'gpt-4o-mini' })]
visionMock.mockReturnValueOnce(true).mockReturnValueOnce(false)
expect(isVisionModels(models)).toBe(false)
})
})
describe('isGenerateImageModels', () => {
it('returns true when all models support image generation', () => {
const models = [createModel({ id: 'gpt-4o' }), createModel({ id: 'gpt-4o-mini' })]
expect(isGenerateImageModels(models)).toBe(true)
})
it('returns false when some models do not support image generation', () => {
const models = [createModel({ id: 'gpt-4o' }), createModel({ id: 'gpt-4o-mini' })]
generateImageMock.mockReturnValueOnce(true).mockReturnValueOnce(false)
expect(isGenerateImageModels(models)).toBe(false)
})
})
})
describe('Model filtering', () => {
describe('isSupportedModel', () => {
it('filters supported OpenAI catalog entries', () => {
expect(isSupportedModel({ id: 'gpt-4', object: 'model' } as any)).toBe(true)
})
it('filters unsupported OpenAI catalog entries', () => {
expect(isSupportedModel({ id: 'tts-1', object: 'model' } as any)).toBe(false)
})
})
describe('agentModelFilter', () => {
it('returns true for regular models', () => {
expect(agentModelFilter(createModel())).toBe(true)
})
it('filters out embedding models', () => {
embeddingMock.mockReturnValueOnce(true)
expect(agentModelFilter(createModel({ id: 'text-embedding' }))).toBe(false)
})
it('filters out rerank models', () => {
embeddingMock.mockReturnValue(false)
rerankMock.mockReturnValueOnce(true)
expect(agentModelFilter(createModel({ id: 'rerank' }))).toBe(false)
})
it('filters out text-to-image models', () => {
rerankMock.mockReturnValue(false)
textToImageMock.mockReturnValueOnce(true)
expect(agentModelFilter(createModel({ id: 'gpt-image-1' }))).toBe(false)
})
})
})
describe('Temperature limits', () => {
describe('isMaxTemperatureOneModel', () => {
it('returns true for Zhipu models', () => {
expect(isMaxTemperatureOneModel(createModel({ id: 'glm-4' }))).toBe(true)
expect(isMaxTemperatureOneModel(createModel({ id: 'GLM-4-Plus' }))).toBe(true)
expect(isMaxTemperatureOneModel(createModel({ id: 'glm-3-turbo' }))).toBe(true)
})
it('returns true for Anthropic models', () => {
expect(isMaxTemperatureOneModel(createModel({ id: 'claude-3.5-sonnet' }))).toBe(true)
expect(isMaxTemperatureOneModel(createModel({ id: 'Claude-3-opus' }))).toBe(true)
expect(isMaxTemperatureOneModel(createModel({ id: 'claude-2.1' }))).toBe(true)
})
it('returns true for Moonshot models', () => {
expect(isMaxTemperatureOneModel(createModel({ id: 'moonshot-1.0' }))).toBe(true)
expect(isMaxTemperatureOneModel(createModel({ id: 'kimi-k2-thinking' }))).toBe(true)
expect(isMaxTemperatureOneModel(createModel({ id: 'Moonshot-Pro' }))).toBe(true)
})
it('returns false for other models', () => {
expect(isMaxTemperatureOneModel(createModel({ id: 'gpt-4o' }))).toBe(false)
expect(isMaxTemperatureOneModel(createModel({ id: 'gpt-4-turbo' }))).toBe(false)
expect(isMaxTemperatureOneModel(createModel({ id: 'qwen-max' }))).toBe(false)
expect(isMaxTemperatureOneModel(createModel({ id: 'gemini-pro' }))).toBe(false)
})
})
})
})


@@ -4,7 +4,14 @@ import { type Model, SystemProviderIds } from '@renderer/types'
import type { OpenAIVerbosity, ValidOpenAIVerbosity } from '@renderer/types/aiCoreTypes'
import { getLowerBaseModelName } from '@renderer/utils'
import {
isGPT5ProModel,
isGPT5SeriesModel,
isGPT51SeriesModel,
isOpenAIChatCompletionOnlyModel,
isOpenAIOpenWeightModel,
isOpenAIReasoningModel
} from './openai'
import { isQwenMTModel } from './qwen'
import { isGenerateImageModel, isTextToImageModel, isVisionModel } from './vision'
export const NOT_SUPPORTED_REGEX = /(?:^tts|whisper|speech)/i
@@ -123,21 +130,46 @@ export const isNotSupportSystemMessageModel = (model: Model): boolean => {
return isQwenMTModel(model) || isGemmaModel(model)
}
// Verbosity settings are only supported by GPT-5 and newer models
// Specifically, GPT-5 and GPT-5.1 for now
// gpt-5-pro only supports 'high', other GPT-5 models support all levels
const MODEL_SUPPORTED_VERBOSITY: readonly {
readonly validator: (model: Model) => boolean
readonly values: readonly ValidOpenAIVerbosity[]
}[] = [
// gpt-5-pro
{ validator: isGPT5ProModel, values: ['high'] },
// gpt-5 except gpt-5-pro
{
validator: (model: Model) => isGPT5SeriesModel(model) && !isGPT5ProModel(model),
values: ['low', 'medium', 'high']
},
// gpt-5.1
{ validator: isGPT51SeriesModel, values: ['low', 'medium', 'high'] }
]
/**
* Returns the list of supported verbosity levels for the given model.
* If the model is not recognized as a GPT-5 series model, only `undefined` is returned.
* For GPT-5-pro, only 'high' is supported; for other GPT-5 models, 'low', 'medium', and 'high' are supported.
* For GPT-5.1 series models, 'low', 'medium', and 'high' are supported.
* @param model - The model to check
* @returns An array of supported verbosity levels, always including `undefined` as the first element
*/
export const getModelSupportedVerbosity = (model: Model | undefined | null): OpenAIVerbosity[] => {
if (!model) {
return [undefined]
}
let supportedValues: ValidOpenAIVerbosity[] = []
for (const { validator, values } of MODEL_SUPPORTED_VERBOSITY) {
if (validator(model)) {
supportedValues = [...values]
break
}
}
return [undefined, ...supportedValues]
}
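For reference, a short usage illustration (not part of the diff), written with the test suite's createModel helper and matching the expectations in the getModelSupportedVerbosity tests above:

getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))      // [undefined, 'high']
getModelSupportedVerbosity(createModel({ id: 'gpt-5-preview' }))  // [undefined, 'low', 'medium', 'high']
getModelSupportedVerbosity(createModel({ id: 'gpt-5.1-preview' })) // [undefined, 'low', 'medium', 'high']
getModelSupportedVerbosity(createModel({ id: 'gpt-4o' }))         // [undefined]
getModelSupportedVerbosity(undefined)                             // [undefined]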