mirror of
https://github.com/CherryHQ/cherry-studio.git
synced 2026-01-04 20:00:00 +08:00
feat(models): add support for Zhipu model and enhance reasoning checks (#8609)
* feat(models): add support for Zhipu model and enhance reasoning checks

  - Introduced support for the new Zhipu model (GLM-4.5) in the models configuration.
  - Added functions to check whether the Zhipu model supports reasoning and thinking tokens.
  - Updated the ThinkingButton component to recognize Zhipu model options and integrate them into the reasoning effort logic.
  - Ensured the reasoning checks are streamlined to include the new Zhipu model alongside existing models.

* feat(models): expand Zhipu model support and refine reasoning checks

  - Added new Zhipu models GLM-4.5-Flash, GLM-4.5-AIR, GLM-4.5-AIRX, and GLM-4.5-X to the models configuration.
  - Enhanced the isZhipuReasoningModel function to include checks for the new models and ensure robust reasoning validation.
  - Removed the now-redundant 'glm-z1' check from the isReasoningModel function to streamline the logic.
This commit is contained in:
parent 18521c93b4
commit 42918cf306
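Before the hunks, a minimal sketch of how the new Zhipu branch is intended to behave. The Model type, the detection helper, and the function name zhipuThinkingParams are simplified stand-ins for illustration only; the real implementations live in @renderer/config/models and OpenAIAPIClient (see the first hunk below).

// Minimal sketch (TypeScript): map the presence/absence of a reasoning effort
// to the `thinking` request field for GLM-4.5-family models. Names and types
// here are simplified stand-ins, not the project's actual signatures.
type Model = { id: string; provider: string }

// Reduced stand-in for isSupportedThinkingTokenZhipuModel from the diff below.
const isSupportedThinkingTokenZhipuModel = (model: Model): boolean =>
  model.id.toLowerCase().includes('glm-4.5')

function zhipuThinkingParams(model: Model, reasoningEffort?: string): Record<string, unknown> {
  if (!isSupportedThinkingTokenZhipuModel(model)) return {}
  // No effort selected -> explicitly disable thinking; any effort -> enable it.
  return reasoningEffort ? { thinking: { type: 'enabled' } } : { thinking: { type: 'disabled' } }
}

console.log(zhipuThinkingParams({ id: 'glm-4.5-air', provider: 'zhipu' })) // { thinking: { type: 'disabled' } }
console.log(zhipuThinkingParams({ id: 'glm-4.5-air', provider: 'zhipu' }, 'high')) // { thinking: { type: 'enabled' } }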
@@ -18,6 +18,7 @@ import {
   isSupportedThinkingTokenHunyuanModel,
   isSupportedThinkingTokenModel,
   isSupportedThinkingTokenQwenModel,
+  isSupportedThinkingTokenZhipuModel,
   isVisionModel
 } from '@renderer/config/models'
 import { processPostsuffixQwen3Model, processReqMessages } from '@renderer/services/ModelMessageService'
@@ -119,6 +120,13 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
       return {}
     }

+    if (isSupportedThinkingTokenZhipuModel(model)) {
+      if (!reasoningEffort) {
+        return { thinking: { type: 'disabled' } }
+      }
+      return { thinking: { type: 'enabled' } }
+    }
+
     if (!reasoningEffort) {
       if (model.provider === 'openrouter') {
         // Don't disable reasoning for Gemini models that support thinking tokens
@@ -243,6 +243,7 @@ export const FUNCTION_CALLING_MODELS = [
   'hunyuan',
   'deepseek',
   'glm-4(?:-[\\w-]+)?',
+  'glm-4.5(?:-[\\w-]+)?',
   'learnlm(?:-[\\w-]+)?',
   'gemini(?:-[\\w-]+)?', // Gemini's embedding models are already excluded earlier
   'grok-3(?:-[\\w-]+)?',
@@ -1187,6 +1188,36 @@ export const SYSTEM_MODELS: Record<string, Model[]> = {
     { id: 'yi-vision-v2', name: 'Yi Vision v2', provider: 'yi', group: 'yi-vision', owned_by: '01.ai' }
   ],
   zhipu: [
+    {
+      id: 'glm-4.5',
+      provider: 'zhipu',
+      name: 'GLM-4.5',
+      group: 'GLM-4.5'
+    },
+    {
+      id: 'glm-4.5-flash',
+      provider: 'zhipu',
+      name: 'GLM-4.5-Flash',
+      group: 'GLM-4.5'
+    },
+    {
+      id: 'glm-4.5-air',
+      provider: 'zhipu',
+      name: 'GLM-4.5-AIR',
+      group: 'GLM-4.5'
+    },
+    {
+      id: 'glm-4.5-airx',
+      provider: 'zhipu',
+      name: 'GLM-4.5-AIRX',
+      group: 'GLM-4.5'
+    },
+    {
+      id: 'glm-4.5-x',
+      provider: 'zhipu',
+      name: 'GLM-4.5-X',
+      group: 'GLM-4.5'
+    },
     {
       id: 'glm-z1-air',
       provider: 'zhipu',
@@ -2559,7 +2590,8 @@ export function isSupportedThinkingTokenModel(model?: Model): boolean {
     isSupportedThinkingTokenQwenModel(model) ||
     isSupportedThinkingTokenClaudeModel(model) ||
     isSupportedThinkingTokenDoubaoModel(model) ||
-    isSupportedThinkingTokenHunyuanModel(model)
+    isSupportedThinkingTokenHunyuanModel(model) ||
+    isSupportedThinkingTokenZhipuModel(model)
   )
 }

@@ -2722,6 +2754,18 @@ export const isSupportedReasoningEffortPerplexityModel = (model: Model): boolean
   return baseName.includes('sonar-deep-research')
 }

+export const isSupportedThinkingTokenZhipuModel = (model: Model): boolean => {
+  const baseName = getLowerBaseModelName(model.id, '/')
+  return baseName.includes('glm-4.5')
+}
+
+export const isZhipuReasoningModel = (model?: Model): boolean => {
+  if (!model) {
+    return false
+  }
+  return isSupportedThinkingTokenZhipuModel(model) || model.id.toLowerCase().includes('glm-z1')
+}
+
 export function isReasoningModel(model?: Model): boolean {
   if (!model || isEmbeddingModel(model) || isRerankModel(model) || isTextToImageModel(model)) {
     return false
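As a quick illustration of what the two helpers added above match, here is a standalone re-implementation that works on plain id strings; getLowerBaseModelName is approximated as "lowercase of the last '/'-separated segment", which is an assumption for this sketch rather than the project's actual helper.

// Standalone sketch of the matching behaviour of the helpers added above.
// The real helpers take a Model object and use the project's own
// getLowerBaseModelName; both are simplified here (assumptions).
const getLowerBaseModelName = (id: string, sep: string = '/'): string =>
  id.split(sep).pop()!.toLowerCase()

const isSupportedThinkingTokenZhipuModel = (id: string): boolean =>
  getLowerBaseModelName(id, '/').includes('glm-4.5')

const isZhipuReasoningModel = (id?: string): boolean =>
  !!id && (isSupportedThinkingTokenZhipuModel(id) || id.toLowerCase().includes('glm-z1'))

console.log(isZhipuReasoningModel('GLM-4.5-AirX')) // true: reasoning model with a thinking toggle
console.log(isZhipuReasoningModel('glm-z1-air'))   // true: reasoning model without a thinking toggle
console.log(isZhipuReasoningModel('glm-4-plus'))   // false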
@@ -2748,7 +2792,7 @@ export function isReasoningModel(model?: Model): boolean {
     isGrokReasoningModel(model) ||
     isHunyuanReasoningModel(model) ||
     isPerplexityReasoningModel(model) ||
-    model.id.toLowerCase().includes('glm-z1') ||
+    isZhipuReasoningModel(model) ||
     model.id.toLowerCase().includes('magistral') ||
     model.id.toLowerCase().includes('minimax-m1') ||
     model.id.toLowerCase().includes('pangu-pro-moe')
@@ -14,7 +14,8 @@ import {
   isSupportedThinkingTokenDoubaoModel,
   isSupportedThinkingTokenGeminiModel,
   isSupportedThinkingTokenHunyuanModel,
-  isSupportedThinkingTokenQwenModel
+  isSupportedThinkingTokenQwenModel,
+  isSupportedThinkingTokenZhipuModel
 } from '@renderer/config/models'
 import { useAssistant } from '@renderer/hooks/useAssistant'
 import { getReasoningEffortOptionsLabel } from '@renderer/i18n/label'
@@ -46,6 +47,7 @@ const MODEL_SUPPORTED_OPTIONS: Record<string, ThinkingOption[]> = {
   qwen_3235ba22b_thinking: ['low', 'medium', 'high'],
   doubao: ['off', 'auto', 'high'],
   hunyuan: ['off', 'auto'],
+  zhipu: ['off', 'auto'],
   perplexity: ['low', 'medium', 'high']
 }

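To show how the new zhipu: ['off', 'auto'] entry is consumed, a reduced sketch of the lookup; the 'default' fallback list shown here is an assumption, since the real default set is not part of this diff.

// Reduced sketch of the option lookup keyed by the value returned from the
// model-detection useMemo shown in the next hunks ('zhipu' for GLM-4.5 models).
type ThinkingOption = 'off' | 'auto' | 'low' | 'medium' | 'high'

const MODEL_SUPPORTED_OPTIONS: Record<string, ThinkingOption[]> = {
  doubao: ['off', 'auto', 'high'],
  hunyuan: ['off', 'auto'],
  zhipu: ['off', 'auto'],
  perplexity: ['low', 'medium', 'high'],
  default: ['off', 'low', 'medium', 'high'] // assumed fallback; not shown in this diff
}

const supportedOptions = (modelKey: string): ThinkingOption[] =>
  MODEL_SUPPORTED_OPTIONS[modelKey] ?? MODEL_SUPPORTED_OPTIONS.default

console.log(supportedOptions('zhipu')) // ['off', 'auto']: Zhipu only exposes an on/off style toggle here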
@@ -71,6 +73,7 @@ const ThinkingButton: FC<Props> = ({ ref, model, assistant, ToolbarButton }): Re
   const isDoubaoModel = isSupportedThinkingTokenDoubaoModel(model)
   const isHunyuanModel = isSupportedThinkingTokenHunyuanModel(model)
   const isPerplexityModel = isSupportedReasoningEffortPerplexityModel(model)
+  const isZhipuModel = isSupportedThinkingTokenZhipuModel(model)

   const currentReasoningEffort = useMemo(() => {
     return assistant.settings?.reasoning_effort || 'off'
@@ -95,6 +98,7 @@ const ThinkingButton: FC<Props> = ({ ref, model, assistant, ToolbarButton }): Re
     if (isDoubaoModel) return 'doubao'
     if (isHunyuanModel) return 'hunyuan'
     if (isPerplexityModel) return 'perplexity'
+    if (isZhipuModel) return 'zhipu'
     return 'default'
   }, [
     isGeminiModel,
@@ -104,7 +108,8 @@ const ThinkingButton: FC<Props> = ({ ref, model, assistant, ToolbarButton }): Re
     isGeminiFlashModel,
     isHunyuanModel,
     isPerplexityModel,
-    isQwen3235BA22BThinkingModel
+    isQwen3235BA22BThinkingModel,
+    isZhipuModel
   ])

   // Get the options supported by the current model