diff --git a/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts b/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts
index 1de6cdbbd0..eac3741d1c 100644
--- a/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts
+++ b/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts
@@ -8,7 +8,7 @@ import {
   isDoubaoThinkingAutoModel,
   isGrokReasoningModel,
   isNotSupportSystemMessageModel,
-  isQwen3235BA22BThinkingModel,
+  isQwenAlwaysThinkModel,
   isQwenMTModel,
   isQwenReasoningModel,
   isReasoningModel,
@@ -150,10 +150,8 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
         }
         return { reasoning: { enabled: false, exclude: true } }
       }
+
       if (isSupportedThinkingTokenQwenModel(model) || isSupportedThinkingTokenHunyuanModel(model)) {
-        if (isQwen3235BA22BThinkingModel(model)) {
-          return {}
-        }
         return { enable_thinking: false }
       }
 
@@ -182,6 +180,8 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
       return {}
     }
+
+    // Case where reasoningEffort is valid
     const effortRatio = EFFORT_RATIO[reasoningEffort]
     const budgetTokens = Math.floor(
       (findTokenLimit(model.id)?.max! - findTokenLimit(model.id)?.min!) * effortRatio + findTokenLimit(model.id)?.min!
@@ -199,9 +199,9 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
     }
 
     // Qwen models
-    if (isSupportedThinkingTokenQwenModel(model)) {
+    if (isQwenReasoningModel(model)) {
       const thinkConfig = {
-        enable_thinking: isQwen3235BA22BThinkingModel(model) ? undefined : true,
+        enable_thinking: isQwenAlwaysThinkModel(model) ? undefined : true,
         thinking_budget: budgetTokens
       }
       if (this.provider.id === 'dashscope') {
diff --git a/src/renderer/src/config/models.ts b/src/renderer/src/config/models.ts
index 4c594fe8ab..e81f8f03ab 100644
--- a/src/renderer/src/config/models.ts
+++ b/src/renderer/src/config/models.ts
@@ -287,7 +287,7 @@ export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
   gemini: ['low', 'medium', 'high', 'auto'] as const,
   gemini_pro: ['low', 'medium', 'high', 'auto'] as const,
   qwen: ['low', 'medium', 'high'] as const,
-  qwen_3235ba22b_thinking: ['low', 'medium', 'high'] as const,
+  qwen_thinking: ['low', 'medium', 'high'] as const,
   doubao: ['auto', 'high'] as const,
   hunyuan: ['auto'] as const,
   zhipu: ['auto'] as const,
@@ -301,7 +301,7 @@ export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
   gemini: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
   gemini_pro: [...MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro] as const,
   qwen: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const,
-  qwen_3235ba22b_thinking: [...MODEL_SUPPORTED_REASONING_EFFORT.qwen_3235ba22b_thinking] as const,
+  qwen_thinking: [...MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking] as const,
   doubao: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const,
   hunyuan: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const,
   zhipu: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const,
@@ -318,8 +318,8 @@ export const getThinkModelType = (model: Model): ThinkingModelType => {
   }
   if (isSupportedReasoningEffortGrokModel(model)) return 'grok'
   if (isSupportedThinkingTokenQwenModel(model)) {
-    if (isQwen3235BA22BThinkingModel(model)) {
-      return 'qwen_3235ba22b_thinking'
+    if (isQwenAlwaysThinkModel(model)) {
+      return 'qwen_thinking'
     }
     return 'qwen'
   }
@@ -2591,6 +2591,7 @@ export const isSupportedThinkingTokenGeminiModel = (model: Model): boolean => {
   return model.id.includes('gemini-2.5')
 }
 
+/** Whether this is a Qwen reasoning model */
 export function isQwenReasoningModel(model?: Model): boolean {
   if (!model) {
     return false
@@ -2618,6 +2619,7 @@ export function isQwenReasoningModel(model?: Model): boolean {
   return false
 }
 
+/** Whether this is a Qwen3 reasoning model that supports thinking control */
 export function isSupportedThinkingTokenQwenModel(model?: Model): boolean {
   if (!model) {
     return false
@@ -2630,12 +2632,10 @@ export function isSupportedThinkingTokenQwenModel(model?: Model): boolean {
   }
 
   if (baseName.startsWith('qwen3')) {
-    if (baseName.includes('instruct')) {
+    // instruct models never think and thinking models always think; neither supports thinking control
+    if (baseName.includes('instruct') || baseName.includes('thinking')) {
       return false
     }
-    if (baseName.includes('thinking')) {
-      return true
-    }
     return true
   }
 
@@ -2655,12 +2655,13 @@ export function isSupportedThinkingTokenQwenModel(model?: Model): boolean {
   ].includes(baseName)
 }
 
-export function isQwen3235BA22BThinkingModel(model?: Model): boolean {
+/** Whether this is a Qwen reasoning model that does not support thinking control */
+export function isQwenAlwaysThinkModel(model?: Model): boolean {
   if (!model) {
     return false
   }
   const baseName = getLowerBaseModelName(model.id, '/')
-  return baseName.includes('qwen3-235b-a22b-thinking')
+  return baseName.startsWith('qwen3') && baseName.includes('thinking')
 }
 
 export function isSupportedThinkingTokenDoubaoModel(model?: Model): boolean {
diff --git a/src/renderer/src/types/index.ts b/src/renderer/src/types/index.ts
index c4c76d3cbd..fe1d145b7c 100644
--- a/src/renderer/src/types/index.ts
+++ b/src/renderer/src/types/index.ts
@@ -60,7 +60,7 @@ export type ThinkingModelType =
   | 'gemini'
   | 'gemini_pro'
   | 'qwen'
-  | 'qwen_3235ba22b_thinking'
+  | 'qwen_thinking'
   | 'doubao'
   | 'hunyuan'
   | 'zhipu'
@@ -75,7 +75,7 @@ const ThinkModelTypes: ThinkingModelType[] = [
   'gemini',
   'gemini_pro',
   'qwen',
-  'qwen_3235ba22b_thinking',
+  'qwen_thinking',
   'doubao',
   'hunyuan',
   'zhipu',
@@ -83,7 +83,7 @@ const ThinkModelTypes: ThinkingModelType[] = [
 ] as const
 
 export function isThinkModelType(type: string): type is ThinkingModelType {
-  return ThinkModelTypes.includes(type as ThinkingModelType)
+  return ThinkModelTypes.some((t) => t === type)
 }
 
 export const EFFORT_RATIO: EffortRatio = {
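
Review note: a minimal sketch (not part of the patch) of how the renamed predicates classify a few model ids after this change. It assumes `getLowerBaseModelName(id, '/')` lowercases the id and keeps the segment after the last `/`; the sample ids are illustrative only.

```ts
// Approximation of getLowerBaseModelName(id, '/') for this sketch.
const normalize = (id: string): string => id.toLowerCase().split('/').pop() ?? ''

// Mirrors the patched isQwenAlwaysThinkModel: any qwen3 id containing "thinking" always thinks.
const alwaysThinks = (id: string): boolean => {
  const base = normalize(id)
  return base.startsWith('qwen3') && base.includes('thinking')
}

// Mirrors the patched qwen3 branch of isSupportedThinkingTokenQwenModel:
// "instruct" and "thinking" variants cannot toggle thinking; other qwen3 ids can.
// Non-qwen3 families from the original function are omitted in this sketch.
const supportsThinkingToggle = (id: string): boolean => {
  const base = normalize(id)
  if (!base.startsWith('qwen3')) return false
  return !base.includes('instruct') && !base.includes('thinking')
}

console.log(alwaysThinks('Qwen/Qwen3-235B-A22B-Thinking-2507'))  // true
console.log(alwaysThinks('Qwen/Qwen3-30B-A3B-Thinking-2507'))    // true (old predicate matched only qwen3-235b-a22b-thinking)
console.log(supportsThinkingToggle('qwen3-32b'))                 // true
console.log(supportsThinkingToggle('qwen3-235b-a22b-thinking'))  // false (previously true)
```

The net effect visible in the diff: the old code special-cased only `qwen3-235b-a22b-thinking`, so other qwen3 `*-thinking` ids fell through to the thinking-toggle path; the rename plus the broadened `startsWith('qwen3') && includes('thinking')` check treats the whole family as always-thinking instead.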