diff --git a/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts b/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts
index b9da840164..8b58e78899 100644
--- a/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts
+++ b/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts
@@ -137,6 +137,7 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
     // }
 
     // openrouter: use reasoning
+    // openrouter hides the reasoning content when reasoning is disabled, so always-thinking models need special handling
     if (model.provider === SystemProviderIds.openrouter) {
       // Don't disable reasoning for Gemini models that support thinking tokens
       if (isSupportedThinkingTokenGeminiModel(model) && !GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
@@ -146,6 +147,9 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
       if (isGrokReasoningModel(model) || isOpenAIReasoningModel(model)) {
         return {}
       }
+      if (isReasoningModel(model) && !isSupportedThinkingTokenModel(model)) {
+        return {}
+      }
       return { reasoning: { enabled: false, exclude: true } }
     }
 
@@ -203,10 +207,6 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
           enable_thinking: true,
           incremental_output: true
         }
-      case SystemProviderIds.silicon:
-        return {
-          enable_thinking: true
-        }
       case SystemProviderIds.doubao:
         return {
           thinking: {
@@ -225,10 +225,18 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
           thinking: true
         }
       }
+      case SystemProviderIds.silicon:
+      case SystemProviderIds.ppio:
+        return {
+          enable_thinking: true
+        }
       default:
         logger.warn(
-          `Skipping thinking options for provider ${this.provider.name} as DeepSeek v3.1 thinking control method is unknown`
+          `Using enable_thinking as a fallback for provider ${this.provider.name} since the DeepSeek v3.1 thinking control method is unknown`
         )
+        return {
+          enable_thinking: true
+        }
     }
   }
 }
diff --git a/src/renderer/src/config/models.ts b/src/renderer/src/config/models.ts
index a5fae53794..ba4217f76f 100644
--- a/src/renderer/src/config/models.ts
+++ b/src/renderer/src/config/models.ts
@@ -2584,7 +2584,7 @@ export function isSupportedThinkingTokenModel(model?: Model): boolean {
 
   // Specifically for DeepSeek V3.1. White list for now
   if (isDeepSeekHybridInferenceModel(model)) {
-    return (['openrouter', 'dashscope', 'doubao', 'silicon', 'nvidia'] satisfies SystemProviderId[]).some(
+    return (['openrouter', 'dashscope', 'doubao', 'silicon', 'nvidia', 'ppio'] satisfies SystemProviderId[]).some(
       (id) => id === model.provider
     )
   }
@@ -2813,7 +2813,7 @@ export const isDeepSeekHybridInferenceModel = (model: Model) => {
   const modelId = getLowerBaseModelName(model.id)
   // DeepSeek's official API uses the chat/reasoner model split to control reasoning; other providers need separate checks, and their ids may differ
   // openrouter: deepseek/deepseek-chat-v3.1. Other providers may copy DeepSeek's split and expose a non-thinking model under the same id, so this check carries some risk
-  return /deepseek-v3(?:\.1|-1-\d+)?/.test(modelId) || modelId === 'deepseek-chat-v3.1'
+  return /deepseek-v3(?:\.1|-1-\d+)?/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
 }
 
 export const isSupportedThinkingTokenDeepSeekModel = isDeepSeekHybridInferenceModel
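For review context, a minimal self-contained sketch of the per-provider DeepSeek v3.1 thinking control after this change. This is not the project code: provider ids are written as plain strings, logger is replaced by console.warn, and the doubao and other payload shapes are omitted because they are elided in the hunks above.

// Sketch only: mirrors the switch in the patched method, simplified.
type ThinkingOptions = Record<string, unknown>

function deepseekV31ThinkingOptions(provider: string): ThinkingOptions {
  switch (provider) {
    case 'dashscope':
      return { enable_thinking: true, incremental_output: true }
    case 'silicon': // moved down so it can share a body with the new ppio case
    case 'ppio':
      return { enable_thinking: true }
    // doubao and the remaining cases are omitted; their payloads are elided in the diff
    default:
      // New behavior: fall back to enable_thinking instead of returning nothing
      console.warn(`Using enable_thinking as a fallback for provider ${provider}`)
      return { enable_thinking: true }
  }
}

The notable behavioral change is the default branch: unknown providers previously got a warning and no thinking options at all, while they now receive enable_thinking: true.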
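The models.ts change relaxes the id check from strict equality to a substring test. A quick sketch of the updated matcher against sample ids; the ids are illustrative only, and getLowerBaseModelName is assumed to have already lowercased them.

// Sketch: the updated matcher from isDeepSeekHybridInferenceModel, inlined.
const matchesDeepSeekV31 = (id: string): boolean =>
  /deepseek-v3(?:\.1|-1-\d+)?/.test(id) || id.includes('deepseek-chat-v3.1')

console.log(matchesDeepSeekV31('deepseek-v3.1'))               // true (regex)
console.log(matchesDeepSeekV31('deepseek-v3-1-250821'))        // true (regex, dated-suffix form)
console.log(matchesDeepSeekV31('deepseek-chat-v3.1'))          // true (old === also matched this)
console.log(matchesDeepSeekV31('deepseek/deepseek-chat-v3.1')) // true (new: substring match catches prefixed ids)
console.log(matchesDeepSeekV31('deepseek-r1'))                 // false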