From 173e64799ebaa75fcb8ab75a81b3aa9bc5703900 Mon Sep 17 00:00:00 2001 From: suyao Date: Wed, 28 May 2025 01:28:47 +0800 Subject: [PATCH] fix: update token limits for Claude-4 models and refine reasoning checks in OpenAIProvider - Adjusted max token limit for 'claude-sonnet-4' and 'claude-opus-4' models from 64000 to 32000. - Combined the OpenRouter reasoning-effort and thinking-token checks into a single condition; thinking-token models now send `reasoning.effort` instead of `reasoning.max_tokens`. --- src/renderer/src/config/models.ts | 2 +- .../src/providers/AiProvider/OpenAIProvider.ts | 15 +++++---------- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/src/renderer/src/config/models.ts b/src/renderer/src/config/models.ts index cffe99d9a1..aaf7d8a4f3 100644 --- a/src/renderer/src/config/models.ts +++ b/src/renderer/src/config/models.ts @@ -2617,7 +2617,7 @@ export const THINKING_TOKEN_MAP: Record<string, { min: number; max: number }> = // Claude models 'claude-3[.-]7.*sonnet.*$': { min: 1024, max: 64000 }, - 'claude-(:?sonnet|opus)-4.*$': { min: 1024, max: 64000 } + 'claude-(:?sonnet|opus)-4.*$': { min: 1024, max: 32000 } } export const findTokenLimit = (modelId: string): { min: number; max: number } | undefined => { diff --git a/src/renderer/src/providers/AiProvider/OpenAIProvider.ts b/src/renderer/src/providers/AiProvider/OpenAIProvider.ts index 78c6d306f2..a2ab49b634 100644 --- a/src/renderer/src/providers/AiProvider/OpenAIProvider.ts +++ b/src/renderer/src/providers/AiProvider/OpenAIProvider.ts @@ -281,21 +281,13 @@ export default class OpenAIProvider extends BaseOpenAIProvider { // OpenRouter models if (model.provider === 'openrouter') { - if (isSupportedReasoningEffortModel(model)) { + if (isSupportedReasoningEffortModel(model) || isSupportedThinkingTokenModel(model)) { return { reasoning: { effort: assistant?.settings?.reasoning_effort } } } - - if (isSupportedThinkingTokenModel(model)) { - return { - reasoning: { - max_tokens: budgetTokens - } - } - } } // Qwen models @@ -634,7 +626,10 @@ export default class OpenAIProvider extends 
BaseOpenAIProvider { if (chunk.choices && chunk.choices.length > 0) { const delta = chunk.choices[0]?.delta - if (delta?.reasoning_content || delta?.reasoning) { + if ( + (delta?.reasoning_content && delta?.reasoning_content !== '\n') || + (delta?.reasoning && delta?.reasoning !== '\n') + ) { yield { type: 'reasoning', textDelta: delta.reasoning_content || delta.reasoning } } if (delta?.content) {