diff --git a/.env.example b/.env.example index 6d0410951d..0d57ffc033 100644 --- a/.env.example +++ b/.env.example @@ -1 +1,8 @@ NODE_OPTIONS=--max-old-space-size=8000 +API_KEY="sk-xxx" +BASE_URL="https://api.siliconflow.cn/v1/" +MODEL="Qwen/Qwen3-235B-A22B-Instruct-2507" +CSLOGGER_MAIN_LEVEL=info +CSLOGGER_RENDERER_LEVEL=info +#CSLOGGER_MAIN_SHOW_MODULES= +#CSLOGGER_RENDERER_SHOW_MODULES= diff --git a/.yarn/patches/openai-npm-5.12.0-a06a6369b2.patch b/.yarn/patches/openai-npm-5.12.2-30b075401c.patch similarity index 68% rename from .yarn/patches/openai-npm-5.12.0-a06a6369b2.patch rename to .yarn/patches/openai-npm-5.12.2-30b075401c.patch index 39f0c9b7da..29b92dcc7b 100644 Binary files a/.yarn/patches/openai-npm-5.12.0-a06a6369b2.patch and b/.yarn/patches/openai-npm-5.12.2-30b075401c.patch differ diff --git a/package.json b/package.json index c2c9f71458..1896312423 100644 --- a/package.json +++ b/package.json @@ -216,7 +216,7 @@ "motion": "^12.10.5", "notion-helper": "^1.3.22", "npx-scope-finder": "^1.2.0", - "openai": "patch:openai@npm%3A5.12.0#~/.yarn/patches/openai-npm-5.12.0-a06a6369b2.patch", + "openai": "patch:openai@npm%3A5.12.2#~/.yarn/patches/openai-npm-5.12.2-30b075401c.patch", "p-queue": "^8.1.0", "pdf-lib": "^1.17.1", "playwright": "^1.52.0", @@ -274,10 +274,8 @@ "@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A0.3.16#~/.yarn/patches/@langchain-openai-npm-0.3.16-e525b59526.patch", "@langchain/openai@npm:>=0.1.0 <0.4.0": "patch:@langchain/openai@npm%3A0.3.16#~/.yarn/patches/@langchain-openai-npm-0.3.16-e525b59526.patch", "libsql@npm:^0.4.4": "patch:libsql@npm%3A0.4.7#~/.yarn/patches/libsql-npm-0.4.7-444e260fb1.patch", - "openai@npm:^4.77.0": "patch:openai@npm%3A5.12.0#~/.yarn/patches/openai-npm-5.12.0-a06a6369b2.patch", "pkce-challenge@npm:^4.1.0": "patch:pkce-challenge@npm%3A4.1.0#~/.yarn/patches/pkce-challenge-npm-4.1.0-fbc51695a3.patch", "app-builder-lib@npm:26.0.13": 
"patch:app-builder-lib@npm%3A26.0.13#~/.yarn/patches/app-builder-lib-npm-26.0.13-a064c9e1d0.patch", - "openai@npm:^4.87.3": "patch:openai@npm%3A5.12.0#~/.yarn/patches/openai-npm-5.12.0-a06a6369b2.patch", "app-builder-lib@npm:26.0.15": "patch:app-builder-lib@npm%3A26.0.15#~/.yarn/patches/app-builder-lib-npm-26.0.15-360e5b0476.patch", "@langchain/core@npm:^0.3.26": "patch:@langchain/core@npm%3A0.3.44#~/.yarn/patches/@langchain-core-npm-0.3.44-41d5c3cb0a.patch", "node-abi": "4.12.0", @@ -285,7 +283,9 @@ "vite": "npm:rolldown-vite@latest", "atomically@npm:^1.7.0": "patch:atomically@npm%3A1.7.0#~/.yarn/patches/atomically-npm-1.7.0-e742e5293b.patch", "file-stream-rotator@npm:^0.6.1": "patch:file-stream-rotator@npm%3A0.6.1#~/.yarn/patches/file-stream-rotator-npm-0.6.1-eab45fb13d.patch", - "windows-system-proxy@npm:^1.0.0": "patch:windows-system-proxy@npm%3A1.0.0#~/.yarn/patches/windows-system-proxy-npm-1.0.0-ff2a828eec.patch" + "windows-system-proxy@npm:^1.0.0": "patch:windows-system-proxy@npm%3A1.0.0#~/.yarn/patches/windows-system-proxy-npm-1.0.0-ff2a828eec.patch", + "openai@npm:^4.77.0": "patch:openai@npm%3A5.12.2#~/.yarn/patches/openai-npm-5.12.2-30b075401c.patch", + "openai@npm:^4.87.3": "patch:openai@npm%3A5.12.2#~/.yarn/patches/openai-npm-5.12.2-30b075401c.patch" }, "packageManager": "yarn@4.9.1", "lint-staged": { diff --git a/src/renderer/src/aiCore/clients/BaseApiClient.ts b/src/renderer/src/aiCore/clients/BaseApiClient.ts index f5883c74bb..def655e45c 100644 --- a/src/renderer/src/aiCore/clients/BaseApiClient.ts +++ b/src/renderer/src/aiCore/clients/BaseApiClient.ts @@ -23,6 +23,7 @@ import { MemoryItem, Model, OpenAIServiceTiers, + OpenAIVerbosity, Provider, SystemProviderIds, ToolCallResponse, @@ -233,6 +234,21 @@ export abstract class BaseApiClient< return serviceTierSetting } + protected getVerbosity(): OpenAIVerbosity { + try { + const state = window.store?.getState() + const verbosity = state?.settings?.openAI?.verbosity + + if (verbosity && ['low', 
'medium', 'high'].includes(verbosity)) { + return verbosity + } + } catch (error) { + logger.warn('Failed to get verbosity from state:', error as Error) + } + + return 'medium' + } + protected getTimeout(model: Model) { if (isSupportFlexServiceTierModel(model)) { return 15 * 1000 * 60 diff --git a/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts b/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts index 28e6de4223..988a4f3572 100644 --- a/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts +++ b/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts @@ -6,6 +6,7 @@ import { getOpenAIWebSearchParams, getThinkModelType, isDoubaoThinkingAutoModel, + isGPT5SeriesModel, isGrokReasoningModel, isNotSupportSystemMessageModel, isQwenAlwaysThinkModel, @@ -391,9 +392,13 @@ export class OpenAIAPIClient extends OpenAIBaseClient< ): ToolCallResponse { let parsedArgs: any try { - parsedArgs = JSON.parse(toolCall.function.arguments) + if ('function' in toolCall) { + parsedArgs = JSON.parse(toolCall.function.arguments) + } } catch { - parsedArgs = toolCall.function.arguments + if ('function' in toolCall) { + parsedArgs = toolCall.function.arguments + } } return { id: toolCall.id, @@ -471,7 +476,10 @@ export class OpenAIAPIClient extends OpenAIBaseClient< } if ('tool_calls' in message && message.tool_calls) { sum += message.tool_calls.reduce((acc, toolCall) => { - return acc + estimateTextTokens(JSON.stringify(toolCall.function.arguments)) + if (toolCall.type === 'function' && 'function' in toolCall) { + return acc + estimateTextTokens(JSON.stringify(toolCall.function.arguments)) + } + return acc }, 0) } return sum @@ -572,6 +580,13 @@ export class OpenAIAPIClient extends OpenAIBaseClient< // Note: Some providers like Mistral don't support stream_options const shouldIncludeStreamOptions = streamOutput && isSupportStreamOptionsProvider(this.provider) + const reasoningEffort = this.getReasoningEffort(assistant, model) + + // minimal cannot be used with 
web_search tool + if (isGPT5SeriesModel(model) && reasoningEffort.reasoning_effort === 'minimal' && enableWebSearch) { + reasoningEffort.reasoning_effort = 'low' + } + const commonParams: OpenAISdkParams = { model: model.id, messages: @@ -587,7 +602,7 @@ export class OpenAIAPIClient extends OpenAIBaseClient< // groq 有不同的 service tier 配置,不符合 openai 接口类型 service_tier: this.getServiceTier(model) as OpenAIServiceTier, ...this.getProviderSpecificParameters(assistant, model), - ...this.getReasoningEffort(assistant, model), + ...reasoningEffort, ...getOpenAIWebSearchParams(model, enableWebSearch), // OpenRouter usage tracking ...(this.provider.id === 'openrouter' ? { usage: { include: true } } : {}), @@ -901,7 +916,9 @@ export class OpenAIAPIClient extends OpenAIBaseClient< type: 'function' } } else if (fun?.arguments) { - toolCalls[index].function.arguments += fun.arguments + if (toolCalls[index] && toolCalls[index].type === 'function' && 'function' in toolCalls[index]) { + toolCalls[index].function.arguments += fun.arguments + } } } else { toolCalls.push(toolCall) diff --git a/src/renderer/src/aiCore/clients/openai/OpenAIResponseAPIClient.ts b/src/renderer/src/aiCore/clients/openai/OpenAIResponseAPIClient.ts index f740c5bdcf..10a2ee7bbe 100644 --- a/src/renderer/src/aiCore/clients/openai/OpenAIResponseAPIClient.ts +++ b/src/renderer/src/aiCore/clients/openai/OpenAIResponseAPIClient.ts @@ -2,12 +2,14 @@ import { loggerService } from '@logger' import { GenericChunk } from '@renderer/aiCore/middleware/schemas' import { CompletionsContext } from '@renderer/aiCore/middleware/types' import { + isGPT5SeriesModel, isOpenAIChatCompletionOnlyModel, isOpenAILLMModel, isSupportedReasoningEffortOpenAIModel, + isSupportVerbosityModel, isVisionModel } from '@renderer/config/models' -import { isSupportDeveloperRoleProvider, isSupportStreamOptionsProvider } from '@renderer/config/providers' +import { isSupportDeveloperRoleProvider } from '@renderer/config/providers' import { 
estimateTextTokens } from '@renderer/services/TokenService' import { FileMetadata, @@ -304,8 +306,7 @@ export class OpenAIResponseAPIClient extends OpenAIBaseClient< const content = this.convertResponseToMessageContent(output) - const newReqMessages = [...currentReqMessages, ...content, ...(toolResults || [])] - return newReqMessages + return [...currentReqMessages, ...content, ...(toolResults || [])] } override estimateMessageTokens(message: OpenAIResponseSdkMessageParam): number { @@ -442,7 +443,12 @@ export class OpenAIResponseAPIClient extends OpenAIBaseClient< tools = tools.concat(extraTools) - const shouldIncludeStreamOptions = streamOutput && isSupportStreamOptionsProvider(this.provider) + const reasoningEffort = this.getReasoningEffort(assistant, model) + + // minimal cannot be used with web_search tool + if (isGPT5SeriesModel(model) && reasoningEffort.reasoning?.effort === 'minimal' && enableWebSearch) { + reasoningEffort.reasoning.effort = 'low' + } const commonParams: OpenAIResponseSdkParams = { model: model.id, @@ -454,10 +460,16 @@ export class OpenAIResponseAPIClient extends OpenAIBaseClient< top_p: this.getTopP(assistant, model), max_output_tokens: maxTokens, stream: streamOutput, - ...(shouldIncludeStreamOptions ? { stream_options: { include_usage: true } } : {}), tools: !isEmpty(tools) ? tools : undefined, // groq 有不同的 service tier 配置,不符合 openai 接口类型 service_tier: this.getServiceTier(model) as OpenAIServiceTier, + ...(isSupportVerbosityModel(model) + ? 
{ + text: { + verbosity: this.getVerbosity() + } + } + : {}), - ...(this.getReasoningEffort(assistant, model) as OpenAI.Reasoning), + ...(reasoningEffort as OpenAI.Reasoning), // 只在对话场景下应用自定义参数,避免影响翻译、总结等其他业务逻辑 // 注意:用户自定义参数总是应该覆盖其他参数 diff --git a/src/renderer/src/assets/images/models/gpt-5-chat.png b/src/renderer/src/assets/images/models/gpt-5-chat.png new file mode 100644 index 0000000000..3a6a5c3937 Binary files /dev/null and b/src/renderer/src/assets/images/models/gpt-5-chat.png differ diff --git a/src/renderer/src/assets/images/models/gpt-5-mini.png b/src/renderer/src/assets/images/models/gpt-5-mini.png new file mode 100644 index 0000000000..0ac07a3a26 Binary files /dev/null and b/src/renderer/src/assets/images/models/gpt-5-mini.png differ diff --git a/src/renderer/src/assets/images/models/gpt-5-nano.png b/src/renderer/src/assets/images/models/gpt-5-nano.png new file mode 100644 index 0000000000..e3cc1ae871 Binary files /dev/null and b/src/renderer/src/assets/images/models/gpt-5-nano.png differ diff --git a/src/renderer/src/assets/images/models/gpt-5.png b/src/renderer/src/assets/images/models/gpt-5.png new file mode 100644 index 0000000000..188df6068f Binary files /dev/null and b/src/renderer/src/assets/images/models/gpt-5.png differ diff --git a/src/renderer/src/components/Icons/SVGIcon.tsx b/src/renderer/src/components/Icons/SVGIcon.tsx index 988d9657c1..b9a3eff899 100644 --- a/src/renderer/src/components/Icons/SVGIcon.tsx +++ b/src/renderer/src/components/Icons/SVGIcon.tsx @@ -56,6 +56,18 @@ export function MdiLightbulbOn10(props: SVGProps) { return ( ) } +export function MdiLightbulbOn30(props: SVGProps) { + return ( + + {/* Icon from Material Design Icons by Pictogrammers - https://github.com/Templarian/MaterialDesign/blob/master/LICENSE */} + + + ) +} + export function MdiLightbulbOn50(props: SVGProps) { return ( @@ -67,6 +79,17 @@ export function MdiLightbulbOn50(props: SVGProps) { ) } +export function MdiLightbulbOn80(props: SVGProps) { + return ( + + {/* Icon from Material Design Icons by Pictogrammers - 
https://github.com/Templarian/MaterialDesign/blob/master/LICENSE */} + + + ) +} export function MdiLightbulbOn90(props: SVGProps) { return ( @@ -77,3 +100,15 @@ export function MdiLightbulbOn90(props: SVGProps) { ) } + +export function MdiLightbulbOn(props: SVGProps) { + // {/* Icon from Material Design Icons by Pictogrammers - https://github.com/Templarian/MaterialDesign/blob/master/LICENSE */} + return ( + + + + ) +} diff --git a/src/renderer/src/config/models.ts b/src/renderer/src/config/models.ts index 0c4c6d5190..a7ea85c1f0 100644 --- a/src/renderer/src/config/models.ts +++ b/src/renderer/src/config/models.ts @@ -57,6 +57,10 @@ import { } from '@renderer/assets/images/models/gpt_dark.png' import ChatGPTImageModelLogo from '@renderer/assets/images/models/gpt_image_1.png' import ChatGPTo1ModelLogo from '@renderer/assets/images/models/gpt_o1.png' +import GPT5ModelLogo from '@renderer/assets/images/models/gpt-5.png' +import GPT5ChatModelLogo from '@renderer/assets/images/models/gpt-5-chat.png' +import GPT5MiniModelLogo from '@renderer/assets/images/models/gpt-5-mini.png' +import GPT5NanoModelLogo from '@renderer/assets/images/models/gpt-5-nano.png' import GrokModelLogo from '@renderer/assets/images/models/grok.png' import GrokModelLogoDark from '@renderer/assets/images/models/grok_dark.png' import GrypheModelLogo from '@renderer/assets/images/models/gryphe.png' @@ -185,6 +189,7 @@ const visionAllowedModels = [ 'gpt-4.1(?:-[\\w-]+)?', 'gpt-4o(?:-[\\w-]+)?', 'gpt-4.5(?:-[\\w-]+)', + 'gpt-5(?:-[\\w-]+)?', 'chatgpt-4o(?:-[\\w-]+)?', 'o1(?:-[\\w-]+)?', 'o3(?:-[\\w-]+)?', @@ -247,6 +252,7 @@ export const FUNCTION_CALLING_MODELS = [ 'gpt-4', 'gpt-4.5', 'gpt-oss(?:-[\\w-]+)', + 'gpt-5(?:-[\\w-]+)?', 'o(1|3|4)(?:-[\\w-]+)?', 'claude', 'qwen', @@ -269,7 +275,8 @@ const FUNCTION_CALLING_EXCLUDED_MODELS = [ 'o1-preview', 'AIDC-AI/Marco-o1', 'gemini-1(?:\\.[\\w-]+)?', - 'qwen-mt(?:-[\\w-]+)?' + 'qwen-mt(?:-[\\w-]+)?', + 'gpt-5-chat(?:-[\\w-]+)?' 
] export const FUNCTION_CALLING_REGEX = new RegExp( @@ -285,6 +292,7 @@ export const CLAUDE_SUPPORTED_WEBSEARCH_REGEX = new RegExp( // 模型类型到支持的reasoning_effort的映射表 export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = { default: ['low', 'medium', 'high'] as const, + gpt5: ['minimal', 'low', 'medium', 'high'] as const, grok: ['low', 'high'] as const, gemini: ['low', 'medium', 'high', 'auto'] as const, gemini_pro: ['low', 'medium', 'high', 'auto'] as const, @@ -299,18 +307,22 @@ export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = { // 模型类型到支持选项的映射表 export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = { default: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const, - grok: [...MODEL_SUPPORTED_REASONING_EFFORT.grok] as const, + gpt5: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const, + grok: MODEL_SUPPORTED_REASONING_EFFORT.grok, gemini: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const, - gemini_pro: [...MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro] as const, + gemini_pro: MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro, qwen: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const, - qwen_thinking: [...MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking] as const, + qwen_thinking: MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking, doubao: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const, hunyuan: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const, zhipu: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const, - perplexity: [...MODEL_SUPPORTED_REASONING_EFFORT.perplexity] as const + perplexity: MODEL_SUPPORTED_REASONING_EFFORT.perplexity } as const export const getThinkModelType = (model: Model): ThinkingModelType => { + if (isGPT5SeriesModel(model)) { + return 'gpt5' + } if (isSupportedThinkingTokenGeminiModel(model)) { if (GEMINI_FLASH_MODEL_REGEX.test(model.id)) { return 'gemini' @@ -380,6 +392,10 @@ export function getModelLogo(modelId: string) { 'gpt-image': 
ChatGPTImageModelLogo, 'gpt-3': isLight ? ChatGPT35ModelLogo : ChatGPT35ModelLogoDark, 'gpt-4': isLight ? ChatGPT4ModelLogo : ChatGPT4ModelLogoDark, + 'gpt-5$': GPT5ModelLogo, + 'gpt-5-mini': GPT5MiniModelLogo, + 'gpt-5-nano': GPT5NanoModelLogo, + 'gpt-5-chat': GPT5ChatModelLogo, gpts: isLight ? ChatGPT4ModelLogo : ChatGPT4ModelLogoDark, 'gpt-oss(?:-[\\w-]+)': isLight ? ChatGptModelLogo : ChatGptModelLogoDark, 'text-moderation': isLight ? ChatGptModelLogo : ChatGptModelLogoDark, @@ -2453,7 +2469,7 @@ export function isVisionModel(model: Model): boolean { export function isOpenAIReasoningModel(model: Model): boolean { const modelId = getLowerBaseModelName(model.id, '/') - return modelId.includes('o1') || modelId.includes('o3') || modelId.includes('o4') || modelId.includes('gpt-oss') + return isSupportedReasoningEffortOpenAIModel(model) || modelId.includes('o1') || modelId.includes('gpt-5-chat') } export function isOpenAILLMModel(model: Model): boolean { @@ -2479,6 +2495,7 @@ export function isOpenAIModel(model: Model): boolean { return false } const modelId = getLowerBaseModelName(model.id) + return modelId.includes('gpt') || isOpenAIReasoningModel(model) } @@ -2487,7 +2504,14 @@ export function isSupportFlexServiceTierModel(model: Model): boolean { return false } const modelId = getLowerBaseModelName(model.id) - return (modelId.includes('o3') && !modelId.includes('o3-mini')) || modelId.includes('o4-mini') + return ( + (modelId.includes('o3') && !modelId.includes('o3-mini')) || modelId.includes('o4-mini') || modelId.includes('gpt-5') + ) +} + +export function isSupportVerbosityModel(model: Model): boolean { + const modelId = getLowerBaseModelName(model.id) + return isGPT5SeriesModel(model) && !modelId.includes('chat') } export function isSupportedReasoningEffortOpenAIModel(model: Model): boolean { @@ -2495,7 +2519,9 @@ export function isSupportedReasoningEffortOpenAIModel(model: Model): boolean { return ( (modelId.includes('o1') && !(modelId.includes('o1-preview') 
|| modelId.includes('o1-mini'))) || modelId.includes('o3') || - modelId.includes('o4') + modelId.includes('o4') || + modelId.includes('gpt-oss') || + (isGPT5SeriesModel(model) && !modelId.includes('chat')) ) } @@ -2527,7 +2553,8 @@ export function isOpenAIWebSearchModel(model: Model): boolean { (modelId.includes('gpt-4.1') && !modelId.includes('gpt-4.1-nano')) || (modelId.includes('gpt-4o') && !modelId.includes('gpt-4o-image')) || modelId.includes('o3') || - modelId.includes('o4') + modelId.includes('o4') || + (modelId.includes('gpt-5') && !modelId.includes('chat')) ) } @@ -3133,17 +3160,14 @@ export const isQwenMTModel = (model: Model): boolean => { } export const isNotSupportedTextDelta = (model: Model): boolean => { - if (isQwenMTModel(model)) { - return true - } - - return false + return isQwenMTModel(model) } export const isNotSupportSystemMessageModel = (model: Model): boolean => { - if (isQwenMTModel(model) || isGemmaModel(model)) { - return true - } - - return false + return isQwenMTModel(model) || isGemmaModel(model) +} + +export const isGPT5SeriesModel = (model: Model) => { + const modelId = getLowerBaseModelName(model.id) + return modelId.includes('gpt-5') } diff --git a/src/renderer/src/i18n/label.ts b/src/renderer/src/i18n/label.ts index 541e741bb1..9bd7839c11 100644 --- a/src/renderer/src/i18n/label.ts +++ b/src/renderer/src/i18n/label.ts @@ -5,6 +5,7 @@ */ import { loggerService } from '@logger' +import { ThinkingOption } from '@renderer/types' import i18n from './index' @@ -266,13 +267,13 @@ export const getHttpMessageLabel = (key: string): string => { return getLabel(key, httpMessageKeyMap) } -const reasoningEffortOptionsKeyMap = { - auto: 'assistants.settings.reasoning_effort.default', +const reasoningEffortOptionsKeyMap: Record = { + off: 'assistants.settings.reasoning_effort.off', + minimal: 'assistants.settings.reasoning_effort.minimal', high: 'assistants.settings.reasoning_effort.high', - label: 'assistants.settings.reasoning_effort.label', 
low: 'assistants.settings.reasoning_effort.low', medium: 'assistants.settings.reasoning_effort.medium', - off: 'assistants.settings.reasoning_effort.off' + auto: 'assistants.settings.reasoning_effort.default' } as const export const getReasoningEffortOptionsLabel = (key: string): string => { return getLabel(key, reasoningEffortOptionsKeyMap) } diff --git a/src/renderer/src/i18n/locales/en-us.json b/src/renderer/src/i18n/locales/en-us.json index 9c98ec4b8a..901d9e9e01 100644 --- a/src/renderer/src/i18n/locales/en-us.json +++ b/src/renderer/src/i18n/locales/en-us.json @@ -183,10 +183,11 @@ "prompt": "Prompt Settings", "reasoning_effort": { "default": "Default", - "high": "Think harder", + "high": "High", "label": "Reasoning effort", - "low": "Think less", - "medium": "Think normally", + "low": "Low", + "medium": "Medium", + "minimal": "Minimal", "off": "Off" }, "regular_phrases": { @@ -3119,7 +3120,14 @@ "tip": "A summary of the reasoning performed by the model", "title": "Summary Mode" }, - "title": "OpenAI Settings" + "title": "OpenAI Settings", + "verbosity": { + "high": "High", + "low": "Low", + "medium": "Medium", + "tip": "Control the level of detail in the model's output", + "title": "Level of detail" + } }, "privacy": { "enable_privacy_mode": "Anonymous reporting of errors and statistics", diff --git a/src/renderer/src/i18n/locales/ja-jp.json b/src/renderer/src/i18n/locales/ja-jp.json index 3c76e68a4a..903f395a3c 100644 --- a/src/renderer/src/i18n/locales/ja-jp.json +++ b/src/renderer/src/i18n/locales/ja-jp.json @@ -187,6 +187,7 @@ "label": "思考連鎖の長さ", "low": "少しの思考", "medium": "普通の思考", + "minimal": "最小限の思考", "off": "オフ" }, "regular_phrases": { @@ -3119,7 +3120,14 @@ "tip": "モデルが行った推論の要約", "title": "要約モード" }, - "title": "OpenAIの設定" + "title": "OpenAIの設定", + "verbosity": { + "high": "高", + "low": "低", + "medium": "中", + "tip": "モデル出力の詳細度を制御します", + "title": "詳細度" + } }, "privacy": { "enable_privacy_mode": "匿名エラーレポートとデータ統計の送信", diff --git a/src/renderer/src/i18n/locales/ru-ru.json 
b/src/renderer/src/i18n/locales/ru-ru.json index 13787ee5b8..1c44a19f45 100644 --- a/src/renderer/src/i18n/locales/ru-ru.json +++ b/src/renderer/src/i18n/locales/ru-ru.json @@ -187,6 +187,7 @@ "label": "Настройки размышлений", "low": "Меньше думать", "medium": "Среднее", + "minimal": "Минимальный", "off": "Выключить" }, "regular_phrases": { @@ -3119,7 +3120,14 @@ "tip": "Резюме рассуждений, выполненных моделью", "title": "Режим резюме" }, - "title": "Настройки OpenAI" + "title": "Настройки OpenAI", + "verbosity": { + "high": "Высокий", + "low": "Низкий", + "medium": "Средний", + "tip": "Управление степенью детализации вывода модели", + "title": "Подробность" + } }, "privacy": { "enable_privacy_mode": "Анонимная отчетность об ошибках и статистике", diff --git a/src/renderer/src/i18n/locales/zh-cn.json b/src/renderer/src/i18n/locales/zh-cn.json index 165da9373c..8da54d8125 100644 --- a/src/renderer/src/i18n/locales/zh-cn.json +++ b/src/renderer/src/i18n/locales/zh-cn.json @@ -187,6 +187,7 @@ "label": "思维链长度", "low": "浮想", "medium": "斟酌", + "minimal": "微念", "off": "关闭" }, "regular_phrases": { @@ -3119,7 +3120,14 @@ "tip": "模型执行的推理摘要", "title": "摘要模式" }, - "title": "OpenAI 设置" + "title": "OpenAI 设置", + "verbosity": { + "high": "高", + "low": "低", + "medium": "中", + "tip": "控制模型输出的详细程度", + "title": "详细程度" + } }, "privacy": { "enable_privacy_mode": "匿名发送错误报告和数据统计", diff --git a/src/renderer/src/i18n/locales/zh-tw.json b/src/renderer/src/i18n/locales/zh-tw.json index ceed2351f9..3a7c38f951 100644 --- a/src/renderer/src/i18n/locales/zh-tw.json +++ b/src/renderer/src/i18n/locales/zh-tw.json @@ -187,6 +187,7 @@ "label": "思維鏈長度", "low": "稍微思考", "medium": "正常思考", + "minimal": "最少思考", "off": "關閉" }, "regular_phrases": { @@ -3119,7 +3120,14 @@ "tip": "模型所執行的推理摘要", "title": "摘要模式" }, - "title": "OpenAI 設定" + "title": "OpenAI 設定", + "verbosity": { + "high": "高", + "low": "低", + "medium": "中", + "tip": "控制模型輸出的詳細程度", + "title": "詳細程度" + } }, "privacy": { "enable_privacy_mode": 
"匿名發送錯誤報告和資料統計", diff --git a/src/renderer/src/i18n/translate/el-gr.json b/src/renderer/src/i18n/translate/el-gr.json index 9565519fb5..1b6ef88a65 100644 --- a/src/renderer/src/i18n/translate/el-gr.json +++ b/src/renderer/src/i18n/translate/el-gr.json @@ -187,6 +187,7 @@ "label": "Μήκος λογισμικού αλυσίδας", "low": "Μικρό", "medium": "Μεσαίο", + "minimal": "Ελάχιστο", "off": "Απενεργοποίηση" }, "regular_phrases": { @@ -3119,7 +3120,14 @@ "tip": "Περίληψη συλλογισμού που εκτελείται από το μοντέλο", "title": "Λειτουργία περίληψης" }, - "title": "Ρυθμίσεις OpenAI" + "title": "Ρυθμίσεις OpenAI", + "verbosity": { + "high": "Υψηλό", + "low": "Χαμηλό", + "medium": "Μεσαίο", + "tip": "Ελέγχει το βαθμό λεπτομέρειας της εξόδου του μοντέλου.", + "title": "Λεπτομέρεια" + } }, "privacy": { "enable_privacy_mode": "Αποστολή ανώνυμων αναφορών σφαλμάτων και στατιστικών δεδομένων", diff --git a/src/renderer/src/i18n/translate/es-es.json b/src/renderer/src/i18n/translate/es-es.json index d6637609c2..c3d6f397ba 100644 --- a/src/renderer/src/i18n/translate/es-es.json +++ b/src/renderer/src/i18n/translate/es-es.json @@ -187,6 +187,7 @@ "label": "Longitud de Cadena de Razonamiento", "low": "Corto", "medium": "Medio", + "minimal": "Mínimo", "off": "Apagado" }, "regular_phrases": { @@ -3119,7 +3120,14 @@ "tip": "Resumen de la inferencia realizada por el modelo", "title": "Modo de resumen" }, - "title": "Configuración de OpenAI" + "title": "Configuración de OpenAI", + "verbosity": { + "high": "Alto", + "low": "Bajo", + "medium": "Medio", + "tip": "Controlar el nivel de detalle de la salida del modelo", + "title": "Nivel de detalle" + } }, "privacy": { "enable_privacy_mode": "Enviar informes de errores y estadísticas de forma anónima", diff --git a/src/renderer/src/i18n/translate/fr-fr.json b/src/renderer/src/i18n/translate/fr-fr.json index d8f8c4d9fa..ac1321a0ba 100644 --- a/src/renderer/src/i18n/translate/fr-fr.json +++ b/src/renderer/src/i18n/translate/fr-fr.json @@ -187,6 +187,7 @@ 
"label": "Longueur de la chaîne de raisonnement", "low": "Court", "medium": "Moyen", + "minimal": "minimal", "off": "Off" }, "regular_phrases": { @@ -3119,7 +3120,14 @@ "tip": "Résumé des inférences effectuées par le modèle", "title": "Mode de résumé" }, - "title": "Paramètres OpenAI" + "title": "Paramètres OpenAI", + "verbosity": { + "high": "haut", + "low": "faible", + "medium": "moyen", + "tip": "Contrôler le niveau de détail de la sortie du modèle", + "title": "niveau de détail" + } }, "privacy": { "enable_privacy_mode": "Отправлять анонимные сообщения об ошибках и статистику", diff --git a/src/renderer/src/i18n/translate/pt-pt.json b/src/renderer/src/i18n/translate/pt-pt.json index 649df406f6..b49a501432 100644 --- a/src/renderer/src/i18n/translate/pt-pt.json +++ b/src/renderer/src/i18n/translate/pt-pt.json @@ -187,6 +187,7 @@ "label": "Comprimento da Cadeia de Raciocínio", "low": "Curto", "medium": "Médio", + "minimal": "mínimo", "off": "Desligado" }, "regular_phrases": { @@ -3119,7 +3120,14 @@ "tip": "Resumo do raciocínio executado pelo modelo", "title": "Modo de Resumo" }, - "title": "Configurações do OpenAI" + "title": "Configurações do OpenAI", + "verbosity": { + "high": "alto", + "low": "baixo", + "medium": "médio", + "tip": "Controlar o nível de detalhe da saída do modelo", + "title": "nível de detalhe" + } }, "privacy": { "enable_privacy_mode": "Enviar relatórios de erro e estatísticas de forma anônima", diff --git a/src/renderer/src/pages/home/Inputbar/ThinkingButton.tsx b/src/renderer/src/pages/home/Inputbar/ThinkingButton.tsx index e9176e4b50..898a12bb4d 100644 --- a/src/renderer/src/pages/home/Inputbar/ThinkingButton.tsx +++ b/src/renderer/src/pages/home/Inputbar/ThinkingButton.tsx @@ -1,9 +1,10 @@ import { MdiLightbulbAutoOutline, MdiLightbulbOffOutline, - MdiLightbulbOn10, + MdiLightbulbOn, + MdiLightbulbOn30, MdiLightbulbOn50, - MdiLightbulbOn90 + MdiLightbulbOn80 } from '@renderer/components/Icons/SVGIcon' import { useQuickPanel } from 
'@renderer/components/QuickPanel' import { getThinkModelType, isDoubaoThinkingAutoModel, MODEL_SUPPORTED_OPTIONS } from '@renderer/config/models' @@ -28,6 +29,7 @@ interface Props { // 选项转换映射表:当选项不支持时使用的替代选项 const OPTION_FALLBACK: Record = { off: 'low', // off -> low (for Gemini Pro models) + minimal: 'low', // minimal -> low (for gpt-5 and after) low: 'high', medium: 'high', // medium -> high (for Grok models) high: 'high', @@ -74,12 +76,14 @@ const ThinkingButton: FC = ({ ref, model, assistant, ToolbarButton }): Re const iconColor = isActive ? 'var(--color-link)' : 'var(--color-icon)' switch (true) { + case option === 'minimal': + return case option === 'low': - return - case option === 'medium': return + case option === 'medium': + return case option === 'high': - return + return case option === 'auto': return case option === 'off': diff --git a/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx b/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx index 7b7c88eacf..e3992d5b6b 100644 --- a/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx +++ b/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx @@ -1,11 +1,15 @@ import Selector from '@renderer/components/Selector' -import { isSupportedReasoningEffortOpenAIModel, isSupportFlexServiceTierModel } from '@renderer/config/models' +import { + isSupportedReasoningEffortOpenAIModel, + isSupportFlexServiceTierModel, + isSupportVerbosityModel +} from '@renderer/config/models' import { isSupportServiceTierProvider } from '@renderer/config/providers' import { useProvider } from '@renderer/hooks/useProvider' import { SettingDivider, SettingRow } from '@renderer/pages/settings' import { CollapsibleSettingGroup } from '@renderer/pages/settings/SettingGroup' import { RootState, useAppDispatch } from '@renderer/store' -import { setOpenAISummaryText } from '@renderer/store/settings' +import { setOpenAISummaryText, setOpenAIVerbosity } from 
'@renderer/store/settings' import { GroqServiceTiers, Model, @@ -15,6 +19,7 @@ import { ServiceTier, SystemProviderIds } from '@renderer/types' +import { OpenAIVerbosity } from '@types' import { Tooltip } from 'antd' import { CircleHelp } from 'lucide-react' import { FC, useCallback, useEffect, useMemo } from 'react' @@ -31,6 +36,7 @@ interface Props { const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, SettingRowTitleSmall }) => { const { t } = useTranslation() const { provider, updateProvider } = useProvider(providerId) + const verbosity = useSelector((state: RootState) => state.settings.openAI.verbosity) const summaryText = useSelector((state: RootState) => state.settings.openAI.summaryText) const serviceTierMode = provider.serviceTier const dispatch = useAppDispatch() @@ -39,6 +45,7 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti isSupportedReasoningEffortOpenAIModel(model) && !model.id.includes('o1-pro') && (provider.type === 'openai-response' || provider.id === 'aihubmix') + const isSupportVerbosity = isSupportVerbosityModel(model) const isSupportServiceTier = isSupportServiceTierProvider(provider) const isSupportedFlexServiceTier = isSupportFlexServiceTierModel(model) @@ -56,6 +63,13 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti [updateProvider] ) + const setVerbosity = useCallback( + (value: OpenAIVerbosity) => { + dispatch(setOpenAIVerbosity(value)) + }, + [dispatch] + ) + const summaryTextOptions = [ { value: 'auto', @@ -71,6 +85,21 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti } ] + const verbosityOptions = [ + { + value: 'low', + label: t('settings.openai.verbosity.low') + }, + { + value: 'medium', + label: t('settings.openai.verbosity.medium') + }, + { + value: 'high', + label: t('settings.openai.verbosity.high') + } + ] + const serviceTierOptions = useMemo(() => { let baseOptions: { value: ServiceTier; label: string }[] if (provider.id === 
SystemProviderIds.groq) { @@ -131,7 +160,7 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti } }, [provider.id, serviceTierMode, serviceTierOptions, setServiceTierMode]) - if (!isOpenAIReasoning && !isSupportServiceTier) { + if (!isOpenAIReasoning && !isSupportServiceTier && !isSupportVerbosity) { return null } @@ -139,26 +168,28 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti {isSupportServiceTier && ( - - - {t('settings.openai.service_tier.title')}{' '} - - - - - { - setServiceTierMode(value as OpenAIServiceTier) - }} - options={serviceTierOptions} - placeholder={t('settings.openai.service_tier.auto')} - /> - + <> + + + {t('settings.openai.service_tier.title')}{' '} + + + + + { + setServiceTierMode(value as OpenAIServiceTier) + }} + options={serviceTierOptions} + placeholder={t('settings.openai.service_tier.auto')} + /> + + {(isOpenAIReasoning || isSupportVerbosity) && } + )} {isOpenAIReasoning && ( <> - {t('settings.openai.summary_text_mode.title')}{' '} @@ -174,8 +205,26 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti options={summaryTextOptions} /> + {isSupportVerbosity && } )} + {isSupportVerbosity && ( + + + {t('settings.openai.verbosity.title')}{' '} + + + + + { + setVerbosity(value as OpenAIVerbosity) + }} + options={verbosityOptions} + /> + + )} diff --git a/src/renderer/src/services/__tests__/ApiService.test.ts b/src/renderer/src/services/__tests__/ApiService.test.ts index 1d2a2415f3..ea5006678d 100644 --- a/src/renderer/src/services/__tests__/ApiService.test.ts +++ b/src/renderer/src/services/__tests__/ApiService.test.ts @@ -1222,7 +1222,9 @@ const mockOpenaiApiClient = { type: 'function' } } else if (fun?.arguments) { - toolCalls[index].function.arguments += fun.arguments + if (toolCalls[index] && toolCalls[index].type === 'function' && 'function' in toolCalls[index]) { + toolCalls[index].function.arguments += fun.arguments + } } } else { toolCalls.push(toolCall) diff 
--git a/src/renderer/src/store/index.ts b/src/renderer/src/store/index.ts index 1789d2fdbc..9778b6e773 100644 --- a/src/renderer/src/store/index.ts +++ b/src/renderer/src/store/index.ts @@ -60,7 +60,7 @@ const persistedReducer = persistReducer( { key: 'cherry-studio', storage, - version: 129, + version: 130, blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs'], migrate }, diff --git a/src/renderer/src/store/migrate.ts b/src/renderer/src/store/migrate.ts index cdabaddc7c..f5613a52a2 100644 --- a/src/renderer/src/store/migrate.ts +++ b/src/renderer/src/store/migrate.ts @@ -1438,7 +1438,8 @@ const migrateConfig = { try { state.settings.openAI = { summaryText: 'off', - serviceTier: 'auto' + serviceTier: 'auto', + verbosity: 'medium' } state.settings.codeExecution = { @@ -1530,7 +1531,8 @@ const migrateConfig = { if (!state.settings.openAI) { state.settings.openAI = { summaryText: 'off', - serviceTier: 'auto' + serviceTier: 'auto', + verbosity: 'medium' } } return state @@ -2072,12 +2074,22 @@ const migrateConfig = { updateProvider(state, p.id, { apiOptions: changes }) } }) - return state } catch (error) { logger.error('migrate 129 error', error as Error) return state } + }, + '130': (state: RootState) => { + try { + if (state.settings && state.settings.openAI && !state.settings.openAI.verbosity) { + state.settings.openAI.verbosity = 'medium' + } + return state + } catch (error) { + logger.error('migrate 130 error', error as Error) + return state + } } } diff --git a/src/renderer/src/store/settings.ts b/src/renderer/src/store/settings.ts index 992ae306f5..d5f354e722 100644 --- a/src/renderer/src/store/settings.ts +++ b/src/renderer/src/store/settings.ts @@ -15,6 +15,7 @@ import { } from '@renderer/types' import { uuid } from '@renderer/utils' import { UpgradeChannel } from '@shared/config/constant' +import { OpenAIVerbosity } from '@types' import { RemoteSyncState } from './backup' @@ -194,6 +195,7 @@ export interface SettingsState { summaryText: 
OpenAISummaryText /** @deprecated 现在该设置迁移到Provider对象中 */ serviceTier: OpenAIServiceTier + verbosity: OpenAIVerbosity } // Notification notification: { @@ -365,7 +367,8 @@ export const initialState: SettingsState = { // OpenAI openAI: { summaryText: 'off', - serviceTier: 'auto' + serviceTier: 'auto', + verbosity: 'medium' }, notification: { assistant: false, @@ -775,6 +778,9 @@ const settingsSlice = createSlice({ setOpenAISummaryText: (state, action: PayloadAction) => { state.openAI.summaryText = action.payload }, + setOpenAIVerbosity: (state, action: PayloadAction) => { + state.openAI.verbosity = action.payload + }, setNotificationSettings: (state, action: PayloadAction) => { state.notification = action.payload }, @@ -939,6 +945,7 @@ export const { setEnableBackspaceDeleteModel, setDisableHardwareAcceleration, setOpenAISummaryText, + setOpenAIVerbosity, setNotificationSettings, // Local backup settings setLocalBackupDir, diff --git a/src/renderer/src/types/index.ts b/src/renderer/src/types/index.ts index 82e93b7aa2..ebd96c6bb0 100644 --- a/src/renderer/src/types/index.ts +++ b/src/renderer/src/types/index.ts @@ -52,10 +52,11 @@ export type AssistantSettingCustomParameters = { type: 'string' | 'number' | 'boolean' | 'json' } -export type ReasoningEffortOption = 'low' | 'medium' | 'high' | 'auto' +export type ReasoningEffortOption = NonNullable | 'auto' export type ThinkingOption = ReasoningEffortOption | 'off' export type ThinkingModelType = | 'default' + | 'gpt5' | 'grok' | 'gemini' | 'gemini_pro' @@ -87,6 +88,7 @@ export function isThinkModelType(type: string): type is ThinkingModelType { } export const EFFORT_RATIO: EffortRatio = { + minimal: 0.05, low: 0.05, medium: 0.5, high: 0.8, @@ -946,6 +948,8 @@ export interface StoreSyncAction { } } +export type OpenAIVerbosity = 'high' | 'medium' | 'low' + export type OpenAISummaryText = 'auto' | 'concise' | 'detailed' | 'off' export const OpenAIServiceTiers = { diff --git a/src/renderer/src/utils/mcp-tools.ts 
b/src/renderer/src/utils/mcp-tools.ts index 973f8fc088..a1f1a2b095 100644 --- a/src/renderer/src/utils/mcp-tools.ts +++ b/src/renderer/src/utils/mcp-tools.ts @@ -78,8 +78,10 @@ export function openAIToolsToMcpTool( try { if ('name' in toolCall) { toolName = toolCall.name - } else { + } else if (toolCall.type === 'function' && 'function' in toolCall) { toolName = toolCall.function.name + } else { + throw new Error('Unknown tool call type') } } catch (error) { logger.error(`Error parsing tool call: ${toolCall}`, error as Error) diff --git a/yarn.lock b/yarn.lock index 7dd043f515..e68e27ab50 100644 --- a/yarn.lock +++ b/yarn.lock @@ -7786,7 +7786,7 @@ __metadata: notion-helper: "npm:^1.3.22" npx-scope-finder: "npm:^1.2.0" officeparser: "npm:^4.2.0" - openai: "patch:openai@npm%3A5.12.0#~/.yarn/patches/openai-npm-5.12.0-a06a6369b2.patch" + openai: "patch:openai@npm%3A5.12.2#~/.yarn/patches/openai-npm-5.12.2-30b075401c.patch" os-proxy-config: "npm:^1.1.2" p-queue: "npm:^8.1.0" pdf-lib: "npm:^1.17.1" @@ -16688,9 +16688,9 @@ __metadata: languageName: node linkType: hard -"openai@npm:5.12.0": - version: 5.12.0 - resolution: "openai@npm:5.12.0" +"openai@npm:5.12.2": + version: 5.12.2 + resolution: "openai@npm:5.12.2" peerDependencies: ws: ^8.18.0 zod: ^3.23.8 @@ -16701,13 +16701,13 @@ __metadata: optional: true bin: openai: bin/cli - checksum: 10c0/adab04e90cae8f393f76c007f98c0636af97a280fb05766b0cee5ab202c802db01c113d0ce0dfea42e1a1fe3b08c9a3881b6eea9a0b0703375f487688aaca1fc + checksum: 10c0/7737b9b24edc81fcf9e6dcfb18a196cc0f8e29b6e839adf06a2538558c03908e3aa4cd94901b1a7f4a9dd62676fe9e34d6202281b2395090d998618ea1614c0c languageName: node linkType: hard -"openai@patch:openai@npm%3A5.12.0#~/.yarn/patches/openai-npm-5.12.0-a06a6369b2.patch": - version: 5.12.0 - resolution: "openai@patch:openai@npm%3A5.12.0#~/.yarn/patches/openai-npm-5.12.0-a06a6369b2.patch::version=5.12.0&hash=d96796" +"openai@patch:openai@npm%3A5.12.2#~/.yarn/patches/openai-npm-5.12.2-30b075401c.patch": + 
version: 5.12.2 + resolution: "openai@patch:openai@npm%3A5.12.2#~/.yarn/patches/openai-npm-5.12.2-30b075401c.patch::version=5.12.2&hash=ad5d10" peerDependencies: ws: ^8.18.0 zod: ^3.23.8 @@ -16718,7 +16718,7 @@ __metadata: optional: true bin: openai: bin/cli - checksum: 10c0/207f70a43839d34f6ad3322a4bdf6d755ac923ca9c6b5fb49bd13263d816c5acb1a501228b9124b1f72eae2f7efffc8890e2d901907b3c8efc2fee3f8a273cec + checksum: 10c0/2964a1c88a98cf169c9b73e8cd6776c03c8f3103fee30961c6953e5d995ad57a697e2179615999356809349186df6496abae105928ff7ce0229e5016dec87cb3 languageName: node linkType: hard