diff --git a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts b/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts index d839da896..73a5bed4f 100644 --- a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts +++ b/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts @@ -142,6 +142,10 @@ export class OpenAIAPIClient extends OpenAIBaseClient< return { thinking: { type: reasoningEffort ? 'enabled' : 'disabled' } } } + if (reasoningEffort === 'default') { + return {} + } + if (!reasoningEffort) { // DeepSeek hybrid inference models, v3.1 and maybe more in the future // 不同的 provider 有不同的思考控制方式,在这里统一解决 @@ -303,7 +307,7 @@ export class OpenAIAPIClient extends OpenAIBaseClient< // Grok models/Perplexity models/OpenAI models if (isSupportedReasoningEffortModel(model)) { // 检查模型是否支持所选选项 - const supportedOptions = getModelSupportedReasoningEffortOptions(model) + const supportedOptions = getModelSupportedReasoningEffortOptions(model)?.filter((option) => option !== 'default') if (supportedOptions?.includes(reasoningEffort)) { return { reasoning_effort: reasoningEffort diff --git a/src/renderer/src/aiCore/prepareParams/__tests__/model-parameters.test.ts b/src/renderer/src/aiCore/prepareParams/__tests__/model-parameters.test.ts index 70b4ac84b..a4f345e3e 100644 --- a/src/renderer/src/aiCore/prepareParams/__tests__/model-parameters.test.ts +++ b/src/renderer/src/aiCore/prepareParams/__tests__/model-parameters.test.ts @@ -18,7 +18,7 @@ vi.mock('@renderer/services/AssistantService', () => ({ toolUseMode: assistant.settings?.toolUseMode ?? 'prompt', defaultModel: assistant.defaultModel, customParameters: assistant.settings?.customParameters ?? [], - reasoning_effort: assistant.settings?.reasoning_effort, + reasoning_effort: assistant.settings?.reasoning_effort ?? 
'default', reasoning_effort_cache: assistant.settings?.reasoning_effort_cache, qwenThinkMode: assistant.settings?.qwenThinkMode }) diff --git a/src/renderer/src/aiCore/utils/__tests__/reasoning.test.ts b/src/renderer/src/aiCore/utils/__tests__/reasoning.test.ts index fec4d197e..e5561f6fc 100644 --- a/src/renderer/src/aiCore/utils/__tests__/reasoning.test.ts +++ b/src/renderer/src/aiCore/utils/__tests__/reasoning.test.ts @@ -596,7 +596,7 @@ describe('reasoning utils', () => { expect(result).toEqual({}) }) - it('should return disabled thinking when no reasoning effort', async () => { + it('should return disabled thinking when reasoning effort is none', async () => { const { isReasoningModel, isSupportedThinkingTokenClaudeModel } = await import('@renderer/config/models') vi.mocked(isReasoningModel).mockReturnValue(true) @@ -611,7 +611,9 @@ describe('reasoning utils', () => { const assistant: Assistant = { id: 'test', name: 'Test', - settings: {} + settings: { + reasoning_effort: 'none' + } } as Assistant const result = getAnthropicReasoningParams(assistant, model) @@ -675,7 +677,7 @@ describe('reasoning utils', () => { expect(result).toEqual({}) }) - it('should disable thinking for Flash models without reasoning effort', async () => { + it('should disable thinking for Flash models when reasoning effort is none', async () => { const { isReasoningModel, isSupportedThinkingTokenGeminiModel } = await import('@renderer/config/models') vi.mocked(isReasoningModel).mockReturnValue(true) @@ -690,7 +692,9 @@ describe('reasoning utils', () => { const assistant: Assistant = { id: 'test', name: 'Test', - settings: {} + settings: { + reasoning_effort: 'none' + } } as Assistant const result = getGeminiReasoningParams(assistant, model) diff --git a/src/renderer/src/aiCore/utils/reasoning.ts b/src/renderer/src/aiCore/utils/reasoning.ts index f18240571..10afbbaf5 100644 --- a/src/renderer/src/aiCore/utils/reasoning.ts +++ b/src/renderer/src/aiCore/utils/reasoning.ts @@ -64,7 +64,7 @@ 
export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin // reasoningEffort is not set, no extra reasoning setting // Generally, for every model which supports reasoning control, the reasoning effort won't be undefined. // It's for some reasoning models that don't support reasoning control, such as deepseek reasoner. - if (!reasoningEffort) { + if (!reasoningEffort || reasoningEffort === 'default') { return {} } @@ -329,7 +329,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin // Grok models/Perplexity models/OpenAI models, use reasoning_effort if (isSupportedReasoningEffortModel(model)) { // 检查模型是否支持所选选项 - const supportedOptions = getModelSupportedReasoningEffortOptions(model) + const supportedOptions = getModelSupportedReasoningEffortOptions(model)?.filter((option) => option !== 'default') if (supportedOptions?.includes(reasoningEffort)) { return { reasoningEffort @@ -427,7 +427,7 @@ export function getOpenAIReasoningParams( let reasoningEffort = assistant?.settings?.reasoning_effort - if (!reasoningEffort) { + if (!reasoningEffort || reasoningEffort === 'default') { return {} } @@ -505,7 +505,11 @@ export function getAnthropicReasoningParams( const reasoningEffort = assistant?.settings?.reasoning_effort - if (reasoningEffort === undefined || reasoningEffort === 'none') { + if (!reasoningEffort || reasoningEffort === 'default') { + return {} + } + + if (reasoningEffort === 'none') { return { thinking: { type: 'disabled' @@ -560,6 +564,10 @@ export function getGeminiReasoningParams( const reasoningEffort = assistant?.settings?.reasoning_effort + if (!reasoningEffort || reasoningEffort === 'default') { + return {} + } + // Gemini 推理参数 if (isSupportedThinkingTokenGeminiModel(model)) { if (reasoningEffort === undefined || reasoningEffort === 'none') { @@ -620,10 +628,6 @@ export function getXAIReasoningParams(assistant: Assistant, model: Model): Pick< const { reasoning_effort: reasoningEffort } = 
getAssistantSettings(assistant) - if (!reasoningEffort || reasoningEffort === 'none') { - return {} - } - switch (reasoningEffort) { case 'auto': case 'minimal': @@ -634,6 +638,10 @@ export function getXAIReasoningParams(assistant: Assistant, model: Model): Pick< return { reasoningEffort } case 'xhigh': return { reasoningEffort: 'high' } + case 'default': + case 'none': + default: + return {} } } @@ -650,7 +658,7 @@ export function getBedrockReasoningParams( const reasoningEffort = assistant?.settings?.reasoning_effort - if (reasoningEffort === undefined) { + if (reasoningEffort === undefined || reasoningEffort === 'default') { return {} } diff --git a/src/renderer/src/components/Icons/SVGIcon.tsx b/src/renderer/src/components/Icons/SVGIcon.tsx index ad503f0e3..82be6b340 100644 --- a/src/renderer/src/components/Icons/SVGIcon.tsx +++ b/src/renderer/src/components/Icons/SVGIcon.tsx @@ -113,6 +113,18 @@ export function MdiLightbulbOn(props: SVGProps) { ) } +export function MdiLightbulbQuestion(props: SVGProps) { + // {/* Icon from Material Design Icons by Pictogrammers - https://github.com/Templarian/MaterialDesign/blob/master/LICENSE */} + return ( + + + + ) +} + export function BingLogo(props: SVGProps) { return ( { it('restricts GPT-5 Pro reasoning to high effort only', () => { expect(MODEL_SUPPORTED_REASONING_EFFORT.gpt5pro).toEqual(['high']) - expect(MODEL_SUPPORTED_OPTIONS.gpt5pro).toEqual(['high']) + expect(MODEL_SUPPORTED_OPTIONS.gpt5pro).toEqual(['default', 'high']) }) }) @@ -1672,10 +1672,26 @@ describe('getModelSupportedReasoningEffortOptions', () => { describe('OpenAI models', () => { it('should return correct options for o-series models', () => { - expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3' }))).toEqual(['low', 'medium', 'high']) - expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3-mini' }))).toEqual(['low', 'medium', 'high']) - expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o4' 
}))).toEqual(['low', 'medium', 'high']) + expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3' }))).toEqual([ + 'default', + 'low', + 'medium', + 'high' + ]) + expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3-mini' }))).toEqual([ + 'default', + 'low', + 'medium', + 'high' + ]) + expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o4' }))).toEqual([ + 'default', + 'low', + 'medium', + 'high' + ]) expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-oss-reasoning' }))).toEqual([ + 'default', 'low', 'medium', 'high' @@ -1685,17 +1701,22 @@ describe('getModelSupportedReasoningEffortOptions', () => { it('should return correct options for deep research models', () => { // Note: Deep research models need to be actual OpenAI reasoning models to be detected // 'sonar-deep-research' from Perplexity is the primary deep research model - expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual(['medium']) + expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual([ + 'default', + 'medium' + ]) }) it('should return correct options for GPT-5 models', () => { expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5' }))).toEqual([ + 'default', 'minimal', 'low', 'medium', 'high' ]) expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-preview' }))).toEqual([ + 'default', 'minimal', 'low', 'medium', @@ -1704,17 +1725,22 @@ describe('getModelSupportedReasoningEffortOptions', () => { }) it('should return correct options for GPT-5 Pro models', () => { - expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro' }))).toEqual(['high']) - expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro-preview' }))).toEqual(['high']) + expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro' }))).toEqual(['default', 'high']) + 
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro-preview' }))).toEqual([ + 'default', + 'high' + ]) }) it('should return correct options for GPT-5 Codex models', () => { expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-codex' }))).toEqual([ + 'default', 'low', 'medium', 'high' ]) expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-codex-mini' }))).toEqual([ + 'default', 'low', 'medium', 'high' @@ -1723,18 +1749,21 @@ describe('getModelSupportedReasoningEffortOptions', () => { it('should return correct options for GPT-5.1 models', () => { expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1' }))).toEqual([ + 'default', 'none', 'low', 'medium', 'high' ]) expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-preview' }))).toEqual([ + 'default', 'none', 'low', 'medium', 'high' ]) expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-mini' }))).toEqual([ + 'default', 'none', 'low', 'medium', @@ -1744,11 +1773,13 @@ describe('getModelSupportedReasoningEffortOptions', () => { it('should return correct options for GPT-5.1 Codex models', () => { expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-codex' }))).toEqual([ + 'default', 'none', 'medium', 'high' ]) expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-codex-mini' }))).toEqual([ + 'default', 'none', 'medium', 'high' @@ -1758,19 +1789,24 @@ describe('getModelSupportedReasoningEffortOptions', () => { describe('Grok models', () => { it('should return correct options for Grok 3 mini', () => { - expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'grok-3-mini' }))).toEqual(['low', 'high']) + expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'grok-3-mini' }))).toEqual([ + 'default', + 'low', + 'high' + ]) }) it('should return correct options for Grok 4 Fast', () => { expect( 
getModelSupportedReasoningEffortOptions(createModel({ id: 'grok-4-fast', provider: 'openrouter' })) - ).toEqual(['none', 'auto']) + ).toEqual(['default', 'none', 'auto']) }) }) describe('Gemini models', () => { it('should return correct options for Gemini Flash models', () => { expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-2.5-flash-latest' }))).toEqual([ + 'default', 'none', 'low', 'medium', @@ -1778,6 +1814,7 @@ describe('getModelSupportedReasoningEffortOptions', () => { 'auto' ]) expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-flash-latest' }))).toEqual([ + 'default', 'none', 'low', 'medium', @@ -1788,12 +1825,14 @@ describe('getModelSupportedReasoningEffortOptions', () => { it('should return correct options for Gemini Pro models', () => { expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-2.5-pro-latest' }))).toEqual([ + 'default', 'low', 'medium', 'high', 'auto' ]) expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-pro-latest' }))).toEqual([ + 'default', 'low', 'medium', 'high', @@ -1803,11 +1842,13 @@ describe('getModelSupportedReasoningEffortOptions', () => { it('should return correct options for Gemini 3 models', () => { expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-flash' }))).toEqual([ + 'default', 'low', 'medium', 'high' ]) expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-pro-preview' }))).toEqual([ + 'default', 'low', 'medium', 'high' @@ -1818,24 +1859,28 @@ describe('getModelSupportedReasoningEffortOptions', () => { describe('Qwen models', () => { it('should return correct options for controllable Qwen models', () => { expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen-plus' }))).toEqual([ + 'default', 'none', 'low', 'medium', 'high' ]) expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen-turbo' }))).toEqual([ + 'default', 'none', 'low', 'medium', 'high' ]) 
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen-flash' }))).toEqual([ + 'default', 'none', 'low', 'medium', 'high' ]) expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen3-8b' }))).toEqual([ + 'default', 'none', 'low', 'medium', @@ -1853,11 +1898,13 @@ describe('getModelSupportedReasoningEffortOptions', () => { describe('Doubao models', () => { it('should return correct options for auto-thinking Doubao models', () => { expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-seed-1.6' }))).toEqual([ + 'default', 'none', 'auto', 'high' ]) expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-1-5-thinking-pro-m' }))).toEqual([ + 'default', 'none', 'auto', 'high' @@ -1866,12 +1913,14 @@ describe('getModelSupportedReasoningEffortOptions', () => { it('should return correct options for Doubao models after 251015', () => { expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-seed-1-6-251015' }))).toEqual([ + 'default', 'minimal', 'low', 'medium', 'high' ]) expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-seed-1-6-lite-251015' }))).toEqual([ + 'default', 'minimal', 'low', 'medium', @@ -1881,6 +1930,7 @@ describe('getModelSupportedReasoningEffortOptions', () => { it('should return correct options for other Doubao thinking models', () => { expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toEqual([ + 'default', 'none', 'high' ]) @@ -1889,28 +1939,43 @@ describe('getModelSupportedReasoningEffortOptions', () => { describe('Other providers', () => { it('should return correct options for Hunyuan models', () => { - expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'hunyuan-a13b' }))).toEqual(['none', 'auto']) + expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'hunyuan-a13b' }))).toEqual([ + 'default', + 'none', + 'auto' + ]) }) it('should return correct options for Zhipu models', 
() => { - expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.5' }))).toEqual(['none', 'auto']) - expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.6' }))).toEqual(['none', 'auto']) + expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.5' }))).toEqual([ + 'default', + 'none', + 'auto' + ]) + expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.6' }))).toEqual([ + 'default', + 'none', + 'auto' + ]) }) it('should return correct options for Perplexity models', () => { - expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual(['medium']) + expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual([ + 'default', + 'medium' + ]) }) it('should return correct options for DeepSeek hybrid models', () => { expect( getModelSupportedReasoningEffortOptions(createModel({ id: 'deepseek-v3.1', provider: 'deepseek' })) - ).toEqual(['none', 'auto']) + ).toEqual(['default', 'none', 'auto']) expect( getModelSupportedReasoningEffortOptions(createModel({ id: 'deepseek-v3.2', provider: 'openrouter' })) - ).toEqual(['none', 'auto']) + ).toEqual(['default', 'none', 'auto']) expect( getModelSupportedReasoningEffortOptions(createModel({ id: 'deepseek-chat', provider: 'deepseek' })) - ).toEqual(['none', 'auto']) + ).toEqual(['default', 'none', 'auto']) }) }) @@ -1925,7 +1990,7 @@ describe('getModelSupportedReasoningEffortOptions', () => { provider: 'openrouter' }) ) - ).toEqual(['none', 'auto']) + ).toEqual(['default', 'none', 'auto']) expect( getModelSupportedReasoningEffortOptions( @@ -1934,7 +1999,7 @@ describe('getModelSupportedReasoningEffortOptions', () => { name: 'gpt-5.1' }) ) - ).toEqual(['none', 'low', 'medium', 'high']) + ).toEqual(['default', 'none', 'low', 'medium', 'high']) // Qwen models work well for name-based fallback expect( @@ -1944,7 +2009,7 @@ describe('getModelSupportedReasoningEffortOptions', () => { name: 
'qwen-plus' }) ) - ).toEqual(['none', 'low', 'medium', 'high']) + ).toEqual(['default', 'none', 'low', 'medium', 'high']) }) it('should use id result when id matches', () => { @@ -1955,7 +2020,7 @@ describe('getModelSupportedReasoningEffortOptions', () => { name: 'Different Name' }) ) - ).toEqual(['none', 'low', 'medium', 'high']) + ).toEqual(['default', 'none', 'low', 'medium', 'high']) expect( getModelSupportedReasoningEffortOptions( @@ -1964,20 +2029,27 @@ describe('getModelSupportedReasoningEffortOptions', () => { name: 'Some other name' }) ) - ).toEqual(['low', 'medium', 'high']) + ).toEqual(['default', 'low', 'medium', 'high']) }) }) describe('Case sensitivity', () => { it('should handle case insensitive model IDs', () => { expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'GPT-5.1' }))).toEqual([ + 'default', 'none', 'low', 'medium', 'high' ]) - expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'O3-MINI' }))).toEqual(['low', 'medium', 'high']) + expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'O3-MINI' }))).toEqual([ + 'default', + 'low', + 'medium', + 'high' + ]) expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'Gemini-2.5-Flash-Latest' }))).toEqual([ + 'default', 'none', 'low', 'medium', diff --git a/src/renderer/src/config/models/reasoning.ts b/src/renderer/src/config/models/reasoning.ts index a5e47ef3b..4b0e293f4 100644 --- a/src/renderer/src/config/models/reasoning.ts +++ b/src/renderer/src/config/models/reasoning.ts @@ -59,31 +59,31 @@ export const MODEL_SUPPORTED_REASONING_EFFORT = { // 模型类型到支持选项的映射表 export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = { - default: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const, - o: MODEL_SUPPORTED_REASONING_EFFORT.o, - openai_deep_research: MODEL_SUPPORTED_REASONING_EFFORT.openai_deep_research, - gpt5: [...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const, - gpt5pro: MODEL_SUPPORTED_REASONING_EFFORT.gpt5pro, - gpt5_codex: 
MODEL_SUPPORTED_REASONING_EFFORT.gpt5_codex, - gpt5_1: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1, - gpt5_1_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex, - gpt5_2: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_2, - gpt5_1_codex_max: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex_max, - gpt52pro: MODEL_SUPPORTED_REASONING_EFFORT.gpt52pro, - grok: MODEL_SUPPORTED_REASONING_EFFORT.grok, - grok4_fast: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const, - gemini: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const, - gemini_pro: MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro, - gemini3: MODEL_SUPPORTED_REASONING_EFFORT.gemini3, - qwen: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const, - qwen_thinking: MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking, - doubao: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const, - doubao_no_auto: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_no_auto] as const, - doubao_after_251015: MODEL_SUPPORTED_REASONING_EFFORT.doubao_after_251015, - hunyuan: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const, - zhipu: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const, - perplexity: MODEL_SUPPORTED_REASONING_EFFORT.perplexity, - deepseek_hybrid: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.deepseek_hybrid] as const + default: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const, + o: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.o] as const, + openai_deep_research: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.openai_deep_research] as const, + gpt5: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const, + gpt5pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5pro] as const, + gpt5_codex: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_codex] as const, + gpt5_1: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1] as const, + gpt5_1_codex: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex] as const, + gpt5_2: ['default', 
...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_2] as const, + gpt5_1_codex_max: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex_max] as const, + gpt52pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt52pro] as const, + grok: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.grok] as const, + grok4_fast: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const, + gemini: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const, + gemini_pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro] as const, + gemini3: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini3] as const, + qwen: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const, + qwen_thinking: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking] as const, + doubao: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const, + doubao_no_auto: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_no_auto] as const, + doubao_after_251015: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_after_251015] as const, + hunyuan: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const, + zhipu: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const, + perplexity: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.perplexity] as const, + deepseek_hybrid: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.deepseek_hybrid] as const } as const const withModelIdAndNameAsId = (model: Model, fn: (model: Model) => T): { idResult: T; nameResult: T } => { @@ -191,20 +191,28 @@ const _getModelSupportedReasoningEffortOptions = (model: Model): ReasoningEffort * - The model is null/undefined * - The model doesn't support reasoning effort or thinking tokens * + * All reasoning models support the 'default' option (always the first element), + * which represents no additional configuration for thinking behavior. 
+ * * @example - * // OpenAI o-series models support low, medium, high + * // OpenAI o-series models support default, low, medium, high * getModelSupportedReasoningEffortOptions({ id: 'o3-mini', ... }) - * // Returns: ['low', 'medium', 'high'] + * // Returns: ['default', 'low', 'medium', 'high'] + * // 'default' = no additional configuration for thinking behavior * * @example - * // GPT-5.1 models support none, low, medium, high + * // GPT-5.1 models support default, none, low, medium, high * getModelSupportedReasoningEffortOptions({ id: 'gpt-5.1', ... }) - * // Returns: ['none', 'low', 'medium', 'high'] + * // Returns: ['default', 'none', 'low', 'medium', 'high'] + * // 'default' = no additional configuration + * // 'none' = explicitly disable reasoning * * @example - * // Gemini Flash models support none, low, medium, high, auto + * // Gemini Flash models support default, none, low, medium, high, auto * getModelSupportedReasoningEffortOptions({ id: 'gemini-2.5-flash-latest', ... }) - * // Returns: ['none', 'low', 'medium', 'high', 'auto'] + * // Returns: ['default', 'none', 'low', 'medium', 'high', 'auto'] + * // 'default' = no additional configuration + * // 'auto' = let the model automatically decide * * @example * // Non-reasoning models return undefined @@ -214,7 +222,7 @@ const _getModelSupportedReasoningEffortOptions = (model: Model): ReasoningEffort * @example * // Name fallback when id doesn't match * getModelSupportedReasoningEffortOptions({ id: 'custom-id', name: 'gpt-5.1', ... 
}) - * // Returns: ['none', 'low', 'medium', 'high'] + * // Returns: ['default', 'none', 'low', 'medium', 'high'] */ export const getModelSupportedReasoningEffortOptions = ( model: Model | undefined | null diff --git a/src/renderer/src/i18n/label.ts b/src/renderer/src/i18n/label.ts index 283026708..8e2600a68 100644 --- a/src/renderer/src/i18n/label.ts +++ b/src/renderer/src/i18n/label.ts @@ -5,7 +5,7 @@ */ import { loggerService } from '@logger' -import type { AgentType, BuiltinMCPServerName, BuiltinOcrProviderId, ThinkingOption } from '@renderer/types' +import type { AgentType, BuiltinMCPServerName, BuiltinOcrProviderId } from '@renderer/types' import { BuiltinMCPServerNames } from '@renderer/types' import i18n from './index' @@ -310,20 +310,6 @@ export const getHttpMessageLabel = (key: string): string => { return getLabel(httpMessageKeyMap, key) } -const reasoningEffortOptionsKeyMap: Record = { - none: 'assistants.settings.reasoning_effort.off', - minimal: 'assistants.settings.reasoning_effort.minimal', - high: 'assistants.settings.reasoning_effort.high', - low: 'assistants.settings.reasoning_effort.low', - medium: 'assistants.settings.reasoning_effort.medium', - auto: 'assistants.settings.reasoning_effort.default', - xhigh: 'assistants.settings.reasoning_effort.xhigh' -} as const - -export const getReasoningEffortOptionsLabel = (key: string): string => { - return getLabel(reasoningEffortOptionsKeyMap, key) -} - const fileFieldKeyMap = { created_at: 'files.created_at', size: 'files.size', diff --git a/src/renderer/src/i18n/locales/en-us.json b/src/renderer/src/i18n/locales/en-us.json index 7085f523a..0085cef49 100644 --- a/src/renderer/src/i18n/locales/en-us.json +++ b/src/renderer/src/i18n/locales/en-us.json @@ -548,14 +548,23 @@ "more": "Assistant Settings", "prompt": "Prompt Settings", "reasoning_effort": { + "auto": "Auto", + "auto_description": "Flexibly determine reasoning effort", "default": "Default", + "default_description": "Depend on the model's 
default behavior, without any configuration.", "high": "High", + "high_description": "High level reasoning", "label": "Reasoning effort", "low": "Low", + "low_description": "Low level reasoning", "medium": "Medium", + "medium_description": "Medium level reasoning", "minimal": "Minimal", + "minimal_description": "Minimal reasoning", "off": "Off", - "xhigh": "Extra High" + "off_description": "Disable reasoning", + "xhigh": "Extra High", + "xhigh_description": "Extra high level reasoning" }, "regular_phrases": { "add": "Add Phrase", diff --git a/src/renderer/src/i18n/locales/zh-cn.json b/src/renderer/src/i18n/locales/zh-cn.json index 3abfe795d..2ad73a19b 100644 --- a/src/renderer/src/i18n/locales/zh-cn.json +++ b/src/renderer/src/i18n/locales/zh-cn.json @@ -548,14 +548,23 @@ "more": "助手设置", "prompt": "提示词设置", "reasoning_effort": { + "auto": "自动", + "auto_description": "灵活决定推理力度", "default": "默认", + "default_description": "依赖模型默认行为,不作任何配置", "high": "沉思", + "high_description": "高强度推理", "label": "思维链长度", "low": "浮想", + "low_description": "低强度推理", "medium": "斟酌", + "medium_description": "中强度推理", "minimal": "微念", + "minimal_description": "最小程度的思考", "off": "关闭", - "xhigh": "穷究" + "off_description": "禁用推理", + "xhigh": "穷究", + "xhigh_description": "超高强度推理" }, "regular_phrases": { "add": "添加短语", diff --git a/src/renderer/src/i18n/locales/zh-tw.json b/src/renderer/src/i18n/locales/zh-tw.json index cbff7ccd7..67b56e987 100644 --- a/src/renderer/src/i18n/locales/zh-tw.json +++ b/src/renderer/src/i18n/locales/zh-tw.json @@ -548,14 +548,23 @@ "more": "助手設定", "prompt": "提示詞設定", "reasoning_effort": { + "auto": "自動", + "auto_description": "彈性決定推理投入的心力", "default": "預設", + "default_description": "依賴模型的預設行為,無需任何配置。", "high": "盡力思考", + "high_description": "高級推理", "label": "思維鏈長度", "low": "稍微思考", + "low_description": "低階推理", "medium": "正常思考", + "medium_description": "中等程度推理", "minimal": "最少思考", + "minimal_description": "最少推理", "off": "關閉", - "xhigh": "極力思考" + "off_description": "禁用推理", + 
"xhigh": "極力思考", + "xhigh_description": "超高階推理" }, "regular_phrases": { "add": "新增短語", diff --git a/src/renderer/src/i18n/translate/de-de.json b/src/renderer/src/i18n/translate/de-de.json index d96cbf3de..3fcd84954 100644 --- a/src/renderer/src/i18n/translate/de-de.json +++ b/src/renderer/src/i18n/translate/de-de.json @@ -548,14 +548,23 @@ "more": "Assistenteneinstellungen", "prompt": "Prompt-Einstellungen", "reasoning_effort": { + "auto": "Auto", + "auto_description": "Denkaufwand flexibel bestimmen", "default": "Standard", + "default_description": "Vom Standardverhalten des Modells abhängen, ohne Konfiguration.", "high": "Tiefes Nachdenken", + "high_description": "Ganzheitliches Denken", "label": "Gedankenkettenlänge", "low": "Spontan", + "low_description": "Geringfügige Argumentation", "medium": "Überlegt", + "medium_description": "Denken auf mittlerem Niveau", "minimal": "Minimal", + "minimal_description": "Minimales Denken", "off": "Aus", - "xhigh": "Extra hoch" + "off_description": "Denken deaktivieren", + "xhigh": "Extra hoch", + "xhigh_description": "Extra hohes Denkvermögen" }, "regular_phrases": { "add": "Phrase hinzufügen", diff --git a/src/renderer/src/i18n/translate/el-gr.json b/src/renderer/src/i18n/translate/el-gr.json index 55bdab9b3..aed40bc2d 100644 --- a/src/renderer/src/i18n/translate/el-gr.json +++ b/src/renderer/src/i18n/translate/el-gr.json @@ -548,14 +548,23 @@ "more": "Ρυθμίσεις Βοηθού", "prompt": "Ρυθμίσεις προκαλύμματος", "reasoning_effort": { + "auto": "Αυτόματο", + "auto_description": "Ευέλικτος καθορισμός της προσπάθειας συλλογισμού", "default": "Προεπιλογή", + "default_description": "Εξαρτηθείτε από την προεπιλεγμένη συμπεριφορά του μοντέλου, χωρίς καμία διαμόρφωση.", "high": "Μεγάλο", + "high_description": "Υψηλού επιπέδου συλλογισμός", "label": "Μήκος λογισμικού αλυσίδας", "low": "Μικρό", + "low_description": "Χαμηλού επιπέδου συλλογιστική", "medium": "Μεσαίο", + "medium_description": "Αιτιολόγηση μεσαίου επιπέδου", "minimal":
"ελάχιστος", + "minimal_description": "Ελάχιστος συλλογισμός", "off": "Απενεργοποίηση", - "xhigh": "Εξαιρετικά Υψηλή" + "off_description": "Απενεργοποίηση λογικής", + "xhigh": "Εξαιρετικά Υψηλή", + "xhigh_description": "Εξαιρετικά υψηλού επιπέδου συλλογισμός" }, "regular_phrases": { "add": "Προσθήκη φράσης", diff --git a/src/renderer/src/i18n/translate/es-es.json b/src/renderer/src/i18n/translate/es-es.json index b87262310..b0f584b3b 100644 --- a/src/renderer/src/i18n/translate/es-es.json +++ b/src/renderer/src/i18n/translate/es-es.json @@ -548,14 +548,23 @@ "more": "Configuración del Asistente", "prompt": "Configuración de Palabras Clave", "reasoning_effort": { + "auto": "Automático", + "auto_description": "Determinar flexiblemente el esfuerzo de razonamiento", "default": "Por defecto", + "default_description": "Depender del comportamiento predeterminado del modelo, sin ninguna configuración.", "high": "Largo", + "high_description": "Razonamiento de alto nivel", "label": "Longitud de Cadena de Razonamiento", "low": "Corto", + "low_description": "Razonamiento de bajo nivel", "medium": "Medio", + "medium_description": "Razonamiento de nivel medio", "minimal": "minimal", + "minimal_description": "Razonamiento mínimo", "off": "Apagado", - "xhigh": "Extra Alta" + "off_description": "Deshabilitar razonamiento", + "xhigh": "Extra Alta", + "xhigh_description": "Razonamiento de extra alto nivel" }, "regular_phrases": { "add": "Agregar frase", diff --git a/src/renderer/src/i18n/translate/fr-fr.json b/src/renderer/src/i18n/translate/fr-fr.json index 6b64e0041..eea3ddb1c 100644 --- a/src/renderer/src/i18n/translate/fr-fr.json +++ b/src/renderer/src/i18n/translate/fr-fr.json @@ -548,14 +548,23 @@ "more": "Paramètres de l'assistant", "prompt": "Paramètres de l'invite", "reasoning_effort": { + "auto": "Auto", + "auto_description": "Déterminer de manière flexible l'effort de raisonnement", "default": "Par défaut", + "default_description": "Dépendre du comportement par défaut du
modèle, sans aucune configuration.", "high": "Long", + "high_description": "Raisonnement de haut niveau", "label": "Longueur de la chaîne de raisonnement", "low": "Court", + "low_description": "Raisonnement de bas niveau", "medium": "Moyen", + "medium_description": "Raisonnement de niveau moyen", "minimal": "minimal", + "minimal_description": "Réflexion minimale", "off": "Off", - "xhigh": "Très élevée" + "off_description": "Désactiver le raisonnement", + "xhigh": "Très élevée", + "xhigh_description": "Raisonnement de très haut niveau" }, "regular_phrases": { "add": "Добавить фразу", diff --git a/src/renderer/src/i18n/translate/ja-jp.json b/src/renderer/src/i18n/translate/ja-jp.json index 021ecf3eb..ec72e3a3a 100644 --- a/src/renderer/src/i18n/translate/ja-jp.json +++ b/src/renderer/src/i18n/translate/ja-jp.json @@ -548,14 +548,23 @@ "more": "アシスタント設定", "prompt": "プロンプト設定", "reasoning_effort": { + "auto": "自動", + "auto_description": "推論にかける労力を柔軟に調整する", "default": "デフォルト", + "default_description": "設定なしで、モデルの既定の動作に依存する。", "high": "最大限の思考", + "high_description": "高度な推論", "label": "思考連鎖の長さ", "low": "少しの思考", + "low_description": "低レベル推論", "medium": "普通の思考", + "medium_description": "中レベル推論", "minimal": "最小限の思考", + "minimal_description": "最小限の推論", "off": "オフ", - "xhigh": "超高" + "off_description": "推論を無効にする", + "xhigh": "超高", + "xhigh_description": "超高度な推論" }, "regular_phrases": { "add": "プロンプトを追加", diff --git a/src/renderer/src/i18n/translate/pt-pt.json b/src/renderer/src/i18n/translate/pt-pt.json index f02caecb3..5c3d2d191 100644 --- a/src/renderer/src/i18n/translate/pt-pt.json +++ b/src/renderer/src/i18n/translate/pt-pt.json @@ -548,14 +548,23 @@ "more": "Configurações do Assistente", "prompt": "Configurações de Prompt", "reasoning_effort": { + "auto": "Automático", + "auto_description": "Determinar flexivelmente o esforço de raciocínio", "default": "Padrão", + "default_description": "Depender do comportamento padrão do modelo, sem qualquer configuração.", "high":
"Longo", + "high_description": "Raciocínio de alto nível", "label": "Comprimento da Cadeia de Raciocínio", "low": "Curto", + "low_description": "Raciocínio de baixo nível", "medium": "Médio", + "medium_description": "Raciocínio de nível médio", "minimal": "mínimo", + "minimal_description": "Raciocínio mínimo", "off": "Desligado", - "xhigh": "Extra Alta" + "off_description": "Desabilitar raciocínio", + "xhigh": "Extra Alta", + "xhigh_description": "Raciocínio de altíssimo nível" }, "regular_phrases": { "add": "Adicionar Frase", diff --git a/src/renderer/src/i18n/translate/ru-ru.json b/src/renderer/src/i18n/translate/ru-ru.json index 9a3c0bfe6..cba8a1e5e 100644 --- a/src/renderer/src/i18n/translate/ru-ru.json +++ b/src/renderer/src/i18n/translate/ru-ru.json @@ -548,14 +548,23 @@ "more": "Настройки ассистента", "prompt": "Настройки промптов", "reasoning_effort": { + "auto": "Авто", + "auto_description": "Гибко определяйте усилие на рассуждение", "default": "По умолчанию", + "default_description": "Полагаться на поведение модели по умолчанию, без какой-либо конфигурации.", "high": "Стараюсь думать", + "high_description": "Высокоуровневое рассуждение", "label": "Настройки размышлений", "low": "Меньше думать", + "low_description": "Низкоуровневое рассуждение", "medium": "Среднее", + "medium_description": "Средний уровень рассуждения", "minimal": "минимальный", + "minimal_description": "Минимальное рассуждение", "off": "Выключить", - "xhigh": "Сверхвысокое" + "off_description": "Отключить рассуждение", + "xhigh": "Сверхвысокое", + "xhigh_description": "Высочайший уровень рассуждений" }, "regular_phrases": { "add": "Добавить подсказку", diff --git a/src/renderer/src/pages/home/Inputbar/tools/components/ThinkingButton.tsx b/src/renderer/src/pages/home/Inputbar/tools/components/ThinkingButton.tsx index bcc395c53..10b4656bc 100644 --- a/src/renderer/src/pages/home/Inputbar/tools/components/ThinkingButton.tsx +++ 
b/src/renderer/src/pages/home/Inputbar/tools/components/ThinkingButton.tsx @@ -6,7 +6,8 @@ import { MdiLightbulbOn30, MdiLightbulbOn50, MdiLightbulbOn80, - MdiLightbulbOn90 + MdiLightbulbOn90, + MdiLightbulbQuestion } from '@renderer/components/Icons/SVGIcon' import { QuickPanelReservedSymbol, useQuickPanel } from '@renderer/components/QuickPanel' import { @@ -18,7 +19,6 @@ import { MODEL_SUPPORTED_OPTIONS } from '@renderer/config/models' import { useAssistant } from '@renderer/hooks/useAssistant' -import { getReasoningEffortOptionsLabel } from '@renderer/i18n/label' import type { ToolQuickPanelApi } from '@renderer/pages/home/Inputbar/types' import type { Model, ThinkingOption } from '@renderer/types' import { Tooltip } from 'antd' @@ -88,19 +88,48 @@ const ThinkingButton: FC = ({ quickPanel, model, assistantId }): ReactEle [updateAssistantSettings, assistant.enableWebSearch, model, t] ) + const reasoningEffortOptionLabelMap = { + default: t('assistants.settings.reasoning_effort.default'), + none: t('assistants.settings.reasoning_effort.off'), + minimal: t('assistants.settings.reasoning_effort.minimal'), + high: t('assistants.settings.reasoning_effort.high'), + low: t('assistants.settings.reasoning_effort.low'), + medium: t('assistants.settings.reasoning_effort.medium'), + auto: t('assistants.settings.reasoning_effort.auto'), + xhigh: t('assistants.settings.reasoning_effort.xhigh') + } as const satisfies Record + + const reasoningEffortDescriptionMap = { + default: t('assistants.settings.reasoning_effort.default_description'), + none: t('assistants.settings.reasoning_effort.off_description'), + minimal: t('assistants.settings.reasoning_effort.minimal_description'), + low: t('assistants.settings.reasoning_effort.low_description'), + medium: t('assistants.settings.reasoning_effort.medium_description'), + high: t('assistants.settings.reasoning_effort.high_description'), + xhigh: t('assistants.settings.reasoning_effort.xhigh_description'), + auto: 
t('assistants.settings.reasoning_effort.auto_description') + } as const satisfies Record + const panelItems = useMemo(() => { // 使用表中定义的选项创建UI选项 return supportedOptions.map((option) => ({ level: option, - label: getReasoningEffortOptionsLabel(option), - description: '', + label: reasoningEffortOptionLabelMap[option], + description: reasoningEffortDescriptionMap[option], icon: ThinkingIcon({ option }), isSelected: currentReasoningEffort === option, action: () => onThinkingChange(option) })) - }, [currentReasoningEffort, supportedOptions, onThinkingChange]) + }, [ + supportedOptions, + reasoningEffortOptionLabelMap, + reasoningEffortDescriptionMap, + currentReasoningEffort, + onThinkingChange + ]) - const isThinkingEnabled = currentReasoningEffort !== undefined && currentReasoningEffort !== 'none' + const isThinkingEnabled = + currentReasoningEffort !== undefined && currentReasoningEffort !== 'none' && currentReasoningEffort !== 'default' const disableThinking = useCallback(() => { onThinkingChange('none') @@ -197,8 +226,9 @@ const ThinkingIcon = (props: { option?: ThinkingOption; isFixedReasoning?: boole case 'none': IconComponent = MdiLightbulbOffOutline break + case 'default': default: - IconComponent = MdiLightbulbOffOutline + IconComponent = MdiLightbulbQuestion break } } diff --git a/src/renderer/src/services/AssistantService.ts b/src/renderer/src/services/AssistantService.ts index 233b3c19c..91a95d691 100644 --- a/src/renderer/src/services/AssistantService.ts +++ b/src/renderer/src/services/AssistantService.ts @@ -38,7 +38,8 @@ export const DEFAULT_ASSISTANT_SETTINGS = { enableTopP: false, // It would gracefully fallback to prompt if not supported by model. 
toolUseMode: 'function', - customParameters: [] + customParameters: [], + reasoning_effort: 'default' } as const satisfies AssistantSettings export function getDefaultAssistant(): Assistant { @@ -186,7 +187,7 @@ export const getAssistantSettings = (assistant: Assistant): AssistantSettings => streamOutput: assistant?.settings?.streamOutput ?? true, toolUseMode: assistant?.settings?.toolUseMode ?? 'function', defaultModel: assistant?.defaultModel ?? undefined, - reasoning_effort: assistant?.settings?.reasoning_effort ?? undefined, + reasoning_effort: assistant?.settings?.reasoning_effort ?? 'default', customParameters: assistant?.settings?.customParameters ?? [] } } diff --git a/src/renderer/src/store/index.ts b/src/renderer/src/store/index.ts index b29b4d108..51d70ef6d 100644 --- a/src/renderer/src/store/index.ts +++ b/src/renderer/src/store/index.ts @@ -67,7 +67,7 @@ const persistedReducer = persistReducer( { key: 'cherry-studio', storage, - version: 186, + version: 187, blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs', 'toolPermissions'], migrate }, diff --git a/src/renderer/src/store/migrate.ts b/src/renderer/src/store/migrate.ts index 03510133e..f085db230 100644 --- a/src/renderer/src/store/migrate.ts +++ b/src/renderer/src/store/migrate.ts @@ -3038,6 +3038,20 @@ const migrateConfig = { logger.error('migrate 186 error', error as Error) return state } + }, + '187': (state: RootState) => { + try { + state.assistants.assistants.forEach((assistant) => { + if (assistant.settings && assistant.settings.reasoning_effort === undefined) { + assistant.settings.reasoning_effort = 'default' + } + }) + logger.info('migrate 187 success') + return state + } catch (error) { + logger.error('migrate 187 error', error as Error) + return state + } } } diff --git a/src/renderer/src/types/index.ts b/src/renderer/src/types/index.ts index 197d21779..c9dc647ac 100644 --- a/src/renderer/src/types/index.ts +++ b/src/renderer/src/types/index.ts @@ -108,7 +108,7 @@ const 
ThinkModelTypes = [ 'deepseek_hybrid' ] as const -export type ReasoningEffortOption = NonNullable | 'auto' +export type ReasoningEffortOption = NonNullable | 'auto' | 'default' export type ThinkingOption = ReasoningEffortOption export type ThinkingModelType = (typeof ThinkModelTypes)[number] export type ThinkingOptionConfig = Record @@ -120,6 +120,8 @@ export function isThinkModelType(type: string): type is ThinkingModelType { } export const EFFORT_RATIO: EffortRatio = { + // 'default' is not expected to be used. + default: 0, none: 0.01, minimal: 0.05, low: 0.05, @@ -140,12 +142,11 @@ export type AssistantSettings = { streamOutput: boolean defaultModel?: Model customParameters?: AssistantSettingCustomParameters[] - reasoning_effort?: ReasoningEffortOption - /** 保留上一次使用思考模型时的 reasoning effort, 在从非思考模型切换到思考模型时恢复. - * - * TODO: 目前 reasoning_effort === undefined 有两个语义,有的场景是显式关闭思考,有的场景是不传参。 - * 未来应该重构思考控制,将启用/关闭思考和思考选项分离,这样就不用依赖 cache 了。 - * + reasoning_effort: ReasoningEffortOption + /** + * Preserve the effective reasoning effort (not 'default') from the last use of a thinking model which supports thinking control, + * and restore it when switching back from a non-thinking or fixed reasoning model. + * FIXME: It should be managed by external cache service instead of being stored in the assistant */ reasoning_effort_cache?: ReasoningEffortOption qwenThinkMode?: boolean