fix(Qwen3): Add Qwen3 Model Thinking Mode Switch in Thinking Button(#5781)

* feat(Qwen3): Add Qwen3 Model Thinking Mode Switch

- Add a thinking mode switch for the Qwen3 model on the settings page,
- Enabled: Generates thinking content. Disabled: Does not generate thinking content.

This feature is implemented by adding new settings items and corresponding logic.

* docs(i18n): Add multilingual translations for Qwen3 model's thinking mode

* refactor(Qwen3): Remove Qwen thinking mode related code from SettingTab

- Remove Qwen thinking mode related state and logic from the SettingsTab component
- Integrate the Qwen 3 thinking mode switch logic into the ThinkingButton component to simplify the code and improve maintainability

* refactor(OpenAICompatibleProvider): Extract qwen3 handling logic to ModelMessageService

Move the postsuffix handling logic in OpenAICompatibleProvider to ModelMessageService to improve code maintainability and reusability

* docs(i18n): Remove Qwen3 model-related translations

Remove the translation content of the unused Qwen3 model's thinking mode and its prompts
This commit is contained in:
jwcrystal 2025-05-09 22:21:04 +08:00 committed by GitHub
parent 3a36da1bf9
commit ac0651a9f3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 93 additions and 8 deletions

View File

@ -6,7 +6,11 @@ import {
MdiLightbulbOn90
} from '@renderer/components/Icons/SVGIcon'
import { useQuickPanel } from '@renderer/components/QuickPanel'
import { isSupportedReasoningEffortGrokModel, isSupportedThinkingTokenGeminiModel } from '@renderer/config/models'
import {
isSupportedReasoningEffortGrokModel,
isSupportedThinkingTokenGeminiModel,
isSupportedThinkingTokenQwenModel
} from '@renderer/config/models'
import { useAssistant } from '@renderer/hooks/useAssistant'
import { Assistant, Model, ReasoningEffortOptions } from '@renderer/types'
import { Tooltip } from 'antd'
@ -30,7 +34,8 @@ interface Props {
const MODEL_SUPPORTED_OPTIONS: Record<string, ThinkingOption[]> = {
default: ['off', 'low', 'medium', 'high'],
grok: ['off', 'low', 'high'],
gemini: ['off', 'low', 'medium', 'high', 'auto']
gemini: ['off', 'low', 'medium', 'high', 'auto'],
qwen: ['off', 'low', 'medium', 'high', 'auto']
}
// 选项转换映射表:当选项不支持时使用的替代选项
@ -49,6 +54,7 @@ const ThinkingButton: FC<Props> = ({ ref, model, assistant, ToolbarButton }): Re
const isGrokModel = isSupportedReasoningEffortGrokModel(model)
const isGeminiModel = isSupportedThinkingTokenGeminiModel(model)
const isQwenModel = isSupportedThinkingTokenQwenModel(model)
const currentReasoningEffort = useMemo(() => {
return assistant.settings?.reasoning_effort || 'off'
@ -58,8 +64,9 @@ const ThinkingButton: FC<Props> = ({ ref, model, assistant, ToolbarButton }): Re
const modelType = useMemo(() => {
if (isGeminiModel) return 'gemini'
if (isGrokModel) return 'grok'
if (isQwenModel) return 'qwen'
return 'default'
}, [isGeminiModel, isGrokModel])
}, [isGeminiModel, isGrokModel, isQwenModel])
// 获取当前模型支持的选项
const supportedOptions = useMemo(() => {
@ -73,7 +80,8 @@ const ThinkingButton: FC<Props> = ({ ref, model, assistant, ToolbarButton }): Re
const fallbackOption = OPTION_FALLBACK[currentReasoningEffort as ThinkingOption]
updateAssistantSettings({
reasoning_effort: fallbackOption === 'off' ? undefined : fallbackOption
reasoning_effort: fallbackOption === 'off' ? undefined : fallbackOption,
qwenThinkMode: fallbackOption === 'off'
})
}
}, [currentReasoningEffort, supportedOptions, updateAssistantSettings, model.id])
@ -103,12 +111,14 @@ const ThinkingButton: FC<Props> = ({ ref, model, assistant, ToolbarButton }): Re
// 然后更新设置
if (!isEnabled) {
updateAssistantSettings({
reasoning_effort: undefined
reasoning_effort: undefined,
qwenThinkMode: false
})
return
}
updateAssistantSettings({
reasoning_effort: option
reasoning_effort: option,
qwenThinkMode: true
})
return
},

View File

@ -25,7 +25,7 @@ import {
filterEmptyMessages,
filterUserRoleStartMessages
} from '@renderer/services/MessagesService'
import { processReqMessages } from '@renderer/services/ModelMessageService'
import { processPostsuffixQwen3Model, processReqMessages } from '@renderer/services/ModelMessageService'
import store from '@renderer/store'
import {
Assistant,
@ -401,6 +401,20 @@ export default class OpenAICompatibleProvider extends BaseOpenAiProvider {
const { signal } = abortController
await this.checkIsCopilot()
const lastUserMsg = userMessages.findLast((m) => m.role === 'user')
if (lastUserMsg) {
const postsuffix = '/no_think'
// qwenThinkMode === true 表示思考模式啓用,此時不應添加 /no_think如果存在則移除
const qwenThinkModeEnabled = assistant.settings?.qwenThinkMode === true
const currentContent = lastUserMsg.content // content 類型string | ChatCompletionContentPart[] | null
lastUserMsg.content = processPostsuffixQwen3Model(
currentContent,
postsuffix,
qwenThinkModeEnabled
) as ChatCompletionContentPart[]
}
//当 systemMessage 内容为空时不发送 systemMessage
let reqMessages: ChatCompletionMessageParam[]
if (!systemMessage.content) {

View File

@ -1,5 +1,5 @@
import { Model } from '@renderer/types'
import { ChatCompletionMessageParam } from 'openai/resources'
import { ChatCompletionContentPart, ChatCompletionContentPartText, ChatCompletionMessageParam } from 'openai/resources'
export function processReqMessages(
model: Model,
@ -40,3 +40,63 @@ function interleaveUserAndAssistantMessages(messages: ChatCompletionMessageParam
return processedMessages
}
// Process postsuffix for Qwen3 model
/**
 * Adds or removes the Qwen3 thinking-mode postsuffix (e.g. '/no_think') on the
 * last textual portion of a chat message.
 *
 * @param content - Message content: a plain string, an array of content parts,
 *   or null.
 * @param postsuffix - The suffix that disables thinking (e.g. '/no_think').
 * @param qwenThinkModeEnabled - True when thinking mode is enabled; the suffix
 *   is then stripped. False when thinking is disabled; the suffix is appended.
 * @returns The adjusted content. NOTE: when `content` is an array it is
 *   mutated in place and the same reference is returned.
 */
export function processPostsuffixQwen3Model(
  content: string | ChatCompletionContentPart[] | null,
  postsuffix: string,
  qwenThinkModeEnabled: boolean
): string | ChatCompletionContentPart[] | null {
  if (typeof content === 'string') {
    if (qwenThinkModeEnabled) {
      // Thinking enabled: strip the suffix if present (and trailing whitespace before it)
      if (content.endsWith(postsuffix)) {
        return content.substring(0, content.length - postsuffix.length).trimEnd()
      }
    } else if (!content.endsWith(postsuffix)) {
      // Thinking disabled: append the suffix exactly once
      return content + postsuffix
    }
  } else if (Array.isArray(content)) {
    // Locate the last text part, scanning from the end of the array
    let lastTextPartIndex = -1
    for (let i = content.length - 1; i >= 0; i--) {
      if (content[i].type === 'text') {
        lastTextPartIndex = i
        break
      }
    }
    if (lastTextPartIndex !== -1) {
      const textPart = content[lastTextPartIndex] as ChatCompletionContentPartText
      if (qwenThinkModeEnabled) {
        // Thinking enabled: strip the suffix from the last text part if present
        if (textPart.text.endsWith(postsuffix)) {
          textPart.text = textPart.text.substring(0, textPart.text.length - postsuffix.length).trimEnd()
        }
      } else if (!textPart.text.endsWith(postsuffix)) {
        // Thinking disabled: append the suffix to the last text part
        textPart.text += postsuffix
      }
    } else if (!qwenThinkModeEnabled) {
      // No text part exists but thinking is disabled: add a text part carrying the suffix
      content.push({ type: 'text', text: postsuffix })
    }
  } else if (!qwenThinkModeEnabled) {
    // content is null but thinking is disabled: the suffix must still be sent.
    // Bug fix: the previous code returned null here, silently dropping the suffix.
    return postsuffix
  }
  return content
}

View File

@ -60,6 +60,7 @@ export type AssistantSettings = {
defaultModel?: Model
customParameters?: AssistantSettingCustomParameters[]
reasoning_effort?: ReasoningEffortOptions
qwenThinkMode?: boolean
}
export type Agent = Omit<Assistant, 'model'> & {