fix: fall back when reasoning effort is invalid (#8857)

* refactor(types): 扩展思考模型类型并优化相关类型定义

添加 ThinkingModelType 和 ThinkingOption 类型以支持更多模型
引入 ThinkingOptionConfig 和 ReasoningEffortConfig 类型配置
重构 ReasoningEffortOptions 为 ReasoningEffortOption 并更新相关引用

* feat(模型配置): 添加模型推理配置映射表并重构ThinkingButton组件

将模型支持的推理选项配置集中管理,新增MODEL_SUPPORTED_REASONING_EFFORT和MODEL_SUPPORTED_OPTIONS映射表
提取getThinkModelType方法用于统一判断模型类型,简化ThinkingButton组件逻辑

* fix(OpenAIApiClient): 添加对reasoning_effort参数的有效性检查

当模型不支持指定的reasoning_effort值时,回退到第一个支持的值

* fix: 修正判断模型类型的条件函数

* refactor(types): 使用 Record 类型替代映射类型语法

简化类型定义,提升代码可读性和一致性
This commit is contained in:
Phantom 2025-08-07 23:56:47 +08:00 committed by GitHub
parent 201fcf9f45
commit b38b2f16fc
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 112 additions and 80 deletions

View File

@ -4,6 +4,7 @@ import {
findTokenLimit,
GEMINI_FLASH_MODEL_REGEX,
getOpenAIWebSearchParams,
getThinkModelType,
isDoubaoThinkingAutoModel,
isGrokReasoningModel,
isNotSupportSystemMessageModel,
@ -20,7 +21,8 @@ import {
isSupportedThinkingTokenModel,
isSupportedThinkingTokenQwenModel,
isSupportedThinkingTokenZhipuModel,
isVisionModel
isVisionModel,
MODEL_SUPPORTED_REASONING_EFFORT
} from '@renderer/config/models'
import {
isSupportArrayContentProvider,
@ -220,8 +222,18 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
// Grok models/Perplexity models/OpenAI models
if (isSupportedReasoningEffortModel(model)) {
return {
reasoning_effort: reasoningEffort
// 检查模型是否支持所选选项
const modelType = getThinkModelType(model)
const supportedOptions = MODEL_SUPPORTED_REASONING_EFFORT[modelType]
if (supportedOptions.includes(reasoningEffort)) {
return {
reasoning_effort: reasoningEffort
}
} else {
// 如果不支持fallback到第一个支持的值
return {
reasoning_effort: supportedOptions[0]
}
}
}

View File

@ -145,7 +145,13 @@ import YiModelLogoDark from '@renderer/assets/images/models/yi_dark.png'
import YoudaoLogo from '@renderer/assets/images/providers/netease-youdao.svg'
import NomicLogo from '@renderer/assets/images/providers/nomic.png'
import { getProviderByModel } from '@renderer/services/AssistantService'
import { Model, SystemProviderId } from '@renderer/types'
import {
Model,
ReasoningEffortConfig,
SystemProviderId,
ThinkingModelType,
ThinkingOptionConfig
} from '@renderer/types'
import { getLowerBaseModelName, isUserSelectedModelType } from '@renderer/utils'
import OpenAI from 'openai'
@ -274,6 +280,56 @@ export const CLAUDE_SUPPORTED_WEBSEARCH_REGEX = new RegExp(
'i'
)
// Maps each thinking-model type to the reasoning_effort values it accepts.
// NOTE: the entries are plain mutable arrays. Per-entry `as const` assertions
// would infer readonly tuples, which are not assignable to the mutable
// `ReasoningEffortOption[]` required by ReasoningEffortConfig (TS2322), and
// are redundant anyway since the annotation fixes the element type.
export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
  default: ['low', 'medium', 'high'],
  grok: ['low', 'high'],
  gemini: ['low', 'medium', 'high', 'auto'],
  gemini_pro: ['low', 'medium', 'high', 'auto'],
  qwen: ['low', 'medium', 'high'],
  qwen_3235ba22b_thinking: ['low', 'medium', 'high'],
  doubao: ['auto', 'high'],
  hunyuan: ['auto'],
  zhipu: ['auto'],
  perplexity: ['low', 'medium', 'high']
}
// Maps each thinking-model type to the options selectable in the UI:
// 'off' plus the supported reasoning_effort values, except for model types
// that cannot disable thinking (grok, gemini_pro, qwen_3235ba22b_thinking,
// perplexity), which omit 'off'.
// NOTE: per-entry `as const` assertions are omitted — they would infer
// readonly tuples, which are not assignable to the mutable
// `ThinkingOption[]` required by ThinkingOptionConfig (TS2322).
export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
  default: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.default],
  grok: [...MODEL_SUPPORTED_REASONING_EFFORT.grok],
  gemini: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini],
  gemini_pro: [...MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro],
  qwen: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen],
  qwen_3235ba22b_thinking: [...MODEL_SUPPORTED_REASONING_EFFORT.qwen_3235ba22b_thinking],
  doubao: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao],
  hunyuan: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan],
  zhipu: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu],
  perplexity: [...MODEL_SUPPORTED_REASONING_EFFORT.perplexity]
}
/**
 * Resolve the thinking-model category for a model, used to look up the
 * reasoning-effort / thinking options it supports.
 *
 * The predicate order is significant: Gemini, Grok, Qwen, Doubao, Hunyuan,
 * Perplexity, Zhipu are checked in that sequence, falling back to 'default'
 * when no predicate matches.
 */
export const getThinkModelType = (model: Model): ThinkingModelType => {
  if (isSupportedThinkingTokenGeminiModel(model)) {
    // Flash variants get the 'gemini' profile; everything else is treated as Pro.
    return GEMINI_FLASH_MODEL_REGEX.test(model.id) ? 'gemini' : 'gemini_pro'
  }
  if (isSupportedReasoningEffortGrokModel(model)) {
    return 'grok'
  }
  if (isSupportedThinkingTokenQwenModel(model)) {
    // The Qwen3-235B-A22B thinking model has its own option set.
    return isQwen3235BA22BThinkingModel(model) ? 'qwen_3235ba22b_thinking' : 'qwen'
  }
  if (isSupportedThinkingTokenDoubaoModel(model)) {
    return 'doubao'
  }
  if (isSupportedThinkingTokenHunyuanModel(model)) {
    return 'hunyuan'
  }
  if (isSupportedReasoningEffortPerplexityModel(model)) {
    return 'perplexity'
  }
  if (isSupportedThinkingTokenZhipuModel(model)) {
    return 'zhipu'
  }
  return 'default'
}
export function isFunctionCallingModel(model?: Model): boolean {
if (!model || isEmbeddingModel(model) || isRerankModel(model)) {
return false

View File

@ -6,27 +6,14 @@ import {
MdiLightbulbOn90
} from '@renderer/components/Icons/SVGIcon'
import { useQuickPanel } from '@renderer/components/QuickPanel'
import {
GEMINI_FLASH_MODEL_REGEX,
isDoubaoThinkingAutoModel,
isQwen3235BA22BThinkingModel,
isSupportedReasoningEffortGrokModel,
isSupportedReasoningEffortPerplexityModel,
isSupportedThinkingTokenDoubaoModel,
isSupportedThinkingTokenGeminiModel,
isSupportedThinkingTokenHunyuanModel,
isSupportedThinkingTokenQwenModel,
isSupportedThinkingTokenZhipuModel
} from '@renderer/config/models'
import { getThinkModelType, isDoubaoThinkingAutoModel, MODEL_SUPPORTED_OPTIONS } from '@renderer/config/models'
import { useAssistant } from '@renderer/hooks/useAssistant'
import { getReasoningEffortOptionsLabel } from '@renderer/i18n/label'
import { Assistant, Model, ReasoningEffortOptions } from '@renderer/types'
import { Assistant, Model, ThinkingOption } from '@renderer/types'
import { Tooltip } from 'antd'
import { FC, ReactElement, useCallback, useEffect, useImperativeHandle, useMemo } from 'react'
import { useTranslation } from 'react-i18next'
type ThinkingOption = ReasoningEffortOptions | 'off'
export interface ThinkingButtonRef {
openQuickPanel: () => void
}
@ -38,20 +25,6 @@ interface Props {
ToolbarButton: any
}
// 模型类型到支持选项的映射表
const MODEL_SUPPORTED_OPTIONS: Record<string, ThinkingOption[]> = {
default: ['off', 'low', 'medium', 'high'],
grok: ['low', 'high'],
gemini: ['off', 'low', 'medium', 'high', 'auto'],
gemini_pro: ['low', 'medium', 'high', 'auto'],
qwen: ['off', 'low', 'medium', 'high'],
qwen_3235ba22b_thinking: ['low', 'medium', 'high'],
doubao: ['off', 'auto', 'high'],
hunyuan: ['off', 'auto'],
zhipu: ['off', 'auto'],
perplexity: ['low', 'medium', 'high']
}
// 选项转换映射表:当选项不支持时使用的替代选项
const OPTION_FALLBACK: Record<ThinkingOption, ThinkingOption> = {
off: 'low', // off -> low (for Gemini Pro models)
@ -66,60 +39,20 @@ const ThinkingButton: FC<Props> = ({ ref, model, assistant, ToolbarButton }): Re
const quickPanel = useQuickPanel()
const { updateAssistantSettings } = useAssistant(assistant.id)
const isGrokModel = isSupportedReasoningEffortGrokModel(model)
const isGeminiModel = isSupportedThinkingTokenGeminiModel(model)
const isGeminiFlashModel = GEMINI_FLASH_MODEL_REGEX.test(model.id)
const isQwenModel = isSupportedThinkingTokenQwenModel(model)
const isQwen3235BA22BThinking = isQwen3235BA22BThinkingModel(model)
const isDoubaoModel = isSupportedThinkingTokenDoubaoModel(model)
const isHunyuanModel = isSupportedThinkingTokenHunyuanModel(model)
const isPerplexityModel = isSupportedReasoningEffortPerplexityModel(model)
const isZhipuModel = isSupportedThinkingTokenZhipuModel(model)
const currentReasoningEffort = useMemo(() => {
return assistant.settings?.reasoning_effort || 'off'
}, [assistant.settings?.reasoning_effort])
// 确定当前模型支持的选项类型
const modelType = useMemo(() => {
if (isGeminiModel) {
if (isGeminiFlashModel) {
return 'gemini'
} else {
return 'gemini_pro'
}
}
if (isGrokModel) return 'grok'
if (isQwenModel) {
if (isQwen3235BA22BThinking) {
return 'qwen_3235ba22b_thinking'
}
return 'qwen'
}
if (isDoubaoModel) return 'doubao'
if (isHunyuanModel) return 'hunyuan'
if (isPerplexityModel) return 'perplexity'
if (isZhipuModel) return 'zhipu'
return 'default'
}, [
isGeminiModel,
isGrokModel,
isQwenModel,
isDoubaoModel,
isGeminiFlashModel,
isHunyuanModel,
isPerplexityModel,
isQwen3235BA22BThinking,
isZhipuModel
])
const modelType = useMemo(() => getThinkModelType(model), [model])
// 获取当前模型支持的选项
const supportedOptions = useMemo(() => {
const supportedOptions: ThinkingOption[] = useMemo(() => {
if (modelType === 'doubao') {
if (isDoubaoThinkingAutoModel(model)) {
return ['off', 'auto', 'high'] as ThinkingOption[]
return ['off', 'auto', 'high']
}
return ['off', 'high'] as ThinkingOption[]
return ['off', 'high']
}
return MODEL_SUPPORTED_OPTIONS[modelType]
}, [model, modelType])

View File

@ -52,8 +52,39 @@ export type AssistantSettingCustomParameters = {
type: 'string' | 'number' | 'boolean' | 'json'
}
export type ReasoningEffortOptions = 'low' | 'medium' | 'high' | 'auto'
export type EffortRatio = Record<ReasoningEffortOptions, number>
// Reasoning effort levels a model may accept for its reasoning_effort parameter.
export type ReasoningEffortOption = 'low' | 'medium' | 'high' | 'auto'
// A user-selectable thinking option: any effort level, or 'off' to disable thinking.
export type ThinkingOption = ReasoningEffortOption | 'off'

// Categories of thinking/reasoning models, used as keys in the option maps.
export type ThinkingModelType =
  | 'default'
  | 'grok'
  | 'gemini'
  | 'gemini_pro'
  | 'qwen'
  | 'qwen_3235ba22b_thinking'
  | 'doubao'
  | 'hunyuan'
  | 'zhipu'
  | 'perplexity'

// Per-model-type UI options ('off' included where the model supports it).
export type ThinkingOptionConfig = Record<ThinkingModelType, ThinkingOption[]>
// Per-model-type supported reasoning_effort values.
export type ReasoningEffortConfig = Record<ThinkingModelType, ReasoningEffortOption[]>

export type EffortRatio = Record<ReasoningEffortOption, number>

// All thinking model types, for runtime membership checks.
// Annotated as a readonly array instead of `ThinkingModelType[] ... as const`:
// a const assertion infers a readonly tuple, which is not assignable to a
// mutable array type (TS2322).
const ThinkModelTypes: readonly ThinkingModelType[] = [
  'default',
  'grok',
  'gemini',
  'gemini_pro',
  'qwen',
  'qwen_3235ba22b_thinking',
  'doubao',
  'hunyuan',
  'zhipu',
  'perplexity'
]

/**
 * Type guard: narrows an arbitrary string to ThinkingModelType when it is one
 * of the known thinking-model categories.
 */
export function isThinkModelType(type: string): type is ThinkingModelType {
  return ThinkModelTypes.includes(type as ThinkingModelType)
}
export const EFFORT_RATIO: EffortRatio = {
low: 0.05,
@ -73,7 +104,7 @@ export type AssistantSettings = {
streamOutput: boolean
defaultModel?: Model
customParameters?: AssistantSettingCustomParameters[]
reasoning_effort?: ReasoningEffortOptions
reasoning_effort?: ReasoningEffortOption
qwenThinkMode?: boolean
toolUseMode: 'function' | 'prompt'
}