* fix/9165

* fix: early return
Authored by SuYao on 2025-08-15 22:56:40 +08:00; committed by GitHub.
parent 4a62bb6ad7
commit e0dbd2d2db
3 changed files with 27 additions and 22 deletions

File 1 of 3

@@ -9,6 +9,7 @@ import {
   isGPT5SeriesModel,
   isGrokReasoningModel,
   isNotSupportSystemMessageModel,
+  isOpenAIReasoningModel,
   isQwenAlwaysThinkModel,
   isQwenMTModel,
   isQwenReasoningModel,
@@ -146,7 +147,7 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
       return {}
     }
     // Don't disable reasoning for models that require it
-    if (isGrokReasoningModel(model)) {
+    if (isGrokReasoningModel(model) || isOpenAIReasoningModel(model)) {
       return {}
     }
     return { reasoning: { enabled: false, exclude: true } }
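
Note on the hunk above: with the added isOpenAIReasoningModel guard, OpenAI o-series reasoning models now take the same early return as Grok reasoning models, so no disable-reasoning payload is built for them. Below is a minimal standalone sketch of that guard; the Model type and the two predicates are simplified stand-ins, not the project's real helpers.

// Illustrative stand-ins only; the project's Model type and predicates are richer.
type Model = { id: string }

const isGrokReasoningModel = (m: Model): boolean => m.id.startsWith('grok-3-mini')
const isOpenAIReasoningModel = (m: Model): boolean => /^o[134]/.test(m.id) // o1, o3, o4-mini, ...

// Mirrors the guard in the hunk: models whose reasoning cannot be switched off
// get an empty override instead of an explicit disable payload.
function reasoningDisableParams(model: Model): Record<string, unknown> {
  if (isGrokReasoningModel(model) || isOpenAIReasoningModel(model)) {
    return {}
  }
  return { reasoning: { enabled: false, exclude: true } }
}

console.log(reasoningDisableParams({ id: 'o3-mini' }))     // {}
console.log(reasoningDisableParams({ id: 'qwen2.5-72b' })) // { reasoning: { enabled: false, exclude: true } }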
@@ -524,12 +525,13 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
     }
     // 1. Handle the system message
-    let systemMessage = { role: 'system', content: assistant.prompt || '' }
+    const systemMessage = { role: 'system', content: assistant.prompt || '' }
     if (isSupportedReasoningEffortOpenAIModel(model)) {
-      systemMessage = {
-        role: isSupportDeveloperRoleProvider(this.provider) ? 'developer' : 'system',
-        content: `Formatting re-enabled${systemMessage ? '\n' + systemMessage.content : ''}`
+      if (isSupportDeveloperRoleProvider(this.provider)) {
+        systemMessage.role = 'developer'
+      } else {
+        systemMessage.role = 'system'
       }
     }
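
Two behavioural points follow from the hunk above: the system message is now a const whose role is adjusted in place rather than being rebuilt, and the old `Formatting re-enabled` prefix on the content is no longer added in the lines shown. A small sketch of the new role selection, with a hypothetical supportsDeveloperRole flag standing in for isSupportDeveloperRoleProvider(this.provider):

// Hypothetical stand-in for the provider capability check in the client class.
type SystemMessage = { role: 'system' | 'developer'; content: string }

function applyReasoningRole(message: SystemMessage, supportsDeveloperRole: boolean): SystemMessage {
  // Mutates the role in place, as the diff does; the content is left untouched.
  message.role = supportsDeveloperRole ? 'developer' : 'system'
  return message
}

const systemMessage: SystemMessage = { role: 'system', content: 'You are a helpful assistant.' }
console.log(applyReasoningRole(systemMessage, true))  // { role: 'developer', content: 'You are a helpful assistant.' }
console.log(applyReasoningRole(systemMessage, false)) // { role: 'system', content: 'You are a helpful assistant.' }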

File 2 of 3

@@ -292,6 +292,7 @@ export const CLAUDE_SUPPORTED_WEBSEARCH_REGEX = new RegExp(
 // Map from model type to the reasoning_effort values it supports
 export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
   default: ['low', 'medium', 'high'] as const,
+  o: ['low', 'medium', 'high'] as const,
   gpt5: ['minimal', 'low', 'medium', 'high'] as const,
   grok: ['low', 'high'] as const,
   gemini: ['low', 'medium', 'high', 'auto'] as const,
@@ -307,7 +308,8 @@ export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
 // Map from model type to the thinking options it supports
 export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
   default: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const,
-  gpt5: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const,
+  o: MODEL_SUPPORTED_REASONING_EFFORT.o,
+  gpt5: [...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const,
   grok: MODEL_SUPPORTED_REASONING_EFFORT.grok,
   gemini: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
   gemini_pro: MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro,
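
For reference, the new o entry reuses the reasoning-effort list directly, and the gpt5 entry as shown no longer spreads in an 'off' option, so neither model type exposes a way to turn thinking off through this table. A reduced sketch of the two tables, limited to the entries touched by the hunk, plus a hypothetical helper that checks for an off option:

// Reduced copies of the two tables; canDisableThinking is an illustrative helper,
// not something defined in this diff.
const MODEL_SUPPORTED_REASONING_EFFORT = {
  default: ['low', 'medium', 'high'],
  o: ['low', 'medium', 'high'],
  gpt5: ['minimal', 'low', 'medium', 'high']
} as const

const MODEL_SUPPORTED_OPTIONS = {
  default: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.default],
  o: MODEL_SUPPORTED_REASONING_EFFORT.o,            // no 'off': thinking stays on
  gpt5: [...MODEL_SUPPORTED_REASONING_EFFORT.gpt5]  // 'off' removed in this hunk as well
} as const

// Can the UI offer an "off" choice for this thinking model type?
const canDisableThinking = (type: keyof typeof MODEL_SUPPORTED_OPTIONS): boolean =>
  (MODEL_SUPPORTED_OPTIONS[type] as readonly string[]).includes('off')

console.log(canDisableThinking('default')) // true
console.log(canDisableThinking('o'))       // false
console.log(canDisableThinking('gpt5'))    // false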
@@ -320,28 +322,28 @@ export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
 } as const
 export const getThinkModelType = (model: Model): ThinkingModelType => {
+  let thinkingModelType: ThinkingModelType = 'default'
   if (isGPT5SeriesModel(model)) {
-    return 'gpt5'
-  }
-  if (isSupportedThinkingTokenGeminiModel(model)) {
+    thinkingModelType = 'gpt5'
+  } else if (isSupportedReasoningEffortOpenAIModel(model)) {
+    thinkingModelType = 'o'
+  } else if (isSupportedThinkingTokenGeminiModel(model)) {
     if (GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
-      return 'gemini'
+      thinkingModelType = 'gemini'
     } else {
-      return 'gemini_pro'
+      thinkingModelType = 'gemini_pro'
     }
-  }
-  if (isSupportedReasoningEffortGrokModel(model)) return 'grok'
-  if (isSupportedThinkingTokenQwenModel(model)) {
+  } else if (isSupportedReasoningEffortGrokModel(model)) thinkingModelType = 'grok'
+  else if (isSupportedThinkingTokenQwenModel(model)) {
     if (isQwenAlwaysThinkModel(model)) {
-      return 'qwen_thinking'
+      thinkingModelType = 'qwen_thinking'
     }
-    return 'qwen'
-  }
-  if (isSupportedThinkingTokenDoubaoModel(model)) return 'doubao'
-  if (isSupportedThinkingTokenHunyuanModel(model)) return 'hunyuan'
-  if (isSupportedReasoningEffortPerplexityModel(model)) return 'perplexity'
-  if (isSupportedThinkingTokenZhipuModel(model)) return 'zhipu'
-  return 'default'
+    thinkingModelType = 'qwen'
+  } else if (isSupportedThinkingTokenDoubaoModel(model)) thinkingModelType = 'doubao'
+  else if (isSupportedThinkingTokenHunyuanModel(model)) thinkingModelType = 'hunyuan'
+  else if (isSupportedReasoningEffortPerplexityModel(model)) thinkingModelType = 'perplexity'
+  else if (isSupportedThinkingTokenZhipuModel(model)) thinkingModelType = 'zhipu'
+  return thinkingModelType
 }
 export function isFunctionCallingModel(model?: Model): boolean {
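
The rewrite above replaces the early returns with a single thinkingModelType variable and one if/else-if chain, and inserts the new 'o' branch between the GPT-5 and Gemini checks, so classification order is explicit and there is one exit point. A minimal behavioural sketch follows; the predicates and regex are illustrative stand-ins and only three of the branches are reproduced.

// Stand-in predicates for illustration; the real helpers come from the models config.
type ThinkingModelType = 'default' | 'o' | 'gpt5' | 'gemini' | 'gemini_pro'
type Model = { id: string }

const isGPT5SeriesModel = (m: Model): boolean => m.id.startsWith('gpt-5')
const isSupportedReasoningEffortOpenAIModel = (m: Model): boolean => /^o[134]/.test(m.id)
const isSupportedThinkingTokenGeminiModel = (m: Model): boolean => m.id.startsWith('gemini')
const GEMINI_FLASH_MODEL_REGEX = /flash/

// Same single-exit shape as the rewritten function: one mutable result, one chain.
function getThinkModelType(model: Model): ThinkingModelType {
  let thinkingModelType: ThinkingModelType = 'default'
  if (isGPT5SeriesModel(model)) {
    thinkingModelType = 'gpt5'
  } else if (isSupportedReasoningEffortOpenAIModel(model)) {
    thinkingModelType = 'o'
  } else if (isSupportedThinkingTokenGeminiModel(model)) {
    thinkingModelType = GEMINI_FLASH_MODEL_REGEX.test(model.id) ? 'gemini' : 'gemini_pro'
  }
  return thinkingModelType
}

console.log(getThinkModelType({ id: 'o4-mini' }))          // 'o'
console.log(getThinkModelType({ id: 'gpt-5-mini' }))       // 'gpt5'
console.log(getThinkModelType({ id: 'gemini-2.5-flash' })) // 'gemini'
console.log(getThinkModelType({ id: 'mistral-large' }))    // 'default'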

File 3 of 3

@@ -56,6 +56,7 @@ export type ReasoningEffortOption = NonNullable<OpenAI.ReasoningEffort> | 'auto'
 export type ThinkingOption = ReasoningEffortOption | 'off'
 export type ThinkingModelType =
   | 'default'
+  | 'o'
   | 'gpt5'
   | 'grok'
   | 'gemini'