fix: deep research model only supports medium search context and reasoning effort (#10676)

Co-authored-by: ABucket <abucket@github.com>
(cherry picked from commit 6f63eefa86)
This commit is contained in:
ABucket 2025-10-22 21:59:00 +08:00 committed by dev
parent 1da1f10462
commit 3f4d34f6ae
7 changed files with 54 additions and 7 deletions

View File

@ -12,6 +12,7 @@ import {
isGPT5SeriesModel,
isGrokReasoningModel,
isNotSupportSystemMessageModel,
isOpenAIDeepResearchModel,
isOpenAIOpenWeightModel,
isOpenAIReasoningModel,
isQwenAlwaysThinkModel,
@ -125,6 +126,12 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
return {}
}
if (isOpenAIDeepResearchModel(model)) {
return {
reasoning_effort: 'medium'
}
}
const reasoningEffort = assistant?.settings?.reasoning_effort
if (isSupportedThinkingTokenZhipuModel(model)) {

View File

@ -127,7 +127,7 @@ export async function buildStreamTextParams(
let webSearchPluginConfig: WebSearchPluginConfig | undefined = undefined
if (enableWebSearch) {
if (isBaseProvider(aiSdkProviderId)) {
webSearchPluginConfig = buildProviderBuiltinWebSearchConfig(aiSdkProviderId, webSearchConfig)
webSearchPluginConfig = buildProviderBuiltinWebSearchConfig(aiSdkProviderId, webSearchConfig, model)
}
if (!tools) {
tools = {}

View File

@ -9,6 +9,7 @@ import {
isDoubaoThinkingAutoModel,
isGrok4FastReasoningModel,
isGrokReasoningModel,
isOpenAIDeepResearchModel,
isOpenAIReasoningModel,
isQwenAlwaysThinkModel,
isQwenReasoningModel,
@ -44,6 +45,12 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
if (!isReasoningModel(model)) {
return {}
}
if (isOpenAIDeepResearchModel(model)) {
return {
reasoning_effort: 'medium'
}
}
const reasoningEffort = assistant?.settings?.reasoning_effort
if (!reasoningEffort) {
@ -317,7 +324,11 @@ export function getOpenAIReasoningParams(assistant: Assistant, model: Model): Re
reasoningSummary = summaryText
}
const reasoningEffort = assistant?.settings?.reasoning_effort
let reasoningEffort = assistant?.settings?.reasoning_effort
if (isOpenAIDeepResearchModel(model)) {
reasoningEffort = 'medium'
}
if (!reasoningEffort) {
return {}

View File

@ -4,7 +4,7 @@ import {
WebSearchPluginConfig
} from '@cherrystudio/ai-core/core/plugins/built-in/webSearchPlugin/helper'
import { BaseProviderId } from '@cherrystudio/ai-core/provider'
import { isOpenAIWebSearchChatCompletionOnlyModel } from '@renderer/config/models'
import { isOpenAIDeepResearchModel, isOpenAIWebSearchChatCompletionOnlyModel } from '@renderer/config/models'
import { CherryWebSearchConfig } from '@renderer/store/websearch'
import { Model } from '@renderer/types'
import { mapRegexToPatterns } from '@renderer/utils/blacklistMatchPattern'
@ -43,20 +43,27 @@ function mapMaxResultToOpenAIContextSize(maxResults: number): OpenAISearchConfig
export function buildProviderBuiltinWebSearchConfig(
providerId: BaseProviderId,
webSearchConfig: CherryWebSearchConfig
webSearchConfig: CherryWebSearchConfig,
model?: Model
): WebSearchPluginConfig | undefined {
switch (providerId) {
case 'openai': {
const searchContextSize = isOpenAIDeepResearchModel(model)
? 'medium'
: mapMaxResultToOpenAIContextSize(webSearchConfig.maxResults)
return {
openai: {
searchContextSize: mapMaxResultToOpenAIContextSize(webSearchConfig.maxResults)
searchContextSize
}
}
}
case 'openai-chat': {
const searchContextSize = isOpenAIDeepResearchModel(model)
? 'medium'
: mapMaxResultToOpenAIContextSize(webSearchConfig.maxResults)
return {
'openai-chat': {
searchContextSize: mapMaxResultToOpenAIContextSize(webSearchConfig.maxResults)
searchContextSize
}
}
}

View File

@ -10,7 +10,7 @@ import { getLowerBaseModelName, isUserSelectedModelType } from '@renderer/utils'
import { isEmbeddingModel, isRerankModel } from './embedding'
import { isGPT5SeriesModel } from './utils'
import { isTextToImageModel } from './vision'
import { GEMINI_FLASH_MODEL_REGEX } from './websearch'
import { GEMINI_FLASH_MODEL_REGEX, isOpenAIDeepResearchModel } from './websearch'
// Reasoning models
export const REASONING_REGEX =
@ -21,6 +21,7 @@ export const REASONING_REGEX =
export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
default: ['low', 'medium', 'high'] as const,
o: ['low', 'medium', 'high'] as const,
openai_deep_research: ['medium'] as const,
gpt5: ['minimal', 'low', 'medium', 'high'] as const,
gpt5_codex: ['low', 'medium', 'high'] as const,
grok: ['low', 'high'] as const,
@ -42,6 +43,7 @@ export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
default: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const,
o: MODEL_SUPPORTED_REASONING_EFFORT.o,
openai_deep_research: MODEL_SUPPORTED_REASONING_EFFORT.openai_deep_research,
gpt5: [...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const,
gpt5_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_codex,
grok: MODEL_SUPPORTED_REASONING_EFFORT.grok,
@ -62,6 +64,9 @@ export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
export const getThinkModelType = (model: Model): ThinkingModelType => {
let thinkingModelType: ThinkingModelType = 'default'
const modelId = getLowerBaseModelName(model.id)
if (isOpenAIDeepResearchModel(model)) {
return 'openai_deep_research'
}
if (isGPT5SeriesModel(model)) {
if (modelId.includes('codex')) {
thinkingModelType = 'gpt5_codex'

View File

@ -26,6 +26,22 @@ export const PERPLEXITY_SEARCH_MODELS = [
'sonar-deep-research'
]
// Matches ids like "o3-deep-research" or "deep_research-preview" (separator optional)
const OPENAI_DEEP_RESEARCH_MODEL_REGEX = /deep[-_]?research/

/**
 * Returns true when the given model is an OpenAI "deep research" variant.
 *
 * Only models served through the `openai` or `openai-chat` providers are
 * considered; for those, the lowercased base model id is matched against
 * OPENAI_DEEP_RESEARCH_MODEL_REGEX. A missing model yields false.
 */
export function isOpenAIDeepResearchModel(model?: Model): boolean {
  if (!model) return false
  const fromOpenAIProvider = model.provider === 'openai' || model.provider === 'openai-chat'
  if (!fromOpenAIProvider) return false
  return OPENAI_DEEP_RESEARCH_MODEL_REGEX.test(getLowerBaseModelName(model.id, '/'))
}
export function isWebSearchModel(model: Model): boolean {
if (
!model ||

View File

@ -75,6 +75,7 @@ export type AssistantSettingCustomParameters = {
const ThinkModelTypes = [
'default',
'o',
'openai_deep_research',
'gpt5',
'gpt5_codex',
'grok',