feat: support gpt-5.1 (#11294)

* build: update @cherrystudio/openai dependency from v6.5.0 to v6.9.0

* refactor(reasoning): replace 'off' with 'none' for reasoning effort option

Update reasoning effort option from 'off' to 'none' across multiple files for consistency
Add support for gpt5_1 model with reasoning effort options

* fix(openai): handle apply_patch_call and apply_patch_call_output in response conversion

Filter and properly handle apply_patch_call and apply_patch_call_output types in OpenAI response conversion. Ensure undefined/null values are handled appropriately and log warnings for missing required fields.

* feat(models): add gpt-5.1 model logo and configuration

* fix(providers): include cherryin in url context provider check

Add SystemProviderIds.cherryin to the URL context provider check so URL context also works for the cherryin provider

* feat(models): add logo images for gpt-5.1 model variants

* feat(model): add support for GPT-5.1 series models

- Add new model type check for GPT-5.1 series
- Update reasoning effort and verbosity checks to include GPT-5.1
- Add logging to provider options builder

* feat(models): add gpt5_1_codex model support

Add new model type 'gpt5_1_codex' to ThinkModelTypes and configure its reasoning effort levels
Update model type detection logic to handle gpt5_1_codex variant
Phantom 2025-11-15 19:09:43 +08:00 committed by GitHub
parent a29b2bb3d6
commit 2511113b62
18 changed files with 118 additions and 42 deletions


@@ -135,7 +135,7 @@
     "@cherrystudio/embedjs-ollama": "^0.1.31",
     "@cherrystudio/embedjs-openai": "^0.1.31",
     "@cherrystudio/extension-table-plus": "workspace:^",
-    "@cherrystudio/openai": "^6.5.0",
+    "@cherrystudio/openai": "^6.9.0",
     "@dnd-kit/core": "^6.3.1",
     "@dnd-kit/modifiers": "^9.0.0",
     "@dnd-kit/sortable": "^10.0.0",


@@ -297,7 +297,31 @@ export class OpenAIResponseAPIClient extends OpenAIBaseClient<
   private convertResponseToMessageContent(response: OpenAI.Responses.Response): ResponseInput {
     const content: OpenAI.Responses.ResponseInput = []
-    content.push(...response.output)
+    response.output.forEach((item) => {
+      if (item.type !== 'apply_patch_call' && item.type !== 'apply_patch_call_output') {
+        content.push(item)
+      } else if (item.type === 'apply_patch_call') {
+        if (item.operation !== undefined) {
+          const applyPatchToolCall: OpenAI.Responses.ResponseInputItem.ApplyPatchCall = {
+            ...item,
+            operation: item.operation
+          }
+          content.push(applyPatchToolCall)
+        } else {
+          logger.warn('Undefined tool call operation for ApplyPatchToolCall.')
+        }
+      } else if (item.type === 'apply_patch_call_output') {
+        if (item.output !== undefined) {
+          const applyPatchToolCallOutput: OpenAI.Responses.ResponseInputItem.ApplyPatchCallOutput = {
+            ...item,
+            output: item.output === null ? undefined : item.output
+          }
+          content.push(applyPatchToolCallOutput)
+        } else {
+          logger.warn('Undefined tool call output for ApplyPatchToolCallOutput.')
+        }
+      }
+    })
     return content
   }
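
The spread-plus-reassign above looks redundant, but it is what narrows the optional fields: the guards suggest the response output item types allow operation/output to be absent, while the corresponding input item types require them. A reduced editor's sketch of the same pattern (type shapes simplified, not the actual SDK definitions):

type ApplyPatchCallOut = { type: 'apply_patch_call'; operation?: string }
type ApplyPatchCallIn = { type: 'apply_patch_call'; operation: string }

function toInputItem(item: ApplyPatchCallOut): ApplyPatchCallIn | undefined {
  // Guard first; the real code logs a warning and drops the item otherwise.
  if (item.operation === undefined) return undefined
  // Reassigning the guarded field lets TypeScript treat it as definitely
  // present, so the result satisfies the stricter input type.
  return { ...item, operation: item.operation }
}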


@@ -1,4 +1,5 @@
 import { baseProviderIdSchema, customProviderIdSchema } from '@cherrystudio/ai-core/provider'
+import { loggerService } from '@logger'
 import { isOpenAIModel, isQwenMTModel, isSupportFlexServiceTierModel } from '@renderer/config/models'
 import { isSupportServiceTierProvider } from '@renderer/config/providers'
 import { mapLanguageToQwenMTModel } from '@renderer/config/translate'
@@ -26,6 +27,8 @@ import {
 } from './reasoning'
 import { getWebSearchParams } from './websearch'
+const logger = loggerService.withContext('aiCore.utils.options')
 // copy from BaseApiClient.ts
 const getServiceTier = (model: Model, provider: Provider) => {
   const serviceTierSetting = provider.serviceTier
@@ -70,6 +73,7 @@ export function buildProviderOptions(
     enableGenerateImage: boolean
   }
 ): Record<string, any> {
+  logger.debug('buildProviderOptions', { assistant, model, actualProvider, capabilities })
   const rawProviderId = getAiSdkProviderId(actualProvider)
   // Build provider-specific options
   let providerSpecificOptions: Record<string, any> = {}

Four binary files added (not shown): the new gpt-5.1 model logo PNGs (19 KiB, 21 KiB, 20 KiB, 18 KiB).


@@ -59,6 +59,10 @@ import {
 } from '@renderer/assets/images/models/gpt_dark.png'
 import ChatGPTImageModelLogo from '@renderer/assets/images/models/gpt_image_1.png'
 import ChatGPTo1ModelLogo from '@renderer/assets/images/models/gpt_o1.png'
+import GPT51ModelLogo from '@renderer/assets/images/models/gpt-5.1.png'
+import GPT51ChatModelLogo from '@renderer/assets/images/models/gpt-5.1-chat.png'
+import GPT51CodexModelLogo from '@renderer/assets/images/models/gpt-5.1-codex.png'
+import GPT51CodexMiniModelLogo from '@renderer/assets/images/models/gpt-5.1-codex-mini.png'
 import GPT5ModelLogo from '@renderer/assets/images/models/gpt-5.png'
 import GPT5ChatModelLogo from '@renderer/assets/images/models/gpt-5-chat.png'
 import GPT5CodexModelLogo from '@renderer/assets/images/models/gpt-5-codex.png'
@@ -182,6 +186,10 @@ export function getModelLogoById(modelId: string): string | undefined {
     'gpt-5-nano': GPT5NanoModelLogo,
     'gpt-5-chat': GPT5ChatModelLogo,
     'gpt-5-codex': GPT5CodexModelLogo,
+    'gpt-5.1-codex-mini': GPT51CodexMiniModelLogo,
+    'gpt-5.1-codex': GPT51CodexModelLogo,
+    'gpt-5.1-chat': GPT51ChatModelLogo,
+    'gpt-5.1': GPT51ModelLogo,
     'gpt-5': GPT5ModelLogo,
     gpts: isLight ? ChatGPT4ModelLogo : ChatGPT4ModelLogoDark,
     'gpt-oss(?:-[\\w-]+)': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
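
Entry order matters in this map: the 'gpt-oss(?:-[\\w-]+)' key suggests the lookup treats keys as patterns scanned in order, so the longer gpt-5.1-codex-mini key must precede gpt-5.1 and gpt-5 or the shorter key would win. A hedged editor's sketch of that first-match behavior (the lookup mechanics here are assumed, not taken from this file):

const logoEntries: [string, string][] = [
  ['gpt-5.1-codex-mini', 'GPT51CodexMiniModelLogo'],
  ['gpt-5.1-codex', 'GPT51CodexModelLogo'],
  ['gpt-5.1-chat', 'GPT51ChatModelLogo'],
  ['gpt-5.1', 'GPT51ModelLogo'],
  ['gpt-5', 'GPT5ModelLogo']
]

// First entry whose key occurs in the model id wins.
const logoFor = (modelId: string): string | undefined =>
  logoEntries.find(([key]) => modelId.includes(key))?.[1]

console.log(logoFor('gpt-5.1-codex')) // 'GPT51CodexModelLogo', not the gpt-5 logo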


@@ -8,7 +8,7 @@ import type {
 import { getLowerBaseModelName, isUserSelectedModelType } from '@renderer/utils'
 import { isEmbeddingModel, isRerankModel } from './embedding'
-import { isGPT5SeriesModel } from './utils'
+import { isGPT5SeriesModel, isGPT51SeriesModel } from './utils'
 import { isTextToImageModel } from './vision'
 import { GEMINI_FLASH_MODEL_REGEX, isOpenAIDeepResearchModel } from './websearch'
@@ -24,6 +24,8 @@ export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
   openai_deep_research: ['medium'] as const,
   gpt5: ['minimal', 'low', 'medium', 'high'] as const,
   gpt5_codex: ['low', 'medium', 'high'] as const,
+  gpt5_1: ['none', 'low', 'medium', 'high'] as const,
+  gpt5_1_codex: ['none', 'medium', 'high'] as const,
   grok: ['low', 'high'] as const,
   grok4_fast: ['auto'] as const,
   gemini: ['low', 'medium', 'high', 'auto'] as const,
@@ -41,24 +43,26 @@ export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
 // Map from model type to its supported options
 export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
-  default: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const,
+  default: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const,
   o: MODEL_SUPPORTED_REASONING_EFFORT.o,
   openai_deep_research: MODEL_SUPPORTED_REASONING_EFFORT.openai_deep_research,
   gpt5: [...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const,
   gpt5_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_codex,
+  gpt5_1: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1,
+  gpt5_1_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex,
   grok: MODEL_SUPPORTED_REASONING_EFFORT.grok,
-  grok4_fast: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
-  gemini: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
+  grok4_fast: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
+  gemini: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
   gemini_pro: MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro,
-  qwen: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const,
+  qwen: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const,
   qwen_thinking: MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking,
-  doubao: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const,
-  doubao_no_auto: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_no_auto] as const,
+  doubao: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const,
+  doubao_no_auto: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_no_auto] as const,
   doubao_after_251015: MODEL_SUPPORTED_REASONING_EFFORT.doubao_after_251015,
-  hunyuan: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const,
-  zhipu: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const,
+  hunyuan: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const,
+  zhipu: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const,
   perplexity: MODEL_SUPPORTED_REASONING_EFFORT.perplexity,
-  deepseek_hybrid: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.deepseek_hybrid] as const
+  deepseek_hybrid: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.deepseek_hybrid] as const
 } as const
 const withModelIdAndNameAsId = <T>(model: Model, fn: (model: Model) => T): { idResult: T; nameResult: T } => {
@@ -75,7 +79,13 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
   if (isOpenAIDeepResearchModel(model)) {
     return 'openai_deep_research'
   }
-  if (isGPT5SeriesModel(model)) {
+  if (isGPT51SeriesModel(model)) {
+    if (modelId.includes('codex')) {
+      thinkingModelType = 'gpt5_1_codex'
+    } else {
+      thinkingModelType = 'gpt5_1'
+    }
+  } else if (isGPT5SeriesModel(model)) {
     if (modelId.includes('codex')) {
       thinkingModelType = 'gpt5_codex'
     } else {
@@ -526,7 +536,7 @@ export function isSupportedReasoningEffortOpenAIModel(model: Model): boolean {
     modelId.includes('o3') ||
     modelId.includes('o4') ||
     modelId.includes('gpt-oss') ||
-    (isGPT5SeriesModel(model) && !modelId.includes('chat'))
+    ((isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) && !modelId.includes('chat'))
   )
 }
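
Putting the two hunks together: a gpt-5.1 id now resolves to its own thinking model type before the broader gpt-5 branch is consulted, and that type then indexes into MODEL_SUPPORTED_OPTIONS. A self-contained editor's sketch of the resolution path (shapes simplified from the file above):

type Gpt51Type = 'gpt5_1' | 'gpt5_1_codex'

const GPT51_SUPPORTED_OPTIONS: Record<Gpt51Type, string[]> = {
  gpt5_1: ['none', 'low', 'medium', 'high'],
  gpt5_1_codex: ['none', 'medium', 'high']
}

// Mirrors the new branch in _getThinkModelType: codex variants get the
// narrower effort list, all other gpt-5.1 ids get the full list.
function gpt51Options(modelId: string): string[] {
  const type: Gpt51Type = modelId.includes('codex') ? 'gpt5_1_codex' : 'gpt5_1'
  return GPT51_SUPPORTED_OPTIONS[type]
}

console.log(gpt51Options('gpt-5.1'))       // ['none', 'low', 'medium', 'high']
console.log(gpt51Options('gpt-5.1-codex')) // ['none', 'medium', 'high']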


@@ -54,7 +54,7 @@ export function isSupportedFlexServiceTier(model: Model): boolean {
 export function isSupportVerbosityModel(model: Model): boolean {
   const modelId = getLowerBaseModelName(model.id)
-  return isGPT5SeriesModel(model) && !modelId.includes('chat')
+  return (isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) && !modelId.includes('chat')
 }
 export function isOpenAIChatCompletionOnlyModel(model: Model): boolean {
@@ -227,12 +227,17 @@ export const isNotSupportSystemMessageModel = (model: Model): boolean => {
 export const isGPT5SeriesModel = (model: Model) => {
   const modelId = getLowerBaseModelName(model.id)
-  return modelId.includes('gpt-5')
+  return modelId.includes('gpt-5') && !modelId.includes('gpt-5.1')
 }
 export const isGPT5SeriesReasoningModel = (model: Model) => {
   const modelId = getLowerBaseModelName(model.id)
-  return modelId.includes('gpt-5') && !modelId.includes('chat')
+  return isGPT5SeriesModel(model) && !modelId.includes('chat')
 }
+export const isGPT51SeriesModel = (model: Model) => {
+  const modelId = getLowerBaseModelName(model.id)
+  return modelId.includes('gpt-5.1')
+}
 export const isGeminiModel = (model: Model) => {
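
Since 'gpt-5.1' contains 'gpt-5' as a substring, the exclusion added to isGPT5SeriesModel is what keeps the two predicates disjoint. A quick editor's sanity check of the string logic in isolation:

const ids = ['gpt-5', 'gpt-5-codex', 'gpt-5-chat', 'gpt-5.1', 'gpt-5.1-codex-mini']
for (const id of ids) {
  const isGpt51 = id.includes('gpt-5.1')
  const isGpt5 = id.includes('gpt-5') && !id.includes('gpt-5.1')
  console.log(id, { isGpt5, isGpt51 })
}
// 'gpt-5*' ids match only isGpt5; 'gpt-5.1*' ids match only isGpt51,
// which is why every older gpt-5 check had to be updated to test both.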


@@ -67,7 +67,7 @@ import type {
   SystemProvider,
   SystemProviderId
 } from '@renderer/types'
-import { isSystemProvider, OpenAIServiceTiers } from '@renderer/types'
+import { isSystemProvider, OpenAIServiceTiers, SystemProviderIds } from '@renderer/types'
 import { TOKENFLUX_HOST } from './constant'
 import { glm45FlashModel, qwen38bModel, SYSTEM_MODELS } from './models'
@@ -1519,7 +1519,10 @@
 ] as const satisfies ProviderType[]
 export const isSupportUrlContextProvider = (provider: Provider) => {
-  return SUPPORT_URL_CONTEXT_PROVIDER_TYPES.some((type) => type === provider.type)
+  return (
+    SUPPORT_URL_CONTEXT_PROVIDER_TYPES.some((type) => type === provider.type) ||
+    provider.id === SystemProviderIds.cherryin
+  )
 }
 const SUPPORT_GEMINI_NATIVE_WEB_SEARCH_PROVIDERS = ['gemini', 'vertexai'] as const satisfies SystemProviderId[]
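
A minimal sketch of the widened check, assuming a simplified Provider shape and placeholder type entries (the real SUPPORT_URL_CONTEXT_PROVIDER_TYPES list is defined outside this hunk): cherryin now qualifies by id even when its provider type is not in the list.

type ProviderLite = { id: string; type: string }

// Placeholder entries for illustration only.
const URL_CONTEXT_TYPES = ['gemini', 'vertexai']

const supportsUrlContext = (provider: ProviderLite): boolean =>
  URL_CONTEXT_TYPES.some((t) => t === provider.type) || provider.id === 'cherryin'

console.log(supportsUrlContext({ id: 'cherryin', type: 'openai' })) // true
console.log(supportsUrlContext({ id: 'openai', type: 'openai' }))   // false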


@@ -123,9 +123,9 @@ export function useAssistant(id: string) {
     }
     updateAssistantSettings({
-      reasoning_effort: fallbackOption === 'off' ? undefined : fallbackOption,
-      reasoning_effort_cache: fallbackOption === 'off' ? undefined : fallbackOption,
-      qwenThinkMode: fallbackOption === 'off' ? undefined : true
+      reasoning_effort: fallbackOption === 'none' ? undefined : fallbackOption,
+      reasoning_effort_cache: fallbackOption === 'none' ? undefined : fallbackOption,
+      qwenThinkMode: fallbackOption === 'none' ? undefined : true
     })
   } else {
     // For supported options, no longer update the cache.


@@ -311,7 +311,7 @@ export const getHttpMessageLabel = (key: string): string => {
 }
 const reasoningEffortOptionsKeyMap: Record<ThinkingOption, string> = {
-  off: 'assistants.settings.reasoning_effort.off',
+  none: 'assistants.settings.reasoning_effort.off',
   minimal: 'assistants.settings.reasoning_effort.minimal',
   high: 'assistants.settings.reasoning_effort.high',
   low: 'assistants.settings.reasoning_effort.low',


@@ -36,7 +36,7 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
   const { assistant, updateAssistantSettings } = useAssistant(assistantId)
   const currentReasoningEffort = useMemo(() => {
-    return assistant.settings?.reasoning_effort || 'off'
+    return assistant.settings?.reasoning_effort || 'none'
   }, [assistant.settings?.reasoning_effort])
   // Determine which option types the current model supports
@@ -46,21 +46,21 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
   const supportedOptions: ThinkingOption[] = useMemo(() => {
     if (modelType === 'doubao') {
       if (isDoubaoThinkingAutoModel(model)) {
-        return ['off', 'auto', 'high']
+        return ['none', 'auto', 'high']
       }
-      return ['off', 'high']
+      return ['none', 'high']
     }
     return MODEL_SUPPORTED_OPTIONS[modelType]
   }, [model, modelType])
   const onThinkingChange = useCallback(
     (option?: ThinkingOption) => {
-      const isEnabled = option !== undefined && option !== 'off'
+      const isEnabled = option !== undefined && option !== 'none'
       // Then update the settings
       if (!isEnabled) {
         updateAssistantSettings({
-          reasoning_effort: undefined,
-          reasoning_effort_cache: undefined,
+          reasoning_effort: option,
+          reasoning_effort_cache: option,
           qwenThinkMode: false
         })
         return
@@ -96,10 +96,10 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
     }))
   }, [currentReasoningEffort, supportedOptions, onThinkingChange])
-  const isThinkingEnabled = currentReasoningEffort !== undefined && currentReasoningEffort !== 'off'
+  const isThinkingEnabled = currentReasoningEffort !== undefined && currentReasoningEffort !== 'none'
   const disableThinking = useCallback(() => {
-    onThinkingChange('off')
+    onThinkingChange('none')
   }, [onThinkingChange])
   const openQuickPanel = useCallback(() => {
@@ -116,7 +116,7 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
       return
     }
-    if (isThinkingEnabled && supportedOptions.includes('off')) {
+    if (isThinkingEnabled && supportedOptions.includes('none')) {
       disableThinking()
       return
     }
@@ -146,13 +146,13 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
     <Tooltip
       placement="top"
       title={
-        isThinkingEnabled && supportedOptions.includes('off')
+        isThinkingEnabled && supportedOptions.includes('none')
           ? t('common.close')
           : t('assistants.settings.reasoning_effort.label')
       }
       mouseLeaveDelay={0}
       arrow>
-      <ActionIconButton onClick={handleOpenQuickPanel} active={currentReasoningEffort !== 'off'}>
+      <ActionIconButton onClick={handleOpenQuickPanel} active={currentReasoningEffort !== 'none'}>
        {ThinkingIcon(currentReasoningEffort)}
       </ActionIconButton>
     </Tooltip>
@@ -178,7 +178,7 @@ const ThinkingIcon = (option?: ThinkingOption) => {
     case 'auto':
       IconComponent = MdiLightbulbAutoOutline
       break
-    case 'off':
+    case 'none':
       IconComponent = MdiLightbulbOffOutline
       break
     default:
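
One behavioral nuance in the onThinkingChange hunk: disabling previously cleared the stored effort to undefined, while it now persists the selected 'none' option itself, presumably so a deliberate 'none' is distinguishable from an unconfigured assistant. A reduced editor's sketch of the before/after write (settings shape simplified):

type SettingsPatch = { reasoning_effort?: string; reasoning_effort_cache?: string; qwenThinkMode: boolean }

// Before: disabling erased the stored value entirely.
const disableBefore = (): SettingsPatch =>
  ({ reasoning_effort: undefined, reasoning_effort_cache: undefined, qwenThinkMode: false })

// After: the explicit 'none' choice is written through as-is.
const disableAfter = (option: 'none'): SettingsPatch =>
  ({ reasoning_effort: option, reasoning_effort_cache: option, qwenThinkMode: false })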


@@ -67,7 +67,7 @@ const persistedReducer = persistReducer(
   {
     key: 'cherry-studio',
     storage,
-    version: 174,
+    version: 175,
     blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs', 'toolPermissions'],
     migrate
   },


@@ -2819,6 +2819,25 @@ const migrateConfig = {
       logger.error('migrate 174 error', error as Error)
       return state
     }
-  }
+  },
+  '175': (state: RootState) => {
+    try {
+      state.assistants.assistants.forEach((assistant) => {
+        // @ts-expect-error removed type 'off'
+        if (assistant.settings?.reasoning_effort === 'off') {
+          assistant.settings.reasoning_effort = 'none'
+        }
+        // @ts-expect-error removed type 'off'
+        if (assistant.settings?.reasoning_effort_cache === 'off') {
+          assistant.settings.reasoning_effort_cache = 'none'
+        }
+      })
+      logger.info('migrate 175 success')
+      return state
+    } catch (error) {
+      logger.error('migrate 175 error', error as Error)
+      return state
+    }
+  }
 }
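
The persist version bump above is what triggers this entry: redux-persist runs every migration keyed above the stored version on the next launch. The rewrite itself reduces to a small pure function; a self-contained editor's sketch (assistant settings shape simplified):

type AssistantSettings = { reasoning_effort?: string; reasoning_effort_cache?: string }

// Same rule as migration '175': legacy 'off' becomes 'none'; all other
// values, including undefined, pass through untouched.
const migrateEffort = (s: AssistantSettings): AssistantSettings => ({
  ...s,
  reasoning_effort: s.reasoning_effort === 'off' ? 'none' : s.reasoning_effort,
  reasoning_effort_cache: s.reasoning_effort_cache === 'off' ? 'none' : s.reasoning_effort_cache
})

console.log(migrateEffort({ reasoning_effort: 'off' }))  // { reasoning_effort: 'none' }
console.log(migrateEffort({ reasoning_effort: 'high' })) // unchanged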


@@ -83,7 +83,9 @@ const ThinkModelTypes = [
   'o',
   'openai_deep_research',
   'gpt5',
+  'gpt5_1',
   'gpt5_codex',
+  'gpt5_1_codex',
   'grok',
   'grok4_fast',
   'gemini',
@@ -100,7 +102,7 @@
 ] as const
 export type ReasoningEffortOption = NonNullable<OpenAI.ReasoningEffort> | 'auto'
-export type ThinkingOption = ReasoningEffortOption | 'off'
+export type ThinkingOption = ReasoningEffortOption
 export type ThinkingModelType = (typeof ThinkModelTypes)[number]
 export type ThinkingOptionConfig = Record<ThinkingModelType, ThinkingOption[]>
 export type ReasoningEffortConfig = Record<ThinkingModelType, ReasoningEffortOption[]>
@@ -111,6 +113,7 @@ export function isThinkModelType(type: string): type is ThinkingModelType {
 }
 export const EFFORT_RATIO: EffortRatio = {
+  none: 0,
   minimal: 0.05,
   low: 0.05,
   medium: 0.5,
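
EFFORT_RATIO presumably scales each effort level into a thinking-token budget elsewhere in the app; under that assumption, mapping none to 0 collapses the budget to zero tokens rather than merely omitting the field. An editor's sketch (the helper below is hypothetical, not from this diff, and the 'high' entry is assumed since the hunk ends at 'medium'):

const EFFORT_RATIO = { none: 0, minimal: 0.05, low: 0.05, medium: 0.5, high: 1 } as const

const thinkingBudget = (maxTokens: number, effort: keyof typeof EFFORT_RATIO): number =>
  Math.floor(maxTokens * EFFORT_RATIO[effort])

console.log(thinkingBudget(8192, 'medium')) // 4096
console.log(thinkingBudget(8192, 'none'))   // 0, i.e. reasoning fully disabled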


@@ -2140,9 +2140,9 @@
   languageName: unknown
   linkType: soft
-"@cherrystudio/openai@npm:^6.5.0":
-  version: 6.5.0
-  resolution: "@cherrystudio/openai@npm:6.5.0"
+"@cherrystudio/openai@npm:^6.9.0":
+  version: 6.9.0
+  resolution: "@cherrystudio/openai@npm:6.9.0"
   peerDependencies:
     ws: ^8.18.0
     zod: ^3.25 || ^4.0
@@ -2153,7 +2153,7 @@
       optional: true
   bin:
     openai: bin/cli
-  checksum: 10c0/0f6cafb97aec17037d5ddcccc88e4b4a9c8de77a989a35bab2394b682a1a69e8a9343e8ee5eb8107d5c495970dbf3567642f154c033f7afc3bf078078666a92e
+  checksum: 10c0/9c51ef33c5b9d08041a115e3d6a8158412a379998a0eae186923d5bdcc808b634c1fef4471a1d499bb8c624b04c075167bc90a1a60a805005c0657ecebbb58d0
   languageName: node
   linkType: hard
@@ -9919,7 +9919,7 @@
     "@cherrystudio/embedjs-ollama": "npm:^0.1.31"
     "@cherrystudio/embedjs-openai": "npm:^0.1.31"
     "@cherrystudio/extension-table-plus": "workspace:^"
-    "@cherrystudio/openai": "npm:^6.5.0"
+    "@cherrystudio/openai": "npm:^6.9.0"
     "@dnd-kit/core": "npm:^6.3.1"
     "@dnd-kit/modifiers": "npm:^9.0.0"
     "@dnd-kit/sortable": "npm:^10.0.0"