diff --git a/package.json b/package.json
index c6b283a9ac..2f8c0c920d 100644
--- a/package.json
+++ b/package.json
@@ -135,7 +135,7 @@
     "@cherrystudio/embedjs-ollama": "^0.1.31",
     "@cherrystudio/embedjs-openai": "^0.1.31",
     "@cherrystudio/extension-table-plus": "workspace:^",
-    "@cherrystudio/openai": "^6.5.0",
+    "@cherrystudio/openai": "^6.9.0",
     "@dnd-kit/core": "^6.3.1",
     "@dnd-kit/modifiers": "^9.0.0",
     "@dnd-kit/sortable": "^10.0.0",
diff --git a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIResponseAPIClient.ts b/src/renderer/src/aiCore/legacy/clients/openai/OpenAIResponseAPIClient.ts
index 40cace50ea..0f72887196 100644
--- a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIResponseAPIClient.ts
+++ b/src/renderer/src/aiCore/legacy/clients/openai/OpenAIResponseAPIClient.ts
@@ -297,7 +297,31 @@ export class OpenAIResponseAPIClient extends OpenAIBaseClient<
   private convertResponseToMessageContent(response: OpenAI.Responses.Response): ResponseInput {
     const content: OpenAI.Responses.ResponseInput = []
 
-    content.push(...response.output)
+    response.output.forEach((item) => {
+      if (item.type !== 'apply_patch_call' && item.type !== 'apply_patch_call_output') {
+        content.push(item)
+      } else if (item.type === 'apply_patch_call') {
+        if (item.operation !== undefined) {
+          const applyPatchToolCall: OpenAI.Responses.ResponseInputItem.ApplyPatchCall = {
+            ...item,
+            operation: item.operation
+          }
+          content.push(applyPatchToolCall)
+        } else {
+          logger.warn('Undefined tool call operation for ApplyPatchToolCall.')
+        }
+      } else if (item.type === 'apply_patch_call_output') {
+        if (item.output !== undefined) {
+          const applyPatchToolCallOutput: OpenAI.Responses.ResponseInputItem.ApplyPatchCallOutput = {
+            ...item,
+            output: item.output === null ? undefined : item.output
+          }
+          content.push(applyPatchToolCallOutput)
+        } else {
+          logger.warn('Undefined output for ApplyPatchToolCallOutput.')
+        }
+      }
+    })
 
     return content
   }
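Reviewer note on `convertResponseToMessageContent`: output items and input items are distinct types in the SDK, and the `apply_patch_call` output item carries an optional `operation` while the corresponding input item requires one, so items must be narrowed before being echoed back as input. A minimal sketch of the pattern, using simplified stand-in types rather than the real SDK definitions:

```ts
// Simplified stand-ins for the SDK shapes (illustrative only, not the real types).
type PatchOp = { kind: 'create_file' | 'update_file' | 'delete_file'; path: string }
type OutputPatchCall = { type: 'apply_patch_call'; id: string; operation?: PatchOp }
type InputPatchCall = { type: 'apply_patch_call'; id: string; operation: PatchOp }

// Copying the item with the checked field re-assigned narrows `operation`
// from `PatchOp | undefined` to `PatchOp`, matching the input item type.
function toInputItem(item: OutputPatchCall): InputPatchCall | undefined {
  if (item.operation === undefined) return undefined // caller logs a warning and drops it
  return { ...item, operation: item.operation }
}
```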
diff --git a/src/renderer/src/aiCore/utils/options.ts b/src/renderer/src/aiCore/utils/options.ts
index 88f556438b..2d9f40329d 100644
--- a/src/renderer/src/aiCore/utils/options.ts
+++ b/src/renderer/src/aiCore/utils/options.ts
@@ -1,4 +1,5 @@
 import { baseProviderIdSchema, customProviderIdSchema } from '@cherrystudio/ai-core/provider'
+import { loggerService } from '@logger'
 import { isOpenAIModel, isQwenMTModel, isSupportFlexServiceTierModel } from '@renderer/config/models'
 import { isSupportServiceTierProvider } from '@renderer/config/providers'
 import { mapLanguageToQwenMTModel } from '@renderer/config/translate'
@@ -26,6 +27,8 @@ import {
 } from './reasoning'
 import { getWebSearchParams } from './websearch'
 
+const logger = loggerService.withContext('aiCore.utils.options')
+
 // copy from BaseApiClient.ts
 const getServiceTier = (model: Model, provider: Provider) => {
   const serviceTierSetting = provider.serviceTier
@@ -70,6 +73,7 @@ export function buildProviderOptions(
     enableGenerateImage: boolean
   }
 ): Record<string, any> {
+  logger.debug('buildProviderOptions', { assistant, model, actualProvider, capabilities })
   const rawProviderId = getAiSdkProviderId(actualProvider)
   // Build provider-specific options
   let providerSpecificOptions: Record<string, any> = {}
diff --git a/src/renderer/src/assets/images/models/gpt-5.1-chat.png b/src/renderer/src/assets/images/models/gpt-5.1-chat.png
new file mode 100644
index 0000000000..52ddd61136
Binary files /dev/null and b/src/renderer/src/assets/images/models/gpt-5.1-chat.png differ
diff --git a/src/renderer/src/assets/images/models/gpt-5.1-codex-mini.png b/src/renderer/src/assets/images/models/gpt-5.1-codex-mini.png
new file mode 100644
index 0000000000..638c3fea92
Binary files /dev/null and b/src/renderer/src/assets/images/models/gpt-5.1-codex-mini.png differ
diff --git a/src/renderer/src/assets/images/models/gpt-5.1-codex.png b/src/renderer/src/assets/images/models/gpt-5.1-codex.png
new file mode 100644
index 0000000000..ec12a5e901
Binary files /dev/null and b/src/renderer/src/assets/images/models/gpt-5.1-codex.png differ
diff --git a/src/renderer/src/assets/images/models/gpt-5.1.png b/src/renderer/src/assets/images/models/gpt-5.1.png
new file mode 100644
index 0000000000..d7f57f8c58
Binary files /dev/null and b/src/renderer/src/assets/images/models/gpt-5.1.png differ
diff --git a/src/renderer/src/config/models/logo.ts b/src/renderer/src/config/models/logo.ts
index ce592ad466..77f4f5fb9d 100644
--- a/src/renderer/src/config/models/logo.ts
+++ b/src/renderer/src/config/models/logo.ts
@@ -59,6 +59,10 @@ import {
 } from '@renderer/assets/images/models/gpt_dark.png'
 import ChatGPTImageModelLogo from '@renderer/assets/images/models/gpt_image_1.png'
 import ChatGPTo1ModelLogo from '@renderer/assets/images/models/gpt_o1.png'
+import GPT51ModelLogo from '@renderer/assets/images/models/gpt-5.1.png'
+import GPT51ChatModelLogo from '@renderer/assets/images/models/gpt-5.1-chat.png'
+import GPT51CodexModelLogo from '@renderer/assets/images/models/gpt-5.1-codex.png'
+import GPT51CodexMiniModelLogo from '@renderer/assets/images/models/gpt-5.1-codex-mini.png'
 import GPT5ModelLogo from '@renderer/assets/images/models/gpt-5.png'
 import GPT5ChatModelLogo from '@renderer/assets/images/models/gpt-5-chat.png'
 import GPT5CodexModelLogo from '@renderer/assets/images/models/gpt-5-codex.png'
@@ -182,6 +186,10 @@ export function getModelLogoById(modelId: string): string | undefined {
     'gpt-5-nano': GPT5NanoModelLogo,
     'gpt-5-chat': GPT5ChatModelLogo,
     'gpt-5-codex': GPT5CodexModelLogo,
+    'gpt-5.1-codex-mini': GPT51CodexMiniModelLogo,
+    'gpt-5.1-codex': GPT51CodexModelLogo,
+    'gpt-5.1-chat': GPT51ChatModelLogo,
+    'gpt-5.1': GPT51ModelLogo,
     'gpt-5': GPT5ModelLogo,
     gpts: isLight ? ChatGPT4ModelLogo : ChatGPT4ModelLogoDark,
     'gpt-oss(?:-[\\w-]+)': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
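The four new `gpt-5.1*` entries are deliberately inserted above `'gpt-5'`, most specific first. Assuming `getModelLogoById` walks the map in insertion order and treats keys as patterns (the existing `'gpt-oss(?:-[\w-]+)'` key suggests regex matching), a shorter key listed earlier would shadow the new logos — a sketch of that failure mode:

```ts
// First-match-wins lookup over keys treated as regexes (assumed behavior,
// inferred from the regex-style keys in the real map).
const logoMap: Record<string, string> = {
  'gpt-5.1-codex-mini': 'gpt-5.1-codex-mini.png',
  'gpt-5.1-codex': 'gpt-5.1-codex.png',
  'gpt-5.1-chat': 'gpt-5.1-chat.png',
  'gpt-5.1': 'gpt-5.1.png',
  'gpt-5': 'gpt-5.png'
}

function findLogo(modelId: string): string | undefined {
  for (const [pattern, logo] of Object.entries(logoMap)) {
    if (new RegExp(pattern, 'i').test(modelId)) return logo
  }
  return undefined
}

findLogo('gpt-5.1-codex-mini-2025-11') // 'gpt-5.1-codex-mini.png'
// With 'gpt-5' listed first it would match every 5.1 id,
// since 'gpt-5' matches inside 'gpt-5.1-...'.
```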
diff --git a/src/renderer/src/config/models/reasoning.ts b/src/renderer/src/config/models/reasoning.ts
index 3a4d97e592..36b2954875 100644
--- a/src/renderer/src/config/models/reasoning.ts
+++ b/src/renderer/src/config/models/reasoning.ts
@@ -8,7 +8,7 @@ import type {
 import { getLowerBaseModelName, isUserSelectedModelType } from '@renderer/utils'
 
 import { isEmbeddingModel, isRerankModel } from './embedding'
-import { isGPT5SeriesModel } from './utils'
+import { isGPT5SeriesModel, isGPT51SeriesModel } from './utils'
 import { isTextToImageModel } from './vision'
 import { GEMINI_FLASH_MODEL_REGEX, isOpenAIDeepResearchModel } from './websearch'
 
@@ -24,6 +24,8 @@ export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
   openai_deep_research: ['medium'] as const,
   gpt5: ['minimal', 'low', 'medium', 'high'] as const,
   gpt5_codex: ['low', 'medium', 'high'] as const,
+  gpt5_1: ['none', 'low', 'medium', 'high'] as const,
+  gpt5_1_codex: ['none', 'medium', 'high'] as const,
   grok: ['low', 'high'] as const,
   grok4_fast: ['auto'] as const,
   gemini: ['low', 'medium', 'high', 'auto'] as const,
@@ -41,24 +43,26 @@
 
 // Mapping from model type to supported thinking options
 export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
-  default: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const,
+  default: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const,
   o: MODEL_SUPPORTED_REASONING_EFFORT.o,
   openai_deep_research: MODEL_SUPPORTED_REASONING_EFFORT.openai_deep_research,
   gpt5: [...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const,
   gpt5_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_codex,
+  gpt5_1: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1,
+  gpt5_1_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex,
   grok: MODEL_SUPPORTED_REASONING_EFFORT.grok,
-  grok4_fast: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
-  gemini: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
+  grok4_fast: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
+  gemini: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
   gemini_pro: MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro,
-  qwen: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const,
+  qwen: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const,
   qwen_thinking: MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking,
-  doubao: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const,
-  doubao_no_auto: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_no_auto] as const,
+  doubao: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const,
+  doubao_no_auto: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_no_auto] as const,
   doubao_after_251015: MODEL_SUPPORTED_REASONING_EFFORT.doubao_after_251015,
-  hunyuan: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const,
-  zhipu: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const,
+  hunyuan: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const,
+  zhipu: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const,
   perplexity: MODEL_SUPPORTED_REASONING_EFFORT.perplexity,
-  deepseek_hybrid: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.deepseek_hybrid] as const
+  deepseek_hybrid: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.deepseek_hybrid] as const
 } as const
 
 const withModelIdAndNameAsId = <T>(model: Model, fn: (model: Model) => T): { idResult: T; nameResult: T } => {
@@ -75,7 +79,13 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
   if (isOpenAIDeepResearchModel(model)) {
     return 'openai_deep_research'
   }
-  if (isGPT5SeriesModel(model)) {
+  if (isGPT51SeriesModel(model)) {
+    if (modelId.includes('codex')) {
+      thinkingModelType = 'gpt5_1_codex'
+    } else {
+      thinkingModelType = 'gpt5_1'
+    }
+  } else if (isGPT5SeriesModel(model)) {
     if (modelId.includes('codex')) {
       thinkingModelType = 'gpt5_codex'
     } else {
@@ -526,7 +536,7 @@ export function isSupportedReasoningEffortOpenAIModel(model: Model): boolean {
     modelId.includes('o3') ||
     modelId.includes('o4') ||
     modelId.includes('gpt-oss') ||
-    (isGPT5SeriesModel(model) && !modelId.includes('chat'))
+    ((isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) && !modelId.includes('chat'))
   )
 }
 
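Net effect for GPT-5.1: `_getThinkModelType` resolves codex variants to `gpt5_1_codex` and the rest to `gpt5_1`, and `none` joins the selectable efforts — unlike the removed UI-only `off`, `none` is an effort value the 5.1 models accept. A hypothetical lookup (names from this diff; the wiring around it is assumed):

```ts
// Hypothetical usage: which thinking options the UI offers for a 5.1 model.
import { MODEL_SUPPORTED_OPTIONS } from '@renderer/config/models/reasoning'

// 'gpt-5.1-codex' -> thinking type 'gpt5_1_codex' (per _getThinkModelType above)
const options = MODEL_SUPPORTED_OPTIONS['gpt5_1_codex']
// ['none', 'medium', 'high'] — note: no 'minimal'/'low', unlike plain gpt5_1
```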
diff --git a/src/renderer/src/config/models/utils.ts b/src/renderer/src/config/models/utils.ts
index 743f722528..4197a516b3 100644
--- a/src/renderer/src/config/models/utils.ts
+++ b/src/renderer/src/config/models/utils.ts
@@ -54,7 +54,7 @@ export function isSupportedFlexServiceTier(model: Model): boolean {
 
 export function isSupportVerbosityModel(model: Model): boolean {
   const modelId = getLowerBaseModelName(model.id)
-  return isGPT5SeriesModel(model) && !modelId.includes('chat')
+  return (isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) && !modelId.includes('chat')
 }
 
 export function isOpenAIChatCompletionOnlyModel(model: Model): boolean {
@@ -227,12 +227,17 @@ export const isNotSupportSystemMessageModel = (model: Model): boolean => {
 
 export const isGPT5SeriesModel = (model: Model) => {
   const modelId = getLowerBaseModelName(model.id)
-  return modelId.includes('gpt-5')
+  return modelId.includes('gpt-5') && !modelId.includes('gpt-5.1')
 }
 
 export const isGPT5SeriesReasoningModel = (model: Model) => {
   const modelId = getLowerBaseModelName(model.id)
-  return modelId.includes('gpt-5') && !modelId.includes('chat')
+  return isGPT5SeriesModel(model) && !modelId.includes('chat')
+}
+
+export const isGPT51SeriesModel = (model: Model) => {
+  const modelId = getLowerBaseModelName(model.id)
+  return modelId.includes('gpt-5.1')
 }
 
 export const isGeminiModel = (model: Model) => {
diff --git a/src/renderer/src/config/providers.ts b/src/renderer/src/config/providers.ts
index 965c620ba9..0f2b6cfadd 100644
--- a/src/renderer/src/config/providers.ts
+++ b/src/renderer/src/config/providers.ts
@@ -67,7 +67,7 @@ import type {
   SystemProvider,
   SystemProviderId
 } from '@renderer/types'
-import { isSystemProvider, OpenAIServiceTiers } from '@renderer/types'
+import { isSystemProvider, OpenAIServiceTiers, SystemProviderIds } from '@renderer/types'
 
 import { TOKENFLUX_HOST } from './constant'
 import { glm45FlashModel, qwen38bModel, SYSTEM_MODELS } from './models'
@@ -1519,7 +1519,10 @@ const SUPPORT_URL_CONTEXT_PROVIDER_TYPES = [
 ] as const satisfies ProviderType[]
 
 export const isSupportUrlContextProvider = (provider: Provider) => {
-  return SUPPORT_URL_CONTEXT_PROVIDER_TYPES.some((type) => type === provider.type)
+  return (
+    SUPPORT_URL_CONTEXT_PROVIDER_TYPES.some((type) => type === provider.type) ||
+    provider.id === SystemProviderIds.cherryin
+  )
 }
 
 const SUPPORT_GEMINI_NATIVE_WEB_SEARCH_PROVIDERS = ['gemini', 'vertexai'] as const satisfies SystemProviderId[]
diff --git a/src/renderer/src/hooks/useAssistant.ts b/src/renderer/src/hooks/useAssistant.ts
index dc0f5c28e3..0571092012 100644
--- a/src/renderer/src/hooks/useAssistant.ts
+++ b/src/renderer/src/hooks/useAssistant.ts
@@ -123,9 +123,9 @@ export function useAssistant(id: string) {
       }
 
       updateAssistantSettings({
-        reasoning_effort: fallbackOption === 'off' ? undefined : fallbackOption,
-        reasoning_effort_cache: fallbackOption === 'off' ? undefined : fallbackOption,
-        qwenThinkMode: fallbackOption === 'off' ? undefined : true
+        reasoning_effort: fallbackOption === 'none' ? undefined : fallbackOption,
+        reasoning_effort_cache: fallbackOption === 'none' ? undefined : fallbackOption,
+        qwenThinkMode: fallbackOption === 'none' ? undefined : true
       })
     } else {
       // For supported options, stop updating the cache.
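The `isGPT5SeriesModel` guard is the subtle part of utils.ts: `'gpt-5.1'` contains `'gpt-5'` as a substring, so without the exclusion every 5.1 model would also be classified as GPT-5 and the new branch in `_getThinkModelType` would be unreachable. A quick check of the two predicates, reduced to plain functions over the id:

```ts
const isGPT5 = (id: string) => id.includes('gpt-5') && !id.includes('gpt-5.1')
const isGPT51 = (id: string) => id.includes('gpt-5.1')

isGPT5('gpt-5-codex') // true
isGPT5('gpt-5.1-codex') // false — now handled by the 5.1 branch
isGPT51('gpt-5.1-chat') // true
isGPT51('gpt-5-chat') // false
```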
diff --git a/src/renderer/src/i18n/label.ts b/src/renderer/src/i18n/label.ts
index f657fd0e08..bd74ecd452 100644
--- a/src/renderer/src/i18n/label.ts
+++ b/src/renderer/src/i18n/label.ts
@@ -311,7 +311,7 @@ export const getHttpMessageLabel = (key: string): string => {
 }
 
 const reasoningEffortOptionsKeyMap: Record<ThinkingOption, string> = {
-  off: 'assistants.settings.reasoning_effort.off',
+  none: 'assistants.settings.reasoning_effort.off',
   minimal: 'assistants.settings.reasoning_effort.minimal',
   high: 'assistants.settings.reasoning_effort.high',
   low: 'assistants.settings.reasoning_effort.low',
diff --git a/src/renderer/src/pages/home/Inputbar/tools/components/ThinkingButton.tsx b/src/renderer/src/pages/home/Inputbar/tools/components/ThinkingButton.tsx
index 132c3f2f38..51cbe87c1f 100644
--- a/src/renderer/src/pages/home/Inputbar/tools/components/ThinkingButton.tsx
+++ b/src/renderer/src/pages/home/Inputbar/tools/components/ThinkingButton.tsx
@@ -36,7 +36,7 @@ const ThinkingButton: FC = ({ quickPanel, model, assistantId }): ReactElement =
   const { assistant, updateAssistantSettings } = useAssistant(assistantId)
 
   const currentReasoningEffort = useMemo(() => {
-    return assistant.settings?.reasoning_effort || 'off'
+    return assistant.settings?.reasoning_effort || 'none'
   }, [assistant.settings?.reasoning_effort])
 
   // Determine which option types the current model supports
@@ -46,21 +46,21 @@
   const supportedOptions: ThinkingOption[] = useMemo(() => {
     if (modelType === 'doubao') {
       if (isDoubaoThinkingAutoModel(model)) {
-        return ['off', 'auto', 'high']
+        return ['none', 'auto', 'high']
       }
-      return ['off', 'high']
+      return ['none', 'high']
     }
     return MODEL_SUPPORTED_OPTIONS[modelType]
   }, [model, modelType])
 
   const onThinkingChange = useCallback(
     (option?: ThinkingOption) => {
-      const isEnabled = option !== undefined && option !== 'off'
+      const isEnabled = option !== undefined && option !== 'none'
      // Then update the settings
       if (!isEnabled) {
         updateAssistantSettings({
-          reasoning_effort: undefined,
-          reasoning_effort_cache: undefined,
+          reasoning_effort: option,
+          reasoning_effort_cache: option,
           qwenThinkMode: false
         })
         return
@@ -96,10 +96,10 @@
     }))
   }, [currentReasoningEffort, supportedOptions, onThinkingChange])
 
-  const isThinkingEnabled = currentReasoningEffort !== undefined && currentReasoningEffort !== 'off'
+  const isThinkingEnabled = currentReasoningEffort !== undefined && currentReasoningEffort !== 'none'
 
   const disableThinking = useCallback(() => {
-    onThinkingChange('off')
+    onThinkingChange('none')
   }, [onThinkingChange])
 
   const openQuickPanel = useCallback(() => {
@@ -116,7 +116,7 @@
       return
     }
 
-    if (isThinkingEnabled && supportedOptions.includes('off')) {
+    if (isThinkingEnabled && supportedOptions.includes('none')) {
       disableThinking()
       return
     }
@@ -146,13 +146,13 @@
-
+
         {ThinkingIcon(currentReasoningEffort)}
@@ -178,7 +178,7 @@ const ThinkingIcon = (option?: ThinkingOption) => {
     case 'auto':
       IconComponent = MdiLightbulbAutoOutline
       break
-    case 'off':
+    case 'none':
      IconComponent = MdiLightbulbOffOutline
       break
     default:
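Behavioral note on `onThinkingChange`: disabling thinking used to clear `reasoning_effort` to `undefined`; it now persists the chosen option, so an explicit `'none'` survives in settings and is distinguishable from "never configured". A reduced sketch of the before/after:

```ts
type Settings = { reasoning_effort?: 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'auto' }

// Before: turning thinking off erased the setting entirely.
const disableOld = (s: Settings): Settings => ({ ...s, reasoning_effort: undefined })

// After: 'none' is stored as a real value, so the UI (and providers that
// accept effort 'none', like GPT-5.1) can round-trip the user's choice.
const disableNew = (s: Settings): Settings => ({ ...s, reasoning_effort: 'none' })
```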
diff --git a/src/renderer/src/store/index.ts b/src/renderer/src/store/index.ts
index 4baeeddfde..edc23f7895 100644
--- a/src/renderer/src/store/index.ts
+++ b/src/renderer/src/store/index.ts
@@ -67,7 +67,7 @@ const persistedReducer = persistReducer(
   {
     key: 'cherry-studio',
     storage,
-    version: 174,
+    version: 175,
     blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs', 'toolPermissions'],
     migrate
   },
diff --git a/src/renderer/src/store/migrate.ts b/src/renderer/src/store/migrate.ts
index 10073de1f3..5a7f3b3a0c 100644
--- a/src/renderer/src/store/migrate.ts
+++ b/src/renderer/src/store/migrate.ts
@@ -2819,6 +2819,25 @@ const migrateConfig = {
       logger.error('migrate 174 error', error as Error)
       return state
     }
-  }
+  },
+  '175': (state: RootState) => {
+    try {
+      state.assistants.assistants.forEach((assistant) => {
+        // @ts-expect-error removed type 'off'
+        if (assistant.settings?.reasoning_effort === 'off') {
+          assistant.settings.reasoning_effort = 'none'
+        }
+        // @ts-expect-error removed type 'off'
+        if (assistant.settings?.reasoning_effort_cache === 'off') {
+          assistant.settings.reasoning_effort_cache = 'none'
+        }
+      })
+      logger.info('migrate 175 success')
+      return state
+    } catch (error) {
+      logger.error('migrate 175 error', error as Error)
+      return state
+    }
+  }
 }
 
diff --git a/src/renderer/src/types/index.ts b/src/renderer/src/types/index.ts
index c86e80a157..eb5d2fa1f5 100644
--- a/src/renderer/src/types/index.ts
+++ b/src/renderer/src/types/index.ts
@@ -83,7 +83,9 @@ const ThinkModelTypes = [
   'o',
   'openai_deep_research',
   'gpt5',
+  'gpt5_1',
   'gpt5_codex',
+  'gpt5_1_codex',
   'grok',
   'grok4_fast',
   'gemini',
@@ -100,7 +102,7 @@
 ] as const
 
 export type ReasoningEffortOption = NonNullable | 'auto'
-export type ThinkingOption = ReasoningEffortOption | 'off'
+export type ThinkingOption = ReasoningEffortOption
 export type ThinkingModelType = (typeof ThinkModelTypes)[number]
 export type ThinkingOptionConfig = Record<ThinkingModelType, readonly ThinkingOption[]>
 export type ReasoningEffortConfig = Record<ThinkingModelType, readonly ReasoningEffortOption[]>
@@ -111,6 +113,7 @@ export function isThinkModelType(type: string): type is ThinkingModelType {
 }
 
 export const EFFORT_RATIO: EffortRatio = {
+  none: 0,
   minimal: 0.05,
   low: 0.05,
   medium: 0.5,
diff --git a/yarn.lock b/yarn.lock
index ee14c1af2a..e7a6944f56 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -2140,9 +2140,9 @@ __metadata:
   languageName: unknown
   linkType: soft
 
-"@cherrystudio/openai@npm:^6.5.0":
-  version: 6.5.0
-  resolution: "@cherrystudio/openai@npm:6.5.0"
+"@cherrystudio/openai@npm:^6.9.0":
+  version: 6.9.0
+  resolution: "@cherrystudio/openai@npm:6.9.0"
   peerDependencies:
     ws: ^8.18.0
     zod: ^3.25 || ^4.0
@@ -2153,7 +2153,7 @@ __metadata:
     optional: true
   bin:
     openai: bin/cli
-  checksum: 10c0/0f6cafb97aec17037d5ddcccc88e4b4a9c8de77a989a35bab2394b682a1a69e8a9343e8ee5eb8107d5c495970dbf3567642f154c033f7afc3bf078078666a92e
+  checksum: 10c0/9c51ef33c5b9d08041a115e3d6a8158412a379998a0eae186923d5bdcc808b634c1fef4471a1d499bb8c624b04c075167bc90a1a60a805005c0657ecebbb58d0
   languageName: node
   linkType: hard
 
@@ -9919,7 +9919,7 @@ __metadata:
    "@cherrystudio/embedjs-ollama": "npm:^0.1.31"
    "@cherrystudio/embedjs-openai": "npm:^0.1.31"
    "@cherrystudio/extension-table-plus": "workspace:^"
-    "@cherrystudio/openai": "npm:^6.5.0"
+    "@cherrystudio/openai": "npm:^6.9.0"
    "@dnd-kit/core": "npm:^6.3.1"
    "@dnd-kit/modifiers": "npm:^9.0.0"
    "@dnd-kit/sortable": "npm:^10.0.0"
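The store version bump plus migration `'175'` rewrites persisted `'off'` values to `'none'` so existing assistants keep working after `ThinkingOption` drops `'off'`. The new `EFFORT_RATIO` entry keeps the budget math total: assuming the ratio scales a thinking-token budget elsewhere in the app (the consumer is not part of this diff), `none: 0` means an explicit "no thinking" allocates zero reasoning tokens:

```ts
// Hedged sketch of a plausible EFFORT_RATIO consumer (ratios from the diff;
// the budget function itself is assumed, not shown in this PR).
const EFFORT_RATIO = { none: 0, minimal: 0.05, low: 0.05, medium: 0.5 } as const

function thinkingBudget(maxTokens: number, effort: keyof typeof EFFORT_RATIO): number {
  return Math.floor(maxTokens * EFFORT_RATIO[effort])
}

thinkingBudget(4096, 'none') // 0 — explicit "off" reserves nothing
thinkingBudget(4096, 'medium') // 2048
```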