diff --git a/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch b/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch
similarity index 84%
rename from .yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch
rename to .yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch
index 973ddc62a..6fbe30e08 100644
--- a/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch
+++ b/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch
@@ -1,8 +1,8 @@
 diff --git a/dist/index.js b/dist/index.js
-index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a70ea2b5a2 100644
+index 130094d194ea1e8e7d3027d07d82465741192124..4d13dcee8c962ca9ee8f1c3d748f8ffe6a3cfb47 100644
 --- a/dist/index.js
 +++ b/dist/index.js
-@@ -274,6 +274,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
+@@ -290,6 +290,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
          message: import_v42.z.object({
            role: import_v42.z.literal("assistant").nullish(),
            content: import_v42.z.string().nullish(),
@@ -10,7 +10,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
          tool_calls: import_v42.z.array(
            import_v42.z.object({
              id: import_v42.z.string().nullish(),
-@@ -340,6 +341,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
+@@ -356,6 +357,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
          delta: import_v42.z.object({
            role: import_v42.z.enum(["assistant"]).nullish(),
            content: import_v42.z.string().nullish(),
@@ -18,7 +18,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
          tool_calls: import_v42.z.array(
            import_v42.z.object({
              index: import_v42.z.number(),
-@@ -795,6 +797,13 @@ var OpenAIChatLanguageModel = class {
+@@ -814,6 +816,13 @@ var OpenAIChatLanguageModel = class {
        if (text != null && text.length > 0) {
          content.push({ type: "text", text });
        }
@@ -32,7 +32,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
        for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
          content.push({
            type: "tool-call",
-@@ -876,6 +885,7 @@ var OpenAIChatLanguageModel = class {
+@@ -895,6 +904,7 @@ var OpenAIChatLanguageModel = class {
        };
        let metadataExtracted = false;
        let isActiveText = false;
@@ -40,7 +40,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
        const providerMetadata = { openai: {} };
        return {
          stream: response.pipeThrough(
-@@ -933,6 +943,21 @@ var OpenAIChatLanguageModel = class {
+@@ -952,6 +962,21 @@ var OpenAIChatLanguageModel = class {
                return;
              }
              const delta = choice.delta;
@@ -62,7 +62,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
              if (delta.content != null) {
                if (!isActiveText) {
                  controller.enqueue({ type: "text-start", id: "0" });
-@@ -1045,6 +1070,9 @@ var OpenAIChatLanguageModel = class {
+@@ -1064,6 +1089,9 @@ var OpenAIChatLanguageModel = class {
              }
            },
            flush(controller) {
diff --git a/package.json b/package.json
index 075b31309..58bdaf128 100644
--- a/package.json
+++ b/package.json
@@ -118,7 +118,7 @@
     "@ai-sdk/google-vertex": "^3.0.79",
     "@ai-sdk/huggingface": "^0.0.10",
     "@ai-sdk/mistral": "^2.0.24",
-    "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
+    "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
     "@ai-sdk/perplexity": "^2.0.20",
     "@ai-sdk/test-server": "^0.0.1",
     "@ant-design/v5-patch-for-react-19": "^1.0.3",
@@ -142,7 +142,7 @@
     "@cherrystudio/embedjs-ollama": "^0.1.31",
     "@cherrystudio/embedjs-openai": "^0.1.31",
     "@cherrystudio/extension-table-plus": "workspace:^",
-    "@cherrystudio/openai": "^6.9.0",
+    "@cherrystudio/openai": "^6.12.0",
     "@dnd-kit/core": "^6.3.1",
     "@dnd-kit/modifiers": "^9.0.0",
     "@dnd-kit/sortable": "^10.0.0",
@@ -414,7 +414,7 @@
     "@langchain/openai@npm:>=0.1.0 <0.6.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
     "@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
     "@langchain/openai@npm:>=0.2.0 <0.7.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
-    "@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
+    "@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
     "@ai-sdk/google@npm:^2.0.40": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
     "@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
   },
diff --git a/packages/aiCore/package.json b/packages/aiCore/package.json
index a648dcf3c..6fc0f5334 100644
--- a/packages/aiCore/package.json
+++ b/packages/aiCore/package.json
@@ -40,7 +40,7 @@
   },
   "dependencies": {
     "@ai-sdk/anthropic": "^2.0.49",
-    "@ai-sdk/azure": "^2.0.74",
+    "@ai-sdk/azure": "^2.0.87",
     "@ai-sdk/deepseek": "^1.0.31",
     "@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
     "@ai-sdk/provider": "^2.0.0",
diff --git a/src/renderer/src/aiCore/prepareParams/modelParameters.ts b/src/renderer/src/aiCore/prepareParams/modelParameters.ts
index 34c341828..58b4834f5 100644
--- a/src/renderer/src/aiCore/prepareParams/modelParameters.ts
+++ b/src/renderer/src/aiCore/prepareParams/modelParameters.ts
@@ -28,13 +28,14 @@ import { getAnthropicThinkingBudget } from '../utils/reasoning'
  * - Disabled for models that do not support temperature.
  * - Disabled for Claude 4.5 reasoning models when TopP is enabled and temperature is disabled.
  * Otherwise, returns the temperature value if the assistant has temperature enabled.
+ */
 export function getTemperature(assistant: Assistant, model: Model): number | undefined {
   if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
     return undefined
   }
-  if (!isSupportTemperatureModel(model)) {
+  if (!isSupportTemperatureModel(model, assistant)) {
     return undefined
   }
 
@@ -46,6 +47,10 @@ export function getTemperature(assistant: Assistant, model: Model): number | und
     return undefined
   }
 
+  return getTemperatureValue(assistant, model)
+}
+
+function getTemperatureValue(assistant: Assistant, model: Model): number | undefined {
   const assistantSettings = getAssistantSettings(assistant)
   let temperature = assistantSettings?.temperature
   if (temperature && isMaxTemperatureOneModel(model)) {
@@ -68,13 +73,17 @@ export function getTopP(assistant: Assistant, model: Model): number | undefined
   if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
     return undefined
   }
-  if (!isSupportTopPModel(model)) {
+  if (!isSupportTopPModel(model, assistant)) {
     return undefined
   }
   if (isTemperatureTopPMutuallyExclusiveModel(model) && assistant.settings?.enableTemperature) {
     return undefined
   }
 
+  return getTopPValue(assistant)
+}
+
+function getTopPValue(assistant: Assistant): number | undefined {
   const assistantSettings = getAssistantSettings(assistant)
   // FIXME: assistant.settings.enableTopP should be always a boolean value.
   const enableTopP = assistantSettings.enableTopP ?? DEFAULT_ASSISTANT_SETTINGS.enableTopP
diff --git a/src/renderer/src/aiCore/utils/reasoning.ts b/src/renderer/src/aiCore/utils/reasoning.ts
index 6d93a2e20..996d67676 100644
--- a/src/renderer/src/aiCore/utils/reasoning.ts
+++ b/src/renderer/src/aiCore/utils/reasoning.ts
@@ -13,11 +13,11 @@ import {
   isDoubaoSeedAfter251015,
   isDoubaoThinkingAutoModel,
   isGemini3ThinkingTokenModel,
-  isGPT5SeriesModel,
   isGPT51SeriesModel,
   isGrok4FastReasoningModel,
   isOpenAIDeepResearchModel,
   isOpenAIModel,
+  isOpenAIReasoningModel,
   isQwenAlwaysThinkModel,
   isQwenReasoningModel,
   isReasoningModel,
@@ -134,8 +134,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
   // https://creator.poe.com/docs/external-applications/openai-compatible-api#additional-considerations
   // Poe provider - supports custom bot parameters via extra_body
   if (provider.id === SystemProviderIds.poe) {
-    // GPT-5 series models use reasoning_effort parameter in extra_body
-    if (isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) {
+    if (isOpenAIReasoningModel(model)) {
       return {
         extra_body: {
           reasoning_effort: reasoningEffort === 'auto' ? 'medium' : reasoningEffort
@@ -635,6 +634,8 @@ export function getXAIReasoningParams(assistant: Assistant, model: Model): Pick<
     case 'low':
     case 'high':
       return { reasoningEffort }
+    case 'xhigh':
+      return { reasoningEffort: 'high' }
   }
 }
 
diff --git a/src/renderer/src/config/models/openai.ts b/src/renderer/src/config/models/openai.ts
index 4fc223405..7bc5b069c 100644
--- a/src/renderer/src/config/models/openai.ts
+++ b/src/renderer/src/config/models/openai.ts
@@ -35,6 +35,16 @@ export const isGPT5ProModel = (model: Model) => {
   return modelId.includes('gpt-5-pro')
 }
 
+export const isGPT52ProModel = (model: Model) => {
+  const modelId = getLowerBaseModelName(model.id)
+  return modelId.includes('gpt-5.2-pro')
+}
+
+export const isGPT51CodexMaxModel = (model: Model) => {
+  const modelId = getLowerBaseModelName(model.id)
+  return modelId.includes('gpt-5.1-codex-max')
+}
+
 export const isOpenAIOpenWeightModel = (model: Model) => {
   const modelId = getLowerBaseModelName(model.id)
   return modelId.includes('gpt-oss')
@@ -42,7 +52,7 @@ export const isOpenAIOpenWeightModel = (model: Model) => {
 
 export const isGPT5SeriesModel = (model: Model) => {
   const modelId = getLowerBaseModelName(model.id)
-  return modelId.includes('gpt-5') && !modelId.includes('gpt-5.1')
+  return modelId.includes('gpt-5') && !modelId.includes('gpt-5.1') && !modelId.includes('gpt-5.2')
 }
 
 export const isGPT5SeriesReasoningModel = (model: Model) => {
@@ -55,6 +65,11 @@ export const isGPT51SeriesModel = (model: Model) => {
   return modelId.includes('gpt-5.1')
 }
 
+export const isGPT52SeriesModel = (model: Model) => {
+  const modelId = getLowerBaseModelName(model.id)
+  return modelId.includes('gpt-5.2')
+}
+
 export function isSupportVerbosityModel(model: Model): boolean {
   const modelId = getLowerBaseModelName(model.id)
   return (isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) && !modelId.includes('chat')
@@ -86,7 +101,7 @@ export function isSupportedReasoningEffortOpenAIModel(model: Model): boolean {
     modelId.includes('o3') ||
     modelId.includes('o4') ||
     modelId.includes('gpt-oss') ||
-    ((isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) && !modelId.includes('chat'))
+    ((isGPT5SeriesModel(model) || isGPT51SeriesModel(model) || isGPT52SeriesModel(model)) && !modelId.includes('chat'))
   )
 }
 
diff --git a/src/renderer/src/config/models/reasoning.ts b/src/renderer/src/config/models/reasoning.ts
index 86d3fe97d..bd525a43a 100644
--- a/src/renderer/src/config/models/reasoning.ts
+++ b/src/renderer/src/config/models/reasoning.ts
@@ -11,7 +11,10 @@ import { isEmbeddingModel, isRerankModel } from './embedding'
 import {
   isGPT5ProModel,
   isGPT5SeriesModel,
+  isGPT51CodexMaxModel,
   isGPT51SeriesModel,
+  isGPT52ProModel,
+  isGPT52SeriesModel,
   isOpenAIDeepResearchModel,
   isOpenAIReasoningModel,
   isSupportedReasoningEffortOpenAIModel
@@ -33,7 +36,10 @@ export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
   gpt5_codex: ['low', 'medium', 'high'] as const,
   gpt5_1: ['none', 'low', 'medium', 'high'] as const,
   gpt5_1_codex: ['none', 'medium', 'high'] as const,
+  gpt5_1_codex_max: ['none', 'medium', 'high', 'xhigh'] as const,
+  gpt5_2: ['none', 'low', 'medium', 'high', 'xhigh'] as const,
   gpt5pro: ['high'] as const,
+  gpt52pro: ['medium', 'high', 'xhigh'] as const,
   grok: ['low', 'high'] as const,
   grok4_fast: ['auto'] as const,
   gemini: ['low', 'medium', 'high', 'auto'] as const,
@@ -60,6 +66,9 @@ export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
   gpt5_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_codex,
   gpt5_1: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1,
   gpt5_1_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex,
+  gpt5_2: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_2,
+  gpt5_1_codex_max: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex_max,
+  gpt52pro: MODEL_SUPPORTED_REASONING_EFFORT.gpt52pro,
   grok: MODEL_SUPPORTED_REASONING_EFFORT.grok,
   grok4_fast: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
   gemini: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
@@ -84,6 +93,7 @@ const withModelIdAndNameAsId = (model: Model, fn: (model: Model) => T): { idR
   }
 }
 
+// TODO: add ut
 const _getThinkModelType = (model: Model): ThinkingModelType => {
   let thinkingModelType: ThinkingModelType = 'default'
   const modelId = getLowerBaseModelName(model.id)
@@ -93,9 +103,17 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
   if (isGPT51SeriesModel(model)) {
     if (modelId.includes('codex')) {
       thinkingModelType = 'gpt5_1_codex'
+      if (isGPT51CodexMaxModel(model)) {
+        thinkingModelType = 'gpt5_1_codex_max'
+      }
     } else {
       thinkingModelType = 'gpt5_1'
     }
+  } else if (isGPT52SeriesModel(model)) {
+    thinkingModelType = 'gpt5_2'
+    if (isGPT52ProModel(model)) {
+      thinkingModelType = 'gpt52pro'
+    }
   } else if (isGPT5SeriesModel(model)) {
     if (modelId.includes('codex')) {
       thinkingModelType = 'gpt5_codex'
diff --git a/src/renderer/src/config/models/utils.ts b/src/renderer/src/config/models/utils.ts
index 9ae8defa7..651655093 100644
--- a/src/renderer/src/config/models/utils.ts
+++ b/src/renderer/src/config/models/utils.ts
@@ -1,5 +1,6 @@
 import type OpenAI from '@cherrystudio/openai'
 import { isEmbeddingModel, isRerankModel } from '@renderer/config/models/embedding'
+import type { Assistant } from '@renderer/types'
 import { type Model, SystemProviderIds } from '@renderer/types'
 import type { OpenAIVerbosity, ValidOpenAIVerbosity } from '@renderer/types/aiCoreTypes'
 import { getLowerBaseModelName } from '@renderer/utils'
@@ -8,6 +9,7 @@ import {
   isGPT5ProModel,
   isGPT5SeriesModel,
   isGPT51SeriesModel,
+  isGPT52SeriesModel,
   isOpenAIChatCompletionOnlyModel,
   isOpenAIOpenWeightModel,
   isOpenAIReasoningModel,
@@ -48,13 +50,16 @@ export function isSupportedModel(model: OpenAI.Models.Model): boolean {
  * @param model - The model to check
  * @returns true if the model supports temperature parameter
  */
-export function isSupportTemperatureModel(model: Model | undefined | null): boolean {
+export function isSupportTemperatureModel(model: Model | undefined | null, assistant?: Assistant): boolean {
   if (!model) {
     return false
   }
 
   // OpenAI reasoning models (except open weight) don't support temperature
   if (isOpenAIReasoningModel(model) && !isOpenAIOpenWeightModel(model)) {
+    if (isGPT52SeriesModel(model) && assistant?.settings?.reasoning_effort === 'none') {
+      return true
+    }
     return false
   }
 
@@ -76,13 +81,16 @@ export function isSupportTopPModel(model: Model | undefined | null): bool
  * @param model - The model to check
  * @returns true if the model supports top_p parameter
  */
-export function isSupportTopPModel(model: Model | undefined | null): boolean {
+export function isSupportTopPModel(model: Model | undefined | null, assistant?: Assistant): boolean {
   if (!model) {
     return false
   }
 
   // OpenAI reasoning models (except open weight) don't support top_p
   if (isOpenAIReasoningModel(model) && !isOpenAIOpenWeightModel(model)) {
+    if (isGPT52SeriesModel(model) && assistant?.settings?.reasoning_effort === 'none') {
+      return true
+    }
     return false
   }
 
diff --git a/src/renderer/src/i18n/label.ts b/src/renderer/src/i18n/label.ts
index 7a6ad843d..434fb415f 100644
--- a/src/renderer/src/i18n/label.ts
+++ b/src/renderer/src/i18n/label.ts
@@ -316,7 +316,8 @@ const reasoningEffortOptionsKeyMap: Record = {
   high: 'assistants.settings.reasoning_effort.high',
   low: 'assistants.settings.reasoning_effort.low',
   medium: 'assistants.settings.reasoning_effort.medium',
-  auto: 'assistants.settings.reasoning_effort.default'
+  auto: 'assistants.settings.reasoning_effort.default',
+  xhigh: 'assistants.settings.reasoning_effort.xhigh'
 } as const
 
 export const getReasoningEffortOptionsLabel = (key: string): string => {
diff --git a/src/renderer/src/i18n/locales/en-us.json b/src/renderer/src/i18n/locales/en-us.json
index 09a15845e..86f390032 100644
--- a/src/renderer/src/i18n/locales/en-us.json
+++ b/src/renderer/src/i18n/locales/en-us.json
@@ -546,7 +546,8 @@
       "low": "Low",
       "medium": "Medium",
       "minimal": "Minimal",
-      "off": "Off"
+      "off": "Off",
+      "xhigh": "Extra High"
     },
     "regular_phrases": {
       "add": "Add Phrase",
diff --git a/src/renderer/src/i18n/locales/zh-cn.json b/src/renderer/src/i18n/locales/zh-cn.json
index bb7bbd280..a205408c4 100644
--- a/src/renderer/src/i18n/locales/zh-cn.json
+++ b/src/renderer/src/i18n/locales/zh-cn.json
@@ -546,7 +546,8 @@
       "low": "浮想",
       "medium": "斟酌",
       "minimal": "微念",
-      "off": "关闭"
+      "off": "关闭",
+      "xhigh": "穷究"
     },
     "regular_phrases": {
       "add": "添加短语",
diff --git a/src/renderer/src/i18n/locales/zh-tw.json b/src/renderer/src/i18n/locales/zh-tw.json
index e4f3921ae..146c9faae 100644
--- a/src/renderer/src/i18n/locales/zh-tw.json
+++ b/src/renderer/src/i18n/locales/zh-tw.json
@@ -546,7 +546,8 @@
       "low": "稍微思考",
       "medium": "正常思考",
       "minimal": "最少思考",
-      "off": "關閉"
+      "off": "關閉",
+      "xhigh": "極力思考"
     },
     "regular_phrases": {
      "add": "添加短语",
diff --git a/src/renderer/src/i18n/translate/de-de.json b/src/renderer/src/i18n/translate/de-de.json
index d0db25d93..cbb5bc637 100644
--- a/src/renderer/src/i18n/translate/de-de.json
+++ b/src/renderer/src/i18n/translate/de-de.json
@@ -546,7 +546,8 @@
       "low": "Spontan",
       "medium": "Überlegt",
       "minimal": "Minimal",
-      "off": "Aus"
+      "off": "Aus",
+      "xhigh": "Extra hoch"
     },
     "regular_phrases": {
       "add": "Phrase hinzufügen",
diff --git a/src/renderer/src/i18n/translate/el-gr.json b/src/renderer/src/i18n/translate/el-gr.json
index 9111a2359..e26abd58f 100644
--- a/src/renderer/src/i18n/translate/el-gr.json
+++ b/src/renderer/src/i18n/translate/el-gr.json
@@ -546,7 +546,8 @@
       "low": "Μικρό",
       "medium": "Μεσαίο",
       "minimal": "ελάχιστος",
-      "off": "Απενεργοποίηση"
+      "off": "Απενεργοποίηση",
+      "xhigh": "[to be translated]:Extra High"
     },
     "regular_phrases": {
       "add": "Προσθήκη φράσης",
diff --git a/src/renderer/src/i18n/translate/es-es.json b/src/renderer/src/i18n/translate/es-es.json
index b269bb2c6..4316c8061 100644
--- a/src/renderer/src/i18n/translate/es-es.json
+++ b/src/renderer/src/i18n/translate/es-es.json
@@ -546,7 +546,8 @@
       "low": "Corto",
       "medium": "Medio",
       "minimal": "minimal",
-      "off": "Apagado"
+      "off": "Apagado",
+      "xhigh": "[to be translated]:Extra High"
     },
     "regular_phrases": {
       "add": "Agregar frase",
diff --git a/src/renderer/src/i18n/translate/fr-fr.json b/src/renderer/src/i18n/translate/fr-fr.json
index b528283d3..66b9fef86 100644
--- a/src/renderer/src/i18n/translate/fr-fr.json
+++ b/src/renderer/src/i18n/translate/fr-fr.json
@@ -546,7 +546,8 @@
       "low": "Court",
       "medium": "Moyen",
       "minimal": "minimal",
-      "off": "Off"
+      "off": "Off",
+      "xhigh": "[to be translated]:Extra High"
     },
     "regular_phrases": {
       "add": "Добавить фразу",
diff --git a/src/renderer/src/i18n/translate/ja-jp.json b/src/renderer/src/i18n/translate/ja-jp.json
index 4d6fd8496..493d69358 100644
--- a/src/renderer/src/i18n/translate/ja-jp.json
+++ b/src/renderer/src/i18n/translate/ja-jp.json
@@ -546,7 +546,8 @@
       "low": "少しの思考",
       "medium": "普通の思考",
       "minimal": "最小限の思考",
-      "off": "オフ"
+      "off": "オフ",
+      "xhigh": "[to be translated]:Extra High"
     },
     "regular_phrases": {
       "add": "プロンプトを追加",
diff --git a/src/renderer/src/i18n/translate/pt-pt.json b/src/renderer/src/i18n/translate/pt-pt.json
index 5acf21cb1..fba1a8e70 100644
--- a/src/renderer/src/i18n/translate/pt-pt.json
+++ b/src/renderer/src/i18n/translate/pt-pt.json
@@ -546,7 +546,8 @@
       "low": "Curto",
       "medium": "Médio",
       "minimal": "mínimo",
-      "off": "Desligado"
+      "off": "Desligado",
+      "xhigh": "[to be translated]:Extra High"
     },
     "regular_phrases": {
       "add": "Adicionar Frase",
diff --git a/src/renderer/src/i18n/translate/ru-ru.json b/src/renderer/src/i18n/translate/ru-ru.json
index 482b6b94d..297233640 100644
--- a/src/renderer/src/i18n/translate/ru-ru.json
+++ b/src/renderer/src/i18n/translate/ru-ru.json
@@ -546,7 +546,8 @@
       "low": "Меньше думать",
       "medium": "Среднее",
       "minimal": "минимальный",
-      "off": "Выключить"
+      "off": "Выключить",
+      "xhigh": "[to be translated]:Extra High"
     },
     "regular_phrases": {
       "add": "Добавить подсказку",
diff --git a/src/renderer/src/pages/home/Inputbar/tools/components/ThinkingButton.tsx b/src/renderer/src/pages/home/Inputbar/tools/components/ThinkingButton.tsx
index 96e7adca9..bcc395c53 100644
--- a/src/renderer/src/pages/home/Inputbar/tools/components/ThinkingButton.tsx
+++ b/src/renderer/src/pages/home/Inputbar/tools/components/ThinkingButton.tsx
@@ -5,7 +5,8 @@ import {
   MdiLightbulbOn,
   MdiLightbulbOn30,
   MdiLightbulbOn50,
-  MdiLightbulbOn80
+  MdiLightbulbOn80,
+  MdiLightbulbOn90
 } from '@renderer/components/Icons/SVGIcon'
 import { QuickPanelReservedSymbol, useQuickPanel } from '@renderer/components/QuickPanel'
 import {
@@ -185,6 +186,9 @@ const ThinkingIcon = (props: { option?: ThinkingOption; isFixedReasoning?: boole
       IconComponent = MdiLightbulbOn80
       break
     case 'high':
+      IconComponent = MdiLightbulbOn90
+      break
+    case 'xhigh':
       IconComponent = MdiLightbulbOn
       break
     case 'auto':
diff --git a/src/renderer/src/types/index.ts b/src/renderer/src/types/index.ts
index 128d2be70..6e7e4e41e 100644
--- a/src/renderer/src/types/index.ts
+++ b/src/renderer/src/types/index.ts
@@ -88,7 +88,10 @@ const ThinkModelTypes = [
   'gpt5_1',
   'gpt5_codex',
   'gpt5_1_codex',
+  'gpt5_1_codex_max',
+  'gpt5_2',
   'gpt5pro',
+  'gpt52pro',
   'grok',
   'grok4_fast',
   'gemini',
@@ -122,6 +125,7 @@ export const EFFORT_RATIO: EffortRatio = {
   low: 0.05,
   medium: 0.5,
   high: 0.8,
+  xhigh: 0.9,
   auto: 2
 }
 
diff --git a/src/renderer/src/types/sdk.ts b/src/renderer/src/types/sdk.ts
index 33ea9286a..083e45054 100644
--- a/src/renderer/src/types/sdk.ts
+++ b/src/renderer/src/types/sdk.ts
@@ -74,13 +74,13 @@ export type RequestOptions = Anthropic.RequestOptions | OpenAI.RequestOptions |
  */
 type OpenAIParamsPurified = Omit
-
+type OpenAIReasoningEffort = NonNullable | 'auto'
 export type ReasoningEffortOptionalParams = {
   thinking?: { type: 'disabled' | 'enabled' | 'auto'; budget_tokens?: number }
   reasoning?: { max_tokens?: number; exclude?: boolean; effort?: string; enabled?: boolean } | OpenAI.Reasoning
-  reasoningEffort?: OpenAI.Chat.Completions.ChatCompletionCreateParams['reasoning_effort'] | 'none' | 'auto'
+  reasoningEffort?: OpenAIReasoningEffort
   // WARN: This field will be overwrite to undefined by aisdk if the provider is openai-compatible. Use reasoningEffort instead.
-  reasoning_effort?: OpenAI.Chat.Completions.ChatCompletionCreateParams['reasoning_effort'] | 'none' | 'auto'
+  reasoning_effort?: OpenAIReasoningEffort
   enable_thinking?: boolean
   thinking_budget?: number
   incremental_output?: boolean
@@ -100,7 +100,7 @@ export type ReasoningEffortOptionalParams = {
       type: 'enabled' | 'disabled'
     }
     thinking_budget?: number
-    reasoning_effort?: OpenAI.Chat.Completions.ChatCompletionCreateParams['reasoning_effort'] | 'auto'
+    reasoning_effort?: OpenAIReasoningEffort
   }
   disable_reasoning?: boolean
   // Add any other potential reasoning-related keys here if they exist
diff --git a/yarn.lock b/yarn.lock
index 51de88161..6e933257d 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -102,16 +102,16 @@
   languageName: node
   linkType: hard
 
-"@ai-sdk/azure@npm:^2.0.74":
-  version: 2.0.74
-  resolution: "@ai-sdk/azure@npm:2.0.74"
+"@ai-sdk/azure@npm:^2.0.87":
+  version: 2.0.87
+  resolution: "@ai-sdk/azure@npm:2.0.87"
   dependencies:
-    "@ai-sdk/openai": "npm:2.0.72"
+    "@ai-sdk/openai": "npm:2.0.85"
     "@ai-sdk/provider": "npm:2.0.0"
-    "@ai-sdk/provider-utils": "npm:3.0.17"
+    "@ai-sdk/provider-utils": "npm:3.0.19"
   peerDependencies:
     zod: ^3.25.76 || ^4.1.8
-  checksum: 10c0/dccd1959ef43034a0559cdc862af7f351c0a997a56dbeb68b1c844f67d3ff7920f43890e1d18546600eeaac1c54f0c94943b6ce0b43ba4d44ddc3a829b8a71dd
+  checksum: 10c0/77b0c74966144c3ca715e8357bd36502bd7055edb74a4005d9537cf9175cd9b33df32164a5e3f1925b1d311ed1a4eaf5b8fad6abdb81e1b6c14ba5ea78479f34
   languageName: node
   linkType: hard
 
@@ -266,27 +266,27 @@
   languageName: node
   linkType: hard
 
-"@ai-sdk/openai@npm:2.0.72":
-  version: 2.0.72
-  resolution: "@ai-sdk/openai@npm:2.0.72"
+"@ai-sdk/openai@npm:2.0.85":
+  version: 2.0.85
+  resolution: "@ai-sdk/openai@npm:2.0.85"
   dependencies:
     "@ai-sdk/provider": "npm:2.0.0"
-    "@ai-sdk/provider-utils": "npm:3.0.17"
+    "@ai-sdk/provider-utils": "npm:3.0.19"
   peerDependencies:
     zod: ^3.25.76 || ^4.1.8
-  checksum: 10c0/64fb8b7b2627b16e1fdcb3a7dd8d26f34d054b3f7bba5de6ef579f1c12c91246d0682caa36c5dae5ed2f29b462cc6013a38d9e80234819030fbf1730e7f8da50
+  checksum: 10c0/c8e50de443d939d7a5d7444e1a2ff35357d05dd3add0fca8226b578b199f4ca53c8a9e22c376e88006466b86e39c88d7ceca790a6a866300e3964ad24756d580
   languageName: node
   linkType: hard
 
-"@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch":
-  version: 2.0.72
-  resolution: "@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch::version=2.0.72&hash=126b76"
+"@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch":
+  version: 2.0.85
+  resolution: "@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch::version=2.0.85&hash=81ee54"
   dependencies:
     "@ai-sdk/provider": "npm:2.0.0"
-    "@ai-sdk/provider-utils": "npm:3.0.17"
+    "@ai-sdk/provider-utils": "npm:3.0.19"
   peerDependencies:
     zod: ^3.25.76 || ^4.1.8
-  checksum: 10c0/fec21ab02aff999b487abdd02c32d526580d47cdf83dd74b02f8faf1423b63ab7da3c374b7a98a15bb94fdcb6deb2851381ce0f52b92c9c030dee06ff2dcf71d
+  checksum: 10c0/8fd0e4e63840b0ceb3fbf61b567e3318edfd5c3177b502076fb04b340ef8ea0a6b4cb95e4c6f7634b3bd8661ef0b69828a22b5434542c8e7d3488bff291e99c1
   languageName: node
   linkType: hard
 
@@ -328,6 +328,19 @@
   languageName: node
   linkType: hard
 
+"@ai-sdk/provider-utils@npm:3.0.19":
+  version: 3.0.19
+  resolution: "@ai-sdk/provider-utils@npm:3.0.19"
+  dependencies:
+    "@ai-sdk/provider": "npm:2.0.0"
+    "@standard-schema/spec": "npm:^1.0.0"
+    eventsource-parser: "npm:^3.0.6"
+  peerDependencies:
+    zod: ^3.25.76 || ^4.1.8
+  checksum: 10c0/e4decb19264067fa1b1642e07d515d25d1509a1a9143f59ccc051e3ca413c9fb1d708e1052a70eaf329ca39ddf6152520cd833dbf8c95d9bf02bbeffae8ea363
+  languageName: node
+  linkType: hard
+
 "@ai-sdk/provider@npm:2.0.0, @ai-sdk/provider@npm:^2.0.0":
   version: 2.0.0
   resolution: "@ai-sdk/provider@npm:2.0.0"
@@ -1853,7 +1866,7 @@
   resolution: "@cherrystudio/ai-core@workspace:packages/aiCore"
   dependencies:
     "@ai-sdk/anthropic": "npm:^2.0.49"
-    "@ai-sdk/azure": "npm:^2.0.74"
+    "@ai-sdk/azure": "npm:^2.0.87"
     "@ai-sdk/deepseek": "npm:^1.0.31"
     "@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
     "@ai-sdk/provider": "npm:^2.0.0"
@@ -2110,9 +2123,9 @@
   languageName: unknown
   linkType: soft
 
-"@cherrystudio/openai@npm:^6.9.0":
-  version: 6.9.0
-  resolution: "@cherrystudio/openai@npm:6.9.0"
+"@cherrystudio/openai@npm:^6.12.0":
+  version: 6.12.0
+  resolution: "@cherrystudio/openai@npm:6.12.0"
   peerDependencies:
     ws: ^8.18.0
     zod: ^3.25 || ^4.0
@@ -2123,7 +2136,7 @@
       optional: true
   bin:
     openai: bin/cli
-  checksum: 10c0/9c51ef33c5b9d08041a115e3d6a8158412a379998a0eae186923d5bdcc808b634c1fef4471a1d499bb8c624b04c075167bc90a1a60a805005c0657ecebbb58d0
+  checksum: 10c0/6831a603141b05508e11ea365279b57311424f9db578028d72d9bae8473e09d5fe12b1fbc0b471cabc0b3adb67339d845b6b8f6f8be4cded0e98e5a6ea25efc8
   languageName: node
   linkType: hard
 
@@ -10042,7 +10055,7 @@
     "@ai-sdk/google-vertex": "npm:^3.0.79"
     "@ai-sdk/huggingface": "npm:^0.0.10"
     "@ai-sdk/mistral": "npm:^2.0.24"
-    "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch"
+    "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch"
    "@ai-sdk/perplexity": "npm:^2.0.20"
     "@ai-sdk/test-server": "npm:^0.0.1"
     "@ant-design/v5-patch-for-react-19": "npm:^1.0.3"
@@ -10067,7 +10080,7 @@
     "@cherrystudio/embedjs-ollama": "npm:^0.1.31"
     "@cherrystudio/embedjs-openai": "npm:^0.1.31"
     "@cherrystudio/extension-table-plus": "workspace:^"
-    "@cherrystudio/openai": "npm:^6.9.0"
+    "@cherrystudio/openai": "npm:^6.12.0"
     "@dnd-kit/core": "npm:^6.3.1"
     "@dnd-kit/modifiers": "npm:^9.0.0"
     "@dnd-kit/sortable": "npm:^10.0.0"