Mirror of https://github.com/CherryHQ/cherry-studio.git, synced 2025-12-19 06:30:10 +08:00
feat: support gpt 5.2 series (#11873)
Some checks failed: Auto I18N Weekly / Auto I18N (push) has been cancelled.
* feat: support gpt 5.2
* feat: support param when set to 'none'
* chore version & simply type
* fix: comment
* fix: typecheck
* replace placeholder
* simplify func
* feat: add gpt-5.1-codex-max
This commit is contained in:
parent 66feee714b
commit dc0c47c64d
@@ -1,8 +1,8 @@
 diff --git a/dist/index.js b/dist/index.js
-index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a70ea2b5a2 100644
+index 130094d194ea1e8e7d3027d07d82465741192124..4d13dcee8c962ca9ee8f1c3d748f8ffe6a3cfb47 100644
 --- a/dist/index.js
 +++ b/dist/index.js
-@@ -274,6 +274,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
+@@ -290,6 +290,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
         message: import_v42.z.object({
           role: import_v42.z.literal("assistant").nullish(),
           content: import_v42.z.string().nullish(),
@@ -10,7 +10,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
           tool_calls: import_v42.z.array(
             import_v42.z.object({
               id: import_v42.z.string().nullish(),
-@@ -340,6 +341,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
+@@ -356,6 +357,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
         delta: import_v42.z.object({
           role: import_v42.z.enum(["assistant"]).nullish(),
           content: import_v42.z.string().nullish(),
@@ -18,7 +18,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
           tool_calls: import_v42.z.array(
             import_v42.z.object({
               index: import_v42.z.number(),
-@@ -795,6 +797,13 @@ var OpenAIChatLanguageModel = class {
+@@ -814,6 +816,13 @@ var OpenAIChatLanguageModel = class {
       if (text != null && text.length > 0) {
         content.push({ type: "text", text });
       }
@@ -32,7 +32,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
       for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
         content.push({
           type: "tool-call",
-@@ -876,6 +885,7 @@ var OpenAIChatLanguageModel = class {
+@@ -895,6 +904,7 @@ var OpenAIChatLanguageModel = class {
       };
       let metadataExtracted = false;
       let isActiveText = false;
@@ -40,7 +40,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
       const providerMetadata = { openai: {} };
       return {
         stream: response.pipeThrough(
-@@ -933,6 +943,21 @@ var OpenAIChatLanguageModel = class {
+@@ -952,6 +962,21 @@ var OpenAIChatLanguageModel = class {
             return;
           }
           const delta = choice.delta;
@@ -62,7 +62,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
           if (delta.content != null) {
             if (!isActiveText) {
               controller.enqueue({ type: "text-start", id: "0" });
-@@ -1045,6 +1070,9 @@ var OpenAIChatLanguageModel = class {
+@@ -1064,6 +1089,9 @@ var OpenAIChatLanguageModel = class {
           }
         },
         flush(controller) {
@@ -118,7 +118,7 @@
     "@ai-sdk/google-vertex": "^3.0.79",
     "@ai-sdk/huggingface": "^0.0.10",
     "@ai-sdk/mistral": "^2.0.24",
-    "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
+    "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
     "@ai-sdk/perplexity": "^2.0.20",
     "@ai-sdk/test-server": "^0.0.1",
     "@ant-design/v5-patch-for-react-19": "^1.0.3",
@@ -142,7 +142,7 @@
     "@cherrystudio/embedjs-ollama": "^0.1.31",
     "@cherrystudio/embedjs-openai": "^0.1.31",
     "@cherrystudio/extension-table-plus": "workspace:^",
-    "@cherrystudio/openai": "^6.9.0",
+    "@cherrystudio/openai": "^6.12.0",
     "@dnd-kit/core": "^6.3.1",
     "@dnd-kit/modifiers": "^9.0.0",
     "@dnd-kit/sortable": "^10.0.0",
@@ -414,7 +414,7 @@
     "@langchain/openai@npm:>=0.1.0 <0.6.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
     "@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
     "@langchain/openai@npm:>=0.2.0 <0.7.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
-    "@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
+    "@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
     "@ai-sdk/google@npm:^2.0.40": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
     "@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
   },
@@ -40,7 +40,7 @@
   },
   "dependencies": {
     "@ai-sdk/anthropic": "^2.0.49",
-    "@ai-sdk/azure": "^2.0.74",
+    "@ai-sdk/azure": "^2.0.87",
     "@ai-sdk/deepseek": "^1.0.31",
     "@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
     "@ai-sdk/provider": "^2.0.0",
@@ -28,13 +28,14 @@ import { getAnthropicThinkingBudget } from '../utils/reasoning'
  * - Disabled for models that do not support temperature.
  * - Disabled for Claude 4.5 reasoning models when TopP is enabled and temperature is disabled.
  * Otherwise, returns the temperature value if the assistant has temperature enabled.
 
  */
 export function getTemperature(assistant: Assistant, model: Model): number | undefined {
   if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
     return undefined
   }
 
-  if (!isSupportTemperatureModel(model)) {
+  if (!isSupportTemperatureModel(model, assistant)) {
     return undefined
   }
 
@@ -46,6 +47,10 @@ export function getTemperature(assistant: Assistant, model: Model): number | und
     return undefined
   }
 
+  return getTemperatureValue(assistant, model)
+}
+
+function getTemperatureValue(assistant: Assistant, model: Model): number | undefined {
   const assistantSettings = getAssistantSettings(assistant)
   let temperature = assistantSettings?.temperature
   if (temperature && isMaxTemperatureOneModel(model)) {
@@ -68,13 +73,17 @@ export function getTopP(assistant: Assistant, model: Model): number | undefined
   if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
     return undefined
   }
-  if (!isSupportTopPModel(model)) {
+  if (!isSupportTopPModel(model, assistant)) {
     return undefined
   }
   if (isTemperatureTopPMutuallyExclusiveModel(model) && assistant.settings?.enableTemperature) {
     return undefined
   }
 
+  return getTopPValue(assistant)
+}
+
+function getTopPValue(assistant: Assistant): number | undefined {
   const assistantSettings = getAssistantSettings(assistant)
   // FIXME: assistant.settings.enableTopP should be always a boolean value.
   const enableTopP = assistantSettings.enableTopP ?? DEFAULT_ASSISTANT_SETTINGS.enableTopP
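The hunk above splits each getter into its guard checks and a small value helper, and the guards now receive the assistant so that model support can depend on assistant settings (used for the GPT-5.2 case later in this commit). A standalone sketch of that guard-then-delegate shape, with simplified stand-in types rather than the project's real Assistant and Model types:

// Sketch only: mirrors the shape of getTemperature after this change.
type SketchSettings = { temperature?: number; reasoning_effort?: string }
type SketchAssistant = { settings?: SketchSettings }

function getTemperatureSketch(
  assistant: SketchAssistant,
  supportsTemperature: (a: SketchAssistant) => boolean
): number | undefined {
  // Guards first: return undefined when the parameter must not be sent at all.
  if (!supportsTemperature(assistant)) {
    return undefined
  }
  // Then delegate to a helper that only reads the configured value.
  return getTemperatureValueSketch(assistant)
}

function getTemperatureValueSketch(assistant: SketchAssistant): number | undefined {
  return assistant.settings?.temperature
}

console.log(getTemperatureSketch({ settings: { temperature: 0.7 } }, () => true))  // 0.7
console.log(getTemperatureSketch({ settings: { temperature: 0.7 } }, () => false)) // undefined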
@@ -13,11 +13,11 @@ import {
   isDoubaoSeedAfter251015,
   isDoubaoThinkingAutoModel,
   isGemini3ThinkingTokenModel,
-  isGPT5SeriesModel,
   isGPT51SeriesModel,
   isGrok4FastReasoningModel,
   isOpenAIDeepResearchModel,
   isOpenAIModel,
+  isOpenAIReasoningModel,
   isQwenAlwaysThinkModel,
   isQwenReasoningModel,
   isReasoningModel,
@@ -134,8 +134,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
   // https://creator.poe.com/docs/external-applications/openai-compatible-api#additional-considerations
   // Poe provider - supports custom bot parameters via extra_body
   if (provider.id === SystemProviderIds.poe) {
-    // GPT-5 series models use reasoning_effort parameter in extra_body
-    if (isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) {
+    if (isOpenAIReasoningModel(model)) {
       return {
         extra_body: {
           reasoning_effort: reasoningEffort === 'auto' ? 'medium' : reasoningEffort
@@ -635,6 +634,8 @@ export function getXAIReasoningParams(assistant: Assistant, model: Model): Pick<
     case 'low':
     case 'high':
       return { reasoningEffort }
+    case 'xhigh':
+      return { reasoningEffort: 'high' }
   }
 }
 
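For xAI the new 'xhigh' option has no direct equivalent, so the switch above folds it into 'high'. A minimal sketch of that mapping (standalone, with a hypothetical XaiEffort union; the real code reads the assistant's reasoning effort setting):

// 'low' and 'high' pass through unchanged, 'xhigh' is downgraded to 'high'.
type XaiEffort = 'low' | 'high' | 'xhigh'

function toXaiReasoningEffort(effort: XaiEffort): 'low' | 'high' {
  return effort === 'xhigh' ? 'high' : effort
}

console.log(toXaiReasoningEffort('xhigh')) // 'high'
console.log(toXaiReasoningEffort('low'))   // 'low'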
@@ -35,6 +35,16 @@ export const isGPT5ProModel = (model: Model) => {
   return modelId.includes('gpt-5-pro')
 }
 
+export const isGPT52ProModel = (model: Model) => {
+  const modelId = getLowerBaseModelName(model.id)
+  return modelId.includes('gpt-5.2-pro')
+}
+
+export const isGPT51CodexMaxModel = (model: Model) => {
+  const modelId = getLowerBaseModelName(model.id)
+  return modelId.includes('gpt-5.1-codex-max')
+}
+
 export const isOpenAIOpenWeightModel = (model: Model) => {
   const modelId = getLowerBaseModelName(model.id)
   return modelId.includes('gpt-oss')
@@ -42,7 +52,7 @@ export const isOpenAIOpenWeightModel = (model: Model) => {
 
 export const isGPT5SeriesModel = (model: Model) => {
   const modelId = getLowerBaseModelName(model.id)
-  return modelId.includes('gpt-5') && !modelId.includes('gpt-5.1')
+  return modelId.includes('gpt-5') && !modelId.includes('gpt-5.1') && !modelId.includes('gpt-5.2')
 }
 
 export const isGPT5SeriesReasoningModel = (model: Model) => {
@@ -55,6 +65,11 @@ export const isGPT51SeriesModel = (model: Model) => {
   return modelId.includes('gpt-5.1')
 }
 
+export const isGPT52SeriesModel = (model: Model) => {
+  const modelId = getLowerBaseModelName(model.id)
+  return modelId.includes('gpt-5.2')
+}
+
 export function isSupportVerbosityModel(model: Model): boolean {
   const modelId = getLowerBaseModelName(model.id)
   return (isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) && !modelId.includes('chat')
@@ -86,7 +101,7 @@ export function isSupportedReasoningEffortOpenAIModel(model: Model): boolean {
     modelId.includes('o3') ||
     modelId.includes('o4') ||
     modelId.includes('gpt-oss') ||
-    ((isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) && !modelId.includes('chat'))
+    ((isGPT5SeriesModel(model) || isGPT51SeriesModel(model) || isGPT52SeriesModel(model)) && !modelId.includes('chat'))
   )
 }
 
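Taken together, the helpers above partition the GPT-5 family purely by substring matching on the lower-cased base model id, and 'gpt-5.2' ids are now excluded from the plain GPT-5 bucket. A standalone illustration of the resulting behaviour (plain string checks, not the project's exported Model-based helpers):

// Mirrors the substring logic from the diff above; ids are assumed already lower-cased.
const isGPT5Series = (id: string) =>
  id.includes('gpt-5') && !id.includes('gpt-5.1') && !id.includes('gpt-5.2')
const isGPT52Series = (id: string) => id.includes('gpt-5.2')
const isGPT52Pro = (id: string) => id.includes('gpt-5.2-pro')
const isGPT51CodexMax = (id: string) => id.includes('gpt-5.1-codex-max')

console.log(isGPT5Series('gpt-5.2-pro'))          // false: no longer in the old GPT-5 bucket
console.log(isGPT52Series('gpt-5.2-pro'))         // true
console.log(isGPT52Pro('gpt-5.2-pro'))            // true
console.log(isGPT51CodexMax('gpt-5.1-codex-max')) // true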
@@ -11,7 +11,10 @@ import { isEmbeddingModel, isRerankModel } from './embedding'
 import {
   isGPT5ProModel,
   isGPT5SeriesModel,
+  isGPT51CodexMaxModel,
   isGPT51SeriesModel,
+  isGPT52ProModel,
+  isGPT52SeriesModel,
   isOpenAIDeepResearchModel,
   isOpenAIReasoningModel,
   isSupportedReasoningEffortOpenAIModel
@@ -33,7 +36,10 @@ export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
   gpt5_codex: ['low', 'medium', 'high'] as const,
   gpt5_1: ['none', 'low', 'medium', 'high'] as const,
   gpt5_1_codex: ['none', 'medium', 'high'] as const,
+  gpt5_1_codex_max: ['none', 'medium', 'high', 'xhigh'] as const,
+  gpt5_2: ['none', 'low', 'medium', 'high', 'xhigh'] as const,
   gpt5pro: ['high'] as const,
+  gpt52pro: ['medium', 'high', 'xhigh'] as const,
   grok: ['low', 'high'] as const,
   grok4_fast: ['auto'] as const,
   gemini: ['low', 'medium', 'high', 'auto'] as const,
@@ -60,6 +66,9 @@ export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
   gpt5_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_codex,
   gpt5_1: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1,
   gpt5_1_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex,
+  gpt5_2: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_2,
+  gpt5_1_codex_max: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex_max,
+  gpt52pro: MODEL_SUPPORTED_REASONING_EFFORT.gpt52pro,
   grok: MODEL_SUPPORTED_REASONING_EFFORT.grok,
   grok4_fast: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
   gemini: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
@@ -84,6 +93,7 @@ const withModelIdAndNameAsId = <T>(model: Model, fn: (model: Model) => T): { idR
   }
 }
 
+// TODO: add ut
 const _getThinkModelType = (model: Model): ThinkingModelType => {
   let thinkingModelType: ThinkingModelType = 'default'
   const modelId = getLowerBaseModelName(model.id)
@@ -93,9 +103,17 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
   if (isGPT51SeriesModel(model)) {
     if (modelId.includes('codex')) {
       thinkingModelType = 'gpt5_1_codex'
+      if (isGPT51CodexMaxModel(model)) {
+        thinkingModelType = 'gpt5_1_codex_max'
+      }
     } else {
       thinkingModelType = 'gpt5_1'
     }
+  } else if (isGPT52SeriesModel(model)) {
+    thinkingModelType = 'gpt5_2'
+    if (isGPT52ProModel(model)) {
+      thinkingModelType = 'gpt52pro'
+    }
   } else if (isGPT5SeriesModel(model)) {
     if (modelId.includes('codex')) {
       thinkingModelType = 'gpt5_codex'
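With the entries above, a GPT-5.2 id first resolves to a thinking-model type and then to the reasoning-effort options the picker offers. A simplified sketch of that lookup chain (local constants and a hypothetical thinkModelTypeFor helper, not the project's _getThinkModelType):

// Only the GPT-5.2 related branches are reproduced here.
const SUPPORTED_EFFORT: Record<string, readonly string[]> = {
  gpt5_2: ['none', 'low', 'medium', 'high', 'xhigh'],
  gpt52pro: ['medium', 'high', 'xhigh'],
  gpt5_1_codex_max: ['none', 'medium', 'high', 'xhigh']
}

function thinkModelTypeFor(modelId: string): string {
  const id = modelId.toLowerCase()
  if (id.includes('gpt-5.1-codex-max')) return 'gpt5_1_codex_max'
  if (id.includes('gpt-5.2-pro')) return 'gpt52pro'
  if (id.includes('gpt-5.2')) return 'gpt5_2'
  return 'default'
}

console.log(SUPPORTED_EFFORT[thinkModelTypeFor('gpt-5.2')])     // [ 'none', 'low', 'medium', 'high', 'xhigh' ]
console.log(SUPPORTED_EFFORT[thinkModelTypeFor('gpt-5.2-pro')]) // [ 'medium', 'high', 'xhigh' ]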
@@ -1,5 +1,6 @@
 import type OpenAI from '@cherrystudio/openai'
 import { isEmbeddingModel, isRerankModel } from '@renderer/config/models/embedding'
+import type { Assistant } from '@renderer/types'
 import { type Model, SystemProviderIds } from '@renderer/types'
 import type { OpenAIVerbosity, ValidOpenAIVerbosity } from '@renderer/types/aiCoreTypes'
 import { getLowerBaseModelName } from '@renderer/utils'
@@ -8,6 +9,7 @@ import {
   isGPT5ProModel,
   isGPT5SeriesModel,
   isGPT51SeriesModel,
+  isGPT52SeriesModel,
   isOpenAIChatCompletionOnlyModel,
   isOpenAIOpenWeightModel,
   isOpenAIReasoningModel,
@@ -48,13 +50,16 @@ export function isSupportedModel(model: OpenAI.Models.Model): boolean {
  * @param model - The model to check
  * @returns true if the model supports temperature parameter
  */
-export function isSupportTemperatureModel(model: Model | undefined | null): boolean {
+export function isSupportTemperatureModel(model: Model | undefined | null, assistant?: Assistant): boolean {
   if (!model) {
     return false
   }
 
   // OpenAI reasoning models (except open weight) don't support temperature
   if (isOpenAIReasoningModel(model) && !isOpenAIOpenWeightModel(model)) {
+    if (isGPT52SeriesModel(model) && assistant?.settings?.reasoning_effort === 'none') {
+      return true
+    }
     return false
   }
 
@@ -76,13 +81,16 @@ export function isSupportTemperatureModel(model: Model | undefined | null): bool
  * @param model - The model to check
  * @returns true if the model supports top_p parameter
  */
-export function isSupportTopPModel(model: Model | undefined | null): boolean {
+export function isSupportTopPModel(model: Model | undefined | null, assistant?: Assistant): boolean {
   if (!model) {
     return false
   }
 
   // OpenAI reasoning models (except open weight) don't support top_p
   if (isOpenAIReasoningModel(model) && !isOpenAIOpenWeightModel(model)) {
+    if (isGPT52SeriesModel(model) && assistant?.settings?.reasoning_effort === 'none') {
+      return true
+    }
     return false
   }
 
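The net effect of the two new guards: temperature and top_p stay blocked for OpenAI reasoning models, except for GPT-5.2 models when the assistant's reasoning effort is set to 'none'. A compact sketch of just that decision (isOpenAIReasoning is passed in as a flag here instead of the real model checks, and MiniAssistant is a stripped-down stand-in type):

// Sketch of the GPT-5.2 exception only.
type MiniAssistant = { settings?: { reasoning_effort?: string } }

function temperatureAllowed(modelId: string, isOpenAIReasoning: boolean, assistant?: MiniAssistant): boolean {
  if (isOpenAIReasoning) {
    const isGpt52 = modelId.toLowerCase().includes('gpt-5.2')
    return isGpt52 && assistant?.settings?.reasoning_effort === 'none'
  }
  return true // non-reasoning models keep their usual temperature handling
}

console.log(temperatureAllowed('gpt-5.2', true, { settings: { reasoning_effort: 'none' } })) // true
console.log(temperatureAllowed('gpt-5.2', true, { settings: { reasoning_effort: 'high' } })) // false
console.log(temperatureAllowed('gpt-5.1', true, { settings: { reasoning_effort: 'none' } })) // false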
@@ -316,7 +316,8 @@ const reasoningEffortOptionsKeyMap: Record<ThinkingOption, string> = {
   high: 'assistants.settings.reasoning_effort.high',
   low: 'assistants.settings.reasoning_effort.low',
   medium: 'assistants.settings.reasoning_effort.medium',
-  auto: 'assistants.settings.reasoning_effort.default'
+  auto: 'assistants.settings.reasoning_effort.default',
+  xhigh: 'assistants.settings.reasoning_effort.xhigh'
 } as const
 
 export const getReasoningEffortOptionsLabel = (key: string): string => {
@@ -546,7 +546,8 @@
     "low": "Low",
     "medium": "Medium",
     "minimal": "Minimal",
-    "off": "Off"
+    "off": "Off",
+    "xhigh": "Extra High"
   },
   "regular_phrases": {
     "add": "Add Phrase",

@@ -546,7 +546,8 @@
     "low": "浮想",
     "medium": "斟酌",
     "minimal": "微念",
-    "off": "关闭"
+    "off": "关闭",
+    "xhigh": "穷究"
   },
   "regular_phrases": {
     "add": "添加短语",

@@ -546,7 +546,8 @@
     "low": "稍微思考",
     "medium": "正常思考",
     "minimal": "最少思考",
-    "off": "關閉"
+    "off": "關閉",
+    "xhigh": "極力思考"
   },
   "regular_phrases": {
     "add": "添加短语",

@@ -546,7 +546,8 @@
     "low": "Spontan",
     "medium": "Überlegt",
     "minimal": "Minimal",
-    "off": "Aus"
+    "off": "Aus",
+    "xhigh": "Extra hoch"
   },
   "regular_phrases": {
     "add": "Phrase hinzufügen",

@@ -546,7 +546,8 @@
     "low": "Μικρό",
     "medium": "Μεσαίο",
     "minimal": "ελάχιστος",
-    "off": "Απενεργοποίηση"
+    "off": "Απενεργοποίηση",
+    "xhigh": "[to be translated]:Extra High"
   },
   "regular_phrases": {
     "add": "Προσθήκη φράσης",

@@ -546,7 +546,8 @@
     "low": "Corto",
     "medium": "Medio",
     "minimal": "minimal",
-    "off": "Apagado"
+    "off": "Apagado",
+    "xhigh": "[to be translated]:Extra High"
   },
   "regular_phrases": {
     "add": "Agregar frase",

@@ -546,7 +546,8 @@
     "low": "Court",
     "medium": "Moyen",
     "minimal": "minimal",
-    "off": "Off"
+    "off": "Off",
+    "xhigh": "[to be translated]:Extra High"
   },
   "regular_phrases": {
     "add": "Добавить фразу",

@@ -546,7 +546,8 @@
     "low": "少しの思考",
     "medium": "普通の思考",
     "minimal": "最小限の思考",
-    "off": "オフ"
+    "off": "オフ",
+    "xhigh": "[to be translated]:Extra High"
   },
   "regular_phrases": {
     "add": "プロンプトを追加",

@@ -546,7 +546,8 @@
     "low": "Curto",
     "medium": "Médio",
     "minimal": "mínimo",
-    "off": "Desligado"
+    "off": "Desligado",
+    "xhigh": "[to be translated]:Extra High"
   },
   "regular_phrases": {
     "add": "Adicionar Frase",

@@ -546,7 +546,8 @@
     "low": "Меньше думать",
     "medium": "Среднее",
     "minimal": "минимальный",
-    "off": "Выключить"
+    "off": "Выключить",
+    "xhigh": "[to be translated]:Extra High"
   },
   "regular_phrases": {
     "add": "Добавить подсказку",
@@ -5,7 +5,8 @@ import {
   MdiLightbulbOn,
   MdiLightbulbOn30,
   MdiLightbulbOn50,
-  MdiLightbulbOn80
+  MdiLightbulbOn80,
+  MdiLightbulbOn90
 } from '@renderer/components/Icons/SVGIcon'
 import { QuickPanelReservedSymbol, useQuickPanel } from '@renderer/components/QuickPanel'
 import {
@@ -185,6 +186,9 @@ const ThinkingIcon = (props: { option?: ThinkingOption; isFixedReasoning?: boole
       IconComponent = MdiLightbulbOn80
       break
     case 'high':
+      IconComponent = MdiLightbulbOn90
+      break
+    case 'xhigh':
       IconComponent = MdiLightbulbOn
       break
     case 'auto':
@@ -88,7 +88,10 @@ const ThinkModelTypes = [
   'gpt5_1',
   'gpt5_codex',
   'gpt5_1_codex',
+  'gpt5_1_codex_max',
+  'gpt5_2',
   'gpt5pro',
+  'gpt52pro',
   'grok',
   'grok4_fast',
   'gemini',
@@ -122,6 +125,7 @@ export const EFFORT_RATIO: EffortRatio = {
   low: 0.05,
   medium: 0.5,
   high: 0.8,
+  xhigh: 0.9,
   auto: 2
 }
 
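EFFORT_RATIO turns the selected effort into a scaling factor, and 'xhigh' slots in at 0.9, just above 'high' (0.8), while 'auto' keeps its sentinel value of 2. As a rough, illustrative guess at how such a ratio is consumed (the actual formula lives in the app's reasoning utilities, e.g. getAnthropicThinkingBudget, and may differ), scaling a model-specific maximum thinking budget would look like this:

// Illustrative only; EFFORT_RATIO_SKETCH copies the values from the diff above.
const EFFORT_RATIO_SKETCH = { low: 0.05, medium: 0.5, high: 0.8, xhigh: 0.9 } as const

function thinkingBudgetSketch(effort: keyof typeof EFFORT_RATIO_SKETCH, maxBudgetTokens: number): number {
  return Math.floor(maxBudgetTokens * EFFORT_RATIO_SKETCH[effort])
}

console.log(thinkingBudgetSketch('high', 32000))  // 25600
console.log(thinkingBudgetSketch('xhigh', 32000)) // 28800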
@@ -74,13 +74,13 @@ export type RequestOptions = Anthropic.RequestOptions | OpenAI.RequestOptions |
  */
 
 type OpenAIParamsPurified = Omit<OpenAI.Chat.Completions.ChatCompletionCreateParams, 'reasoning_effort' | 'modalities'>
+type OpenAIReasoningEffort = NonNullable<OpenAI.ReasoningEffort> | 'auto'
 export type ReasoningEffortOptionalParams = {
   thinking?: { type: 'disabled' | 'enabled' | 'auto'; budget_tokens?: number }
   reasoning?: { max_tokens?: number; exclude?: boolean; effort?: string; enabled?: boolean } | OpenAI.Reasoning
-  reasoningEffort?: OpenAI.Chat.Completions.ChatCompletionCreateParams['reasoning_effort'] | 'none' | 'auto'
+  reasoningEffort?: OpenAIReasoningEffort
   // WARN: This field will be overwrite to undefined by aisdk if the provider is openai-compatible. Use reasoningEffort instead.
-  reasoning_effort?: OpenAI.Chat.Completions.ChatCompletionCreateParams['reasoning_effort'] | 'none' | 'auto'
+  reasoning_effort?: OpenAIReasoningEffort
   enable_thinking?: boolean
   thinking_budget?: number
   incremental_output?: boolean
@@ -100,7 +100,7 @@ export type ReasoningEffortOptionalParams = {
       type: 'enabled' | 'disabled'
     }
     thinking_budget?: number
-    reasoning_effort?: OpenAI.Chat.Completions.ChatCompletionCreateParams['reasoning_effort'] | 'auto'
+    reasoning_effort?: OpenAIReasoningEffort
   }
   disable_reasoning?: boolean
   // Add any other potential reasoning-related keys here if they exist
yarn.lock: 59 changed lines

@@ -102,16 +102,16 @@ __metadata:
   languageName: node
   linkType: hard
 
-"@ai-sdk/azure@npm:^2.0.74":
-  version: 2.0.74
-  resolution: "@ai-sdk/azure@npm:2.0.74"
+"@ai-sdk/azure@npm:^2.0.87":
+  version: 2.0.87
+  resolution: "@ai-sdk/azure@npm:2.0.87"
   dependencies:
-    "@ai-sdk/openai": "npm:2.0.72"
+    "@ai-sdk/openai": "npm:2.0.85"
     "@ai-sdk/provider": "npm:2.0.0"
-    "@ai-sdk/provider-utils": "npm:3.0.17"
+    "@ai-sdk/provider-utils": "npm:3.0.19"
   peerDependencies:
     zod: ^3.25.76 || ^4.1.8
-  checksum: 10c0/dccd1959ef43034a0559cdc862af7f351c0a997a56dbeb68b1c844f67d3ff7920f43890e1d18546600eeaac1c54f0c94943b6ce0b43ba4d44ddc3a829b8a71dd
+  checksum: 10c0/77b0c74966144c3ca715e8357bd36502bd7055edb74a4005d9537cf9175cd9b33df32164a5e3f1925b1d311ed1a4eaf5b8fad6abdb81e1b6c14ba5ea78479f34
   languageName: node
   linkType: hard
 
@@ -266,27 +266,27 @@ __metadata:
   languageName: node
   linkType: hard
 
-"@ai-sdk/openai@npm:2.0.72":
-  version: 2.0.72
-  resolution: "@ai-sdk/openai@npm:2.0.72"
+"@ai-sdk/openai@npm:2.0.85":
+  version: 2.0.85
+  resolution: "@ai-sdk/openai@npm:2.0.85"
   dependencies:
     "@ai-sdk/provider": "npm:2.0.0"
-    "@ai-sdk/provider-utils": "npm:3.0.17"
+    "@ai-sdk/provider-utils": "npm:3.0.19"
   peerDependencies:
     zod: ^3.25.76 || ^4.1.8
-  checksum: 10c0/64fb8b7b2627b16e1fdcb3a7dd8d26f34d054b3f7bba5de6ef579f1c12c91246d0682caa36c5dae5ed2f29b462cc6013a38d9e80234819030fbf1730e7f8da50
+  checksum: 10c0/c8e50de443d939d7a5d7444e1a2ff35357d05dd3add0fca8226b578b199f4ca53c8a9e22c376e88006466b86e39c88d7ceca790a6a866300e3964ad24756d580
   languageName: node
   linkType: hard
 
-"@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch":
-  version: 2.0.72
-  resolution: "@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch::version=2.0.72&hash=126b76"
+"@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch":
+  version: 2.0.85
+  resolution: "@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch::version=2.0.85&hash=81ee54"
   dependencies:
     "@ai-sdk/provider": "npm:2.0.0"
-    "@ai-sdk/provider-utils": "npm:3.0.17"
+    "@ai-sdk/provider-utils": "npm:3.0.19"
   peerDependencies:
     zod: ^3.25.76 || ^4.1.8
-  checksum: 10c0/fec21ab02aff999b487abdd02c32d526580d47cdf83dd74b02f8faf1423b63ab7da3c374b7a98a15bb94fdcb6deb2851381ce0f52b92c9c030dee06ff2dcf71d
+  checksum: 10c0/8fd0e4e63840b0ceb3fbf61b567e3318edfd5c3177b502076fb04b340ef8ea0a6b4cb95e4c6f7634b3bd8661ef0b69828a22b5434542c8e7d3488bff291e99c1
   languageName: node
   linkType: hard
 
@@ -328,6 +328,19 @@ __metadata:
   languageName: node
   linkType: hard
 
+"@ai-sdk/provider-utils@npm:3.0.19":
+  version: 3.0.19
+  resolution: "@ai-sdk/provider-utils@npm:3.0.19"
+  dependencies:
+    "@ai-sdk/provider": "npm:2.0.0"
+    "@standard-schema/spec": "npm:^1.0.0"
+    eventsource-parser: "npm:^3.0.6"
+  peerDependencies:
+    zod: ^3.25.76 || ^4.1.8
+  checksum: 10c0/e4decb19264067fa1b1642e07d515d25d1509a1a9143f59ccc051e3ca413c9fb1d708e1052a70eaf329ca39ddf6152520cd833dbf8c95d9bf02bbeffae8ea363
+  languageName: node
+  linkType: hard
+
 "@ai-sdk/provider@npm:2.0.0, @ai-sdk/provider@npm:^2.0.0":
   version: 2.0.0
   resolution: "@ai-sdk/provider@npm:2.0.0"
@@ -1853,7 +1866,7 @@ __metadata:
   resolution: "@cherrystudio/ai-core@workspace:packages/aiCore"
   dependencies:
     "@ai-sdk/anthropic": "npm:^2.0.49"
-    "@ai-sdk/azure": "npm:^2.0.74"
+    "@ai-sdk/azure": "npm:^2.0.87"
     "@ai-sdk/deepseek": "npm:^1.0.31"
     "@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
     "@ai-sdk/provider": "npm:^2.0.0"
@@ -2110,9 +2123,9 @@ __metadata:
   languageName: unknown
   linkType: soft
 
-"@cherrystudio/openai@npm:^6.9.0":
-  version: 6.9.0
-  resolution: "@cherrystudio/openai@npm:6.9.0"
+"@cherrystudio/openai@npm:^6.12.0":
+  version: 6.12.0
+  resolution: "@cherrystudio/openai@npm:6.12.0"
   peerDependencies:
     ws: ^8.18.0
     zod: ^3.25 || ^4.0
@@ -2123,7 +2136,7 @@ __metadata:
     optional: true
   bin:
     openai: bin/cli
-  checksum: 10c0/9c51ef33c5b9d08041a115e3d6a8158412a379998a0eae186923d5bdcc808b634c1fef4471a1d499bb8c624b04c075167bc90a1a60a805005c0657ecebbb58d0
+  checksum: 10c0/6831a603141b05508e11ea365279b57311424f9db578028d72d9bae8473e09d5fe12b1fbc0b471cabc0b3adb67339d845b6b8f6f8be4cded0e98e5a6ea25efc8
   languageName: node
   linkType: hard
 
@@ -10042,7 +10055,7 @@ __metadata:
     "@ai-sdk/google-vertex": "npm:^3.0.79"
     "@ai-sdk/huggingface": "npm:^0.0.10"
     "@ai-sdk/mistral": "npm:^2.0.24"
-    "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch"
+    "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch"
     "@ai-sdk/perplexity": "npm:^2.0.20"
     "@ai-sdk/test-server": "npm:^0.0.1"
     "@ant-design/v5-patch-for-react-19": "npm:^1.0.3"
@@ -10067,7 +10080,7 @@ __metadata:
     "@cherrystudio/embedjs-ollama": "npm:^0.1.31"
     "@cherrystudio/embedjs-openai": "npm:^0.1.31"
     "@cherrystudio/extension-table-plus": "workspace:^"
-    "@cherrystudio/openai": "npm:^6.9.0"
+    "@cherrystudio/openai": "npm:^6.12.0"
     "@dnd-kit/core": "npm:^6.3.1"
     "@dnd-kit/modifiers": "npm:^9.0.0"
     "@dnd-kit/sortable": "npm:^10.0.0"