feat: update Google and OpenAI SDKs with new features and fixes (#11395)

* feat: update Google and OpenAI SDKs with new features and fixes

- Updated Google SDK to ensure model paths are correctly formatted (see the sketch after this list).
- Enhanced OpenAI SDK to include support for image URLs in chat responses.
- Added reasoning content handling in OpenAI chat responses and chunks.
- Introduced Azure Anthropic provider configuration for Claude integration.
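
A minimal TypeScript sketch of the first two changes, paraphrased from the patch hunks further below; the helper shapes mirror the patched dist output and are illustrative only, not the SDKs' public API.

// Google SDK: only skip the "models/" prefix when the id is already a models/ path,
// so ids that merely contain a "/" now get the prefix prepended as well.
function getModelPath(modelId: string): string {
  return modelId.includes('models/') ? modelId : `models/${modelId}`
}

// OpenAI-compatible chat responses: map an image_url entry (usually a data URL) to a
// file content part; 'image/jpeg' is the fallback media type used by the patch.
function imageUrlToFilePart(url: string): { type: 'file'; mediaType: string; data: string } {
  const mediaType = url.match(/^data:([^;]+)/)?.[1] ?? 'image/jpeg'
  const base64 = url.match(/^data:[^;]*;base64,(.+)$/)?.[1]
  return { type: 'file', mediaType, data: base64 ?? url }
}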

* fix: azure error

* fix: lint

* fix: test

* fix: test

* fix type

* fix comment

* fix: redundant

* chore resolution

* fix: test

* fix: comment

* fix: comment

* fix

* feat: add OpenRouter reasoning middleware to support content filtering
Authored by SuYao on 2025-11-23 23:18:57 +08:00, committed by GitHub
parent 64ca3802a4
commit 2c3338939e
35 changed files with 717 additions and 611 deletions


@ -1,152 +0,0 @@
diff --git a/dist/index.js b/dist/index.js
index c2ef089c42e13a8ee4a833899a415564130e5d79..75efa7baafb0f019fb44dd50dec1641eee8879e7 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -471,7 +471,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
// src/get-model-path.ts
function getModelPath(modelId) {
- return modelId.includes("/") ? modelId : `models/${modelId}`;
+ return modelId.includes("models/") ? modelId : `models/${modelId}`;
}
// src/google-generative-ai-options.ts
diff --git a/dist/index.mjs b/dist/index.mjs
index d75c0cc13c41192408c1f3f2d29d76a7bffa6268..ada730b8cb97d9b7d4cb32883a1d1ff416404d9b 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -477,7 +477,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
// src/get-model-path.ts
function getModelPath(modelId) {
- return modelId.includes("/") ? modelId : `models/${modelId}`;
+ return modelId.includes("models/") ? modelId : `models/${modelId}`;
}
// src/google-generative-ai-options.ts
diff --git a/dist/internal/index.js b/dist/internal/index.js
index 277cac8dc734bea2fb4f3e9a225986b402b24f48..bb704cd79e602eb8b0cee1889e42497d59ccdb7a 100644
--- a/dist/internal/index.js
+++ b/dist/internal/index.js
@@ -432,7 +432,15 @@ function prepareTools({
var _a;
tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
const toolWarnings = [];
- const isGemini2 = modelId.includes("gemini-2");
+  // These changes could be safely removed when @ai-sdk/google v3 is released.
+ const isLatest = (
+ [
+ 'gemini-flash-latest',
+ 'gemini-flash-lite-latest',
+ 'gemini-pro-latest',
+ ]
+ ).some(id => id === modelId);
+ const isGemini2OrNewer = modelId.includes("gemini-2") || modelId.includes("gemini-3") || isLatest;
const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
const supportsFileSearch = modelId.includes("gemini-2.5");
if (tools == null) {
@@ -458,7 +466,7 @@ function prepareTools({
providerDefinedTools.forEach((tool) => {
switch (tool.id) {
case "google.google_search":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({ googleSearch: {} });
} else if (supportsDynamicRetrieval) {
googleTools2.push({
@@ -474,7 +482,7 @@ function prepareTools({
}
break;
case "google.url_context":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({ urlContext: {} });
} else {
toolWarnings.push({
@@ -485,7 +493,7 @@ function prepareTools({
}
break;
case "google.code_execution":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({ codeExecution: {} });
} else {
toolWarnings.push({
@@ -507,7 +515,7 @@ function prepareTools({
}
break;
case "google.vertex_rag_store":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({
retrieval: {
vertex_rag_store: {
diff --git a/dist/internal/index.mjs b/dist/internal/index.mjs
index 03b7cc591be9b58bcc2e775a96740d9f98862a10..347d2c12e1cee79f0f8bb258f3844fb0522a6485 100644
--- a/dist/internal/index.mjs
+++ b/dist/internal/index.mjs
@@ -424,7 +424,15 @@ function prepareTools({
var _a;
tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
const toolWarnings = [];
- const isGemini2 = modelId.includes("gemini-2");
+  // These changes could be safely removed when @ai-sdk/google v3 is released.
+ const isLatest = (
+ [
+ 'gemini-flash-latest',
+ 'gemini-flash-lite-latest',
+ 'gemini-pro-latest',
+ ]
+ ).some(id => id === modelId);
+ const isGemini2OrNewer = modelId.includes("gemini-2") || modelId.includes("gemini-3") || isLatest;
const supportsDynamicRetrieval = modelId.includes("gemini-1.5-flash") && !modelId.includes("-8b");
const supportsFileSearch = modelId.includes("gemini-2.5");
if (tools == null) {
@@ -450,7 +458,7 @@ function prepareTools({
providerDefinedTools.forEach((tool) => {
switch (tool.id) {
case "google.google_search":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({ googleSearch: {} });
} else if (supportsDynamicRetrieval) {
googleTools2.push({
@@ -466,7 +474,7 @@ function prepareTools({
}
break;
case "google.url_context":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({ urlContext: {} });
} else {
toolWarnings.push({
@@ -477,7 +485,7 @@ function prepareTools({
}
break;
case "google.code_execution":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({ codeExecution: {} });
} else {
toolWarnings.push({
@@ -499,7 +507,7 @@ function prepareTools({
}
break;
case "google.vertex_rag_store":
- if (isGemini2) {
+ if (isGemini2OrNewer) {
googleTools2.push({
retrieval: {
vertex_rag_store: {
@@ -1434,9 +1442,7 @@ var googleTools = {
vertexRagStore
};
export {
- GoogleGenerativeAILanguageModel,
getGroundingMetadataSchema,
- getUrlContextMetadataSchema,
- googleTools
+ getUrlContextMetadataSchema, GoogleGenerativeAILanguageModel, googleTools
};
//# sourceMappingURL=index.mjs.map
\ No newline at end of file


@ -0,0 +1,26 @@
diff --git a/dist/index.js b/dist/index.js
index dc7b74ba55337c491cdf1ab3e39ca68cc4187884..ace8c90591288e42c2957e93c9bf7984f1b22444 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -472,7 +472,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
// src/get-model-path.ts
function getModelPath(modelId) {
- return modelId.includes("/") ? modelId : `models/${modelId}`;
+ return modelId.includes("models/") ? modelId : `models/${modelId}`;
}
// src/google-generative-ai-options.ts
diff --git a/dist/index.mjs b/dist/index.mjs
index 8390439c38cb7eaeb52080862cd6f4c58509e67c..a7647f2e11700dff7e1c8d4ae8f99d3637010733 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -478,7 +478,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
// src/get-model-path.ts
function getModelPath(modelId) {
- return modelId.includes("/") ? modelId : `models/${modelId}`;
+ return modelId.includes("models/") ? modelId : `models/${modelId}`;
}
// src/google-generative-ai-options.ts


@ -1,131 +0,0 @@
diff --git a/dist/index.mjs b/dist/index.mjs
index b3f018730a93639aad7c203f15fb1aeb766c73f4..ade2a43d66e9184799d072153df61ef7be4ea110 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -296,7 +296,14 @@ var HuggingFaceResponsesLanguageModel = class {
metadata: huggingfaceOptions == null ? void 0 : huggingfaceOptions.metadata,
instructions: huggingfaceOptions == null ? void 0 : huggingfaceOptions.instructions,
...preparedTools && { tools: preparedTools },
- ...preparedToolChoice && { tool_choice: preparedToolChoice }
+ ...preparedToolChoice && { tool_choice: preparedToolChoice },
+ ...(huggingfaceOptions?.reasoningEffort != null && {
+ reasoning: {
+ ...(huggingfaceOptions?.reasoningEffort != null && {
+ effort: huggingfaceOptions.reasoningEffort,
+ }),
+ },
+ }),
};
return { args: baseArgs, warnings };
}
@@ -365,6 +372,20 @@ var HuggingFaceResponsesLanguageModel = class {
}
break;
}
+ case 'reasoning': {
+ for (const contentPart of part.content) {
+ content.push({
+ type: 'reasoning',
+ text: contentPart.text,
+ providerMetadata: {
+ huggingface: {
+ itemId: part.id,
+ },
+ },
+ });
+ }
+ break;
+ }
case "mcp_call": {
content.push({
type: "tool-call",
@@ -519,6 +540,11 @@ var HuggingFaceResponsesLanguageModel = class {
id: value.item.call_id,
toolName: value.item.name
});
+ } else if (value.item.type === 'reasoning') {
+ controller.enqueue({
+ type: 'reasoning-start',
+ id: value.item.id,
+ });
}
return;
}
@@ -570,6 +596,22 @@ var HuggingFaceResponsesLanguageModel = class {
});
return;
}
+ if (isReasoningDeltaChunk(value)) {
+ controller.enqueue({
+ type: 'reasoning-delta',
+ id: value.item_id,
+ delta: value.delta,
+ });
+ return;
+ }
+
+ if (isReasoningEndChunk(value)) {
+ controller.enqueue({
+ type: 'reasoning-end',
+ id: value.item_id,
+ });
+ return;
+ }
},
flush(controller) {
controller.enqueue({
@@ -593,7 +635,8 @@ var HuggingFaceResponsesLanguageModel = class {
var huggingfaceResponsesProviderOptionsSchema = z2.object({
metadata: z2.record(z2.string(), z2.string()).optional(),
instructions: z2.string().optional(),
- strictJsonSchema: z2.boolean().optional()
+ strictJsonSchema: z2.boolean().optional(),
+ reasoningEffort: z2.string().optional(),
});
var huggingfaceResponsesResponseSchema = z2.object({
id: z2.string(),
@@ -727,12 +770,31 @@ var responseCreatedChunkSchema = z2.object({
model: z2.string()
})
});
+var reasoningTextDeltaChunkSchema = z2.object({
+ type: z2.literal('response.reasoning_text.delta'),
+ item_id: z2.string(),
+ output_index: z2.number(),
+ content_index: z2.number(),
+ delta: z2.string(),
+ sequence_number: z2.number(),
+});
+
+var reasoningTextEndChunkSchema = z2.object({
+ type: z2.literal('response.reasoning_text.done'),
+ item_id: z2.string(),
+ output_index: z2.number(),
+ content_index: z2.number(),
+ text: z2.string(),
+ sequence_number: z2.number(),
+});
var huggingfaceResponsesChunkSchema = z2.union([
responseOutputItemAddedSchema,
responseOutputItemDoneSchema,
textDeltaChunkSchema,
responseCompletedChunkSchema,
responseCreatedChunkSchema,
+ reasoningTextDeltaChunkSchema,
+ reasoningTextEndChunkSchema,
z2.object({ type: z2.string() }).loose()
// fallback for unknown chunks
]);
@@ -751,6 +813,12 @@ function isResponseCompletedChunk(chunk) {
function isResponseCreatedChunk(chunk) {
return chunk.type === "response.created";
}
+function isReasoningDeltaChunk(chunk) {
+ return chunk.type === 'response.reasoning_text.delta';
+}
+function isReasoningEndChunk(chunk) {
+ return chunk.type === 'response.reasoning_text.done';
+}
// src/huggingface-provider.ts
function createHuggingFace(options = {}) {


@ -0,0 +1,140 @@
diff --git a/dist/index.js b/dist/index.js
index 73045a7d38faafdc7f7d2cd79d7ff0e2b031056b..8d948c9ac4ea4b474db9ef3c5491961e7fcf9a07 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -421,6 +421,17 @@ var OpenAICompatibleChatLanguageModel = class {
text: reasoning
});
}
+ if (choice.message.images) {
+ for (const image of choice.message.images) {
+ const match1 = image.image_url.url.match(/^data:([^;]+)/)
+ const match2 = image.image_url.url.match(/^data:[^;]*;base64,(.+)$/);
+ content.push({
+ type: 'file',
+ mediaType: match1 ? (match1[1] ?? 'image/jpeg') : 'image/jpeg',
+ data: match2 ? match2[1] : image.image_url.url,
+ });
+ }
+ }
if (choice.message.tool_calls != null) {
for (const toolCall of choice.message.tool_calls) {
content.push({
@@ -598,6 +609,17 @@ var OpenAICompatibleChatLanguageModel = class {
delta: delta.content
});
}
+ if (delta.images) {
+ for (const image of delta.images) {
+ const match1 = image.image_url.url.match(/^data:([^;]+)/)
+ const match2 = image.image_url.url.match(/^data:[^;]*;base64,(.+)$/);
+ controller.enqueue({
+ type: 'file',
+ mediaType: match1 ? (match1[1] ?? 'image/jpeg') : 'image/jpeg',
+ data: match2 ? match2[1] : image.image_url.url,
+ });
+ }
+ }
if (delta.tool_calls != null) {
for (const toolCallDelta of delta.tool_calls) {
const index = toolCallDelta.index;
@@ -765,6 +787,14 @@ var OpenAICompatibleChatResponseSchema = import_v43.z.object({
arguments: import_v43.z.string()
})
})
+ ).nullish(),
+ images: import_v43.z.array(
+ import_v43.z.object({
+ type: import_v43.z.literal('image_url'),
+ image_url: import_v43.z.object({
+ url: import_v43.z.string(),
+ })
+ })
).nullish()
}),
finish_reason: import_v43.z.string().nullish()
@@ -795,6 +825,14 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union(
arguments: import_v43.z.string().nullish()
})
})
+ ).nullish(),
+ images: import_v43.z.array(
+ import_v43.z.object({
+ type: import_v43.z.literal('image_url'),
+ image_url: import_v43.z.object({
+ url: import_v43.z.string(),
+ })
+ })
).nullish()
}).nullish(),
finish_reason: import_v43.z.string().nullish()
diff --git a/dist/index.mjs b/dist/index.mjs
index 1c2b9560bbfbfe10cb01af080aeeed4ff59db29c..2c8ddc4fc9bfc5e7e06cfca105d197a08864c427 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -405,6 +405,17 @@ var OpenAICompatibleChatLanguageModel = class {
text: reasoning
});
}
+ if (choice.message.images) {
+ for (const image of choice.message.images) {
+ const match1 = image.image_url.url.match(/^data:([^;]+)/)
+ const match2 = image.image_url.url.match(/^data:[^;]*;base64,(.+)$/);
+ content.push({
+ type: 'file',
+ mediaType: match1 ? (match1[1] ?? 'image/jpeg') : 'image/jpeg',
+ data: match2 ? match2[1] : image.image_url.url,
+ });
+ }
+ }
if (choice.message.tool_calls != null) {
for (const toolCall of choice.message.tool_calls) {
content.push({
@@ -582,6 +593,17 @@ var OpenAICompatibleChatLanguageModel = class {
delta: delta.content
});
}
+ if (delta.images) {
+ for (const image of delta.images) {
+ const match1 = image.image_url.url.match(/^data:([^;]+)/)
+ const match2 = image.image_url.url.match(/^data:[^;]*;base64,(.+)$/);
+ controller.enqueue({
+ type: 'file',
+ mediaType: match1 ? (match1[1] ?? 'image/jpeg') : 'image/jpeg',
+ data: match2 ? match2[1] : image.image_url.url,
+ });
+ }
+ }
if (delta.tool_calls != null) {
for (const toolCallDelta of delta.tool_calls) {
const index = toolCallDelta.index;
@@ -749,6 +771,14 @@ var OpenAICompatibleChatResponseSchema = z3.object({
arguments: z3.string()
})
})
+ ).nullish(),
+ images: z3.array(
+ z3.object({
+ type: z3.literal('image_url'),
+ image_url: z3.object({
+ url: z3.string(),
+ })
+ })
).nullish()
}),
finish_reason: z3.string().nullish()
@@ -779,6 +809,14 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([
arguments: z3.string().nullish()
})
})
+ ).nullish(),
+ images: z3.array(
+ z3.object({
+ type: z3.literal('image_url'),
+ image_url: z3.object({
+ url: z3.string(),
+ })
+ })
).nullish()
}).nullish(),
finish_reason: z3.string().nullish()


@ -1,5 +1,5 @@
diff --git a/dist/index.js b/dist/index.js
index 992c85ac6656e51c3471af741583533c5a7bf79f..83c05952a07aebb95fc6c62f9ddb8aa96b52ac0d 100644
index 7481f3b3511078068d87d03855b568b20bb86971..8ac5ec28d2f7ad1b3b0d3f8da945c75674e59637 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -274,6 +274,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
@ -18,7 +18,7 @@ index 992c85ac6656e51c3471af741583533c5a7bf79f..83c05952a07aebb95fc6c62f9ddb8aa9
tool_calls: import_v42.z.array(
import_v42.z.object({
index: import_v42.z.number(),
@@ -785,6 +787,13 @@ var OpenAIChatLanguageModel = class {
@@ -795,6 +797,13 @@ var OpenAIChatLanguageModel = class {
if (text != null && text.length > 0) {
content.push({ type: "text", text });
}
@ -32,7 +32,7 @@ index 992c85ac6656e51c3471af741583533c5a7bf79f..83c05952a07aebb95fc6c62f9ddb8aa9
for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
content.push({
type: "tool-call",
@@ -866,6 +875,7 @@ var OpenAIChatLanguageModel = class {
@@ -876,6 +885,7 @@ var OpenAIChatLanguageModel = class {
};
let metadataExtracted = false;
let isActiveText = false;
@ -40,7 +40,7 @@ index 992c85ac6656e51c3471af741583533c5a7bf79f..83c05952a07aebb95fc6c62f9ddb8aa9
const providerMetadata = { openai: {} };
return {
stream: response.pipeThrough(
@@ -923,6 +933,21 @@ var OpenAIChatLanguageModel = class {
@@ -933,6 +943,21 @@ var OpenAIChatLanguageModel = class {
return;
}
const delta = choice.delta;
@ -62,7 +62,7 @@ index 992c85ac6656e51c3471af741583533c5a7bf79f..83c05952a07aebb95fc6c62f9ddb8aa9
if (delta.content != null) {
if (!isActiveText) {
controller.enqueue({ type: "text-start", id: "0" });
@@ -1035,6 +1060,9 @@ var OpenAIChatLanguageModel = class {
@@ -1045,6 +1070,9 @@ var OpenAIChatLanguageModel = class {
}
},
flush(controller) {


@ -109,16 +109,16 @@
"@agentic/exa": "^7.3.3",
"@agentic/searxng": "^7.3.3",
"@agentic/tavily": "^7.3.3",
"@ai-sdk/amazon-bedrock": "^3.0.53",
"@ai-sdk/anthropic": "^2.0.44",
"@ai-sdk/amazon-bedrock": "^3.0.56",
"@ai-sdk/anthropic": "^2.0.45",
"@ai-sdk/cerebras": "^1.0.31",
"@ai-sdk/gateway": "^2.0.9",
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.36#~/.yarn/patches/@ai-sdk-google-npm-2.0.36-6f3cc06026.patch",
"@ai-sdk/google-vertex": "^3.0.68",
"@ai-sdk/huggingface": "patch:@ai-sdk/huggingface@npm%3A0.0.8#~/.yarn/patches/@ai-sdk-huggingface-npm-0.0.8-d4d0aaac93.patch",
"@ai-sdk/mistral": "^2.0.23",
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch",
"@ai-sdk/perplexity": "^2.0.17",
"@ai-sdk/gateway": "^2.0.13",
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
"@ai-sdk/google-vertex": "^3.0.72",
"@ai-sdk/huggingface": "^0.0.10",
"@ai-sdk/mistral": "^2.0.24",
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.71#~/.yarn/patches/@ai-sdk-openai-npm-2.0.71-a88ef00525.patch",
"@ai-sdk/perplexity": "^2.0.20",
"@ai-sdk/test-server": "^0.0.1",
"@ant-design/v5-patch-for-react-19": "^1.0.3",
"@anthropic-ai/sdk": "^0.41.0",
@ -164,7 +164,7 @@
"@modelcontextprotocol/sdk": "^1.17.5",
"@mozilla/readability": "^0.6.0",
"@notionhq/client": "^2.2.15",
"@openrouter/ai-sdk-provider": "^1.2.0",
"@openrouter/ai-sdk-provider": "^1.2.5",
"@opentelemetry/api": "^1.9.0",
"@opentelemetry/core": "2.0.0",
"@opentelemetry/exporter-trace-otlp-http": "^0.200.0",
@ -240,7 +240,7 @@
"@viz-js/lang-dot": "^1.0.5",
"@viz-js/viz": "^3.14.0",
"@xyflow/react": "^12.4.4",
"ai": "^5.0.90",
"ai": "^5.0.98",
"antd": "patch:antd@npm%3A5.27.0#~/.yarn/patches/antd-npm-5.27.0-aa91c36546.patch",
"archiver": "^7.0.1",
"async-mutex": "^0.5.0",
@ -413,8 +413,11 @@
"@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@langchain/openai@npm:>=0.2.0 <0.7.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@ai-sdk/openai@npm:2.0.64": "patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch",
"@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch",
"@ai-sdk/google@npm:2.0.36": "patch:@ai-sdk/google@npm%3A2.0.36#~/.yarn/patches/@ai-sdk-google-npm-2.0.36-6f3cc06026.patch"
"@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.71#~/.yarn/patches/@ai-sdk-openai-npm-2.0.71-a88ef00525.patch",
"@ai-sdk/google@npm:2.0.40": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
"@ai-sdk/openai@npm:2.0.71": "patch:@ai-sdk/openai@npm%3A2.0.71#~/.yarn/patches/@ai-sdk-openai-npm-2.0.71-a88ef00525.patch",
"@ai-sdk/openai-compatible@npm:1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
"@ai-sdk/openai-compatible@npm:^1.0.19": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
},
"packageManager": "yarn@4.9.1",
"lint-staged": {


@ -42,7 +42,7 @@
},
"dependencies": {
"@ai-sdk/provider": "^2.0.0",
"@ai-sdk/provider-utils": "^3.0.12"
"@ai-sdk/provider-utils": "^3.0.17"
},
"devDependencies": {
"tsdown": "^0.13.3",


@ -39,13 +39,13 @@
"ai": "^5.0.26"
},
"dependencies": {
"@ai-sdk/anthropic": "^2.0.43",
"@ai-sdk/azure": "^2.0.66",
"@ai-sdk/deepseek": "^1.0.27",
"@ai-sdk/openai-compatible": "^1.0.26",
"@ai-sdk/anthropic": "^2.0.45",
"@ai-sdk/azure": "^2.0.73",
"@ai-sdk/deepseek": "^1.0.29",
"@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
"@ai-sdk/provider": "^2.0.0",
"@ai-sdk/provider-utils": "^3.0.16",
"@ai-sdk/xai": "^2.0.31",
"@ai-sdk/provider-utils": "^3.0.17",
"@ai-sdk/xai": "^2.0.34",
"zod": "^4.1.5"
},
"devDependencies": {


@ -155,7 +155,8 @@ export default class ModernAiProvider {
params: StreamTextParams,
config: ModernAiProviderConfig
): Promise<CompletionsResult> {
if (config.isImageGenerationEndpoint) {
// ai-gateway is not an image/generation endpoint, so skip the legacy path for now
if (config.isImageGenerationEndpoint && config.provider!.id !== SystemProviderIds['ai-gateway']) {
// Use the legacy implementation for image generation (supports advanced features such as image editing)
if (!config.uiMessages) {
throw new Error('uiMessages is required for image generation endpoint')
@ -475,7 +476,7 @@ export default class ModernAiProvider {
}
// Ensure the local provider has been created
if (!this.localProvider) {
if (!this.localProvider && this.config) {
this.localProvider = await createAiSdkProvider(this.config)
if (!this.localProvider) {
throw new Error('Local provider not created')


@ -2,7 +2,7 @@ import type { WebSearchPluginConfig } from '@cherrystudio/ai-core/built-in/plugi
import { loggerService } from '@logger'
import { isSupportedThinkingTokenQwenModel } from '@renderer/config/models'
import type { MCPTool } from '@renderer/types'
import { type Assistant, type Message, type Model, type Provider } from '@renderer/types'
import { type Assistant, type Message, type Model, type Provider, SystemProviderIds } from '@renderer/types'
import type { Chunk } from '@renderer/types/chunk'
import { isSupportEnableThinkingProvider } from '@renderer/utils/provider'
import type { LanguageModelMiddleware } from 'ai'
@ -12,6 +12,7 @@ import { isEmpty } from 'lodash'
import { isOpenRouterGeminiGenerateImageModel } from '../utils/image'
import { noThinkMiddleware } from './noThinkMiddleware'
import { openrouterGenerateImageMiddleware } from './openrouterGenerateImageMiddleware'
import { openrouterReasoningMiddleware } from './openrouterReasoningMiddleware'
import { qwenThinkingMiddleware } from './qwenThinkingMiddleware'
import { toolChoiceMiddleware } from './toolChoiceMiddleware'
@ -217,6 +218,14 @@ function addProviderSpecificMiddlewares(builder: AiSdkMiddlewareBuilder, config:
middleware: noThinkMiddleware()
})
}
if (config.provider.id === SystemProviderIds.openrouter && config.enableReasoning) {
builder.add({
name: 'openrouter-reasoning-redaction',
middleware: openrouterReasoningMiddleware()
})
logger.debug('Added OpenRouter reasoning redaction middleware')
}
}
/**


@ -0,0 +1,50 @@
import type { LanguageModelV2StreamPart } from '@ai-sdk/provider'
import type { LanguageModelMiddleware } from 'ai'
/**
* https://openrouter.ai/docs/docs/best-practices/reasoning-tokens#example-preserving-reasoning-blocks-with-openrouter-and-claude
*
* @returns LanguageModelMiddleware - a middleware that filters out redacted reasoning blocks
*/
export function openrouterReasoningMiddleware(): LanguageModelMiddleware {
const REDACTED_BLOCK = '[REDACTED]'
return {
middlewareVersion: 'v2',
wrapGenerate: async ({ doGenerate }) => {
const { content, ...rest } = await doGenerate()
const modifiedContent = content.map((part) => {
if (part.type === 'reasoning' && part.text.includes(REDACTED_BLOCK)) {
return {
...part,
text: part.text.replace(REDACTED_BLOCK, '')
}
}
return part
})
return { content: modifiedContent, ...rest }
},
wrapStream: async ({ doStream }) => {
const { stream, ...rest } = await doStream()
return {
stream: stream.pipeThrough(
new TransformStream<LanguageModelV2StreamPart, LanguageModelV2StreamPart>({
transform(
chunk: LanguageModelV2StreamPart,
controller: TransformStreamDefaultController<LanguageModelV2StreamPart>
) {
if (chunk.type === 'reasoning-delta' && chunk.delta.includes(REDACTED_BLOCK)) {
controller.enqueue({
...chunk,
delta: chunk.delta.replace(REDACTED_BLOCK, '')
})
} else {
controller.enqueue(chunk)
}
}
})
),
...rest
}
}
}
}
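
For orientation only, a sketch of how such a middleware can be attached with the AI SDK's wrapLanguageModel (in this commit it is actually registered through the middleware builder above; the model id below is purely illustrative).

import { wrapLanguageModel } from 'ai'
import { createOpenRouter } from '@openrouter/ai-sdk-provider'

import { openrouterReasoningMiddleware } from './openrouterReasoningMiddleware'

// Wrap an OpenRouter Claude model so '[REDACTED]' markers are stripped from
// reasoning parts before they reach downstream consumers.
const openrouter = createOpenRouter({ apiKey: process.env.OPENROUTER_API_KEY })
export const model = wrapLanguageModel({
  model: openrouter('anthropic/claude-sonnet-4'), // illustrative model id
  middleware: openrouterReasoningMiddleware()
})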


@ -4,11 +4,12 @@
*/
import { anthropic } from '@ai-sdk/anthropic'
import { azure } from '@ai-sdk/azure'
import { google } from '@ai-sdk/google'
import { vertexAnthropic } from '@ai-sdk/google-vertex/anthropic/edge'
import { vertex } from '@ai-sdk/google-vertex/edge'
import { combineHeaders } from '@ai-sdk/provider-utils'
import type { WebSearchPluginConfig } from '@cherrystudio/ai-core/built-in/plugins'
import type { AnthropicSearchConfig, WebSearchPluginConfig } from '@cherrystudio/ai-core/built-in/plugins'
import { isBaseProvider } from '@cherrystudio/ai-core/core/providers/schemas'
import { loggerService } from '@logger'
import {
@ -127,6 +128,17 @@ export async function buildStreamTextParams(
maxUses: webSearchConfig.maxResults,
blockedDomains: blockedDomains.length > 0 ? blockedDomains : undefined
}) as ProviderDefinedTool
} else if (aiSdkProviderId === 'azure-responses') {
tools.web_search_preview = azure.tools.webSearchPreview({
searchContextSize: webSearchPluginConfig?.openai!.searchContextSize
}) as ProviderDefinedTool
} else if (aiSdkProviderId === 'azure-anthropic') {
const blockedDomains = mapRegexToPatterns(webSearchConfig.excludeDomains)
const anthropicSearchOptions: AnthropicSearchConfig = {
maxUses: webSearchConfig.maxResults,
blockedDomains: blockedDomains.length > 0 ? blockedDomains : undefined
}
tools.web_search = anthropic.tools.webSearch_20250305(anthropicSearchOptions) as ProviderDefinedTool
}
}
@ -144,9 +156,10 @@ export async function buildStreamTextParams(
tools.url_context = google.tools.urlContext({}) as ProviderDefinedTool
break
case 'anthropic':
case 'azure-anthropic':
case 'google-vertex-anthropic':
tools.web_fetch = (
aiSdkProviderId === 'anthropic'
['anthropic', 'azure-anthropic'].includes(aiSdkProviderId)
? anthropic.tools.webFetch_20250910({
maxUses: webSearchConfig.maxResults,
blockedDomains: blockedDomains.length > 0 ? blockedDomains : undefined


@ -23,6 +23,26 @@ vi.mock('@cherrystudio/ai-core', () => ({
}
}))
vi.mock('@renderer/services/AssistantService', () => ({
getProviderByModel: vi.fn(),
getAssistantSettings: vi.fn(),
getDefaultAssistant: vi.fn().mockReturnValue({
id: 'default',
name: 'Default Assistant',
prompt: '',
settings: {}
})
}))
vi.mock('@renderer/store/settings', () => ({
default: {},
settingsSlice: {
name: 'settings',
reducer: vi.fn(),
actions: {}
}
}))
// Mock the provider configs
vi.mock('../providerConfigs', () => ({
initializeNewProviders: vi.fn()


@ -12,7 +12,14 @@ vi.mock('@renderer/services/LoggerService', () => ({
}))
vi.mock('@renderer/services/AssistantService', () => ({
getProviderByModel: vi.fn()
getProviderByModel: vi.fn(),
getAssistantSettings: vi.fn(),
getDefaultAssistant: vi.fn().mockReturnValue({
id: 'default',
name: 'Default Assistant',
prompt: '',
settings: {}
})
}))
vi.mock('@renderer/store', () => ({


@ -0,0 +1,22 @@
import type { Provider } from '@renderer/types'
import { provider2Provider, startsWith } from './helper'
import type { RuleSet } from './types'
// https://platform.claude.com/docs/en/build-with-claude/claude-in-microsoft-foundry
const AZURE_ANTHROPIC_RULES: RuleSet = {
rules: [
{
match: startsWith('claude'),
provider: (provider: Provider) => ({
...provider,
type: 'anthropic',
apiHost: provider.apiHost + 'anthropic/v1',
id: 'azure-anthropic'
})
}
],
fallbackRule: (provider: Provider) => provider
}
export const azureAnthropicProviderCreator = provider2Provider.bind(null, AZURE_ANTHROPIC_RULES)
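
Illustration of what this rule set yields for a Claude model; the provider object below is a hypothetical example (only the transformation itself comes from the rule above).

// Hypothetical Azure provider as seen by handleSpecialProviders:
const before = {
  id: 'azure-openai',
  type: 'azure-openai',
  apiHost: 'https://my-foundry.services.ai.azure.com/'
}
// For a model id matching startsWith('claude'), the rule produces:
const after = {
  ...before,
  type: 'anthropic',
  apiHost: before.apiHost + 'anthropic/v1', // -> https://my-foundry.services.ai.azure.com/anthropic/v1
  id: 'azure-anthropic'
}
// Non-Claude models fall through unchanged via fallbackRule.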


@ -2,6 +2,7 @@ import { hasProviderConfigByAlias, type ProviderId, resolveProviderConfigId } fr
import { createProvider as createProviderCore } from '@cherrystudio/ai-core/provider'
import { loggerService } from '@logger'
import type { Provider } from '@renderer/types'
import { isAzureOpenAIProvider, isAzureResponsesEndpoint } from '@renderer/utils/provider'
import type { Provider as AiSdkProvider } from 'ai'
import type { AiSdkConfig } from '../types'
@ -59,6 +60,9 @@ function tryResolveProviderId(identifier: string): ProviderId | null {
export function getAiSdkProviderId(provider: Provider): string {
// 1. Try to resolve provider.id
const resolvedFromId = tryResolveProviderId(provider.id)
if (isAzureOpenAIProvider(provider) && isAzureResponsesEndpoint(provider)) {
return 'azure-responses'
}
if (resolvedFromId) {
return resolvedFromId
}


@ -25,6 +25,7 @@ import { cloneDeep } from 'lodash'
import type { AiSdkConfig } from '../types'
import { aihubmixProviderCreator, newApiResolverCreator, vertexAnthropicProviderCreator } from './config'
import { azureAnthropicProviderCreator } from './config/azure-anthropic'
import { COPILOT_DEFAULT_HEADERS } from './constants'
import { getAiSdkProviderId } from './factory'
@ -70,6 +71,9 @@ function handleSpecialProviders(model: Model, provider: Provider): Provider {
return vertexAnthropicProviderCreator(model, provider)
}
}
if (isAzureOpenAIProvider(provider)) {
return azureAnthropicProviderCreator(model, provider)
}
return provider
}
@ -181,13 +185,10 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A
// azure
// https://learn.microsoft.com/en-us/azure/ai-foundry/openai/latest
// https://learn.microsoft.com/en-us/azure/ai-foundry/openai/how-to/responses?tabs=python-key#responses-api
if (aiSdkProviderId === 'azure' || actualProvider.type === 'azure-openai') {
// extraOptions.apiVersion = actualProvider.apiVersion === 'preview' ? 'v1' : actualProvider.apiVersion (defaults to v1 rather than the azure endpoint)
if (actualProvider.apiVersion === 'preview' || actualProvider.apiVersion === 'v1') {
extraOptions.mode = 'responses'
} else {
extraOptions.mode = 'chat'
}
if (aiSdkProviderId === 'azure-responses') {
extraOptions.mode = 'responses'
} else if (aiSdkProviderId === 'azure') {
extraOptions.mode = 'chat'
}
// bedrock


@ -32,6 +32,14 @@ export const NEW_PROVIDER_CONFIGS: ProviderConfig[] = [
supportsImageGeneration: true,
aliases: ['vertexai-anthropic']
},
{
id: 'azure-anthropic',
name: 'Azure AI Anthropic',
import: () => import('@ai-sdk/anthropic'),
creatorFunctionName: 'createAnthropic',
supportsImageGeneration: false,
aliases: ['azure-anthropic']
},
{
id: 'github-copilot-openai-compatible',
name: 'GitHub Copilot OpenAI Compatible',


@ -77,11 +77,14 @@ vi.mock('@renderer/config/models', async (importOriginal) => ({
}
}))
vi.mock('@renderer/utils/provider', () => ({
isSupportServiceTierProvider: vi.fn((provider) => {
return [SystemProviderIds.openai, SystemProviderIds.groq].includes(provider.id)
})
}))
vi.mock(import('@renderer/utils/provider'), async (importOriginal) => {
return {
...(await importOriginal()),
isSupportServiceTierProvider: vi.fn((provider) => {
return [SystemProviderIds.openai, SystemProviderIds.groq].includes(provider.id)
})
}
})
vi.mock('@renderer/store/settings', () => ({
default: (state = { settings: {} }) => state


@ -178,6 +178,7 @@ export function buildProviderOptions(
case 'google-vertex':
providerSpecificOptions = buildGeminiProviderOptions(assistant, model, capabilities)
break
case 'azure-anthropic':
case 'google-vertex-anthropic':
providerSpecificOptions = buildAnthropicProviderOptions(assistant, model, capabilities)
break
@ -210,6 +211,7 @@ export function buildProviderOptions(
{
'google-vertex': 'google',
'google-vertex-anthropic': 'anthropic',
'azure-anthropic': 'anthropic',
'ai-gateway': 'gateway'
}[rawProviderId] || rawProviderId


@ -12,6 +12,7 @@ import {
isDeepSeekHybridInferenceModel,
isDoubaoSeedAfter251015,
isDoubaoThinkingAutoModel,
isGemini3Model,
isGPT51SeriesModel,
isGrok4FastReasoningModel,
isGrokReasoningModel,
@ -35,7 +36,7 @@ import {
} from '@renderer/config/models'
import { getStoreSetting } from '@renderer/hooks/useSettings'
import { getAssistantSettings, getProviderByModel } from '@renderer/services/AssistantService'
import type { Assistant, Model } from '@renderer/types'
import type { Assistant, Model, ReasoningEffortOption } from '@renderer/types'
import { EFFORT_RATIO, isSystemProvider, SystemProviderIds } from '@renderer/types'
import type { OpenAISummaryText } from '@renderer/types/aiCoreTypes'
import type { ReasoningEffortOptionalParams } from '@renderer/types/sdk'
@ -279,6 +280,12 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
// gemini series, openai compatible api
if (isSupportedThinkingTokenGeminiModel(model)) {
// https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#openai_compatibility
if (isGemini3Model(model)) {
return {
reasoning_effort: reasoningEffort
}
}
if (reasoningEffort === 'auto') {
return {
extra_body: {
@ -458,6 +465,21 @@ export function getAnthropicReasoningParams(
return {}
}
type GoogelThinkingLevel = NonNullable<GoogleGenerativeAIProviderOptions['thinkingConfig']>['thinkingLevel']
function mapToGeminiThinkingLevel(reasoningEffort: ReasoningEffortOption): GoogelThinkingLevel {
switch (reasoningEffort) {
case 'low':
return 'low'
case 'medium':
return 'medium'
case 'high':
return 'high'
default:
return 'medium'
}
}
/**
* Gemini
* GeminiAPIClient
@ -485,6 +507,15 @@ export function getGeminiReasoningParams(
}
}
// https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#new_api_features_in_gemini_3
if (isGemini3Model(model)) {
return {
thinkingConfig: {
thinkingLevel: mapToGeminiThinkingLevel(reasoningEffort)
}
}
}
const effortRatio = EFFORT_RATIO[reasoningEffort]
if (effortRatio > 1) {


@ -47,6 +47,7 @@ export function buildProviderBuiltinWebSearchConfig(
model?: Model
): WebSearchPluginConfig | undefined {
switch (providerId) {
case 'azure-responses':
case 'openai': {
const searchContextSize = isOpenAIDeepResearchModel(model)
? 'medium'


@ -1,6 +1,5 @@
import {
isImageEnhancementModel,
isPureGenerateImageModel,
isQwenReasoningModel,
isSupportedThinkingTokenQwenModel,
isVisionModel
@ -90,11 +89,4 @@ describe('Vision Model Detection', () => {
expect(isImageEnhancementModel({ id: 'qwen-image-edit' } as Model)).toBe(true)
expect(isImageEnhancementModel({ id: 'grok-2-image-latest' } as Model)).toBe(true)
})
test('isPureGenerateImageModel', () => {
expect(isPureGenerateImageModel({ id: 'gpt-image-1' } as Model)).toBe(true)
expect(isPureGenerateImageModel({ id: 'gemini-2.5-flash-image-preview' } as Model)).toBe(true)
expect(isPureGenerateImageModel({ id: 'gemini-2.0-flash-preview-image-generation' } as Model)).toBe(true)
expect(isPureGenerateImageModel({ id: 'grok-2-image-latest' } as Model)).toBe(true)
expect(isPureGenerateImageModel({ id: 'gpt-4o' } as Model)).toBe(false)
})
})


@ -68,7 +68,9 @@ vi.mock('../embedding', () => ({
}))
vi.mock('../vision', () => ({
isTextToImageModel: vi.fn()
isTextToImageModel: vi.fn(),
isPureGenerateImageModel: vi.fn(),
isModernGenerateImageModel: vi.fn()
}))
describe('Doubao Models', () => {
@ -926,6 +928,69 @@ describe('Gemini Models', () => {
group: ''
})
).toBe(true)
// Version with decimals
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3.0-flash',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3.5-pro-preview',
name: '',
provider: '',
group: ''
})
).toBe(true)
})
it('should return true for gemini-3 image models', () => {
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3-pro-image-preview',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3.0-flash-image-preview',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3.5-pro-image-preview',
name: '',
provider: '',
group: ''
})
).toBe(true)
})
it('should return false for gemini-2.x image models', () => {
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-2.5-flash-image-preview',
name: '',
provider: '',
group: ''
})
).toBe(false)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-2.0-pro-image-preview',
name: '',
provider: '',
group: ''
})
).toBe(false)
})
it('should return false for image and tts models', () => {
@ -945,6 +1010,14 @@ describe('Gemini Models', () => {
group: ''
})
).toBe(false)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3-flash-tts',
name: '',
provider: '',
group: ''
})
).toBe(false)
})
it('should return false for older gemini models', () => {
@ -1065,6 +1138,40 @@ describe('Gemini Models', () => {
group: ''
})
).toBe(true)
// Version with decimals
expect(
isGeminiReasoningModel({
id: 'gemini-3.0-flash',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isGeminiReasoningModel({
id: 'gemini-3.5-pro-preview',
name: '',
provider: '',
group: ''
})
).toBe(true)
// Image models
expect(
isGeminiReasoningModel({
id: 'gemini-3-pro-image-preview',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isGeminiReasoningModel({
id: 'gemini-3.5-flash-image-preview',
name: '',
provider: '',
group: ''
})
).toBe(true)
})
it('should return false for older gemini models without thinking', () => {


@ -110,6 +110,7 @@ describe('vision helpers', () => {
it('requires both generate and text-to-image support', () => {
expect(isPureGenerateImageModel(createModel({ id: 'gpt-image-1' }))).toBe(true)
expect(isPureGenerateImageModel(createModel({ id: 'gpt-4o' }))).toBe(false)
expect(isPureGenerateImageModel(createModel({ id: 'gemini-2.5-flash-image-preview' }))).toBe(true)
})
})


@ -26,7 +26,8 @@ const isGenerateImageModel = vi.hoisted(() => vi.fn())
vi.mock('../vision', () => ({
isPureGenerateImageModel: (...args: any[]) => isPureGenerateImageModel(...args),
isTextToImageModel: (...args: any[]) => isTextToImageModel(...args),
isGenerateImageModel: (...args: any[]) => isGenerateImageModel(...args)
isGenerateImageModel: (...args: any[]) => isGenerateImageModel(...args),
isModernGenerateImageModel: vi.fn()
}))
const providerMocks = vi.hoisted(() => ({
@ -35,7 +36,8 @@ const providerMocks = vi.hoisted(() => ({
isOpenAICompatibleProvider: vi.fn(),
isOpenAIProvider: vi.fn(),
isVertexProvider: vi.fn(),
isAwsBedrockProvider: vi.fn()
isAwsBedrockProvider: vi.fn(),
isAzureOpenAIProvider: vi.fn()
}))
vi.mock('@renderer/utils/provider', () => providerMocks)
@ -367,9 +369,22 @@ describe('websearch helpers', () => {
it('should match gemini 3 models', () => {
// Preview versions
expect(GEMINI_SEARCH_REGEX.test('gemini-3-pro-preview')).toBe(true)
expect(GEMINI_SEARCH_REGEX.test('gemini-3-flash-preview')).toBe(true)
expect(GEMINI_SEARCH_REGEX.test('gemini-3-pro-image-preview')).toBe(true)
expect(GEMINI_SEARCH_REGEX.test('gemini-3-flash-image-preview')).toBe(true)
// Future stable versions
expect(GEMINI_SEARCH_REGEX.test('gemini-3-flash')).toBe(true)
expect(GEMINI_SEARCH_REGEX.test('gemini-3-pro')).toBe(true)
// Version with decimals
expect(GEMINI_SEARCH_REGEX.test('gemini-3.0-flash')).toBe(true)
expect(GEMINI_SEARCH_REGEX.test('gemini-3.0-pro')).toBe(true)
expect(GEMINI_SEARCH_REGEX.test('gemini-3.5-flash-preview')).toBe(true)
expect(GEMINI_SEARCH_REGEX.test('gemini-3.5-pro-image-preview')).toBe(true)
})
it('should not match gemini 2.x image-preview models', () => {
expect(GEMINI_SEARCH_REGEX.test('gemini-2.5-flash-image-preview')).toBe(false)
expect(GEMINI_SEARCH_REGEX.test('gemini-2.0-pro-image-preview')).toBe(false)
})
it('should not match older gemini models', () => {


@ -16,7 +16,7 @@ import {
isOpenAIReasoningModel,
isSupportedReasoningEffortOpenAIModel
} from './openai'
import { GEMINI_FLASH_MODEL_REGEX } from './utils'
import { GEMINI_FLASH_MODEL_REGEX, isGemini3Model } from './utils'
import { isTextToImageModel } from './vision'
// Reasoning models
@ -37,6 +37,7 @@ export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
grok: ['low', 'high'] as const,
grok4_fast: ['auto'] as const,
gemini: ['low', 'medium', 'high', 'auto'] as const,
gemini3: ['low', 'medium', 'high'] as const,
gemini_pro: ['low', 'medium', 'high', 'auto'] as const,
qwen: ['low', 'medium', 'high'] as const,
qwen_thinking: ['low', 'medium', 'high'] as const,
@ -63,6 +64,7 @@ export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
grok4_fast: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
gemini: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
gemini_pro: MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro,
gemini3: MODEL_SUPPORTED_REASONING_EFFORT.gemini3,
qwen: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const,
qwen_thinking: MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking,
doubao: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const,
@ -113,6 +115,9 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
} else {
thinkingModelType = 'gemini_pro'
}
if (isGemini3Model(model)) {
thinkingModelType = 'gemini3'
}
} else if (isSupportedReasoningEffortGrokModel(model)) thinkingModelType = 'grok'
else if (isSupportedThinkingTokenQwenModel(model)) {
if (isQwenAlwaysThinkModel(model)) {
@ -261,11 +266,19 @@ export function isGeminiReasoningModel(model?: Model): boolean {
// Regex for Gemini models that support thinking mode
export const GEMINI_THINKING_MODEL_REGEX =
/gemini-(?:2\.5.*(?:-latest)?|3-(?:flash|pro)(?:-preview)?|flash-latest|pro-latest|flash-lite-latest)(?:-[\w-]+)*$/i
/gemini-(?:2\.5.*(?:-latest)?|3(?:\.\d+)?-(?:flash|pro)(?:-preview)?|flash-latest|pro-latest|flash-lite-latest)(?:-[\w-]+)*$/i
export const isSupportedThinkingTokenGeminiModel = (model: Model): boolean => {
const modelId = getLowerBaseModelName(model.id, '/')
if (GEMINI_THINKING_MODEL_REGEX.test(modelId)) {
// gemini-3.x image models support thinking mode
if (isGemini3Model(model)) {
if (modelId.includes('tts')) {
return false
}
return true
}
// gemini-2.x image/tts models do not support it
if (modelId.includes('image') || modelId.includes('tts')) {
return false
}


@ -4,7 +4,7 @@ import { getLowerBaseModelName, isUserSelectedModelType } from '@renderer/utils'
import { isEmbeddingModel, isRerankModel } from './embedding'
import { isDeepSeekHybridInferenceModel } from './reasoning'
import { isPureGenerateImageModel, isTextToImageModel } from './vision'
import { isTextToImageModel } from './vision'
// Tool calling models
export const FUNCTION_CALLING_MODELS = [
@ -41,7 +41,9 @@ const FUNCTION_CALLING_EXCLUDED_MODELS = [
'gemini-1(?:\\.[\\w-]+)?',
'qwen-mt(?:-[\\w-]+)?',
'gpt-5-chat(?:-[\\w-]+)?',
'glm-4\\.5v'
'glm-4\\.5v',
'gemini-2.5-flash-image(?:-[\\w-]+)?',
'gemini-2.0-flash-preview-image-generation'
]
export const FUNCTION_CALLING_REGEX = new RegExp(
@ -50,13 +52,7 @@ export const FUNCTION_CALLING_REGEX = new RegExp(
)
export function isFunctionCallingModel(model?: Model): boolean {
if (
!model ||
isEmbeddingModel(model) ||
isRerankModel(model) ||
isTextToImageModel(model) ||
isPureGenerateImageModel(model)
) {
if (!model || isEmbeddingModel(model) || isRerankModel(model) || isTextToImageModel(model)) {
return false
}


@ -155,3 +155,8 @@ export const isMaxTemperatureOneModel = (model: Model): boolean => {
}
return false
}
export const isGemini3Model = (model: Model) => {
const modelId = getLowerBaseModelName(model.id)
return modelId.includes('gemini-3')
}


@ -3,6 +3,7 @@ import type { Model } from '@renderer/types'
import { getLowerBaseModelName, isUserSelectedModelType } from '@renderer/utils'
import { isEmbeddingModel, isRerankModel } from './embedding'
import { isFunctionCallingModel } from './tooluse'
// Vision models
const visionAllowedModels = [
@ -72,12 +73,10 @@ const VISION_REGEX = new RegExp(
// For middleware to identify models that must use the dedicated Image API
const DEDICATED_IMAGE_MODELS = [
'grok-2-image',
'grok-2-image-1212',
'grok-2-image-latest',
'dall-e-3',
'dall-e-2',
'gpt-image-1'
'grok-2-image(?:-[\\w-]+)?',
'dall-e(?:-[\\w-]+)?',
'gpt-image-1(?:-[\\w-]+)?',
'imagen(?:-[\\w-]+)?'
]
const IMAGE_ENHANCEMENT_MODELS = [
@ -85,13 +84,22 @@ const IMAGE_ENHANCEMENT_MODELS = [
'qwen-image-edit',
'gpt-image-1',
'gemini-2.5-flash-image(?:-[\\w-]+)?',
'gemini-2.0-flash-preview-image-generation'
'gemini-2.0-flash-preview-image-generation',
'gemini-3(?:\\.\\d+)?-pro-image(?:-[\\w-]+)?'
]
const IMAGE_ENHANCEMENT_MODELS_REGEX = new RegExp(IMAGE_ENHANCEMENT_MODELS.join('|'), 'i')
const DEDICATED_IMAGE_MODELS_REGEX = new RegExp(DEDICATED_IMAGE_MODELS.join('|'), 'i')
// Models that should auto-enable image generation button when selected
const AUTO_ENABLE_IMAGE_MODELS = ['gemini-2.5-flash-image', 'gemini-3-pro-image-preview', ...DEDICATED_IMAGE_MODELS]
const AUTO_ENABLE_IMAGE_MODELS = [
'gemini-2.5-flash-image(?:-[\\w-]+)?',
'gemini-3(?:\\.\\d+)?-pro-image(?:-[\\w-]+)?',
...DEDICATED_IMAGE_MODELS
]
const AUTO_ENABLE_IMAGE_MODELS_REGEX = new RegExp(AUTO_ENABLE_IMAGE_MODELS.join('|'), 'i')
const OPENAI_TOOL_USE_IMAGE_GENERATION_MODELS = [
'o3',
@ -105,27 +113,34 @@ const OPENAI_TOOL_USE_IMAGE_GENERATION_MODELS = [
const OPENAI_IMAGE_GENERATION_MODELS = [...OPENAI_TOOL_USE_IMAGE_GENERATION_MODELS, 'gpt-image-1']
const MODERN_IMAGE_MODELS = ['gemini-3(?:\\.\\d+)?-pro-image(?:-[\\w-]+)?']
const GENERATE_IMAGE_MODELS = [
'gemini-2.0-flash-exp',
'gemini-2.0-flash-exp-image-generation',
'gemini-2.0-flash-exp(?:-[\\w-]+)?',
'gemini-2.5-flash-image(?:-[\\w-]+)?',
'gemini-2.0-flash-preview-image-generation',
'gemini-2.5-flash-image',
'gemini-3-pro-image-preview',
...MODERN_IMAGE_MODELS,
...DEDICATED_IMAGE_MODELS
]
const OPENAI_IMAGE_GENERATION_MODELS_REGEX = new RegExp(OPENAI_IMAGE_GENERATION_MODELS.join('|'), 'i')
const GENERATE_IMAGE_MODELS_REGEX = new RegExp(GENERATE_IMAGE_MODELS.join('|'), 'i')
const MODERN_GENERATE_IMAGE_MODELS_REGEX = new RegExp(MODERN_IMAGE_MODELS.join('|'), 'i')
export const isDedicatedImageGenerationModel = (model: Model): boolean => {
if (!model) return false
const modelId = getLowerBaseModelName(model.id)
return DEDICATED_IMAGE_MODELS.some((m) => modelId.includes(m))
return DEDICATED_IMAGE_MODELS_REGEX.test(modelId)
}
export const isAutoEnableImageGenerationModel = (model: Model): boolean => {
if (!model) return false
const modelId = getLowerBaseModelName(model.id)
return AUTO_ENABLE_IMAGE_MODELS.some((m) => modelId.includes(m))
return AUTO_ENABLE_IMAGE_MODELS_REGEX.test(modelId)
}
/**
@ -147,48 +162,44 @@ export function isGenerateImageModel(model: Model): boolean {
const modelId = getLowerBaseModelName(model.id, '/')
if (provider.type === 'openai-response') {
return (
OPENAI_IMAGE_GENERATION_MODELS.some((imageModel) => modelId.includes(imageModel)) ||
GENERATE_IMAGE_MODELS.some((imageModel) => modelId.includes(imageModel))
)
return OPENAI_IMAGE_GENERATION_MODELS_REGEX.test(modelId) || GENERATE_IMAGE_MODELS_REGEX.test(modelId)
}
return GENERATE_IMAGE_MODELS.some((imageModel) => modelId.includes(imageModel))
return GENERATE_IMAGE_MODELS_REGEX.test(modelId)
}
// TODO: refine the regex
/**
*
* @param model
* @returns
*/
export function isPureGenerateImageModel(model: Model): boolean {
if (!isGenerateImageModel(model) || !isTextToImageModel(model)) {
if (!isGenerateImageModel(model) && !isTextToImageModel(model)) {
return false
}
if (isFunctionCallingModel(model)) {
return false
}
const modelId = getLowerBaseModelName(model.id)
return !OPENAI_TOOL_USE_IMAGE_GENERATION_MODELS.some((imageModel) => modelId.includes(imageModel))
if (GENERATE_IMAGE_MODELS_REGEX.test(modelId) && !MODERN_GENERATE_IMAGE_MODELS_REGEX.test(modelId)) {
return true
}
return !OPENAI_TOOL_USE_IMAGE_GENERATION_MODELS.some((m) => modelId.includes(m))
}
// TODO: refine the regex
// Text to image models
const TEXT_TO_IMAGE_REGEX = /flux|diffusion|stabilityai|sd-|dall|cogview|janus|midjourney|mj-|image|gpt-image/i
const TEXT_TO_IMAGE_REGEX = /flux|diffusion|stabilityai|sd-|dall|cogview|janus|midjourney|mj-|imagen|gpt-image/i
export function isTextToImageModel(model: Model): boolean {
const modelId = getLowerBaseModelName(model.id)
return TEXT_TO_IMAGE_REGEX.test(modelId)
}
// It's not used now
// export function isNotSupportedImageSizeModel(model?: Model): boolean {
// if (!model) {
// return false
// }
// const baseName = getLowerBaseModelName(model.id, '/')
// return baseName.includes('grok-2-image')
// }
/**
*
* @param model


@ -3,6 +3,7 @@ import type { Model } from '@renderer/types'
import { SystemProviderIds } from '@renderer/types'
import { getLowerBaseModelName, isUserSelectedModelType } from '@renderer/utils'
import {
isAzureOpenAIProvider,
isGeminiProvider,
isNewApiProvider,
isOpenAICompatibleProvider,
@ -15,7 +16,7 @@ export { GEMINI_FLASH_MODEL_REGEX } from './utils'
import { isEmbeddingModel, isRerankModel } from './embedding'
import { isClaude4SeriesModel } from './reasoning'
import { isAnthropicModel } from './utils'
import { isGenerateImageModel, isPureGenerateImageModel, isTextToImageModel } from './vision'
import { isTextToImageModel } from './vision'
const CLAUDE_SUPPORTED_WEBSEARCH_REGEX = new RegExp(
`\\b(?:claude-3(-|\\.)(7|5)-sonnet(?:-[\\w-]+)|claude-3(-|\\.)5-haiku(?:-[\\w-]+)|claude-(haiku|sonnet|opus)-4(?:-[\\w-]+)?)\\b`,
@ -23,7 +24,7 @@ const CLAUDE_SUPPORTED_WEBSEARCH_REGEX = new RegExp(
)
export const GEMINI_SEARCH_REGEX = new RegExp(
'gemini-(?:2.*(?:-latest)?|3-(?:flash|pro)(?:-preview)?|flash-latest|pro-latest|flash-lite-latest)(?:-[\\w-]+)*$',
'gemini-(?:2(?!.*-image-preview).*(?:-latest)?|3(?:\\.\\d+)?-(?:flash|pro)(?:-(?:image-)?preview)?|flash-latest|pro-latest|flash-lite-latest)(?:-[\\w-]+)*$',
'i'
)
@ -36,14 +37,7 @@ export const PERPLEXITY_SEARCH_MODELS = [
]
export function isWebSearchModel(model: Model): boolean {
if (
!model ||
isEmbeddingModel(model) ||
isRerankModel(model) ||
isTextToImageModel(model) ||
isPureGenerateImageModel(model) ||
isGenerateImageModel(model)
) {
if (!model || isEmbeddingModel(model) || isRerankModel(model) || isTextToImageModel(model)) {
return false
}
@ -59,7 +53,7 @@ export function isWebSearchModel(model: Model): boolean {
const modelId = getLowerBaseModelName(model.id, '/')
// Not supported on bedrock
// Not supported on bedrock, but supported on azure
if (isAnthropicModel(model) && !(provider.id === SystemProviderIds['aws-bedrock'])) {
if (isVertexProvider(provider)) {
return isClaude4SeriesModel(model)
@ -68,7 +62,8 @@ export function isWebSearchModel(model: Model): boolean {
}
// TODO: this logic needs to be improved when other providers adopt the Responses endpoint
if (isOpenAIProvider(provider)) {
// azure also supports websearch now
if (isOpenAIProvider(provider) || isAzureOpenAIProvider(provider)) {
if (isOpenAIWebSearchModel(model)) {
return true
}


@ -1,4 +1,4 @@
import { isAnthropicModel, isGeminiModel } from '@renderer/config/models'
import { isAnthropicModel, isGeminiModel, isPureGenerateImageModel } from '@renderer/config/models'
import { defineTool, registerTool, TopicType } from '@renderer/pages/home/Inputbar/types'
import { getProviderByModel } from '@renderer/services/AssistantService'
import { isSupportUrlContextProvider } from '@renderer/utils/provider'
@ -11,7 +11,12 @@ const urlContextTool = defineTool({
visibleInScopes: [TopicType.Chat],
condition: ({ model }) => {
const provider = getProviderByModel(model)
return !!provider && isSupportUrlContextProvider(provider) && (isGeminiModel(model) || isAnthropicModel(model))
return (
!!provider &&
isSupportUrlContextProvider(provider) &&
!isPureGenerateImageModel(model) &&
(isGeminiModel(model) || isAnthropicModel(model))
)
},
render: ({ assistant }) => <UrlContextButton assistantId={assistant.id} />
})


@ -91,6 +91,7 @@ const ThinkModelTypes = [
'grok4_fast',
'gemini',
'gemini_pro',
'gemini3',
'qwen',
'qwen_thinking',
'doubao',


@ -2,6 +2,10 @@ import { CLAUDE_SUPPORTED_PROVIDERS } from '@renderer/pages/code'
import type { AzureOpenAIProvider, ProviderType, VertexProvider } from '@renderer/types'
import { isSystemProvider, type Provider, type SystemProviderId, SystemProviderIds } from '@renderer/types'
export const isAzureResponsesEndpoint = (provider: AzureOpenAIProvider) => {
return provider.apiVersion === 'preview' || provider.apiVersion === 'v1'
}
export const getClaudeSupportedProviders = (providers: Provider[]) => {
return providers.filter(
(p) => p.type === 'anthropic' || !!p.anthropicApiHost || CLAUDE_SUPPORTED_PROVIDERS.includes(p.id)

yarn.lock (333 changed lines)

@ -74,35 +74,23 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/amazon-bedrock@npm:^3.0.53":
version: 3.0.53
resolution: "@ai-sdk/amazon-bedrock@npm:3.0.53"
"@ai-sdk/amazon-bedrock@npm:^3.0.56":
version: 3.0.56
resolution: "@ai-sdk/amazon-bedrock@npm:3.0.56"
dependencies:
"@ai-sdk/anthropic": "npm:2.0.43"
"@ai-sdk/anthropic": "npm:2.0.45"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.16"
"@ai-sdk/provider-utils": "npm:3.0.17"
"@smithy/eventstream-codec": "npm:^4.0.1"
"@smithy/util-utf8": "npm:^4.0.0"
aws4fetch: "npm:^1.0.20"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/4ad693af6796fac6cb6f5aacf512708478a045070435f10781072aeb02f4f97083b86ae4fff135329703af7ceb158349c6b62e6f05b394817dca5d90ff31d528
checksum: 10c0/1d5607de6b7a450bbdbf4e704f5f5690c6cda861e0f9c99d715f893fa5eab13ca534d63eebe58b42856e3c5c65d795ad5238bf5d0187b6f50343c8dc9a3e8b2b
languageName: node
linkType: hard
"@ai-sdk/anthropic@npm:2.0.43, @ai-sdk/anthropic@npm:^2.0.43":
version: 2.0.43
resolution: "@ai-sdk/anthropic@npm:2.0.43"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.16"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/a83029edc541a9cecda9e15b8732de111ed739a586b55d6a0e7d2b8ef40660289986d7a144252736bfc9ee067ee19b11d5c5830278513aa32c6fa24666bd0e78
languageName: node
linkType: hard
"@ai-sdk/anthropic@npm:2.0.45":
"@ai-sdk/anthropic@npm:2.0.45, @ai-sdk/anthropic@npm:^2.0.45":
version: 2.0.45
resolution: "@ai-sdk/anthropic@npm:2.0.45"
dependencies:
@ -114,28 +102,16 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/anthropic@npm:^2.0.44":
version: 2.0.44
resolution: "@ai-sdk/anthropic@npm:2.0.44"
"@ai-sdk/azure@npm:^2.0.73":
version: 2.0.73
resolution: "@ai-sdk/azure@npm:2.0.73"
dependencies:
"@ai-sdk/openai": "npm:2.0.71"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/6484fdc60da8658d8d28c2c61fc4bc322829aee31baf71f0ea1bfbf17d008d37ce9db5d3bb395646bdd9891866b24b763766cba17b5c0fbd67f183ceac71df57
languageName: node
linkType: hard
"@ai-sdk/azure@npm:^2.0.66":
version: 2.0.66
resolution: "@ai-sdk/azure@npm:2.0.66"
dependencies:
"@ai-sdk/openai": "npm:2.0.64"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.16"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/261c00a3998611857f0e7c95962849d8e4468262477b07dafd29b0d447ae4088a8b3fc351ca84086e4cf008e2ee9d6efeb379964a091539d6af16a25a8726cd4
checksum: 10c0/e21ca310d23fcbf485ea2e2a6ec3daf29d36fcc827a31f961a06b4ab0d8cfbf19b58a9172e741a1311f88b663d6fb0608b584dbaa3bbddf08215bab3255b0e39
languageName: node
linkType: hard
@ -152,131 +128,93 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/deepseek@npm:^1.0.27":
version: 1.0.27
resolution: "@ai-sdk/deepseek@npm:1.0.27"
"@ai-sdk/deepseek@npm:^1.0.29":
version: 1.0.29
resolution: "@ai-sdk/deepseek@npm:1.0.29"
dependencies:
"@ai-sdk/openai-compatible": "npm:1.0.26"
"@ai-sdk/openai-compatible": "npm:1.0.27"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.16"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/8d05887ef5e9c08d63a54f0b51c1ff6c9242daab339aaae919d2dc48a11d1065a84b0dc3e5f1e9b48ef20122ff330a5eee826f0632402d1ff87fcec9a2edd516
checksum: 10c0/f43fba5c72e3f2d8ddc79d68c656cb4fc5fcd488c97b0a5371ad728e2d5c7a8c61fe9125a2a471b7648d99646cd2c78aad2d462c1469942bb4046763c5f13f38
languageName: node
linkType: hard
"@ai-sdk/gateway@npm:2.0.7":
version: 2.0.7
resolution: "@ai-sdk/gateway@npm:2.0.7"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.16"
"@vercel/oidc": "npm:3.0.3"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/b57db87ccfbda6d28c8ac6e24df5e57a45f18826bff3ca5d1b65b00d863dd779d2b0d80496eee8eea8cbf6db232c31bd00494cd0d25e745cb402aa98b0b4d50d
languageName: node
linkType: hard
"@ai-sdk/gateway@npm:^2.0.9":
version: 2.0.9
resolution: "@ai-sdk/gateway@npm:2.0.9"
"@ai-sdk/gateway@npm:2.0.13, @ai-sdk/gateway@npm:^2.0.13":
version: 2.0.13
resolution: "@ai-sdk/gateway@npm:2.0.13"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
"@vercel/oidc": "npm:3.0.3"
"@vercel/oidc": "npm:3.0.5"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/840f94795b96c0fa6e73897ea8dba95fc78af1f8482f3b7d8439b6233b4f4de6979a8b67206f4bbf32649baf2acfb1153a46792dfa20259ca9f5fd214fb25fa5
checksum: 10c0/c92413bdcbd05bca15d4f96fd9cd2ec3410870cacf9902181fcc9677bd00860d7920aac494a25e307a3d4c6aa2d68f87e6771402019062c88948a767b4a31280
languageName: node
linkType: hard
"@ai-sdk/google-vertex@npm:^3.0.68":
version: 3.0.68
resolution: "@ai-sdk/google-vertex@npm:3.0.68"
"@ai-sdk/google-vertex@npm:^3.0.72":
version: 3.0.72
resolution: "@ai-sdk/google-vertex@npm:3.0.72"
dependencies:
"@ai-sdk/anthropic": "npm:2.0.45"
"@ai-sdk/google": "npm:2.0.36"
"@ai-sdk/google": "npm:2.0.40"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
google-auth-library: "npm:^9.15.0"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/6a3f4cb1e649313b46a0c349c717757071f8b012b0a28e59ab7a55fd35d9600f0043f0a4f57417c4cc49e0d3734e89a1e4fb248fc88795b5286c83395d3f617a
checksum: 10c0/ac3f2465f911ba0872a6b3616bda9b80d6ccdde8e56de3ce8395be798614a6cd01957f779d9519f5edd8d2597345162c5c08c489d7b146f21f13647691f961f5
languageName: node
linkType: hard
"@ai-sdk/google@npm:2.0.36":
version: 2.0.36
resolution: "@ai-sdk/google@npm:2.0.36"
"@ai-sdk/google@npm:2.0.40":
version: 2.0.40
resolution: "@ai-sdk/google@npm:2.0.40"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/2c6de5e1cf0703b6b932a3f313bf4bc9439897af39c805169ab04bba397185d99b2b1306f3b817f991ca41fdced0365b072ee39e76382c045930256bce47e0e4
checksum: 10c0/e0a22f24aac9475148177c725ade25ce8a6e4531dd6e51d811d2cee484770f97df876066ce75342b37191e5d7efcc3e0224450ba3c05eb48276e8f2899c6a1e5
languageName: node
linkType: hard
"@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.36#~/.yarn/patches/@ai-sdk-google-npm-2.0.36-6f3cc06026.patch":
version: 2.0.36
resolution: "@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.36#~/.yarn/patches/@ai-sdk-google-npm-2.0.36-6f3cc06026.patch::version=2.0.36&hash=2da8c3"
"@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch":
version: 2.0.40
resolution: "@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch::version=2.0.40&hash=c2a2ca"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/ce99a497360377d2917cf3a48278eb6f4337623ce3738ba743cf048c8c2a7731ec4fc27605a50e461e716ed49b3690206ca8e4078f27cb7be162b684bfc2fc22
checksum: 10c0/dec9d156ed9aeb129521f8d03158edbdbbdfc487d5f117c097123398f13670407b0ab03f6602487811d2334cd65377b72aca348cb39a48e149a71c4f728e8436
languageName: node
linkType: hard
"@ai-sdk/huggingface@npm:0.0.8":
version: 0.0.8
resolution: "@ai-sdk/huggingface@npm:0.0.8"
"@ai-sdk/huggingface@npm:^0.0.10":
version: 0.0.10
resolution: "@ai-sdk/huggingface@npm:0.0.10"
dependencies:
"@ai-sdk/openai-compatible": "npm:1.0.26"
"@ai-sdk/openai-compatible": "npm:1.0.27"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.16"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4
checksum: 10c0/12d5064bb3dbb591941c76a33ffa76e75df0c1fb547255c20acbdc9cfd00a434c8210d92df382717c188022aa705ad36c3e31ddcb6b1387f154f956c9ea61e66
checksum: 10c0/df9f48cb1259dca7ea304a2136d69019350102901c672b90ea26a588c284aebc904a483be3967f2548c1c55dbc4db641e25a2202c435fa53038fa413c0f393df
languageName: node
linkType: hard
"@ai-sdk/huggingface@patch:@ai-sdk/huggingface@npm%3A0.0.8#~/.yarn/patches/@ai-sdk-huggingface-npm-0.0.8-d4d0aaac93.patch":
version: 0.0.8
resolution: "@ai-sdk/huggingface@patch:@ai-sdk/huggingface@npm%3A0.0.8#~/.yarn/patches/@ai-sdk-huggingface-npm-0.0.8-d4d0aaac93.patch::version=0.0.8&hash=ceb48e"
dependencies:
"@ai-sdk/openai-compatible": "npm:1.0.26"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.16"
peerDependencies:
zod: ^3.25.76 || ^4
checksum: 10c0/30760547543f7e33fe088a4a5b5be7ce0cd37f446a5ddb13c99c5a2725c6c020fc76d6cf6bc1c5cdd8f765366ecb3022605096dc45cd50acf602ef46a89c1eb7
languageName: node
linkType: hard
"@ai-sdk/mistral@npm:^2.0.23":
version: 2.0.23
resolution: "@ai-sdk/mistral@npm:2.0.23"
"@ai-sdk/mistral@npm:^2.0.24":
version: 2.0.24
resolution: "@ai-sdk/mistral@npm:2.0.24"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.16"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/7b7597740d1e48ee4905f48276c46591fbdd6d7042f001ec1a34256c8b054f480f547c6aa9175987e6fdfc4c068925176d0123fa3b4b5af985d55b7890cfe80a
languageName: node
linkType: hard
"@ai-sdk/openai-compatible@npm:1.0.26, @ai-sdk/openai-compatible@npm:^1.0.26":
version: 1.0.26
resolution: "@ai-sdk/openai-compatible@npm:1.0.26"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.16"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/b419641f1e97c2db688f2371cdc4efb4c16652fde74fff92afaa614eea5aabee40d7f2e4e082f00d3805f12390084c7b47986de570e836beb1466c2dd48d31e9
checksum: 10c0/da0d37822fa96eb55e41a3a663488c8bfeb492b5dbde3914560fad4f0b70c47004bd649bf0c01359a4fb09d8ab2c63385e94ab280cf554d8ffe35fb5afbad340
languageName: node
linkType: hard
@@ -292,81 +230,55 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/openai-compatible@npm:^1.0.19":
version: 1.0.19
resolution: "@ai-sdk/openai-compatible@npm:1.0.19"
"@ai-sdk/openai-compatible@patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch":
version: 1.0.27
resolution: "@ai-sdk/openai-compatible@patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch::version=1.0.27&hash=c44b76"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.10"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/5b7b21fb515e829c3d8a499a5760ffc035d9b8220695996110e361bd79e9928859da4ecf1ea072735bcbe4977c6dd0661f543871921692e86f8b5bfef14fe0e5
checksum: 10c0/80c8331bc5fc62dc23d99d861bdc76e4eaf8b4b071d0b2bfa42fbd87f50b1bcdfa5ce4a4deaf7026a603a1ba6eaf5c884d87e3c58b4d6515c220121d3f421de5
languageName: node
linkType: hard
"@ai-sdk/openai@npm:2.0.64":
version: 2.0.64
resolution: "@ai-sdk/openai@npm:2.0.64"
"@ai-sdk/openai@npm:2.0.71":
version: 2.0.71
resolution: "@ai-sdk/openai@npm:2.0.71"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.16"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/fde91951ca5f2612458d618fd2b8a6e29a8cae61f1bda45816258c697af5ec6f047dbd3acc1fcc921db6e39dfa3158799f0e66f737bcd40f5f0cdd10be74d2a7
checksum: 10c0/19a0a1648df074ba1c1836bf7b5cd874a3e4e5c2d4efad3bec1ecdcd7f013008b1f573685be2f5d8b6b326a91309f4f6c8b556755d62e6b03c840f9030ad7a5f
languageName: node
linkType: hard
"@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch":
version: 2.0.64
resolution: "@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch::version=2.0.64&hash=e78090"
"@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.71#~/.yarn/patches/@ai-sdk-openai-npm-2.0.71-a88ef00525.patch":
version: 2.0.71
resolution: "@ai-sdk/openai@patch:@ai-sdk/openai@npm%3A2.0.71#~/.yarn/patches/@ai-sdk-openai-npm-2.0.71-a88ef00525.patch::version=2.0.71&hash=78bebe"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.16"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/e4a0967cbdb25309144c6263e6d691fa67898953207e050c23ba99df23ce76ab025fed3a79d541d54b99b4a049a945db2a4a3fbae5ab3a52207f024f5b4e6f4a
checksum: 10c0/a68ba6b32a940e48daa6354667108648ff6a99646eb413ead7a70ca82289874de98206322b5704326f2d9579fcc92f50a1cdf1328368cc337f28213c0da90f5c
languageName: node
linkType: hard
"@ai-sdk/perplexity@npm:^2.0.17":
version: 2.0.17
resolution: "@ai-sdk/perplexity@npm:2.0.17"
"@ai-sdk/perplexity@npm:^2.0.20":
version: 2.0.20
resolution: "@ai-sdk/perplexity@npm:2.0.20"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.16"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/7c900a507bc7a60efb120ee4d251cb98314a6ea0f2d876552caf7b8c18e44ff38a8e205e94e6fa823629ac30c4e191c2441b107556c2b50bc4e90f80e6094bb1
checksum: 10c0/7c48da46c2fec30749b261167384dc5d10bb405d31b69fadf9903ea6df32917470b4d13654d36e3465f96bd63670f94f2c6b1388abfe9f04134b5bf4adc3a770
languageName: node
linkType: hard
"@ai-sdk/provider-utils@npm:3.0.10":
version: 3.0.10
resolution: "@ai-sdk/provider-utils@npm:3.0.10"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@standard-schema/spec": "npm:^1.0.0"
eventsource-parser: "npm:^3.0.5"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/d2c16abdb84ba4ef48c9f56190b5ffde224b9e6ae5147c5c713d2623627732d34b96aa9aef2a2ea4b0c49e1b863cc963c7d7ff964a1dc95f0f036097aaaaaa98
languageName: node
linkType: hard
"@ai-sdk/provider-utils@npm:3.0.16, @ai-sdk/provider-utils@npm:^3.0.16":
version: 3.0.16
resolution: "@ai-sdk/provider-utils@npm:3.0.16"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@standard-schema/spec": "npm:^1.0.0"
eventsource-parser: "npm:^3.0.6"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/0922af1864b31aed4704174683d356c482199bf691c3d1a3e27cdedd574eec2249ea386b1081023d301a87e38dea09ec259ee45c5889316f7eed0de0a6064a49
languageName: node
linkType: hard
"@ai-sdk/provider-utils@npm:3.0.17, @ai-sdk/provider-utils@npm:^3.0.10":
"@ai-sdk/provider-utils@npm:3.0.17, @ai-sdk/provider-utils@npm:^3.0.10, @ai-sdk/provider-utils@npm:^3.0.17":
version: 3.0.17
resolution: "@ai-sdk/provider-utils@npm:3.0.17"
dependencies:
@@ -379,19 +291,6 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/provider-utils@npm:^3.0.12":
version: 3.0.12
resolution: "@ai-sdk/provider-utils@npm:3.0.12"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@standard-schema/spec": "npm:^1.0.0"
eventsource-parser: "npm:^3.0.5"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/83886bf188cad0cc655b680b710a10413989eaba9ec59dd24a58b985c02a8a1d50ad0f96dd5259385c07592ec3c37a7769fdf4a1ef569a73c9edbdb2cd585915
languageName: node
linkType: hard
"@ai-sdk/provider@npm:2.0.0, @ai-sdk/provider@npm:^2.0.0":
version: 2.0.0
resolution: "@ai-sdk/provider@npm:2.0.0"
@@ -419,16 +318,16 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/xai@npm:^2.0.31":
version: 2.0.31
resolution: "@ai-sdk/xai@npm:2.0.31"
"@ai-sdk/xai@npm:^2.0.34":
version: 2.0.34
resolution: "@ai-sdk/xai@npm:2.0.34"
dependencies:
"@ai-sdk/openai-compatible": "npm:1.0.26"
"@ai-sdk/openai-compatible": "npm:1.0.27"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.16"
"@ai-sdk/provider-utils": "npm:3.0.17"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/33a0336f032a12b8406cc1aa1541fdf1a7b9924555456b77844e47a5ddf11b726fdbcec1a240cb66a9c7597a1a05503cf03204866730c39f99e7d4442b781ec0
checksum: 10c0/e6d5a02edcc8ea8f1b6423faf27a56bd02d4664a021c9b13e18c3f393bc9b64f7781a86782fcfd3b559aacfbea248a1176d7511d03ee184fd1146b84833c9514
languageName: node
linkType: hard
@@ -1904,13 +1803,13 @@ __metadata:
version: 0.0.0-use.local
resolution: "@cherrystudio/ai-core@workspace:packages/aiCore"
dependencies:
"@ai-sdk/anthropic": "npm:^2.0.43"
"@ai-sdk/azure": "npm:^2.0.66"
"@ai-sdk/deepseek": "npm:^1.0.27"
"@ai-sdk/openai-compatible": "npm:^1.0.26"
"@ai-sdk/anthropic": "npm:^2.0.45"
"@ai-sdk/azure": "npm:^2.0.73"
"@ai-sdk/deepseek": "npm:^1.0.29"
"@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
"@ai-sdk/provider": "npm:^2.0.0"
"@ai-sdk/provider-utils": "npm:^3.0.16"
"@ai-sdk/xai": "npm:^2.0.31"
"@ai-sdk/provider-utils": "npm:^3.0.17"
"@ai-sdk/xai": "npm:^2.0.34"
tsdown: "npm:^0.12.9"
typescript: "npm:^5.0.0"
vitest: "npm:^3.2.4"
@@ -1928,7 +1827,7 @@ __metadata:
resolution: "@cherrystudio/ai-sdk-provider@workspace:packages/ai-sdk-provider"
dependencies:
"@ai-sdk/provider": "npm:^2.0.0"
"@ai-sdk/provider-utils": "npm:^3.0.12"
"@ai-sdk/provider-utils": "npm:^3.0.17"
tsdown: "npm:^0.13.3"
typescript: "npm:^5.8.2"
vitest: "npm:^3.2.4"
@@ -5082,13 +4981,24 @@ __metadata:
languageName: node
linkType: hard
"@openrouter/ai-sdk-provider@npm:^1.2.0":
version: 1.2.0
resolution: "@openrouter/ai-sdk-provider@npm:1.2.0"
"@openrouter/ai-sdk-provider@npm:^1.2.5":
version: 1.2.5
resolution: "@openrouter/ai-sdk-provider@npm:1.2.5"
dependencies:
"@openrouter/sdk": "npm:^0.1.8"
peerDependencies:
ai: ^5.0.0
zod: ^3.24.1 || ^v4
checksum: 10c0/4ca7c471ec46bdd48eea9c56d94778a06ca4b74b6ef2ab892ab7eadbd409e3530ac0c5791cd80e88cafc44a49a76585e59707104792e3e3124237fed767104ef
checksum: 10c0/f422f767ff8fcba2bb2fca32e5e2df163abae3c754f98416830654c5135db3aed5d4f941bfa0005109d202053a2e6a4a6b997940eb154ac964c87dd85dbe82e1
languageName: node
linkType: hard
"@openrouter/sdk@npm:^0.1.8":
version: 0.1.23
resolution: "@openrouter/sdk@npm:0.1.23"
dependencies:
zod: "npm:^3.25.0 || ^4.0.0"
checksum: 10c0/ec4a3a23b697e2c4bc1658af991e97d0adda10bb4f4208044abec3f7d97859e0abacc3e82745ef31291be4e7e4e4ce5552e4cb3efaa05414a48c9b3c0f4f7597
languageName: node
linkType: hard
@@ -9708,10 +9618,10 @@ __metadata:
languageName: node
linkType: hard
"@vercel/oidc@npm:3.0.3":
version: 3.0.3
resolution: "@vercel/oidc@npm:3.0.3"
checksum: 10c0/c8eecb1324559435f4ab8a955f5ef44f74f546d11c2ddcf28151cb636d989bd4b34e0673fd8716cb21bb21afb34b3de663bacc30c9506036eeecbcbf2fd86241
"@vercel/oidc@npm:3.0.5":
version: 3.0.5
resolution: "@vercel/oidc@npm:3.0.5"
checksum: 10c0/a63f0ab226f9070f974334014bd2676611a2d13473c10b867e3d9db8a2cc83637ae7922db26b184dd97b5945e144fc211c8f899642d205517e5b4e0e34f05b0e
languageName: node
linkType: hard
@@ -10018,16 +9928,16 @@ __metadata:
"@agentic/exa": "npm:^7.3.3"
"@agentic/searxng": "npm:^7.3.3"
"@agentic/tavily": "npm:^7.3.3"
"@ai-sdk/amazon-bedrock": "npm:^3.0.53"
"@ai-sdk/anthropic": "npm:^2.0.44"
"@ai-sdk/amazon-bedrock": "npm:^3.0.56"
"@ai-sdk/anthropic": "npm:^2.0.45"
"@ai-sdk/cerebras": "npm:^1.0.31"
"@ai-sdk/gateway": "npm:^2.0.9"
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.36#~/.yarn/patches/@ai-sdk-google-npm-2.0.36-6f3cc06026.patch"
"@ai-sdk/google-vertex": "npm:^3.0.68"
"@ai-sdk/huggingface": "patch:@ai-sdk/huggingface@npm%3A0.0.8#~/.yarn/patches/@ai-sdk-huggingface-npm-0.0.8-d4d0aaac93.patch"
"@ai-sdk/mistral": "npm:^2.0.23"
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.64#~/.yarn/patches/@ai-sdk-openai-npm-2.0.64-48f99f5bf3.patch"
"@ai-sdk/perplexity": "npm:^2.0.17"
"@ai-sdk/gateway": "npm:^2.0.13"
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch"
"@ai-sdk/google-vertex": "npm:^3.0.72"
"@ai-sdk/huggingface": "npm:^0.0.10"
"@ai-sdk/mistral": "npm:^2.0.24"
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.71#~/.yarn/patches/@ai-sdk-openai-npm-2.0.71-a88ef00525.patch"
"@ai-sdk/perplexity": "npm:^2.0.20"
"@ai-sdk/test-server": "npm:^0.0.1"
"@ant-design/v5-patch-for-react-19": "npm:^1.0.3"
"@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.30#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.30-b50a299674.patch"
@@ -10077,7 +9987,7 @@ __metadata:
"@mozilla/readability": "npm:^0.6.0"
"@napi-rs/system-ocr": "patch:@napi-rs/system-ocr@npm%3A1.0.2#~/.yarn/patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch"
"@notionhq/client": "npm:^2.2.15"
"@openrouter/ai-sdk-provider": "npm:^1.2.0"
"@openrouter/ai-sdk-provider": "npm:^1.2.5"
"@opentelemetry/api": "npm:^1.9.0"
"@opentelemetry/core": "npm:2.0.0"
"@opentelemetry/exporter-trace-otlp-http": "npm:^0.200.0"
@@ -10155,7 +10065,7 @@ __metadata:
"@viz-js/lang-dot": "npm:^1.0.5"
"@viz-js/viz": "npm:^3.14.0"
"@xyflow/react": "npm:^12.4.4"
ai: "npm:^5.0.90"
ai: "npm:^5.0.98"
antd: "patch:antd@npm%3A5.27.0#~/.yarn/patches/antd-npm-5.27.0-aa91c36546.patch"
archiver: "npm:^7.0.1"
async-mutex: "npm:^0.5.0"
@@ -10403,17 +10313,17 @@ __metadata:
languageName: node
linkType: hard
"ai@npm:^5.0.90":
version: 5.0.90
resolution: "ai@npm:5.0.90"
"ai@npm:^5.0.98":
version: 5.0.98
resolution: "ai@npm:5.0.98"
dependencies:
"@ai-sdk/gateway": "npm:2.0.7"
"@ai-sdk/gateway": "npm:2.0.13"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.16"
"@ai-sdk/provider-utils": "npm:3.0.17"
"@opentelemetry/api": "npm:1.9.0"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/feee8908803743cee49216a37bcbc6f33e2183423d623863e8a0c5ce065dcb18d17c5c86b8f587bf391818bb47a882287f14650a77a857accb5cb7a0ecb2653c
checksum: 10c0/87c684263dd8f944287b3241255841aec092e487480c4abc6b28fdcea3a36f998f96022b1972f00d083525ccb95517dadcba6a69cbef1eadb0d6465041dcc092
languageName: node
linkType: hard
@@ -14602,13 +14512,6 @@ __metadata:
languageName: node
linkType: hard
"eventsource-parser@npm:^3.0.5":
version: 3.0.5
resolution: "eventsource-parser@npm:3.0.5"
checksum: 10c0/5cb75e3f84ff1cfa1cee6199d4fd430c4544855ab03e953ddbe5927e7b31bc2af3933ab8aba6440ba160ed2c48972b6c317f27b8a1d0764c7b12e34e249de631
languageName: node
linkType: hard
"eventsource-parser@npm:^3.0.6":
version: 3.0.6
resolution: "eventsource-parser@npm:3.0.6"
@@ -26421,7 +26324,7 @@ __metadata:
languageName: node
linkType: hard
"zod@npm:^3.25.76 || ^4":
"zod@npm:^3.25.0 || ^4.0.0, zod@npm:^3.25.76 || ^4":
version: 4.1.12
resolution: "zod@npm:4.1.12"
checksum: 10c0/b64c1feb19e99d77075261eaf613e0b2be4dfcd3551eff65ad8b4f2a079b61e379854d066f7d447491fcf193f45babd8095551a9d47973d30b46b6d8e2c46774