Merge branch 'main' of github.com:CherryHQ/cherry-studio into v2

fullex 2026-01-07 21:02:43 +08:00
commit cd778f3574
65 changed files with 3446 additions and 640 deletions

View File

@@ -154,9 +154,10 @@ jobs:
         with:
           node-version: 22
-      - name: Install pnpm
+      - name: Enable corepack
         if: steps.check.outputs.should_run == 'true'
-        uses: pnpm/action-setup@v4
+        working-directory: main
+        run: corepack enable pnpm
       - name: Install dependencies
         if: steps.check.outputs.should_run == 'true'

View File

@@ -28,6 +28,12 @@ files:
   - "!**/{tsconfig.json,tsconfig.tsbuildinfo,tsconfig.node.json,tsconfig.web.json}"
   - "!**/{.editorconfig,.jekyll-metadata}"
   - "!src"
+  - "!config"
+  - "!patches"
+  - "!app-upgrade-config.json"
+  - "!**/node_modules/**/*.cpp"
+  - "!**/node_modules/node-addon-api/**"
+  - "!**/node_modules/prebuild-install/**"
   - "!scripts"
   - "!local"
   - "!docs"
@@ -91,6 +97,7 @@ nsis:
   oneClick: false
   include: build/nsis-installer.nsh
   buildUniversalInstaller: false
+  differentialPackage: false
 portable:
   artifactName: ${productName}-${version}-${arch}-portable.${ext}
   buildUniversalInstaller: false
@@ -106,6 +113,8 @@ mac:
   target:
     - target: dmg
     - target: zip
+dmg:
+  writeUpdateInfo: false
 linux:
   artifactName: ${productName}-${version}-${arch}.${ext}
   target:
@@ -135,44 +144,34 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
 releaseInfo:
   releaseNotes: |
     <!--LANG:en-->
-    Cherry Studio 1.7.9 - New Features & Bug Fixes
+    Cherry Studio 1.7.11 - New Features & Bug Fixes
     ✨ New Features
-    - [Agent] Add 302.AI provider support
-    - [Browser] Browser data now persists and supports multiple tabs
-    - [Language] Add Romanian language support
-    - [Search] Add fuzzy search for file list
-    - [Models] Add latest Zhipu models
-    - [Image] Improve text-to-image functionality
+    - [MCP] Add MCP Hub with Auto mode for intelligent multi-server tool orchestration
     🐛 Bug Fixes
-    - [Mac] Fix mini window unexpected closing issue
-    - [Preview] Fix HTML preview controls not working in fullscreen
-    - [Translate] Fix translation duplicate execution issue
-    - [Zoom] Fix page zoom reset issue during navigation
-    - [Agent] Fix crash when switching between agent and assistant
-    - [Agent] Fix navigation in agent mode
-    - [Copy] Fix markdown copy button issue
-    - [Windows] Fix compatibility issues on non-Windows systems
+    - [Chat] Fix reasoning process not displaying correctly for some proxy models
+    - [Chat] Fix duplicate loading spinners on action buttons
+    - [Editor] Fix paragraph handle and plus button not clickable
+    - [Drawing] Fix TokenFlux models not showing in drawing panel
+    - [Translate] Fix translation stalling after initialization
+    - [Error] Fix app freeze when viewing error details with large images
+    - [Notes] Fix folder overlay blocking webview preview
+    - [Chat] Fix thinking time display when stopping generation
     <!--LANG:zh-CN-->
-    Cherry Studio 1.7.9 - 新功能与问题修复
+    Cherry Studio 1.7.11 - 新功能与问题修复
     ✨ 新功能
-    - [Agent] 新增 302.AI 服务商支持
-    - [浏览器] 浏览器数据现在可以保存,支持多标签页
-    - [语言] 新增罗马尼亚语支持
-    - [搜索] 文件列表新增模糊搜索功能
-    - [模型] 新增最新智谱模型
-    - [图片] 优化文生图功能
+    - [MCP] 新增 MCP Hub 智能模式,可自动管理和调用多个 MCP 服务器工具
     🐛 问题修复
-    - [Mac] 修复迷你窗口意外关闭的问题
-    - [预览] 修复全屏模式下 HTML 预览控件无法使用的问题
-    - [翻译] 修复翻译重复执行的问题
-    - [缩放] 修复页面导航时缩放被重置的问题
-    - [智能体] 修复在智能体和助手间切换时崩溃的问题
-    - [智能体] 修复智能体模式下的导航问题
-    - [复制] 修复 Markdown 复制按钮问题
-    - [兼容性] 修复非 Windows 系统的兼容性问题
+    - [对话] 修复部分代理模型的推理过程无法正确显示的问题
+    - [对话] 修复操作按钮重复显示加载状态的问题
+    - [编辑器] 修复段落手柄和加号按钮无法点击的问题
+    - [绘图] 修复 TokenFlux 模型在绘图面板不显示的问题
+    - [翻译] 修复翻译功能初始化后卡住的问题
+    - [错误] 修复查看包含大图片的错误详情时应用卡死的问题
+    - [笔记] 修复文件夹遮挡网页预览的问题
+    - [对话] 修复停止生成时思考时间显示问题
     <!--LANG:END-->

View File

@@ -68,7 +68,7 @@
     "release:ai-sdk-provider": "pnpm --filter @cherrystudio/ai-sdk-provider version patch && pnpm --filter @cherrystudio/ai-sdk-provider build && pnpm --filter @cherrystudio/ai-sdk-provider publish --access public"
   },
   "dependencies": {
-    "@anthropic-ai/claude-agent-sdk": "0.1.62",
+    "@anthropic-ai/claude-agent-sdk": "0.1.76",
     "@libsql/client": "0.14.0",
     "@napi-rs/system-ocr": "1.0.2",
     "@paymoapp/electron-shutdown-handler": "1.1.2",
@@ -439,7 +439,6 @@
     "@ai-sdk/openai-compatible@1.0.27": "1.0.28"
   },
   "patchedDependencies": {
-    "@anthropic-ai/claude-agent-sdk@0.1.62": "patches/@anthropic-ai-claude-agent-sdk-npm-0.1.62-23ae56f8c8.patch",
     "@napi-rs/system-ocr@1.0.2": "patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch",
     "tesseract.js@6.0.1": "patches/tesseract.js-npm-6.0.1-2562a7e46d.patch",
     "@ai-sdk/google@2.0.49": "patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch",

View File

@@ -21,9 +21,6 @@ const TOOL_USE_TAG_CONFIG: TagConfig = {
   separator: '\n'
 }
-/**
- *
- */
 export const DEFAULT_SYSTEM_PROMPT = `In this environment you have access to a set of tools you can use to answer the user's question. \
 You can use one or more tools per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
@@ -38,10 +35,16 @@ Tool use is formatted using XML-style tags. The tool name is enclosed in opening
 The tool name should be the exact name of the tool you are using, and the arguments should be a JSON object containing the parameters required by that tool. For example:
 <tool_use>
-<name>python_interpreter</name>
-<arguments>{"code": "5 + 3 + 1294.678"}</arguments>
+<name>search</name>
+<arguments>{ "query": "browser,fetch" }</arguments>
 </tool_use>
+<tool_use>
+<name>exec</name>
+<arguments>{ "code": "const page = await CherryBrowser_fetch({ url: "https://example.com" })\nreturn page" }</arguments>
+</tool_use>
 The user will respond with the result of the tool use, which should be formatted as follows:
 <tool_use_result>
@@ -59,13 +62,6 @@ For example, if the result of the tool use is an image file, you can use it in t
 Always adhere to this format for the tool use to ensure proper parsing and execution.
-## Tool Use Examples
-{{ TOOL_USE_EXAMPLES }}
-## Tool Use Available Tools
-Above example were using notional tools that might not exist for you. You only have access to these tools:
-{{ AVAILABLE_TOOLS }}
 ## Tool Use Rules
 Here are the rules you should always follow to solve your task:
 1. Always use the right arguments for the tools. Never use variable names as the action arguments, use the value instead.
@@ -74,6 +70,8 @@ Here are the rules you should always follow to solve your task:
 4. Never re-do a tool call that you previously did with the exact same parameters.
 5. For tool use, MAKE SURE use XML tag format as shown in the examples above. Do not use any other format.
+{{ TOOLS_INFO }}
 ## Response rules
 Respond in the language of the user's query, unless the user instructions specify additional requirements for the language to be used.
@@ -154,7 +152,8 @@ User: <tool_use_result>
 <name>search</name>
 <result>26 million (2019)</result>
 </tool_use_result>
-Assistant: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.`
+A: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.`
 /**
  * Cherry Studio
@@ -184,13 +183,30 @@ ${result}
 /**
  * Cherry Studio
  */
-function defaultBuildSystemPrompt(userSystemPrompt: string, tools: ToolSet): string {
+function defaultBuildSystemPrompt(userSystemPrompt: string, tools: ToolSet, mcpMode?: string): string {
   const availableTools = buildAvailableTools(tools)
   if (availableTools === null) return userSystemPrompt
-  const fullPrompt = DEFAULT_SYSTEM_PROMPT.replace('{{ TOOL_USE_EXAMPLES }}', DEFAULT_TOOL_USE_EXAMPLES)
+  if (mcpMode == 'auto') {
+    return DEFAULT_SYSTEM_PROMPT.replace('{{ TOOLS_INFO }}', '').replace(
+      '{{ USER_SYSTEM_PROMPT }}',
+      userSystemPrompt || ''
+    )
+  }
+  const toolsInfo = `
+## Tool Use Examples
+{{ TOOL_USE_EXAMPLES }}
+## Tool Use Available Tools
+Above example were using notional tools that might not exist for you. You only have access to these tools:
+{{ AVAILABLE_TOOLS }}`
+    .replace('{{ TOOL_USE_EXAMPLES }}', DEFAULT_TOOL_USE_EXAMPLES)
     .replace('{{ AVAILABLE_TOOLS }}', availableTools)
-    .replace('{{ USER_SYSTEM_PROMPT }}', userSystemPrompt || '')
+  const fullPrompt = DEFAULT_SYSTEM_PROMPT.replace('{{ TOOLS_INFO }}', toolsInfo).replace(
+    '{{ USER_SYSTEM_PROMPT }}',
+    userSystemPrompt || ''
+  )
   return fullPrompt
 }
@@ -223,7 +239,17 @@ function defaultParseToolUse(content: string, tools: ToolSet): { results: ToolUs
   // Find all tool use blocks
   while ((match = toolUsePattern.exec(contentToProcess)) !== null) {
     const fullMatch = match[0]
-    const toolName = match[2].trim()
+    let toolName = match[2].trim()
+    switch (toolName.toLowerCase()) {
+      case 'search':
+        toolName = 'mcp__CherryHub__search'
+        break
+      case 'exec':
+        toolName = 'mcp__CherryHub__exec'
+        break
+      default:
+        break
+    }
     const toolArgs = match[4].trim()
     // Try to parse the arguments as JSON
@@ -255,7 +281,12 @@
 }
 export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => {
-  const { enabled = true, buildSystemPrompt = defaultBuildSystemPrompt, parseToolUse = defaultParseToolUse } = config
+  const {
+    enabled = true,
+    buildSystemPrompt = defaultBuildSystemPrompt,
+    parseToolUse = defaultParseToolUse,
+    mcpMode
+  } = config
   return definePlugin({
     name: 'built-in:prompt-tool-use',
@@ -285,7 +316,7 @@ export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => {
       // 构建系统提示符(只包含非 provider-defined 工具)
       const userSystemPrompt = typeof params.system === 'string' ? params.system : ''
-      const systemPrompt = buildSystemPrompt(userSystemPrompt, promptTools)
+      const systemPrompt = buildSystemPrompt(userSystemPrompt, promptTools, mcpMode)
       let systemMessage: string | null = systemPrompt
       if (config.createSystemMessage) {
         // 🎯 如果用户提供了自定义处理函数,使用它

View File

@@ -23,6 +23,7 @@ export interface PromptToolUseConfig extends BaseToolUsePluginConfig {
   // 自定义工具解析函数(可选,有默认实现)
   parseToolUse?: (content: string, tools: ToolSet) => { results: ToolUseResult[]; content: string }
   createSystemMessage?: (systemPrompt: string, originalParams: any, context: AiRequestContext) => string | null
+  mcpMode?: string
 }
 /**
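For context, the new `mcpMode` option is read out of the plugin config and forwarded to `buildSystemPrompt(userSystemPrompt, tools, mcpMode)`, as the plugin diff above shows. A minimal usage sketch (the import path is an assumption for illustration; only the option names come from the diff):

```typescript
// Minimal sketch: enabling the prompt-based tool-use plugin in Auto MCP mode.
import { createPromptToolUsePlugin } from '@cherrystudio/ai-core' // import path assumed

const promptToolUse = createPromptToolUsePlugin({
  enabled: true,
  // In 'auto' mode, defaultBuildSystemPrompt replaces {{ TOOLS_INFO }} with an empty
  // string, leaving tool discovery to the Hub server's search/exec tools.
  mcpMode: 'auto'
})
```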

packages/shared/mcp.ts (new file, 116 lines)
View File

@@ -0,0 +1,116 @@
/**
* Convert a string to camelCase, ensuring it's a valid JavaScript identifier.
*
* - Normalizes to lowercase first, then capitalizes word boundaries
* - Non-alphanumeric characters are treated as word separators
* - Non-ASCII characters are dropped (ASCII-only output)
* - If result starts with a digit, prefixes with underscore
*
* @example
* toCamelCase('my-server') // 'myServer'
* toCamelCase('MY_SERVER') // 'myServer'
* toCamelCase('123tool') // '_123tool'
*/
export function toCamelCase(str: string): string {
let result = str
.trim()
.toLowerCase()
.replace(/[^a-z0-9]+(.)/g, (_, char) => char.toUpperCase())
.replace(/[^a-zA-Z0-9]/g, '')
if (result && !/^[a-zA-Z_]/.test(result)) {
result = '_' + result
}
return result
}
export type McpToolNameOptions = {
/** Prefix added before the name (e.g., 'mcp__'). Must be JS-identifier-safe. */
prefix?: string
/** Delimiter between server and tool parts (e.g., '_' or '__'). Must be JS-identifier-safe. */
delimiter?: string
/** Maximum length of the final name. Suffix numbers for uniqueness are included in this limit. */
maxLength?: number
/** Mutable Set for collision detection. The final name will be added to this Set. */
existingNames?: Set<string>
}
/**
* Build a valid JavaScript function name from server and tool names.
* Uses camelCase for both parts.
*
* @param serverName - The MCP server name (optional)
* @param toolName - The tool name
* @param options - Configuration options
* @returns A valid JS identifier
*/
export function buildMcpToolName(
serverName: string | undefined,
toolName: string,
options: McpToolNameOptions = {}
): string {
const { prefix = '', delimiter = '_', maxLength, existingNames } = options
const serverPart = serverName ? toCamelCase(serverName) : ''
const toolPart = toCamelCase(toolName)
const baseName = serverPart ? `${prefix}${serverPart}${delimiter}${toolPart}` : `${prefix}${toolPart}`
if (!existingNames) {
return maxLength ? truncateToLength(baseName, maxLength) : baseName
}
let name = maxLength ? truncateToLength(baseName, maxLength) : baseName
let counter = 1
while (existingNames.has(name)) {
const suffix = String(counter)
const truncatedBase = maxLength ? truncateToLength(baseName, maxLength - suffix.length) : baseName
name = `${truncatedBase}${suffix}`
counter++
}
existingNames.add(name)
return name
}
function truncateToLength(str: string, maxLength: number): string {
if (str.length <= maxLength) {
return str
}
return str.slice(0, maxLength).replace(/_+$/, '')
}
/**
* Generate a unique function name from server name and tool name.
* Format: serverName_toolName (camelCase)
*
* @example
* generateMcpToolFunctionName('github', 'search_issues') // 'github_searchIssues'
*/
export function generateMcpToolFunctionName(
serverName: string | undefined,
toolName: string,
existingNames?: Set<string>
): string {
return buildMcpToolName(serverName, toolName, { existingNames })
}
/**
* Builds a valid JavaScript function name for MCP tool calls.
* Format: mcp__{serverName}__{toolName}
*
* @param serverName - The MCP server name
* @param toolName - The tool name from the server
* @returns A valid JS identifier in format mcp__{server}__{tool}, max 63 chars
*
* @example
* buildFunctionCallToolName('github', 'search_issues') // 'mcp__github__searchIssues'
*/
export function buildFunctionCallToolName(serverName: string, toolName: string): string {
return buildMcpToolName(serverName, toolName, {
prefix: 'mcp__',
delimiter: '__',
maxLength: 63
})
}

View File

@@ -1,35 +0,0 @@
diff --git a/sdk.mjs b/sdk.mjs
index dea7766a3432a1e809f12d6daba4f2834a219689..e0b02ef73da177ba32b903887d7bbbeaa08cc6d3 100755
--- a/sdk.mjs
+++ b/sdk.mjs
@@ -6250,7 +6250,7 @@ function createAbortController(maxListeners = DEFAULT_MAX_LISTENERS) {
}
// ../src/transport/ProcessTransport.ts
-import { spawn } from "child_process";
+import { fork } from "child_process";
import { createInterface } from "readline";
// ../src/utils/fsOperations.ts
@@ -6644,18 +6644,11 @@ class ProcessTransport {
const errorMessage = isNativeBinary(pathToClaudeCodeExecutable) ? `Claude Code native binary not found at ${pathToClaudeCodeExecutable}. Please ensure Claude Code is installed via native installer or specify a valid path with options.pathToClaudeCodeExecutable.` : `Claude Code executable not found at ${pathToClaudeCodeExecutable}. Is options.pathToClaudeCodeExecutable set?`;
throw new ReferenceError(errorMessage);
}
- const isNative = isNativeBinary(pathToClaudeCodeExecutable);
- const spawnCommand = isNative ? pathToClaudeCodeExecutable : executable;
- const spawnArgs = isNative ? [...executableArgs, ...args] : [...executableArgs, pathToClaudeCodeExecutable, ...args];
- const spawnMessage = isNative ? `Spawning Claude Code native binary: ${spawnCommand} ${spawnArgs.join(" ")}` : `Spawning Claude Code process: ${spawnCommand} ${spawnArgs.join(" ")}`;
- logForSdkDebugging(spawnMessage);
- if (stderr) {
- stderr(spawnMessage);
- }
+ logForSdkDebugging(`Forking Claude Code Node.js process: ${pathToClaudeCodeExecutable} ${args.join(" ")}`);
const stderrMode = env.DEBUG_CLAUDE_AGENT_SDK || stderr ? "pipe" : "ignore";
- this.child = spawn(spawnCommand, spawnArgs, {
+ this.child = fork(pathToClaudeCodeExecutable, args, {
cwd,
- stdio: ["pipe", "pipe", stderrMode],
+ stdio: stderrMode === "pipe" ? ["pipe", "pipe", "pipe", "ipc"] : ["pipe", "pipe", "ignore", "ipc"],
signal: this.abortController.signal,
env
});

View File

@@ -34,9 +34,6 @@ patchedDependencies:
   '@ai-sdk/openai@2.0.85':
     hash: f2077f4759520d1de69b164dfd8adca1a9ace9de667e35cb0e55e812ce2ac13b
     path: patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch
-  '@anthropic-ai/claude-agent-sdk@0.1.62':
-    hash: 61ed4549b423c717cbfef526e3ed5b0329c2de2de88c9ef772188668f7dc26e8
-    path: patches/@anthropic-ai-claude-agent-sdk-npm-0.1.62-23ae56f8c8.patch
   '@anthropic-ai/vertex-sdk@0.11.4':
     hash: 12e3275df5632dfe717d4db64df70e9b0128dfac86195da27722effe4749662f
     path: patches/@anthropic-ai-vertex-sdk-npm-0.11.4-c19cb41edb.patch
@@ -88,8 +85,8 @@ importers:
   .:
     dependencies:
       '@anthropic-ai/claude-agent-sdk':
-        specifier: 0.1.62
-        version: 0.1.62(patch_hash=61ed4549b423c717cbfef526e3ed5b0329c2de2de88c9ef772188668f7dc26e8)(zod@4.3.4)
+        specifier: 0.1.76
+        version: 0.1.76(zod@4.3.4)
       '@libsql/client':
         specifier: 0.14.0
         version: 0.14.0
@@ -1619,11 +1616,11 @@ packages:
   '@antfu/install-pkg@1.1.0':
     resolution: {integrity: sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ==}
-  '@anthropic-ai/claude-agent-sdk@0.1.62':
-    resolution: {integrity: sha512-KoJAQ0kdrbOukh4r0CFvFZgSKlAGAVJf8baeK2jpFCxbUhqr99Ier88v1L2iehWSWkXR6oVaThCYozN74Q3jUw==}
+  '@anthropic-ai/claude-agent-sdk@0.1.76':
+    resolution: {integrity: sha512-s7RvpXoFaLXLG7A1cJBAPD8ilwOhhc/12fb5mJXRuD561o4FmPtQ+WRfuy9akMmrFRfLsKv8Ornw3ClGAPL2fw==}
     engines: {node: '>=18.0.0'}
     peerDependencies:
-      zod: ^3.24.1
+      zod: ^3.24.1 || ^4.0.0
   '@anthropic-ai/sdk@0.27.3':
     resolution: {integrity: sha512-IjLt0gd3L4jlOfilxVXTifn42FnVffMgDC04RJK1KDZpmkBWLv0XC92MVVmkxrFZNS/7l3xWgP/I3nqtX1sQHw==}
@@ -14614,7 +14611,7 @@ snapshots:
     package-manager-detector: 1.6.0
     tinyexec: 1.0.2
-  '@anthropic-ai/claude-agent-sdk@0.1.62(patch_hash=61ed4549b423c717cbfef526e3ed5b0329c2de2de88c9ef772188668f7dc26e8)(zod@4.3.4)':
+  '@anthropic-ai/claude-agent-sdk@0.1.76(zod@4.3.4)':
     dependencies:
       zod: 4.3.4
     optionalDependencies:

View File

@@ -0,0 +1,240 @@
import { buildFunctionCallToolName, buildMcpToolName, generateMcpToolFunctionName, toCamelCase } from '@shared/mcp'
import { describe, expect, it } from 'vitest'
describe('toCamelCase', () => {
it('should convert hyphenated strings', () => {
expect(toCamelCase('my-server')).toBe('myServer')
expect(toCamelCase('my-tool-name')).toBe('myToolName')
})
it('should convert underscored strings', () => {
expect(toCamelCase('my_server')).toBe('myServer')
expect(toCamelCase('search_issues')).toBe('searchIssues')
})
it('should handle mixed delimiters', () => {
expect(toCamelCase('my-server_name')).toBe('myServerName')
})
it('should handle leading numbers by prefixing underscore', () => {
expect(toCamelCase('123server')).toBe('_123server')
})
it('should handle special characters', () => {
expect(toCamelCase('test@server!')).toBe('testServer')
expect(toCamelCase('tool#name$')).toBe('toolName')
})
it('should trim whitespace', () => {
expect(toCamelCase(' server ')).toBe('server')
})
it('should handle empty string', () => {
expect(toCamelCase('')).toBe('')
})
it('should handle uppercase snake case', () => {
expect(toCamelCase('MY_SERVER')).toBe('myServer')
expect(toCamelCase('SEARCH_ISSUES')).toBe('searchIssues')
})
it('should handle mixed case', () => {
expect(toCamelCase('MyServer')).toBe('myserver')
expect(toCamelCase('myTOOL')).toBe('mytool')
})
})
describe('buildMcpToolName', () => {
it('should build basic name with defaults', () => {
expect(buildMcpToolName('github', 'search_issues')).toBe('github_searchIssues')
})
it('should handle undefined server name', () => {
expect(buildMcpToolName(undefined, 'search_issues')).toBe('searchIssues')
})
it('should apply custom prefix and delimiter', () => {
expect(buildMcpToolName('github', 'search', { prefix: 'mcp__', delimiter: '__' })).toBe('mcp__github__search')
})
it('should respect maxLength', () => {
const result = buildMcpToolName('veryLongServerName', 'veryLongToolName', { maxLength: 20 })
expect(result.length).toBeLessThanOrEqual(20)
})
it('should handle collision with existingNames', () => {
const existingNames = new Set(['github_search'])
const result = buildMcpToolName('github', 'search', { existingNames })
expect(result).toBe('github_search1')
expect(existingNames.has('github_search1')).toBe(true)
})
it('should respect maxLength when adding collision suffix', () => {
const existingNames = new Set(['a'.repeat(20)])
const result = buildMcpToolName('a'.repeat(20), '', { maxLength: 20, existingNames })
expect(result.length).toBeLessThanOrEqual(20)
expect(existingNames.has(result)).toBe(true)
})
it('should handle multiple collisions with maxLength', () => {
const existingNames = new Set(['abcd', 'abc1', 'abc2'])
const result = buildMcpToolName('abcd', '', { maxLength: 4, existingNames })
expect(result).toBe('abc3')
expect(result.length).toBeLessThanOrEqual(4)
})
})
describe('generateMcpToolFunctionName', () => {
it('should return format serverName_toolName in camelCase', () => {
expect(generateMcpToolFunctionName('github', 'search_issues')).toBe('github_searchIssues')
})
it('should handle hyphenated names', () => {
expect(generateMcpToolFunctionName('my-server', 'my-tool')).toBe('myServer_myTool')
})
it('should handle undefined server name', () => {
expect(generateMcpToolFunctionName(undefined, 'search_issues')).toBe('searchIssues')
})
it('should handle collision detection', () => {
const existingNames = new Set<string>()
const first = generateMcpToolFunctionName('github', 'search', existingNames)
const second = generateMcpToolFunctionName('github', 'search', existingNames)
expect(first).toBe('github_search')
expect(second).toBe('github_search1')
})
})
describe('buildFunctionCallToolName', () => {
describe('basic format', () => {
it('should return format mcp__{server}__{tool} in camelCase', () => {
const result = buildFunctionCallToolName('github', 'search_issues')
expect(result).toBe('mcp__github__searchIssues')
})
it('should handle simple server and tool names', () => {
expect(buildFunctionCallToolName('fetch', 'get_page')).toBe('mcp__fetch__getPage')
expect(buildFunctionCallToolName('database', 'query')).toBe('mcp__database__query')
})
})
describe('valid JavaScript identifier', () => {
it('should always start with mcp__ prefix (valid JS identifier start)', () => {
const result = buildFunctionCallToolName('123server', '456tool')
expect(result).toMatch(/^mcp__/)
})
it('should handle hyphenated names with camelCase', () => {
const result = buildFunctionCallToolName('my-server', 'my-tool')
expect(result).toBe('mcp__myServer__myTool')
})
it('should be a valid JavaScript identifier', () => {
const testCases = [
['github', 'create_issue'],
['my-server', 'fetch-data'],
['test@server', 'tool#name'],
['server.name', 'tool.action']
]
for (const [server, tool] of testCases) {
const result = buildFunctionCallToolName(server, tool)
expect(result).toMatch(/^[a-zA-Z_][a-zA-Z0-9_]*$/)
}
})
})
describe('character sanitization', () => {
it('should convert special characters to camelCase boundaries', () => {
expect(buildFunctionCallToolName('my-server', 'my-tool-name')).toBe('mcp__myServer__myToolName')
expect(buildFunctionCallToolName('test@server!', 'tool#name$')).toBe('mcp__testServer__toolName')
expect(buildFunctionCallToolName('server.name', 'tool.action')).toBe('mcp__serverName__toolAction')
})
it('should handle spaces', () => {
const result = buildFunctionCallToolName('my server', 'my tool')
expect(result).toBe('mcp__myServer__myTool')
})
})
describe('length constraints', () => {
it('should not exceed 63 characters', () => {
const longServerName = 'a'.repeat(50)
const longToolName = 'b'.repeat(50)
const result = buildFunctionCallToolName(longServerName, longToolName)
expect(result.length).toBeLessThanOrEqual(63)
})
it('should not end with underscores after truncation', () => {
const longServerName = 'a'.repeat(30)
const longToolName = 'b'.repeat(30)
const result = buildFunctionCallToolName(longServerName, longToolName)
expect(result).not.toMatch(/_+$/)
expect(result.length).toBeLessThanOrEqual(63)
})
})
describe('edge cases', () => {
it('should handle empty server name', () => {
const result = buildFunctionCallToolName('', 'tool')
expect(result).toBe('mcp__tool')
})
it('should handle empty tool name', () => {
const result = buildFunctionCallToolName('server', '')
expect(result).toBe('mcp__server__')
})
it('should trim whitespace from names', () => {
const result = buildFunctionCallToolName(' server ', ' tool ')
expect(result).toBe('mcp__server__tool')
})
it('should handle mixed case by normalizing to lowercase first', () => {
const result = buildFunctionCallToolName('MyServer', 'MyTool')
expect(result).toBe('mcp__myserver__mytool')
})
it('should handle uppercase snake case', () => {
const result = buildFunctionCallToolName('MY_SERVER', 'SEARCH_ISSUES')
expect(result).toBe('mcp__myServer__searchIssues')
})
})
describe('deterministic output', () => {
it('should produce consistent results for same input', () => {
const result1 = buildFunctionCallToolName('github', 'search_repos')
const result2 = buildFunctionCallToolName('github', 'search_repos')
expect(result1).toBe(result2)
})
it('should produce different results for different inputs', () => {
const result1 = buildFunctionCallToolName('server1', 'tool')
const result2 = buildFunctionCallToolName('server2', 'tool')
expect(result1).not.toBe(result2)
})
})
describe('real-world scenarios', () => {
it('should handle GitHub MCP server', () => {
expect(buildFunctionCallToolName('github', 'create_issue')).toBe('mcp__github__createIssue')
expect(buildFunctionCallToolName('github', 'search_repositories')).toBe('mcp__github__searchRepositories')
})
it('should handle filesystem MCP server', () => {
expect(buildFunctionCallToolName('filesystem', 'read_file')).toBe('mcp__filesystem__readFile')
expect(buildFunctionCallToolName('filesystem', 'write_file')).toBe('mcp__filesystem__writeFile')
})
it('should handle hyphenated server names (common in npm packages)', () => {
expect(buildFunctionCallToolName('cherry-fetch', 'get_page')).toBe('mcp__cherryFetch__getPage')
expect(buildFunctionCallToolName('mcp-server-github', 'search')).toBe('mcp__mcpServerGithub__search')
})
it('should handle scoped npm package style names', () => {
const result = buildFunctionCallToolName('@anthropic/mcp-server', 'chat')
expect(result).toBe('mcp__AnthropicMcpServer__chat')
})
})
})

View File

@@ -9,6 +9,7 @@ import DiDiMcpServer from './didi-mcp'
 import DifyKnowledgeServer from './dify-knowledge'
 import FetchServer from './fetch'
 import FileSystemServer from './filesystem'
+import HubServer from './hub'
 import MemoryServer from './memory'
 import PythonServer from './python'
 import ThinkingServer from './sequentialthinking'
@@ -52,6 +53,9 @@ export function createInMemoryMCPServer(
     case BuiltinMCPServerNames.browser: {
       return new BrowserServer().server
     }
+    case BuiltinMCPServerNames.hub: {
+      return new HubServer().server
+    }
     default:
       throw new Error(`Unknown in-memory MCP server: ${name}`)
   }

View File

@@ -0,0 +1,213 @@
# Hub MCP Server
A built-in MCP server that aggregates all active MCP servers in Cherry Studio and exposes them through `search` and `exec` tools.
## Overview
The Hub server enables LLMs to discover and call tools from all active MCP servers without needing to know the specific server names or tool signatures upfront.
## Auto Mode Integration
The Hub server is the core component of Cherry Studio's **Auto MCP Mode**. When an assistant is set to Auto mode:
1. **Automatic Injection**: The Hub server is automatically injected as the only MCP server for the assistant
2. **System Prompt**: A specialized system prompt (`HUB_MODE_SYSTEM_PROMPT`) is appended to guide the LLM on how to use the `search` and `exec` tools
3. **Dynamic Discovery**: The LLM can discover and use any tools from all active MCP servers without manual configuration
### MCP Modes
Cherry Studio supports three MCP modes per assistant:
| Mode | Description | Tools Available |
|------|-------------|-----------------|
| **Disabled** | No MCP tools | None |
| **Auto** | Hub server only | `search`, `exec` |
| **Manual** | User selects servers | Selected server tools |
### How Auto Mode Works
```
User Message
┌─────────────────────────────────────────┐
│ Assistant (mcpMode: 'auto') │
│ │
│ System Prompt + HUB_MODE_SYSTEM_PROMPT │
│ Tools: [hub.search, hub.exec] │
└─────────────────────────────────────────┘
┌─────────────────────────────────────────┐
│ LLM decides to use MCP tools │
│ │
│ 1. search({ query: "github,repo" }) │
│ 2. exec({ code: "await searchRepos()" })│
└─────────────────────────────────────────┘
┌─────────────────────────────────────────┐
│ Hub Server │
│ │
│ Aggregates all active MCP servers │
│ Routes tool calls to appropriate server │
└─────────────────────────────────────────┘
```
### Relevant Code
- **Type Definition**: `src/renderer/src/types/index.ts` - `McpMode` type and `getEffectiveMcpMode()`
- **Hub Server Constant**: `src/renderer/src/store/mcp.ts` - `hubMCPServer`
- **Server Selection**: `src/renderer/src/services/ApiService.ts` - `getMcpServersForAssistant()`
- **System Prompt**: `src/renderer/src/config/prompts.ts` - `HUB_MODE_SYSTEM_PROMPT`
- **Prompt Injection**: `src/renderer/src/aiCore/prepareParams/parameterBuilder.ts`
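For orientation, the server-selection step cited above can be sketched as follows. This is a hedged sketch: `getMcpServersForAssistant` and `hubMCPServer` are the names listed in the Relevant Code section, but the signature and surrounding types are assumptions for illustration.

```typescript
// Hedged sketch of Auto-mode server selection; the real logic lives in ApiService.ts.
import { hubMCPServer } from '@renderer/store/mcp' // constant cited above; import path assumed

type McpMode = 'disabled' | 'auto' | 'manual'

interface AssistantLike {
  mcpMode?: McpMode
  mcpServers?: Array<{ id: string; name: string }>
}

function getMcpServersForAssistant(assistant: AssistantLike) {
  switch (assistant.mcpMode ?? 'disabled') {
    case 'auto':
      // Auto mode: the built-in Hub server is injected as the only MCP server.
      return [hubMCPServer]
    case 'manual':
      // Manual mode: whatever servers the user selected for this assistant.
      return assistant.mcpServers ?? []
    default:
      // Disabled: no MCP tools are exposed.
      return []
  }
}
```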
## Tools
### `search`
Search for available MCP tools by keywords.
**Parameters:**
| Name | Type | Required | Description |
|------|------|----------|-------------|
| `query` | string | Yes | Search keywords, comma-separated for OR matching |
| `limit` | number | No | Maximum results to return (default: 10, max: 50) |
**Example:**
```json
{
"query": "browser,chrome",
"limit": 5
}
```
**Returns:** JavaScript function declarations with JSDoc comments that can be used in the `exec` tool.
```javascript
// Found 2 tool(s):
/**
* Launch a browser instance
*
* @param {{ browser?: "chromium" | "firefox" | "webkit", headless?: boolean }} params
* @returns {Promise<unknown>}
*/
async function launchBrowser(params) {
return await __callTool("browser__launch_browser", params);
}
```
### `exec`
Execute JavaScript code that calls MCP tools.
**Parameters:**
| Name | Type | Required | Description |
|------|------|----------|-------------|
| `code` | string | Yes | JavaScript code to execute |
**Built-in Helpers:**
- `parallel(...promises)` - Run multiple tool calls concurrently (Promise.all)
- `settle(...promises)` - Run multiple tool calls and get all results (Promise.allSettled)
- `console.log/warn/error/info/debug` - Captured in output logs
**Example:**
```javascript
// Call a single tool
const result = await searchRepos({ query: "react" });
return result;
// Call multiple tools in parallel
const [users, repos] = await parallel(
getUsers({ limit: 10 }),
searchRepos({ query: "typescript" })
);
return { users, repos };
```
**Returns:**
```json
{
"result": { "users": [...], "repos": [...] },
"logs": ["[log] Processing..."],
"error": null
}
```
## Usage Flow
1. **Search** for tools using keywords:
```
search({ query: "github,repository" })
```
2. **Review** the returned function signatures and JSDoc
3. **Execute** code using the discovered tools:
```
exec({ code: 'return await searchRepos({ query: "react" })' })
```
## Configuration
The Hub server is a built-in server identified as `@cherry/hub`.
### Using Auto Mode (Recommended)
The easiest way to use the Hub server is through Auto mode:
1. Click the **MCP Tools** button (hammer icon) in the input bar
2. Select **Auto** mode
3. The Hub server is automatically enabled for the assistant
### Manual Configuration
Alternatively, you can enable the Hub server manually:
1. Go to **Settings** → **MCP Servers**
2. Find **Hub** in the built-in servers list
3. Toggle it on
4. In the assistant's MCP settings, select the Hub server
## Caching
- Tool definitions are cached for **10 minutes**
- Cache is automatically refreshed when expired
- Cache is invalidated when MCP servers connect/disconnect
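Roughly, the refresh check behaves like the sketch below (hedged: the real cache lives in `tool-registry.ts` and the TTL constant in `index.ts`, so the names and the TTL value here are illustrative only):

```typescript
// Illustrative TTL cache for the aggregated tool definitions (names and TTL assumed).
interface ToolCache<T> {
  tools: T[]
  fetchedAt: number
}

const TOOLS_CACHE_TTL_MS = 10 * 60 * 1000 // illustrative value
let cache: ToolCache<unknown> | null = null

async function getTools(listAllActiveServerTools: () => Promise<unknown[]>) {
  if (cache && Date.now() - cache.fetchedAt < TOOLS_CACHE_TTL_MS) {
    return cache.tools // still fresh: serve from cache
  }
  const tools = await listAllActiveServerTools() // expired or empty: re-aggregate
  cache = { tools, fetchedAt: Date.now() }
  return tools
}

// Called when MCP servers connect or disconnect.
function invalidateCache(): void {
  cache = null
}
```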
## Limitations
- Code execution has a **60-second timeout**
- Console logs are limited to **1000 entries**
- Search results are limited to **50 tools** maximum
- The Hub server excludes itself from the aggregated server list
## Architecture
```
LLM
HubServer
├── search → ToolRegistry → SearchIndex
└── exec → Runtime → callMcpTool()
MCPService.callTool()
External MCP Servers
```
## Files
| File | Description |
|------|-------------|
| `index.ts` | Main HubServer class |
| `types.ts` | TypeScript type definitions |
| `generator.ts` | Converts MCP tools to JS functions with JSDoc |
| `tool-registry.ts` | In-memory tool cache with TTL |
| `search.ts` | Keyword-based tool search |
| `runtime.ts` | JavaScript code execution engine |
| `mcp-bridge.ts` | Bridge to Cherry Studio's MCPService |

View File

@@ -0,0 +1,119 @@
import { describe, expect, it } from 'vitest'
import { generateToolFunction, generateToolsCode } from '../generator'
import type { GeneratedTool } from '../types'
describe('generator', () => {
describe('generateToolFunction', () => {
it('generates a simple tool function', () => {
const tool = {
id: 'test-id',
name: 'search_repos',
description: 'Search for GitHub repositories',
serverId: 'github',
serverName: 'github-server',
inputSchema: {
type: 'object' as const,
properties: {
query: { type: 'string', description: 'Search query' },
limit: { type: 'number', description: 'Max results' }
},
required: ['query']
},
type: 'mcp' as const
}
const existingNames = new Set<string>()
const callTool = async () => ({ success: true })
const result = generateToolFunction(tool, existingNames, callTool)
expect(result.functionName).toBe('githubServer_searchRepos')
expect(result.jsCode).toContain('async function githubServer_searchRepos')
expect(result.jsCode).toContain('Search for GitHub repositories')
expect(result.jsCode).toContain('__callTool')
})
it('handles unique function names', () => {
const tool = {
id: 'test-id',
name: 'search',
serverId: 'server1',
serverName: 'server1',
inputSchema: { type: 'object' as const, properties: {} },
type: 'mcp' as const
}
const existingNames = new Set<string>(['server1_search'])
const callTool = async () => ({})
const result = generateToolFunction(tool, existingNames, callTool)
expect(result.functionName).toBe('server1_search1')
})
it('handles enum types in schema', () => {
const tool = {
id: 'test-id',
name: 'launch_browser',
serverId: 'browser',
serverName: 'browser',
inputSchema: {
type: 'object' as const,
properties: {
browser: {
type: 'string',
enum: ['chromium', 'firefox', 'webkit']
}
}
},
type: 'mcp' as const
}
const existingNames = new Set<string>()
const callTool = async () => ({})
const result = generateToolFunction(tool, existingNames, callTool)
expect(result.jsCode).toContain('"chromium" | "firefox" | "webkit"')
})
})
describe('generateToolsCode', () => {
it('generates code for multiple tools', () => {
const tools: GeneratedTool[] = [
{
serverId: 's1',
serverName: 'server1',
toolName: 'tool1',
functionName: 'server1_tool1',
jsCode: 'async function server1_tool1() {}',
fn: async () => ({}),
signature: '{}',
returns: 'unknown'
},
{
serverId: 's2',
serverName: 'server2',
toolName: 'tool2',
functionName: 'server2_tool2',
jsCode: 'async function server2_tool2() {}',
fn: async () => ({}),
signature: '{}',
returns: 'unknown'
}
]
const result = generateToolsCode(tools)
expect(result).toContain('2 tool(s)')
expect(result).toContain('async function server1_tool1')
expect(result).toContain('async function server2_tool2')
})
it('returns message for empty tools', () => {
const result = generateToolsCode([])
expect(result).toBe('// No tools available')
})
})
})

View File

@@ -0,0 +1,229 @@
import type { MCPTool } from '@types'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { HubServer } from '../index'
const mockTools: MCPTool[] = [
{
id: 'github__search_repos',
name: 'search_repos',
description: 'Search for GitHub repositories',
serverId: 'github',
serverName: 'GitHub',
inputSchema: {
type: 'object',
properties: {
query: { type: 'string', description: 'Search query' },
limit: { type: 'number', description: 'Max results' }
},
required: ['query']
},
type: 'mcp'
},
{
id: 'github__get_user',
name: 'get_user',
description: 'Get GitHub user profile',
serverId: 'github',
serverName: 'GitHub',
inputSchema: {
type: 'object',
properties: {
username: { type: 'string', description: 'GitHub username' }
},
required: ['username']
},
type: 'mcp'
},
{
id: 'database__query',
name: 'query',
description: 'Execute a database query',
serverId: 'database',
serverName: 'Database',
inputSchema: {
type: 'object',
properties: {
sql: { type: 'string', description: 'SQL query to execute' }
},
required: ['sql']
},
type: 'mcp'
}
]
vi.mock('@main/services/MCPService', () => ({
default: {
listAllActiveServerTools: vi.fn(async () => mockTools),
callToolById: vi.fn(async (toolId: string, args: unknown) => {
if (toolId === 'github__search_repos') {
return {
content: [{ type: 'text', text: JSON.stringify({ repos: ['repo1', 'repo2'], query: args }) }]
}
}
if (toolId === 'github__get_user') {
return {
content: [{ type: 'text', text: JSON.stringify({ username: (args as any).username, id: 123 }) }]
}
}
if (toolId === 'database__query') {
return {
content: [{ type: 'text', text: JSON.stringify({ rows: [{ id: 1 }, { id: 2 }] }) }]
}
}
return { content: [{ type: 'text', text: '{}' }] }
}),
abortTool: vi.fn(async () => true)
}
}))
import mcpService from '@main/services/MCPService'
describe('HubServer Integration', () => {
let hubServer: HubServer
beforeEach(() => {
vi.clearAllMocks()
hubServer = new HubServer()
})
afterEach(() => {
vi.clearAllMocks()
})
describe('full search → exec flow', () => {
it('searches for tools and executes them', async () => {
const searchResult = await (hubServer as any).handleSearch({ query: 'github,repos' })
expect(searchResult.content).toBeDefined()
const searchText = JSON.parse(searchResult.content[0].text)
expect(searchText.total).toBeGreaterThan(0)
expect(searchText.tools).toContain('github_searchRepos')
const execResult = await (hubServer as any).handleExec({
code: 'return await github_searchRepos({ query: "test" })'
})
expect(execResult.content).toBeDefined()
const execOutput = JSON.parse(execResult.content[0].text)
expect(execOutput.result).toEqual({ repos: ['repo1', 'repo2'], query: { query: 'test' } })
})
it('handles multiple tool calls in parallel', async () => {
await (hubServer as any).handleSearch({ query: 'github' })
const execResult = await (hubServer as any).handleExec({
code: `
const results = await parallel(
github_searchRepos({ query: "react" }),
github_getUser({ username: "octocat" })
);
return results
`
})
const execOutput = JSON.parse(execResult.content[0].text)
expect(execOutput.result).toHaveLength(2)
expect(execOutput.result[0]).toEqual({ repos: ['repo1', 'repo2'], query: { query: 'react' } })
expect(execOutput.result[1]).toEqual({ username: 'octocat', id: 123 })
})
it('searches across multiple servers', async () => {
const searchResult = await (hubServer as any).handleSearch({ query: 'query' })
const searchText = JSON.parse(searchResult.content[0].text)
expect(searchText.tools).toContain('database_query')
})
})
describe('tools caching', () => {
it('uses cached tools within TTL', async () => {
await (hubServer as any).handleSearch({ query: 'github' })
const firstCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
await (hubServer as any).handleSearch({ query: 'github' })
const secondCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
expect(secondCallCount).toBe(firstCallCount) // Should use cache
})
it('refreshes tools after cache invalidation', async () => {
await (hubServer as any).handleSearch({ query: 'github' })
const firstCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
hubServer.invalidateCache()
await (hubServer as any).handleSearch({ query: 'github' })
const secondCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
expect(secondCallCount).toBe(firstCallCount + 1)
})
})
describe('error handling', () => {
it('throws error for invalid search query', async () => {
await expect((hubServer as any).handleSearch({})).rejects.toThrow('query parameter is required')
})
it('throws error for invalid exec code', async () => {
await expect((hubServer as any).handleExec({})).rejects.toThrow('code parameter is required')
})
it('handles runtime errors in exec', async () => {
const execResult = await (hubServer as any).handleExec({
code: 'throw new Error("test error")'
})
const execOutput = JSON.parse(execResult.content[0].text)
expect(execOutput.error).toBe('test error')
expect(execOutput.isError).toBe(true)
})
})
describe('exec timeouts', () => {
afterEach(() => {
vi.useRealTimers()
})
it('aborts in-flight tool calls and returns logs on timeout', async () => {
vi.useFakeTimers()
let toolCallStarted: (() => void) | null = null
const toolCallStartedPromise = new Promise<void>((resolve) => {
toolCallStarted = resolve
})
vi.mocked(mcpService.callToolById).mockImplementationOnce(async () => {
toolCallStarted?.()
return await new Promise(() => {})
})
const execPromise = (hubServer as any).handleExec({
code: `
console.log("starting");
return await github_searchRepos({ query: "hang" });
`
})
await toolCallStartedPromise
await vi.advanceTimersByTimeAsync(60000)
await vi.runAllTimersAsync()
const execResult = await execPromise
const execOutput = JSON.parse(execResult.content[0].text)
expect(execOutput.error).toBe('Execution timed out after 60000ms')
expect(execOutput.result).toBeUndefined()
expect(execOutput.isError).toBe(true)
expect(execOutput.logs).toContain('[log] starting')
expect(vi.mocked(mcpService.abortTool)).toHaveBeenCalled()
})
})
describe('server instance', () => {
it('creates a valid MCP server instance', () => {
expect(hubServer.server).toBeDefined()
expect(hubServer.server.setRequestHandler).toBeDefined()
})
})
})

View File

@@ -0,0 +1,159 @@
import { describe, expect, it, vi } from 'vitest'
import { Runtime } from '../runtime'
import type { GeneratedTool } from '../types'
vi.mock('../mcp-bridge', () => ({
callMcpTool: vi.fn(async (toolId: string, params: unknown) => {
if (toolId === 'server__failing_tool') {
throw new Error('Tool failed')
}
return { toolId, params, success: true }
})
}))
const createMockTool = (partial: Partial<GeneratedTool>): GeneratedTool => ({
serverId: 'server1',
serverName: 'server1',
toolName: 'tool',
functionName: 'server1_mockTool',
jsCode: 'async function server1_mockTool() {}',
fn: async (params) => ({ result: params }),
signature: '{}',
returns: 'unknown',
...partial
})
describe('Runtime', () => {
describe('execute', () => {
it('executes simple code and returns result', async () => {
const runtime = new Runtime()
const tools: GeneratedTool[] = []
const result = await runtime.execute('return 1 + 1', tools)
expect(result.result).toBe(2)
expect(result.error).toBeUndefined()
})
it('executes async code', async () => {
const runtime = new Runtime()
const tools: GeneratedTool[] = []
const result = await runtime.execute('return await Promise.resolve(42)', tools)
expect(result.result).toBe(42)
})
it('calls tool functions', async () => {
const runtime = new Runtime()
const tools = [
createMockTool({
functionName: 'searchRepos',
fn: async (params) => ({ repos: ['repo1', 'repo2'], query: params })
})
]
const result = await runtime.execute('return await searchRepos({ query: "test" })', tools)
expect(result.result).toEqual({ toolId: 'searchRepos', params: { query: 'test' }, success: true })
})
it('captures console logs', async () => {
const runtime = new Runtime()
const tools: GeneratedTool[] = []
const result = await runtime.execute(
`
console.log("hello");
console.warn("warning");
return "done"
`,
tools
)
expect(result.result).toBe('done')
expect(result.logs).toContain('[log] hello')
expect(result.logs).toContain('[warn] warning')
})
it('handles errors gracefully', async () => {
const runtime = new Runtime()
const tools: GeneratedTool[] = []
const result = await runtime.execute('throw new Error("test error")', tools)
expect(result.result).toBeUndefined()
expect(result.error).toBe('test error')
expect(result.isError).toBe(true)
})
it('supports parallel helper', async () => {
const runtime = new Runtime()
const tools: GeneratedTool[] = []
const result = await runtime.execute(
`
const results = await parallel(
Promise.resolve(1),
Promise.resolve(2),
Promise.resolve(3)
);
return results
`,
tools
)
expect(result.result).toEqual([1, 2, 3])
})
it('supports settle helper', async () => {
const runtime = new Runtime()
const tools: GeneratedTool[] = []
const result = await runtime.execute(
`
const results = await settle(
Promise.resolve(1),
Promise.reject(new Error("fail"))
);
return results.map(r => r.status)
`,
tools
)
expect(result.result).toEqual(['fulfilled', 'rejected'])
})
it('returns last expression when no explicit return', async () => {
const runtime = new Runtime()
const tools: GeneratedTool[] = []
const result = await runtime.execute(
`
const x = 10;
const y = 20;
return x + y
`,
tools
)
expect(result.result).toBe(30)
})
it('stops execution when a tool throws', async () => {
const runtime = new Runtime()
const tools = [
createMockTool({
functionName: 'server__failing_tool'
})
]
const result = await runtime.execute('return await server__failing_tool({})', tools)
expect(result.result).toBeUndefined()
expect(result.error).toBe('Tool failed')
expect(result.isError).toBe(true)
})
})
})

View File

@@ -0,0 +1,118 @@
import { describe, expect, it } from 'vitest'
import { searchTools } from '../search'
import type { GeneratedTool } from '../types'
const createMockTool = (partial: Partial<GeneratedTool>): GeneratedTool => {
const functionName = partial.functionName || 'server1_tool'
return {
serverId: 'server1',
serverName: 'server1',
toolName: partial.toolName || 'tool',
functionName,
jsCode: `async function ${functionName}() {}`,
fn: async () => ({}),
signature: '{}',
returns: 'unknown',
...partial
}
}
describe('search', () => {
describe('searchTools', () => {
it('returns all tools when query is empty', () => {
const tools = [
createMockTool({ toolName: 'tool1', functionName: 'tool1' }),
createMockTool({ toolName: 'tool2', functionName: 'tool2' })
]
const result = searchTools(tools, { query: '' })
expect(result.total).toBe(2)
expect(result.tools).toContain('tool1')
expect(result.tools).toContain('tool2')
})
it('filters tools by single keyword', () => {
const tools = [
createMockTool({ toolName: 'search_repos', functionName: 'searchRepos' }),
createMockTool({ toolName: 'get_user', functionName: 'getUser' }),
createMockTool({ toolName: 'search_users', functionName: 'searchUsers' })
]
const result = searchTools(tools, { query: 'search' })
expect(result.total).toBe(2)
expect(result.tools).toContain('searchRepos')
expect(result.tools).toContain('searchUsers')
expect(result.tools).not.toContain('getUser')
})
it('supports OR matching with comma-separated keywords', () => {
const tools = [
createMockTool({ toolName: 'browser_open', functionName: 'browserOpen' }),
createMockTool({ toolName: 'chrome_launch', functionName: 'chromeLaunch' }),
createMockTool({ toolName: 'file_read', functionName: 'fileRead' })
]
const result = searchTools(tools, { query: 'browser,chrome' })
expect(result.total).toBe(2)
expect(result.tools).toContain('browserOpen')
expect(result.tools).toContain('chromeLaunch')
expect(result.tools).not.toContain('fileRead')
})
it('matches against description', () => {
const tools = [
createMockTool({
toolName: 'launch',
functionName: 'launch',
description: 'Launch a browser instance'
}),
createMockTool({
toolName: 'close',
functionName: 'close',
description: 'Close a window'
})
]
const result = searchTools(tools, { query: 'browser' })
expect(result.total).toBe(1)
expect(result.tools).toContain('launch')
})
it('respects limit parameter', () => {
const tools = Array.from({ length: 20 }, (_, i) =>
createMockTool({ toolName: `tool${i}`, functionName: `server1_tool${i}` })
)
const result = searchTools(tools, { query: 'tool', limit: 5 })
expect(result.total).toBe(20)
const matches = (result.tools.match(/async function server1_tool\d+/g) || []).length
expect(matches).toBe(5)
})
it('is case insensitive', () => {
const tools = [createMockTool({ toolName: 'SearchRepos', functionName: 'searchRepos' })]
const result = searchTools(tools, { query: 'SEARCH' })
expect(result.total).toBe(1)
})
it('ranks exact matches higher', () => {
const tools = [
createMockTool({ toolName: 'searching', functionName: 'searching' }),
createMockTool({ toolName: 'search', functionName: 'search' }),
createMockTool({ toolName: 'search_more', functionName: 'searchMore' })
]
const result = searchTools(tools, { query: 'search', limit: 1 })
expect(result.tools).toContain('function search(')
})
})
})

View File

@@ -0,0 +1,152 @@
import { generateMcpToolFunctionName } from '@shared/mcp'
import type { MCPTool } from '@types'
import type { GeneratedTool } from './types'
type PropertySchema = Record<string, unknown>
type InputSchema = {
type?: string
properties?: Record<string, PropertySchema>
required?: string[]
}
function schemaTypeToTS(prop: Record<string, unknown>): string {
const type = prop.type as string | string[] | undefined
const enumValues = prop.enum as unknown[] | undefined
if (enumValues && Array.isArray(enumValues)) {
return enumValues.map((v) => (typeof v === 'string' ? `"${v}"` : String(v))).join(' | ')
}
if (Array.isArray(type)) {
return type.map((t) => primitiveTypeToTS(t)).join(' | ')
}
if (type === 'array') {
const items = prop.items as Record<string, unknown> | undefined
if (items) {
return `${schemaTypeToTS(items)}[]`
}
return 'unknown[]'
}
if (type === 'object') {
return 'object'
}
return primitiveTypeToTS(type)
}
function primitiveTypeToTS(type: string | undefined): string {
switch (type) {
case 'string':
return 'string'
case 'number':
case 'integer':
return 'number'
case 'boolean':
return 'boolean'
case 'null':
return 'null'
default:
return 'unknown'
}
}
function jsonSchemaToSignature(schema: Record<string, unknown> | undefined): string {
if (!schema || typeof schema !== 'object') {
return '{}'
}
const properties = schema.properties as Record<string, Record<string, unknown>> | undefined
if (!properties) {
return '{}'
}
const required = (schema.required as string[]) || []
const parts: string[] = []
for (const [key, prop] of Object.entries(properties)) {
const isRequired = required.includes(key)
const typeStr = schemaTypeToTS(prop)
parts.push(`${key}${isRequired ? '' : '?'}: ${typeStr}`)
}
return `{ ${parts.join(', ')} }`
}
function generateJSDoc(tool: MCPTool, inputSchema: InputSchema | undefined, returns: string): string {
const lines: string[] = ['/**']
if (tool.description) {
const desc = tool.description.split('\n')[0]
lines.push(` * ${desc}`)
}
const properties = inputSchema?.properties || {}
const required = inputSchema?.required || []
if (Object.keys(properties).length > 0) {
lines.push(` * @param {Object} params`)
for (const [name, prop] of Object.entries(properties)) {
const isReq = required.includes(name)
const type = schemaTypeToTS(prop)
const paramName = isReq ? `params.${name}` : `[params.${name}]`
const desc = (prop.description as string)?.split('\n')[0] || ''
lines.push(` * @param {${type}} ${paramName} ${desc}`)
}
}
lines.push(` * @returns {Promise<${returns}>}`)
lines.push(` */`)
return lines.join('\n')
}
export function generateToolFunction(
tool: MCPTool,
existingNames: Set<string>,
callToolFn: (functionName: string, params: unknown) => Promise<unknown>
): GeneratedTool {
const functionName = generateMcpToolFunctionName(tool.serverName, tool.name, existingNames)
const inputSchema = tool.inputSchema as InputSchema | undefined
const outputSchema = tool.outputSchema as Record<string, unknown> | undefined
const signature = jsonSchemaToSignature(inputSchema)
const returns = outputSchema ? jsonSchemaToSignature(outputSchema) : 'unknown'
const jsDoc = generateJSDoc(tool, inputSchema, returns)
const jsCode = `${jsDoc}
async function ${functionName}(params) {
return await __callTool("${functionName}", params);
}`
const fn = async (params: unknown): Promise<unknown> => {
return await callToolFn(functionName, params)
}
return {
serverId: tool.serverId,
serverName: tool.serverName,
toolName: tool.name,
functionName,
jsCode,
fn,
signature,
returns,
description: tool.description
}
}
export function generateToolsCode(tools: GeneratedTool[]): string {
if (tools.length === 0) {
return '// No tools available'
}
const header = `// ${tools.length} tool(s). ALWAYS use: const r = await ToolName({...}); return r;`
const code = tools.map((t) => t.jsCode).join('\n\n')
return header + '\n\n' + code
}

View File

@ -0,0 +1,184 @@
import { cacheService } from '@data/CacheService'
import { loggerService } from '@logger'
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
import { CallToolRequestSchema, ErrorCode, ListToolsRequestSchema, McpError } from '@modelcontextprotocol/sdk/types.js'
import { generateToolFunction } from './generator'
import { callMcpTool, clearToolMap, listAllTools, syncToolMapFromGeneratedTools } from './mcp-bridge'
import { Runtime } from './runtime'
import { searchTools } from './search'
import type { ExecInput, GeneratedTool, SearchQuery } from './types'
const logger = loggerService.withContext('MCPServer:Hub')
const TOOLS_CACHE_KEY = 'hub:tools'
const TOOLS_CACHE_TTL = 60 * 1000 // 1 minute
/**
* Hub MCP Server - A meta-server that aggregates all active MCP servers.
*
* This server is NOT included in builtinMCPServers because:
* 1. It aggregates tools from all other MCP servers, not a standalone tool provider
* 2. It's designed for LLM "code mode" - enabling AI to discover and call tools programmatically
* 3. It should be auto-enabled when code mode features are used, not manually installed by users
*
* The server exposes two tools:
* - `search`: Find available tools by keywords, returns JS function signatures
* - `exec`: Execute JavaScript code that calls discovered tools
*/
export class HubServer {
public server: Server
private runtime: Runtime
constructor() {
this.runtime = new Runtime()
this.server = new Server(
{
name: 'hub-server',
version: '1.0.0'
},
{
capabilities: {
tools: {}
}
}
)
this.setupRequestHandlers()
}
private setupRequestHandlers(): void {
this.server.setRequestHandler(ListToolsRequestSchema, async () => {
return {
tools: [
{
name: 'search',
description:
'Search for available MCP tools by keywords. Use this FIRST to discover tools. Returns JavaScript async function declarations with JSDoc showing exact function names, parameters, and return types for use in `exec`.',
inputSchema: {
type: 'object',
properties: {
query: {
type: 'string',
description:
'Comma-separated search keywords. A tool matches if ANY keyword appears in its name, description, or server name. Example: "chrome,browser,tab" matches tools related to Chrome OR browser OR tabs.'
},
limit: {
type: 'number',
description: 'Maximum number of tools to return (default: 10, max: 50)'
}
},
required: ['query']
}
},
{
name: 'exec',
description:
'Execute JavaScript that calls MCP tools discovered via `search`. IMPORTANT: You MUST explicitly `return` the final value, or the result will be `undefined`.',
inputSchema: {
type: 'object',
properties: {
code: {
type: 'string',
description:
'JavaScript code to execute. The code runs inside an async context, so use `await` directly. Do NOT wrap your code in `(async () => { ... })()` - this causes double-wrapping and returns undefined. All discovered tools are async functions (call as `await ToolName(params)`). Helpers: `parallel(...promises)`, `settle(...promises)`, `console.*`. You MUST `return` the final value. Examples: `const r = await Tool({ id: "1" }); return r` or `return await Tool({ x: 1 })`'
}
},
required: ['code']
}
}
]
}
})
this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
const { name, arguments: args } = request.params
if (!args) {
throw new McpError(ErrorCode.InvalidParams, 'No arguments provided')
}
try {
switch (name) {
case 'search':
return await this.handleSearch(args as unknown as SearchQuery)
case 'exec':
return await this.handleExec(args as unknown as ExecInput)
default:
throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${name}`)
}
} catch (error) {
if (error instanceof McpError) {
throw error
}
logger.error(`Error executing tool ${name}:`, error as Error)
throw new McpError(
ErrorCode.InternalError,
`Error executing tool ${name}: ${error instanceof Error ? error.message : String(error)}`
)
}
})
}
private async fetchTools(): Promise<GeneratedTool[]> {
const cached = cacheService.get<GeneratedTool[]>(TOOLS_CACHE_KEY)
if (cached) {
logger.debug('Returning cached tools')
syncToolMapFromGeneratedTools(cached)
return cached
}
logger.debug('Fetching fresh tools')
const allTools = await listAllTools()
const existingNames = new Set<string>()
const tools = allTools.map((tool) => generateToolFunction(tool, existingNames, callMcpTool))
cacheService.set(TOOLS_CACHE_KEY, tools, TOOLS_CACHE_TTL)
syncToolMapFromGeneratedTools(tools)
return tools
}
invalidateCache(): void {
cacheService.delete(TOOLS_CACHE_KEY)
clearToolMap()
logger.debug('Tools cache invalidated')
}
private async handleSearch(query: SearchQuery) {
if (!query.query || typeof query.query !== 'string') {
throw new McpError(ErrorCode.InvalidParams, 'query parameter is required and must be a string')
}
const tools = await this.fetchTools()
const result = searchTools(tools, query)
return {
content: [
{
type: 'text',
text: JSON.stringify(result, null, 2)
}
]
}
}
private async handleExec(input: ExecInput) {
if (!input.code || typeof input.code !== 'string') {
throw new McpError(ErrorCode.InvalidParams, 'code parameter is required and must be a string')
}
const tools = await this.fetchTools()
const result = await this.runtime.execute(input.code, tools)
return {
content: [
{
type: 'text',
text: JSON.stringify(result, null, 2)
}
],
isError: result.isError
}
}
}
export default HubServer
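// Illustrative sketch (editor's addition): the intended two-step interaction with the hub.
// Transport wiring is omitted; in the app the server is connected elsewhere (e.g. through the
// in-memory MCP server factory), and the request payloads below are hypothetical.
const hub = new HubServer()
// 1. The model calls `search` to discover tools:
//    { name: 'search', arguments: { query: 'weather,forecast', limit: 5 } }
//    -> text content with JSDoc'd async function declarations
// 2. The model calls `exec` with code that uses them and returns a value:
//    { name: 'exec', arguments: { code: 'const r = await weather_get_forecast({ city: "Paris" }); return r' } }
// When the set of active servers changes, drop the 1-minute tool cache:
hub.invalidateCache()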

View File

@ -0,0 +1,96 @@
/**
* Bridge module for Hub server to access MCPService.
* Re-exports the methods needed by tool-registry and runtime.
*/
import mcpService from '@main/services/MCPService'
import { generateMcpToolFunctionName } from '@shared/mcp'
import type { MCPCallToolResponse, MCPTool, MCPToolResultContent } from '@types'
import type { GeneratedTool } from './types'
export const listAllTools = () => mcpService.listAllActiveServerTools()
const toolFunctionNameToIdMap = new Map<string, { serverId: string; toolName: string }>()
export async function refreshToolMap(): Promise<void> {
const tools = await listAllTools()
syncToolMapFromTools(tools)
}
export function syncToolMapFromTools(tools: MCPTool[]): void {
toolFunctionNameToIdMap.clear()
const existingNames = new Set<string>()
for (const tool of tools) {
const functionName = generateMcpToolFunctionName(tool.serverName, tool.name, existingNames)
toolFunctionNameToIdMap.set(functionName, { serverId: tool.serverId, toolName: tool.name })
}
}
export function syncToolMapFromGeneratedTools(tools: GeneratedTool[]): void {
toolFunctionNameToIdMap.clear()
for (const tool of tools) {
toolFunctionNameToIdMap.set(tool.functionName, { serverId: tool.serverId, toolName: tool.toolName })
}
}
export function clearToolMap(): void {
toolFunctionNameToIdMap.clear()
}
export const callMcpTool = async (functionName: string, params: unknown, callId?: string): Promise<unknown> => {
const toolInfo = toolFunctionNameToIdMap.get(functionName)
if (!toolInfo) {
await refreshToolMap()
const retryToolInfo = toolFunctionNameToIdMap.get(functionName)
if (!retryToolInfo) {
throw new Error(`Tool not found: ${functionName}`)
}
const toolId = `${retryToolInfo.serverId}__${retryToolInfo.toolName}`
const result = await mcpService.callToolById(toolId, params, callId)
throwIfToolError(result)
return extractToolResult(result)
}
const toolId = `${toolInfo.serverId}__${toolInfo.toolName}`
const result = await mcpService.callToolById(toolId, params, callId)
throwIfToolError(result)
return extractToolResult(result)
}
export const abortMcpTool = async (callId: string): Promise<boolean> => {
return mcpService.abortTool(null as unknown as Electron.IpcMainInvokeEvent, callId)
}
function extractToolResult(result: MCPCallToolResponse): unknown {
if (!result.content || result.content.length === 0) {
return null
}
const textContent = result.content.find((c) => c.type === 'text')
if (textContent?.text) {
try {
return JSON.parse(textContent.text)
} catch {
return textContent.text
}
}
return result.content
}
function throwIfToolError(result: MCPCallToolResponse): void {
if (!result.isError) {
return
}
const textContent = extractTextContent(result.content)
throw new Error(textContent ?? 'Tool execution failed')
}
function extractTextContent(content: MCPToolResultContent[] | undefined): string | undefined {
if (!content || content.length === 0) {
return undefined
}
const textBlock = content.find((item) => item.type === 'text' && item.text)
return textBlock?.text
}
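// Illustrative sketch (editor's addition): resolving a generated function name back to its
// server tool. The GeneratedTool entry is hypothetical; in the Hub server the map is populated
// by fetchTools() via syncToolMapFromGeneratedTools().
async function exampleBridgeCall(): Promise<unknown> {
syncToolMapFromGeneratedTools([
{
serverId: 'srv-1',
serverName: 'weather',
toolName: 'get_forecast',
functionName: 'weather_get_forecast',
jsCode: '',
fn: async () => null,
signature: '{ city: string }',
returns: 'unknown'
}
])
// Resolves to toolId 'srv-1__get_forecast' and delegates to mcpService.callToolById.
return callMcpTool('weather_get_forecast', { city: 'Paris' })
}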

View File

@ -0,0 +1,170 @@
import crypto from 'node:crypto'
import { Worker } from 'node:worker_threads'
import { loggerService } from '@logger'
import { abortMcpTool, callMcpTool } from './mcp-bridge'
import type {
ExecOutput,
GeneratedTool,
HubWorkerCallToolMessage,
HubWorkerExecMessage,
HubWorkerMessage,
HubWorkerResultMessage
} from './types'
import { hubWorkerSource } from './worker'
const logger = loggerService.withContext('MCPServer:Hub:Runtime')
const MAX_LOGS = 1000
const EXECUTION_TIMEOUT = 60000
export class Runtime {
async execute(code: string, tools: GeneratedTool[]): Promise<ExecOutput> {
return await new Promise<ExecOutput>((resolve) => {
const logs: string[] = []
const activeCallIds = new Map<string, string>()
let finished = false
let timedOut = false
let timeoutId: NodeJS.Timeout | null = null
const worker = new Worker(hubWorkerSource, { eval: true })
const addLog = (entry: string) => {
if (logs.length >= MAX_LOGS) {
return
}
logs.push(entry)
}
const finalize = async (output: ExecOutput, terminateWorker = true) => {
if (finished) {
return
}
finished = true
if (timeoutId) {
clearTimeout(timeoutId)
}
worker.removeAllListeners()
if (terminateWorker) {
try {
await worker.terminate()
} catch (error) {
logger.warn('Failed to terminate exec worker', error as Error)
}
}
resolve(output)
}
const abortActiveTools = async () => {
const callIds = Array.from(activeCallIds.values())
activeCallIds.clear()
if (callIds.length === 0) {
return
}
await Promise.allSettled(callIds.map((callId) => abortMcpTool(callId)))
}
const handleToolCall = async (message: HubWorkerCallToolMessage) => {
if (finished || timedOut) {
return
}
const callId = crypto.randomUUID()
activeCallIds.set(message.requestId, callId)
try {
const result = await callMcpTool(message.functionName, message.params, callId)
if (finished || timedOut) {
return
}
worker.postMessage({ type: 'toolResult', requestId: message.requestId, result })
} catch (error) {
if (finished || timedOut) {
return
}
const errorMessage = error instanceof Error ? error.message : String(error)
worker.postMessage({ type: 'toolError', requestId: message.requestId, error: errorMessage })
} finally {
activeCallIds.delete(message.requestId)
}
}
const handleResult = (message: HubWorkerResultMessage) => {
const resolvedLogs = message.logs && message.logs.length > 0 ? message.logs : logs
void finalize({
result: message.result,
logs: resolvedLogs.length > 0 ? resolvedLogs : undefined
})
}
const handleError = (errorMessage: string, messageLogs?: string[], terminateWorker = true) => {
const resolvedLogs = messageLogs && messageLogs.length > 0 ? messageLogs : logs
void finalize(
{
result: undefined,
logs: resolvedLogs.length > 0 ? resolvedLogs : undefined,
error: errorMessage,
isError: true
},
terminateWorker
)
}
const handleMessage = (message: HubWorkerMessage) => {
if (!message || typeof message !== 'object') {
return
}
switch (message.type) {
case 'log':
addLog(message.entry)
break
case 'callTool':
void handleToolCall(message)
break
case 'result':
handleResult(message)
break
case 'error':
handleError(message.error, message.logs)
break
default:
break
}
}
timeoutId = setTimeout(() => {
timedOut = true
void (async () => {
await abortActiveTools()
try {
await worker.terminate()
} catch (error) {
logger.warn('Failed to terminate exec worker after timeout', error as Error)
}
handleError(`Execution timed out after ${EXECUTION_TIMEOUT}ms`, undefined, false)
})()
}, EXECUTION_TIMEOUT)
worker.on('message', handleMessage)
worker.on('error', (error) => {
logger.error('Worker execution error', error)
handleError(error instanceof Error ? error.message : String(error))
})
worker.on('exit', (code) => {
if (finished || timedOut) {
return
}
const message = code === 0 ? 'Exec worker exited unexpectedly' : `Exec worker exited with code ${code}`
logger.error(message)
handleError(message, undefined, false)
})
const execMessage: HubWorkerExecMessage = {
type: 'exec',
code,
tools: tools.map((tool) => ({ functionName: tool.functionName }))
}
worker.postMessage(execMessage)
})
}
}
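// Illustrative sketch (editor's addition): running a snippet against the worker sandbox.
// The code string and tool list are hypothetical; results come back as an ExecOutput with
// result/logs/error, and a missing `return` in the snippet yields result === undefined.
async function exampleExec(runtime: Runtime, tools: GeneratedTool[]): Promise<void> {
const output = await runtime.execute(
'const r = await weather_get_forecast({ city: "Paris" }); console.log("fetched"); return r',
tools
)
if (output.isError) {
logger.error(`exec failed: ${output.error}`)
} else {
logger.debug(`exec result: ${JSON.stringify(output.result)}, logs: ${output.logs?.length ?? 0}`)
}
}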

View File

@ -0,0 +1,109 @@
import { generateToolsCode } from './generator'
import type { GeneratedTool, SearchQuery, SearchResult } from './types'
const DEFAULT_LIMIT = 10
const MAX_LIMIT = 50
export function searchTools(tools: GeneratedTool[], query: SearchQuery): SearchResult {
const { query: queryStr, limit = DEFAULT_LIMIT } = query
const effectiveLimit = Math.min(Math.max(1, limit), MAX_LIMIT)
const keywords = queryStr
.toLowerCase()
.split(',')
.map((k) => k.trim())
.filter((k) => k.length > 0)
if (keywords.length === 0) {
const sliced = tools.slice(0, effectiveLimit)
return {
tools: generateToolsCode(sliced),
total: tools.length
}
}
const matchedTools = tools.filter((tool) => {
const searchText = buildSearchText(tool).toLowerCase()
return keywords.some((keyword) => searchText.includes(keyword))
})
const rankedTools = rankTools(matchedTools, keywords)
const sliced = rankedTools.slice(0, effectiveLimit)
return {
tools: generateToolsCode(sliced),
total: matchedTools.length
}
}
function buildSearchText(tool: GeneratedTool): string {
const combinedName = tool.serverName ? `${tool.serverName}_${tool.toolName}` : tool.toolName
const parts = [
tool.toolName,
tool.functionName,
tool.serverName,
combinedName,
tool.description || '',
tool.signature
]
return parts.join(' ')
}
function rankTools(tools: GeneratedTool[], keywords: string[]): GeneratedTool[] {
const scored = tools.map((tool) => ({
tool,
score: calculateScore(tool, keywords)
}))
scored.sort((a, b) => b.score - a.score)
return scored.map((s) => s.tool)
}
function calculateScore(tool: GeneratedTool, keywords: string[]): number {
let score = 0
const toolName = tool.toolName.toLowerCase()
const serverName = (tool.serverName || '').toLowerCase()
const functionName = tool.functionName.toLowerCase()
const description = (tool.description || '').toLowerCase()
for (const keyword of keywords) {
// Match tool name
if (toolName === keyword) {
score += 10
} else if (toolName.startsWith(keyword)) {
score += 5
} else if (toolName.includes(keyword)) {
score += 3
}
// Match server name
if (serverName === keyword) {
score += 8
} else if (serverName.startsWith(keyword)) {
score += 4
} else if (serverName.includes(keyword)) {
score += 2
}
// Match function name (serverName_toolName format)
if (functionName === keyword) {
score += 10
} else if (functionName.startsWith(keyword)) {
score += 5
} else if (functionName.includes(keyword)) {
score += 3
}
if (description.includes(keyword)) {
const count = (description.match(new RegExp(escapeRegex(keyword), 'g')) || []).length
score += Math.min(count, 3)
}
}
return score
}
function escapeRegex(str: string): string {
return str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
}
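// Illustrative sketch (editor's addition): keyword matching is OR-based and results are ranked
// by calculateScore (exact name match > prefix > substring, plus up to 3 points for description
// occurrences). The tool list is hypothetical.
declare const allTools: GeneratedTool[]
const browserMatches = searchTools(allTools, { query: 'chrome,browser,tab', limit: 5 })
// browserMatches.tools is a single string of JSDoc'd async function declarations
// (see generateToolsCode); browserMatches.total counts matches before the limit is applied.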

View File

@ -0,0 +1,113 @@
import type { MCPServer, MCPTool } from '@types'
export interface GeneratedTool {
serverId: string
serverName: string
toolName: string
functionName: string
jsCode: string
fn: (params: unknown) => Promise<unknown>
signature: string
returns: string
description?: string
}
export interface SearchQuery {
query: string
limit?: number
}
export interface SearchResult {
tools: string
total: number
}
export interface ExecInput {
code: string
}
export type ExecOutput = {
result: unknown
logs?: string[]
error?: string
isError?: boolean
}
export interface ToolRegistryOptions {
ttl?: number
}
export interface MCPToolWithServer extends MCPTool {
server: MCPServer
}
export interface ExecutionContext {
__callTool: (functionName: string, params: unknown) => Promise<unknown>
parallel: <T>(...promises: Promise<T>[]) => Promise<T[]>
settle: <T>(...promises: Promise<T>[]) => Promise<PromiseSettledResult<T>[]>
console: ConsoleMethods
[functionName: string]: unknown
}
export interface ConsoleMethods {
log: (...args: unknown[]) => void
warn: (...args: unknown[]) => void
error: (...args: unknown[]) => void
info: (...args: unknown[]) => void
debug: (...args: unknown[]) => void
}
export type HubWorkerTool = {
functionName: string
}
export type HubWorkerExecMessage = {
type: 'exec'
code: string
tools: HubWorkerTool[]
}
export type HubWorkerCallToolMessage = {
type: 'callTool'
requestId: string
functionName: string
params: unknown
}
export type HubWorkerToolResultMessage = {
type: 'toolResult'
requestId: string
result: unknown
}
export type HubWorkerToolErrorMessage = {
type: 'toolError'
requestId: string
error: string
}
export type HubWorkerResultMessage = {
type: 'result'
result: unknown
logs?: string[]
}
export type HubWorkerErrorMessage = {
type: 'error'
error: string
logs?: string[]
}
export type HubWorkerLogMessage = {
type: 'log'
entry: string
}
export type HubWorkerMessage =
| HubWorkerExecMessage
| HubWorkerCallToolMessage
| HubWorkerToolResultMessage
| HubWorkerToolErrorMessage
| HubWorkerResultMessage
| HubWorkerErrorMessage
| HubWorkerLogMessage
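// Illustrative sketch (editor's addition): message sequence between Runtime and the worker for
// one exec call that invokes a single tool.
//   main -> worker : { type: 'exec', code, tools: [{ functionName }] }
//   worker -> main : { type: 'callTool', requestId, functionName, params }
//   main -> worker : { type: 'toolResult', requestId, result }   (or 'toolError')
//   worker -> main : { type: 'log', entry }                      (zero or more)
//   worker -> main : { type: 'result', result, logs? }           (or 'error')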

View File

@ -0,0 +1,133 @@
export const hubWorkerSource = `
const crypto = require('node:crypto')
const { parentPort } = require('node:worker_threads')
const MAX_LOGS = 1000
const logs = []
const pendingCalls = new Map()
let isExecuting = false
const stringify = (value) => {
if (value === undefined) return 'undefined'
if (value === null) return 'null'
if (typeof value === 'string') return value
if (typeof value === 'number' || typeof value === 'boolean') return String(value)
if (value instanceof Error) return value.message
try {
return JSON.stringify(value, null, 2)
} catch {
return String(value)
}
}
const pushLog = (level, args) => {
if (logs.length >= MAX_LOGS) {
return
}
const message = args.map((arg) => stringify(arg)).join(' ')
const entry = \`[\${level}] \${message}\`
logs.push(entry)
parentPort?.postMessage({ type: 'log', entry })
}
const capturedConsole = {
log: (...args) => pushLog('log', args),
warn: (...args) => pushLog('warn', args),
error: (...args) => pushLog('error', args),
info: (...args) => pushLog('info', args),
debug: (...args) => pushLog('debug', args)
}
const callTool = (functionName, params) =>
new Promise((resolve, reject) => {
const requestId = crypto.randomUUID()
pendingCalls.set(requestId, { resolve, reject })
parentPort?.postMessage({ type: 'callTool', requestId, functionName, params })
})
const buildContext = (tools) => {
const context = {
__callTool: callTool,
parallel: (...promises) => Promise.all(promises),
settle: (...promises) => Promise.allSettled(promises),
console: capturedConsole
}
for (const tool of tools) {
context[tool.functionName] = (params) => callTool(tool.functionName, params)
}
return context
}
const runCode = async (code, context) => {
const contextKeys = Object.keys(context)
const contextValues = contextKeys.map((key) => context[key])
const wrappedCode = \`
return (async () => {
\${code}
})()
\`
const fn = new Function(...contextKeys, wrappedCode)
return await fn(...contextValues)
}
const handleExec = async (code, tools) => {
if (isExecuting) {
return
}
isExecuting = true
try {
const context = buildContext(tools)
const result = await runCode(code, context)
parentPort?.postMessage({ type: 'result', result, logs: logs.length > 0 ? logs : undefined })
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error)
parentPort?.postMessage({ type: 'error', error: errorMessage, logs: logs.length > 0 ? logs : undefined })
} finally {
pendingCalls.clear()
}
}
const handleToolResult = (message) => {
const pending = pendingCalls.get(message.requestId)
if (!pending) {
return
}
pendingCalls.delete(message.requestId)
pending.resolve(message.result)
}
const handleToolError = (message) => {
const pending = pendingCalls.get(message.requestId)
if (!pending) {
return
}
pendingCalls.delete(message.requestId)
pending.reject(new Error(message.error))
}
parentPort?.on('message', (message) => {
if (!message || typeof message !== 'object') {
return
}
switch (message.type) {
case 'exec':
handleExec(message.code, message.tools ?? [])
break
case 'toolResult':
handleToolResult(message)
break
case 'toolError':
handleToolError(message)
break
default:
break
}
})
`
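// Illustrative sketch (editor's addition): for user code `return await Tool({ x: 1 })` and a
// context whose keys are ['__callTool', 'parallel', 'settle', 'console', 'Tool'], runCode
// effectively evaluates
//   new Function('__callTool', 'parallel', 'settle', 'console', 'Tool',
//     'return (async () => {\n  return await Tool({ x: 1 })\n})()')
// which is why snippets must use a top-level `return` and must not add their own async IIFE
// wrapper (that would make the outer value resolve to undefined).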

View File

@ -4,9 +4,9 @@ import path from 'node:path'
import { cacheService } from '@data/CacheService' import { cacheService } from '@data/CacheService'
import { loggerService } from '@logger' import { loggerService } from '@logger'
import { getMCPServersFromRedux } from '@main/apiServer/utils/mcp'
import { createInMemoryMCPServer } from '@main/mcpServers/factory' import { createInMemoryMCPServer } from '@main/mcpServers/factory'
import { makeSureDirExists, removeEnvProxy } from '@main/utils' import { makeSureDirExists, removeEnvProxy } from '@main/utils'
import { buildFunctionCallToolName } from '@main/utils/mcp'
import { findCommandInShellEnv, getBinaryName, getBinaryPath, isBinaryExists } from '@main/utils/process' import { findCommandInShellEnv, getBinaryName, getBinaryPath, isBinaryExists } from '@main/utils/process'
import getLoginShellEnvironment from '@main/utils/shell-env' import getLoginShellEnvironment from '@main/utils/shell-env'
import { TraceMethod, withSpanFunc } from '@mcp-trace/trace-core' import { TraceMethod, withSpanFunc } from '@mcp-trace/trace-core'
@ -36,6 +36,7 @@ import { HOME_CHERRY_DIR } from '@shared/config/constant'
import type { MCPProgressEvent } from '@shared/config/types' import type { MCPProgressEvent } from '@shared/config/types'
import type { MCPServerLogEntry } from '@shared/config/types' import type { MCPServerLogEntry } from '@shared/config/types'
import { IpcChannel } from '@shared/IpcChannel' import { IpcChannel } from '@shared/IpcChannel'
import { buildFunctionCallToolName } from '@shared/mcp'
import { defaultAppHeaders } from '@shared/utils' import { defaultAppHeaders } from '@shared/utils'
import { import {
BuiltinMCPServerNames, BuiltinMCPServerNames,
@ -165,6 +166,67 @@ class McpService {
this.getServerLogs = this.getServerLogs.bind(this) this.getServerLogs = this.getServerLogs.bind(this)
} }
/**
* List all tools from all active MCP servers (excluding hub).
* Used by Hub server's tool registry.
*/
public async listAllActiveServerTools(): Promise<MCPTool[]> {
const servers = await getMCPServersFromRedux()
const activeServers = servers.filter((server) => server.isActive)
const results = await Promise.allSettled(
activeServers.map(async (server) => {
const tools = await this.listToolsImpl(server)
const disabledTools = new Set(server.disabledTools ?? [])
return disabledTools.size > 0 ? tools.filter((tool) => !disabledTools.has(tool.name)) : tools
})
)
const allTools: MCPTool[] = []
results.forEach((result, index) => {
if (result.status === 'fulfilled') {
allTools.push(...result.value)
} else {
logger.error(
`[listAllActiveServerTools] Failed to list tools from ${activeServers[index].name}:`,
result.reason as Error
)
}
})
return allTools
}
/**
* Call a tool by its full ID (serverId__toolName format).
* Used by Hub server's runtime.
*/
public async callToolById(toolId: string, params: unknown, callId?: string): Promise<MCPCallToolResponse> {
const parts = toolId.split('__')
if (parts.length < 2) {
throw new Error(`Invalid tool ID format: ${toolId}`)
}
const serverId = parts[0]
const toolName = parts.slice(1).join('__')
const servers = await getMCPServersFromRedux()
const server = servers.find((s) => s.id === serverId)
if (!server) {
throw new Error(`Server not found: ${serverId}`)
}
logger.debug(`[callToolById] Calling tool ${toolName} on server ${server.name}`)
return this.callTool(null as unknown as Electron.IpcMainInvokeEvent, {
server,
name: toolName,
args: params,
callId
})
}
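// Illustrative sketch (editor's addition): the Hub bridge builds the toolId from its generated
// tool map, so a call like the following (ids hypothetical) reaches callTool with the resolved
// server and the original tool name:
//   await mcpService.callToolById('alpha__search_issues', { query: 'crash' }, 'call-123')
// Tool names may themselves contain '__', which is why everything after the first separator is
// rejoined as the tool name.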
private getServerKey(server: MCPServer): string { private getServerKey(server: MCPServer): string {
return JSON.stringify({ return JSON.stringify({
baseUrl: server.baseUrl, baseUrl: server.baseUrl,

View File

@ -0,0 +1,75 @@
import type { MCPServer, MCPTool } from '@types'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
vi.mock('@main/apiServer/utils/mcp', () => ({
getMCPServersFromRedux: vi.fn()
}))
vi.mock('@main/services/WindowService', () => ({
windowService: {
getMainWindow: vi.fn(() => null)
}
}))
import { getMCPServersFromRedux } from '@main/apiServer/utils/mcp'
import mcpService from '@main/services/MCPService'
const baseInputSchema: { type: 'object'; properties: Record<string, unknown>; required: string[] } = {
type: 'object',
properties: {},
required: []
}
const createTool = (overrides: Partial<MCPTool>): MCPTool => ({
id: `${overrides.serverId}__${overrides.name}`,
name: overrides.name ?? 'tool',
description: overrides.description,
serverId: overrides.serverId ?? 'server',
serverName: overrides.serverName ?? 'server',
inputSchema: baseInputSchema,
type: 'mcp',
...overrides
})
describe('MCPService.listAllActiveServerTools', () => {
beforeEach(() => {
vi.clearAllMocks()
})
afterEach(() => {
vi.restoreAllMocks()
})
it('filters disabled tools per server', async () => {
const servers: MCPServer[] = [
{
id: 'alpha',
name: 'Alpha',
isActive: true,
disabledTools: ['disabled_tool']
},
{
id: 'beta',
name: 'Beta',
isActive: true
}
]
vi.mocked(getMCPServersFromRedux).mockResolvedValue(servers)
const listToolsSpy = vi.spyOn(mcpService as any, 'listToolsImpl').mockImplementation(async (server: any) => {
if (server.id === 'alpha') {
return [
createTool({ name: 'enabled_tool', serverId: server.id, serverName: server.name }),
createTool({ name: 'disabled_tool', serverId: server.id, serverName: server.name })
]
}
return [createTool({ name: 'beta_tool', serverId: server.id, serverName: server.name })]
})
const tools = await mcpService.listAllActiveServerTools()
expect(listToolsSpy).toHaveBeenCalledTimes(2)
expect(tools.map((tool) => tool.name)).toEqual(['enabled_tool', 'beta_tool'])
})
})

View File

@ -1,7 +1,7 @@
import { loggerService } from '@logger' import { loggerService } from '@logger'
import { mcpApiService } from '@main/apiServer/services/mcp' import { mcpApiService } from '@main/apiServer/services/mcp'
import { type ModelValidationError, validateModelId } from '@main/apiServer/utils' import { type ModelValidationError, validateModelId } from '@main/apiServer/utils'
import { buildFunctionCallToolName } from '@main/utils/mcp' import { buildFunctionCallToolName } from '@shared/mcp'
import type { AgentType, MCPTool, SlashCommand, Tool } from '@types' import type { AgentType, MCPTool, SlashCommand, Tool } from '@types'
import { objectKeys } from '@types' import { objectKeys } from '@types'
import fs from 'fs' import fs from 'fs'

View File

@ -1,225 +0,0 @@
import { describe, expect, it } from 'vitest'
import { buildFunctionCallToolName } from '../mcp'
describe('buildFunctionCallToolName', () => {
describe('basic format', () => {
it('should return format mcp__{server}__{tool}', () => {
const result = buildFunctionCallToolName('github', 'search_issues')
expect(result).toBe('mcp__github__search_issues')
})
it('should handle simple server and tool names', () => {
expect(buildFunctionCallToolName('fetch', 'get_page')).toBe('mcp__fetch__get_page')
expect(buildFunctionCallToolName('database', 'query')).toBe('mcp__database__query')
expect(buildFunctionCallToolName('cherry_studio', 'search')).toBe('mcp__cherry_studio__search')
})
})
describe('valid JavaScript identifier', () => {
it('should always start with mcp__ prefix (valid JS identifier start)', () => {
const result = buildFunctionCallToolName('123server', '456tool')
expect(result).toMatch(/^mcp__/)
expect(result).toBe('mcp__123server__456tool')
})
it('should only contain alphanumeric chars and underscores', () => {
const result = buildFunctionCallToolName('my-server', 'my-tool')
expect(result).toBe('mcp__my_server__my_tool')
expect(result).toMatch(/^[a-zA-Z][a-zA-Z0-9_]*$/)
})
it('should be a valid JavaScript identifier', () => {
const testCases = [
['github', 'create_issue'],
['my-server', 'fetch-data'],
['test@server', 'tool#name'],
['server.name', 'tool.action'],
['123abc', 'def456']
]
for (const [server, tool] of testCases) {
const result = buildFunctionCallToolName(server, tool)
// Valid JS identifiers match this pattern
expect(result).toMatch(/^[a-zA-Z_][a-zA-Z0-9_]*$/)
}
})
})
describe('character sanitization', () => {
it('should replace dashes with underscores', () => {
const result = buildFunctionCallToolName('my-server', 'my-tool-name')
expect(result).toBe('mcp__my_server__my_tool_name')
})
it('should replace special characters with underscores', () => {
const result = buildFunctionCallToolName('test@server!', 'tool#name$')
expect(result).toBe('mcp__test_server__tool_name')
})
it('should replace dots with underscores', () => {
const result = buildFunctionCallToolName('server.name', 'tool.action')
expect(result).toBe('mcp__server_name__tool_action')
})
it('should replace spaces with underscores', () => {
const result = buildFunctionCallToolName('my server', 'my tool')
expect(result).toBe('mcp__my_server__my_tool')
})
it('should collapse consecutive underscores', () => {
const result = buildFunctionCallToolName('my--server', 'my___tool')
expect(result).toBe('mcp__my_server__my_tool')
expect(result).not.toMatch(/_{3,}/)
})
it('should trim leading and trailing underscores from parts', () => {
const result = buildFunctionCallToolName('_server_', '_tool_')
expect(result).toBe('mcp__server__tool')
})
it('should handle names with only special characters', () => {
const result = buildFunctionCallToolName('---', '###')
expect(result).toBe('mcp____')
})
})
describe('length constraints', () => {
it('should not exceed 63 characters', () => {
const longServerName = 'a'.repeat(50)
const longToolName = 'b'.repeat(50)
const result = buildFunctionCallToolName(longServerName, longToolName)
expect(result.length).toBeLessThanOrEqual(63)
})
it('should truncate server name to max 20 chars', () => {
const longServerName = 'abcdefghijklmnopqrstuvwxyz' // 26 chars
const result = buildFunctionCallToolName(longServerName, 'tool')
expect(result).toBe('mcp__abcdefghijklmnopqrst__tool')
expect(result).toContain('abcdefghijklmnopqrst') // First 20 chars
expect(result).not.toContain('uvwxyz') // Truncated
})
it('should truncate tool name to max 35 chars', () => {
const longToolName = 'a'.repeat(40)
const result = buildFunctionCallToolName('server', longToolName)
const expectedTool = 'a'.repeat(35)
expect(result).toBe(`mcp__server__${expectedTool}`)
})
it('should not end with underscores after truncation', () => {
// Create a name that would end with underscores after truncation
const longServerName = 'a'.repeat(20)
const longToolName = 'b'.repeat(35) + '___extra'
const result = buildFunctionCallToolName(longServerName, longToolName)
expect(result).not.toMatch(/_+$/)
expect(result.length).toBeLessThanOrEqual(63)
})
it('should handle max length edge case exactly', () => {
// mcp__ (5) + server (20) + __ (2) + tool (35) = 62 chars
const server = 'a'.repeat(20)
const tool = 'b'.repeat(35)
const result = buildFunctionCallToolName(server, tool)
expect(result.length).toBe(62)
expect(result).toBe(`mcp__${'a'.repeat(20)}__${'b'.repeat(35)}`)
})
})
describe('edge cases', () => {
it('should handle empty server name', () => {
const result = buildFunctionCallToolName('', 'tool')
expect(result).toBe('mcp____tool')
})
it('should handle empty tool name', () => {
const result = buildFunctionCallToolName('server', '')
expect(result).toBe('mcp__server__')
})
it('should handle both empty names', () => {
const result = buildFunctionCallToolName('', '')
expect(result).toBe('mcp____')
})
it('should handle whitespace-only names', () => {
const result = buildFunctionCallToolName(' ', ' ')
expect(result).toBe('mcp____')
})
it('should trim whitespace from names', () => {
const result = buildFunctionCallToolName(' server ', ' tool ')
expect(result).toBe('mcp__server__tool')
})
it('should handle unicode characters', () => {
const result = buildFunctionCallToolName('服务器', '工具')
// Unicode chars are replaced with underscores, then collapsed
expect(result).toMatch(/^mcp__/)
})
it('should handle mixed case', () => {
const result = buildFunctionCallToolName('MyServer', 'MyTool')
expect(result).toBe('mcp__MyServer__MyTool')
})
})
describe('deterministic output', () => {
it('should produce consistent results for same input', () => {
const serverName = 'github'
const toolName = 'search_repos'
const result1 = buildFunctionCallToolName(serverName, toolName)
const result2 = buildFunctionCallToolName(serverName, toolName)
const result3 = buildFunctionCallToolName(serverName, toolName)
expect(result1).toBe(result2)
expect(result2).toBe(result3)
})
it('should produce different results for different inputs', () => {
const result1 = buildFunctionCallToolName('server1', 'tool')
const result2 = buildFunctionCallToolName('server2', 'tool')
const result3 = buildFunctionCallToolName('server', 'tool1')
const result4 = buildFunctionCallToolName('server', 'tool2')
expect(result1).not.toBe(result2)
expect(result3).not.toBe(result4)
})
})
describe('real-world scenarios', () => {
it('should handle GitHub MCP server', () => {
expect(buildFunctionCallToolName('github', 'create_issue')).toBe('mcp__github__create_issue')
expect(buildFunctionCallToolName('github', 'search_repositories')).toBe('mcp__github__search_repositories')
expect(buildFunctionCallToolName('github', 'get_pull_request')).toBe('mcp__github__get_pull_request')
})
it('should handle filesystem MCP server', () => {
expect(buildFunctionCallToolName('filesystem', 'read_file')).toBe('mcp__filesystem__read_file')
expect(buildFunctionCallToolName('filesystem', 'write_file')).toBe('mcp__filesystem__write_file')
expect(buildFunctionCallToolName('filesystem', 'list_directory')).toBe('mcp__filesystem__list_directory')
})
it('should handle hyphenated server names (common in npm packages)', () => {
expect(buildFunctionCallToolName('cherry-fetch', 'get_page')).toBe('mcp__cherry_fetch__get_page')
expect(buildFunctionCallToolName('mcp-server-github', 'search')).toBe('mcp__mcp_server_github__search')
})
it('should handle scoped npm package style names', () => {
const result = buildFunctionCallToolName('@anthropic/mcp-server', 'chat')
expect(result).toBe('mcp__anthropic_mcp_server__chat')
})
it('should handle tools with long descriptive names', () => {
const result = buildFunctionCallToolName('github', 'search_repositories_by_language_and_stars')
expect(result.length).toBeLessThanOrEqual(63)
expect(result).toMatch(/^mcp__github__search_repositories_by_lan/)
})
})
})

View File

@ -13,18 +13,13 @@ export async function getIpCountry(): Promise<string> {
const controller = new AbortController() const controller = new AbortController()
const timeoutId = setTimeout(() => controller.abort(), 5000) const timeoutId = setTimeout(() => controller.abort(), 5000)
const ipinfo = await net.fetch('https://ipinfo.io/json', { const ipinfo = await net.fetch(`https://api.ipinfo.io/lite/me?token=2a42580355dae4`, {
signal: controller.signal, signal: controller.signal
headers: {
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
'Accept-Language': 'en-US,en;q=0.9'
}
}) })
clearTimeout(timeoutId) clearTimeout(timeoutId)
const data = await ipinfo.json() const data = await ipinfo.json()
const country = data.country || 'CN' const country = data.country_code || 'CN'
logger.info(`Detected user IP address country: ${country}`) logger.info(`Detected user IP address country: ${country}`)
return country return country
} catch (error) { } catch (error) {

View File

@ -1,29 +0,0 @@
/**
* Builds a valid JavaScript function name for MCP tool calls.
* Format: mcp__{server_name}__{tool_name}
*
* @param serverName - The MCP server name
* @param toolName - The tool name from the server
* @returns A valid JS identifier in format mcp__{server}__{tool}, max 63 chars
*/
export function buildFunctionCallToolName(serverName: string, toolName: string): string {
// Sanitize to valid JS identifier chars (alphanumeric + underscore only)
const sanitize = (str: string): string =>
str
.trim()
.replace(/[^a-zA-Z0-9]/g, '_') // Replace all non-alphanumeric with underscore
.replace(/_{2,}/g, '_') // Collapse multiple underscores
.replace(/^_+|_+$/g, '') // Trim leading/trailing underscores
const server = sanitize(serverName).slice(0, 20) // Keep server name short
const tool = sanitize(toolName).slice(0, 35) // More room for tool name
let name = `mcp__${server}__${tool}`
// Ensure max 63 chars and clean trailing underscores
if (name.length > 63) {
name = name.slice(0, 63).replace(/_+$/, '')
}
return name
}

View File

@ -1,7 +1,7 @@
import type { WebSearchPluginConfig } from '@cherrystudio/ai-core/built-in/plugins' import type { WebSearchPluginConfig } from '@cherrystudio/ai-core/built-in/plugins'
import { loggerService } from '@logger' import { loggerService } from '@logger'
import { isGemini3Model, isSupportedThinkingTokenQwenModel } from '@renderer/config/models' import { isGemini3Model, isSupportedThinkingTokenQwenModel } from '@renderer/config/models'
import type { MCPTool } from '@renderer/types' import type { McpMode, MCPTool } from '@renderer/types'
import { type Assistant, type Message, type Model, type Provider, SystemProviderIds } from '@renderer/types' import { type Assistant, type Message, type Model, type Provider, SystemProviderIds } from '@renderer/types'
import type { Chunk } from '@renderer/types/chunk' import type { Chunk } from '@renderer/types/chunk'
import { isOllamaProvider, isSupportEnableThinkingProvider } from '@renderer/utils/provider' import { isOllamaProvider, isSupportEnableThinkingProvider } from '@renderer/utils/provider'
@ -38,6 +38,7 @@ export interface AiSdkMiddlewareConfig {
enableWebSearch: boolean enableWebSearch: boolean
enableGenerateImage: boolean enableGenerateImage: boolean
enableUrlContext: boolean enableUrlContext: boolean
mcpMode?: McpMode
mcpTools?: MCPTool[] mcpTools?: MCPTool[]
uiMessages?: Message[] uiMessages?: Message[]
// 内置搜索配置 // 内置搜索配置
@ -182,13 +183,12 @@ function addProviderSpecificMiddlewares(builder: AiSdkMiddlewareBuilder, config:
break break
case 'openai': case 'openai':
case 'azure-openai': { case 'azure-openai': {
if (config.enableReasoning) { // 就算这里不传参数也有可能调用推理
const tagName = getReasoningTagName(config.model?.id.toLowerCase()) const tagName = getReasoningTagName(config.model?.id.toLowerCase())
builder.add({ builder.add({
name: 'thinking-tag-extraction', name: 'thinking-tag-extraction',
middleware: extractReasoningMiddleware({ tagName }) middleware: extractReasoningMiddleware({ tagName })
}) })
}
break break
} }
case 'gemini': case 'gemini':

View File

@ -47,6 +47,7 @@ export async function buildPlugins(
plugins.push( plugins.push(
createPromptToolUsePlugin({ createPromptToolUsePlugin({
enabled: true, enabled: true,
mcpMode: middlewareConfig.mcpMode,
createSystemMessage: (systemPrompt, params, context) => { createSystemMessage: (systemPrompt, params, context) => {
const modelId = typeof context.model === 'string' ? context.model : context.model.modelId const modelId = typeof context.model === 'string' ? context.model : context.model.modelId
if (modelId.includes('o1-mini') || modelId.includes('o1-preview')) { if (modelId.includes('o1-mini') || modelId.includes('o1-preview')) {

View File

@ -26,11 +26,13 @@ import {
isSupportedThinkingTokenModel, isSupportedThinkingTokenModel,
isWebSearchModel isWebSearchModel
} from '@renderer/config/models' } from '@renderer/config/models'
import { getHubModeSystemPrompt } from '@renderer/config/prompts-code-mode'
import { fetchAllActiveServerTools } from '@renderer/services/ApiService'
import { getDefaultModel } from '@renderer/services/AssistantService' import { getDefaultModel } from '@renderer/services/AssistantService'
import store from '@renderer/store' import store from '@renderer/store'
import type { CherryWebSearchConfig } from '@renderer/store/websearch' import type { CherryWebSearchConfig } from '@renderer/store/websearch'
import type { Model } from '@renderer/types' import type { Model } from '@renderer/types'
import { type Assistant, type MCPTool, type Provider, SystemProviderIds } from '@renderer/types' import { type Assistant, getEffectiveMcpMode, type MCPTool, type Provider, SystemProviderIds } from '@renderer/types'
import type { StreamTextParams } from '@renderer/types/aiCoreTypes' import type { StreamTextParams } from '@renderer/types/aiCoreTypes'
import { mapRegexToPatterns } from '@renderer/utils/blacklistMatchPattern' import { mapRegexToPatterns } from '@renderer/utils/blacklistMatchPattern'
import { replacePromptVariables } from '@renderer/utils/prompt' import { replacePromptVariables } from '@renderer/utils/prompt'
@ -243,8 +245,18 @@ export async function buildStreamTextParams(
params.tools = tools params.tools = tools
} }
if (assistant.prompt) { let systemPrompt = assistant.prompt ? await replacePromptVariables(assistant.prompt, model.name) : ''
params.system = await replacePromptVariables(assistant.prompt, model.name)
if (getEffectiveMcpMode(assistant) === 'auto') {
const allActiveTools = await fetchAllActiveServerTools()
const autoModePrompt = getHubModeSystemPrompt(allActiveTools)
if (autoModePrompt) {
systemPrompt = systemPrompt ? `${systemPrompt}\n\n${autoModePrompt}` : autoModePrompt
}
}
if (systemPrompt) {
params.system = systemPrompt
} }
logger.debug('params', params) logger.debug('params', params)

View File

@ -1,5 +1,5 @@
import { formatPrivateKey, hasProviderConfig, ProviderConfigFactory } from '@cherrystudio/ai-core/provider' import { formatPrivateKey, hasProviderConfig, ProviderConfigFactory } from '@cherrystudio/ai-core/provider'
import { isOpenAIChatCompletionOnlyModel } from '@renderer/config/models' import { isOpenAIChatCompletionOnlyModel, isOpenAIReasoningModel } from '@renderer/config/models'
import { import {
getAwsBedrockAccessKeyId, getAwsBedrockAccessKeyId,
getAwsBedrockApiKey, getAwsBedrockApiKey,
@ -29,6 +29,7 @@ import {
isNewApiProvider, isNewApiProvider,
isOllamaProvider, isOllamaProvider,
isPerplexityProvider, isPerplexityProvider,
isSupportDeveloperRoleProvider,
isSupportStreamOptionsProvider, isSupportStreamOptionsProvider,
isVertexProvider isVertexProvider
} from '@renderer/utils/provider' } from '@renderer/utils/provider'
@ -264,6 +265,14 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A
} }
} }
// Apply developer-to-system role conversion for providers that don't support developer role
// bug: https://github.com/vercel/ai/issues/10982
// fixPR: https://github.com/vercel/ai/pull/11127
// TODO: the fix was not backported to v5; this code can be removed when upgrading to v6
if (!isSupportDeveloperRoleProvider(actualProvider) || !isOpenAIReasoningModel(model)) {
extraOptions.fetch = createDeveloperToSystemFetch(extraOptions.fetch)
}
if (hasProviderConfig(aiSdkProviderId) && aiSdkProviderId !== 'openai-compatible') { if (hasProviderConfig(aiSdkProviderId) && aiSdkProviderId !== 'openai-compatible') {
const options = ProviderConfigFactory.fromProvider(aiSdkProviderId, baseConfig, extraOptions) const options = ProviderConfigFactory.fromProvider(aiSdkProviderId, baseConfig, extraOptions)
return { return {
@ -302,6 +311,44 @@ export function isModernSdkSupported(provider: Provider): boolean {
return hasProviderConfig(aiSdkProviderId) return hasProviderConfig(aiSdkProviderId)
} }
/**
* Creates a custom fetch wrapper that converts 'developer' role to 'system' role in request body.
* This is needed for providers that don't support the 'developer' role (e.g., Azure DeepSeek R1).
*
* @param originalFetch - Optional original fetch function to wrap
* @returns A fetch function that transforms the request body
*/
function createDeveloperToSystemFetch(originalFetch?: typeof fetch): typeof fetch {
const baseFetch = originalFetch ?? fetch
return async (input: RequestInfo | URL, init?: RequestInit) => {
let options = init
if (options?.body && typeof options.body === 'string') {
try {
const body = JSON.parse(options.body)
if (body.messages && Array.isArray(body.messages)) {
let hasChanges = false
body.messages = body.messages.map((msg: { role: string }) => {
if (msg.role === 'developer') {
hasChanges = true
return { ...msg, role: 'system' }
}
return msg
})
if (hasChanges) {
options = {
...options,
body: JSON.stringify(body)
}
}
}
} catch {
// If parsing fails, just use original body
}
}
return baseFetch(input, options)
}
}
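// Illustrative sketch (editor's addition): the wrapper only rewrites the role field. A request
// body such as
//   { "messages": [{ "role": "developer", "content": "Be terse." }, { "role": "user", "content": "Hi" }] }
// is forwarded as
//   { "messages": [{ "role": "system", "content": "Be terse." }, { "role": "user", "content": "Hi" }] }
// Bodies that are not JSON or have no messages array pass through untouched.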
/** /**
* provider的配置, * provider的配置,
*/ */
@ -360,5 +407,6 @@ export async function prepareSpecialProviderConfig(
} }
} }
} }
return config return config
} }

View File

@ -118,6 +118,11 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
return { thinking: { type: 'disabled' } } return { thinking: { type: 'disabled' } }
} }
// DeepSeek hybrid inference models default to non-thinking behavior
if (isDeepSeekHybridInferenceModel(model)) {
return {}
}
// GPT 5.1, GPT 5.2, or newer // GPT 5.1, GPT 5.2, or newer
if (isSupportNoneReasoningEffortModel(model)) { if (isSupportNoneReasoningEffortModel(model)) {
return { return {

View File

@ -580,7 +580,7 @@ const RichEditor = ({
<GripVertical /> <GripVertical />
</Tooltip> </Tooltip>
</DragHandle> </DragHandle>
<EditorContent style={{ height: '100%' }} editor={editor} /> <EditorContent style={{ minHeight: '100%' }} editor={editor} />
</StyledEditorContent> </StyledEditorContent>
</Scrollbar> </Scrollbar>
{enableContentSearch && ( {enableContentSearch && (

View File

@ -745,7 +745,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
}) })
it('should return doubao_after_251015 for Doubao-Seed-1.8 models', () => { it('should return doubao_after_251015 for Doubao-Seed-1.8 models', () => {
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251215' }))).toBe('doubao_after_251015') expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251228' }))).toBe('doubao_after_251015')
expect(getThinkModelType(createModel({ id: 'doubao-seed-1.8' }))).toBe('doubao_after_251015') expect(getThinkModelType(createModel({ id: 'doubao-seed-1.8' }))).toBe('doubao_after_251015')
}) })
@ -879,7 +879,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
// auto > after_251015 > no_auto // auto > after_251015 > no_auto
expect(getThinkModelType(createModel({ id: 'doubao-seed-1.6' }))).toBe('doubao') expect(getThinkModelType(createModel({ id: 'doubao-seed-1.6' }))).toBe('doubao')
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-6-251015' }))).toBe('doubao_after_251015') expect(getThinkModelType(createModel({ id: 'doubao-seed-1-6-251015' }))).toBe('doubao_after_251015')
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251215' }))).toBe('doubao_after_251015') expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251228' }))).toBe('doubao_after_251015')
expect(getThinkModelType(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toBe('doubao_no_auto') expect(getThinkModelType(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toBe('doubao_no_auto')
}) })

View File

@ -771,7 +771,7 @@ export const SYSTEM_MODELS: Record<SystemProviderId | 'defaultModel', Model[]> =
], ],
doubao: [ doubao: [
{ {
id: 'doubao-seed-1-8-251215', id: 'doubao-seed-1-8-251228',
provider: 'doubao', provider: 'doubao',
name: 'Doubao-Seed-1.8', name: 'Doubao-Seed-1.8',
group: 'Doubao-Seed-1.8' group: 'Doubao-Seed-1.8'

View File

@ -0,0 +1,174 @@
import { generateMcpToolFunctionName } from '@shared/mcp'
export interface ToolInfo {
name: string
serverName?: string
description?: string
}
/**
* Hub Mode System Prompt - For native MCP tool calling
* Used when model supports native function calling via MCP protocol
*/
const HUB_MODE_SYSTEM_PROMPT_BASE = `
## Hub MCP Tools Code Execution Mode
You can discover and call MCP tools through the hub server using **ONLY two meta-tools**: **search** and **exec**.
### IMPORTANT: You can ONLY call these two tools directly
| Tool | Purpose |
|------|---------|
| \`search\` | Discover available tools and their signatures |
| \`exec\` | Execute JavaScript code that calls the discovered tools |
**All other tools (listed in "Discoverable Tools" below) can ONLY be called from INSIDE \`exec\` code.**
You CANNOT call them directly as tool calls. They are async functions available within the \`exec\` runtime.
### Critical Rules (Read First)
1. **ONLY \`search\` and \`exec\` are callable as tools.** All other tools must be used inside \`exec\` code.
2. You MUST explicitly \`return\` the final value from your \`exec\` code. If you do not return a value, the result will be \`undefined\`.
3. All MCP tools inside \`exec\` are async functions. Always call them as \`await ToolName(params)\`.
4. Use the exact function names and parameter shapes returned by \`search\`.
5. You CANNOT call \`search\` or \`exec\` from inside \`exec\` code—use them only as direct tool calls.
6. \`console.log\` output is NOT the result. Logs are separate; the final answer must come from \`return\`.
### Workflow
1. Call \`search\` with relevant keywords to discover tools.
2. Read the returned JavaScript function declarations and JSDoc to understand names and parameters.
3. Call \`exec\` with JavaScript code that uses the discovered tools and ends with an explicit \`return\`.
4. Use the \`exec\` result as your answer.
### What \`search\` Does
- Input: keyword string (comma-separated for OR-matching), plus optional \`limit\`.
- Output: JavaScript async function declarations with JSDoc showing exact function names, parameters, and return types.
### What \`exec\` Does
- Runs JavaScript code in an isolated async context (wrapped as \`(async () => { your code })()\`).
- All discovered tools are exposed as async functions: \`await ToolName(params)\`.
- Available helpers:
- \`parallel(...promises)\` → \`Promise.all(promises)\`
- \`settle(...promises)\` → \`Promise.allSettled(promises)\`
- \`console.log/info/warn/error/debug\`
- Returns JSON with: \`result\` (your returned value), \`logs\` (optional), \`error\` (optional), \`isError\` (optional).
### Example: Single Tool Call
\`\`\`javascript
// Step 1: search({ query: "browser,fetch" })
// Step 2: exec with:
const page = await CherryBrowser_fetch({ url: "https://example.com" })
return page
\`\`\`
### Example: Multiple Tools with Parallel
\`\`\`javascript
const [forecast, time] = await parallel(
Weather_getForecast({ city: "Paris" }),
Time_getLocalTime({ city: "Paris" })
)
return { city: "Paris", forecast, time }
\`\`\`
### Example: Handle Partial Failures with Settle
\`\`\`javascript
const results = await settle(
Weather_getForecast({ city: "Paris" }),
Weather_getForecast({ city: "Tokyo" })
)
const successful = results.filter(r => r.status === "fulfilled").map(r => r.value)
return { results, successful }
\`\`\`
### Example: Error Handling
\`\`\`javascript
try {
const user = await User_lookup({ email: "user@example.com" })
return { found: true, user }
} catch (error) {
return { found: false, error: String(error) }
}
\`\`\`
### Common Mistakes to Avoid
**Forgetting to return** (result will be \`undefined\`):
\`\`\`javascript
const data = await SomeTool({ id: "123" })
// Missing return!
\`\`\`
**Always return**:
\`\`\`javascript
const data = await SomeTool({ id: "123" })
return data
\`\`\`
**Only logging, not returning**:
\`\`\`javascript
const data = await SomeTool({ id: "123" })
console.log(data) // Logs are NOT the result!
\`\`\`
**Missing await**:
\`\`\`javascript
const data = SomeTool({ id: "123" }) // Returns Promise, not value!
return data
\`\`\`
**Awaiting before parallel**:
\`\`\`javascript
await parallel(await ToolA(), await ToolB()) // Wrong: runs sequentially
\`\`\`
**Pass promises directly to parallel**:
\`\`\`javascript
await parallel(ToolA(), ToolB()) // Correct: runs in parallel
\`\`\`
### Best Practices
- Always call \`search\` first to discover tools and confirm signatures.
- Always use an explicit \`return\` at the end of \`exec\` code.
- Use \`parallel\` for independent operations that can run at the same time.
- Use \`settle\` when some calls may fail but you still want partial results.
- Prefer a single \`exec\` call for multi-step flows.
- Treat \`console.*\` as debugging only, never as the primary result.
`
function buildToolsSection(tools: ToolInfo[]): string {
const existingNames = new Set<string>()
return tools
.map((t) => {
const functionName = generateMcpToolFunctionName(t.serverName, t.name, existingNames)
const desc = t.description || ''
const normalizedDesc = desc.replace(/\s+/g, ' ').trim()
const truncatedDesc = normalizedDesc.length > 50 ? `${normalizedDesc.slice(0, 50)}...` : normalizedDesc
return `- ${functionName}: ${truncatedDesc}`
})
.join('\n')
}
export function getHubModeSystemPrompt(tools: ToolInfo[] = []): string {
if (tools.length === 0) {
return ''
}
const toolsSection = buildToolsSection(tools)
return `${HUB_MODE_SYSTEM_PROMPT_BASE}
## Discoverable Tools (ONLY usable inside \`exec\` code, NOT as direct tool calls)
The following tools are available inside \`exec\`. Use \`search\` to get their full signatures.
Do NOT call these directly; wrap them in \`exec\` code.
${toolsSection}
`
}
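// Illustrative sketch (editor's addition): appending the hub-mode prompt to an assistant prompt,
// mirroring how buildStreamTextParams uses it when the effective MCP mode is 'auto'. The tool
// entry is hypothetical.
declare const assistantPrompt: string
const hubPrompt = getHubModeSystemPrompt([
{ name: 'get_forecast', serverName: 'weather', description: 'Get the forecast for a city' }
])
const systemPrompt = hubPrompt ? `${assistantPrompt}\n\n${hubPrompt}` : assistantPrompt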

View File

@ -332,7 +332,8 @@ const builtInMcpDescriptionKeyMap: Record<BuiltinMCPServerName, string> = {
[BuiltinMCPServerNames.python]: 'settings.mcp.builtinServersDescriptions.python', [BuiltinMCPServerNames.python]: 'settings.mcp.builtinServersDescriptions.python',
[BuiltinMCPServerNames.didiMCP]: 'settings.mcp.builtinServersDescriptions.didi_mcp', [BuiltinMCPServerNames.didiMCP]: 'settings.mcp.builtinServersDescriptions.didi_mcp',
[BuiltinMCPServerNames.browser]: 'settings.mcp.builtinServersDescriptions.browser', [BuiltinMCPServerNames.browser]: 'settings.mcp.builtinServersDescriptions.browser',
[BuiltinMCPServerNames.nowledgeMem]: 'settings.mcp.builtinServersDescriptions.nowledge_mem' [BuiltinMCPServerNames.nowledgeMem]: 'settings.mcp.builtinServersDescriptions.nowledge_mem',
[BuiltinMCPServerNames.hub]: 'settings.mcp.builtinServersDescriptions.hub'
} as const } as const
export const getBuiltInMcpServerDescriptionLabel = (key: string): string => { export const getBuiltInMcpServerDescriptionLabel = (key: string): string => {

View File

@ -544,6 +544,20 @@
"description": "Default enabled MCP servers", "description": "Default enabled MCP servers",
"enableFirst": "Enable this server in MCP settings first", "enableFirst": "Enable this server in MCP settings first",
"label": "MCP Servers", "label": "MCP Servers",
"mode": {
"auto": {
"description": "AI discovers and uses tools automatically",
"label": "Auto"
},
"disabled": {
"description": "No MCP tools",
"label": "Disabled"
},
"manual": {
"description": "Select specific MCP servers",
"label": "Manual"
}
},
"noServersAvailable": "No MCP servers available. Add servers in settings", "noServersAvailable": "No MCP servers available. Add servers in settings",
"title": "MCP Settings" "title": "MCP Settings"
}, },

View File

@ -544,6 +544,20 @@
"description": "默认启用的 MCP 服务器", "description": "默认启用的 MCP 服务器",
"enableFirst": "请先在 MCP 设置中启用此服务器", "enableFirst": "请先在 MCP 设置中启用此服务器",
"label": "MCP 服务器", "label": "MCP 服务器",
"mode": {
"auto": {
"description": "AI 自动发现和使用工具",
"label": "自动"
},
"disabled": {
"description": "不使用 MCP 工具",
"label": "禁用"
},
"manual": {
"description": "选择特定的 MCP 服务器",
"label": "手动"
}
},
"noServersAvailable": "无可用 MCP 服务器。请在设置中添加服务器", "noServersAvailable": "无可用 MCP 服务器。请在设置中添加服务器",
"title": "MCP 服务器" "title": "MCP 服务器"
}, },

View File

@ -544,6 +544,20 @@
"description": "預設啟用的 MCP 伺服器", "description": "預設啟用的 MCP 伺服器",
"enableFirst": "請先在 MCP 設定中啟用此伺服器", "enableFirst": "請先在 MCP 設定中啟用此伺服器",
"label": "MCP 伺服器", "label": "MCP 伺服器",
"mode": {
"auto": {
"description": "AI 自動發現和使用工具",
"label": "自動"
},
"disabled": {
"description": "不使用 MCP 工具",
"label": "停用"
},
"manual": {
"description": "選擇特定的 MCP 伺服器",
"label": "手動"
}
},
"noServersAvailable": "無可用 MCP 伺服器。請在設定中新增伺服器", "noServersAvailable": "無可用 MCP 伺服器。請在設定中新增伺服器",
"title": "MCP 設定" "title": "MCP 設定"
}, },

View File

@ -544,6 +544,20 @@
"description": "Standardmäßig aktivierte MCP-Server", "description": "Standardmäßig aktivierte MCP-Server",
"enableFirst": "Bitte aktivieren Sie diesen Server zuerst in den MCP-Einstellungen", "enableFirst": "Bitte aktivieren Sie diesen Server zuerst in den MCP-Einstellungen",
"label": "MCP-Server", "label": "MCP-Server",
"mode": {
"auto": {
"description": "KI entdeckt und nutzt Werkzeuge automatisch",
"label": "Auto"
},
"disabled": {
"description": "Keine MCP-Tools",
"label": "Deaktiviert"
},
"manual": {
"description": "Wählen Sie spezifische MCP-Server",
"label": "Handbuch"
}
},
"noServersAvailable": "Keine MCP-Server verfügbar. Bitte fügen Sie Server in den Einstellungen hinzu", "noServersAvailable": "Keine MCP-Server verfügbar. Bitte fügen Sie Server in den Einstellungen hinzu",
"title": "MCP-Server" "title": "MCP-Server"
}, },
@ -1297,6 +1311,7 @@
"backup": { "backup": {
"file_format": "Backup-Dateiformat fehlerhaft" "file_format": "Backup-Dateiformat fehlerhaft"
}, },
"base64DataTruncated": "Base64-Bilddaten abgeschnitten, Größe",
"boundary": { "boundary": {
"default": { "default": {
"devtools": "Debug-Panel öffnen", "devtools": "Debug-Panel öffnen",
@ -1377,6 +1392,8 @@
"text": "Text", "text": "Text",
"toolInput": "Tool-Eingabe", "toolInput": "Tool-Eingabe",
"toolName": "Tool-Name", "toolName": "Tool-Name",
"truncated": "Daten wurden gekürzt, Originalgröße",
"truncatedBadge": "Abgeschnitten",
"unknown": "Unbekannter Fehler", "unknown": "Unbekannter Fehler",
"usage": "Nutzung", "usage": "Nutzung",
"user_message_not_found": "Ursprüngliche Benutzernachricht nicht gefunden", "user_message_not_found": "Ursprüngliche Benutzernachricht nicht gefunden",

View File

@ -544,6 +544,20 @@
"description": "Διακομιστής MCP που είναι ενεργοποιημένος εξ ορισμού", "description": "Διακομιστής MCP που είναι ενεργοποιημένος εξ ορισμού",
"enableFirst": "Πρώτα ενεργοποιήστε αυτόν τον διακομιστή στις ρυθμίσεις MCP", "enableFirst": "Πρώτα ενεργοποιήστε αυτόν τον διακομιστή στις ρυθμίσεις MCP",
"label": "Διακομιστής MCP", "label": "Διακομιστής MCP",
"mode": {
"auto": {
"description": "Η τεχνητή νοημοσύνη ανακαλύπτει και χρησιμοποιεί εργαλεία αυτόματα",
"label": "Αυτόματο"
},
"disabled": {
"description": "Χωρίς εργαλεία MCP",
"label": "Ανάπηρος"
},
"manual": {
"description": "Επιλέξτε συγκεκριμένους διακομιστές MCP",
"label": "Εγχειρίδιο"
}
},
"noServersAvailable": "Δεν υπάρχουν διαθέσιμοι διακομιστές MCP. Προσθέστε ένα διακομιστή στις ρυθμίσεις", "noServersAvailable": "Δεν υπάρχουν διαθέσιμοι διακομιστές MCP. Προσθέστε ένα διακομιστή στις ρυθμίσεις",
"title": "Ρυθμίσεις MCP" "title": "Ρυθμίσεις MCP"
}, },
@ -1297,6 +1311,7 @@
"backup": { "backup": {
"file_format": "Λάθος μορφή αρχείου που επιστρέφεται" "file_format": "Λάθος μορφή αρχείου που επιστρέφεται"
}, },
"base64DataTruncated": "Τα δεδομένα εικόνας Base64 έχουν περικοπεί, μέγεθος",
"boundary": { "boundary": {
"default": { "default": {
"devtools": "Άνοιγμα πίνακα αποσφαλμάτωσης", "devtools": "Άνοιγμα πίνακα αποσφαλμάτωσης",
@ -1377,6 +1392,8 @@
"text": "κείμενο", "text": "κείμενο",
"toolInput": "εισαγωγή εργαλείου", "toolInput": "εισαγωγή εργαλείου",
"toolName": "Όνομα εργαλείου", "toolName": "Όνομα εργαλείου",
"truncated": "Δεδομένα περικόπηκαν, αρχικό μέγεθος",
"truncatedBadge": "Αποκομμένο",
"unknown": "Άγνωστο σφάλμα", "unknown": "Άγνωστο σφάλμα",
"usage": "δοσολογία", "usage": "δοσολογία",
"user_message_not_found": "Αδυναμία εύρεσης της αρχικής μηνύματος χρήστη", "user_message_not_found": "Αδυναμία εύρεσης της αρχικής μηνύματος χρήστη",

View File

@ -544,6 +544,20 @@
"description": "Servidor MCP habilitado por defecto", "description": "Servidor MCP habilitado por defecto",
"enableFirst": "Habilite este servidor en la configuración de MCP primero", "enableFirst": "Habilite este servidor en la configuración de MCP primero",
"label": "Servidor MCP", "label": "Servidor MCP",
"mode": {
"auto": {
"description": "La IA descubre y utiliza herramientas automáticamente",
"label": "Auto"
},
"disabled": {
"description": "Sin herramientas MCP",
"label": "Discapacitado"
},
"manual": {
"description": "Seleccionar servidores MCP específicos",
"label": "Manual"
}
},
"noServersAvailable": "No hay servidores MCP disponibles. Agregue un servidor en la configuración", "noServersAvailable": "No hay servidores MCP disponibles. Agregue un servidor en la configuración",
"title": "Configuración MCP" "title": "Configuración MCP"
}, },
@ -1297,6 +1311,7 @@
"backup": { "backup": {
"file_format": "Formato de archivo de copia de seguridad incorrecto" "file_format": "Formato de archivo de copia de seguridad incorrecto"
}, },
"base64DataTruncated": "Datos de imagen Base64 truncados, tamaño",
"boundary": { "boundary": {
"default": { "default": {
"devtools": "Abrir el panel de depuración", "devtools": "Abrir el panel de depuración",
@ -1377,6 +1392,8 @@
"text": "Texto", "text": "Texto",
"toolInput": "Herramienta de entrada", "toolInput": "Herramienta de entrada",
"toolName": "Nombre de la herramienta", "toolName": "Nombre de la herramienta",
"truncated": "Datos truncados, tamaño original",
"truncatedBadge": "Truncado",
"unknown": "Error desconocido", "unknown": "Error desconocido",
"usage": "Cantidad de uso", "usage": "Cantidad de uso",
"user_message_not_found": "No se pudo encontrar el mensaje original del usuario", "user_message_not_found": "No se pudo encontrar el mensaje original del usuario",

View File

@ -544,6 +544,20 @@
"description": "Serveur MCP activé par défaut", "description": "Serveur MCP activé par défaut",
"enableFirst": "Veuillez d'abord activer ce serveur dans les paramètres MCP", "enableFirst": "Veuillez d'abord activer ce serveur dans les paramètres MCP",
"label": "Serveur MCP", "label": "Serveur MCP",
"mode": {
"auto": {
"description": "L'IA découvre et utilise des outils automatiquement",
"label": "Auto"
},
"disabled": {
"description": "Aucun outil MCP",
"label": "Désactivé"
},
"manual": {
"description": "Sélectionner des serveurs MCP spécifiques",
"label": "Manuel"
}
},
"noServersAvailable": "Aucun serveur MCP disponible. Veuillez ajouter un serveur dans les paramètres", "noServersAvailable": "Aucun serveur MCP disponible. Veuillez ajouter un serveur dans les paramètres",
"title": "Paramètres MCP" "title": "Paramètres MCP"
}, },
@ -1297,6 +1311,7 @@
"backup": { "backup": {
"file_format": "Le format du fichier de sauvegarde est incorrect" "file_format": "Le format du fichier de sauvegarde est incorrect"
}, },
"base64DataTruncated": "Données d'image Base64 tronquées, taille",
"boundary": { "boundary": {
"default": { "default": {
"devtools": "Ouvrir le panneau de débogage", "devtools": "Ouvrir le panneau de débogage",
@ -1377,6 +1392,8 @@
"text": "texte", "text": "texte",
"toolInput": "entrée de l'outil", "toolInput": "entrée de l'outil",
"toolName": "Nom de l'outil", "toolName": "Nom de l'outil",
"truncated": "Données tronquées, taille d'origine",
"truncatedBadge": "Tronqué",
"unknown": "Неизвестная ошибка", "unknown": "Неизвестная ошибка",
"usage": "Quantité", "usage": "Quantité",
"user_message_not_found": "Impossible de trouver le message d'utilisateur original", "user_message_not_found": "Impossible de trouver le message d'utilisateur original",

View File

@ -544,6 +544,20 @@
"description": "デフォルトで有効な MCP サーバー", "description": "デフォルトで有効な MCP サーバー",
"enableFirst": "まず MCP 設定でこのサーバーを有効にしてください", "enableFirst": "まず MCP 設定でこのサーバーを有効にしてください",
"label": "MCP サーバー", "label": "MCP サーバー",
"mode": {
"auto": {
"description": "AIはツールを自動的に発見し、使用する",
"label": "オート"
},
"disabled": {
"description": "MCPツールなし",
"label": "無効"
},
"manual": {
"description": "特定のMCPサーバーを選択",
"label": "マニュアル"
}
},
"noServersAvailable": "利用可能な MCP サーバーがありません。設定でサーバーを追加してください", "noServersAvailable": "利用可能な MCP サーバーがありません。設定でサーバーを追加してください",
"title": "MCP 設定" "title": "MCP 設定"
}, },
@ -1297,6 +1311,7 @@
"backup": { "backup": {
"file_format": "バックアップファイルの形式エラー" "file_format": "バックアップファイルの形式エラー"
}, },
"base64DataTruncated": "Base64画像データが切り捨てられています、サイズ",
"boundary": { "boundary": {
"default": { "default": {
"devtools": "デバッグパネルを開く", "devtools": "デバッグパネルを開く",
@ -1377,6 +1392,8 @@
"text": "テキスト", "text": "テキスト",
"toolInput": "ツール入力", "toolInput": "ツール入力",
"toolName": "ツール名", "toolName": "ツール名",
"truncated": "データが切り捨てられました、元のサイズ",
"truncatedBadge": "切り捨て",
"unknown": "不明なエラー", "unknown": "不明なエラー",
"usage": "用量", "usage": "用量",
"user_message_not_found": "元のユーザーメッセージを見つけることができませんでした", "user_message_not_found": "元のユーザーメッセージを見つけることができませんでした",

View File

@ -544,6 +544,20 @@
"description": "Servidor MCP ativado por padrão", "description": "Servidor MCP ativado por padrão",
"enableFirst": "Por favor, ative este servidor nas configurações do MCP primeiro", "enableFirst": "Por favor, ative este servidor nas configurações do MCP primeiro",
"label": "Servidor MCP", "label": "Servidor MCP",
"mode": {
"auto": {
"description": "IA descobre e usa ferramentas automaticamente",
"label": "Auto"
},
"disabled": {
"description": "Sem ferramentas MCP",
"label": "Desativado"
},
"manual": {
"description": "Selecione servidores MCP específicos",
"label": "Manual"
}
},
"noServersAvailable": "Nenhum servidor MCP disponível. Adicione um servidor nas configurações", "noServersAvailable": "Nenhum servidor MCP disponível. Adicione um servidor nas configurações",
"title": "Configurações do MCP" "title": "Configurações do MCP"
}, },
@ -1297,6 +1311,7 @@
"backup": { "backup": {
"file_format": "Formato do arquivo de backup está incorreto" "file_format": "Formato do arquivo de backup está incorreto"
}, },
"base64DataTruncated": "Dados da imagem em Base64 truncados, tamanho",
"boundary": { "boundary": {
"default": { "default": {
"devtools": "Abrir o painel de depuração", "devtools": "Abrir o painel de depuração",
@ -1377,6 +1392,8 @@
"text": "texto", "text": "texto",
"toolInput": "ferramenta de entrada", "toolInput": "ferramenta de entrada",
"toolName": "Nome da ferramenta", "toolName": "Nome da ferramenta",
"truncated": "Dados truncados, tamanho original",
"truncatedBadge": "Truncado",
"unknown": "Erro desconhecido", "unknown": "Erro desconhecido",
"usage": "dosagem", "usage": "dosagem",
"user_message_not_found": "Não foi possível encontrar a mensagem original do usuário", "user_message_not_found": "Não foi possível encontrar a mensagem original do usuário",

View File

@ -544,6 +544,20 @@
"description": "Servere MCP activate implicit", "description": "Servere MCP activate implicit",
"enableFirst": "Activează mai întâi acest server în setările MCP", "enableFirst": "Activează mai întâi acest server în setările MCP",
"label": "Servere MCP", "label": "Servere MCP",
"mode": {
"auto": {
"description": "AI descoperă și folosește instrumente automat",
"label": "Auto"
},
"disabled": {
"description": "Niciun instrument MCP",
"label": "Dezactivat"
},
"manual": {
"description": "Selectați servere MCP specifice",
"label": "Manual"
}
},
"noServersAvailable": "Nu există servere MCP disponibile. Adaugă servere în setări", "noServersAvailable": "Nu există servere MCP disponibile. Adaugă servere în setări",
"title": "Setări MCP" "title": "Setări MCP"
}, },
@ -1297,6 +1311,7 @@
"backup": { "backup": {
"file_format": "Eroare format fișier backup" "file_format": "Eroare format fișier backup"
}, },
"base64DataTruncated": "Datele imagine Base64 sunt trunchiate, dimensiunea",
"boundary": { "boundary": {
"default": { "default": {
"devtools": "Deschide panoul de depanare", "devtools": "Deschide panoul de depanare",
@ -1377,6 +1392,8 @@
"text": "Text", "text": "Text",
"toolInput": "Intrare instrument", "toolInput": "Intrare instrument",
"toolName": "Nume instrument", "toolName": "Nume instrument",
"truncated": "Date trunchiate, dimensiunea originală",
"truncatedBadge": "Trunchiat",
"unknown": "Eroare necunoscută", "unknown": "Eroare necunoscută",
"usage": "Utilizare", "usage": "Utilizare",
"user_message_not_found": "Nu se poate găsi mesajul original al utilizatorului pentru a retrimite", "user_message_not_found": "Nu se poate găsi mesajul original al utilizatorului pentru a retrimite",

View File

@ -544,6 +544,20 @@
"description": "Серверы MCP, включенные по умолчанию", "description": "Серверы MCP, включенные по умолчанию",
"enableFirst": "Сначала включите этот сервер в настройках MCP", "enableFirst": "Сначала включите этот сервер в настройках MCP",
"label": "Серверы MCP", "label": "Серверы MCP",
"mode": {
"auto": {
"description": "ИИ самостоятельно обнаруживает и использует инструменты",
"label": "Авто"
},
"disabled": {
"description": "Нет инструментов MCP",
"label": "Отключено"
},
"manual": {
"description": "Выберите конкретные MCP-серверы",
"label": "Руководство"
}
},
"noServersAvailable": "Нет доступных серверов MCP. Добавьте серверы в настройках", "noServersAvailable": "Нет доступных серверов MCP. Добавьте серверы в настройках",
"title": "Настройки MCP" "title": "Настройки MCP"
}, },
@ -1297,6 +1311,7 @@
"backup": { "backup": {
"file_format": "Ошибка формата файла резервной копии" "file_format": "Ошибка формата файла резервной копии"
}, },
"base64DataTruncated": "Данные изображения в формате Base64 усечены, размер",
"boundary": { "boundary": {
"default": { "default": {
"devtools": "Открыть панель отладки", "devtools": "Открыть панель отладки",
@ -1377,6 +1392,8 @@
"text": "текст", "text": "текст",
"toolInput": "ввод инструмента", "toolInput": "ввод инструмента",
"toolName": "имя инструмента", "toolName": "имя инструмента",
"truncated": "Данные усечены, исходный размер",
"truncatedBadge": "Усечённый",
"unknown": "Неизвестная ошибка", "unknown": "Неизвестная ошибка",
"usage": "Дозировка", "usage": "Дозировка",
"user_message_not_found": "Не удалось найти исходное сообщение пользователя", "user_message_not_found": "Не удалось найти исходное сообщение пользователя",

View File

@ -9,12 +9,13 @@ import { useTimer } from '@renderer/hooks/useTimer'
import type { ToolQuickPanelApi } from '@renderer/pages/home/Inputbar/types' import type { ToolQuickPanelApi } from '@renderer/pages/home/Inputbar/types'
import { getProviderByModel } from '@renderer/services/AssistantService' import { getProviderByModel } from '@renderer/services/AssistantService'
import { EventEmitter } from '@renderer/services/EventService' import { EventEmitter } from '@renderer/services/EventService'
import type { MCPPrompt, MCPResource, MCPServer } from '@renderer/types' import type { McpMode, MCPPrompt, MCPResource, MCPServer } from '@renderer/types'
import { getEffectiveMcpMode } from '@renderer/types'
import { isToolUseModeFunction } from '@renderer/utils/assistant' import { isToolUseModeFunction } from '@renderer/utils/assistant'
import { isGeminiWebSearchProvider, isSupportUrlContextProvider } from '@renderer/utils/provider' import { isGeminiWebSearchProvider, isSupportUrlContextProvider } from '@renderer/utils/provider'
import { useNavigate } from '@tanstack/react-router' import { useNavigate } from '@tanstack/react-router'
import { Form, Input } from 'antd' import { Form, Input } from 'antd'
import { CircleX, Hammer, Plus } from 'lucide-react' import { CircleX, Hammer, Plus, Sparkles } from 'lucide-react'
import type { FC } from 'react' import type { FC } from 'react'
import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react' import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react'
import { useTranslation } from 'react-i18next' import { useTranslation } from 'react-i18next'
@ -26,7 +27,6 @@ interface Props {
resizeTextArea: () => void resizeTextArea: () => void
} }
// 添加类型定义
interface PromptArgument { interface PromptArgument {
name: string name: string
description?: string description?: string
@ -45,24 +45,19 @@ interface ResourceData {
uri?: string uri?: string
} }
// 提取到组件外的工具函数
const extractPromptContent = (response: any): string | null => { const extractPromptContent = (response: any): string | null => {
// Handle string response (backward compatibility)
if (typeof response === 'string') { if (typeof response === 'string') {
return response return response
} }
// Handle GetMCPPromptResponse format
if (response && Array.isArray(response.messages)) { if (response && Array.isArray(response.messages)) {
let formattedContent = '' let formattedContent = ''
for (const message of response.messages) { for (const message of response.messages) {
if (!message.content) continue if (!message.content) continue
// Add role prefix if available
const rolePrefix = message.role ? `**${message.role.charAt(0).toUpperCase() + message.role.slice(1)}:** ` : '' const rolePrefix = message.role ? `**${message.role.charAt(0).toUpperCase() + message.role.slice(1)}:** ` : ''
// Process different content types
switch (message.content.type) { switch (message.content.type) {
case 'text': case 'text':
formattedContent += `${rolePrefix}${message.content.text}\n\n` formattedContent += `${rolePrefix}${message.content.text}\n\n`
@ -99,7 +94,6 @@ const extractPromptContent = (response: any): string | null => {
return formattedContent.trim() return formattedContent.trim()
} }
// Fallback handling for single message format
if (response && response.messages && response.messages.length > 0) { if (response && response.messages && response.messages.length > 0) {
const message = response.messages[0] const message = response.messages[0]
if (message.content && message.content.text) { if (message.content && message.content.text) {
@ -122,7 +116,6 @@ const MCPToolsButton: FC<Props> = ({ quickPanel, setInputValue, resizeTextArea,
const model = assistant.model const model = assistant.model
const { setTimeoutTimer } = useTimer() const { setTimeoutTimer } = useTimer()
// 使用 useRef 存储不需要触发重渲染的值
const isMountedRef = useRef(true) const isMountedRef = useRef(true)
useEffect(() => { useEffect(() => {
@ -131,11 +124,30 @@ const MCPToolsButton: FC<Props> = ({ quickPanel, setInputValue, resizeTextArea,
} }
}, []) }, [])
const currentMode = useMemo(() => getEffectiveMcpMode(assistant), [assistant])
const mcpServers = useMemo(() => assistant.mcpServers || [], [assistant.mcpServers]) const mcpServers = useMemo(() => assistant.mcpServers || [], [assistant.mcpServers])
const assistantMcpServers = useMemo( const assistantMcpServers = useMemo(
() => activedMcpServers.filter((server) => mcpServers.some((s) => s.id === server.id)), () => activedMcpServers.filter((server) => mcpServers.some((s) => s.id === server.id)),
[activedMcpServers, mcpServers] [activedMcpServers, mcpServers]
) )
const handleModeChange = useCallback(
(mode: McpMode) => {
setTimeoutTimer(
'updateMcpMode',
() => {
updateAssistant({
...assistant,
mcpMode: mode
})
},
200
)
},
[assistant, setTimeoutTimer, updateAssistant]
)
const handleMcpServerSelect = useCallback( const handleMcpServerSelect = useCallback(
(server: MCPServer) => { (server: MCPServer) => {
const update = { ...assistant } const update = { ...assistant }
@ -145,29 +157,24 @@ const MCPToolsButton: FC<Props> = ({ quickPanel, setInputValue, resizeTextArea,
update.mcpServers = [...mcpServers, server] update.mcpServers = [...mcpServers, server]
} }
// only for gemini
if (update.mcpServers.length > 0 && isGeminiModel(model) && isToolUseModeFunction(assistant)) { if (update.mcpServers.length > 0 && isGeminiModel(model) && isToolUseModeFunction(assistant)) {
const provider = getProviderByModel(model) const provider = getProviderByModel(model)
if (isSupportUrlContextProvider(provider) && assistant.enableUrlContext) { if (isSupportUrlContextProvider(provider) && assistant.enableUrlContext) {
window.toast.warning(t('chat.mcp.warning.url_context')) window.toast.warning(t('chat.mcp.warning.url_context'))
update.enableUrlContext = false update.enableUrlContext = false
} }
if ( if (isGeminiWebSearchProvider(provider) && assistant.enableWebSearch) {
// 非官方 API (openrouter etc.) 可能支持同时启用内置搜索和函数调用
// 这里先假设 gemini type 和 vertexai type 不支持
isGeminiWebSearchProvider(provider) &&
assistant.enableWebSearch
) {
window.toast.warning(t('chat.mcp.warning.gemini_web_search')) window.toast.warning(t('chat.mcp.warning.gemini_web_search'))
update.enableWebSearch = false update.enableWebSearch = false
} }
} }
update.mcpMode = 'manual'
updateAssistant(update) updateAssistant(update)
}, },
[assistant, assistantMcpServers, mcpServers, model, t, updateAssistant] [assistant, assistantMcpServers, mcpServers, model, t, updateAssistant]
) )
// 使用 useRef 缓存事件处理函数
const handleMcpServerSelectRef = useRef(handleMcpServerSelect) const handleMcpServerSelectRef = useRef(handleMcpServerSelect)
handleMcpServerSelectRef.current = handleMcpServerSelect handleMcpServerSelectRef.current = handleMcpServerSelect
@ -177,23 +184,7 @@ const MCPToolsButton: FC<Props> = ({ quickPanel, setInputValue, resizeTextArea,
return () => EventEmitter.off('mcp-server-select', handler) return () => EventEmitter.off('mcp-server-select', handler)
}, []) }, [])
const updateMcpEnabled = useCallback( const manualModeMenuItems = useMemo(() => {
(enabled: boolean) => {
setTimeoutTimer(
'updateMcpEnabled',
() => {
updateAssistant({
...assistant,
mcpServers: enabled ? assistant.mcpServers || [] : []
})
},
200
)
},
[assistant, setTimeoutTimer, updateAssistant]
)
const menuItems = useMemo(() => {
const newList: QuickPanelListItem[] = activedMcpServers.map((server) => ({ const newList: QuickPanelListItem[] = activedMcpServers.map((server) => ({
label: server.name, label: server.name,
description: server.description || server.baseUrl, description: server.description || server.baseUrl,
@ -208,33 +199,70 @@ const MCPToolsButton: FC<Props> = ({ quickPanel, setInputValue, resizeTextArea,
action: () => navigate({ to: '/settings/mcp' }) action: () => navigate({ to: '/settings/mcp' })
}) })
newList.unshift({
label: t('settings.input.clear.all'),
description: t('settings.mcp.disable.description'),
icon: <CircleX />,
isSelected: false,
action: () => {
updateMcpEnabled(false)
quickPanelHook.close()
}
})
return newList return newList
}, [activedMcpServers, t, assistantMcpServers, navigate, updateMcpEnabled, quickPanelHook]) }, [activedMcpServers, t, assistantMcpServers, navigate])
const openQuickPanel = useCallback(() => { const openManualModePanel = useCallback(() => {
quickPanelHook.open({ quickPanelHook.open({
title: t('settings.mcp.title'), title: t('assistants.settings.mcp.mode.manual.label'),
list: menuItems, list: manualModeMenuItems,
symbol: QuickPanelReservedSymbol.Mcp, symbol: QuickPanelReservedSymbol.Mcp,
multiple: true, multiple: true,
afterAction({ item }) { afterAction({ item }) {
item.isSelected = !item.isSelected item.isSelected = !item.isSelected
} }
}) })
}, [manualModeMenuItems, quickPanelHook, t])
const menuItems = useMemo(() => {
const newList: QuickPanelListItem[] = []
newList.push({
label: t('assistants.settings.mcp.mode.disabled.label'),
description: t('assistants.settings.mcp.mode.disabled.description'),
icon: <CircleX />,
isSelected: currentMode === 'disabled',
action: () => {
handleModeChange('disabled')
quickPanelHook.close()
}
})
newList.push({
label: t('assistants.settings.mcp.mode.auto.label'),
description: t('assistants.settings.mcp.mode.auto.description'),
icon: <Sparkles />,
isSelected: currentMode === 'auto',
action: () => {
handleModeChange('auto')
quickPanelHook.close()
}
})
newList.push({
label: t('assistants.settings.mcp.mode.manual.label'),
description: t('assistants.settings.mcp.mode.manual.description'),
icon: <Hammer />,
isSelected: currentMode === 'manual',
isMenu: true,
action: () => {
handleModeChange('manual')
openManualModePanel()
}
})
return newList
}, [t, currentMode, handleModeChange, quickPanelHook, openManualModePanel])
const openQuickPanel = useCallback(() => {
quickPanelHook.open({
title: t('settings.mcp.title'),
list: menuItems,
symbol: QuickPanelReservedSymbol.Mcp,
multiple: false
})
}, [menuItems, quickPanelHook, t]) }, [menuItems, quickPanelHook, t])
// 使用 useCallback 优化 insertPromptIntoTextArea
const insertPromptIntoTextArea = useCallback( const insertPromptIntoTextArea = useCallback(
(promptText: string) => { (promptText: string) => {
setInputValue((prev) => { setInputValue((prev) => {
@ -246,7 +274,6 @@ const MCPToolsButton: FC<Props> = ({ quickPanel, setInputValue, resizeTextArea,
const selectionEndPosition = cursorPosition + promptText.length const selectionEndPosition = cursorPosition + promptText.length
const newText = prev.slice(0, cursorPosition) + promptText + prev.slice(cursorPosition) const newText = prev.slice(0, cursorPosition) + promptText + prev.slice(cursorPosition)
// 使用 requestAnimationFrame 优化 DOM 操作
requestAnimationFrame(() => { requestAnimationFrame(() => {
textArea.focus() textArea.focus()
textArea.setSelectionRange(selectionStart, selectionEndPosition) textArea.setSelectionRange(selectionStart, selectionEndPosition)
@ -425,7 +452,6 @@ const MCPToolsButton: FC<Props> = ({ quickPanel, setInputValue, resizeTextArea,
[activedMcpServers, t, insertPromptIntoTextArea] [activedMcpServers, t, insertPromptIntoTextArea]
) )
// 优化 resourcesList 的状态更新
const [resourcesList, setResourcesList] = useState<QuickPanelListItem[]>([]) const [resourcesList, setResourcesList] = useState<QuickPanelListItem[]>([])
useEffect(() => { useEffect(() => {
@ -515,17 +541,29 @@ const MCPToolsButton: FC<Props> = ({ quickPanel, setInputValue, resizeTextArea,
} }
}, [openPromptList, openQuickPanel, openResourcesList, quickPanel, t]) }, [openPromptList, openQuickPanel, openResourcesList, quickPanel, t])
const isActive = currentMode !== 'disabled'
const getButtonIcon = () => {
switch (currentMode) {
case 'auto':
return <Sparkles size={18} />
case 'disabled':
case 'manual':
default:
return <Hammer size={18} />
}
}
return ( return (
<Tooltip content={t('settings.mcp.title')} closeDelay={0}> <Tooltip content={t('settings.mcp.title')} closeDelay={0}>
<ActionIconButton <ActionIconButton
onClick={handleOpenQuickPanel} onClick={handleOpenQuickPanel}
active={assistant.mcpServers && assistant.mcpServers.length > 0} active={isActive}
aria-label={t('settings.mcp.title')} aria-label={t('settings.mcp.title')}
icon={<Hammer size={18} />} icon={getButtonIcon()}
/> />
</Tooltip> </Tooltip>
) )
} }
// 使用 React.memo 包装组件
export default React.memo(MCPToolsButton) export default React.memo(MCPToolsButton)

View File

@ -2,6 +2,7 @@ import { Tooltip } from '@cherrystudio/ui'
import { ActionIconButton } from '@renderer/components/Buttons' import { ActionIconButton } from '@renderer/components/Buttons'
import { useAssistant } from '@renderer/hooks/useAssistant' import { useAssistant } from '@renderer/hooks/useAssistant'
import { useTimer } from '@renderer/hooks/useTimer' import { useTimer } from '@renderer/hooks/useTimer'
import { getEffectiveMcpMode } from '@renderer/types'
import { isToolUseModeFunction } from '@renderer/utils/assistant' import { isToolUseModeFunction } from '@renderer/utils/assistant'
import { Link } from 'lucide-react' import { Link } from 'lucide-react'
import type { FC } from 'react' import type { FC } from 'react'
@ -30,8 +31,7 @@ const UrlContextButton: FC<Props> = ({ assistantId }) => {
() => { () => {
const update = { ...assistant } const update = { ...assistant }
if ( if (
assistant.mcpServers && getEffectiveMcpMode(assistant) !== 'disabled' &&
assistant.mcpServers.length > 0 &&
urlContentNewState === true && urlContentNewState === true &&
isToolUseModeFunction(assistant) isToolUseModeFunction(assistant)
) { ) {

View File

@ -16,7 +16,7 @@ import { useWebSearchProviders } from '@renderer/hooks/useWebSearchProviders'
import type { ToolQuickPanelController, ToolRenderContext } from '@renderer/pages/home/Inputbar/types' import type { ToolQuickPanelController, ToolRenderContext } from '@renderer/pages/home/Inputbar/types'
import { getProviderByModel } from '@renderer/services/AssistantService' import { getProviderByModel } from '@renderer/services/AssistantService'
import WebSearchService from '@renderer/services/WebSearchService' import WebSearchService from '@renderer/services/WebSearchService'
import type { WebSearchProvider, WebSearchProviderId } from '@renderer/types' import { getEffectiveMcpMode, type WebSearchProvider, type WebSearchProviderId } from '@renderer/types'
import { hasObjectKey } from '@renderer/utils' import { hasObjectKey } from '@renderer/utils'
import { isToolUseModeFunction } from '@renderer/utils/assistant' import { isToolUseModeFunction } from '@renderer/utils/assistant'
import { isPromptToolUse } from '@renderer/utils/mcp-tools' import { isPromptToolUse } from '@renderer/utils/mcp-tools'
@ -108,8 +108,7 @@ export const useWebSearchPanelController = (assistantId: string, quickPanelContr
isGeminiModel(model) && isGeminiModel(model) &&
isToolUseModeFunction(assistant) && isToolUseModeFunction(assistant) &&
update.enableWebSearch && update.enableWebSearch &&
assistant.mcpServers && getEffectiveMcpMode(assistant) !== 'disabled'
assistant.mcpServers.length > 0
) { ) {
update.enableWebSearch = false update.enableWebSearch = false
window.toast.warning(t('chat.mcp.warning.gemini_web_search')) window.toast.warning(t('chat.mcp.warning.gemini_web_search'))

View File

@ -6,6 +6,9 @@ import type { TokenFluxModel } from '../config/tokenFluxConfig'
const logger = loggerService.withContext('TokenFluxService') const logger = loggerService.withContext('TokenFluxService')
// 图片 API 使用固定的基础地址,独立于 provider.apiHost(后者是 OpenAI 兼容的聊天 API 地址)
const TOKENFLUX_IMAGE_API_HOST = 'https://api.tokenflux.ai'
export interface TokenFluxGenerationRequest { export interface TokenFluxGenerationRequest {
model: string model: string
input: { input: {
@ -66,7 +69,7 @@ export class TokenFluxService {
return cachedModels return cachedModels
} }
const response = await fetch(`${this.apiHost}/v1/images/models`, { const response = await fetch(`${TOKENFLUX_IMAGE_API_HOST}/v1/images/models`, {
headers: { headers: {
Authorization: `Bearer ${this.apiKey}` Authorization: `Bearer ${this.apiKey}`
} }
@ -88,7 +91,7 @@ export class TokenFluxService {
* Create a new image generation request * Create a new image generation request
*/ */
async createGeneration(request: TokenFluxGenerationRequest, signal?: AbortSignal): Promise<string> { async createGeneration(request: TokenFluxGenerationRequest, signal?: AbortSignal): Promise<string> {
const response = await fetch(`${this.apiHost}/v1/images/generations`, { const response = await fetch(`${TOKENFLUX_IMAGE_API_HOST}/v1/images/generations`, {
method: 'POST', method: 'POST',
headers: this.getHeaders(), headers: this.getHeaders(),
body: JSON.stringify(request), body: JSON.stringify(request),
@ -108,7 +111,7 @@ export class TokenFluxService {
* Get the status and result of a generation * Get the status and result of a generation
*/ */
async getGenerationResult(generationId: string): Promise<TokenFluxGenerationResponse['data']> { async getGenerationResult(generationId: string): Promise<TokenFluxGenerationResponse['data']> {
const response = await fetch(`${this.apiHost}/v1/images/generations/${generationId}`, { const response = await fetch(`${TOKENFLUX_IMAGE_API_HOST}/v1/images/generations/${generationId}`, {
headers: { headers: {
Authorization: `Bearer ${this.apiKey}` Authorization: `Bearer ${this.apiKey}`
} }
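Given that the image endpoints are now pinned to the fixed host above, a typical caller creates a generation and then polls for its result. A hedged sketch (the terminal `status` values and the 2-second polling interval are assumptions; only `createGeneration` and `getGenerationResult` come from this service):

```ts
// Submit a generation request, then poll until it reaches a terminal state.
async function generateAndWait(service: TokenFluxService, request: TokenFluxGenerationRequest) {
  const generationId = await service.createGeneration(request)
  for (;;) {
    const data = await service.getGenerationResult(generationId)
    // 'succeeded' / 'failed' are assumed status values, not defined in this diff.
    if (data?.status === 'succeeded' || data?.status === 'failed') {
      return data
    }
    await new Promise((resolve) => setTimeout(resolve, 2000))
  }
}
```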

View File

@ -1,10 +1,8 @@
import { Box } from '@cherrystudio/ui' import { Box, InfoTooltip, Switch, Tooltip } from '@cherrystudio/ui'
import { Switch } from '@cherrystudio/ui'
import { InfoTooltip } from '@cherrystudio/ui'
import { Tooltip } from '@cherrystudio/ui'
import { useMCPServers } from '@renderer/hooks/useMCPServers' import { useMCPServers } from '@renderer/hooks/useMCPServers'
import type { Assistant, AssistantSettings } from '@renderer/types' import type { Assistant, AssistantSettings, McpMode } from '@renderer/types'
import { Empty } from 'antd' import { getEffectiveMcpMode } from '@renderer/types'
import { Empty, Radio } from 'antd'
import { useTranslation } from 'react-i18next' import { useTranslation } from 'react-i18next'
import styled from 'styled-components' import styled from 'styled-components'
@ -29,22 +27,26 @@ const AssistantMCPSettings: React.FC<Props> = ({ assistant, updateAssistant }) =
const { t } = useTranslation() const { t } = useTranslation()
const { mcpServers: allMcpServers } = useMCPServers() const { mcpServers: allMcpServers } = useMCPServers()
const currentMode = getEffectiveMcpMode(assistant)
const handleModeChange = (mode: McpMode) => {
updateAssistant({ ...assistant, mcpMode: mode })
}
const onUpdate = (ids: string[]) => { const onUpdate = (ids: string[]) => {
const mcpServers = ids const mcpServers = ids
.map((id) => allMcpServers.find((server) => server.id === id)) .map((id) => allMcpServers.find((server) => server.id === id))
.filter((server): server is MCPServer => server !== undefined && server.isActive) .filter((server): server is MCPServer => server !== undefined && server.isActive)
updateAssistant({ ...assistant, mcpServers }) updateAssistant({ ...assistant, mcpServers, mcpMode: 'manual' })
} }
const handleServerToggle = (serverId: string) => { const handleServerToggle = (serverId: string) => {
const currentServerIds = assistant.mcpServers?.map((server) => server.id) || [] const currentServerIds = assistant.mcpServers?.map((server) => server.id) || []
if (currentServerIds.includes(serverId)) { if (currentServerIds.includes(serverId)) {
// Remove server if it's already enabled
onUpdate(currentServerIds.filter((id) => id !== serverId)) onUpdate(currentServerIds.filter((id) => id !== serverId))
} else { } else {
// Add server if it's not enabled
onUpdate([...currentServerIds, serverId]) onUpdate([...currentServerIds, serverId])
} }
} }
@ -61,48 +63,76 @@ const AssistantMCPSettings: React.FC<Props> = ({ assistant, updateAssistant }) =
iconProps={{ className: 'ml-1.5 text-xs text-color-text-2 cursor-help' }} iconProps={{ className: 'ml-1.5 text-xs text-color-text-2 cursor-help' }}
/> />
</Box> </Box>
{allMcpServers.length > 0 && (
<EnabledCount>
{enabledCount} / {allMcpServers.length} {t('settings.mcp.active')}
</EnabledCount>
)}
</HeaderContainer> </HeaderContainer>
{allMcpServers.length > 0 ? ( <ModeSelector>
<ServerList> <Radio.Group value={currentMode} onChange={(e) => handleModeChange(e.target.value)}>
{allMcpServers.map((server) => { <Radio.Button value="disabled">
const isEnabled = assistant.mcpServers?.some((s) => s.id === server.id) || false <ModeOption>
<ModeLabel>{t('assistants.settings.mcp.mode.disabled.label')}</ModeLabel>
<ModeDescription>{t('assistants.settings.mcp.mode.disabled.description')}</ModeDescription>
</ModeOption>
</Radio.Button>
<Radio.Button value="auto">
<ModeOption>
<ModeLabel>{t('assistants.settings.mcp.mode.auto.label')}</ModeLabel>
<ModeDescription>{t('assistants.settings.mcp.mode.auto.description')}</ModeDescription>
</ModeOption>
</Radio.Button>
<Radio.Button value="manual">
<ModeOption>
<ModeLabel>{t('assistants.settings.mcp.mode.manual.label')}</ModeLabel>
<ModeDescription>{t('assistants.settings.mcp.mode.manual.description')}</ModeDescription>
</ModeOption>
</Radio.Button>
</Radio.Group>
</ModeSelector>
return ( {currentMode === 'manual' && (
<ServerItem key={server.id} isEnabled={isEnabled}> <>
<ServerInfo> {allMcpServers.length > 0 && (
<ServerName>{server.name}</ServerName> <EnabledCount>
{server.description && <ServerDescription>{server.description}</ServerDescription>} {enabledCount} / {allMcpServers.length} {t('settings.mcp.active')}
{server.baseUrl && <ServerUrl>{server.baseUrl}</ServerUrl>} </EnabledCount>
</ServerInfo> )}
<Tooltip
content={ {allMcpServers.length > 0 ? (
!server.isActive <ServerList>
? t('assistants.settings.mcp.enableFirst', 'Enable this server in MCP settings first') {allMcpServers.map((server) => {
: undefined const isEnabled = assistant.mcpServers?.some((s) => s.id === server.id) || false
}>
<Switch return (
checked={isEnabled} <ServerItem key={server.id} isEnabled={isEnabled}>
disabled={!server.isActive} <ServerInfo>
onCheckedChange={() => handleServerToggle(server.id)} <ServerName>{server.name}</ServerName>
/> {server.description && <ServerDescription>{server.description}</ServerDescription>}
</Tooltip> {server.baseUrl && <ServerUrl>{server.baseUrl}</ServerUrl>}
</ServerItem> </ServerInfo>
) <Tooltip
})} content={
</ServerList> !server.isActive
) : ( ? t('assistants.settings.mcp.enableFirst', 'Enable this server in MCP settings first')
<EmptyContainer> : undefined
<Empty }>
description={t('assistants.settings.mcp.noServersAvailable', 'No MCP servers available')} <Switch
image={Empty.PRESENTED_IMAGE_SIMPLE} checked={isEnabled}
/> disabled={!server.isActive}
</EmptyContainer> onCheckedChange={() => handleServerToggle(server.id)}
/>
</Tooltip>
</ServerItem>
)
})}
</ServerList>
) : (
<EmptyContainer>
<Empty
description={t('assistants.settings.mcp.noServersAvailable', 'No MCP servers available')}
image={Empty.PRESENTED_IMAGE_SIMPLE}
/>
</EmptyContainer>
)}
</>
)} )}
</Container> </Container>
) )
@ -112,7 +142,7 @@ const Container = styled.div`
display: flex; display: flex;
flex: 1; flex: 1;
flex-direction: column; flex-direction: column;
overflow: hidden; min-height: 0;
` `
const HeaderContainer = styled.div` const HeaderContainer = styled.div`
@ -122,9 +152,54 @@ const HeaderContainer = styled.div`
margin-bottom: 16px; margin-bottom: 16px;
` `
const ModeSelector = styled.div`
margin-bottom: 16px;
.ant-radio-group {
display: flex;
flex-direction: column;
gap: 8px;
}
.ant-radio-button-wrapper {
height: auto;
padding: 12px 16px;
border-radius: 8px;
border: 1px solid var(--color-border);
&:not(:first-child)::before {
display: none;
}
&:first-child {
border-radius: 8px;
}
&:last-child {
border-radius: 8px;
}
}
`
const ModeOption = styled.div`
display: flex;
flex-direction: column;
gap: 2px;
`
const ModeLabel = styled.span`
font-weight: 600;
`
const ModeDescription = styled.span`
font-size: 12px;
color: var(--color-text-2);
`
const EnabledCount = styled.span` const EnabledCount = styled.span`
font-size: 12px; font-size: 12px;
color: var(--color-text-2); color: var(--color-text-2);
margin-bottom: 8px;
` `
const EmptyContainer = styled.div` const EmptyContainer = styled.div`

View File

@ -5,7 +5,7 @@ import OpenAIAlert from '@renderer/components/Alert/OpenAIAlert'
import { LoadingIcon } from '@renderer/components/Icons' import { LoadingIcon } from '@renderer/components/Icons'
import { ApiKeyListPopup } from '@renderer/components/Popups/ApiKeyListPopup' import { ApiKeyListPopup } from '@renderer/components/Popups/ApiKeyListPopup'
import Selector from '@renderer/components/Selector' import Selector from '@renderer/components/Selector'
import { isEmbeddingModel, isRerankModel } from '@renderer/config/models' import { isRerankModel } from '@renderer/config/models'
import { PROVIDER_URLS } from '@renderer/config/providers' import { PROVIDER_URLS } from '@renderer/config/providers'
import { useTheme } from '@renderer/context/ThemeProvider' import { useTheme } from '@renderer/context/ThemeProvider'
import { useAllProviders, useProvider, useProviders } from '@renderer/hooks/useProvider' import { useAllProviders, useProvider, useProviders } from '@renderer/hooks/useProvider'
@ -129,17 +129,20 @@ const ProviderSetting: FC<Props> = ({ providerId }) => {
checking: false checking: false
}) })
const updateWebSearchProviderKey = ({ apiKey }: { apiKey: string }) => { const updateWebSearchProviderKey = useCallback(
provider.id === 'zhipu' && dispatch(updateWebSearchProvider({ id: 'zhipu', apiKey: apiKey.split(',')[0] })) ({ apiKey }: { apiKey: string }) => {
} provider.id === 'zhipu' && dispatch(updateWebSearchProvider({ id: 'zhipu', apiKey: apiKey.split(',')[0] }))
},
[dispatch, provider.id]
)
// eslint-disable-next-line react-hooks/exhaustive-deps const debouncedUpdateApiKey = useMemo(
const debouncedUpdateApiKey = useCallback( () =>
debounce((value) => { debounce((value: string) => {
updateProvider({ apiKey: formatApiKeys(value) }) updateProvider({ apiKey: formatApiKeys(value) })
updateWebSearchProviderKey({ apiKey: formatApiKeys(value) }) updateWebSearchProviderKey({ apiKey: formatApiKeys(value) })
}, 150), }, 150),
[] [updateProvider, updateWebSearchProviderKey]
) )
// 同步 provider.apiKey 到 localApiKey // 同步 provider.apiKey 到 localApiKey
@ -225,7 +228,7 @@ const ProviderSetting: FC<Props> = ({ providerId }) => {
return return
} }
const modelsToCheck = models.filter((model) => !isEmbeddingModel(model) && !isRerankModel(model)) const modelsToCheck = models.filter((model) => !isRerankModel(model))
if (isEmpty(modelsToCheck)) { if (isEmpty(modelsToCheck)) {
window.toast.error({ window.toast.error({

View File

@ -9,8 +9,9 @@ import { buildStreamTextParams } from '@renderer/aiCore/prepareParams'
import { isDedicatedImageGenerationModel, isEmbeddingModel, isFunctionCallingModel } from '@renderer/config/models' import { isDedicatedImageGenerationModel, isEmbeddingModel, isFunctionCallingModel } from '@renderer/config/models'
import i18n from '@renderer/i18n' import i18n from '@renderer/i18n'
import store from '@renderer/store' import store from '@renderer/store'
import { hubMCPServer } from '@renderer/store/mcp'
import type { Assistant, MCPServer, MCPTool, Model, Provider } from '@renderer/types' import type { Assistant, MCPServer, MCPTool, Model, Provider } from '@renderer/types'
import { type FetchChatCompletionParams, isSystemProvider } from '@renderer/types' import { type FetchChatCompletionParams, getEffectiveMcpMode, isSystemProvider } from '@renderer/types'
import type { StreamTextParams } from '@renderer/types/aiCoreTypes' import type { StreamTextParams } from '@renderer/types/aiCoreTypes'
import { type Chunk, ChunkType } from '@renderer/types/chunk' import { type Chunk, ChunkType } from '@renderer/types/chunk'
import type { Message, ResponseError } from '@renderer/types/newMessage' import type { Message, ResponseError } from '@renderer/types/newMessage'
@ -52,14 +53,60 @@ import type { StreamProcessorCallbacks } from './StreamProcessingService'
const logger = loggerService.withContext('ApiService') const logger = loggerService.withContext('ApiService')
export async function fetchMcpTools(assistant: Assistant) { /**
// Get MCP tools (Fix duplicate declaration) * Get the MCP servers to use based on the assistant's MCP mode.
let mcpTools: MCPTool[] = [] // Initialize as empty array */
export function getMcpServersForAssistant(assistant: Assistant): MCPServer[] {
const mode = getEffectiveMcpMode(assistant)
const allMcpServers = store.getState().mcp.servers || [] const allMcpServers = store.getState().mcp.servers || []
const activedMcpServers = allMcpServers.filter((s) => s.isActive) const activedMcpServers = allMcpServers.filter((s) => s.isActive)
const assistantMcpServers = assistant.mcpServers || []
const enabledMCPs = activedMcpServers.filter((server) => assistantMcpServers.some((s) => s.id === server.id)) switch (mode) {
case 'disabled':
return []
case 'auto':
return [hubMCPServer]
case 'manual': {
const assistantMcpServers = assistant.mcpServers || []
return activedMcpServers.filter((server) => assistantMcpServers.some((s) => s.id === server.id))
}
default:
return []
}
}
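In short, the assistant's MCP mode decides which servers are considered before any tools are listed. A minimal illustration, with the assistant objects abbreviated to the fields that matter (results shown as comments):

```ts
// Illustration of the mapping implemented by getMcpServersForAssistant above.
getMcpServersForAssistant({ ...assistant, mcpMode: 'disabled' }) // -> []
getMcpServersForAssistant({ ...assistant, mcpMode: 'auto' }) // -> [hubMCPServer]
getMcpServersForAssistant({ ...assistant, mcpMode: 'manual', mcpServers }) // -> active servers the assistant selected
```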
export async function fetchAllActiveServerTools(): Promise<MCPTool[]> {
const allMcpServers = store.getState().mcp.servers || []
const activedMcpServers = allMcpServers.filter((s) => s.isActive)
if (activedMcpServers.length === 0) {
return []
}
try {
const toolPromises = activedMcpServers.map(async (mcpServer: MCPServer) => {
try {
const tools = await window.api.mcp.listTools(mcpServer)
return tools.filter((tool: any) => !mcpServer.disabledTools?.includes(tool.name))
} catch (error) {
logger.error(`Error fetching tools from MCP server ${mcpServer.name}:`, error as Error)
return []
}
})
const results = await Promise.allSettled(toolPromises)
return results
.filter((result): result is PromiseFulfilledResult<MCPTool[]> => result.status === 'fulfilled')
.map((result) => result.value)
.flat()
} catch (toolError) {
logger.error('Error fetching all active server tools:', toolError as Error)
return []
}
}
export async function fetchMcpTools(assistant: Assistant) {
let mcpTools: MCPTool[] = []
const enabledMCPs = getMcpServersForAssistant(assistant)
if (enabledMCPs && enabledMCPs.length > 0) { if (enabledMCPs && enabledMCPs.length > 0) {
try { try {
@ -199,6 +246,7 @@ export async function fetchChatCompletion({
const usePromptToolUse = const usePromptToolUse =
isPromptToolUse(assistant) || (isToolUseModeFunction(assistant) && !isFunctionCallingModel(assistant.model)) isPromptToolUse(assistant) || (isToolUseModeFunction(assistant) && !isFunctionCallingModel(assistant.model))
const mcpMode = getEffectiveMcpMode(assistant)
const middlewareConfig: AiSdkMiddlewareConfig = { const middlewareConfig: AiSdkMiddlewareConfig = {
streamOutput: assistant.settings?.streamOutput ?? true, streamOutput: assistant.settings?.streamOutput ?? true,
onChunk: onChunkReceived, onChunk: onChunkReceived,
@ -211,6 +259,7 @@ export async function fetchChatCompletion({
enableWebSearch: capabilities.enableWebSearch, enableWebSearch: capabilities.enableWebSearch,
enableGenerateImage: capabilities.enableGenerateImage, enableGenerateImage: capabilities.enableGenerateImage,
enableUrlContext: capabilities.enableUrlContext, enableUrlContext: capabilities.enableUrlContext,
mcpMode,
mcpTools, mcpTools,
uiMessages, uiMessages,
knowledgeRecognition: assistant.knowledgeRecognition knowledgeRecognition: assistant.knowledgeRecognition
@ -602,6 +651,13 @@ export function checkApiProvider(provider: Provider): void {
} }
} }
/**
* Validates that a provider/model pair is working by sending a minimal request.
* @param provider - The provider configuration to test.
* @param model - The model to use for the validation request (chat or embeddings).
* @param timeout - Maximum time (ms) to wait for the request to complete. Defaults to 15000 ms.
* @throws {Error} If the request fails or times out, indicating the API is not usable.
*/
export async function checkApi(provider: Provider, model: Model, timeout = 15000): Promise<void> { export async function checkApi(provider: Provider, model: Model, timeout = 15000): Promise<void> {
checkApiProvider(provider) checkApiProvider(provider)
@ -612,7 +668,6 @@ export async function checkApi(provider: Provider, model: Model, timeout = 15000
assistant.prompt = 'test' // 避免部分 provider 空系统提示词会报错 assistant.prompt = 'test' // 避免部分 provider 空系统提示词会报错
if (isEmbeddingModel(model)) { if (isEmbeddingModel(model)) {
// race 超时 15s
logger.silly("it's a embedding model") logger.silly("it's a embedding model")
const timerPromise = new Promise((_, reject) => setTimeout(() => reject('Timeout'), timeout)) const timerPromise = new Promise((_, reject) => setTimeout(() => reject('Timeout'), timeout))
await Promise.race([ai.getEmbeddingDimensions(model), timerPromise]) await Promise.race([ai.getEmbeddingDimensions(model), timerPromise])
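A hedged usage sketch of the contract documented above; the toast calls and messages are illustrative, and note that the internal timeout rejects with the string 'Timeout' rather than an Error:

```ts
// Validate a provider/model pair before saving settings; checkApi throws on failure or timeout.
try {
  await checkApi(provider, model) // default timeout: 15000 ms
  window.toast.success('API check passed')
} catch (error) {
  window.toast.error(`API check failed: ${error instanceof Error ? error.message : String(error)}`)
}
```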

View File

@ -0,0 +1,54 @@
import { describe, expect, it } from 'vitest'
import type { Assistant, MCPServer } from '../../types'
import { getEffectiveMcpMode } from '../../types'
describe('getEffectiveMcpMode', () => {
it('should return mcpMode when explicitly set to auto', () => {
const assistant: Partial<Assistant> = { mcpMode: 'auto' }
expect(getEffectiveMcpMode(assistant as Assistant)).toBe('auto')
})
it('should return disabled when mcpMode is explicitly disabled', () => {
const assistant: Partial<Assistant> = { mcpMode: 'disabled' }
expect(getEffectiveMcpMode(assistant as Assistant)).toBe('disabled')
})
it('should return manual when mcpMode is explicitly manual', () => {
const assistant: Partial<Assistant> = { mcpMode: 'manual' }
expect(getEffectiveMcpMode(assistant as Assistant)).toBe('manual')
})
it('should return manual when no mcpMode but mcpServers has items (backward compatibility)', () => {
const assistant: Partial<Assistant> = {
mcpServers: [{ id: 'test', name: 'Test Server', isActive: true }] as MCPServer[]
}
expect(getEffectiveMcpMode(assistant as Assistant)).toBe('manual')
})
it('should return disabled when no mcpMode and no mcpServers (backward compatibility)', () => {
const assistant: Partial<Assistant> = {}
expect(getEffectiveMcpMode(assistant as Assistant)).toBe('disabled')
})
it('should return disabled when no mcpMode and empty mcpServers (backward compatibility)', () => {
const assistant: Partial<Assistant> = { mcpServers: [] }
expect(getEffectiveMcpMode(assistant as Assistant)).toBe('disabled')
})
it('should prioritize explicit mcpMode over mcpServers presence', () => {
const assistant: Partial<Assistant> = {
mcpMode: 'disabled',
mcpServers: [{ id: 'test', name: 'Test Server', isActive: true }] as MCPServer[]
}
expect(getEffectiveMcpMode(assistant as Assistant)).toBe('disabled')
})
it('should return auto when mcpMode is auto regardless of mcpServers', () => {
const assistant: Partial<Assistant> = {
mcpMode: 'auto',
mcpServers: [{ id: 'test', name: 'Test Server', isActive: true }] as MCPServer[]
}
expect(getEffectiveMcpMode(assistant as Assistant)).toBe('auto')
})
})

View File

@ -86,6 +86,28 @@ export { mcpSlice }
// Export the reducer as default export // Export the reducer as default export
export default mcpSlice.reducer export default mcpSlice.reducer
/**
* Hub MCP server for auto mode - aggregates all MCP servers for LLM code mode.
* This server is injected automatically when mcpMode === 'auto'.
*/
export const hubMCPServer: BuiltinMCPServer = {
id: 'hub',
name: BuiltinMCPServerNames.hub,
type: 'inMemory',
isActive: true,
provider: 'CherryAI',
installSource: 'builtin',
isTrusted: true
}
/**
* User-installable built-in MCP servers shown in the UI.
*
* Note: The `hub` server (@cherry/hub) is intentionally excluded because:
* - It's a meta-server that aggregates all other MCP servers
* - It's designed for LLM code mode, not direct user interaction
* - It should be auto-enabled internally when needed, not manually installed
*/
export const builtinMCPServers: BuiltinMCPServer[] = [ export const builtinMCPServers: BuiltinMCPServer[] = [
{ {
id: nanoid(), id: nanoid(),

View File

@ -871,6 +871,7 @@ const fetchAndProcessAssistantResponseImpl = async (
const streamProcessorCallbacks = createStreamProcessor(callbacks) const streamProcessorCallbacks = createStreamProcessor(callbacks)
const abortController = new AbortController() const abortController = new AbortController()
logger.silly('Add Abort Controller', { id: userMessageId })
addAbortController(userMessageId!, () => abortController.abort()) addAbortController(userMessageId!, () => abortController.abort())
await transformMessagesAndFetch( await transformMessagesAndFetch(

View File

@ -27,6 +27,8 @@ export * from './ocr'
export * from './plugin' export * from './plugin'
export * from './provider' export * from './provider'
export type McpMode = 'disabled' | 'auto' | 'manual'
export type Assistant = { export type Assistant = {
id: string id: string
name: string name: string
@ -47,6 +49,8 @@ export type Assistant = {
// enableUrlContext 是 Gemini/Anthropic 的特有功能 // enableUrlContext 是 Gemini/Anthropic 的特有功能
enableUrlContext?: boolean enableUrlContext?: boolean
enableGenerateImage?: boolean enableGenerateImage?: boolean
/** MCP mode: 'disabled' (no MCP), 'auto' (hub server only), 'manual' (user selects servers) */
mcpMode?: McpMode
mcpServers?: MCPServer[] mcpServers?: MCPServer[]
knowledgeRecognition?: 'off' | 'on' knowledgeRecognition?: 'off' | 'on'
regularPhrases?: QuickPhrase[] // Added for regular phrase regularPhrases?: QuickPhrase[] // Added for regular phrase
@ -57,6 +61,15 @@ export type Assistant = {
targetLanguage?: TranslateLanguage targetLanguage?: TranslateLanguage
} }
/**
* Get the effective MCP mode for an assistant with backward compatibility.
 * Legacy assistants without an explicit mcpMode fall back to 'manual' when mcpServers is non-empty, otherwise 'disabled'.
*/
export function getEffectiveMcpMode(assistant: Assistant): McpMode {
if (assistant.mcpMode) return assistant.mcpMode
return (assistant.mcpServers?.length ?? 0) > 0 ? 'manual' : 'disabled'
}
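Call sites should prefer this helper over inspecting `mcpServers` directly, which is exactly the substitution made elsewhere in this commit (UrlContextButton, the web search panel controller). A minimal sketch of the before/after:

```ts
// Before: a legacy check that misses assistants enabling MCP via 'auto' mode.
const mcpActiveLegacy = (assistant.mcpServers?.length ?? 0) > 0

// After: respects the explicit mode and keeps the backward-compatible fallback in one place.
const mcpActive = getEffectiveMcpMode(assistant) !== 'disabled'
```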
export type TranslateAssistant = Assistant & { export type TranslateAssistant = Assistant & {
model: Model model: Model
content: string content: string
@ -752,7 +765,8 @@ export const BuiltinMCPServerNames = {
python: '@cherry/python', python: '@cherry/python',
didiMCP: '@cherry/didi-mcp', didiMCP: '@cherry/didi-mcp',
browser: '@cherry/browser', browser: '@cherry/browser',
nowledgeMem: '@cherry/nowledge-mem' nowledgeMem: '@cherry/nowledge-mem',
hub: '@cherry/hub'
} as const } as const
export type BuiltinMCPServerName = (typeof BuiltinMCPServerNames)[keyof typeof BuiltinMCPServerNames] export type BuiltinMCPServerName = (typeof BuiltinMCPServerNames)[keyof typeof BuiltinMCPServerNames]

View File

@ -13,7 +13,7 @@ import { isFunctionCallingModel, isVisionModel } from '@renderer/config/models'
import i18n from '@renderer/i18n' import i18n from '@renderer/i18n'
import { currentSpan } from '@renderer/services/SpanManagerService' import { currentSpan } from '@renderer/services/SpanManagerService'
import store from '@renderer/store' import store from '@renderer/store'
import { addMCPServer } from '@renderer/store/mcp' import { addMCPServer, hubMCPServer } from '@renderer/store/mcp'
import type { import type {
Assistant, Assistant,
MCPCallToolResponse, MCPCallToolResponse,
@ -325,7 +325,16 @@ export function filterMCPTools(
export function getMcpServerByTool(tool: MCPTool) { export function getMcpServerByTool(tool: MCPTool) {
const servers = store.getState().mcp.servers const servers = store.getState().mcp.servers
return servers.find((s) => s.id === tool.serverId) const server = servers.find((s) => s.id === tool.serverId)
if (server) {
return server
}
// For hub server (auto mode), the server isn't in the store
// Return the hub server constant if the tool's serverId matches
if (tool.serverId === 'hub') {
return hubMCPServer
}
return undefined
} }
export function isToolAutoApproved(tool: MCPTool, server?: MCPServer): boolean { export function isToolAutoApproved(tool: MCPTool, server?: MCPServer): boolean {

View File

@ -16,7 +16,7 @@ Here are a few examples using notional tools:
--- ---
User: Generate an image of the oldest person in this document. User: Generate an image of the oldest person in this document.
Assistant: I can use the document_qa tool to find out who the oldest person is in the document. A: I can use the document_qa tool to find out who the oldest person is in the document.
<tool_use> <tool_use>
<name>document_qa</name> <name>document_qa</name>
<arguments>{"document": "document.pdf", "question": "Who is the oldest person mentioned?"}</arguments> <arguments>{"document": "document.pdf", "question": "Who is the oldest person mentioned?"}</arguments>
@ -27,7 +27,7 @@ User: <tool_use_result>
<result>John Doe, a 55 year old lumberjack living in Newfoundland.</result> <result>John Doe, a 55 year old lumberjack living in Newfoundland.</result>
</tool_use_result> </tool_use_result>
Assistant: I can use the image_generator tool to create a portrait of John Doe. A: I can use the image_generator tool to create a portrait of John Doe.
<tool_use> <tool_use>
<name>image_generator</name> <name>image_generator</name>
<arguments>{"prompt": "A portrait of John Doe, a 55-year-old man living in Canada."}</arguments> <arguments>{"prompt": "A portrait of John Doe, a 55-year-old man living in Canada."}</arguments>
@ -38,12 +38,12 @@ User: <tool_use_result>
<result>image.png</result> <result>image.png</result>
</tool_use_result> </tool_use_result>
Assistant: the image is generated as image.png A: the image is generated as image.png
--- ---
User: "What is the result of the following operation: 5 + 3 + 1294.678?" User: "What is the result of the following operation: 5 + 3 + 1294.678?"
Assistant: I can use the python_interpreter tool to calculate the result of the operation. A: I can use the python_interpreter tool to calculate the result of the operation.
<tool_use> <tool_use>
<name>python_interpreter</name> <name>python_interpreter</name>
<arguments>{"code": "5 + 3 + 1294.678"}</arguments> <arguments>{"code": "5 + 3 + 1294.678"}</arguments>
@ -54,12 +54,12 @@ User: <tool_use_result>
<result>1302.678</result> <result>1302.678</result>
</tool_use_result> </tool_use_result>
Assistant: The result of the operation is 1302.678. A: The result of the operation is 1302.678.
--- ---
User: "Which city has the highest population , Guangzhou or Shanghai?" User: "Which city has the highest population , Guangzhou or Shanghai?"
Assistant: I can use the search tool to find the population of Guangzhou. A: I can use the search tool to find the population of Guangzhou.
<tool_use> <tool_use>
<name>search</name> <name>search</name>
<arguments>{"query": "Population Guangzhou"}</arguments> <arguments>{"query": "Population Guangzhou"}</arguments>
@ -70,7 +70,7 @@ User: <tool_use_result>
<result>Guangzhou has a population of 15 million inhabitants as of 2021.</result> <result>Guangzhou has a population of 15 million inhabitants as of 2021.</result>
</tool_use_result> </tool_use_result>
Assistant: I can use the search tool to find the population of Shanghai. A: I can use the search tool to find the population of Shanghai.
<tool_use> <tool_use>
<name>search</name> <name>search</name>
<arguments>{"query": "Population Shanghai"}</arguments> <arguments>{"query": "Population Shanghai"}</arguments>
@ -80,7 +80,8 @@ User: <tool_use_result>
<name>search</name> <name>search</name>
<result>26 million (2019)</result> <result>26 million (2019)</result>
</tool_use_result> </tool_use_result>
Assistant: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.
A: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.
` `
export const AvailableTools = (tools: MCPTool[]) => { export const AvailableTools = (tools: MCPTool[]) => {
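The hunk above only shortens the speaker label from "Assistant:" to "A:" in the few-shot examples; the <tool_use>/<tool_use_result> protocol the prompt teaches is unchanged. For context, a minimal sketch of how such blocks could be pulled out of a model reply — the ToolCall shape and the parseToolUses helper are illustrative assumptions, not the parser Cherry Studio actually ships:

```ts
// Illustrative only: extract <tool_use> blocks that follow the prompt format above.
interface ToolCall {
  name: string
  arguments: Record<string, unknown>
}

function parseToolUses(reply: string): ToolCall[] {
  const calls: ToolCall[] = []
  for (const [, body] of reply.matchAll(/<tool_use>([\s\S]*?)<\/tool_use>/g)) {
    const name = body.match(/<name>([\s\S]*?)<\/name>/)?.[1]?.trim()
    const rawArgs = body.match(/<arguments>([\s\S]*?)<\/arguments>/)?.[1]?.trim()
    if (!name || !rawArgs) continue
    try {
      calls.push({ name, arguments: JSON.parse(rawArgs) })
    } catch {
      // Skip blocks whose arguments are not valid JSON.
    }
  }
  return calls
}

// parseToolUses('<tool_use><name>search</name><arguments>{"query":"Population Shanghai"}</arguments></tool_use>')
// -> [{ name: 'search', arguments: { query: 'Population Shanghai' } }]
```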

View File

@ -12,6 +12,7 @@ import {
} from '@renderer/services/AssistantService' } from '@renderer/services/AssistantService'
import { pauseTrace } from '@renderer/services/SpanManagerService' import { pauseTrace } from '@renderer/services/SpanManagerService'
import type { Assistant, Topic } from '@renderer/types' import type { Assistant, Topic } from '@renderer/types'
import { AssistantMessageStatus } from '@renderer/types/newMessage'
import { abortCompletion } from '@renderer/utils/abortController' import { abortCompletion } from '@renderer/utils/abortController'
import type { SelectionActionItem } from '@shared/data/preference/preferenceTypes' import type { SelectionActionItem } from '@shared/data/preference/preferenceTypes'
import { ChevronDown } from 'lucide-react' import { ChevronDown } from 'lucide-react'
@ -34,8 +35,7 @@ const ActionGeneral: FC<Props> = React.memo(({ action, scrollToBottom }) => {
const [language] = usePreference('app.language') const [language] = usePreference('app.language')
const [error, setError] = useState<string | null>(null) const [error, setError] = useState<string | null>(null)
const [showOriginal, setShowOriginal] = useState(false) const [showOriginal, setShowOriginal] = useState(false)
const [isContented, setIsContented] = useState(false) const [status, setStatus] = useState<'preparing' | 'streaming' | 'finished'>('preparing')
const [isLoading, setIsLoading] = useState(true)
const [contentToCopy, setContentToCopy] = useState('') const [contentToCopy, setContentToCopy] = useState('')
const initialized = useRef(false) const initialized = useRef(false)
@ -96,19 +96,24 @@ const ActionGeneral: FC<Props> = React.memo(({ action, scrollToBottom }) => {
}, [action, language]) }, [action, language])
const fetchResult = useCallback(() => { const fetchResult = useCallback(() => {
if (!initialized.current) {
return
}
setStatus('preparing')
const setAskId = (id: string) => { const setAskId = (id: string) => {
askId.current = id askId.current = id
} }
const onStream = () => { const onStream = () => {
setIsContented(true) setStatus('streaming')
scrollToBottom?.() scrollToBottom?.()
} }
const onFinish = (content: string) => { const onFinish = (content: string) => {
setStatus('finished')
setContentToCopy(content) setContentToCopy(content)
setIsLoading(false)
} }
const onError = (error: Error) => { const onError = (error: Error) => {
setIsLoading(false) setStatus('finished')
setError(error.message) setError(error.message)
} }
@ -131,17 +136,40 @@ const ActionGeneral: FC<Props> = React.memo(({ action, scrollToBottom }) => {
const allMessages = useTopicMessages(topicRef.current?.id || '') const allMessages = useTopicMessages(topicRef.current?.id || '')
// Memoize the messages to prevent unnecessary re-renders const currentAssistantMessage = useMemo(() => {
const messageContent = useMemo(() => {
const assistantMessages = allMessages.filter((message) => message.role === 'assistant') const assistantMessages = allMessages.filter((message) => message.role === 'assistant')
const lastAssistantMessage = assistantMessages[assistantMessages.length - 1] if (assistantMessages.length === 0) {
return lastAssistantMessage ? <MessageContent key={lastAssistantMessage.id} message={lastAssistantMessage} /> : null return null
}
return assistantMessages[assistantMessages.length - 1]
}, [allMessages]) }, [allMessages])
useEffect(() => {
// Sync message status
switch (currentAssistantMessage?.status) {
case AssistantMessageStatus.PROCESSING:
case AssistantMessageStatus.PENDING:
case AssistantMessageStatus.SEARCHING:
setStatus('streaming')
break
case AssistantMessageStatus.PAUSED:
case AssistantMessageStatus.ERROR:
case AssistantMessageStatus.SUCCESS:
setStatus('finished')
break
case undefined:
break
default:
logger.warn('Unexpected assistant message status:', { status: currentAssistantMessage?.status })
}
}, [currentAssistantMessage?.status])
const isPreparing = status === 'preparing'
const isStreaming = status === 'streaming'
const handlePause = () => { const handlePause = () => {
if (askId.current) { if (askId.current) {
abortCompletion(askId.current) abortCompletion(askId.current)
setIsLoading(false)
} }
if (topicRef.current?.id) { if (topicRef.current?.id) {
pauseTrace(topicRef.current.id) pauseTrace(topicRef.current.id)
@ -150,7 +178,6 @@ const ActionGeneral: FC<Props> = React.memo(({ action, scrollToBottom }) => {
const handleRegenerate = () => { const handleRegenerate = () => {
setContentToCopy('') setContentToCopy('')
setIsLoading(true)
fetchResult() fetchResult()
} }
@ -178,13 +205,20 @@ const ActionGeneral: FC<Props> = React.memo(({ action, scrollToBottom }) => {
</OriginalContent> </OriginalContent>
)} )}
<Result> <Result>
{!isContented && isLoading && <LoadingOutlined style={{ fontSize: 16 }} spin />} {isPreparing && <LoadingOutlined style={{ fontSize: 16 }} spin />}
{messageContent} {!isPreparing && currentAssistantMessage && (
<MessageContent key={currentAssistantMessage.id} message={currentAssistantMessage} />
)}
</Result> </Result>
{error && <ErrorMsg>{error}</ErrorMsg>} {error && <ErrorMsg>{error}</ErrorMsg>}
</Container> </Container>
<FooterPadding /> <FooterPadding />
<WindowFooter loading={isLoading} onPause={handlePause} onRegenerate={handleRegenerate} content={contentToCopy} /> <WindowFooter
loading={isStreaming}
onPause={handlePause}
onRegenerate={handleRegenerate}
content={contentToCopy}
/>
</> </>
) )
}) })
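The ActionGeneral refactor collapses the isContented/isLoading boolean pair into a single status union ('preparing' | 'streaming' | 'finished') and keeps it in sync with the underlying assistant message. The same switch appears inline in both selection actions; a sketch of it factored into a standalone helper (the extraction into a helper is an assumption for illustration):

```ts
import { AssistantMessageStatus } from '@renderer/types/newMessage'

type ActionStatus = 'preparing' | 'streaming' | 'finished'

// Map the stored message status onto the window's local status.
// Returning null means "leave the current local status untouched".
function toActionStatus(status?: AssistantMessageStatus): ActionStatus | null {
  switch (status) {
    case AssistantMessageStatus.PROCESSING:
    case AssistantMessageStatus.PENDING:
    case AssistantMessageStatus.SEARCHING:
      return 'streaming'
    case AssistantMessageStatus.PAUSED:
    case AssistantMessageStatus.ERROR:
    case AssistantMessageStatus.SUCCESS:
      return 'finished'
    default:
      return null
  }
}
```

Deriving isPreparing and isStreaming from one value keeps the spinner condition and the footer's loading flag from drifting apart, which two independent booleans allowed.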

View File

@ -10,7 +10,9 @@ import { useTopicMessages } from '@renderer/hooks/useMessageOperations'
import useTranslate from '@renderer/hooks/useTranslate' import useTranslate from '@renderer/hooks/useTranslate'
import MessageContent from '@renderer/pages/home/Messages/MessageContent' import MessageContent from '@renderer/pages/home/Messages/MessageContent'
import { getDefaultTopic, getDefaultTranslateAssistant } from '@renderer/services/AssistantService' import { getDefaultTopic, getDefaultTranslateAssistant } from '@renderer/services/AssistantService'
import { pauseTrace } from '@renderer/services/SpanManagerService'
import type { Assistant, Topic, TranslateLanguage, TranslateLanguageCode } from '@renderer/types' import type { Assistant, Topic, TranslateLanguage, TranslateLanguageCode } from '@renderer/types'
import { AssistantMessageStatus } from '@renderer/types/newMessage'
import { abortCompletion } from '@renderer/utils/abortController' import { abortCompletion } from '@renderer/utils/abortController'
import { detectLanguage } from '@renderer/utils/translate' import { detectLanguage } from '@renderer/utils/translate'
import { defaultLanguage } from '@shared/config/constant' import { defaultLanguage } from '@shared/config/constant'
@ -50,12 +52,11 @@ const ActionTranslate: FC<Props> = ({ action, scrollToBottom }) => {
const [error, setError] = useState('') const [error, setError] = useState('')
const [showOriginal, setShowOriginal] = useState(false) const [showOriginal, setShowOriginal] = useState(false)
const [isContented, setIsContented] = useState(false) const [status, setStatus] = useState<'preparing' | 'streaming' | 'finished'>('preparing')
const [isLoading, setIsLoading] = useState(true)
const [contentToCopy, setContentToCopy] = useState('') const [contentToCopy, setContentToCopy] = useState('')
const [initialized, setInitialized] = useState(false)
// Use useRef for values that shouldn't trigger re-renders // Use useRef for values that shouldn't trigger re-renders
const initialized = useRef(false)
const assistantRef = useRef<Assistant | null>(null) const assistantRef = useRef<Assistant | null>(null)
const topicRef = useRef<Topic | null>(null) const topicRef = useRef<Topic | null>(null)
const askId = useRef('') const askId = useRef('')
@ -87,7 +88,7 @@ const ActionTranslate: FC<Props> = ({ action, scrollToBottom }) => {
// Initialize values only once // Initialize values only once
const initialize = useCallback(async () => { const initialize = useCallback(async () => {
if (initialized.current) { if (initialized) {
logger.silly('[initialize] Already initialized.') logger.silly('[initialize] Already initialized.')
return return
} }
@ -108,6 +109,7 @@ const ActionTranslate: FC<Props> = ({ action, scrollToBottom }) => {
// Initialize language pair. // Initialize language pair.
// It will update targetLangRef, so we could get latest target language in the following code // It will update targetLangRef, so we could get latest target language in the following code
await updateLanguagePair() await updateLanguagePair()
logger.silly('[initialize] UpdateLanguagePair completed.')
// Initialize assistant // Initialize assistant
const currentAssistant = await getDefaultTranslateAssistant(targetLangRef.current, action.selectedText) const currentAssistant = await getDefaultTranslateAssistant(targetLangRef.current, action.selectedText)
@ -116,8 +118,8 @@ const ActionTranslate: FC<Props> = ({ action, scrollToBottom }) => {
// Initialize topic // Initialize topic
topicRef.current = getDefaultTopic(currentAssistant.id) topicRef.current = getDefaultTopic(currentAssistant.id)
initialized.current = true setInitialized(true)
}, [action.selectedText, isLanguagesLoaded, updateLanguagePair]) }, [action.selectedText, initialized, isLanguagesLoaded, updateLanguagePair])
// Try to initialize when: // Try to initialize when:
// 1. action.selectedText change (generally will not) // 1. action.selectedText change (generally will not)
@ -128,26 +130,24 @@ const ActionTranslate: FC<Props> = ({ action, scrollToBottom }) => {
}, [initialize]) }, [initialize])
const fetchResult = useCallback(async () => { const fetchResult = useCallback(async () => {
if (!assistantRef.current || !topicRef.current || !action.selectedText || !initialized.current) return if (!assistantRef.current || !topicRef.current || !action.selectedText || !initialized) return
const setAskId = (id: string) => { const setAskId = (id: string) => {
askId.current = id askId.current = id
} }
const onStream = () => { const onStream = () => {
setIsContented(true) setStatus('streaming')
scrollToBottom?.() scrollToBottom?.()
} }
const onFinish = (content: string) => { const onFinish = (content: string) => {
setStatus('finished')
setContentToCopy(content) setContentToCopy(content)
setIsLoading(false)
} }
const onError = (error: Error) => { const onError = (error: Error) => {
setIsLoading(false) setStatus('finished')
setError(error.message) setError(error.message)
} }
setIsLoading(true)
let sourceLanguageCode: TranslateLanguageCode let sourceLanguageCode: TranslateLanguageCode
try { try {
@ -176,7 +176,7 @@ const ActionTranslate: FC<Props> = ({ action, scrollToBottom }) => {
assistantRef.current = assistant assistantRef.current = assistant
logger.debug('process once') logger.debug('process once')
processMessages(assistant, topicRef.current, assistant.content, setAskId, onStream, onFinish, onError) processMessages(assistant, topicRef.current, assistant.content, setAskId, onStream, onFinish, onError)
}, [action, targetLanguage, alterLanguage, scrollToBottom]) }, [action, targetLanguage, alterLanguage, scrollToBottom, initialized])
useEffect(() => { useEffect(() => {
fetchResult() fetchResult()
@ -184,14 +184,39 @@ const ActionTranslate: FC<Props> = ({ action, scrollToBottom }) => {
const allMessages = useTopicMessages(topicRef.current?.id || '') const allMessages = useTopicMessages(topicRef.current?.id || '')
const messageContent = useMemo(() => { const currentAssistantMessage = useMemo(() => {
const assistantMessages = allMessages.filter((message) => message.role === 'assistant') const assistantMessages = allMessages.filter((message) => message.role === 'assistant')
const lastAssistantMessage = assistantMessages[assistantMessages.length - 1] if (assistantMessages.length === 0) {
return lastAssistantMessage ? <MessageContent key={lastAssistantMessage.id} message={lastAssistantMessage} /> : null return null
}
return assistantMessages[assistantMessages.length - 1]
}, [allMessages]) }, [allMessages])
useEffect(() => {
// Sync message status
switch (currentAssistantMessage?.status) {
case AssistantMessageStatus.PROCESSING:
case AssistantMessageStatus.PENDING:
case AssistantMessageStatus.SEARCHING:
setStatus('streaming')
break
case AssistantMessageStatus.PAUSED:
case AssistantMessageStatus.ERROR:
case AssistantMessageStatus.SUCCESS:
setStatus('finished')
break
case undefined:
break
default:
logger.warn('Unexpected assistant message status:', { status: currentAssistantMessage?.status })
}
}, [currentAssistantMessage?.status])
const isPreparing = status === 'preparing'
const isStreaming = status === 'streaming'
const handleChangeLanguage = (targetLanguage: TranslateLanguage, alterLanguage: TranslateLanguage) => { const handleChangeLanguage = (targetLanguage: TranslateLanguage, alterLanguage: TranslateLanguage) => {
if (!initialized.current) { if (!initialized) {
return return
} }
setTargetLanguage(targetLanguage) setTargetLanguage(targetLanguage)
@ -202,15 +227,18 @@ const ActionTranslate: FC<Props> = ({ action, scrollToBottom }) => {
} }
const handlePause = () => { const handlePause = () => {
// FIXME: It doesn't work because abort signal is not set.
logger.silly('Try to pause: ', { id: askId.current })
if (askId.current) { if (askId.current) {
abortCompletion(askId.current) abortCompletion(askId.current)
setIsLoading(false) }
if (topicRef.current?.id) {
pauseTrace(topicRef.current.id)
} }
} }
const handleRegenerate = () => { const handleRegenerate = () => {
setContentToCopy('') setContentToCopy('')
setIsLoading(true)
fetchResult() fetchResult()
} }
@ -230,7 +258,7 @@ const ActionTranslate: FC<Props> = ({ action, scrollToBottom }) => {
title={t('translate.target_language')} title={t('translate.target_language')}
optionFilterProp="label" optionFilterProp="label"
onChange={(value) => handleChangeLanguage(getLanguageByLangcode(value), alterLanguage)} onChange={(value) => handleChangeLanguage(getLanguageByLangcode(value), alterLanguage)}
disabled={isLoading} disabled={isStreaming}
/> />
</Tooltip> </Tooltip>
<ArrowRightFromLine size={16} color="var(--color-text-3)" style={{ margin: '0 2px' }} /> <ArrowRightFromLine size={16} color="var(--color-text-3)" style={{ margin: '0 2px' }} />
@ -242,7 +270,7 @@ const ActionTranslate: FC<Props> = ({ action, scrollToBottom }) => {
title={t('translate.alter_language')} title={t('translate.alter_language')}
optionFilterProp="label" optionFilterProp="label"
onChange={(value) => handleChangeLanguage(targetLanguage, getLanguageByLangcode(value))} onChange={(value) => handleChangeLanguage(targetLanguage, getLanguageByLangcode(value))}
disabled={isLoading} disabled={isStreaming}
/> />
</Tooltip> </Tooltip>
<Tooltip placement="bottom" content={t('selection.action.translate.smart_translate_tips')}> <Tooltip placement="bottom" content={t('selection.action.translate.smart_translate_tips')}>
@ -269,13 +297,20 @@ const ActionTranslate: FC<Props> = ({ action, scrollToBottom }) => {
</OriginalContent> </OriginalContent>
)} )}
<Result> <Result>
{!isContented && isLoading && <LoadingOutlined style={{ fontSize: 16 }} spin />} {isPreparing && <LoadingOutlined style={{ fontSize: 16 }} spin />}
{messageContent} {!isPreparing && currentAssistantMessage && (
<MessageContent key={currentAssistantMessage.id} message={currentAssistantMessage} />
)}
</Result> </Result>
{error && <ErrorMsg>{error}</ErrorMsg>} {error && <ErrorMsg>{error}</ErrorMsg>}
</Container> </Container>
<FooterPadding /> <FooterPadding />
<WindowFooter loading={isLoading} onPause={handlePause} onRegenerate={handleRegenerate} content={contentToCopy} /> <WindowFooter
loading={isStreaming}
onPause={handlePause}
onRegenerate={handleRegenerate}
content={contentToCopy}
/>
</> </>
) )
} }
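Two things change in ActionTranslate besides the shared status union: initialization moves from a ref to state, and handlePause now also calls pauseTrace (with a FIXME noting the abort signal is not yet wired up). The ref-to-state switch matters because mutating a ref never re-runs the callbacks or effects that gate on it. A contrived sketch of the difference, with hypothetical hook names:

```ts
import { useCallback, useEffect, useRef, useState } from 'react'

// Ref gate: flipping `initialized.current` re-creates nothing, so the effect
// below never fires again and a fetch requested before init can be lost.
function useFetchWithRefGate(fetch: () => void) {
  const initialized = useRef(false)
  const fetchResult = useCallback(() => {
    if (!initialized.current) return
    fetch()
  }, [fetch])
  useEffect(() => {
    fetchResult()
  }, [fetchResult])
  return () => {
    initialized.current = true // no re-render, no effect re-run
  }
}

// State gate: flipping `initialized` re-creates fetchResult, the effect runs
// again, and the deferred fetch finally happens.
function useFetchWithStateGate(fetch: () => void) {
  const [initialized, setInitialized] = useState(false)
  const fetchResult = useCallback(() => {
    if (!initialized) return
    fetch()
  }, [fetch, initialized])
  useEffect(() => {
    fetchResult()
  }, [fetchResult])
  return () => setInitialized(true)
}
```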

View File

@ -44,59 +44,69 @@ vi.mock('@main/data/db/DbService', async () => {
}) })
// Mock electron modules that are commonly used in main process // Mock electron modules that are commonly used in main process
vi.mock('electron', () => ({ vi.mock('electron', () => {
app: { const mock = {
getPath: vi.fn((key: string) => { app: {
switch (key) { getPath: vi.fn((key: string) => {
case 'userData': switch (key) {
return '/mock/userData' case 'userData':
case 'temp': return '/mock/userData'
return '/mock/temp' case 'temp':
case 'logs': return '/mock/temp'
return '/mock/logs' case 'logs':
default: return '/mock/logs'
return '/mock/unknown' default:
return '/mock/unknown'
}
}),
getVersion: vi.fn(() => '1.0.0')
},
ipcMain: {
handle: vi.fn(),
on: vi.fn(),
once: vi.fn(),
removeHandler: vi.fn(),
removeAllListeners: vi.fn()
},
BrowserWindow: vi.fn(),
dialog: {
showErrorBox: vi.fn(),
showMessageBox: vi.fn(),
showOpenDialog: vi.fn(),
showSaveDialog: vi.fn()
},
shell: {
openExternal: vi.fn(),
showItemInFolder: vi.fn()
},
session: {
defaultSession: {
clearCache: vi.fn(),
clearStorageData: vi.fn()
} }
}), },
getVersion: vi.fn(() => '1.0.0') webContents: {
}, getAllWebContents: vi.fn(() => [])
ipcMain: { },
handle: vi.fn(), systemPreferences: {
on: vi.fn(), getMediaAccessStatus: vi.fn(),
once: vi.fn(), askForMediaAccess: vi.fn()
removeHandler: vi.fn(), },
removeAllListeners: vi.fn() nativeTheme: {
}, themeSource: 'system',
BrowserWindow: vi.fn(), shouldUseDarkColors: false,
dialog: { on: vi.fn(),
showErrorBox: vi.fn(), removeListener: vi.fn()
showMessageBox: vi.fn(), },
showOpenDialog: vi.fn(), screen: {
showSaveDialog: vi.fn() getPrimaryDisplay: vi.fn(),
}, getAllDisplays: vi.fn()
shell: { },
openExternal: vi.fn(), Notification: vi.fn()
showItemInFolder: vi.fn() }
},
session: { return { __esModule: true, ...mock, default: mock }
defaultSession: { })
clearCache: vi.fn(),
clearStorageData: vi.fn()
}
},
webContents: {
getAllWebContents: vi.fn(() => [])
},
systemPreferences: {
getMediaAccessStatus: vi.fn(),
askForMediaAccess: vi.fn()
},
screen: {
getPrimaryDisplay: vi.fn(),
getAllDisplays: vi.fn()
},
Notification: vi.fn()
}))
// Mock Winston for LoggerService dependencies // Mock Winston for LoggerService dependencies
vi.mock('winston', () => ({ vi.mock('winston', () => ({
@ -132,13 +142,17 @@ vi.mock('winston-daily-rotate-file', () => {
}) })
// Mock Node.js modules // Mock Node.js modules
vi.mock('node:os', () => ({ vi.mock('node:os', () => {
platform: vi.fn(() => 'darwin'), const mock = {
arch: vi.fn(() => 'x64'), platform: vi.fn(() => 'darwin'),
version: vi.fn(() => '20.0.0'), arch: vi.fn(() => 'x64'),
cpus: vi.fn(() => [{ model: 'Mock CPU' }]), version: vi.fn(() => '20.0.0'),
totalmem: vi.fn(() => 8 * 1024 * 1024 * 1024) // 8GB cpus: vi.fn(() => [{ model: 'Mock CPU' }]),
})) homedir: vi.fn(() => '/mock/home'),
totalmem: vi.fn(() => 8 * 1024 * 1024 * 1024) // 8GB
}
return { ...mock, default: mock }
})
vi.mock('node:path', async () => { vi.mock('node:path', async () => {
const actual = await vi.importActual('node:path') const actual = await vi.importActual('node:path')
@ -149,25 +163,29 @@ vi.mock('node:path', async () => {
} }
}) })
vi.mock('node:fs', () => ({ vi.mock('node:fs', () => {
promises: { const mock = {
access: vi.fn(), promises: {
readFile: vi.fn(), access: vi.fn(),
writeFile: vi.fn(), readFile: vi.fn(),
mkdir: vi.fn(), writeFile: vi.fn(),
readdir: vi.fn(), mkdir: vi.fn(),
stat: vi.fn(), readdir: vi.fn(),
unlink: vi.fn(), stat: vi.fn(),
rmdir: vi.fn() unlink: vi.fn(),
}, rmdir: vi.fn()
existsSync: vi.fn(), },
readFileSync: vi.fn(), existsSync: vi.fn(),
writeFileSync: vi.fn(), readFileSync: vi.fn(),
mkdirSync: vi.fn(), writeFileSync: vi.fn(),
readdirSync: vi.fn(), mkdirSync: vi.fn(),
statSync: vi.fn(), readdirSync: vi.fn(),
unlinkSync: vi.fn(), statSync: vi.fn(),
rmdirSync: vi.fn(), unlinkSync: vi.fn(),
createReadStream: vi.fn(), rmdirSync: vi.fn(),
createWriteStream: vi.fn() createReadStream: vi.fn(),
})) createWriteStream: vi.fn()
}
return { ...mock, default: mock }
})
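The node:os and node:fs mocks get the same treatment, minus __esModule, and node:os gains a homedir stub. With `default: mock` in place, both import styles hit the stubbed implementations; a quick usage sketch against the mocked values above:

```ts
import os, { platform } from 'node:os'

platform()    // 'darwin' — named import
os.platform() // 'darwin' — default import resolves to the same mock object
os.homedir()  // '/mock/home' — the newly added stub
```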