diff --git a/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/promptToolUsePlugin.ts b/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/promptToolUsePlugin.ts index 224cee05ae..11a69a3354 100644 --- a/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/promptToolUsePlugin.ts +++ b/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/promptToolUsePlugin.ts @@ -21,9 +21,6 @@ const TOOL_USE_TAG_CONFIG: TagConfig = { separator: '\n' } -/** - * 默认系统提示符模板 - */ export const DEFAULT_SYSTEM_PROMPT = `In this environment you have access to a set of tools you can use to answer the user's question. \ You can use one or more tools per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use. @@ -59,13 +56,6 @@ For example, if the result of the tool use is an image file, you can use it in t Always adhere to this format for the tool use to ensure proper parsing and execution. -## Tool Use Examples -{{ TOOL_USE_EXAMPLES }} - -## Tool Use Available Tools -Above example were using notional tools that might not exist for you. You only have access to these tools: -{{ AVAILABLE_TOOLS }} - ## Tool Use Rules Here are the rules you should always follow to solve your task: 1. Always use the right arguments for the tools. Never use variable names as the action arguments, use the value instead. @@ -74,6 +64,9 @@ Here are the rules you should always follow to solve your task: 4. Never re-do a tool call that you previously did with the exact same parameters. 5. For tool use, MAKE SURE use XML tag format as shown in the examples above. Do not use any other format. +## Tool Use Examples +{{ TOOLS_INFO }} + ## Response rules Respond in the language of the user's query, unless the user instructions specify additional requirements for the language to be used. 
@@ -184,13 +177,29 @@ ${result} /** * 默认的系统提示符构建函数(提取自 Cherry Studio) */ -function defaultBuildSystemPrompt(userSystemPrompt: string, tools: ToolSet): string { +function defaultBuildSystemPrompt(userSystemPrompt: string, tools: ToolSet, mcpMode?: string): string { const availableTools = buildAvailableTools(tools) if (availableTools === null) return userSystemPrompt - const fullPrompt = DEFAULT_SYSTEM_PROMPT.replace('{{ TOOL_USE_EXAMPLES }}', DEFAULT_TOOL_USE_EXAMPLES) + if (mcpMode === 'auto') { + return DEFAULT_SYSTEM_PROMPT.replace('{{ TOOLS_INFO }}', getHubModeSystemPrompt(tools)).replace( + '{{ USER_SYSTEM_PROMPT }}', + userSystemPrompt || '' + ) + } + const toolsInfo = `## Tool Use Examples + {{ TOOL_USE_EXAMPLES }} + + ## Tool Use Available Tools + Above example were using notional tools that might not exist for you. You only have access to these tools: + {{ AVAILABLE_TOOLS }}` + .replace('{{ TOOL_USE_EXAMPLES }}', DEFAULT_TOOL_USE_EXAMPLES) .replace('{{ AVAILABLE_TOOLS }}', availableTools) - .replace('{{ USER_SYSTEM_PROMPT }}', userSystemPrompt || '') + + const fullPrompt = DEFAULT_SYSTEM_PROMPT.replace('{{ TOOLS_INFO }}', toolsInfo).replace( + '{{ USER_SYSTEM_PROMPT }}', + userSystemPrompt || '' + ) return fullPrompt } @@ -255,7 +264,12 @@ function defaultParseToolUse(content: string, tools: ToolSet): { results: ToolUs } export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => { - const { enabled = true, buildSystemPrompt = defaultBuildSystemPrompt, parseToolUse = defaultParseToolUse } = config + const { + enabled = true, + buildSystemPrompt = defaultBuildSystemPrompt, + parseToolUse = defaultParseToolUse, + mcpMode + } = config return definePlugin({ name: 'built-in:prompt-tool-use', @@ -285,7 +299,7 @@ export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => { // 构建系统提示符(只包含非 provider-defined 工具) const userSystemPrompt = typeof params.system === 'string' ? 
params.system : '' - const systemPrompt = buildSystemPrompt(userSystemPrompt, promptTools) + const systemPrompt = buildSystemPrompt(userSystemPrompt, promptTools, mcpMode) let systemMessage: string | null = systemPrompt if (config.createSystemMessage) { // 🎯 如果用户提供了自定义处理函数,使用它 diff --git a/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/type.ts b/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/type.ts index 4937b25601..c92dfe4bde 100644 --- a/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/type.ts +++ b/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/type.ts @@ -23,6 +23,7 @@ export interface PromptToolUseConfig extends BaseToolUsePluginConfig { // 自定义工具解析函数(可选,有默认实现) parseToolUse?: (content: string, tools: ToolSet) => { results: ToolUseResult[]; content: string } createSystemMessage?: (systemPrompt: string, originalParams: any, context: AiRequestContext) => string | null + mcpMode?: string } /** diff --git a/src/renderer/src/aiCore/middleware/AiSdkMiddlewareBuilder.ts b/src/renderer/src/aiCore/middleware/AiSdkMiddlewareBuilder.ts index b2a796bd33..0d27390370 100644 --- a/src/renderer/src/aiCore/middleware/AiSdkMiddlewareBuilder.ts +++ b/src/renderer/src/aiCore/middleware/AiSdkMiddlewareBuilder.ts @@ -1,7 +1,7 @@ import type { WebSearchPluginConfig } from '@cherrystudio/ai-core/built-in/plugins' import { loggerService } from '@logger' import { isGemini3Model, isSupportedThinkingTokenQwenModel } from '@renderer/config/models' -import type { MCPTool } from '@renderer/types' +import type { McpMode, MCPTool } from '@renderer/types' import { type Assistant, type Message, type Model, type Provider, SystemProviderIds } from '@renderer/types' import type { Chunk } from '@renderer/types/chunk' import { isOllamaProvider, isSupportEnableThinkingProvider } from '@renderer/utils/provider' @@ -38,6 +38,7 @@ export interface AiSdkMiddlewareConfig { enableWebSearch: boolean enableGenerateImage: boolean enableUrlContext: boolean + mcpMode?: McpMode 
mcpTools?: MCPTool[] uiMessages?: Message[] // 内置搜索配置 diff --git a/src/renderer/src/aiCore/plugins/PluginBuilder.ts b/src/renderer/src/aiCore/plugins/PluginBuilder.ts index eb46eb7524..d2104550a0 100644 --- a/src/renderer/src/aiCore/plugins/PluginBuilder.ts +++ b/src/renderer/src/aiCore/plugins/PluginBuilder.ts @@ -47,6 +47,7 @@ export function buildPlugins( plugins.push( createPromptToolUsePlugin({ enabled: true, + mcpMode: middlewareConfig.mcpMode, createSystemMessage: (systemPrompt, params, context) => { const modelId = typeof context.model === 'string' ? context.model : context.model.modelId if (modelId.includes('o1-mini') || modelId.includes('o1-preview')) { diff --git a/src/renderer/src/aiCore/prepareParams/parameterBuilder.ts b/src/renderer/src/aiCore/prepareParams/parameterBuilder.ts index 0e33689d2d..8c8187e5b2 100644 --- a/src/renderer/src/aiCore/prepareParams/parameterBuilder.ts +++ b/src/renderer/src/aiCore/prepareParams/parameterBuilder.ts @@ -26,7 +26,7 @@ import { isSupportedThinkingTokenModel, isWebSearchModel } from '@renderer/config/models' -import { getAutoModeSystemPrompt } from '@renderer/config/prompts-code-mode' +import { getHubModeSystemPrompt } from '@renderer/config/prompts-code-mode' import { fetchAllActiveServerTools } from '@renderer/services/ApiService' import { getDefaultModel } from '@renderer/services/AssistantService' import store from '@renderer/store' @@ -249,7 +249,7 @@ export async function buildStreamTextParams( if (getEffectiveMcpMode(assistant) === 'auto') { const allActiveTools = await fetchAllActiveServerTools() - const autoModePrompt = getAutoModeSystemPrompt(allActiveTools) + const autoModePrompt = getHubModeSystemPrompt(allActiveTools) if (autoModePrompt) { systemPrompt = systemPrompt ? 
`${systemPrompt}\n\n${autoModePrompt}` : autoModePrompt } diff --git a/src/renderer/src/config/prompts-code-mode.ts b/src/renderer/src/config/prompts-code-mode.ts index 37541a314f..a2b3592c09 100644 --- a/src/renderer/src/config/prompts-code-mode.ts +++ b/src/renderer/src/config/prompts-code-mode.ts @@ -132,94 +132,6 @@ await parallel(ToolA(), ToolB()) // Correct: runs in parallel - Treat \`console.*\` as debugging only, never as the primary result. ` -/** - * Auto Mode System Prompt - For XML tool_use format - * Used when model needs explicit XML format to invoke tools - * Only teaches search and exec tools - */ -const AUTO_MODE_SYSTEM_PROMPT_BASE = ` -You can discover and invoke MCP tools through a hub using TWO meta-tools: \`search\` and \`exec\`. - -## Tool Invocation Format - -When you want to call a tool, output exactly one XML block: - - - {tool_name} - {json_arguments} - - -Rules: -- \`{tool_name}\` MUST be either \`search\` or \`exec\` -- \`\` MUST contain valid JSON (no comments, no trailing commas) -- Do NOT include extra text before or after the \`\` block - -## Available Tools - -1. **search** - Discover MCP tools by keyword - \`\`\`json - { "query": "keyword1,keyword2", "limit": 10 } - \`\`\` - Returns JavaScript function declarations with JSDoc showing names, parameters, and return types. - -2. **exec** - Execute JavaScript that calls discovered tools - \`\`\`json - { "code": "const r = await ToolName({...}); return r;" } - \`\`\` - **CRITICAL:** You MUST \`return\` the final value, or result will be \`undefined\`. - -## Workflow - -1. Call \`search\` with keywords to discover tools -2. Read the returned function signatures carefully -3. Call \`exec\` with JavaScript code that: - - Uses ONLY functions returned by \`search\` - - Calls them with \`await\` - - Ends with explicit \`return\` -4. 
Answer the user based on the result - -## Example - -User: "Calculate 15 * 7" - -Assistant calls search: - - search - {"query": "python,calculator"} - - -Hub returns function signature: -\`\`\`js -async function CherryPython_pythonExecute(params: { code: string }): Promise -\`\`\` - -Assistant calls exec: - - exec - {"code": "const result = await CherryPython_pythonExecute({ code: '15 * 7' }); return result;"} - - -Hub returns: { "result": 105 } - -Assistant answers: "15 × 7 = 105" - -## Common Mistakes - -❌ Forgetting to return (result will be undefined): -\`\`\`js -await SomeTool({ id: "123" }) -\`\`\` - -✅ Always return: -\`\`\`js -const data = await SomeTool({ id: "123" }); return data; -\`\`\` - -❌ Calling exec before search - you must discover tools first - -❌ Using functions not returned by search -` - function buildToolsSection(tools: ToolInfo[]): string { const existingNames = new Set() return tools @@ -233,9 +145,6 @@ function buildToolsSection(tools: ToolInfo[]): string { .join('\n') } -/** - * Get system prompt for Hub Mode (native MCP tool calling) - */ export function getHubModeSystemPrompt(tools: ToolInfo[] = []): string { if (tools.length === 0) { return '' @@ -244,22 +153,6 @@ export function getHubModeSystemPrompt(tools: ToolInfo[] = []): string { const toolsSection = buildToolsSection(tools) return `${HUB_MODE_SYSTEM_PROMPT_BASE} -### Available Tools -${toolsSection} -` -} - -/** - * Get system prompt for Auto Mode (XML tool_use format) - */ -export function getAutoModeSystemPrompt(tools: ToolInfo[] = []): string { - if (tools.length === 0) { - return '' - } - - const toolsSection = buildToolsSection(tools) - - return `${AUTO_MODE_SYSTEM_PROMPT_BASE} ## Discoverable Tools (use search to get full signatures) ${toolsSection} ` diff --git a/src/renderer/src/services/ApiService.ts b/src/renderer/src/services/ApiService.ts index 5b86d3c5c5..105779093f 100644 --- a/src/renderer/src/services/ApiService.ts +++ b/src/renderer/src/services/ApiService.ts @@ 
-245,6 +245,7 @@ export async function fetchChatCompletion({ const usePromptToolUse = isPromptToolUse(assistant) || (isToolUseModeFunction(assistant) && !isFunctionCallingModel(assistant.model)) + const mcpMode = getEffectiveMcpMode(assistant) const middlewareConfig: AiSdkMiddlewareConfig = { streamOutput: assistant.settings?.streamOutput ?? true, onChunk: onChunkReceived, @@ -257,6 +258,7 @@ export async function fetchChatCompletion({ enableWebSearch: capabilities.enableWebSearch, enableGenerateImage: capabilities.enableGenerateImage, enableUrlContext: capabilities.enableUrlContext, + mcpMode, mcpTools, uiMessages, knowledgeRecognition: assistant.knowledgeRecognition