From 73dc3325df3c4a112adde6268cf93b485ac92fd6 Mon Sep 17 00:00:00 2001
From: kangfenmao
Date: Fri, 8 Aug 2025 23:48:00 +0800
Subject: [PATCH] refactor(ApiService): streamline tool usage logic and
 enhance MCP tool handling

Updated the logic for determining tool usage by consolidating the
conditions into a single variable. Improved the handling of MCP tools
by filtering active servers and checking for enabled tools before
sending notifications. Removed redundant code and ensured that tool
execution completion notifications are sent based on the new logic.
---
 src/renderer/src/services/ApiService.ts | 56 ++++++++++++-------------
 1 file changed, 27 insertions(+), 29 deletions(-)

diff --git a/src/renderer/src/services/ApiService.ts b/src/renderer/src/services/ApiService.ts
index 35495f3e82..1421a592ae 100644
--- a/src/renderer/src/services/ApiService.ts
+++ b/src/renderer/src/services/ApiService.ts
@@ -96,9 +96,19 @@ async function fetchExternalTool(
     const globalMemoryEnabled = selectGlobalMemoryEnabled(store.getState())
     const shouldSearchMemory = globalMemoryEnabled && assistant.enableMemory
 
+    // Get MCP tools
+    let mcpTools: MCPTool[] = []
+    const allMcpServers = store.getState().mcp.servers || []
+    const activedMcpServers = allMcpServers.filter((s) => s.isActive)
+    const assistantMcpServers = assistant.mcpServers || []
+    const enabledMCPs = activedMcpServers.filter((server) => assistantMcpServers.some((s) => s.id === server.id))
+    const showListTools = enabledMCPs && enabledMCPs.length > 0
+
+    // Whether any tool will be used
+    const hasAnyTool = shouldWebSearch || shouldKnowledgeSearch || shouldSearchMemory || showListTools
+
     // Send a progress notification when the tool chain starts
-    const willUseTools = shouldWebSearch || shouldKnowledgeSearch
-    if (willUseTools) {
+    if (hasAnyTool) {
       onChunkReceived({ type: ChunkType.EXTERNEL_TOOL_IN_PROGRESS })
     }
 
@@ -347,28 +357,7 @@ async function fetchExternalTool(
       }
     }
 
-    // Send a tool execution completion notification
-    const wasAnyToolEnabled = shouldWebSearch || shouldKnowledgeSearch || shouldSearchMemory
-    if (wasAnyToolEnabled) {
-      onChunkReceived({
-        type: ChunkType.EXTERNEL_TOOL_COMPLETE,
-        external_tool: {
-          webSearch: webSearchResponseFromSearch,
-          knowledge: knowledgeReferencesFromSearch,
-          memories: memorySearchReferences
-        }
-      })
-    }
-
-    // Get MCP tools (Fix duplicate declaration)
-    let mcpTools: MCPTool[] = []
-    const allMcpServers = store.getState().mcp.servers || []
-    const activedMcpServers = allMcpServers.filter((s) => s.isActive)
-    const assistantMcpServers = assistant.mcpServers || []
-
-    const enabledMCPs = activedMcpServers.filter((server) => assistantMcpServers.some((s) => s.id === server.id))
-
-    if (enabledMCPs && enabledMCPs.length > 0) {
+    if (showListTools) {
       try {
         const spanContext = currentSpan(lastUserMessage.topicId, assistant.model?.name)?.spanContext()
         const toolPromises = enabledMCPs.map<Promise<MCPTool[]>>(async (mcpServer) => {
@@ -385,9 +374,6 @@ async function fetchExternalTool(
           .filter((result): result is PromiseFulfilledResult<MCPTool[]> => result.status === 'fulfilled')
           .map((result) => result.value)
           .flat()
-        // Add built-in tools
-        // const { BUILT_IN_TOOLS } = await import('../tools')
-        // mcpTools.push(...BUILT_IN_TOOLS)
 
         // Decide how to build the system prompt based on toolUseMode
         const basePrompt = assistant.prompt
@@ -403,6 +389,18 @@ async function fetchExternalTool(
       }
     }
 
+    // Send a tool execution completion notification
+    if (hasAnyTool) {
+      onChunkReceived({
+        type: ChunkType.EXTERNEL_TOOL_COMPLETE,
+        external_tool: {
+          webSearch: webSearchResponseFromSearch,
+          knowledge: knowledgeReferencesFromSearch,
+          memories: memorySearchReferences
+        }
+      })
+    }
+
     return { mcpTools }
   } catch (error) {
     if (isAbortError(error)) throw error
@@ -437,8 +435,6 @@ export async function fetchChatCompletion({
 }) {
   logger.debug('fetchChatCompletion', messages, assistant)
 
-  onChunkReceived({ type: ChunkType.LLM_RESPONSE_CREATED })
-
   if (assistant.prompt && containsSupportedVariables(assistant.prompt)) {
     assistant.prompt = await replacePromptVariables(assistant.prompt, assistant.model?.name)
   }
@@ -461,6 +457,8 @@ export async function fetchChatCompletion({
   const { mcpTools } = await fetchExternalTool(lastUserMessage, assistant, onChunkReceived, lastAnswer)
   const model = assistant.model || getDefaultModel()
 
+  onChunkReceived({ type: ChunkType.LLM_RESPONSE_CREATED })
+
   const { maxTokens, contextCount } = getAssistantSettings(assistant)
 
   const filteredMessages = filterUsefulMessages(messages)
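
Note: below is a minimal standalone sketch of the consolidated gating
pattern this patch introduces. The names hasAnyTool, showListTools and
enabledMCPs follow the diff; the MCPServer and Chunk types and the
gateExternalTools wrapper are simplified assumptions for illustration,
not the real ApiService signatures.

  // Simplified stand-ins for the real store/chunk definitions (assumed shapes).
  type MCPServer = { id: string; isActive: boolean }
  type Chunk =
    | { type: 'EXTERNEL_TOOL_IN_PROGRESS' }
    | { type: 'EXTERNEL_TOOL_COMPLETE'; external_tool: Record<string, unknown> }

  function gateExternalTools(
    allServers: MCPServer[],        // every configured MCP server
    assistantServers: MCPServer[],  // servers attached to this assistant
    shouldWebSearch: boolean,
    shouldKnowledgeSearch: boolean,
    shouldSearchMemory: boolean,
    onChunkReceived: (chunk: Chunk) => void
  ) {
    // Only servers that are both globally active and attached to the assistant count.
    const enabledMCPs = allServers.filter(
      (server) => server.isActive && assistantServers.some((s) => s.id === server.id)
    )
    const showListTools = enabledMCPs.length > 0

    // One flag now gates both notifications; before this patch the start
    // notification only covered web and knowledge search, so memory-only
    // and MCP-only runs never emitted an IN_PROGRESS chunk.
    const hasAnyTool = shouldWebSearch || shouldKnowledgeSearch || shouldSearchMemory || showListTools

    if (hasAnyTool) onChunkReceived({ type: 'EXTERNEL_TOOL_IN_PROGRESS' })
    // ... run web search / knowledge search / memory search / MCP listTools here ...
    if (hasAnyTool) onChunkReceived({ type: 'EXTERNEL_TOOL_COMPLETE', external_tool: {} })

    return { enabledMCPs, hasAnyTool }
  }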
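
For the MCP listing itself, the diff's toolPromises/PromiseFulfilledResult
lines follow the standard Promise.allSettled pattern. A sketch under the
same assumptions (MCPTool is simplified; listTools is a hypothetical
stand-in for whatever client call fetches a server's tool list):

  type MCPTool = { serverId: string; name: string }

  async function listAllTools(
    servers: MCPServer[],
    listTools: (server: MCPServer) => Promise<MCPTool[]>
  ): Promise<MCPTool[]> {
    // Query every enabled server in parallel; one failing server must not
    // sink the whole batch, hence allSettled rather than Promise.all.
    const results = await Promise.allSettled(servers.map((server) => listTools(server)))
    return results
      .filter((r): r is PromiseFulfilledResult<MCPTool[]> => r.status === 'fulfilled')
      .flatMap((r) => r.value)
  }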