fix(aiCore): update tool call status and enhance execution flow

- Changed the initial tool call status from 'invoking' to 'pending' so the state before execution is reported accurately (see the chunk-handling sketch below).
- Added a user confirmation step before executing tools that are not auto-approved, improving user interaction.
- Refactored the handling of experimental_context in the tool execution parameters to support chunk streaming.
- Commented out unused tool-input event cases in AiSdkToChunkAdapter for cleaner code.
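
A rough sketch of what a consumer of the onChunk callback now observes (the ChunkType values come from the diffs below; the handler and UI hook names are hypothetical):

// Hypothetical chunk consumer: MCP_TOOL_PENDING fires when the call is created
// (possibly awaiting user confirmation), MCP_TOOL_IN_PROGRESS once it executes.
function handleToolChunk(chunk: Chunk) {
  switch (chunk.type) {
    case ChunkType.MCP_TOOL_PENDING:
      showConfirmationPrompt(chunk.responses) // hypothetical UI hook
      break
    case ChunkType.MCP_TOOL_IN_PROGRESS:
      showToolRunning(chunk.responses) // hypothetical UI hook
      break
  }
}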
MyPrototypeWhat 2025-08-27 19:37:10 +08:00
parent 1b3fcb2e55
commit bfcb215c16
4 changed files with 51 additions and 16 deletions


@@ -135,11 +135,11 @@ export class AiSdkToChunkAdapter {
       // === Tool-call related events (raw AI SDK events, when not handled by middleware) ===
-      case 'tool-input-start':
-      case 'tool-input-delta':
-      case 'tool-input-end':
-        this.toolCallHandler.handleToolCallCreated(chunk)
-        break
+      // case 'tool-input-start':
+      // case 'tool-input-delta':
+      // case 'tool-input-end':
+      //   this.toolCallHandler.handleToolCallCreated(chunk)
+      //   break
       // case 'tool-input-delta':
       //   this.toolCallHandler.handleToolCallCreated(chunk)


@@ -208,14 +208,14 @@ export class ToolCallChunkHandler {
       id: toolCallId,
       tool: tool,
       arguments: args,
-      status: 'invoking',
+      status: 'pending',
       toolCallId: toolCallId
     }

     // Invoke onChunk
     if (this.onChunk) {
       this.onChunk({
-        type: ChunkType.MCP_TOOL_IN_PROGRESS,
+        type: ChunkType.MCP_TOOL_PENDING,
         responses: [toolResponse]
       })
     }
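
From the fields assigned here and in the mcp-tools hunk further down, the response object appears to have roughly this shape (inferred from the diff; the real MCPToolResponse type lives in @renderer/types and may carry more fields and status values):

// Sketch of MCPToolResponse as used in this commit, not the actual definition.
interface MCPToolResponseSketch {
  id: string          // now reuses the AI SDK toolCallId
  tool: MCPTool       // the MCP tool being invoked
  arguments: unknown  // raw tool call arguments
  status: string      // 'pending' and 'invoking' are the values seen in this commit
  toolCallId: string
}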


@@ -236,7 +236,7 @@ export default class ModernAiProvider {
     const streamResult = await executor.streamText(
       modelId,
-      params,
+      { ...params, experimental_context: { onChunk: config.onChunk } },
       middlewares.length > 0 ? { middlewares } : undefined
     )
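
This works because the AI SDK threads experimental_context from the call site into every tool's execute options, which is how onChunk reaches tool execution in the mcp-tools file below. A minimal sketch using the plain streamText from 'ai' (the executor wrapper's exact signature is assumed, as are the helper types):

import { streamText, type LanguageModel, type ModelMessage, type ToolSet } from 'ai'
import type { Chunk } from '@renderer/types/chunk'

// Sketch: pass a chunk callback to tools via experimental_context (AI SDK v5).
function streamWithToolChunks(
  model: LanguageModel,
  messages: ModelMessage[],
  tools: ToolSet,
  onChunk: (chunk: Chunk) => void
) {
  return streamText({
    model,
    messages,
    tools,
    // Surfaces in each tool as: execute(input, { toolCallId, experimental_context })
    experimental_context: { onChunk }
  })
}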


@ -1,9 +1,10 @@
import { Tool } from '@cherrystudio/ai-core'
import { loggerService } from '@logger'
// import { AiSdkTool, ToolCallResult } from '@renderer/aiCore/tools/types'
import { MCPTool, MCPToolResponse } from '@renderer/types'
import { callMCPTool } from '@renderer/utils/mcp-tools'
import { jsonSchema, tool } from 'ai'
import { Chunk, ChunkType } from '@renderer/types/chunk'
import { callMCPTool, getMcpServerByTool, isToolAutoApproved } from '@renderer/utils/mcp-tools'
import { requestToolConfirmation } from '@renderer/utils/userConfirmation'
import { jsonSchema, type Tool, tool } from 'ai'
import { JSONSchema7 } from 'json-schema'
const logger = loggerService.withContext('MCP-utils')
@@ -31,18 +32,52 @@ export function convertMcpToolsToAiSdkTools(mcpTools: MCPTool[]): Record<string,
     tools[mcpTool.name] = tool({
       description: mcpTool.description || `Tool from ${mcpTool.serverName}`,
       inputSchema: jsonSchema(mcpTool.inputSchema as JSONSchema7),
-      execute: async (params) => {
+      execute: async (params, { toolCallId, experimental_context }) => {
+        const { onChunk } = experimental_context as { onChunk: (chunk: Chunk) => void }
         // Create an adapted MCPToolResponse object
         const toolResponse: MCPToolResponse = {
-          id: `tool_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`,
+          id: toolCallId,
           tool: mcpTool,
           arguments: params,
-          status: 'invoking',
-          toolCallId: `call_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`
+          status: 'pending',
+          toolCallId
         }

         try {
-          // Reuse the existing callMCPTool function
+          // Check whether auto-approval is enabled
+          const server = getMcpServerByTool(mcpTool)
+          const isAutoApproveEnabled = isToolAutoApproved(mcpTool, server)
+
+          let confirmed = true
+          if (!isAutoApproveEnabled) {
+            // Request user confirmation
+            logger.debug(`Requesting user confirmation for tool: ${mcpTool.name}`)
+            confirmed = await requestToolConfirmation(toolResponse.id)
+          }
+
+          if (!confirmed) {
+            // User declined to execute the tool
+            logger.debug(`User cancelled tool execution: ${mcpTool.name}`)
+            return {
+              content: [
+                {
+                  type: 'text',
+                  text: `User declined to execute tool "${mcpTool.name}".`
+                }
+              ],
+              isError: false
+            }
+          }
+
+          // User confirmed or auto-approved; execute the tool
+          toolResponse.status = 'invoking'
+          logger.debug(`Executing tool: ${mcpTool.name}`)
+          onChunk({
+            type: ChunkType.MCP_TOOL_IN_PROGRESS,
+            responses: [toolResponse]
+          })
+
           const result = await callMCPTool(toolResponse)
           // Return the result; the AI SDK handles serialization
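
requestToolConfirmation presumably returns a promise that the confirmation UI resolves later, keyed by the tool response id. A hypothetical sketch of that pattern (the real implementation lives in @renderer/utils/userConfirmation and may differ; resolveToolConfirmation is an assumed name, not taken from this commit):

// Hypothetical pending-promise registry keyed by tool response id.
const pendingConfirmations = new Map<string, (confirmed: boolean) => void>()

export function requestToolConfirmation(id: string): Promise<boolean> {
  return new Promise<boolean>((resolve) => pendingConfirmations.set(id, resolve))
}

// Called from the confirmation UI when the user approves or declines.
export function resolveToolConfirmation(id: string, confirmed: boolean): void {
  pendingConfirmations.get(id)?.(confirmed)
  pendingConfirmations.delete(id)
}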