fix: empty response when using MCP with the function-calling method (#8296)
* feat: add logging to debug the middleware processing flow. Logging is added in several middlewares to trace chunk handling, and in AiProvider to debug the middleware-removal logic.
* fix(openai): tool_call chunks were being skipped; handle the case where choice.delta.content is null, and add log output for debugging chunk data.
* fix(openai): fix the empty-content check in streaming responses.
* fix(openai): fix the tool_calls check in streaming responses.
This commit is contained in:
parent 2e77792042
commit 0149cfbd21
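The heart of the fix is the delta filter in the OpenAI streaming path (second hunk below). When the model answers with a function call, the streamed delta carries tool_calls while content is null, so a filter that only accepts a non-empty content string classifies the chunk as empty and drops it, which is why MCP calls in function-calling mode came back as empty responses. A minimal sketch of the corrected predicate, with a simplified delta type standing in for the OpenAI SDK's:

// Simplified shape of an OpenAI streaming delta (illustrative, not the full SDK type).
interface Delta {
  content?: string | null
  tool_calls?: unknown[]
  reasoning_content?: string
}

// True if the delta carries anything worth forwarding downstream.
// The tool_calls clause is the fix: a function-call delta arrives with
// content === null, which the old string-only check rejected.
function hasForwardableDelta(delta: Delta | undefined): boolean {
  if (!delta || Object.keys(delta).length === 0) return false
  return (
    !('content' in delta) ||
    (Array.isArray(delta.tool_calls) && delta.tool_calls.length > 0) ||
    (typeof delta.content === 'string' && delta.content !== '') ||
    (typeof delta.reasoning_content === 'string' && delta.reasoning_content !== '')
  )
}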
@@ -692,6 +692,7 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
     return (context: ResponseChunkTransformerContext) => ({
       async transform(chunk: OpenAISdkRawChunk, controller: TransformStreamDefaultController<GenericChunk>) {
         // Keep the usage info continuously updated
+        logger.silly('chunk', chunk)
         if (chunk.usage) {
           lastUsageInfo = {
             prompt_tokens: chunk.usage.prompt_tokens || 0,
@@ -714,6 +715,7 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
           choice.delta &&
           Object.keys(choice.delta).length > 0 &&
           (!('content' in choice.delta) ||
+            (choice.delta.tool_calls && choice.delta.tool_calls.length > 0) ||
             (typeof choice.delta.content === 'string' && choice.delta.content !== '') ||
             (typeof (choice.delta as any).reasoning_content === 'string' &&
               (choice.delta as any).reasoning_content !== '') ||
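For context, the first streamed chunk of a function call looks roughly like the value below (an illustrative payload based on the OpenAI chat completions streaming format; the values are made up). Its content is null, so without the tool_calls clause added above, the whole chunk was treated as empty and skipped:

// Illustrative first chunk of a streamed function call; note content: null.
const toolCallChunk = {
  choices: [
    {
      index: 0,
      delta: {
        content: null, // no text in a tool-call delta
        tool_calls: [
          { index: 0, id: 'call_abc123', type: 'function', function: { name: 'search', arguments: '' } }
        ]
      },
      finish_reason: null
    }
  ]
}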
@@ -77,34 +77,45 @@ export default class AiProvider {
         .add(MiddlewareRegistry[ImageGenerationMiddlewareName])
     } else {
       // Existing logic for other models
+      logger.silly('Builder Params', params)
       if (!params.enableReasoning) {
         // Commenting this out does not affect turning thinking off as usual; the performance cost is negligible
         // builder.remove(ThinkingTagExtractionMiddlewareName)
         builder.remove(ThinkChunkMiddlewareName)
+        logger.silly('ThinkChunkMiddleware is removed')
       }
       // Note: checking against the client would cause TypeScript type narrowing
       if (!(this.apiClient instanceof OpenAIAPIClient) && !(this.apiClient instanceof OpenAIResponseAPIClient)) {
+        logger.silly('ThinkingTagExtractionMiddleware is removed')
         builder.remove(ThinkingTagExtractionMiddlewareName)
       }
       if (!(this.apiClient instanceof AnthropicAPIClient) && !(this.apiClient instanceof OpenAIResponseAPIClient)) {
+        logger.silly('RawStreamListenerMiddleware is removed')
         builder.remove(RawStreamListenerMiddlewareName)
       }
       if (!params.enableWebSearch) {
+        logger.silly('WebSearchMiddleware is removed')
         builder.remove(WebSearchMiddlewareName)
       }
       if (!params.mcpTools?.length) {
         builder.remove(ToolUseExtractionMiddlewareName)
+        logger.silly('ToolUseExtractionMiddleware is removed')
         builder.remove(McpToolChunkMiddlewareName)
+        logger.silly('McpToolChunkMiddleware is removed')
       }
       if (isEnabledToolUse(params.assistant) && isFunctionCallingModel(model)) {
         builder.remove(ToolUseExtractionMiddlewareName)
+        logger.silly('ToolUseExtractionMiddleware is removed')
       }
       if (params.callType !== 'chat') {
+        logger.silly('AbortHandlerMiddleware is removed')
         builder.remove(AbortHandlerMiddlewareName)
       }
       if (params.callType === 'test') {
         builder.remove(ErrorHandlerMiddlewareName)
+        logger.silly('ErrorHandlerMiddleware is removed')
         builder.remove(FinalChunkConsumerMiddlewareName)
+        logger.silly('FinalChunkConsumerMiddleware is removed')
       }
     }
 
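The AiProvider hunk applies one pattern throughout: drop middlewares the current call cannot use, and log each removal at silly level so a chunk that disappears can be traced back to a removed stage. Below is a minimal sketch of such a builder; MiddlewareBuilder here is hypothetical and only illustrates the add/remove contract, not Cherry Studio's actual API:

// Hypothetical minimal middleware builder showing the add/remove pattern.
type Middleware = { name: string }

class MiddlewareBuilder {
  private chain: Middleware[] = []

  add(mw: Middleware): this {
    this.chain.push(mw) // append a stage to the pipeline
    return this
  }

  remove(name: string): this {
    this.chain = this.chain.filter((mw) => mw.name !== name) // drop a stage by name
    return this
  }

  build(): Middleware[] {
    return this.chain
  }
}

// Usage mirroring the diff: strip what the call cannot use, then build.
const builder = new MiddlewareBuilder()
  .add({ name: 'WebSearchMiddleware' })
  .add({ name: 'McpToolChunkMiddleware' })
builder.remove('WebSearchMiddleware')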
@@ -100,6 +100,7 @@ function createToolHandlingTransform(
     async transform(chunk: GenericChunk, controller) {
       try {
         // Handle MCP tool progress chunks
+        logger.silly('chunk', chunk)
         if (chunk.type === ChunkType.MCP_TOOL_CREATED) {
           const createdChunk = chunk as MCPToolCreatedChunk
 
@@ -43,6 +43,7 @@ export const TextChunkMiddleware: CompletionsMiddleware =
     const enhancedTextStream = resultFromUpstream.pipeThrough(
       new TransformStream<GenericChunk, GenericChunk>({
         transform(chunk: GenericChunk, controller) {
+          logger.silly('chunk', chunk)
          if (chunk.type === ChunkType.TEXT_DELTA) {
            accumulatedTextContent += chunk.text
 
@@ -72,6 +72,7 @@ export const ThinkingTagExtractionMiddleware: CompletionsMiddleware =
     const processedStream = resultFromUpstream.pipeThrough(
       new TransformStream<GenericChunk, GenericChunk>({
         transform(chunk: GenericChunk, controller) {
+          logger.silly('chunk', chunk)
           if (chunk.type === ChunkType.TEXT_DELTA) {
             const textChunk = chunk as TextDeltaChunk
 
@@ -69,6 +69,7 @@ function createToolUseExtractionTransform(
     async transform(chunk: GenericChunk, controller) {
       try {
         // Process text content and detect tool-use tags
+        logger.silly('chunk', chunk)
         if (chunk.type === ChunkType.TEXT_DELTA) {
           const textChunk = chunk as TextDeltaChunk
 
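The four middleware hunks above all add the same probe: a logger.silly('chunk', chunk) at the top of a TransformStream's transform, making every chunk visible at each middleware boundary without changing what flows downstream. The pattern reduced to its essentials; GenericChunk and the logger are stand-ins for the project's own types, and silly is assumed to be the most verbose log level, as in winston-style loggers:

// A pass-through TransformStream that logs every chunk it sees.
type GenericChunk = { type: string; [key: string]: unknown }

const logger = {
  silly: (msg: string, data?: unknown) => console.debug(msg, data)
}

const loggingTap = new TransformStream<GenericChunk, GenericChunk>({
  transform(chunk, controller) {
    logger.silly('chunk', chunk) // observe the chunk at this boundary...
    controller.enqueue(chunk) // ...then forward it unchanged
  }
})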
@@ -398,6 +398,7 @@ export async function fetchChatCompletion({
     filterEmptyMessages(filterContextMessages(takeRight(filteredMessages, contextCount + 2))) // take the maximum context count used by the previous providers
   )
 
+  // FIXME: for qwen3, enableReasoning still evaluates to true even when thinking is disabled
   const enableReasoning =
     ((isSupportedThinkingTokenModel(model) || isSupportedReasoningEffortModel(model)) &&
       assistant.settings?.reasoning_effort !== undefined) ||
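The FIXME in the last hunk follows from the predicate's shape: if the model supports thinking tokens and reasoning_effort is defined in the assistant settings at all, the left disjunct is already true, regardless of whether the user switched thinking off. A worked evaluation under assumed values (this reading of the qwen3 behaviour is an inference from the FIXME, not confirmed by the PR):

// Assumed values for a qwen3 call with thinking switched off (illustrative).
const isThinkingTokenModel = true // qwen3 is treated as a thinking-token model
const reasoningEffort: string | undefined = 'low' // still present in settings

// Mirrors the predicate's left disjunct from the hunk above:
const enableReasoning = isThinkingTokenModel && reasoningEffort !== undefined
// => true, even though thinking was turned off; hence the FIXME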