refactor: streamline error handling and logging in ModernAiProvider

- Commented out the try-catch block in the `ModernAiProvider` class to simplify the code structure.
- Improved readability by commenting out the provider-level error logging while keeping the AI processing flow intact.
- Updated `messageThunk` to incorporate an abort controller for improved request management during message processing.
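
The registration call added in `messageThunk`, `addAbortController(userMessageId!, () => abortController.abort())`, keys an abort callback by the user message id. Below is a minimal sketch of what such a registry could look like, assuming a simple Map-based store; the real helper in `@renderer/utils/abortController` may differ, and the `abortCompletion` name is hypothetical.

```ts
// Sketch of an abort-callback registry keyed by message id (assumption:
// the actual @renderer/utils/abortController implementation may differ).
const abortCallbacks = new Map<string, Array<() => void>>()

export function addAbortController(id: string, abortFn: () => void): void {
  const callbacks = abortCallbacks.get(id) ?? []
  callbacks.push(abortFn)
  abortCallbacks.set(id, callbacks)
}

// Hypothetical counterpart: fire and clear every abort callback registered
// for a message id, e.g. when the user cancels generation.
export function abortCompletion(id: string): void {
  abortCallbacks.get(id)?.forEach((abort) => abort())
  abortCallbacks.delete(id)
}
```

Registered this way, cancelling only needs the message id; the callback calls `abortController.abort()`, and the `signal` passed through `options` in the `messageThunk` diff further down cancels the in-flight request.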
MyPrototypeWhat 2025-06-27 17:08:22 +08:00
parent 1bcc716eaf
commit dc106a8af7
2 changed files with 43 additions and 36 deletions


@@ -187,50 +187,51 @@ export default class ModernAiProvider {
     params: StreamTextParams,
     middlewareConfig: AiSdkMiddlewareConfig
   ): Promise<CompletionsResult> {
-    try {
-      // Conditionally build the plugin array
-      const plugins = this.buildPlugins(middlewareConfig)
+    // try {
+    // Conditionally build the plugin array
+    const plugins = this.buildPlugins(middlewareConfig)
 
-      // Create the executor from the built plugin array
-      const executor = createExecutor(this.config.providerId, this.config.options, plugins)
+    // Create the executor from the built plugin array
+    const executor = createExecutor(this.config.providerId, this.config.options, plugins)
 
-      // Dynamically build the middleware array
-      const middlewares = buildAiSdkMiddlewares(middlewareConfig)
-      console.log('构建的中间件:', middlewares)
+    // Dynamically build the middleware array
+    const middlewares = buildAiSdkMiddlewares(middlewareConfig)
+    console.log('构建的中间件:', middlewares)
 
-      // Create the executor with middlewares applied
-      if (middlewareConfig.onChunk) {
-        // Streaming - use the adapter
-        const adapter = new AiSdkToChunkAdapter(middlewareConfig.onChunk)
+    // Create the executor with middlewares applied
+    if (middlewareConfig.onChunk) {
+      // Streaming - use the adapter
+      const adapter = new AiSdkToChunkAdapter(middlewareConfig.onChunk)
 
-        const streamResult = await executor.streamText(
-          modelId,
-          params,
-          middlewares.length > 0 ? { middlewares } : undefined
-        )
+      const streamResult = await executor.streamText(
+        modelId,
+        params,
+        middlewares.length > 0 ? { middlewares } : undefined
+      )
 
-        const finalText = await adapter.processStream(streamResult)
+      const finalText = await adapter.processStream(streamResult)
 
-        return {
-          getText: () => finalText
-        }
-      } else {
-        // Streaming, but without an onChunk callback
-        const streamResult = await executor.streamText(
-          modelId,
-          params,
-          middlewares.length > 0 ? { middlewares } : undefined
-        )
-        const finalText = await streamResult.text
-        return {
-          getText: () => finalText
-        }
-      }
-    } catch (error) {
-      console.error('Modern AI SDK error:', error)
-      throw error
-    }
+      return {
+        getText: () => finalText
+      }
+    } else {
+      // Streaming, but without an onChunk callback
+      const streamResult = await executor.streamText(
+        modelId,
+        params,
+        middlewares.length > 0 ? { middlewares } : undefined
+      )
+      const finalText = await streamResult.text
+      return {
+        getText: () => finalText
+      }
+    }
+    // }
+    // catch (error) {
+    // console.error('Modern AI SDK error:', error)
+    // throw error
+    // }
   }
 
   // Proxy other methods to the original implementation
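
With the provider-level try/catch commented out, errors thrown by `executor.streamText` or the adapter are no longer logged inside `ModernAiProvider` and instead propagate to whoever awaits the completion. A hedged sketch of what a call site could then look like; the `completions` method name and its structural type are assumptions, not taken from this diff.

```ts
// Hypothetical call site: error handling now lives with the caller, since
// ModernAiProvider no longer catches and logs errors itself.
interface CompletionsLike {
  completions(modelId: string, params: unknown, cfg: unknown): Promise<{ getText: () => string }>
}

async function runCompletion(
  provider: CompletionsLike,
  modelId: string,
  params: unknown,
  middlewareConfig: unknown
): Promise<string> {
  try {
    const result = await provider.completions(modelId, params, middlewareConfig)
    return result.getText()
  } catch (error) {
    // Mirrors the catch block that this commit comments out in the provider.
    console.error('Modern AI SDK error:', error)
    throw error
  }
}
```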


@@ -20,6 +20,7 @@ import type {
 import { AssistantMessageStatus, MessageBlockStatus, MessageBlockType } from '@renderer/types/newMessage'
 import { Response } from '@renderer/types/newMessage'
 import { uuid } from '@renderer/utils'
+import { addAbortController } from '@renderer/utils/abortController'
 import { formatErrorMessage, isAbortError } from '@renderer/utils/error'
 import {
   createAssistantMessage,
@@ -840,11 +841,16 @@ const fetchAndProcessAssistantResponseImpl = async (
   const streamProcessorCallbacks = createStreamProcessor(callbacks)
   const startTime = Date.now()
   const abortController = new AbortController()
+  addAbortController(userMessageId!, () => abortController.abort())
   await transformMessagesAndFetch(
     {
       messages: messagesForContext,
       assistant,
+      options: {
+        signal: abortController.signal,
+        timeout: 30000
+      }
     },