diff --git a/src/renderer/src/services/ConversationService.ts b/src/renderer/src/services/ConversationService.ts
index 031f164c89..f6568e6cfd 100644
--- a/src/renderer/src/services/ConversationService.ts
+++ b/src/renderer/src/services/ConversationService.ts
@@ -12,7 +12,10 @@ import {
 } from './MessagesService'
 
 export class ConversationService {
-  static async prepareMessagesForLlm(messages: Message[], assistant: Assistant): Promise<Message[]> {
+  static async prepareMessagesForModel(
+    messages: Message[],
+    assistant: Assistant
+  ): Promise<Message[]> {
     const { contextCount } = getAssistantSettings(assistant)
     // This logic is extracted from the original ApiService.fetchChatCompletion
     const contextMessages = filterContextMessages(messages)
diff --git a/src/renderer/src/services/OrchestrateService.ts b/src/renderer/src/services/OrchestrateService.ts
index 60a5339151..feb5001914 100644
--- a/src/renderer/src/services/OrchestrateService.ts
+++ b/src/renderer/src/services/OrchestrateService.ts
@@ -35,11 +35,11 @@ export class OrchestrationService {
    * @param request The orchestration request containing messages and assistant info.
    * @param events A set of callbacks to report progress and results to the UI layer.
    */
-  async handleUserMessage(request: OrchestrationRequest, onChunkReceived: (chunk: Chunk) => void) {
+  async transformMessagesAndFetch(request: OrchestrationRequest, onChunkReceived: (chunk: Chunk) => void) {
     const { messages, assistant } = request
 
     try {
-      const llmMessages = await ConversationService.prepareMessagesForLlm(messages, assistant)
+      const llmMessages = await ConversationService.prepareMessagesForModel(messages, assistant)
 
       await fetchChatCompletion({
         messages: llmMessages,
@@ -52,3 +52,24 @@ export class OrchestrationService {
     }
   }
 }
+
+// For now this is written as a standalone function; switch back to a class later if one becomes necessary
+export async function transformMessagesAndFetch(
+  request: OrchestrationRequest,
+  onChunkReceived: (chunk: Chunk) => void
+) {
+  const { messages, assistant } = request
+
+  try {
+    const llmMessages = await ConversationService.prepareMessagesForModel(messages, assistant)
+
+    await fetchChatCompletion({
+      messages: llmMessages,
+      assistant: assistant,
+      options: request.options,
+      onChunkReceived
+    })
+  } catch (error: any) {
+    onChunkReceived({ type: ChunkType.ERROR, error })
+  }
+}
diff --git a/src/renderer/src/store/thunk/messageThunk.ts b/src/renderer/src/store/thunk/messageThunk.ts
index 4443cb1633..e89bdb4987 100644
--- a/src/renderer/src/store/thunk/messageThunk.ts
+++ b/src/renderer/src/store/thunk/messageThunk.ts
@@ -3,7 +3,7 @@ import { autoRenameTopic } from '@renderer/hooks/useTopic'
 import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService'
 import FileManager from '@renderer/services/FileManager'
 import { NotificationService } from '@renderer/services/NotificationService'
-import { OrchestrationService } from '@renderer/services/OrchestrateService'
+import { transformMessagesAndFetch } from '@renderer/services/OrchestrateService'
 import { createStreamProcessor, type StreamProcessorCallbacks } from '@renderer/services/StreamProcessingService'
 import { estimateMessagesUsage } from '@renderer/services/TokenService'
 import store from '@renderer/store'
@@ -840,8 +840,7 @@ const fetchAndProcessAssistantResponseImpl = async (
   const streamProcessorCallbacks = createStreamProcessor(callbacks)
   const startTime = Date.now()
 
-  const orchestrationService = new OrchestrationService()
-  await orchestrationService.handleUserMessage(
+  await transformMessagesAndFetch(
     {
       messages: messagesForContext,
       assistant,
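
After this change, callers no longer instantiate `OrchestrationService`; they import the standalone `transformMessagesAndFetch` directly, as the `messageThunk.ts` hunk shows. Below is a minimal caller sketch. Only the function signature and the `ChunkType.ERROR` variant come from the diff; the import paths for the type definitions, the `options: {}` placeholder, and the `runCompletion` helper name are assumptions for illustration.

```ts
// Minimal caller sketch -- type import paths and the options shape are
// assumptions; only transformMessagesAndFetch's signature is from the diff.
import { transformMessagesAndFetch } from '@renderer/services/OrchestrateService'
import { ChunkType, type Chunk } from '@renderer/types/chunk' // assumed path
import type { Assistant, Message } from '@renderer/types' // assumed path

async function runCompletion(messages: Message[], assistant: Assistant) {
  await transformMessagesAndFetch(
    { messages, assistant, options: {} }, // options: {} is a placeholder
    (chunk: Chunk) => {
      if (chunk.type === ChunkType.ERROR) {
        // Errors are delivered through the same callback as normal chunks,
        // since the function reports failures as an ERROR chunk
        console.error('completion failed:', chunk.error)
        return
      }
      // Forward all other chunk variants to the stream processor / UI layer
    }
  )
}
```

Because errors are funneled into the single `onChunkReceived` callback rather than thrown, the caller handles streaming output and failures through one code path, which matches how `createStreamProcessor` consumes chunks in `messageThunk.ts`.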