Mirror of https://github.com/CherryHQ/cherry-studio.git (synced 2025-12-28 13:31:32 +08:00)
refactor: rename and restructure message handling in Conversation and Orchestrate services
- Renamed `prepareMessagesForLlm` to `prepareMessagesForModel` in `ConversationService` for clarity.
- Updated `OrchestrationService` to use the new method name and introduced a new function `transformMessagesAndFetch` for improved message processing.
- Adjusted imports in `messageThunk` to reflect the changes in the orchestration service, enhancing code readability and maintainability.
Parent: 30a288ce5d
Commit: 1bcc716eaf
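For call sites, the net effect of this refactor is that the class-based entry point is replaced by a free function exported from `OrchestrateService`. A minimal migration sketch, assuming `request` and `onChunkReceived` are already in scope (both names come from the diff below):

```typescript
// Before (removed in this commit): instantiate the service and call its method.
// const orchestrationService = new OrchestrationService()
// await orchestrationService.handleUserMessage(request, onChunkReceived)

// After: import the standalone function and call it directly.
import { transformMessagesAndFetch } from '@renderer/services/OrchestrateService'

await transformMessagesAndFetch(request, onChunkReceived)
```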
@@ -12,7 +12,10 @@ import {
 } from './MessagesService'
 
 export class ConversationService {
-  static async prepareMessagesForLlm(messages: Message[], assistant: Assistant): Promise<StreamTextParams['messages']> {
+  static async prepareMessagesForModel(
+    messages: Message[],
+    assistant: Assistant
+  ): Promise<StreamTextParams['messages']> {
     const { contextCount } = getAssistantSettings(assistant)
     // This logic is extracted from the original ApiService.fetchChatCompletion
     const contextMessages = filterContextMessages(messages)
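`prepareMessagesForModel` reads `contextCount` from the assistant settings and filters the context messages before they are handed to the model. As an aside, a generic, self-contained sketch of that kind of context trimming is shown below; it is illustrative only and assumes `contextCount` means "keep at most this many recent messages", which is not necessarily how this repository implements it:

```typescript
// Illustrative only, not the repository's implementation: a generic context-window
// trim of the sort prepareMessagesForModel is responsible for.
interface ChatMessage {
  role: 'system' | 'user' | 'assistant'
  content: string
}

// Keep at most `contextCount` of the most recent messages, preserving order.
function takeRecentContext(messages: ChatMessage[], contextCount: number): ChatMessage[] {
  return contextCount > 0 ? messages.slice(-contextCount) : []
}

// With contextCount = 2, only the last two messages are forwarded.
const trimmed = takeRecentContext(
  [
    { role: 'user', content: 'first question' },
    { role: 'assistant', content: 'first answer' },
    { role: 'user', content: 'follow-up question' }
  ],
  2
)
console.log(trimmed.map((m) => m.content)) // ['first answer', 'follow-up question']
```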
@@ -35,11 +35,11 @@ export class OrchestrationService {
    * @param request The orchestration request containing messages and assistant info.
    * @param events A set of callbacks to report progress and results to the UI layer.
    */
-  async handleUserMessage(request: OrchestrationRequest, onChunkReceived: (chunk: Chunk) => void) {
+  async transformMessagesAndFetch(request: OrchestrationRequest, onChunkReceived: (chunk: Chunk) => void) {
     const { messages, assistant } = request
 
     try {
-      const llmMessages = await ConversationService.prepareMessagesForLlm(messages, assistant)
+      const llmMessages = await ConversationService.prepareMessagesForModel(messages, assistant)
 
       await fetchChatCompletion({
         messages: llmMessages,
@@ -52,3 +52,24 @@ export class OrchestrationService {
     }
   }
 }
+
+// For now this is written as a plain function; switch back to a class later if one is needed.
+export async function transformMessagesAndFetch(
+  request: OrchestrationRequest,
+  onChunkReceived: (chunk: Chunk) => void
+) {
+  const { messages, assistant } = request
+
+  try {
+    const llmMessages = await ConversationService.prepareMessagesForModel(messages, assistant)
+
+    await fetchChatCompletion({
+      messages: llmMessages,
+      assistant: assistant,
+      options: request.options,
+      onChunkReceived
+    })
+  } catch (error: any) {
+    onChunkReceived({ type: ChunkType.ERROR, error })
+  }
+}
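A hedged sketch of how application code might call the new free function. The request fields (`messages`, `assistant`, `options`) and the `ChunkType.ERROR` error chunk come from the diff above; the import path for `Chunk`/`ChunkType`, the `Message` and `Assistant` types being in scope, and the empty `options` object are assumptions:

```typescript
import { transformMessagesAndFetch } from '@renderer/services/OrchestrateService'
import { ChunkType, type Chunk } from '@renderer/types/chunk' // import path assumed

async function askModel(messages: Message[], assistant: Assistant): Promise<void> {
  await transformMessagesAndFetch(
    // options is forwarded to fetchChatCompletion; an empty object is assumed here.
    { messages, assistant, options: {} },
    (chunk: Chunk) => {
      // Errors are surfaced as chunks rather than thrown, matching the catch block above.
      if (chunk.type === ChunkType.ERROR) {
        console.error('model request failed', chunk.error)
        return
      }
      // Handle streaming content chunks here (e.g. append text to the UI).
    }
  )
}
```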
@@ -3,7 +3,7 @@ import { autoRenameTopic } from '@renderer/hooks/useTopic'
 import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService'
 import FileManager from '@renderer/services/FileManager'
 import { NotificationService } from '@renderer/services/NotificationService'
-import { OrchestrationService } from '@renderer/services/OrchestrateService'
+import { transformMessagesAndFetch } from '@renderer/services/OrchestrateService'
 import { createStreamProcessor, type StreamProcessorCallbacks } from '@renderer/services/StreamProcessingService'
 import { estimateMessagesUsage } from '@renderer/services/TokenService'
 import store from '@renderer/store'
@@ -840,8 +840,7 @@ const fetchAndProcessAssistantResponseImpl = async (
   const streamProcessorCallbacks = createStreamProcessor(callbacks)
 
   const startTime = Date.now()
-  const orchestrationService = new OrchestrationService()
-  await orchestrationService.handleUserMessage(
+  await transformMessagesAndFetch(
     {
       messages: messagesForContext,
       assistant,