Mirror of https://github.com/CherryHQ/cherry-studio.git, last synced 2026-01-09 06:49:02 +08:00
refactor: rename and restructure message handling in Conversation and Orchestrate services
- Renamed `prepareMessagesForLlm` to `prepareMessagesForModel` in `ConversationService` for clarity.
- Updated `OrchestrationService` to use the new name and added a standalone `transformMessagesAndFetch` function that prepares the messages and calls `fetchChatCompletion`.
- Updated `messageThunk` to import and call `transformMessagesAndFetch` instead of instantiating `OrchestrationService`.
Parent: 30a288ce5d
Commit: 1bcc716eaf
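To make the shape of the change concrete before the hunks, here is a hedged sketch of the new entry point from the caller's side. Only `transformMessagesAndFetch`, its module path, and the chunk's `type` field are taken from the diff; the wrapper function and the logging are illustrative.

import { transformMessagesAndFetch } from '@renderer/services/OrchestrateService'

// Before this commit the caller went through the class:
//   const orchestrationService = new OrchestrationService()
//   await orchestrationService.handleUserMessage(request, onChunkReceived)
//
// After this commit the same call is a single exported function.
// `runAssistantTurn` is a hypothetical wrapper; the real caller is
// fetchAndProcessAssistantResponseImpl in messageThunk.
export async function runAssistantTurn(request: Parameters<typeof transformMessagesAndFetch>[0]) {
  await transformMessagesAndFetch(request, (chunk) => {
    // Streamed chunks (including ChunkType.ERROR on failure) all arrive here.
    console.debug('chunk received:', chunk.type)
  })
}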
ConversationService:

@@ -12,7 +12,10 @@ import {
 } from './MessagesService'
 
 export class ConversationService {
-  static async prepareMessagesForLlm(messages: Message[], assistant: Assistant): Promise<StreamTextParams['messages']> {
+  static async prepareMessagesForModel(
+    messages: Message[],
+    assistant: Assistant
+  ): Promise<StreamTextParams['messages']> {
     const { contextCount } = getAssistantSettings(assistant)
     // This logic is extracted from the original ApiService.fetchChatCompletion
     const contextMessages = filterContextMessages(messages)
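For existing callers, only the method name changes; arguments and return type stay the same. A minimal sketch, with the imports of `Message`, `Assistant`, and `ConversationService` elided because their paths are not visible in this diff:

// Hypothetical helper showing the renamed call; `messages` and `assistant` come from the caller.
async function buildModelMessages(messages: Message[], assistant: Assistant) {
  // Was: ConversationService.prepareMessagesForLlm(messages, assistant)
  return ConversationService.prepareMessagesForModel(messages, assistant)
  // Resolves to StreamTextParams['messages'], ready to hand to fetchChatCompletion.
}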
OrchestrateService:

@@ -35,11 +35,11 @@ export class OrchestrationService {
   * @param request The orchestration request containing messages and assistant info.
   * @param events A set of callbacks to report progress and results to the UI layer.
   */
-  async handleUserMessage(request: OrchestrationRequest, onChunkReceived: (chunk: Chunk) => void) {
+  async transformMessagesAndFetch(request: OrchestrationRequest, onChunkReceived: (chunk: Chunk) => void) {
     const { messages, assistant } = request
 
     try {
-      const llmMessages = await ConversationService.prepareMessagesForLlm(messages, assistant)
+      const llmMessages = await ConversationService.prepareMessagesForModel(messages, assistant)
 
       await fetchChatCompletion({
         messages: llmMessages,
@@ -52,3 +52,24 @@ export class OrchestrationService {
     }
   }
 }
+
+// For now this is written as a plain function; change it back to a class later if one is needed
+export async function transformMessagesAndFetch(
+  request: OrchestrationRequest,
+  onChunkReceived: (chunk: Chunk) => void
+) {
+  const { messages, assistant } = request
+
+  try {
+    const llmMessages = await ConversationService.prepareMessagesForModel(messages, assistant)
+
+    await fetchChatCompletion({
+      messages: llmMessages,
+      assistant: assistant,
+      options: request.options,
+      onChunkReceived
+    })
+  } catch (error: any) {
+    onChunkReceived({ type: ChunkType.ERROR, error })
+  }
+}
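One property of the new function worth calling out: it never re-throws. A failure is delivered through the same callback as a `ChunkType.ERROR` chunk, so callers react to errors inside the chunk handler rather than with their own try/catch. A hedged usage sketch follows; only `transformMessagesAndFetch` and the error chunk's `type`/`error` fields come from the diff, and it assumes `Chunk` is a union whose error variant carries an `error` field, as the catch block above suggests. The helper names are illustrative.

import { transformMessagesAndFetch } from '@renderer/services/OrchestrateService'

type OrchestrationReq = Parameters<typeof transformMessagesAndFetch>[0]
type ChunkHandler = Parameters<typeof transformMessagesAndFetch>[1]
type StreamChunk = Parameters<ChunkHandler>[0]

// Illustrative handler factory: routes error chunks and content chunks separately.
// In the real app this role is played by createStreamProcessor(callbacks) in messageThunk.
function makeChunkHandler(onError: (err: unknown) => void, onChunk: (c: StreamChunk) => void): ChunkHandler {
  return (chunk) => {
    if ('error' in chunk) {
      onError(chunk.error) // emitted by the catch block as { type: ChunkType.ERROR, error }
    } else {
      onChunk(chunk)
    }
  }
}

export async function streamReply(request: OrchestrationReq, handler: ChunkHandler) {
  // No try/catch needed here: transformMessagesAndFetch reports failures via the handler.
  await transformMessagesAndFetch(request, handler)
}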
messageThunk:

@@ -3,7 +3,7 @@ import { autoRenameTopic } from '@renderer/hooks/useTopic'
 import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService'
 import FileManager from '@renderer/services/FileManager'
 import { NotificationService } from '@renderer/services/NotificationService'
-import { OrchestrationService } from '@renderer/services/OrchestrateService'
+import { transformMessagesAndFetch } from '@renderer/services/OrchestrateService'
 import { createStreamProcessor, type StreamProcessorCallbacks } from '@renderer/services/StreamProcessingService'
 import { estimateMessagesUsage } from '@renderer/services/TokenService'
 import store from '@renderer/store'
@@ -840,8 +840,7 @@ const fetchAndProcessAssistantResponseImpl = async (
     const streamProcessorCallbacks = createStreamProcessor(callbacks)
 
     const startTime = Date.now()
-    const orchestrationService = new OrchestrationService()
-    await orchestrationService.handleUserMessage(
+    await transformMessagesAndFetch(
      {
        messages: messagesForContext,
        assistant,