Mirror of https://github.com/CherryHQ/cherry-studio.git, synced 2026-01-03 02:59:07 +08:00
refactor: update logging in message processing to use debug level and improve clarity
parent 69bcb0e13e
commit 7a169c424d
@@ -129,15 +129,6 @@ async function processMessageRequest(
     request.model = modelId
   }
 
-  logger.info('Processing message request:', {
-    provider: provider.id,
-    model: request.model,
-    messageCount: request.messages?.length || 0,
-    stream: request.stream,
-    max_tokens: request.max_tokens,
-    temperature: request.temperature
-  })
-
   // Ensure provider is Anthropic type
   if (provider.type !== 'anthropic') {
     return res.status(400).json({
@@ -149,12 +140,6 @@ async function processMessageRequest(
     })
   }
 
-  logger.info('Provider validation successful:', {
-    provider: provider.id,
-    providerType: provider.type,
-    modelId: request.model
-  })
-
   // Validate request
   const validation = messagesService.validateRequest(request)
   if (!validation.isValid) {
@@ -314,13 +299,6 @@ router.post('/', async (req: Request, res: Response) => {
   try {
     const request: MessageCreateParams = req.body
 
-    logger.info('Anthropic message request:', {
-      model: request.model,
-      messageCount: request.messages?.length || 0,
-      stream: request.stream,
-      max_tokens: request.max_tokens
-    })
-
     // Validate model ID and get provider
     const modelValidation = await validateModelId(request.model)
    if (!modelValidation.valid) {
@@ -338,13 +316,6 @@ router.post('/', async (req: Request, res: Response) => {
     const provider = modelValidation.provider!
     const modelId = modelValidation.modelId!
 
-    logger.info('Model validation successful:', {
-      provider: provider.id,
-      providerType: provider.type,
-      modelId: modelId,
-      fullModelId: request.model
-    })
-
     // Use shared processing function
     return await processMessageRequest(req, res, provider, modelId)
   } catch (error: any) {
@@ -489,14 +460,6 @@ providerRouter.post('/', async (req: Request, res: Response) => {
       })
     }
 
-    logger.info('Provider-specific message request:', {
-      providerId,
-      model: request.model,
-      messageCount: request.messages?.length || 0,
-      stream: request.stream,
-      max_tokens: request.max_tokens
-    })
-
     // Get provider directly by ID from URL path
     const provider = await getProviderById(providerId)
     if (!provider) {
@@ -509,12 +472,6 @@ providerRouter.post('/', async (req: Request, res: Response) => {
       })
     }
 
-    logger.info('Provider validation successful:', {
-      provider: provider.id,
-      providerType: provider.type,
-      modelId: request.model
-    })
-
     // Use shared processing function (no modelId override needed)
     return await processMessageRequest(req, res, provider)
   } catch (error: any) {
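With those logger.info calls gone, the POST handlers are left with validation, provider resolution, and delegation to processMessageRequest, which now owns the request logging. The sketch below is a rough reconstruction of that remaining handler flow from the context lines above; the types, validateModelId, and processMessageRequest shown here are simplified stand-ins, not the real implementations.

import express, { Request, Response } from 'express'

// Simplified stand-ins for types and helpers that the diff only references.
type MessageCreateParams = { model: string; messages: unknown[]; stream?: boolean; max_tokens?: number }
type Provider = { id: string; type: string }

async function validateModelId(model: string): Promise<{ valid: boolean; provider?: Provider; modelId?: string }> {
  // Stub: the real helper resolves the provider and bare model id from the full model string.
  return { valid: true, provider: { id: 'my-anthropic-provider', type: 'anthropic' }, modelId: model }
}

async function processMessageRequest(req: Request, res: Response, provider: Provider, modelId?: string) {
  // Stub: the real shared function checks provider.type, validates the request,
  // and logs request details at debug level before calling the provider.
  return res.json({ ok: true, provider: provider.id, modelId })
}

const router = express.Router()

router.post('/', async (req: Request, res: Response) => {
  try {
    const request: MessageCreateParams = req.body

    // Validate model ID and get provider
    const modelValidation = await validateModelId(request.model)
    if (!modelValidation.valid) {
      return res.status(400).json({ error: 'Invalid model' }) // stand-in error body
    }

    const provider = modelValidation.provider!
    const modelId = modelValidation.modelId!

    // Use shared processing function
    return await processMessageRequest(req, res, provider, modelId)
  } catch (error: any) {
    return res.status(500).json({ error: error?.message }) // stand-in error handling
  }
})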
@@ -46,11 +46,12 @@ export class MessagesService {
   }
 
   async processMessage(request: MessageCreateParams, provider: Provider): Promise<Message> {
-    logger.info('Processing Anthropic message request:', {
+    logger.debug('Preparing Anthropic message request', {
       model: request.model,
       messageCount: request.messages.length,
       stream: request.stream,
-      max_tokens: request.max_tokens
+      maxTokens: request.max_tokens,
+      provider: provider.id
     })
 
     // Create Anthropic client for the provider
@@ -66,14 +67,12 @@ export class MessagesService {
       anthropicRequest.system = buildClaudeCodeSystemMessage(request.system || '')
     }
 
-    logger.debug('Sending request to Anthropic provider:', {
-      provider: provider.id,
-      apiHost: provider.apiHost
-    })
-
     const response = await client.messages.create(anthropicRequest)
 
-    logger.info('Successfully processed Anthropic message')
+    logger.info('Anthropic message completed', {
+      model: request.model,
+      provider: provider.id
+    })
     return response
   }
 
@@ -81,9 +80,10 @@ export class MessagesService {
     request: MessageCreateParams,
     provider: Provider
   ): AsyncIterable<RawMessageStreamEvent> {
-    logger.info('Processing streaming Anthropic message request:', {
+    logger.debug('Preparing streaming Anthropic message request', {
       model: request.model,
-      messageCount: request.messages.length
+      messageCount: request.messages.length,
+      provider: provider.id
     })
 
     // Create Anthropic client for the provider
@@ -99,18 +99,16 @@ export class MessagesService {
       streamingRequest.system = buildClaudeCodeSystemMessage(request.system || '')
     }
 
-    logger.debug('Sending streaming request to Anthropic provider:', {
-      provider: provider.id,
-      apiHost: provider.apiHost
-    })
-
     const stream = client.messages.stream(streamingRequest)
 
     for await (const chunk of stream) {
       yield chunk
     }
 
-    logger.info('Successfully completed streaming Anthropic message')
+    logger.info('Completed streaming Anthropic message', {
+      model: request.model,
+      provider: provider.id
+    })
   }
 }
 
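Taken together, the service methods now log once at debug level before contacting the provider and once at info level on completion, with model and provider attached in both places. Below is a minimal, self-contained sketch of that pattern, not the actual MessagesService: the Logger here is a thin console wrapper, and callAnthropic / streamAnthropic are stand-ins for the SDK's client.messages.create() and client.messages.stream() calls that the real service makes through a per-provider client.

type Logger = {
  debug: (msg: string, ctx?: Record<string, unknown>) => void
  info: (msg: string, ctx?: Record<string, unknown>) => void
}

// Console-backed logger; a level-aware logger would drop the debug lines in production.
const logger: Logger = {
  debug: (msg, ctx) => console.debug(msg, ctx ?? {}),
  info: (msg, ctx) => console.info(msg, ctx ?? {})
}

interface MessageRequest {
  model: string
  messages: { role: string; content: string }[]
  stream?: boolean
  max_tokens?: number
}

// Stand-in for client.messages.create()
async function callAnthropic(_request: MessageRequest): Promise<{ id: string }> {
  return { id: 'msg_stub' }
}

// Stand-in for client.messages.stream()
async function* streamAnthropic(_request: MessageRequest): AsyncIterable<{ type: string }> {
  yield { type: 'message_start' }
  yield { type: 'message_stop' }
}

export async function processMessage(request: MessageRequest, providerId: string) {
  // Request details are only interesting while debugging, so they sit at debug level.
  logger.debug('Preparing Anthropic message request', {
    model: request.model,
    messageCount: request.messages.length,
    stream: request.stream,
    maxTokens: request.max_tokens,
    provider: providerId
  })

  const response = await callAnthropic(request)

  // The completion log stays at info level but now carries identifying context.
  logger.info('Anthropic message completed', { model: request.model, provider: providerId })
  return response
}

export async function* processMessageStream(request: MessageRequest, providerId: string) {
  logger.debug('Preparing streaming Anthropic message request', {
    model: request.model,
    messageCount: request.messages.length,
    provider: providerId
  })

  for await (const chunk of streamAnthropic(request)) {
    yield chunk
  }

  logger.info('Completed streaming Anthropic message', { model: request.model, provider: providerId })
}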