feat: use openrouter's builtin metric (#8314)

This commit is contained in:
happyZYM 2025-07-20 21:22:25 +08:00 committed by GitHub
parent bfe83c0256
commit ebe7cce161
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 72 additions and 7 deletions

View File

@@ -536,7 +536,9 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
...this.getReasoningEffort(assistant, model),
...getOpenAIWebSearchParams(model, enableWebSearch),
// 只在对话场景下应用自定义参数,避免影响翻译、总结等其他业务逻辑
...(coreRequest.callType === 'chat' ? this.getCustomParameters(assistant) : {})
...(coreRequest.callType === 'chat' ? this.getCustomParameters(assistant) : {}),
// OpenRouter usage tracking
...(this.provider.id === 'openrouter' ? { usage: { include: true } } : {})
}
// Create the appropriate parameters object based on whether streaming is enabled
@@ -657,6 +659,7 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
const toolCalls: OpenAI.Chat.Completions.ChatCompletionMessageToolCall[] = []
let isFinished = false
let lastUsageInfo: any = null
let hasFinishReason = false // Track if we've seen a finish_reason
/**
*
@@ -692,14 +695,33 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
let isFirstTextChunk = true
return (context: ResponseChunkTransformerContext) => ({
async transform(chunk: OpenAISdkRawChunk, controller: TransformStreamDefaultController<GenericChunk>) {
const isOpenRouter = context.provider?.id === 'openrouter'
// 持续更新usage信息
logger.silly('chunk', chunk)
if (chunk.usage) {
const usage = chunk.usage as any // OpenRouter may include additional fields like cost
lastUsageInfo = {
prompt_tokens: chunk.usage.prompt_tokens || 0,
completion_tokens: chunk.usage.completion_tokens || 0,
total_tokens: (chunk.usage.prompt_tokens || 0) + (chunk.usage.completion_tokens || 0)
prompt_tokens: usage.prompt_tokens || 0,
completion_tokens: usage.completion_tokens || 0,
total_tokens: usage.total_tokens || (usage.prompt_tokens || 0) + (usage.completion_tokens || 0),
// Handle OpenRouter specific cost fields
...(usage.cost !== undefined ? { cost: usage.cost } : {})
}
// For OpenRouter, if we've seen finish_reason and now have usage, emit completion signals
if (isOpenRouter && hasFinishReason && !isFinished) {
emitCompletionSignals(controller)
return
}
}
// For OpenRouter, if this chunk only contains usage without choices, emit completion signals
if (isOpenRouter && chunk.usage && (!chunk.choices || chunk.choices.length === 0)) {
if (!isFinished) {
emitCompletionSignals(controller)
}
return
}
// 处理chunk
@@ -729,7 +751,18 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
if (!contentSource) {
if ('finish_reason' in choice && choice.finish_reason) {
emitCompletionSignals(controller)
// For OpenRouter, don't emit completion signals immediately after finish_reason
// Wait for the usage chunk that comes after
if (isOpenRouter) {
hasFinishReason = true
// If we already have usage info, emit completion signals now
if (lastUsageInfo && lastUsageInfo.total_tokens > 0) {
emitCompletionSignals(controller)
}
} else {
// For other providers, emit completion signals immediately
emitCompletionSignals(controller)
}
}
continue
}
@@ -805,7 +838,19 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
llm_web_search: webSearchData
})
}
emitCompletionSignals(controller)
// For OpenRouter, don't emit completion signals immediately after finish_reason
// Wait for the usage chunk that comes after
if (isOpenRouter) {
hasFinishReason = true
// If we already have usage info, emit completion signals now
if (lastUsageInfo && lastUsageInfo.total_tokens > 0) {
emitCompletionSignals(controller)
}
} else {
// For other providers, emit completion signals immediately
emitCompletionSignals(controller)
}
}
}
}

View File

@@ -179,6 +179,10 @@ function accumulateUsage(accumulated: Usage, newUsage: Usage): void {
if (newUsage.thoughts_tokens !== undefined) {
accumulated.thoughts_tokens = (accumulated.thoughts_tokens || 0) + newUsage.thoughts_tokens
}
// Handle OpenRouter specific cost fields
if (newUsage.cost !== undefined) {
accumulated.cost = (accumulated.cost || 0) + newUsage.cost
}
}
export default FinalChunkConsumerMiddleware

View File

@@ -22,6 +22,12 @@ const MessageTokens: React.FC<MessageTokensProps> = ({ message }) => {
const inputTokens = message?.usage?.prompt_tokens ?? 0
const outputTokens = message?.usage?.completion_tokens ?? 0
const model = message.model
// For OpenRouter, use the cost directly from usage if available
if (model?.provider === 'openrouter' && message?.usage?.cost !== undefined) {
return message.usage.cost
}
if (!model || model.pricing?.input_per_million_tokens === 0 || model.pricing?.output_per_million_tokens === 0) {
return 0
}
@@ -37,8 +43,13 @@ const MessageTokens: React.FC<MessageTokensProps> = ({ message }) => {
if (price === 0) {
return ''
}
// For OpenRouter, always show cost even without pricing config
const shouldShowCost = message.model?.provider === 'openrouter' || price > 0
if (!shouldShowCost) {
return ''
}
const currencySymbol = message.model?.pricing?.currencySymbol || '$'
return `| ${t('models.price.cost')}: ${currencySymbol}${price}`
return `| ${t('models.price.cost')}: ${currencySymbol}${price.toFixed(6)}`
}
if (!message.usage) {

View File

@@ -177,7 +177,10 @@ export const createBaseCallbacks = (deps: BaseCallbacksDependencies) => {
autoRenameTopic(assistant, topicId)
// 处理usage估算
// For OpenRouter, always use the accurate usage data from API, don't estimate
const isOpenRouter = assistant.model?.provider === 'openrouter'
if (
!isOpenRouter &&
response &&
(response.usage?.total_tokens === 0 ||
response?.usage?.prompt_tokens === 0 ||

View File

@@ -123,6 +123,8 @@ export type LegacyMessage = {
export type Usage = OpenAI.Completions.CompletionUsage & {
thoughts_tokens?: number
// OpenRouter specific fields
cost?: number
}
export type Metrics = {