feat(aiCore): enhance AI SDK with tracing and telemetry support

- Integrated tracing capabilities into the ModernAiProvider, allowing for better tracking of AI completions and image generation processes.
- Added a new TelemetryPlugin to inject telemetry data into AI SDK requests, ensuring compatibility with existing tracing systems.
- Updated middleware and plugin configurations to support topic-based tracing, improving the overall observability of AI interactions.
- Introduced comprehensive logging throughout the AI SDK processes to facilitate debugging and performance monitoring.
- Added unit tests for new functionalities to ensure reliability and maintainability.
This commit is contained in:
MyPrototypeWhat 2025-08-14 16:17:41 +08:00
parent ff7ad52ad5
commit cb55f7a69b
14 changed files with 1535 additions and 95 deletions

View File

@ -9,8 +9,11 @@
*/
import { createExecutor, generateImage, StreamTextParams } from '@cherrystudio/ai-core'
import { loggerService } from '@logger'
import { isNotSupportedImageSizeModel } from '@renderer/config/models'
import type { GenerateImageParams, Model, Provider } from '@renderer/types'
import { addSpan, endSpan } from '@renderer/services/SpanManagerService'
import { StartSpanParams } from '@renderer/trace/types/ModelSpanEntity'
import type { Assistant, GenerateImageParams, Model, Provider } from '@renderer/types'
import { ChunkType } from '@renderer/types/chunk'
import AiSdkToChunkAdapter from './chunk/AiSdkToChunkAdapter'
@ -20,6 +23,8 @@ import { AiSdkMiddlewareConfig, buildAiSdkMiddlewares } from './middleware/AiSdk
import { buildPlugins } from './plugins/PluginBuilder'
import { getActualProvider, isModernSdkSupported, providerToAiSdkConfig } from './provider/ProviderConfigProcessor'
const logger = loggerService.withContext('ModernAiProvider')
export default class ModernAiProvider {
private legacyProvider: LegacyAiProvider
private config: ReturnType<typeof providerToAiSdkConfig>
@ -40,15 +45,112 @@ export default class ModernAiProvider {
public async completions(
modelId: string,
params: StreamTextParams,
middlewareConfig: AiSdkMiddlewareConfig
config: AiSdkMiddlewareConfig & {
assistant: Assistant
// topicId for tracing
topicId?: string
callType: string
}
): Promise<CompletionsResult> {
console.log('completions', modelId, params, middlewareConfig)
if (middlewareConfig.isImageGenerationEndpoint) {
return await this.modernImageGeneration(modelId, params, middlewareConfig)
if (config.isImageGenerationEndpoint) {
return await this.modernImageGeneration(modelId, params, config)
}
return await this.modernCompletions(modelId, params, middlewareConfig)
return await this.modernCompletions(modelId, params, config)
}
/**
 * Trace-enabled variant of `completions`.
 *
 * Mirrors the legacy `completionsForTrace`: it opens a parent span before
 * delegating to `completions`, so the AI SDK spans created further down the
 * stack land in the correct trace context.
 *
 * @param modelId - Model identifier for the request; also used in the span name.
 * @param params - Stream-text parameters forwarded unchanged to `completions`.
 * @param config - Middleware config plus tracing fields. When `topicId` is
 *   absent, tracing is skipped and the regular `completions` path is used.
 * @returns The result of the underlying `completions` call.
 * @throws Re-throws any error from `completions` after closing the span with it.
 */
public async completionsForTrace(
  modelId: string,
  params: StreamTextParams,
  config: AiSdkMiddlewareConfig & {
    assistant: Assistant
    // topicId for tracing
    topicId?: string
    callType: string
  }
): Promise<CompletionsResult> {
  // Tracing is keyed by topic; without a topicId there is nothing to attach spans to.
  if (!config.topicId) {
    logger.warn('No topicId provided, falling back to regular completions')
    return await this.completions(modelId, params, config)
  }
  const traceName = `${this.actualProvider.name}.${modelId}.${config.callType}`
  const traceParams: StartSpanParams = {
    name: traceName,
    tag: 'LLM',
    topicId: config.topicId,
    // NOTE(review): the span is registered under assistant.model?.name here, but
    // endSpan below passes modelId as modelName — confirm both resolve to the
    // same key in SpanManagerService, otherwise the span may not be matched on close.
    modelName: config.assistant.model?.name,
    inputs: params
  }
  logger.info('Starting AI SDK trace span', {
    traceName,
    topicId: config.topicId,
    modelId,
    hasTools: !!params.tools && Object.keys(params.tools).length > 0,
    toolNames: params.tools ? Object.keys(params.tools) : [],
    isImageGeneration: config.isImageGenerationEndpoint
  })
  const span = addSpan(traceParams)
  if (!span) {
    // Span creation can fail (e.g. tracing disabled upstream); degrade gracefully.
    logger.warn('Failed to create span, falling back to regular completions', {
      topicId: config.topicId,
      modelId,
      traceName
    })
    return await this.completions(modelId, params, config)
  }
  try {
    logger.info('Created parent span, now calling completions', {
      spanId: span.spanContext().spanId,
      traceId: span.spanContext().traceId,
      topicId: config.topicId,
      modelId,
      parentSpanCreated: true
    })
    const result = await this.completions(modelId, params, config)
    logger.info('Completions finished, ending parent span', {
      spanId: span.spanContext().spanId,
      traceId: span.spanContext().traceId,
      topicId: config.topicId,
      modelId,
      resultLength: result.getText().length
    })
    // Mark the span as completed.
    endSpan({
      topicId: config.topicId,
      outputs: result,
      span,
      modelName: modelId // kept as modelId for consistency with the error path
    })
    return result
  } catch (error) {
    logger.error('Error in completionsForTrace, ending parent span with error', error as Error, {
      spanId: span.spanContext().spanId,
      traceId: span.spanContext().traceId,
      topicId: config.topicId,
      modelId
    })
    // Mark the span as failed.
    endSpan({
      topicId: config.topicId,
      error: error as Error,
      span,
      modelName: modelId // kept as modelId for consistency with the success path
    })
    throw error
  }
}
/**
@ -57,46 +159,123 @@ export default class ModernAiProvider {
private async modernCompletions(
modelId: string,
params: StreamTextParams,
middlewareConfig: AiSdkMiddlewareConfig
config: AiSdkMiddlewareConfig & {
assistant: Assistant
// topicId for tracing
topicId?: string
callType: string
}
): Promise<CompletionsResult> {
// try {
logger.info('Starting modernCompletions', {
modelId,
providerId: this.config.providerId,
topicId: config.topicId,
hasOnChunk: !!config.onChunk,
hasTools: !!params.tools && Object.keys(params.tools).length > 0,
toolCount: params.tools ? Object.keys(params.tools).length : 0
})
// 根据条件构建插件数组
const plugins = buildPlugins(middlewareConfig)
console.log('this.config.providerId', this.config.providerId)
console.log('this.config.options', this.config.options)
console.log('plugins', plugins)
const plugins = buildPlugins(config)
logger.debug('Built plugins for AI SDK', {
pluginCount: plugins.length,
pluginNames: plugins.map((p) => p.name),
providerId: this.config.providerId,
topicId: config.topicId
})
// 用构建好的插件数组创建executor
const executor = createExecutor(this.config.providerId, this.config.options, plugins)
logger.debug('Created AI SDK executor', {
providerId: this.config.providerId,
hasOptions: !!this.config.options,
pluginCount: plugins.length
})
// 动态构建中间件数组
const middlewares = buildAiSdkMiddlewares(middlewareConfig)
// console.log('构建的中间件:', middlewares)
const middlewares = buildAiSdkMiddlewares(config)
logger.debug('Built AI SDK middlewares', {
middlewareCount: middlewares.length,
topicId: config.topicId
})
// 创建带有中间件的执行器
if (middlewareConfig.onChunk) {
if (config.onChunk) {
// 流式处理 - 使用适配器
const adapter = new AiSdkToChunkAdapter(middlewareConfig.onChunk, middlewareConfig.mcpTools)
console.log('最终params', params)
logger.info('Starting streaming with chunk adapter', {
modelId,
hasMiddlewares: middlewares.length > 0,
middlewareCount: middlewares.length,
hasMcpTools: !!config.mcpTools,
mcpToolCount: config.mcpTools?.length || 0,
topicId: config.topicId
})
const adapter = new AiSdkToChunkAdapter(config.onChunk, config.mcpTools)
logger.debug('Final params before streamText', {
modelId,
hasMessages: !!params.messages,
messageCount: params.messages?.length || 0,
hasTools: !!params.tools && Object.keys(params.tools).length > 0,
toolNames: params.tools ? Object.keys(params.tools) : [],
hasSystem: !!params.system,
topicId: config.topicId
})
const streamResult = await executor.streamText(
modelId,
params,
middlewares.length > 0 ? { middlewares } : undefined
)
logger.info('StreamText call successful, processing stream', {
modelId,
topicId: config.topicId,
hasFullStream: !!streamResult.fullStream
})
const finalText = await adapter.processStream(streamResult)
logger.info('Stream processing completed', {
modelId,
topicId: config.topicId,
finalTextLength: finalText.length
})
return {
getText: () => finalText
}
} else {
// 流式处理但没有 onChunk 回调
logger.info('Starting streaming without chunk callback', {
modelId,
hasMiddlewares: middlewares.length > 0,
middlewareCount: middlewares.length,
topicId: config.topicId
})
const streamResult = await executor.streamText(
modelId,
params,
middlewares.length > 0 ? { middlewares } : undefined
)
logger.info('StreamText call successful, waiting for text', {
modelId,
topicId: config.topicId
})
// 强制消费流,不然await streamResult.text会阻塞
await streamResult?.consumeStream()
const finalText = await streamResult.text
logger.info('Text extraction completed', {
modelId,
topicId: config.topicId,
finalTextLength: finalText.length
})
return {
getText: () => finalText
}
@ -114,9 +293,14 @@ export default class ModernAiProvider {
private async modernImageGeneration(
modelId: string,
params: StreamTextParams,
middlewareConfig: AiSdkMiddlewareConfig
config: AiSdkMiddlewareConfig & {
assistant: Assistant
// topicId for tracing
topicId?: string
callType: string
}
): Promise<CompletionsResult> {
const { onChunk } = middlewareConfig
const { onChunk } = config
try {
// 检查 messages 是否存在
@ -150,7 +334,7 @@ export default class ModernAiProvider {
// 构建图像生成参数
const imageParams = {
prompt,
size: isNotSupportedImageSizeModel(middlewareConfig.model) ? undefined : ('1024x1024' as `${number}x${number}`), // 默认尺寸,使用正确的类型
size: isNotSupportedImageSizeModel(config.model) ? undefined : ('1024x1024' as `${number}x${number}`), // 默认尺寸,使用正确的类型
n: 1,
...(params.abortSignal && { abortSignal: params.abortSignal })
}

View File

@ -1,11 +1,11 @@
import { AihubmixAPIClient } from '@renderer/aiCore/clients/AihubmixAPIClient'
import { AnthropicAPIClient } from '@renderer/aiCore/clients/anthropic/AnthropicAPIClient'
import { ApiClientFactory } from '@renderer/aiCore/clients/ApiClientFactory'
import { GeminiAPIClient } from '@renderer/aiCore/clients/gemini/GeminiAPIClient'
import { VertexAPIClient } from '@renderer/aiCore/clients/gemini/VertexAPIClient'
import { NewAPIClient } from '@renderer/aiCore/clients/NewAPIClient'
import { OpenAIAPIClient } from '@renderer/aiCore/clients/openai/OpenAIApiClient'
import { OpenAIResponseAPIClient } from '@renderer/aiCore/clients/openai/OpenAIResponseAPIClient'
import { AihubmixAPIClient } from '@renderer/aiCore/legacy/clients/AihubmixAPIClient'
import { AnthropicAPIClient } from '@renderer/aiCore/legacy/clients/anthropic/AnthropicAPIClient'
import { ApiClientFactory } from '@renderer/aiCore/legacy/clients/ApiClientFactory'
import { GeminiAPIClient } from '@renderer/aiCore/legacy/clients/gemini/GeminiAPIClient'
import { VertexAPIClient } from '@renderer/aiCore/legacy/clients/gemini/VertexAPIClient'
import { NewAPIClient } from '@renderer/aiCore/legacy/clients/NewAPIClient'
import { OpenAIAPIClient } from '@renderer/aiCore/legacy/clients/openai/OpenAIApiClient'
import { OpenAIResponseAPIClient } from '@renderer/aiCore/legacy/clients/openai/OpenAIResponseAPIClient'
import { EndpointType, Model, Provider } from '@renderer/types'
import { beforeEach, describe, expect, it, vi } from 'vitest'

View File

@ -3,7 +3,7 @@ import {
LanguageModelV2Middleware,
simulateStreamingMiddleware
} from '@cherrystudio/ai-core'
import type { Assistant, BaseTool, Model, Provider } from '@renderer/types'
import type { BaseTool, Model, Provider } from '@renderer/types'
import type { Chunk } from '@renderer/types/chunk'
/**
@ -24,8 +24,6 @@ export interface AiSdkMiddlewareConfig {
enableWebSearch: boolean
enableGenerateImage: boolean
mcpTools?: BaseTool[]
// TODO assistant
assistant: Assistant
}
/**

View File

@ -1,24 +1,40 @@
import { AiPlugin } from '@cherrystudio/ai-core'
import { createPromptToolUsePlugin, webSearchPlugin } from '@cherrystudio/ai-core/built-in/plugins'
import store from '@renderer/store'
import { Assistant } from '@renderer/types'
import { AiSdkMiddlewareConfig } from '../middleware/AiSdkMiddlewareBuilder'
import reasoningTimePlugin from './reasoningTimePlugin'
import { searchOrchestrationPlugin } from './searchOrchestrationPlugin'
import { createTelemetryPlugin } from './telemetryPlugin'
/**
 * Builds the AI SDK plugin array from the middleware configuration
 * (telemetry, web search, search orchestration, etc.).
 */
export function buildPlugins(middlewareConfig: AiSdkMiddlewareConfig): AiPlugin[] {
export function buildPlugins(
middlewareConfig: AiSdkMiddlewareConfig & { assistant: Assistant; topicId?: string }
): AiPlugin[] {
const plugins: AiPlugin[] = []
// 1. 总是添加通用插件
// plugins.push(textPlugin)
if (middlewareConfig.topicId && store.getState().settings.enableDeveloperMode) {
// 0. 添加 telemetry 插件
plugins.push(
createTelemetryPlugin({
enabled: true,
topicId: middlewareConfig.topicId,
assistant: middlewareConfig.assistant
})
)
}
// 1. 模型内置搜索
if (middlewareConfig.enableWebSearch) {
// 内置了默认搜索参数如果改的话可以传config进去
plugins.push(webSearchPlugin())
}
// 2. 支持工具调用时添加搜索插件
if (middlewareConfig.isSupportedToolUse) {
plugins.push(searchOrchestrationPlugin(middlewareConfig.assistant))
plugins.push(searchOrchestrationPlugin(middlewareConfig.assistant, middlewareConfig.topicId || ''))
}
// 3. 推理模型时添加推理插件

View File

@ -8,6 +8,7 @@
*/
import type { AiRequestContext, ModelMessage } from '@cherrystudio/ai-core'
import { definePlugin } from '@cherrystudio/ai-core'
import { loggerService } from '@logger'
// import { generateObject } from '@cherrystudio/ai-core'
import {
SEARCH_SUMMARY_PROMPT,
@ -26,6 +27,8 @@ import { knowledgeSearchTool } from '../tools/KnowledgeSearchTool'
import { memorySearchTool } from '../tools/MemorySearchTool'
import { webSearchToolWithPreExtractedKeywords } from '../tools/WebSearchTool'
const logger = loggerService.withContext('SearchOrchestrationPlugin')
const getMessageContent = (message: ModelMessage) => {
if (typeof message.content === 'string') return message.content
return message.content.reduce((acc, part) => {
@ -76,6 +79,7 @@ async function analyzeSearchIntent(
context: AiRequestContext & {
isAnalyzing?: boolean
}
topicId: string
}
): Promise<ExtractResults | undefined> {
const { shouldWebSearch = false, shouldKnowledgeSearch = false, lastAnswer, context } = options
@ -121,12 +125,28 @@ async function analyzeSearchIntent(
// console.log('formattedPrompt', schema)
try {
context.isAnalyzing = true
const { text: result } = await context.executor.generateText(model.id, {
prompt: formattedPrompt
logger.info('Starting intent analysis generateText call', {
modelId: model.id,
topicId: options.topicId,
requestId: context.requestId,
hasWebSearch: needWebExtract,
hasKnowledgeSearch: needKnowledgeExtract
})
context.isAnalyzing = false
const { text: result } = await context.executor
.generateText(model.id, {
prompt: formattedPrompt
})
.finally(() => {
context.isAnalyzing = false
logger.info('Intent analysis generateText call completed', {
modelId: model.id,
topicId: options.topicId,
requestId: context.requestId
})
})
const parsedResult = extractInfoFromXML(result)
console.log('parsedResult', parsedResult)
logger.debug('Intent analysis result', { parsedResult })
// 根据需求过滤结果
return {
@ -134,7 +154,7 @@ async function analyzeSearchIntent(
knowledge: needKnowledgeExtract ? parsedResult?.knowledge : undefined
}
} catch (e: any) {
console.error('analyze search intent error', e)
logger.error('Intent analysis failed', e as Error)
return getFallbackResult()
}
@ -221,27 +241,36 @@ async function storeConversationMemory(
/**
* 🎯
*/
export const searchOrchestrationPlugin = (assistant: Assistant) => {
export const searchOrchestrationPlugin = (assistant: Assistant, topicId: string) => {
// 存储意图分析结果
const intentAnalysisResults: { [requestId: string]: ExtractResults } = {}
const userMessages: { [requestId: string]: ModelMessage } = {}
let currentContext: AiRequestContext | null = null
console.log('searchOrchestrationPlugin', assistant)
return definePlugin({
name: 'search-orchestration',
enforce: 'pre', // 确保在其他插件之前执行
configureContext: (context: AiRequestContext) => {
if (currentContext) {
context.isAnalyzing = currentContext.isAnalyzing
}
currentContext = context
},
/**
* 🔍 Step 1: 意图识别阶段
*/
onRequestStart: async (context: AiRequestContext) => {
console.log('onRequestStart', context.isAnalyzing)
console.log('onRequestStart', context)
if (context.isAnalyzing) return
console.log('🧠 [SearchOrchestration] Starting intent analysis...', context.requestId)
// console.log('🧠 [SearchOrchestration] Starting intent analysis...', context.requestId)
try {
const messages = context.originalParams.messages
// console.log('🧠 [SearchOrchestration]', context.isAnalyzing)
if (!messages || messages.length === 0) {
console.log('🧠 [SearchOrchestration] No messages found, skipping analysis')
return
@ -255,9 +284,9 @@ export const searchOrchestrationPlugin = (assistant: Assistant) => {
// 判断是否需要各种搜索
const knowledgeBaseIds = assistant.knowledge_bases?.map((base) => base.id)
console.log('knowledgeBaseIds', knowledgeBaseIds)
// console.log('knowledgeBaseIds', knowledgeBaseIds)
const hasKnowledgeBase = !isEmpty(knowledgeBaseIds)
console.log('hasKnowledgeBase', hasKnowledgeBase)
// console.log('hasKnowledgeBase', hasKnowledgeBase)
const knowledgeRecognition = assistant.knowledgeRecognition || 'on'
const globalMemoryEnabled = selectGlobalMemoryEnabled(store.getState())
@ -265,11 +294,11 @@ export const searchOrchestrationPlugin = (assistant: Assistant) => {
const shouldKnowledgeSearch = hasKnowledgeBase && knowledgeRecognition === 'on'
const shouldMemorySearch = globalMemoryEnabled && assistant.enableMemory
console.log('🧠 [SearchOrchestration] Search capabilities:', {
shouldWebSearch,
hasKnowledgeBase,
shouldMemorySearch
})
// console.log('🧠 [SearchOrchestration] Search capabilities:', {
// shouldWebSearch,
// hasKnowledgeBase,
// shouldMemorySearch
// })
// 执行意图分析
if (shouldWebSearch || hasKnowledgeBase) {
const analysisResult = await analyzeSearchIntent(lastUserMessage, assistant, {
@ -277,12 +306,13 @@ export const searchOrchestrationPlugin = (assistant: Assistant) => {
shouldKnowledgeSearch,
shouldMemorySearch,
lastAnswer: lastAssistantMessage,
context
context,
topicId
})
if (analysisResult) {
intentAnalysisResults[context.requestId] = analysisResult
console.log('🧠 [SearchOrchestration] Intent analysis completed:', analysisResult)
// console.log('🧠 [SearchOrchestration] Intent analysis completed:', analysisResult)
}
}
} catch (error) {
@ -296,7 +326,7 @@ export const searchOrchestrationPlugin = (assistant: Assistant) => {
*/
transformParams: async (params: any, context: AiRequestContext) => {
if (context.isAnalyzing) return params
console.log('🔧 [SearchOrchestration] Configuring tools based on intent...', context.requestId)
// console.log('🔧 [SearchOrchestration] Configuring tools based on intent...', context.requestId)
try {
const analysisResult = intentAnalysisResults[context.requestId]
@ -316,7 +346,7 @@ export const searchOrchestrationPlugin = (assistant: Assistant) => {
if (needsSearch) {
// onChunk({ type: ChunkType.EXTERNEL_TOOL_IN_PROGRESS })
console.log('🌐 [SearchOrchestration] Adding web search tool with pre-extracted keywords')
// console.log('🌐 [SearchOrchestration] Adding web search tool with pre-extracted keywords')
params.tools['builtin_web_search'] = webSearchToolWithPreExtractedKeywords(
assistant.webSearchProviderId,
analysisResult.websearch,
@ -338,11 +368,12 @@ export const searchOrchestrationPlugin = (assistant: Assistant) => {
question: [getMessageContent(userMessage) || 'search'],
rewrite: getMessageContent(userMessage) || 'search'
}
console.log('📚 [SearchOrchestration] Adding knowledge search tool (force mode)')
// console.log('📚 [SearchOrchestration] Adding knowledge search tool (force mode)')
params.tools['builtin_knowledge_search'] = knowledgeSearchTool(
assistant,
fallbackKeywords,
getMessageContent(userMessage)
getMessageContent(userMessage),
topicId
)
params.toolChoice = { type: 'tool', toolName: 'builtin_knowledge_search' }
} else {
@ -353,12 +384,13 @@ export const searchOrchestrationPlugin = (assistant: Assistant) => {
analysisResult.knowledge.question[0] !== 'not_needed'
if (needsKnowledgeSearch && analysisResult.knowledge) {
console.log('📚 [SearchOrchestration] Adding knowledge search tool (intent-based)')
// console.log('📚 [SearchOrchestration] Adding knowledge search tool (intent-based)')
const userMessage = userMessages[context.requestId]
params.tools['builtin_knowledge_search'] = knowledgeSearchTool(
assistant,
analysisResult.knowledge,
getMessageContent(userMessage)
getMessageContent(userMessage),
topicId
)
}
}
@ -367,11 +399,11 @@ export const searchOrchestrationPlugin = (assistant: Assistant) => {
// 🧠 记忆搜索工具配置
const globalMemoryEnabled = selectGlobalMemoryEnabled(store.getState())
if (globalMemoryEnabled && assistant.enableMemory) {
console.log('🧠 [SearchOrchestration] Adding memory search tool')
// console.log('🧠 [SearchOrchestration] Adding memory search tool')
params.tools['builtin_memory_search'] = memorySearchTool()
}
console.log('🔧 [SearchOrchestration] Tools configured:', Object.keys(params.tools))
// console.log('🔧 [SearchOrchestration] Tools configured:', Object.keys(params.tools))
return params
} catch (error) {
console.error('🔧 [SearchOrchestration] Tool configuration failed:', error)
@ -383,10 +415,10 @@ export const searchOrchestrationPlugin = (assistant: Assistant) => {
* 💾 Step 3: 记忆存储阶段
*/
onRequestEnd: async (context: AiRequestContext, result: any) => {
onRequestEnd: async (context: AiRequestContext) => {
// context.isAnalyzing = false
console.log('context.isAnalyzing', context, result)
console.log('💾 [SearchOrchestration] Starting memory storage...', context.requestId)
// console.log('context.isAnalyzing', context, result)
// console.log('💾 [SearchOrchestration] Starting memory storage...', context.requestId)
if (context.isAnalyzing) return
try {
const messages = context.originalParams.messages

View File

@ -0,0 +1,422 @@
/**
 * Telemetry Plugin for AI SDK Integration
 *
 * Injects an `experimental_telemetry` configuration via `transformParams` so
 * that AI SDK spans are created inside the app's existing trace context, and
 * uses AiSdkSpanAdapter to convert finished AI SDK spans into persisted trace
 * entities.
 */
import { definePlugin } from '@cherrystudio/ai-core'
import { loggerService } from '@logger'
import { Context, context as otelContext, Span, SpanContext, trace, Tracer } from '@opentelemetry/api'
import { currentSpan } from '@renderer/services/SpanManagerService'
import { webTraceService } from '@renderer/services/WebTraceService'
import { Assistant } from '@renderer/types'
import { AiSdkSpanAdapter } from '../trace/AiSdkSpanAdapter'
const logger = loggerService.withContext('TelemetryPlugin')
/**
 * Configuration for {@link createTelemetryPlugin}.
 */
export interface TelemetryPluginConfig {
  /** Master switch; when false the plugin returns params untouched. Defaults to true. */
  enabled?: boolean
  /** Whether AI SDK telemetry records request inputs. Defaults to true. */
  recordInputs?: boolean
  /** Whether AI SDK telemetry records response outputs. Defaults to true. */
  recordOutputs?: boolean
  /** Topic the trace belongs to; used as fallback when the request context has no topicId. */
  topicId: string
  /** Assistant issuing the request; its model name is used to label/locate spans. */
  assistant: Assistant
}
/**
 * Tracer wrapper handed to the AI SDK's `experimental_telemetry`.
 *
 * It delegates span creation to the real OpenTelemetry tracer, but:
 *  - runs spans inside a context carrying the parent SpanContext, so AI SDK
 *    spans join the app's manually-created trace;
 *  - tags each span with `trace.parentSpanId` / `trace.parentTraceId` /
 *    `trace.topicId` attributes as a fallback for rebuilding the hierarchy;
 *  - monkey-patches `span.end` so every finished span is converted via
 *    `AiSdkSpanAdapter` and persisted through `window.api.trace.saveEntity`.
 *
 * NOTE(review): only `startActiveSpan` is overridden — this assumes the AI SDK
 * creates spans exclusively through that entry point; confirm no `startSpan`
 * calls bypass the wrapping.
 */
class AdapterTracer {
  private originalTracer: Tracer
  private topicId?: string
  private modelName?: string
  private parentSpanContext?: SpanContext
  // Context pre-built from parentSpanContext so it can be reused per span.
  private cachedParentContext?: Context
  constructor(originalTracer: Tracer, topicId?: string, modelName?: string, parentSpanContext?: SpanContext) {
    this.originalTracer = originalTracer
    this.topicId = topicId
    this.modelName = modelName
    this.parentSpanContext = parentSpanContext
    // Pre-build a Context carrying the parent SpanContext for reuse; fall back
    // to undefined if the OTel API rejects the span context.
    try {
      this.cachedParentContext = this.parentSpanContext
        ? trace.setSpanContext(otelContext.active(), this.parentSpanContext)
        : undefined
    } catch {
      this.cachedParentContext = undefined
    }
    logger.info('AdapterTracer created with parent context info', {
      topicId,
      modelName,
      parentTraceId: this.parentSpanContext?.traceId,
      parentSpanId: this.parentSpanContext?.spanId,
      hasOriginalTracer: !!originalTracer
    })
  }
  // NOTE(review): a previous `startSpan` override — which wrapped `span.end`
  // the same way `startActiveSpan` does below — was commented out upstream.
  // If the AI SDK ever calls `startSpan` directly, those spans will not be
  // converted/persisted; confirm `startActiveSpan` is the only entry point.
  startActiveSpan<F extends (span: Span) => any>(name: string, fn: F): ReturnType<F>
  startActiveSpan<F extends (span: Span) => any>(name: string, options: any, fn: F): ReturnType<F>
  startActiveSpan<F extends (span: Span) => any>(name: string, options: any, context: any, fn: F): ReturnType<F>
  startActiveSpan<F extends (span: Span) => any>(name: string, arg2?: any, arg3?: any, arg4?: any): ReturnType<F> {
    logger.info('AdapterTracer.startActiveSpan called', {
      spanName: name,
      topicId: this.topicId,
      modelName: this.modelName,
      argCount: arguments.length
    })
    // Wrap the caller's function so the span gets parent/topic attributes and
    // an instrumented `end` that converts + persists the span.
    const wrapFunction = (originalFn: F, span: Span): F => {
      const wrappedFn = ((passedSpan: Span) => {
        // Inject parent-link attributes (fallback for rebuilding the hierarchy).
        try {
          if (this.parentSpanContext) {
            passedSpan.setAttribute('trace.parentSpanId', this.parentSpanContext.spanId)
            passedSpan.setAttribute('trace.parentTraceId', this.parentSpanContext.traceId)
          }
          if (this.topicId) {
            passedSpan.setAttribute('trace.topicId', this.topicId)
          }
        } catch (e) {
          logger.debug('Failed to set trace parent attributes in startActiveSpan', e as Error)
        }
        // Patch span.end so conversion happens exactly when the span finishes.
        const originalEnd = span.end.bind(span)
        span.end = (endTime?: any) => {
          logger.info('AI SDK span.end() called in startActiveSpan - about to convert span', {
            spanName: name,
            spanId: span.spanContext().spanId,
            traceId: span.spanContext().traceId,
            topicId: this.topicId,
            modelName: this.modelName
          })
          // Call the original end first so timing data is finalized.
          originalEnd(endTime)
          // Convert and persist the span data; failures are logged, not thrown.
          try {
            logger.info('Converting AI SDK span to SpanEntity (from startActiveSpan)', {
              spanName: name,
              spanId: span.spanContext().spanId,
              traceId: span.spanContext().traceId,
              topicId: this.topicId,
              modelName: this.modelName
            })
            logger.info('span', span)
            const spanEntity = AiSdkSpanAdapter.convertToSpanEntity({
              span,
              topicId: this.topicId,
              modelName: this.modelName
            })
            // Persist the converted entity via the preload bridge.
            window.api.trace.saveEntity(spanEntity)
            logger.info('AI SDK span converted and saved successfully (from startActiveSpan)', {
              spanName: name,
              spanId: span.spanContext().spanId,
              traceId: span.spanContext().traceId,
              topicId: this.topicId,
              modelName: this.modelName,
              hasUsage: !!spanEntity.usage,
              usage: spanEntity.usage
            })
          } catch (error) {
            logger.error('Failed to convert AI SDK span (from startActiveSpan)', error as Error, {
              spanName: name,
              spanId: span.spanContext().spanId,
              traceId: span.spanContext().traceId,
              topicId: this.topicId,
              modelName: this.modelName
            })
          }
        }
        return originalFn(passedSpan)
      }) as F
      return wrappedFn
    }
    // Build a Context that carries the parent SpanContext (if any), preferring
    // the cached one built in the constructor.
    const createContextWithParent = () => {
      if (this.cachedParentContext) {
        return this.cachedParentContext
      }
      if (this.parentSpanContext) {
        try {
          const ctx = trace.setSpanContext(otelContext.active(), this.parentSpanContext)
          logger.info('Created active context with parent SpanContext for startActiveSpan', {
            spanName: name,
            parentTraceId: this.parentSpanContext.traceId,
            parentSpanId: this.parentSpanContext.spanId,
            topicId: this.topicId
          })
          return ctx
        } catch (error) {
          logger.warn('Failed to create context with parent SpanContext in startActiveSpan', error as Error)
        }
      }
      return otelContext.active()
    }
    // Dispatch on the overload actually used, injecting the parent context.
    if (typeof arg2 === 'function') {
      return this.originalTracer.startActiveSpan(name, {}, createContextWithParent(), (span: Span) => {
        return wrapFunction(arg2, span)(span)
      })
    } else if (typeof arg3 === 'function') {
      return this.originalTracer.startActiveSpan(name, arg2, createContextWithParent(), (span: Span) => {
        return wrapFunction(arg3, span)(span)
      })
    } else if (typeof arg4 === 'function') {
      // If the caller supplied an explicit context, keep it to preserve nesting;
      // otherwise fall back to the parent context.
      const ctx = arg3 ?? createContextWithParent()
      return this.originalTracer.startActiveSpan(name, arg2, ctx, (span: Span) => {
        return wrapFunction(arg4, span)(span)
      })
    } else {
      throw new Error('Invalid arguments for startActiveSpan')
    }
  }
}
export function createTelemetryPlugin(config: TelemetryPluginConfig) {
const { enabled = true, recordInputs = true, recordOutputs = true, topicId } = config
return definePlugin({
name: 'telemetryPlugin',
enforce: 'pre', // 在其他插件之前执行,确保 telemetry 配置被正确注入
transformParams: (params, context) => {
if (!enabled) {
return params
}
// 获取共享的 tracer
const originalTracer = webTraceService.getTracer()
if (!originalTracer) {
logger.warn('No tracer available from WebTraceService')
return params
}
// 获取topicId和modelName
const effectiveTopicId = context.topicId || topicId
// 使用与父span创建时一致的modelName - 应该是完整的modelId
const modelName = config.assistant.model?.name || context.modelId
// 获取当前活跃的 span确保 AI SDK spans 与手动 spans 在同一个 trace 中
let parentSpan: Span | undefined = undefined
let parentSpanContext: SpanContext | undefined = undefined
// 只有在有topicId时才尝试查找父span
if (effectiveTopicId) {
try {
// 从 SpanManagerService 获取当前的 span
logger.info('Attempting to find parent span', {
topicId: effectiveTopicId,
requestId: context.requestId,
modelName: modelName,
contextModelId: context.modelId,
providerId: context.providerId
})
parentSpan = currentSpan(effectiveTopicId, modelName)
if (parentSpan) {
// 直接使用父 span 的 SpanContext避免手动拼装字段遗漏
parentSpanContext = parentSpan.spanContext()
logger.info('Found active parent span for AI SDK', {
parentSpanId: parentSpanContext.spanId,
parentTraceId: parentSpanContext.traceId,
topicId: effectiveTopicId,
requestId: context.requestId,
modelName: modelName
})
} else {
logger.warn('No active parent span found in SpanManagerService', {
topicId: effectiveTopicId,
requestId: context.requestId,
modelId: context.modelId,
modelName: modelName,
providerId: context.providerId,
// 更详细的调试信息
searchedModelName: modelName,
contextModelId: context.modelId,
isAnalyzing: context.isAnalyzing
})
}
} catch (error) {
logger.error('Error getting current span from SpanManagerService', error as Error, {
topicId: effectiveTopicId,
requestId: context.requestId,
modelName: modelName
})
}
} else {
logger.debug('No topicId provided, skipping parent span lookup', {
requestId: context.requestId,
contextTopicId: context.topicId,
configTopicId: topicId,
modelName: modelName
})
}
// 创建适配器包装的 tracer传入获取到的父 SpanContext
const adapterTracer = new AdapterTracer(originalTracer, effectiveTopicId, modelName, parentSpanContext)
// 注入 AI SDK telemetry 配置
const telemetryConfig = {
isEnabled: true,
recordInputs,
recordOutputs,
tracer: adapterTracer, // 使用包装后的 tracer
functionId: `ai-request-${context.requestId}`,
metadata: {
providerId: context.providerId,
modelId: context.modelId,
topicId: effectiveTopicId,
requestId: context.requestId,
modelName: modelName,
// 确保topicId也作为标准属性传递
'trace.topicId': effectiveTopicId,
'trace.modelName': modelName,
// 添加父span信息用于调试
parentSpanId: parentSpanContext?.spanId,
parentTraceId: parentSpanContext?.traceId
}
}
// 如果有父span尝试在telemetry配置中设置父上下文
if (parentSpan) {
try {
// 设置活跃上下文,确保 AI SDK spans 在正确的 trace 上下文中创建
const activeContext = trace.setSpan(otelContext.active(), parentSpan)
// 更新全局上下文
otelContext.with(activeContext, () => {
logger.debug('Updated active context with parent span')
})
logger.info('Set parent context for AI SDK spans', {
parentSpanId: parentSpanContext?.spanId,
parentTraceId: parentSpanContext?.traceId,
hasActiveContext: !!activeContext,
hasParentSpan: !!parentSpan
})
} catch (error) {
logger.warn('Failed to set parent context in telemetry config', error as Error)
}
}
logger.info('Injecting AI SDK telemetry config with adapter', {
requestId: context.requestId,
topicId: effectiveTopicId,
modelId: context.modelId,
modelName: modelName,
hasParentSpan: !!parentSpan,
parentSpanId: parentSpanContext?.spanId,
parentTraceId: parentSpanContext?.traceId,
functionId: telemetryConfig.functionId,
hasTracer: !!telemetryConfig.tracer,
tracerType: telemetryConfig.tracer?.constructor?.name || 'unknown'
})
return {
...params,
experimental_telemetry: telemetryConfig
}
}
})
}
// Default export for convenience
export default createTelemetryPlugin

View File

@ -1,3 +1,4 @@
import { REFERENCE_PROMPT } from '@renderer/config/prompts'
import { processKnowledgeSearch } from '@renderer/services/KnowledgeService'
import type { Assistant, KnowledgeReference } from '@renderer/types'
import { ExtractResults, KnowledgeExtractResults } from '@renderer/utils/extract'
@ -12,6 +13,7 @@ import { z } from 'zod'
export const knowledgeSearchTool = (
assistant: Assistant,
extractedKeywords: KnowledgeExtractResults,
topicId: string,
userMessage?: string
) => {
return tool({
@ -21,7 +23,8 @@ export const knowledgeSearchTool = (
Pre-extracted search queries: "${extractedKeywords.question.join(', ')}"
Rewritten query: "${extractedKeywords.rewrite}"
This tool searches your knowledge base for relevant documents and returns results for easy reference.
This tool searches for relevant information and formats results for easy citation. The returned sources should be cited using [1], [2], etc. format in your response.
Call this tool to execute the search. You can optionally provide additional context to refine the search.`,
inputSchema: z.object({
@ -40,7 +43,13 @@ Call this tool to execute the search. You can optionally provide additional cont
// 检查是否有知识库
if (!hasKnowledgeBase) {
return []
return {
summary: 'No knowledge base configured for this assistant.',
knowledgeReferences: [],
sources: '',
instructions: '',
rawResults: []
}
}
let finalQueries = [...extractedKeywords.question]
@ -59,7 +68,13 @@ Call this tool to execute the search. You can optionally provide additional cont
// 检查是否需要搜索
if (finalQueries[0] === 'not_needed') {
return []
return {
summary: 'No search needed based on the query analysis.',
knowledgeReferences: [],
sources: '',
instructions: '',
rawResults: []
}
}
// 构建搜索条件
@ -89,21 +104,42 @@ Call this tool to execute the search. You can optionally provide additional cont
console.log('Knowledge search extractResults:', extractResults)
// 执行知识库搜索
const knowledgeReferences = await processKnowledgeSearch(extractResults, knowledgeBaseIds)
// 返回结果数组
return knowledgeReferences.map((ref: KnowledgeReference) => ({
const knowledgeReferences = await processKnowledgeSearch(extractResults, knowledgeBaseIds, topicId)
const knowledgeReferencesData = knowledgeReferences.map((ref: KnowledgeReference) => ({
id: ref.id,
content: ref.content,
sourceUrl: ref.sourceUrl,
type: ref.type,
file: ref.file
}))
const referenceContent = `\`\`\`json\n${JSON.stringify(knowledgeReferencesData, null, 2)}\n\`\`\``
const fullInstructions = REFERENCE_PROMPT.replace(
'{question}',
"Based on the knowledge references, please answer the user's question with proper citations."
).replace('{references}', referenceContent)
// 返回结果
return {
summary: `Found ${knowledgeReferencesData.length} relevant sources. Use [number] format to cite specific information.`,
knowledgeReferences: knowledgeReferencesData,
// sources: citationData
// .map((source) => `[${source.number}] ${source.title}\n${source.content}\nURL: ${source.url}`)
// .join('\n\n'),
instructions: fullInstructions
// rawResults: citationData
}
} catch (error) {
console.error('🔍 [KnowledgeSearchTool] Search failed:', error)
// 返回空数组而不是抛出错误,避免中断对话流程
return []
// 返回空对象而不是抛出错误,避免中断对话流程
return {
summary: `Search failed: ${error instanceof Error ? error.message : 'Unknown error'}`,
knowledgeReferences: [],
instructions: ''
// rawResults: []
}
}
}
})

View File

@ -0,0 +1,655 @@
/**
 * AI SDK Span Adapter
 *
 * Converts AI SDK telemetry spans into the SpanEntity format used by the
 * local tracing system, so spans emitted by the AI SDK (named `ai.xxx` or
 * `ai.xxx.xxx`, e.g. `ai.streamText.doStream`) can be stored and displayed
 * alongside manually created trace spans.
 */
import { loggerService } from '@logger'
import { SpanEntity, TokenUsage } from '@mcp-trace/trace-core'
import { Span, SpanKind, SpanStatusCode } from '@opentelemetry/api'
const logger = loggerService.withContext('AiSdkSpanAdapter')
// Input bundle for AiSdkSpanAdapter.convertToSpanEntity: the raw OpenTelemetry
// span plus the Cherry Studio context it should be filed under.
export interface AiSdkSpanData {
  span: Span
  topicId?: string // conversation topic the span belongs to, if known
  modelName?: string // display name of the model, if known
}
// Extended interface for reaching into the span's internal data. The public
// OpenTelemetry Span API only exposes setters and spanContext(), so the
// adapter reads the concrete SDK implementation's private fields; every field
// is optional because the layout is implementation-specific.
interface SpanWithInternals extends Span {
  _spanProcessor?: any
  _attributes?: Record<string, any>
  _events?: any[]
  name?: string
  startTime?: [number, number] // OTel high-resolution time: [seconds, nanoseconds]
  endTime?: [number, number] | null
  status?: { code: SpanStatusCode; message?: string }
  kind?: SpanKind
  ended?: boolean
  parentSpanId?: string
  links?: any[]
}
export class AiSdkSpanAdapter {
  /**
   * Convert an AI SDK telemetry span into the SpanEntity format used by the
   * local tracing system.
   *
   * The concrete SDK span implementation is not part of the public
   * OpenTelemetry API, so internal fields (attributes, events, name, timing,
   * status, …) are read through a series of fallbacks. AI SDK attribute
   * conventions (`ai.usage.*`, `ai.prompt.*`, `ai.response.*`,
   * `ai.toolCall.*`) are then mapped onto the SpanEntity shape.
   *
   * @param spanData - the raw span plus optional topicId/modelName context
   * @returns the converted SpanEntity
   */
  static convertToSpanEntity(spanData: AiSdkSpanData): SpanEntity {
    const { span, topicId, modelName } = spanData
    const spanContext = span.spanContext()

    // Try to obtain the span's data through several possible layouts.
    const spanWithInternals = span as SpanWithInternals

    let attributes: Record<string, any> = {}
    let events: any[] = []
    let spanName = 'unknown'
    let spanStatus = { code: SpanStatusCode.UNSET }
    let spanKind = SpanKind.INTERNAL
    let startTime: [number, number] = [0, 0]
    let endTime: [number, number] | null = null
    let ended = false
    let parentSpanId = ''
    let links: any[] = []

    // Log the span's structure in detail for debugging.
    logger.debug('Debugging span structure', {
      hasInternalAttributes: !!spanWithInternals._attributes,
      hasGetAttributes: typeof (span as any).getAttributes === 'function',
      spanKeys: Object.keys(span),
      spanInternalKeys: Object.keys(spanWithInternals),
      spanContext: span.spanContext(),
      // Probe every known attribute location.
      attributesPath1: spanWithInternals._attributes,
      attributesPath2: (span as any).attributes,
      attributesPath3: (span as any)._spanData?.attributes,
      attributesPath4: (span as any).resource?.attributes
    })

    // Try multiple ways of obtaining the attributes, most common layout first.
    if (spanWithInternals._attributes) {
      attributes = spanWithInternals._attributes
      logger.debug('Found attributes via _attributes', { attributeCount: Object.keys(attributes).length })
    } else if (typeof (span as any).getAttributes === 'function') {
      attributes = (span as any).getAttributes()
      logger.debug('Found attributes via getAttributes()', { attributeCount: Object.keys(attributes).length })
    } else if ((span as any).attributes) {
      attributes = (span as any).attributes
      logger.debug('Found attributes via direct attributes property', {
        attributeCount: Object.keys(attributes).length
      })
    } else if ((span as any)._spanData?.attributes) {
      attributes = (span as any)._spanData.attributes
      logger.debug('Found attributes via _spanData.attributes', { attributeCount: Object.keys(attributes).length })
    } else {
      // No known layout matched; proceed with empty attributes.
      logger.warn('无法获取span attributes尝试备用方法', {
        availableKeys: Object.keys(span),
        spanType: span.constructor.name
      })
    }

    // Pull the remaining internal fields when present.
    if (spanWithInternals._events) {
      events = spanWithInternals._events
    }
    if (spanWithInternals.name) {
      spanName = spanWithInternals.name
    }
    if (spanWithInternals.status) {
      spanStatus = spanWithInternals.status
    }
    if (spanWithInternals.kind !== undefined) {
      spanKind = spanWithInternals.kind
    }
    if (spanWithInternals.startTime) {
      startTime = spanWithInternals.startTime
    }
    if (spanWithInternals.endTime) {
      endTime = spanWithInternals.endTime
    }
    if (spanWithInternals.ended !== undefined) {
      ended = spanWithInternals.ended
    }
    if (spanWithInternals.parentSpanId) {
      parentSpanId = spanWithInternals.parentSpanId
    }
    // Fallback: read the parent info we injected into the attributes ourselves.
    if (!parentSpanId && attributes['trace.parentSpanId']) {
      parentSpanId = attributes['trace.parentSpanId']
    }
    if (spanWithInternals.links) {
      links = spanWithInternals.links
    }

    // Extract the AI SDK specific data.
    const tokenUsage = this.extractTokenUsage(attributes)
    const { inputs, outputs } = this.extractInputsOutputs(attributes)
    const formattedSpanName = this.formatSpanName(spanName)
    const spanTag = this.extractSpanTag(spanName, attributes)
    const typeSpecificData = this.extractSpanTypeSpecificData(attributes)

    // Record the conversion in detail.
    const operationId = attributes['ai.operationId']
    logger.info('Converting AI SDK span to SpanEntity', {
      spanName: spanName,
      operationId,
      spanTag,
      hasTokenUsage: !!tokenUsage,
      hasInputs: !!inputs,
      hasOutputs: !!outputs,
      hasTypeSpecificData: Object.keys(typeSpecificData).length > 0,
      attributesCount: Object.keys(attributes).length,
      topicId,
      modelName,
      spanId: spanContext.spanId,
      traceId: spanContext.traceId
    })

    if (tokenUsage) {
      logger.info('Token usage data found', {
        spanName: spanName,
        operationId,
        usage: tokenUsage,
        spanId: spanContext.spanId
      })
    }

    if (inputs || outputs) {
      logger.info('Input/Output data extracted', {
        spanName: spanName,
        operationId,
        hasInputs: !!inputs,
        hasOutputs: !!outputs,
        inputKeys: inputs ? Object.keys(inputs) : [],
        outputKeys: outputs ? Object.keys(outputs) : [],
        spanId: spanContext.spanId
      })
    }

    if (Object.keys(typeSpecificData).length > 0) {
      logger.info('Type-specific data extracted', {
        spanName: spanName,
        operationId,
        typeSpecificKeys: Object.keys(typeSpecificData),
        spanId: spanContext.spanId
      })
    }

    // Assemble the SpanEntity.
    const spanEntity: SpanEntity = {
      id: spanContext.spanId,
      name: formattedSpanName,
      parentId: parentSpanId,
      traceId: spanContext.traceId,
      status: this.convertSpanStatus(spanStatus.code),
      kind: this.convertSpanKind(spanKind),
      attributes: {
        ...this.filterRelevantAttributes(attributes),
        ...typeSpecificData,
        inputs: inputs,
        outputs: outputs,
        tags: spanTag,
        modelName: modelName || this.extractModelFromAttributes(attributes) || ''
      },
      isEnd: ended,
      events: events,
      startTime: this.convertTimestamp(startTime),
      endTime: endTime ? this.convertTimestamp(endTime) : null,
      links: links,
      topicId: topicId,
      usage: tokenUsage,
      modelName: modelName || this.extractModelFromAttributes(attributes)
    }

    logger.info('AI SDK span successfully converted to SpanEntity', {
      spanName: spanName,
      operationId,
      spanId: spanContext.spanId,
      traceId: spanContext.traceId,
      topicId,
      entityId: spanEntity.id,
      hasUsage: !!spanEntity.usage,
      status: spanEntity.status,
      tags: spanEntity.attributes?.tags
    })

    return spanEntity
  }

  /**
   * Extract token usage from AI SDK attributes.
   *
   * Supported key conventions:
   * - AI SDK standard: `ai.usage.promptTokens`, `ai.usage.completionTokens`
   * - gen_ai semantic conventions: `gen_ai.usage.input_tokens`,
   *   `gen_ai.usage.output_tokens`
   *
   * @returns the usage, or undefined when the span carries no token data
   *          (normal for tool-call spans)
   */
  private static extractTokenUsage(attributes: Record<string, any>): TokenUsage | undefined {
    logger.debug('Extracting token usage from attributes', {
      attributeKeys: Object.keys(attributes),
      usageRelatedKeys: Object.keys(attributes).filter((key) => key.includes('usage') || key.includes('token')),
      fullAttributes: attributes
    })

    // BUG FIX: these two key lists were previously transposed — completion
    // tokens were read from the prompt-token keys and vice versa, so
    // prompt_tokens and completion_tokens were reported swapped.
    const promptTokenKeys = [
      // base span
      'ai.usage.promptTokens',
      // LLM span
      'gen_ai.usage.input_tokens'
    ]
    const completionTokenKeys = [
      // base span
      'ai.usage.completionTokens',
      // LLM span
      'gen_ai.usage.output_tokens'
    ]

    // Use an explicit undefined check so a legitimate 0 count is not skipped.
    const promptTokens = attributes[promptTokenKeys.find((key) => attributes[key] !== undefined) || '']
    const completionTokens = attributes[completionTokenKeys.find((key) => attributes[key] !== undefined) || '']

    if (completionTokens !== undefined || promptTokens !== undefined) {
      const usage: TokenUsage = {
        prompt_tokens: Number(promptTokens) || 0,
        completion_tokens: Number(completionTokens) || 0,
        total_tokens: (Number(promptTokens) || 0) + (Number(completionTokens) || 0)
      }

      logger.debug('Extracted token usage from AI SDK standard attributes', {
        usage,
        foundStandardAttributes: {
          'ai.usage.promptTokens': promptTokens,
          'ai.usage.completionTokens': completionTokens
        }
      })
      return usage
    }

    // Spans without token usage (e.g. tool calls) are expected.
    logger.debug('No token usage found in span attributes (normal for tool calls)', {
      availableKeys: Object.keys(attributes),
      usageKeys: Object.keys(attributes).filter((key) => key.includes('usage') || key.includes('token'))
    })

    return undefined
  }

  /**
   * Extract inputs and outputs from AI SDK attributes, mapped precisely per
   * span type according to the AI SDK telemetry documentation.
   */
  private static extractInputsOutputs(attributes: Record<string, any>): { inputs?: any; outputs?: any } {
    const operationId = attributes['ai.operationId']
    let inputs: any = undefined
    let outputs: any = undefined

    logger.debug('Extracting inputs/outputs by operation type', {
      operationId,
      availableKeys: Object.keys(attributes).filter(
        (key) => key.includes('prompt') || key.includes('response') || key.includes('toolCall')
      )
    })

    // Per the AI SDK docs, each operation type stores its data under
    // different attribute keys.
    switch (operationId) {
      case 'ai.generateText':
      case 'ai.streamText':
        // Top-level LLM spans: ai.prompt holds the input.
        inputs = {
          prompt: this.parseAttributeValue(attributes['ai.prompt'])
        }
        outputs = this.extractLLMOutputs(attributes)
        break

      case 'ai.generateText.doGenerate':
      case 'ai.streamText.doStream':
        // Provider spans: ai.prompt.messages holds the detailed input.
        inputs = {
          messages: this.parseAttributeValue(attributes['ai.prompt.messages']),
          tools: this.parseAttributeValue(attributes['ai.prompt.tools']),
          toolChoice: this.parseAttributeValue(attributes['ai.prompt.toolChoice'])
        }
        outputs = this.extractProviderOutputs(attributes)
        break

      case 'ai.toolCall':
        // Tool-call spans: tool arguments and result.
        inputs = {
          toolName: attributes['ai.toolCall.name'],
          toolId: attributes['ai.toolCall.id'],
          args: this.parseAttributeValue(attributes['ai.toolCall.args'])
        }
        outputs = {
          result: this.parseAttributeValue(attributes['ai.toolCall.result'])
        }
        break

      default:
        // Fall back to the generic extraction logic.
        inputs = this.extractGenericInputs(attributes)
        outputs = this.extractGenericOutputs(attributes)
        break
    }

    logger.debug('Extracted inputs/outputs', {
      operationId,
      hasInputs: !!inputs,
      hasOutputs: !!outputs,
      inputKeys: inputs ? Object.keys(inputs) : [],
      outputKeys: outputs ? Object.keys(outputs) : []
    })

    return { inputs, outputs }
  }

  /**
   * Extract outputs for top-level LLM spans (generateText / streamText).
   */
  private static extractLLMOutputs(attributes: Record<string, any>): any {
    const outputs: any = {}

    if (attributes['ai.response.text']) {
      outputs.text = attributes['ai.response.text']
    }
    if (attributes['ai.response.toolCalls']) {
      outputs.toolCalls = this.parseAttributeValue(attributes['ai.response.toolCalls'])
    }
    if (attributes['ai.response.finishReason']) {
      outputs.finishReason = attributes['ai.response.finishReason']
    }
    if (attributes['ai.settings.maxOutputTokens']) {
      outputs.maxOutputTokens = attributes['ai.settings.maxOutputTokens']
    }

    return Object.keys(outputs).length > 0 ? outputs : undefined
  }

  /**
   * Extract outputs for provider spans (doGenerate / doStream), including the
   * streaming performance metrics emitted by doStream.
   */
  private static extractProviderOutputs(attributes: Record<string, any>): any {
    const outputs: any = {}

    if (attributes['ai.response.text']) {
      outputs.text = attributes['ai.response.text']
    }
    if (attributes['ai.response.toolCalls']) {
      outputs.toolCalls = this.parseAttributeValue(attributes['ai.response.toolCalls'])
    }
    if (attributes['ai.response.finishReason']) {
      outputs.finishReason = attributes['ai.response.finishReason']
    }
    // Performance metrics specific to doStream.
    if (attributes['ai.response.msToFirstChunk']) {
      outputs.msToFirstChunk = attributes['ai.response.msToFirstChunk']
    }
    if (attributes['ai.response.msToFinish']) {
      outputs.msToFinish = attributes['ai.response.msToFinish']
    }
    if (attributes['ai.response.avgCompletionTokensPerSecond']) {
      outputs.avgCompletionTokensPerSecond = attributes['ai.response.avgCompletionTokensPerSecond']
    }

    return Object.keys(outputs).length > 0 ? outputs : undefined
  }

  /**
   * Generic input extraction fallback for unknown operation types.
   */
  private static extractGenericInputs(attributes: Record<string, any>): any {
    const inputKeys = ['ai.prompt', 'ai.prompt.messages', 'ai.request', 'inputs']

    for (const key of inputKeys) {
      if (attributes[key]) {
        return this.parseAttributeValue(attributes[key])
      }
    }
    return undefined
  }

  /**
   * Generic output extraction fallback for unknown operation types.
   */
  private static extractGenericOutputs(attributes: Record<string, any>): any {
    const outputKeys = ['ai.response.text', 'ai.response', 'ai.output', 'outputs']

    for (const key of outputKeys) {
      if (attributes[key]) {
        return this.parseAttributeValue(attributes[key])
      }
    }
    return undefined
  }

  /**
   * Parse an attribute value that may be a JSON string; returns the value
   * unchanged when it is not a string or not valid JSON.
   */
  private static parseAttributeValue(value: any): any {
    if (typeof value === 'string') {
      try {
        return JSON.parse(value)
      } catch {
        return value
      }
    }
    return value
  }

  /**
   * Format the span name. AI SDK span names (ai.generateText,
   * ai.streamText.doStream, …) are currently kept as-is; this hook exists so
   * formatting rules can be added in one place later.
   */
  private static formatSpanName(name: string): string {
    return name
  }

  /**
   * Derive a precise span tag from the AI SDK operationId, falling back to
   * name-based heuristics and finally to generic attribute values.
   */
  private static extractSpanTag(name: string, attributes: Record<string, any>): string {
    const operationId = attributes['ai.operationId']

    logger.debug('Extracting span tag', {
      spanName: name,
      operationId,
      operationName: attributes['operation.name']
    })

    // Classify precisely by operationId, per the AI SDK docs.
    switch (operationId) {
      case 'ai.generateText':
        return 'LLM-GENERATE'
      case 'ai.streamText':
        return 'LLM-STREAM'
      case 'ai.generateText.doGenerate':
        return 'PROVIDER-GENERATE'
      case 'ai.streamText.doStream':
        return 'PROVIDER-STREAM'
      case 'ai.toolCall':
        return 'TOOL-CALL'
      case 'ai.generateImage':
        return 'IMAGE'
      case 'ai.embed':
        return 'EMBEDDING'
      default:
        // Fallback heuristics based on the span name.
        if (name.includes('generateText') || name.includes('streamText')) {
          return 'LLM'
        }
        if (name.includes('generateImage')) {
          return 'IMAGE'
        }
        if (name.includes('embed')) {
          return 'EMBEDDING'
        }
        if (name.includes('toolCall')) {
          return 'TOOL'
        }
        // Final fallback.
        return attributes['ai.operationType'] || attributes['operation.type'] || 'AI_SDK'
    }
  }

  /**
   * Extract extra data specific to each span type (settings, provider/model
   * ids, streaming metrics, tool identifiers, telemetry metadata).
   */
  private static extractSpanTypeSpecificData(attributes: Record<string, any>): Record<string, any> {
    const operationId = attributes['ai.operationId']
    const specificData: Record<string, any> = {}

    switch (operationId) {
      case 'ai.generateText':
      case 'ai.streamText':
        // Data specific to top-level LLM spans.
        if (attributes['ai.settings.maxOutputTokens']) {
          specificData.maxOutputTokens = attributes['ai.settings.maxOutputTokens']
        }
        if (attributes['resource.name']) {
          specificData.functionId = attributes['resource.name']
        }
        break

      case 'ai.generateText.doGenerate':
      case 'ai.streamText.doStream':
        // Data specific to provider spans.
        if (attributes['ai.model.id']) {
          specificData.providerId = attributes['ai.model.provider'] || 'unknown'
          specificData.modelId = attributes['ai.model.id']
        }
        // Performance data emitted only by doStream.
        if (operationId === 'ai.streamText.doStream') {
          if (attributes['ai.response.msToFirstChunk']) {
            specificData.msToFirstChunk = attributes['ai.response.msToFirstChunk']
          }
          if (attributes['ai.response.msToFinish']) {
            specificData.msToFinish = attributes['ai.response.msToFinish']
          }
          if (attributes['ai.response.avgCompletionTokensPerSecond']) {
            specificData.tokensPerSecond = attributes['ai.response.avgCompletionTokensPerSecond']
          }
        }
        break

      case 'ai.toolCall':
        // Data specific to tool-call spans.
        specificData.toolName = attributes['ai.toolCall.name']
        specificData.toolId = attributes['ai.toolCall.id']
        // Tool calls may carry different operation names per the docs.
        if (attributes['operation.name']) {
          specificData.operationName = attributes['operation.name']
        }
        break

      default:
        // Generic AI SDK attributes.
        if (attributes['ai.telemetry.functionId']) {
          specificData.telemetryFunctionId = attributes['ai.telemetry.functionId']
        }
        if (attributes['ai.telemetry.metadata']) {
          specificData.telemetryMetadata = this.parseAttributeValue(attributes['ai.telemetry.metadata'])
        }
        break
    }

    // Attach the generic operation identifiers.
    if (operationId) {
      specificData.operationType = operationId
    }
    if (attributes['operation.name']) {
      specificData.operationName = attributes['operation.name']
    }

    logger.debug('Extracted type-specific data', {
      operationId,
      specificDataKeys: Object.keys(specificData),
      specificData
    })

    return specificData
  }

  /**
   * Extract the model name from the attributes, trying the known key
   * variants in order of specificity.
   */
  private static extractModelFromAttributes(attributes: Record<string, any>): string | undefined {
    return (
      attributes['ai.model.id'] ||
      attributes['ai.model'] ||
      attributes['model.id'] ||
      attributes['model'] ||
      attributes['modelName']
    )
  }

  /**
   * Keep useful attributes, dropping those already handled separately.
   * NOTE(review): this is an exact-key match, so prefixed keys such as
   * `ai.prompt.messages` are NOT excluded — confirm whether that duplication
   * is intended.
   */
  private static filterRelevantAttributes(attributes: Record<string, any>): Record<string, any> {
    const filtered: Record<string, any> = {}
    const excludeKeys = ['ai.usage', 'ai.prompt', 'ai.response', 'ai.input', 'ai.output', 'inputs', 'outputs']

    Object.entries(attributes).forEach(([key, value]) => {
      if (!excludeKeys.includes(key)) {
        filtered[key] = value
      }
    })

    return filtered
  }

  /**
   * Convert the OpenTelemetry span status code to its string form.
   */
  private static convertSpanStatus(statusCode?: SpanStatusCode): string {
    switch (statusCode) {
      case SpanStatusCode.OK:
        return 'OK'
      case SpanStatusCode.ERROR:
        return 'ERROR'
      case SpanStatusCode.UNSET:
      default:
        return 'UNSET'
    }
  }

  /**
   * Convert the OpenTelemetry span kind to its string form.
   */
  private static convertSpanKind(kind?: SpanKind): string {
    switch (kind) {
      case SpanKind.INTERNAL:
        return 'INTERNAL'
      case SpanKind.CLIENT:
        return 'CLIENT'
      case SpanKind.SERVER:
        return 'SERVER'
      case SpanKind.PRODUCER:
        return 'PRODUCER'
      case SpanKind.CONSUMER:
        return 'CONSUMER'
      default:
        return 'INTERNAL'
    }
  }

  /**
   * Convert a timestamp to epoch milliseconds. Accepts either an
   * OpenTelemetry high-resolution [seconds, nanoseconds] pair or a plain
   * millisecond number, which is returned unchanged.
   */
  private static convertTimestamp(timestamp: [number, number] | number): number {
    if (Array.isArray(timestamp)) {
      return timestamp[0] * 1000 + timestamp[1] / 1000000
    }
    return timestamp
  }
}

View File

@ -15,7 +15,9 @@ const MessageContent: React.FC<Props> = ({ message }) => {
<>
{!isEmpty(message.mentions) && (
<Flex gap="8px" wrap style={{ marginBottom: '10px' }}>
{message.mentions?.map((model) => <MentionTag key={getModelUniqId(model)}>{'@' + model.name}</MentionTag>)}
{message.mentions?.map((model) => (
<MentionTag key={getModelUniqId(model)}>{'@' + model.name}</MentionTag>
))}
</Flex>
)}
<MessageBlockRenderer blocks={message.blocks} message={message} />

View File

@ -381,7 +381,8 @@ export async function fetchChatCompletion({
messages,
assistant,
options,
onChunkReceived
onChunkReceived,
topicId
}: {
messages: StreamTextParams['messages']
assistant: Assistant
@ -391,10 +392,18 @@ export async function fetchChatCompletion({
headers?: Record<string, string>
}
onChunkReceived: (chunk: Chunk) => void
topicId?: string // 添加 topicId 参数
// TODO
// onChunkStatus: (status: 'searching' | 'processing' | 'success' | 'error') => void
}) {
console.log('fetchChatCompletion', messages, assistant)
logger.info('fetchChatCompletion called with detailed context', {
messageCount: messages?.length || 0,
assistantId: assistant.id,
topicId,
hasTopicId: !!topicId,
modelId: assistant.model?.id,
modelName: assistant.model?.name
})
const AI = new AiProviderNew(assistant.model || getDefaultModel())
const provider = AI.getActualProvider()
@ -446,15 +455,57 @@ export async function fetchChatCompletion({
isImageGenerationEndpoint: isDedicatedImageGenerationModel(assistant.model || getDefaultModel()),
enableWebSearch: capabilities.enableWebSearch,
enableGenerateImage: capabilities.enableGenerateImage,
mcpTools,
assistant
mcpTools
}
// if (capabilities.enableWebSearch) {
// onChunkReceived({ type: ChunkType.LLM_WEB_SEARCH_IN_PROGRESS })
// }
// --- Call AI Completions ---
onChunkReceived({ type: ChunkType.LLM_RESPONSE_CREATED })
await AI.completions(modelId, aiSdkParams, middlewareConfig)
// 在 AI SDK 调用时设置正确的 OpenTelemetry 上下文
if (topicId) {
logger.info('Attempting to set OpenTelemetry context', { topicId })
const { currentSpan } = await import('@renderer/services/SpanManagerService')
const parentSpan = currentSpan(topicId, modelId)
logger.info('Parent span lookup result', {
topicId,
hasParentSpan: !!parentSpan,
parentSpanId: parentSpan?.spanContext().spanId,
parentTraceId: parentSpan?.spanContext().traceId
})
if (parentSpan) {
logger.info('Found parent span, using completionsForTrace for proper span hierarchy', {
topicId,
parentSpanId: parentSpan.spanContext().spanId,
parentTraceId: parentSpan.spanContext().traceId
})
} else {
logger.warn('No parent span found for topicId, using completionsForTrace anyway', { topicId })
}
// 使用带trace支持的completions方法它会自动创建子span并关联到父span
await AI.completionsForTrace(modelId, aiSdkParams, {
...middlewareConfig,
assistant,
topicId,
callType: 'chat'
})
} else {
logger.warn('No topicId provided, using regular completions')
// 没有topicId时禁用telemetry以避免警告
const configWithoutTelemetry = {
...middlewareConfig,
topicId: undefined // 确保telemetryPlugin不会尝试查找span
}
await AI.completions(modelId, aiSdkParams, {
...configWithoutTelemetry,
assistant,
callType: 'chat'
})
}
// await AI.completions(
// {
@ -640,7 +691,7 @@ export async function fetchMessagesSummary({ messages, assistant }: { messages:
return null
}
const AI = new AiProvider(provider)
const AI = new AiProviderNew(model)
const topicId = messages?.find((message) => message.topicId)?.topicId || undefined
@ -665,28 +716,63 @@ export async function fetchMessagesSummary({ messages, assistant }: { messages:
})
const conversation = JSON.stringify(structredMessages)
// 复制 assistant 对象,并强制关闭思考预算
// // 复制 assistant 对象,并强制关闭思考预算
// const summaryAssistant = {
// ...assistant,
// settings: {
// ...assistant.settings,
// reasoning_effort: undefined,
// qwenThinkMode: false
// }
// }
const summaryAssistant = {
...assistant,
settings: {
...assistant.settings,
reasoning_effort: undefined,
qwenThinkMode: false
}
},
prompt,
model
}
// const params: CompletionsParams = {
// callType: 'summary',
// messages: conversation,
// assistant: { ...summaryAssistant, prompt, model },
// maxTokens: 1000,
// streamOutput: false,
// topicId,
// enableReasoning: false
// }
const llmMessages = {
system: prompt,
prompt: conversation
}
const params: CompletionsParams = {
callType: 'summary',
messages: conversation,
assistant: { ...summaryAssistant, prompt, model },
maxTokens: 1000,
// const llmMessages = await ConversationService.prepareMessagesForModel(messages, summaryAssistant)
// 使用 transformParameters 模块构建参数
// const { params: aiSdkParams, modelId } = await buildStreamTextParams(llmMessages, summaryAssistant, provider)
const middlewareConfig: AiSdkMiddlewareConfig = {
streamOutput: false,
topicId,
enableReasoning: false
enableReasoning: false,
isPromptToolUse: false,
isSupportedToolUse: false,
isImageGenerationEndpoint: false,
enableWebSearch: false,
enableGenerateImage: false,
mcpTools: []
}
console.log('fetchMessagesSummary', '开始总结')
try {
const { getText } = await AI.completionsForTrace(params)
const { getText } = await AI.completionsForTrace(model.id, llmMessages, {
...middlewareConfig,
assistant: summaryAssistant,
topicId,
callType: 'summary'
})
console.log('fetchMessagesSummary', '总结完成', getText())
const text = getText()
return removeSpecialCharactersForTopicName(text) || null
} catch (error: any) {

View File

@ -15,6 +15,7 @@ export interface OrchestrationRequest {
timeout?: number
headers?: Record<string, string>
}
topicId?: string // 添加 topicId 用于 trace
}
/**
@ -45,7 +46,8 @@ export class OrchestrationService {
messages: llmMessages,
assistant: assistant,
options: request.options,
onChunkReceived
onChunkReceived,
topicId: request.topicId
})
} catch (error: any) {
onChunkReceived({ type: ChunkType.ERROR, error })
@ -67,7 +69,8 @@ export async function transformMessagesAndFetch(
messages: llmMessages,
assistant: assistant,
options: request.options,
onChunkReceived
onChunkReceived,
topicId: request.topicId
})
} catch (error: any) {
onChunkReceived({ type: ChunkType.ERROR, error })

View File

@ -53,7 +53,7 @@ export function createStreamProcessor(callbacks: StreamProcessorCallbacks = {})
return (chunk: Chunk) => {
try {
const data = chunk
logger.debug('data: ', data)
// logger.debug('data: ', data)
switch (data.type) {
case ChunkType.BLOCK_COMPLETE: {
if (callbacks.onComplete) callbacks.onComplete(AssistantMessageStatus.SUCCESS, data?.response)

View File

@ -1,6 +1,7 @@
import { loggerService } from '@logger'
import { convertSpanToSpanEntity, FunctionSpanExporter, FunctionSpanProcessor } from '@mcp-trace/trace-core'
import { WebTracer } from '@mcp-trace/trace-web'
import { trace } from '@opentelemetry/api'
import { ReadableSpan } from '@opentelemetry/sdk-trace-base'
const logger = loggerService.withContext('WebTraceService')
@ -33,6 +34,10 @@ class WebTraceService {
processor
)
}
// Returns the named OpenTelemetry tracer used for AI SDK telemetry.
// NOTE(review): assumes a tracer provider was registered during init() so
// trace.getTracer does not return a no-op tracer — confirm.
getTracer() {
  return trace.getTracer(TRACER_NAME, '1.0.0')
}
}
export const webTraceService = new WebTraceService()

View File

@ -896,6 +896,7 @@ const fetchAndProcessAssistantResponseImpl = async (
{
messages: messagesForContext,
assistant,
topicId,
options: {
signal: abortController.signal,
timeout: 30000