refactor(aiCore): enhance completions methods with developer mode support

- Introduced a check for developer mode in the completions method to enable tracing when a topic ID is provided (see the sketch below the commit metadata).
- Updated the method signatures and internal calls to streamline the handling of completions with and without tracing.
- Improved code organization by making the completionsForTrace method private and renaming it for clarity.
Author: MyPrototypeWhat
Date: 2025-08-29 11:14:38 +08:00
Parent: cf777ba62b
Commit: 1b86997f14
2 changed files with 33 additions and 23 deletions
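A minimal, self-contained sketch of that routing, using simplified stand-ins for StreamTextParams, CompletionsResult and the middleware config, plus a local stub in place of the real getEnableDeveloperMode hook; only the dispatch logic mirrors the diff below.

// Simplified stand-ins (assumptions, not the real renderer types).
type StreamTextParams = { messages: unknown[] }
type CompletionsConfig = { topicId?: string; callType: string }
type CompletionsResult = { getText: () => string }

// Local stub for getEnableDeveloperMode from @renderer/hooks/useSettings.
const getEnableDeveloperMode = (): boolean => true

class ModernAiProviderSketch {
  // Public entry point: route to the traced path only when developer mode is on and a topicId exists.
  public async completions(modelId: string, params: StreamTextParams, config: CompletionsConfig) {
    if (config.topicId && getEnableDeveloperMode()) {
      // Restating topicId narrows it from `string | undefined` to `string`.
      return this._completionsForTrace(modelId, params, { ...config, topicId: config.topicId })
    }
    return this._completions(modelId, params, config)
  }

  // Traced path: in the real class this opens a parent span, runs the plain path, then ends the span.
  private async _completionsForTrace(
    modelId: string,
    params: StreamTextParams,
    config: CompletionsConfig & { topicId: string }
  ): Promise<CompletionsResult> {
    return this._completions(modelId, params, config)
  }

  // Plain path: in the real class this calls the AI SDK executor.
  private async _completions(
    modelId: string,
    params: StreamTextParams,
    config: CompletionsConfig
  ): Promise<CompletionsResult> {
    return { getText: () => `${modelId} (${config.callType}, ${params.messages.length} messages)` }
  }
}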


@@ -11,6 +11,7 @@ import { createExecutor, generateImage } from '@cherrystudio/ai-core'
 import { createAndRegisterProvider } from '@cherrystudio/ai-core/provider'
 import { loggerService } from '@logger'
 import { isNotSupportedImageSizeModel } from '@renderer/config/models'
+import { getEnableDeveloperMode } from '@renderer/hooks/useSettings'
 import { addSpan, endSpan } from '@renderer/services/SpanManagerService'
 import { StartSpanParams } from '@renderer/trace/types/ModelSpanEntity'
 import type { Assistant, GenerateImageParams, Model, Provider } from '@renderer/types'
@@ -52,6 +53,28 @@ export default class ModernAiProvider {
       topicId?: string
       callType: string
     }
+  ) {
+    if (config.topicId && getEnableDeveloperMode()) {
+      // TypeScript type narrowing: ensures topicId is typed as string
+      const traceConfig = {
+        ...config,
+        topicId: config.topicId
+      }
+      return await this._completionsForTrace(modelId, params, traceConfig)
+    } else {
+      return await this._completions(modelId, params, config)
+    }
+  }
+
+  private async _completions(
+    modelId: string,
+    params: StreamTextParams,
+    config: AiSdkMiddlewareConfig & {
+      assistant: Assistant
+      // topicId for tracing
+      topicId?: string
+      callType: string
+    }
   ): Promise<CompletionsResult> {
     // Initialize the provider into the global manager
     try {
@@ -79,7 +102,7 @@ export default class ModernAiProvider {
    * Completions method with trace support
    * Legacy completionsForTrace behavior: keeps AI SDK spans in the correct trace context
    */
-  public async completionsForTrace(
+  private async _completionsForTrace(
     modelId: string,
     params: StreamTextParams,
     config: AiSdkMiddlewareConfig & {
@@ -114,7 +137,7 @@
         modelId,
         traceName
       })
-      return await this.completions(modelId, params, config)
+      return await this._completions(modelId, params, config)
     }

     try {
@@ -126,7 +149,7 @@
         parentSpanCreated: true
       })

-      const result = await this.completions(modelId, params, config)
+      const result = await this._completions(modelId, params, config)

       logger.info('Completions finished, ending parent span', {
         spanId: span.spanContext().spanId,
@@ -172,7 +195,6 @@
     params: StreamTextParams,
     config: AiSdkMiddlewareConfig & {
       assistant: Assistant
-      // topicId for tracing
       topicId?: string
       callType: string
     }
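The traceConfig object in the first hunk exists mainly for the type checker, as its comment says: inside the `config.topicId && getEnableDeveloperMode()` guard, restating `topicId: config.topicId` produces an object whose topicId is typed as a plain string, so the traced path can rely on it being present. A generic sketch of that narrowing pattern, with illustrative names rather than the project's types:

type Config = { topicId?: string; callType: string }

// Requires a config whose topicId is definitely present.
function withTrace(config: Config & { topicId: string }): string {
  return config.topicId
}

function dispatch(config: Config): string | undefined {
  if (config.topicId) {
    // A plain `{ ...config }` would still type topicId as `string | undefined`,
    // so the property is restated with its narrowed `string` type.
    const narrowed = { ...config, topicId: config.topicId }
    return withTrace(narrowed)
  }
  return undefined
}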


@@ -10,7 +10,6 @@ import type { StreamTextParams } from '@renderer/aiCore/types'
 import { isDedicatedImageGenerationModel, isEmbeddingModel, isQwenMTModel } from '@renderer/config/models'
 import { LANG_DETECT_PROMPT } from '@renderer/config/prompts'
 import { getStoreSetting } from '@renderer/hooks/useSettings'
-import { getEnableDeveloperMode } from '@renderer/hooks/useSettings'
 import i18n from '@renderer/i18n'
 import store from '@renderer/store'
 import { Assistant, MCPServer, MCPTool, Model, Provider } from '@renderer/types'
@@ -138,23 +137,12 @@ export async function fetchChatCompletion({
   // --- Call AI Completions ---
   onChunkReceived({ type: ChunkType.LLM_RESPONSE_CREATED })

-  const enableDeveloperMode = getEnableDeveloperMode()
-  // Set up the correct OpenTelemetry context for the AI SDK call
-  if (topicId && enableDeveloperMode) {
-    // Use the trace-enabled completions method; it automatically creates a child span and attaches it to the parent span
-    await AI.completionsForTrace(modelId, aiSdkParams, {
-      ...middlewareConfig,
-      assistant,
-      topicId,
-      callType: 'chat'
-    })
-  } else {
-    await AI.completions(modelId, aiSdkParams, {
-      ...middlewareConfig,
-      assistant,
-      callType: 'chat'
-    })
-  }
+  await AI.completions(modelId, aiSdkParams, {
+    ...middlewareConfig,
+    assistant,
+    topicId,
+    callType: 'chat'
+  })
 }

 interface FetchLanguageDetectionProps {
@@ -311,7 +299,7 @@ export async function fetchMessagesSummary({ messages, assistant }: { messages:
     await appendTrace({ topicId, traceId: messageWithTrace.traceId, model })
   }

-  const { getText } = await AI.completionsForTrace(model.id, llmMessages, {
+  const { getText } = await AI.completions(model.id, llmMessages, {
     ...middlewareConfig,
     assistant: summaryAssistant,
     topicId,