Mirror of https://github.com/CherryHQ/cherry-studio.git (synced 2025-12-28 05:11:24 +08:00)
refactor(aiCore): enhance completions methods with developer mode support
- Introduced a check for developer mode in the completions methods to enable tracing capabilities when a topic ID is provided (see the sketch below).
- Updated the method signatures and internal calls to streamline the handling of completions with and without tracing.
- Improved code organization by making the completionsForTrace method private and renaming it for clarity.
parent cf777ba62b
commit 1b86997f14
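In outline, the public completions method becomes a thin dispatcher: when developer mode is enabled and a topicId is present, it routes the call through the private, trace-aware _completionsForTrace; otherwise it calls the plain private _completions. A condensed sketch of that control flow, distilled from the diff below (config type abbreviated, span bookkeeping omitted):

public async completions(
  modelId: string,
  params: StreamTextParams,
  config: AiSdkMiddlewareConfig & { assistant: Assistant; topicId?: string; callType: string }
) {
  if (config.topicId && getEnableDeveloperMode()) {
    // Developer mode + topicId: take the trace-aware path; spreading config here narrows topicId to string
    return await this._completionsForTrace(modelId, params, { ...config, topicId: config.topicId })
  }
  return await this._completions(modelId, params, config)
}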
@@ -11,6 +11,7 @@ import { createExecutor, generateImage } from '@cherrystudio/ai-core'
 import { createAndRegisterProvider } from '@cherrystudio/ai-core/provider'
 import { loggerService } from '@logger'
 import { isNotSupportedImageSizeModel } from '@renderer/config/models'
+import { getEnableDeveloperMode } from '@renderer/hooks/useSettings'
 import { addSpan, endSpan } from '@renderer/services/SpanManagerService'
 import { StartSpanParams } from '@renderer/trace/types/ModelSpanEntity'
 import type { Assistant, GenerateImageParams, Model, Provider } from '@renderer/types'
@@ -52,6 +53,28 @@ export default class ModernAiProvider {
       topicId?: string
       callType: string
     }
+  ) {
+    if (config.topicId && getEnableDeveloperMode()) {
+      // TypeScript type narrowing: make sure topicId is a string
+      const traceConfig = {
+        ...config,
+        topicId: config.topicId
+      }
+      return await this._completionsForTrace(modelId, params, traceConfig)
+    } else {
+      return await this._completions(modelId, params, config)
+    }
+  }
+
+  private async _completions(
+    modelId: string,
+    params: StreamTextParams,
+    config: AiSdkMiddlewareConfig & {
+      assistant: Assistant
+      // topicId for tracing
+      topicId?: string
+      callType: string
+    }
   ): Promise<CompletionsResult> {
     // Initialize the provider in the global manager
     try {
@@ -79,7 +102,7 @@ export default class ModernAiProvider {
    * Completions method with trace support
    * Like the legacy completionsForTrace, it makes sure AI SDK spans land in the correct trace context
    */
-  public async completionsForTrace(
+  private async _completionsForTrace(
     modelId: string,
     params: StreamTextParams,
     config: AiSdkMiddlewareConfig & {
@@ -114,7 +137,7 @@ export default class ModernAiProvider {
         modelId,
         traceName
       })
-      return await this.completions(modelId, params, config)
+      return await this._completions(modelId, params, config)
     }

     try {
@@ -126,7 +149,7 @@ export default class ModernAiProvider {
         parentSpanCreated: true
       })

-      const result = await this.completions(modelId, params, config)
+      const result = await this._completions(modelId, params, config)

       logger.info('Completions finished, ending parent span', {
         spanId: span.spanContext().spanId,
@@ -172,7 +195,6 @@ export default class ModernAiProvider {
     params: StreamTextParams,
     config: AiSdkMiddlewareConfig & {
       assistant: Assistant
-      // topicId for tracing
       topicId?: string
       callType: string
     }

@@ -10,7 +10,6 @@ import type { StreamTextParams } from '@renderer/aiCore/types'
 import { isDedicatedImageGenerationModel, isEmbeddingModel, isQwenMTModel } from '@renderer/config/models'
 import { LANG_DETECT_PROMPT } from '@renderer/config/prompts'
 import { getStoreSetting } from '@renderer/hooks/useSettings'
-import { getEnableDeveloperMode } from '@renderer/hooks/useSettings'
 import i18n from '@renderer/i18n'
 import store from '@renderer/store'
 import { Assistant, MCPServer, MCPTool, Model, Provider } from '@renderer/types'
@@ -138,23 +137,12 @@ export async function fetchChatCompletion({

   // --- Call AI Completions ---
   onChunkReceived({ type: ChunkType.LLM_RESPONSE_CREATED })
-  const enableDeveloperMode = getEnableDeveloperMode()
-  // Set up the correct OpenTelemetry context for the AI SDK call
-  if (topicId && enableDeveloperMode) {
-    // Use the trace-enabled completions method; it automatically creates a child span and links it to the parent span
-    await AI.completionsForTrace(modelId, aiSdkParams, {
-      ...middlewareConfig,
-      assistant,
-      topicId,
-      callType: 'chat'
-    })
-  } else {
-    await AI.completions(modelId, aiSdkParams, {
-      ...middlewareConfig,
-      assistant,
-      callType: 'chat'
-    })
-  }
+  await AI.completions(modelId, aiSdkParams, {
+    ...middlewareConfig,
+    assistant,
+    topicId,
+    callType: 'chat'
+  })
 }

 interface FetchLanguageDetectionProps {
@@ -311,7 +299,7 @@ export async function fetchMessagesSummary({ messages, assistant }: { messages:
     await appendTrace({ topicId, traceId: messageWithTrace.traceId, model })
   }

-  const { getText } = await AI.completionsForTrace(model.id, llmMessages, {
+  const { getText } = await AI.completions(model.id, llmMessages, {
     ...middlewareConfig,
     assistant: summaryAssistant,
     topicId,
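With the developer-mode check moved inside the provider, call sites no longer branch on tracing themselves. As the fetchChatCompletion hunk above shows, a caller now just forwards topicId and lets the provider pick the traced or plain path:

await AI.completions(modelId, aiSdkParams, {
  ...middlewareConfig,
  assistant,
  topicId,
  callType: 'chat'
})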