feat(dependencies): update ai-sdk packages and improve logging

- Upgraded `@ai-sdk/gateway` to version 1.0.8 and `@ai-sdk/provider-utils` to version 3.0.4 in yarn.lock to pick up upstream fixes.
- Updated `ai` dependency in package.json to version ^5.0.16 for better compatibility.
- Added logging functionality in `AiSdkToChunkAdapter` to track chunk types and improve debugging.
- Refactored plugin imports to streamline code and enhance readability.
- Removed unnecessary console logs in `searchOrchestrationPlugin` to clean up the codebase.
This commit is contained in:
MyPrototypeWhat 2025-08-19 16:03:51 +08:00
parent 179b7af9bd
commit d4da7d817d
8 changed files with 55 additions and 67 deletions

View File

@ -1,6 +1,6 @@
{
"name": "@cherrystudio/ai-core",
"version": "1.0.0-alpha.7",
"version": "1.0.0-alpha.8",
"description": "Cherry Studio AI Core - Unified AI Provider Interface Based on Vercel AI SDK",
"main": "dist/index.js",
"module": "dist/index.mjs",
@ -42,7 +42,7 @@
"@ai-sdk/provider": "2.0.0",
"@ai-sdk/provider-utils": "3.0.0",
"@ai-sdk/xai": "2.0.0",
"ai": "5.0.0",
"ai": "^5.0.16",
"zod": "^3.25.0"
},
"devDependencies": {

View File

@ -4,11 +4,14 @@
*/
import { TextStreamPart, ToolSet } from '@cherrystudio/ai-core'
import { loggerService } from '@logger'
import { BaseTool, WebSearchResults, WebSearchSource } from '@renderer/types'
import { Chunk, ChunkType } from '@renderer/types/chunk'
import { ToolCallChunkHandler } from './handleToolCallChunk'
const logger = loggerService.withContext('AiSdkToChunkAdapter')
export interface CherryStudioChunk {
type: 'text-delta' | 'text-complete' | 'tool-call' | 'tool-result' | 'finish' | 'error'
text?: string
@ -83,6 +86,7 @@ export class AiSdkToChunkAdapter {
chunk: TextStreamPart<any>,
final: { text: string; reasoningContent: string; webSearchResults: any[]; reasoningId: string }
) {
logger.info(`AI SDK chunk type: ${chunk.type}`, chunk)
switch (chunk.type) {
// === 文本相关事件 ===
case 'text-start':
@ -105,12 +109,12 @@ export class AiSdkToChunkAdapter {
final.text = ''
break
case 'reasoning-start':
if (final.reasoningId !== chunk.id) {
final.reasoningId = chunk.id
this.onChunk({
type: ChunkType.THINKING_START
})
}
// if (final.reasoningId !== chunk.id) {
final.reasoningId = chunk.id
this.onChunk({
type: ChunkType.THINKING_START
})
// }
break
case 'reasoning-delta':
final.reasoningContent += chunk.text || ''
@ -171,7 +175,7 @@ export class AiSdkToChunkAdapter {
// break
case 'finish-step': {
const { providerMetadata } = chunk
const { providerMetadata, finishReason } = chunk
// google web search
if (providerMetadata?.google?.groundingMetadata) {
this.onChunk({
@ -182,6 +186,9 @@ export class AiSdkToChunkAdapter {
}
})
}
if (finishReason === 'tool-calls') {
this.onChunk({ type: ChunkType.LLM_RESPONSE_CREATED })
}
// else {
// this.onChunk({
// type: ChunkType.LLM_WEB_SEARCH_COMPLETE,

View File

@ -1,6 +1,6 @@
import { AiPlugin } from '@cherrystudio/ai-core'
import { createPromptToolUsePlugin, webSearchPlugin } from '@cherrystudio/ai-core/built-in/plugins'
import store from '@renderer/store'
import { getEnableDeveloperMode } from '@renderer/hooks/useSettings'
import { Assistant } from '@renderer/types'
import { AiSdkMiddlewareConfig } from '../middleware/AiSdkMiddlewareBuilder'
@ -16,7 +16,7 @@ export function buildPlugins(
): AiPlugin[] {
const plugins: AiPlugin[] = []
if (middlewareConfig.topicId && store.getState().settings.enableDeveloperMode) {
if (middlewareConfig.topicId && getEnableDeveloperMode()) {
// 0. 添加 telemetry 插件
plugins.push(
createTelemetryPlugin({

View File

@ -247,8 +247,6 @@ export const searchOrchestrationPlugin = (assistant: Assistant, topicId: string)
const userMessages: { [requestId: string]: ModelMessage } = {}
let currentContext: AiRequestContext | null = null
console.log('searchOrchestrationPlugin', assistant)
return definePlugin({
name: 'search-orchestration',
enforce: 'pre', // 确保在其他插件之前执行
@ -264,15 +262,14 @@ export const searchOrchestrationPlugin = (assistant: Assistant, topicId: string)
* 🔍 Step 1: 意图识别阶段
*/
onRequestStart: async (context: AiRequestContext) => {
console.log('onRequestStart', context)
if (context.isAnalyzing) return
// console.log('🧠 [SearchOrchestration] Starting intent analysis...', context.requestId)
// 没开启任何搜索则不进行意图分析
if (!(assistant.webSearchProviderId || assistant.knowledge_bases?.length || assistant.enableMemory)) return
try {
const messages = context.originalParams.messages
// console.log('🧠 [SearchOrchestration]', context.isAnalyzing)
if (!messages || messages.length === 0) {
console.log('🧠 [SearchOrchestration] No messages found, skipping analysis')
return
}
@ -283,10 +280,8 @@ export const searchOrchestrationPlugin = (assistant: Assistant, topicId: string)
userMessages[context.requestId] = lastUserMessage
// 判断是否需要各种搜索
const knowledgeBaseIds = assistant.knowledge_bases?.map((base) => base.id)
// console.log('knowledgeBaseIds', knowledgeBaseIds)
const knowledgeBaseIds = assistant.knowledge_bases.map((base) => base.id)
const hasKnowledgeBase = !isEmpty(knowledgeBaseIds)
// console.log('hasKnowledgeBase', hasKnowledgeBase)
const knowledgeRecognition = assistant.knowledgeRecognition || 'on'
const globalMemoryEnabled = selectGlobalMemoryEnabled(store.getState())
@ -294,11 +289,6 @@ export const searchOrchestrationPlugin = (assistant: Assistant, topicId: string)
const shouldKnowledgeSearch = hasKnowledgeBase && knowledgeRecognition === 'on'
const shouldMemorySearch = globalMemoryEnabled && assistant.enableMemory
// console.log('🧠 [SearchOrchestration] Search capabilities:', {
// shouldWebSearch,
// hasKnowledgeBase,
// shouldMemorySearch
// })
// 执行意图分析
if (shouldWebSearch || hasKnowledgeBase) {
const analysisResult = await analyzeSearchIntent(lastUserMessage, assistant, {

View File

@ -84,6 +84,7 @@ const ChooseTool = (toolResponse: MCPToolResponse): { label: React.ReactNode; bo
export default function MessageTool({ block }: Props) {
// FIXME: 语义错误,这里已经不是 MCP tool 了
const toolResponse = block.metadata?.rawMcpToolResponse
if (!toolResponse) return null
const toolRenderer = ChooseTool(toolResponse)

View File

@ -9,6 +9,7 @@ import { AiSdkMiddlewareConfig } from '@renderer/aiCore/middleware/AiSdkMiddlewa
import { buildStreamTextParams } from '@renderer/aiCore/transformParameters'
import { isDedicatedImageGenerationModel, isEmbeddingModel } from '@renderer/config/models'
import { getStoreSetting } from '@renderer/hooks/useSettings'
import { getEnableDeveloperMode } from '@renderer/hooks/useSettings'
import i18n from '@renderer/i18n'
import store from '@renderer/store'
import { Assistant, MCPServer, MCPTool, Model, Provider } from '@renderer/types'
@ -156,30 +157,9 @@ export async function fetchChatCompletion({
// }
// --- Call AI Completions ---
onChunkReceived({ type: ChunkType.LLM_RESPONSE_CREATED })
const enableDeveloperMode = getEnableDeveloperMode()
// 在 AI SDK 调用时设置正确的 OpenTelemetry 上下文
if (topicId) {
logger.info('Attempting to set OpenTelemetry context', { topicId })
const { currentSpan } = await import('@renderer/services/SpanManagerService')
const parentSpan = currentSpan(topicId, modelId)
logger.info('Parent span lookup result', {
topicId,
hasParentSpan: !!parentSpan,
parentSpanId: parentSpan?.spanContext().spanId,
parentTraceId: parentSpan?.spanContext().traceId
})
if (parentSpan) {
logger.info('Found parent span, using completionsForTrace for proper span hierarchy', {
topicId,
parentSpanId: parentSpan.spanContext().spanId,
parentTraceId: parentSpan.spanContext().traceId
})
} else {
logger.warn('No parent span found for topicId, using completionsForTrace anyway', { topicId })
}
if (topicId && enableDeveloperMode) {
// 使用带trace支持的completions方法它会自动创建子span并关联到父span
await AI.completionsForTrace(modelId, aiSdkParams, {
...middlewareConfig,
@ -188,14 +168,8 @@ export async function fetchChatCompletion({
callType: 'chat'
})
} else {
logger.warn('No topicId provided, using regular completions')
// 没有topicId时禁用telemetry以避免警告
const configWithoutTelemetry = {
...middlewareConfig,
topicId: undefined // 确保telemetryPlugin不会尝试查找span
}
await AI.completions(modelId, aiSdkParams, {
...configWithoutTelemetry,
...middlewareConfig,
assistant,
callType: 'chat'
})

View File

@ -23,6 +23,8 @@ export const createToolCallbacks = (deps: ToolCallbacksDependencies) => {
return {
onToolCallPending: (toolResponse: MCPToolResponse) => {
console.log('onToolCallPending', toolResponse)
if (blockManager.hasInitialPlaceholder) {
const changes = {
type: MessageBlockType.TOOL,

View File

@ -140,15 +140,15 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/gateway@npm:1.0.0":
version: 1.0.0
resolution: "@ai-sdk/gateway@npm:1.0.0"
"@ai-sdk/gateway@npm:1.0.8":
version: 1.0.8
resolution: "@ai-sdk/gateway@npm:1.0.8"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.0"
"@ai-sdk/provider-utils": "npm:3.0.4"
peerDependencies:
zod: ^3.25.76 || ^4
checksum: 10c0/ab1238cb364dd40f2793953183717f9e2c657e9c7dbcc3e18e8aa4273253fb3cb8441b018d6fc38d0aed28a5fff633deb3216ac97cf8ef64d1442a53af03da53
checksum: 10c0/7464ca5280a15ddab2a2addf8b0223bcf85519cff0d1d1893beb4ee723abebdd7eae02079429c738a16c872c8574ab8d509b349e913c0fb99d8173740d0e851a
languageName: node
linkType: hard
@ -255,6 +255,20 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/provider-utils@npm:3.0.4":
version: 3.0.4
resolution: "@ai-sdk/provider-utils@npm:3.0.4"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@standard-schema/spec": "npm:^1.0.0"
eventsource-parser: "npm:^3.0.3"
zod-to-json-schema: "npm:^3.24.1"
peerDependencies:
zod: ^3.25.76 || ^4
checksum: 10c0/6732b99310561d72262cdeef40cc58190afa55248dca0eb3a378ef87fede12086e534c68687e0fe5ef5b092da41f3e745857ce3f9b248a272a78c0dc268dffd4
languageName: node
linkType: hard
"@ai-sdk/provider@npm:2.0.0":
version: 2.0.0
resolution: "@ai-sdk/provider@npm:2.0.0"
@ -2202,7 +2216,7 @@ __metadata:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.0"
"@ai-sdk/xai": "npm:2.0.0"
ai: "npm:5.0.0"
ai: "npm:^5.0.16"
tsdown: "npm:^0.12.9"
typescript: "npm:^5.0.0"
vitest: "npm:^3.2.4"
@ -9150,17 +9164,17 @@ __metadata:
languageName: node
linkType: hard
"ai@npm:5.0.0":
version: 5.0.0
resolution: "ai@npm:5.0.0"
"ai@npm:^5.0.16":
version: 5.0.16
resolution: "ai@npm:5.0.16"
dependencies:
"@ai-sdk/gateway": "npm:1.0.0"
"@ai-sdk/gateway": "npm:1.0.8"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.0"
"@ai-sdk/provider-utils": "npm:3.0.4"
"@opentelemetry/api": "npm:1.9.0"
peerDependencies:
zod: ^3.25.76 || ^4
checksum: 10c0/ec414871e2f9804f65e21b0f1ddd2d5eb9ece56a014c493528b4f6a7983347d5112d6a990d6e9847b021ad3423116fc56511bb7631710e461b6c60d5a6a6d4e5
checksum: 10c0/3d26ff0af6ad56cfc7d382c0ef9664572dc661da04f2df37fbb09f3346c7b10cf3643d0c2d95e338b34f183dfbe225e9b364326e73918cf50edd050b1a6ec2e7
languageName: node
linkType: hard