Merge branch 'feat/ai-sdk-package' of https://github.com/CherryHQ/cherry-studio into feat/ai-sdk-package
commit 1b56ec33e5
@@ -73,6 +73,7 @@
|
||||
"@agentic/tavily": "^7.3.3",
|
||||
"@ant-design/v5-patch-for-react-19": "^1.0.3",
|
||||
"@anthropic-ai/sdk": "^0.41.0",
|
||||
"@cherry-studio/ai-core": "workspace:*",
|
||||
"@cherrystudio/embedjs": "^0.1.31",
|
||||
"@cherrystudio/embedjs-libsql": "^0.1.31",
|
||||
"@cherrystudio/embedjs-loader-csv": "^0.1.31",
|
||||
|
||||
@@ -15,6 +15,7 @@ Cherry Studio AI Core 是一个基于 Vercel AI SDK 的统一 AI Provider 接口
|
||||
基于 [AI SDK 官方支持的 providers](https://ai-sdk.dev/providers/ai-sdk-providers):
|
||||
|
||||
**核心 Providers:**
|
||||
|
||||
- OpenAI
|
||||
- Anthropic
|
||||
- Google Generative AI
|
||||
@@ -25,6 +26,7 @@ Cherry Studio AI Core 是一个基于 Vercel AI SDK 的统一 AI Provider 接口
|
||||
- Amazon Bedrock
|
||||
|
||||
**扩展 Providers:**
|
||||
|
||||
- Cohere
|
||||
- Groq
|
||||
- Together.ai
|
||||
@@ -64,17 +66,13 @@ const client = await createAiSdkClient('openai', {
|
||||
// 流式生成
|
||||
const result = await client.stream({
|
||||
modelId: 'gpt-4',
|
||||
messages: [
|
||||
{ role: 'user', content: 'Hello!' }
|
||||
]
|
||||
messages: [{ role: 'user', content: 'Hello!' }]
|
||||
})
|
||||
|
||||
// 非流式生成
|
||||
const response = await client.generate({
|
||||
modelId: 'gpt-4',
|
||||
messages: [
|
||||
{ role: 'user', content: 'Hello!' }
|
||||
]
|
||||
messages: [{ role: 'user', content: 'Hello!' }]
|
||||
})
|
||||
```
|
||||
|
||||
@@ -89,12 +87,9 @@ const client = await createOpenAIClient({
|
||||
})
|
||||
|
||||
// 便捷流式生成
|
||||
const result = await streamGeneration(
|
||||
'openai',
|
||||
'gpt-4',
|
||||
[{ role: 'user', content: 'Hello!' }],
|
||||
{ apiKey: 'your-api-key' }
|
||||
)
|
||||
const result = await streamGeneration('openai', 'gpt-4', [{ role: 'user', content: 'Hello!' }], {
|
||||
apiKey: 'your-api-key'
|
||||
})
|
||||
```
|
||||
|
||||
### 多 Provider 支持
|
||||
@@ -111,4 +106,4 @@ const xaiClient = await createAiSdkClient('xai', { apiKey: 'xai-key' })
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
MIT
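
The README hunks above cover client creation, streaming, and the convenience helpers separately. The sketch below pulls them into one hedged example; it assumes `createAiSdkClient` and `streamGeneration` are exported from `@cherry-studio/ai-core` exactly as the README shows, and that the stream result exposes an async-iterable `textStream` (an assumption, since iteration is not shown in the hunks above).

```typescript
// Hedged consumer sketch based on the README hunks above; not a verified API surface.
import { createAiSdkClient, streamGeneration } from '@cherry-studio/ai-core'

async function readmeFlow(apiKey: string) {
  // Low-level path: create a client once, then stream or generate with it.
  const client = await createAiSdkClient('openai', { apiKey })

  const result = await client.stream({
    modelId: 'gpt-4',
    messages: [{ role: 'user', content: 'Hello!' }]
  })

  // Assumption: the stream result exposes an async-iterable textStream.
  for await (const delta of result.textStream) {
    process.stdout.write(delta)
  }

  // Convenience path: provider, model, messages and options in a single call.
  return streamGeneration('openai', 'gpt-4', [{ role: 'user', content: 'Hello!' }], { apiKey })
}
```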
|
||||
|
||||
@@ -2,8 +2,8 @@
|
||||
"name": "@cherry-studio/ai-core",
|
||||
"version": "1.0.0",
|
||||
"description": "Cherry Studio AI Core - Unified AI Provider Interface Based on Vercel AI SDK",
|
||||
"main": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
"main": "src/index.ts",
|
||||
"types": "src/index.ts",
|
||||
"scripts": {
|
||||
"build": "tsdown",
|
||||
"dev": "tsc -w",
|
||||
@@ -41,12 +41,12 @@
|
||||
"@ai-sdk/togetherai": "^0.2.14",
|
||||
"@ai-sdk/vercel": "^0.0.1",
|
||||
"@ai-sdk/xai": "^1.2.16",
|
||||
"@openrouter/ai-sdk-provider": "^0.1.0",
|
||||
"ai": "^4.3.16",
|
||||
"anthropic-vertex-ai": "^1.0.2",
|
||||
"ollama-ai-provider": "^1.2.0",
|
||||
"qwen-ai-provider": "^0.1.0",
|
||||
"zhipu-ai-provider": "^0.1.1",
|
||||
"@openrouter/ai-sdk-provider": "^0.1.0"
|
||||
"zhipu-ai-provider": "^0.1.1"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@ai-sdk/amazon-bedrock": {
|
||||
@@ -116,10 +116,9 @@
|
||||
],
|
||||
"exports": {
|
||||
".": {
|
||||
"types": "./dist/index.d.ts",
|
||||
"import": "./dist/index.js",
|
||||
"require": "./dist/index.js"
|
||||
"types": "./src/index.ts",
|
||||
"import": "./src/index.ts",
|
||||
"require": "./src/index.ts"
|
||||
}
|
||||
},
|
||||
"packageManager": "pnpm"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ import type { ImageModelV1 } from '@ai-sdk/provider'
|
||||
import { type LanguageModelV1, wrapLanguageModel } from 'ai'
|
||||
|
||||
import { aiProviderRegistry } from '../providers/registry'
|
||||
import { ProviderOptions } from './types'
|
||||
import { type ProviderId, type ProviderSettingsMap } from './types'
|
||||
|
||||
// 客户端配置接口
|
||||
export interface ClientConfig {
|
||||
@@ -34,23 +34,29 @@ export class ClientFactoryError extends Error {
|
||||
export class ApiClientFactory {
|
||||
/**
|
||||
* 创建 AI SDK 模型实例
|
||||
* 直接返回 LanguageModelV1 实例用于 streamText/generateText
|
||||
* 对于已知的 Provider 使用严格类型检查,未知的 Provider 默认使用 openai-compatible
|
||||
*/
|
||||
static async createClient<T extends ProviderId>(
|
||||
providerId: T,
|
||||
modelId: string,
|
||||
options: ProviderSettingsMap[T]
|
||||
): Promise<LanguageModelV1>
|
||||
|
||||
static async createClient(
|
||||
providerId: string,
|
||||
modelId: string = 'default',
|
||||
options: ProviderOptions
|
||||
): Promise<LanguageModelV1> {
|
||||
modelId: string,
|
||||
options: ProviderSettingsMap['openai-compatible']
|
||||
): Promise<LanguageModelV1>
|
||||
|
||||
static async createClient(providerId: string, modelId: string = 'default', options: any): Promise<LanguageModelV1> {
|
||||
try {
|
||||
// 验证provider是否支持
|
||||
if (!aiProviderRegistry.isSupported(providerId)) {
|
||||
throw new ClientFactoryError(`Provider "${providerId}" is not supported`, providerId)
|
||||
}
|
||||
// 对于不在注册表中的 provider,默认使用 openai-compatible
|
||||
const effectiveProviderId = aiProviderRegistry.isSupported(providerId) ? providerId : 'openai-compatible'
|
||||
|
||||
// 获取Provider配置
|
||||
const providerConfig = aiProviderRegistry.getProvider(providerId)
|
||||
const providerConfig = aiProviderRegistry.getProvider(effectiveProviderId)
|
||||
if (!providerConfig) {
|
||||
throw new ClientFactoryError(`Provider "${providerId}" is not registered`, providerId)
|
||||
throw new ClientFactoryError(`Provider "${effectiveProviderId}" is not registered`, providerId)
|
||||
}
|
||||
|
||||
// 动态导入模块
|
||||
@@ -61,10 +67,9 @@ export class ApiClientFactory {
|
||||
|
||||
if (typeof creatorFunction !== 'function') {
|
||||
throw new ClientFactoryError(
|
||||
`Creator function "${providerConfig.creatorFunctionName}" not found in the imported module for provider "${providerId}"`
|
||||
`Creator function "${providerConfig.creatorFunctionName}" not found in the imported module for provider "${effectiveProviderId}"`
|
||||
)
|
||||
}
|
||||
|
||||
// 创建provider实例
|
||||
const provider = creatorFunction(options)
|
||||
|
||||
@@ -82,7 +87,7 @@
|
||||
|
||||
return model
|
||||
} else {
|
||||
throw new ClientFactoryError(`Unknown model access pattern for provider "${providerId}"`)
|
||||
throw new ClientFactoryError(`Unknown model access pattern for provider "${effectiveProviderId}"`)
|
||||
}
|
||||
} catch (error) {
|
||||
if (error instanceof ClientFactoryError) {
|
||||
@@ -164,19 +169,36 @@ export class ApiClientFactory {
|
||||
id: string
|
||||
name: string
|
||||
isSupported: boolean
|
||||
effectiveProvider: string
|
||||
} {
|
||||
const provider = aiProviderRegistry.getProvider(providerId)
|
||||
const effectiveProviderId = aiProviderRegistry.isSupported(providerId) ? providerId : 'openai-compatible'
|
||||
const provider = aiProviderRegistry.getProvider(effectiveProviderId)
|
||||
|
||||
return {
|
||||
id: providerId,
|
||||
name: provider?.name || providerId,
|
||||
isSupported: aiProviderRegistry.isSupported(providerId)
|
||||
isSupported: aiProviderRegistry.isSupported(providerId),
|
||||
effectiveProvider: effectiveProviderId
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 便捷导出函数
|
||||
export const createClient = (providerId: string, modelId?: string, options?: any) =>
|
||||
ApiClientFactory.createClient(providerId, modelId, options)
|
||||
export function createClient<T extends ProviderId>(
|
||||
providerId: T,
|
||||
modelId: string,
|
||||
options: ProviderSettingsMap[T]
|
||||
): Promise<LanguageModelV1>
|
||||
|
||||
export function createClient(
|
||||
providerId: string,
|
||||
modelId: string,
|
||||
options: ProviderSettingsMap['openai-compatible']
|
||||
): Promise<LanguageModelV1>
|
||||
|
||||
export function createClient(providerId: string, modelId: string = 'default', options: any): Promise<LanguageModelV1> {
|
||||
return ApiClientFactory.createClient(providerId, modelId, options)
|
||||
}
|
||||
|
||||
export const getSupportedProviders = () => ApiClientFactory.getSupportedProviders()
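
The factory hunks above introduce an `effectiveProviderId` so that ids missing from the registry are routed through the `openai-compatible` creator while error messages keep the original id. A hedged sketch of what that fallback enables is below; the provider id, baseURL, and model name are made-up placeholders, not values from this diff.

```typescript
// Illustration of the openai-compatible fallback added above.
// 'my-local-gateway', the baseURL, and the model name are hypothetical.
import { streamText } from 'ai'
import { ApiClientFactory } from '@cherry-studio/ai-core'

async function callUnregisteredProvider() {
  // Not in the registry, so the factory should resolve this id to the
  // openai-compatible creator and still return a LanguageModelV1 instance.
  const model = await ApiClientFactory.createClient('my-local-gateway', 'qwen2.5-7b-instruct', {
    name: 'my-local-gateway',
    baseURL: 'http://localhost:8000/v1',
    apiKey: 'not-needed-locally'
  })

  const result = await streamText({
    model,
    messages: [{ role: 'user', content: 'ping' }]
  })
  return result.text
}
```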
|
||||
|
||||
|
||||
305
packages/aiCore/src/clients/PluginEnabledAiClient.ts
Normal file
@@ -0,0 +1,305 @@
|
||||
/**
|
||||
* AI Client - Cherry Studio AI Core 的主要客户端接口
|
||||
* 默认集成插件系统,提供完整的 AI 调用能力
|
||||
*
|
||||
* ## 使用方式
|
||||
*
|
||||
* ```typescript
|
||||
* import { AiClient } from '@cherry-studio/ai-core'
|
||||
*
|
||||
* // 创建客户端(默认带插件系统)
|
||||
* const client = AiClient.create('openai', {
|
||||
* name: 'openai',
|
||||
* apiKey: process.env.OPENAI_API_KEY
|
||||
* }, [LoggingPlugin, ContentFilterPlugin])
|
||||
*
|
||||
* // 使用方式与 UniversalAiSdkClient 完全相同
|
||||
* const result = await client.generateText('gpt-4', {
|
||||
* messages: [{ role: 'user', content: 'Hello!' }]
|
||||
* })
|
||||
* ```
|
||||
*/
|
||||
import { generateObject, generateText, streamObject, streamText } from 'ai'
|
||||
|
||||
import { AiPlugin, createContext, PluginManager } from '../plugins'
|
||||
import { isProviderSupported } from '../providers/registry'
|
||||
import { ApiClientFactory } from './ApiClientFactory'
|
||||
import { type ProviderId, type ProviderSettingsMap } from './types'
|
||||
import { UniversalAiSdkClient } from './UniversalAiSdkClient'
|
||||
|
||||
/**
|
||||
* Cherry Studio AI Core 的主要客户端
|
||||
* 默认集成插件系统,提供完整的 AI 调用能力
|
||||
*/
|
||||
export class PluginEnabledAiClient<T extends ProviderId = ProviderId> {
|
||||
private pluginManager: PluginManager
|
||||
private baseClient: UniversalAiSdkClient<T>
|
||||
|
||||
constructor(
|
||||
private readonly providerId: T,
|
||||
private readonly options: ProviderSettingsMap[T],
|
||||
plugins: AiPlugin[] = []
|
||||
) {
|
||||
this.pluginManager = new PluginManager(plugins)
|
||||
this.baseClient = UniversalAiSdkClient.create(providerId, options)
|
||||
}
|
||||
|
||||
/**
|
||||
* 添加单个插件
|
||||
*/
|
||||
use(plugin: AiPlugin): this {
|
||||
this.pluginManager.use(plugin)
|
||||
return this
|
||||
}
|
||||
|
||||
/**
|
||||
* 批量添加插件
|
||||
*/
|
||||
usePlugins(plugins: AiPlugin[]): this {
|
||||
plugins.forEach((plugin) => this.pluginManager.use(plugin))
|
||||
return this
|
||||
}
|
||||
|
||||
/**
|
||||
* 移除插件
|
||||
*/
|
||||
removePlugin(pluginName: string): this {
|
||||
this.pluginManager.remove(pluginName)
|
||||
return this
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取插件统计信息
|
||||
*/
|
||||
getPluginStats() {
|
||||
return this.pluginManager.getStats()
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取插件列表
|
||||
*/
|
||||
getPlugins() {
|
||||
return this.pluginManager.getPlugins()
|
||||
}
|
||||
|
||||
/**
|
||||
* 执行插件处理的通用逻辑
|
||||
* 1-5步骤的通用处理
|
||||
*/
|
||||
private async executeWithPlugins<TParams, TResult>(
|
||||
methodName: string,
|
||||
modelId: string,
|
||||
params: TParams,
|
||||
executor: (finalModelId: string, transformedParams: TParams) => Promise<TResult>
|
||||
): Promise<TResult> {
|
||||
// 创建请求上下文
|
||||
const context = createContext(this.providerId, modelId, params)
|
||||
|
||||
try {
|
||||
// 1. 触发请求开始事件
|
||||
await this.pluginManager.executeParallel('onRequestStart', context)
|
||||
|
||||
// 2. 解析模型别名
|
||||
const resolvedModelId = await this.pluginManager.executeFirst<string>('resolveModel', modelId, context)
|
||||
const finalModelId = resolvedModelId || modelId
|
||||
|
||||
// 3. 转换请求参数
|
||||
const transformedParams = await this.pluginManager.executeSequential('transformParams', params, context)
|
||||
|
||||
// 4. 执行具体的 API 调用
|
||||
const result = await executor(finalModelId, transformedParams)
|
||||
|
||||
// 5. 转换结果(对于非流式调用)
|
||||
const transformedResult = await this.pluginManager.executeSequential('transformResult', result, context)
|
||||
|
||||
// 6. 触发完成事件
|
||||
await this.pluginManager.executeParallel('onRequestEnd', context, transformedResult)
|
||||
|
||||
return transformedResult
|
||||
} catch (error) {
|
||||
// 7. 触发错误事件
|
||||
await this.pluginManager.executeParallel('onError', context, undefined, error as Error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 执行流式调用的通用逻辑
|
||||
* 流式调用的特殊处理(支持流转换器)
|
||||
*/
|
||||
private async executeStreamWithPlugins<TParams, TResult>(
|
||||
methodName: string,
|
||||
modelId: string,
|
||||
params: TParams,
|
||||
executor: (finalModelId: string, transformedParams: TParams, streamTransforms: any[]) => Promise<TResult>
|
||||
): Promise<TResult> {
|
||||
// 创建请求上下文
|
||||
const context = createContext(this.providerId, modelId, params)
|
||||
|
||||
try {
|
||||
// 1. 触发请求开始事件
|
||||
await this.pluginManager.executeParallel('onRequestStart', context)
|
||||
|
||||
// 2. 解析模型别名
|
||||
const resolvedModelId = await this.pluginManager.executeFirst<string>('resolveModel', modelId, context)
|
||||
const finalModelId = resolvedModelId || modelId
|
||||
|
||||
// 3. 转换请求参数
|
||||
const transformedParams = await this.pluginManager.executeSequential('transformParams', params, context)
|
||||
|
||||
// 4. 收集流转换器
|
||||
const streamTransforms = this.pluginManager.collectStreamTransforms()
|
||||
|
||||
// 5. 执行流式 API 调用
|
||||
const result = await executor(finalModelId, transformedParams, streamTransforms)
|
||||
|
||||
// 6. 触发完成事件(注意:对于流式调用,这里触发的是开始流式响应的事件)
|
||||
await this.pluginManager.executeParallel('onRequestEnd', context, { stream: true })
|
||||
|
||||
return result
|
||||
} catch (error) {
|
||||
// 7. 触发错误事件
|
||||
await this.pluginManager.executeParallel('onError', context, undefined, error as Error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 流式文本生成 - 集成插件系统
|
||||
*/
|
||||
async streamText(
|
||||
modelId: string,
|
||||
params: Omit<Parameters<typeof streamText>[0], 'model'>
|
||||
): Promise<ReturnType<typeof streamText>> {
|
||||
return this.executeStreamWithPlugins(
|
||||
'streamText',
|
||||
modelId,
|
||||
params,
|
||||
async (finalModelId, transformedParams, streamTransforms) => {
|
||||
// 对于流式调用,需要直接调用 AI SDK 以支持流转换器
|
||||
const model = await ApiClientFactory.createClient(this.providerId, finalModelId, this.options)
|
||||
return await streamText({
|
||||
model,
|
||||
...transformedParams,
|
||||
experimental_transform: streamTransforms.length > 0 ? streamTransforms : undefined
|
||||
})
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* 生成文本 - 集成插件系统
|
||||
*/
|
||||
async generateText(
|
||||
modelId: string,
|
||||
params: Omit<Parameters<typeof generateText>[0], 'model'>
|
||||
): Promise<ReturnType<typeof generateText>> {
|
||||
return this.executeWithPlugins('generateText', modelId, params, async (finalModelId, transformedParams) => {
|
||||
return await this.baseClient.generateText(finalModelId, transformedParams)
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* 生成结构化对象 - 集成插件系统
|
||||
*/
|
||||
async generateObject(
|
||||
modelId: string,
|
||||
params: Omit<Parameters<typeof generateObject>[0], 'model'>
|
||||
): Promise<ReturnType<typeof generateObject>> {
|
||||
return this.executeWithPlugins('generateObject', modelId, params, async (finalModelId, transformedParams) => {
|
||||
return await this.baseClient.generateObject(finalModelId, transformedParams)
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* 流式生成结构化对象 - 集成插件系统
|
||||
* 注意:streamObject 目前不支持流转换器,所以使用普通的插件处理
|
||||
*/
|
||||
async streamObject(
|
||||
modelId: string,
|
||||
params: Omit<Parameters<typeof streamObject>[0], 'model'>
|
||||
): Promise<ReturnType<typeof streamObject>> {
|
||||
return this.executeWithPlugins('streamObject', modelId, params, async (finalModelId, transformedParams) => {
|
||||
return await this.baseClient.streamObject(finalModelId, transformedParams)
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取客户端信息
|
||||
*/
|
||||
getClientInfo() {
|
||||
return this.baseClient.getClientInfo()
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取底层客户端实例(用于高级用法)
|
||||
*/
|
||||
getBaseClient(): UniversalAiSdkClient<T> {
|
||||
return this.baseClient
|
||||
}
|
||||
|
||||
// === 静态工厂方法 ===
|
||||
|
||||
/**
|
||||
* 创建 OpenAI Compatible 客户端
|
||||
*/
|
||||
static createOpenAICompatible(
|
||||
config: ProviderSettingsMap['openai-compatible'],
|
||||
plugins: AiPlugin[] = []
|
||||
): PluginEnabledAiClient<'openai-compatible'> {
|
||||
return new PluginEnabledAiClient('openai-compatible', config, plugins)
|
||||
}
|
||||
|
||||
/**
|
||||
* 创建标准提供商客户端
|
||||
*/
|
||||
static create<T extends ProviderId>(
|
||||
providerId: T,
|
||||
options: ProviderSettingsMap[T],
|
||||
plugins?: AiPlugin[]
|
||||
): PluginEnabledAiClient<T>
|
||||
|
||||
static create(
|
||||
providerId: string,
|
||||
options: ProviderSettingsMap['openai-compatible'],
|
||||
plugins?: AiPlugin[]
|
||||
): PluginEnabledAiClient<'openai-compatible'>
|
||||
|
||||
static create(providerId: string, options: any, plugins: AiPlugin[] = []): PluginEnabledAiClient {
|
||||
if (isProviderSupported(providerId)) {
|
||||
return new PluginEnabledAiClient(providerId as ProviderId, options, plugins)
|
||||
} else {
|
||||
// 对于未知 provider,使用 openai-compatible
|
||||
return new PluginEnabledAiClient('openai-compatible', options, plugins)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 创建 AI 客户端的工厂函数(默认带插件系统)
|
||||
*/
|
||||
export function createClient<T extends ProviderId>(
|
||||
providerId: T,
|
||||
options: ProviderSettingsMap[T],
|
||||
plugins?: AiPlugin[]
|
||||
): PluginEnabledAiClient<T>
|
||||
|
||||
export function createClient(
|
||||
providerId: string,
|
||||
options: ProviderSettingsMap['openai-compatible'],
|
||||
plugins?: AiPlugin[]
|
||||
): PluginEnabledAiClient<'openai-compatible'>
|
||||
|
||||
export function createClient(providerId: string, options: any, plugins: AiPlugin[] = []): PluginEnabledAiClient {
|
||||
return PluginEnabledAiClient.create(providerId, options, plugins)
|
||||
}
|
||||
|
||||
/**
|
||||
* 创建 OpenAI Compatible 客户端的便捷函数
|
||||
*/
|
||||
export function createCompatibleClient(
|
||||
config: ProviderSettingsMap['openai-compatible'],
|
||||
plugins: AiPlugin[] = []
|
||||
): PluginEnabledAiClient<'openai-compatible'> {
|
||||
return PluginEnabledAiClient.createOpenAICompatible(config, plugins)
|
||||
}
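
`executeWithPlugins` above fixes the hook order (onRequestStart → resolveModel → transformParams → execute → transformResult → onRequestEnd, with onError on failure). The sketch below shows what a minimal plugin for that pipeline could look like; the exact `AiPlugin` shape and the `definePlugin` signature are not shown in this diff, so the object literal and the hook signatures here are assumptions.

```typescript
// Minimal plugin sketch. Hook names mirror the ones PluginEnabledAiClient invokes above;
// definePlugin is assumed to accept a plain object of optional hook functions.
import { definePlugin } from '@cherry-studio/ai-core'

let startedAt = 0 // module-level for simplicity; a real plugin would keep this on the request context

export const TimingPlugin = definePlugin({
  name: 'timing',

  onRequestStart() {
    startedAt = Date.now()
  },

  // Sequential hook: may return adjusted params for the next plugin / the executor.
  transformParams(params: any) {
    return { ...params, temperature: params.temperature ?? 0.7 }
  },

  onRequestEnd() {
    console.log(`request finished after ${Date.now() - startedAt}ms`)
  }
})
```

Registering it follows the pattern already shown in the class doc comment, e.g. `PluginEnabledAiClient.create('openai', { apiKey: '…' }, [TimingPlugin])`.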
|
||||
@@ -1,21 +1,93 @@
|
||||
/**
|
||||
* Universal AI SDK Client
|
||||
* 统一的AI SDK客户端实现
|
||||
*
|
||||
* ## 使用方式
|
||||
*
|
||||
* ### 1. 官方提供商
|
||||
* ```typescript
|
||||
* import { UniversalAiSdkClient } from '@cherry-studio/ai-core'
|
||||
*
|
||||
* // OpenAI
|
||||
* const openai = UniversalAiSdkClient.create('openai', {
|
||||
* name: 'openai',
|
||||
* apiHost: 'https://api.openai.com/v1',
|
||||
* apiKey: process.env.OPENAI_API_KEY
|
||||
* })
|
||||
*
|
||||
* // Anthropic
|
||||
* const anthropic = UniversalAiSdkClient.create('anthropic', {
|
||||
* name: 'anthropic',
|
||||
* apiHost: 'https://api.anthropic.com',
|
||||
* apiKey: process.env.ANTHROPIC_API_KEY
|
||||
* })
|
||||
* ```
|
||||
*
|
||||
* ### 2. OpenAI Compatible 第三方提供商
|
||||
* ```typescript
|
||||
* // LM Studio (本地运行)
|
||||
* const lmStudio = UniversalAiSdkClient.createOpenAICompatible({
|
||||
* name: 'lm-studio',
|
||||
* baseURL: 'http://localhost:1234/v1'
|
||||
* })
|
||||
*
|
||||
* // Ollama (本地运行)
|
||||
* const ollama = UniversalAiSdkClient.createOpenAICompatible({
|
||||
* name: 'ollama',
|
||||
* baseURL: 'http://localhost:11434/v1'
|
||||
* })
|
||||
*
|
||||
* // 自定义第三方 API
|
||||
* const customProvider = UniversalAiSdkClient.createOpenAICompatible({
|
||||
* name: 'my-provider',
|
||||
* apiKey: process.env.CUSTOM_API_KEY,
|
||||
* baseURL: 'https://api.customprovider.com/v1',
|
||||
* headers: {
|
||||
* 'X-Custom-Header': 'value',
|
||||
* 'User-Agent': 'MyApp/1.0'
|
||||
* },
|
||||
* queryParams: {
|
||||
* 'api-version': '2024-01'
|
||||
* }
|
||||
* })
|
||||
* ```
|
||||
*
|
||||
* ### 3. 使用客户端进行 AI 调用
|
||||
* ```typescript
|
||||
* // 流式文本生成
|
||||
* const stream = await client.streamText('gpt-4', {
|
||||
* messages: [{ role: 'user', content: 'Hello!' }]
|
||||
* })
|
||||
*
|
||||
* // 生成文本
|
||||
* const { text } = await client.generateText('gpt-4', {
|
||||
* messages: [{ role: 'user', content: 'Hello!' }]
|
||||
* })
|
||||
*
|
||||
* // 生成结构化对象
|
||||
* const { object } = await client.generateObject('gpt-4', {
|
||||
* messages: [{ role: 'user', content: 'Generate a user profile' }],
|
||||
* schema: z.object({
|
||||
* name: z.string(),
|
||||
* age: z.number()
|
||||
* })
|
||||
* })
|
||||
* ```
|
||||
*/
|
||||
|
||||
import { experimental_generateImage as generateImage, generateObject, generateText, streamObject, streamText } from 'ai'
|
||||
import { generateObject, generateText, streamObject, streamText } from 'ai'
|
||||
|
||||
import { ApiClientFactory } from './ApiClientFactory'
|
||||
import { ProviderOptions } from './types'
|
||||
import { type ProviderId, type ProviderSettingsMap } from './types'
|
||||
|
||||
/**
|
||||
* 通用 AI SDK 客户端
|
||||
* 为特定 AI 提供商创建的客户端实例
|
||||
*/
|
||||
export class UniversalAiSdkClient {
|
||||
export class UniversalAiSdkClient<T extends ProviderId = ProviderId> {
|
||||
constructor(
|
||||
private readonly providerId: string,
|
||||
private readonly options: ProviderOptions
|
||||
private readonly providerId: T,
|
||||
private readonly options: ProviderSettingsMap[T]
|
||||
) {}
|
||||
|
||||
/**
|
||||
@@ -95,11 +167,62 @@ export class UniversalAiSdkClient {
|
||||
getClientInfo() {
|
||||
return ApiClientFactory.getClientInfo(this.providerId)
|
||||
}
|
||||
|
||||
// === 静态工厂方法 ===
|
||||
|
||||
/**
|
||||
* 创建 OpenAI Compatible 客户端
|
||||
* 用于那些实现 OpenAI API 的第三方提供商
|
||||
*/
|
||||
static createOpenAICompatible(
|
||||
config: ProviderSettingsMap['openai-compatible']
|
||||
): UniversalAiSdkClient<'openai-compatible'> {
|
||||
return new UniversalAiSdkClient('openai-compatible', config)
|
||||
}
|
||||
|
||||
/**
|
||||
* 创建标准提供商客户端
|
||||
* 对于已知的 Provider 使用严格类型检查,未知的 Provider 默认使用 openai-compatible
|
||||
*/
|
||||
static create<T extends ProviderId>(providerId: T, options: ProviderSettingsMap[T]): UniversalAiSdkClient<T>
|
||||
|
||||
static create(
|
||||
providerId: string,
|
||||
options: ProviderSettingsMap['openai-compatible']
|
||||
): UniversalAiSdkClient<'openai-compatible'>
|
||||
|
||||
static create(providerId: string, options: any): UniversalAiSdkClient {
|
||||
if (providerId in ({} as ProviderSettingsMap)) {
|
||||
return new UniversalAiSdkClient(providerId as ProviderId, options)
|
||||
} else {
|
||||
// 对于未知 provider,使用 openai-compatible
|
||||
return new UniversalAiSdkClient('openai-compatible', options)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 创建客户端实例的工厂函数
|
||||
*/
|
||||
export function createUniversalClient(providerId: string, options: ProviderOptions): UniversalAiSdkClient {
|
||||
return new UniversalAiSdkClient(providerId, options)
|
||||
export function createUniversalClient<T extends ProviderId>(
|
||||
providerId: T,
|
||||
options: ProviderSettingsMap[T]
|
||||
): UniversalAiSdkClient<T>
|
||||
|
||||
export function createUniversalClient(
|
||||
providerId: string,
|
||||
options: ProviderSettingsMap['openai-compatible']
|
||||
): UniversalAiSdkClient<'openai-compatible'>
|
||||
|
||||
export function createUniversalClient(providerId: string, options: any): UniversalAiSdkClient {
|
||||
return UniversalAiSdkClient.create(providerId, options)
|
||||
}
|
||||
|
||||
/**
|
||||
* 创建 OpenAI Compatible 客户端的便捷函数
|
||||
*/
|
||||
export function createOpenAICompatibleClient(
|
||||
config: ProviderSettingsMap['openai-compatible']
|
||||
): UniversalAiSdkClient<'openai-compatible'> {
|
||||
return UniversalAiSdkClient.createOpenAICompatible(config)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,42 @@
|
||||
export type ProviderOptions = {
|
||||
apiKey?: string
|
||||
baseURL?: string
|
||||
apiVersion?: string
|
||||
headers?: Record<string, string | unknown>
|
||||
}
|
||||
import { generateObject, generateText, streamObject, streamText } from 'ai'
|
||||
|
||||
import type { ProviderSettingsMap } from '../providers/registry'
|
||||
|
||||
// ProviderSettings 是所有 Provider Settings 的联合类型
|
||||
export type ProviderSettings = ProviderSettingsMap[keyof ProviderSettingsMap]
|
||||
|
||||
export type StreamTextParams = Omit<Parameters<typeof streamText>[0], 'model'>
|
||||
export type GenerateTextParams = Omit<Parameters<typeof generateText>[0], 'model'>
|
||||
export type StreamObjectParams = Omit<Parameters<typeof streamObject>[0], 'model'>
|
||||
export type GenerateObjectParams = Omit<Parameters<typeof generateObject>[0], 'model'>
|
||||
|
||||
// 重新导出 ProviderSettingsMap 中的所有类型
|
||||
export type {
|
||||
AmazonBedrockProviderSettings,
|
||||
AnthropicProviderSettings,
|
||||
AnthropicVertexProviderSettings,
|
||||
AzureOpenAIProviderSettings,
|
||||
CerebrasProviderSettings,
|
||||
CohereProviderSettings,
|
||||
DeepInfraProviderSettings,
|
||||
DeepSeekProviderSettings,
|
||||
FalProviderSettings,
|
||||
FireworksProviderSettings,
|
||||
GoogleGenerativeAIProviderSettings,
|
||||
GoogleVertexProviderSettings,
|
||||
GroqProviderSettings,
|
||||
MistralProviderSettings,
|
||||
OllamaProviderSettings,
|
||||
OpenAICompatibleProviderSettings,
|
||||
OpenAIProviderSettings,
|
||||
OpenRouterProviderSettings,
|
||||
PerplexityProviderSettings,
|
||||
ProviderId,
|
||||
ProviderSettingsMap,
|
||||
QwenProviderSettings,
|
||||
ReplicateProviderSettings,
|
||||
TogetherAIProviderSettings,
|
||||
VercelProviderSettings,
|
||||
XaiProviderSettings,
|
||||
ZhipuProviderSettings
|
||||
} from '../providers/registry'
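
The `StreamTextParams` / `GenerateTextParams` aliases above are derived from the AI SDK function signatures with `model` omitted, so a thin wrapper can accept everything except the model and inject the resolved instance itself. A small illustrative helper (not part of the package):

```typescript
// Sketch of consuming the derived param types above; the helper is illustrative only.
import { streamText, type LanguageModelV1 } from 'ai'
import type { StreamTextParams } from '@cherry-studio/ai-core'

// Callers pass every streamText option except 'model'; the wrapper supplies the model.
async function streamWithModel(model: LanguageModelV1, params: StreamTextParams) {
  return streamText({ model, ...params })
}
```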
|
||||
|
||||
@@ -5,43 +5,131 @@
|
||||
|
||||
// 导入内部使用的类和函数
|
||||
import { ApiClientFactory } from './clients/ApiClientFactory'
|
||||
import { ProviderOptions } from './clients/types'
|
||||
import { createClient } from './clients/PluginEnabledAiClient'
|
||||
import { type ProviderSettingsMap } from './clients/types'
|
||||
import { createUniversalClient } from './clients/UniversalAiSdkClient'
|
||||
import { aiProviderRegistry, isProviderSupported } from './providers/registry'
|
||||
|
||||
// 核心导出
|
||||
// ==================== 主要客户端接口 ====================
|
||||
// 默认使用集成插件系统的客户端
|
||||
export {
|
||||
PluginEnabledAiClient as AiClient,
|
||||
createClient,
|
||||
createCompatibleClient
|
||||
} from './clients/PluginEnabledAiClient'
|
||||
|
||||
// 为了向后兼容,也导出原名称
|
||||
export { PluginEnabledAiClient } from './clients/PluginEnabledAiClient'
|
||||
|
||||
// ==================== 插件系统 ====================
|
||||
export type { AiPlugin, AiRequestContext, HookResult, HookType, PluginManagerConfig } from './plugins'
|
||||
export { createContext, definePlugin, PluginManager } from './plugins'
|
||||
|
||||
// ==================== 底层客户端(高级用法) ====================
|
||||
// 不带插件系统的基础客户端,用于需要绕过插件系统的场景
|
||||
export {
|
||||
createOpenAICompatibleClient as createBasicOpenAICompatibleClient,
|
||||
createUniversalClient,
|
||||
UniversalAiSdkClient
|
||||
} from './clients/UniversalAiSdkClient'
|
||||
|
||||
// ==================== 低级 API ====================
|
||||
export { ApiClientFactory } from './clients/ApiClientFactory'
|
||||
export { createUniversalClient, UniversalAiSdkClient } from './clients/UniversalAiSdkClient'
|
||||
export { aiProviderRegistry } from './providers/registry'
|
||||
|
||||
// 类型导出
|
||||
// ==================== 类型定义 ====================
|
||||
export type { ClientFactoryError } from './clients/ApiClientFactory'
|
||||
export type {
|
||||
GenerateObjectParams,
|
||||
GenerateTextParams,
|
||||
ProviderSettings,
|
||||
StreamObjectParams,
|
||||
StreamTextParams
|
||||
} from './clients/types'
|
||||
export type { ProviderConfig } from './providers/registry'
|
||||
export type { ProviderError } from './providers/types'
|
||||
export * as aiSdk from 'ai'
|
||||
|
||||
// 便捷函数导出
|
||||
export { createClient, getClientInfo, getSupportedProviders } from './clients/ApiClientFactory'
|
||||
// ==================== AI SDK 常用类型导出 ====================
|
||||
// 直接导出 AI SDK 的常用类型,方便使用
|
||||
export type {
|
||||
CoreAssistantMessage,
|
||||
// 消息相关类型
|
||||
CoreMessage,
|
||||
CoreSystemMessage,
|
||||
CoreToolMessage,
|
||||
CoreUserMessage,
|
||||
// 通用类型
|
||||
FinishReason,
|
||||
GenerateObjectResult,
|
||||
// 生成相关类型
|
||||
GenerateTextResult,
|
||||
InvalidToolArgumentsError,
|
||||
LanguageModelUsage, // AI SDK 4.0 中 TokenUsage 改名为 LanguageModelUsage
|
||||
// 错误类型
|
||||
NoSuchToolError,
|
||||
StreamTextResult,
|
||||
// 流相关类型
|
||||
TextStreamPart,
|
||||
// 工具相关类型
|
||||
Tool,
|
||||
ToolCall,
|
||||
ToolExecutionError,
|
||||
ToolResult
|
||||
} from 'ai'
|
||||
|
||||
// 重新导出所有 Provider Settings 类型
|
||||
export type {
|
||||
AmazonBedrockProviderSettings,
|
||||
AnthropicProviderSettings,
|
||||
AnthropicVertexProviderSettings,
|
||||
AzureOpenAIProviderSettings,
|
||||
CerebrasProviderSettings,
|
||||
CohereProviderSettings,
|
||||
DeepInfraProviderSettings,
|
||||
DeepSeekProviderSettings,
|
||||
FalProviderSettings,
|
||||
FireworksProviderSettings,
|
||||
GoogleGenerativeAIProviderSettings,
|
||||
GoogleVertexProviderSettings,
|
||||
GroqProviderSettings,
|
||||
MistralProviderSettings,
|
||||
OllamaProviderSettings,
|
||||
OpenAICompatibleProviderSettings,
|
||||
OpenAIProviderSettings,
|
||||
OpenRouterProviderSettings,
|
||||
PerplexityProviderSettings,
|
||||
ProviderId,
|
||||
ProviderSettingsMap,
|
||||
QwenProviderSettings,
|
||||
ReplicateProviderSettings,
|
||||
TogetherAIProviderSettings,
|
||||
VercelProviderSettings,
|
||||
XaiProviderSettings,
|
||||
ZhipuProviderSettings
|
||||
} from './clients/types'
|
||||
|
||||
// ==================== 工具函数 ====================
|
||||
export { createClient as createApiClient, getClientInfo, getSupportedProviders } from './clients/ApiClientFactory'
|
||||
export { getAllProviders, getProvider, isProviderSupported, registerProvider } from './providers/registry'
|
||||
|
||||
// 默认导出 - 主要的工厂类
|
||||
export { ApiClientFactory as default } from './clients/ApiClientFactory'
|
||||
|
||||
// 包信息
|
||||
// ==================== 包信息 ====================
|
||||
export const AI_CORE_VERSION = '1.0.0'
|
||||
export const AI_CORE_NAME = '@cherry-studio/ai-core'
|
||||
|
||||
// 包配置和实用工具
|
||||
// ==================== 便捷 API ====================
|
||||
// 主要的便捷工厂类
|
||||
export const AiCore = {
|
||||
version: AI_CORE_VERSION,
|
||||
name: AI_CORE_NAME,
|
||||
|
||||
// 快速创建客户端
|
||||
async createClient(providerId: string, modelId: string = 'default', options: any = {}) {
|
||||
return ApiClientFactory.createClient(providerId, modelId, options)
|
||||
// 创建主要客户端(默认带插件系统)
|
||||
create(providerId: string, options: any = {}, plugins: any[] = []) {
|
||||
return createClient(providerId, options, plugins)
|
||||
},
|
||||
|
||||
// 创建通用客户端
|
||||
createUniversalClient(providerId: string, options: any = {}) {
|
||||
// 创建基础客户端(不带插件系统)
|
||||
createBasic(providerId: string, options: any = {}) {
|
||||
return createUniversalClient(providerId, options)
|
||||
},
|
||||
|
||||
@@ -61,28 +149,23 @@ export const AiCore = {
|
||||
}
|
||||
}
|
||||
|
||||
// 便捷的预配置clients创建函数
|
||||
export const createOpenAIClient = (options: ProviderOptions) => {
|
||||
return createUniversalClient('openai', options)
|
||||
export const createOpenAIClient = (options: ProviderSettingsMap['openai'], plugins?: any[]) => {
|
||||
return createClient('openai', options, plugins)
|
||||
}
|
||||
|
||||
export const createOpenAICompatibleClient = (options: ProviderOptions) => {
|
||||
return createUniversalClient('openai-compatible', options)
|
||||
export const createAnthropicClient = (options: ProviderSettingsMap['anthropic'], plugins?: any[]) => {
|
||||
return createClient('anthropic', options, plugins)
|
||||
}
|
||||
|
||||
export const createAnthropicClient = (options: ProviderOptions) => {
|
||||
return createUniversalClient('anthropic', options)
|
||||
export const createGoogleClient = (options: ProviderSettingsMap['google'], plugins?: any[]) => {
|
||||
return createClient('google', options, plugins)
|
||||
}
|
||||
|
||||
export const createGoogleClient = (options: ProviderOptions) => {
|
||||
return createUniversalClient('google', options)
|
||||
export const createXAIClient = (options: ProviderSettingsMap['xai'], plugins?: any[]) => {
|
||||
return createClient('xai', options, plugins)
|
||||
}
|
||||
|
||||
export const createXAIClient = (options: ProviderOptions) => {
|
||||
return createUniversalClient('xai', options)
|
||||
}
|
||||
|
||||
// 调试和开发工具
|
||||
// ==================== 调试和开发工具 ====================
|
||||
export const DevTools = {
|
||||
// 列出所有注册的providers
|
||||
listProviders() {
|
||||
@@ -95,7 +178,7 @@ export const DevTools = {
|
||||
// 测试provider连接
|
||||
async testProvider(providerId: string, options: any) {
|
||||
try {
|
||||
const client = createUniversalClient(providerId, options)
|
||||
const client = createClient(providerId, options)
|
||||
const info = client.getClientInfo()
|
||||
return {
|
||||
success: true,
|
||||
|
||||
@@ -1,102 +0,0 @@
|
||||
import { openai } from '@ai-sdk/openai'
|
||||
import { streamText } from 'ai'
|
||||
|
||||
import { createContext, PluginManager } from '..'
|
||||
import { ContentFilterPlugin, LoggingPlugin } from './example-plugins'
|
||||
|
||||
/**
|
||||
* 完整的 AI SDK 集成示例
|
||||
*/
|
||||
export async function exampleAiRequest() {
|
||||
// 1. 创建插件管理器
|
||||
const pluginManager = new PluginManager([LoggingPlugin, ContentFilterPlugin])
|
||||
|
||||
// 2. 创建请求上下文
|
||||
const context = createContext('openai', 'gpt-4', {
|
||||
messages: [{ role: 'user', content: 'Hello!' }]
|
||||
})
|
||||
|
||||
try {
|
||||
// 3. 触发请求开始事件
|
||||
await pluginManager.executeParallel('onRequestStart', context)
|
||||
|
||||
// 4. 解析模型别名
|
||||
// const resolvedModel = await pluginManager.executeFirst('resolveModel', 'gpt-4', context)
|
||||
// const modelId = resolvedModel || 'gpt-4'
|
||||
|
||||
// 5. 转换请求参数
|
||||
const params = {
|
||||
messages: [{ role: 'user' as const, content: 'Hello, AI!' }],
|
||||
temperature: 0.7
|
||||
}
|
||||
const transformedParams = await pluginManager.executeSequential('transformParams', params, context)
|
||||
|
||||
// 6. 收集流转换器(关键:AI SDK 原生支持数组!)
|
||||
const streamTransforms = pluginManager.collectStreamTransforms()
|
||||
|
||||
// 7. 调用 AI SDK,直接传入转换器工厂数组
|
||||
const result = await streamText({
|
||||
model: openai('gpt-4'),
|
||||
...transformedParams,
|
||||
experimental_transform: streamTransforms // 直接传入工厂函数数组
|
||||
})
|
||||
|
||||
// 8. 处理结果
|
||||
let fullText = ''
|
||||
for await (const textPart of result.textStream) {
|
||||
fullText += textPart
|
||||
console.log('Streaming:', textPart)
|
||||
}
|
||||
|
||||
// 9. 转换最终结果
|
||||
const finalResult = { text: fullText, usage: await result.usage }
|
||||
const transformedResult = await pluginManager.executeSequential('transformResult', finalResult, context)
|
||||
|
||||
// 10. 触发完成事件
|
||||
await pluginManager.executeParallel('onRequestEnd', context, transformedResult)
|
||||
|
||||
return transformedResult
|
||||
} catch (error) {
|
||||
// 11. 触发错误事件
|
||||
await pluginManager.executeParallel('onError', context, undefined, error as Error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 流转换器数组的其他使用方式
|
||||
*/
|
||||
export function demonstrateStreamTransforms() {
|
||||
const pluginManager = new PluginManager([
|
||||
ContentFilterPlugin,
|
||||
{
|
||||
name: 'text-replacer',
|
||||
transformStream() {
|
||||
return () =>
|
||||
new TransformStream({
|
||||
transform(chunk, controller) {
|
||||
if (chunk.type === 'text-delta') {
|
||||
const replaced = chunk.textDelta.replace(/hello/gi, 'hi')
|
||||
controller.enqueue({ ...chunk, textDelta: replaced })
|
||||
} else {
|
||||
controller.enqueue(chunk)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
])
|
||||
|
||||
// 获取所有流转换器
|
||||
const transforms = pluginManager.collectStreamTransforms()
|
||||
console.log(`收集到 ${transforms.length} 个流转换器`)
|
||||
|
||||
// 可以单独使用每个转换器
|
||||
transforms.forEach((factory, index) => {
|
||||
console.log(`转换器 ${index + 1} 已准备就绪`)
|
||||
const transform = factory({ stopStream: () => {} })
|
||||
console.log('Transform created:', transform)
|
||||
})
|
||||
|
||||
return transforms
|
||||
}
|
||||
255
packages/aiCore/src/plugins/examples/example-usage.ts
Normal file
@@ -0,0 +1,255 @@
|
||||
import { openai } from '@ai-sdk/openai'
|
||||
import { streamText } from 'ai'
|
||||
|
||||
import { PluginEnabledAiClient } from '../../clients/PluginEnabledAiClient'
|
||||
import { createContext, PluginManager } from '../'
|
||||
import { ContentFilterPlugin, LoggingPlugin } from './example-plugins'
|
||||
|
||||
/**
|
||||
* 使用 PluginEnabledAiClient 的推荐方式
|
||||
* 这是最简单直接的使用方法
|
||||
*/
|
||||
export async function exampleWithPluginEnabledClient() {
|
||||
console.log('=== 使用 PluginEnabledAiClient 示例 ===')
|
||||
|
||||
// 1. 创建带插件的客户端 - 链式调用方式
|
||||
const client = PluginEnabledAiClient.create('openai-compatible', {
|
||||
name: 'openai',
|
||||
baseURL: 'https://api.openai.com/v1',
|
||||
apiKey: process.env.OPENAI_API_KEY || 'sk-test'
|
||||
})
|
||||
.use(LoggingPlugin)
|
||||
.use(ContentFilterPlugin)
|
||||
|
||||
// 2. 或者在创建时传入插件(也可以这样使用)
|
||||
// const clientWithPlugins = PluginEnabledAiClient.create(
|
||||
// 'openai-compatible',
|
||||
// {
|
||||
// name: 'openai',
|
||||
// baseURL: 'https://api.openai.com/v1',
|
||||
// apiKey: process.env.OPENAI_API_KEY || 'sk-test'
|
||||
// },
|
||||
// [LoggingPlugin, ContentFilterPlugin]
|
||||
// )
|
||||
|
||||
// 3. 查看插件统计信息
|
||||
console.log('插件统计:', client.getPluginStats())
|
||||
|
||||
try {
|
||||
// 4. 使用客户端进行 AI 调用(插件会自动生效)
|
||||
console.log('开始生成文本...')
|
||||
const result = await client.generateText('gpt-4', {
|
||||
messages: [{ role: 'user', content: 'Hello, world!' }],
|
||||
temperature: 0.7
|
||||
})
|
||||
|
||||
console.log('生成的文本:', result.text)
|
||||
|
||||
// 5. 流式调用(支持流转换器)
|
||||
console.log('开始流式生成...')
|
||||
const streamResult = await client.streamText('gpt-4', {
|
||||
messages: [{ role: 'user', content: 'Tell me a short story about AI' }]
|
||||
})
|
||||
|
||||
console.log('开始流式响应...')
|
||||
for await (const textPart of streamResult.textStream) {
|
||||
process.stdout.write(textPart)
|
||||
}
|
||||
console.log('\n流式响应完成')
|
||||
|
||||
return result
|
||||
} catch (error) {
|
||||
console.error('调用失败:', error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 创建 OpenAI Compatible 客户端的示例
|
||||
*/
|
||||
export function exampleOpenAICompatible() {
|
||||
console.log('=== OpenAI Compatible 示例 ===')
|
||||
|
||||
// Ollama 示例
|
||||
const ollama = PluginEnabledAiClient.createOpenAICompatible(
|
||||
{
|
||||
name: 'ollama',
|
||||
baseURL: 'http://localhost:11434/v1'
|
||||
},
|
||||
[LoggingPlugin]
|
||||
)
|
||||
|
||||
// LM Studio 示例
|
||||
const lmStudio = PluginEnabledAiClient.createOpenAICompatible({
|
||||
name: 'lm-studio',
|
||||
baseURL: 'http://localhost:1234/v1'
|
||||
}).use(ContentFilterPlugin)
|
||||
|
||||
console.log('Ollama 插件统计:', ollama.getPluginStats())
|
||||
console.log('LM Studio 插件统计:', lmStudio.getPluginStats())
|
||||
|
||||
return { ollama, lmStudio }
|
||||
}
|
||||
|
||||
/**
|
||||
* 动态插件管理示例
|
||||
*/
|
||||
export function exampleDynamicPlugins() {
|
||||
console.log('=== 动态插件管理示例 ===')
|
||||
|
||||
const client = PluginEnabledAiClient.create('openai-compatible', {
|
||||
name: 'openai',
|
||||
baseURL: 'https://api.openai.com/v1',
|
||||
apiKey: 'your-api-key'
|
||||
})
|
||||
|
||||
console.log('初始状态:', client.getPluginStats())
|
||||
|
||||
// 动态添加插件
|
||||
client.use(LoggingPlugin)
|
||||
console.log('添加 LoggingPlugin 后:', client.getPluginStats())
|
||||
|
||||
client.usePlugins([ContentFilterPlugin])
|
||||
console.log('添加 ContentFilterPlugin 后:', client.getPluginStats())
|
||||
|
||||
// 移除插件
|
||||
client.removePlugin('content-filter')
|
||||
console.log('移除 content-filter 后:', client.getPluginStats())
|
||||
|
||||
return client
|
||||
}
|
||||
|
||||
/**
|
||||
* 完整的低级 API 示例(原有的 example-usage.ts 的方式)
|
||||
* 这种方式适合需要精细控制插件生命周期的场景
|
||||
*/
|
||||
export async function exampleLowLevelApi() {
|
||||
console.log('=== 低级 API 示例 ===')
|
||||
|
||||
// 1. 创建插件管理器
|
||||
const pluginManager = new PluginManager([LoggingPlugin, ContentFilterPlugin])
|
||||
|
||||
// 2. 创建请求上下文
|
||||
const context = createContext('openai', 'gpt-4', {
|
||||
messages: [{ role: 'user', content: 'Hello!' }]
|
||||
})
|
||||
|
||||
try {
|
||||
// 3. 触发请求开始事件
|
||||
await pluginManager.executeParallel('onRequestStart', context)
|
||||
|
||||
// 4. 解析模型别名
|
||||
const resolvedModel = await pluginManager.executeFirst('resolveModel', 'gpt-4', context)
|
||||
console.log('Resolved model:', resolvedModel || 'gpt-4')
|
||||
|
||||
// 5. 转换请求参数
|
||||
const params = {
|
||||
messages: [{ role: 'user' as const, content: 'Hello, AI!' }],
|
||||
temperature: 0.7
|
||||
}
|
||||
const transformedParams = await pluginManager.executeSequential('transformParams', params, context)
|
||||
|
||||
// 6. 收集流转换器(关键:AI SDK 原生支持数组!)
|
||||
const streamTransforms = pluginManager.collectStreamTransforms()
|
||||
|
||||
// 7. 调用 AI SDK,直接传入转换器工厂数组
|
||||
const result = await streamText({
|
||||
model: openai('gpt-4'),
|
||||
...transformedParams,
|
||||
experimental_transform: streamTransforms // 直接传入工厂函数数组
|
||||
})
|
||||
|
||||
// 8. 处理结果
|
||||
let fullText = ''
|
||||
for await (const textPart of result.textStream) {
|
||||
fullText += textPart
|
||||
console.log('Streaming:', textPart)
|
||||
}
|
||||
|
||||
// 9. 转换最终结果
|
||||
const finalResult = { text: fullText, usage: await result.usage }
|
||||
const transformedResult = await pluginManager.executeSequential('transformResult', finalResult, context)
|
||||
|
||||
// 10. 触发完成事件
|
||||
await pluginManager.executeParallel('onRequestEnd', context, transformedResult)
|
||||
|
||||
return transformedResult
|
||||
} catch (error) {
|
||||
// 11. 触发错误事件
|
||||
await pluginManager.executeParallel('onError', context, undefined, error as Error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 流转换器数组的其他使用方式
|
||||
*/
|
||||
export function demonstrateStreamTransforms() {
|
||||
console.log('=== 流转换器示例 ===')
|
||||
|
||||
const pluginManager = new PluginManager([
|
||||
ContentFilterPlugin,
|
||||
{
|
||||
name: 'text-replacer',
|
||||
transformStream() {
|
||||
return () =>
|
||||
new TransformStream({
|
||||
transform(chunk, controller) {
|
||||
if (chunk.type === 'text-delta') {
|
||||
const replaced = chunk.textDelta.replace(/hello/gi, 'hi')
|
||||
controller.enqueue({ ...chunk, textDelta: replaced })
|
||||
} else {
|
||||
controller.enqueue(chunk)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
])
|
||||
|
||||
// 获取所有流转换器
|
||||
const transforms = pluginManager.collectStreamTransforms()
|
||||
console.log(`收集到 ${transforms.length} 个流转换器`)
|
||||
|
||||
// 可以单独使用每个转换器
|
||||
transforms.forEach((factory, index) => {
|
||||
console.log(`转换器 ${index + 1} 已准备就绪`)
|
||||
const transform = factory({ stopStream: () => {} })
|
||||
console.log('Transform created:', transform)
|
||||
})
|
||||
|
||||
return transforms
|
||||
}
|
||||
|
||||
/**
|
||||
* 运行所有示例
|
||||
*/
|
||||
export async function runAllExamples() {
|
||||
console.log('🚀 开始运行所有示例...\n')
|
||||
|
||||
try {
|
||||
// 1. PluginEnabledAiClient 示例(推荐)
|
||||
await exampleWithPluginEnabledClient()
|
||||
console.log('✅ PluginEnabledAiClient 示例完成\n')
|
||||
|
||||
// 2. OpenAI Compatible 示例
|
||||
exampleOpenAICompatible()
|
||||
console.log('✅ OpenAI Compatible 示例完成\n')
|
||||
|
||||
// 3. 动态插件管理示例
|
||||
exampleDynamicPlugins()
|
||||
console.log('✅ 动态插件管理示例完成\n')
|
||||
|
||||
// 4. 流转换器示例
|
||||
demonstrateStreamTransforms()
|
||||
console.log('✅ 流转换器示例完成\n')
|
||||
|
||||
// 5. 低级 API 示例
|
||||
// await exampleLowLevelApi()
|
||||
console.log('✅ 低级 API 示例完成\n')
|
||||
|
||||
console.log('🎉 所有示例运行完成!')
|
||||
} catch (error) {
|
||||
console.error('❌ 示例运行失败:', error)
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,28 @@
|
||||
import type { LanguageModelV1Middleware, TextStreamPart, ToolSet } from 'ai'
|
||||
|
||||
/**
|
||||
* 生命周期阶段定义
|
||||
*/
|
||||
export enum LifecycleStage {
|
||||
PRE_REQUEST = 'pre-request', // 请求预处理
|
||||
REQUEST_EXECUTION = 'execution', // 请求执行
|
||||
STREAM_PROCESSING = 'stream', // 流式处理(仅流模式)
|
||||
POST_RESPONSE = 'post-response', // 响应后处理
|
||||
ERROR_HANDLING = 'error' // 错误处理
|
||||
}
|
||||
|
||||
/**
|
||||
* 生命周期上下文
|
||||
*/
|
||||
export interface LifecycleContext {
|
||||
currentStage: LifecycleStage
|
||||
startTime: number
|
||||
stageStartTime: number
|
||||
completedStages: Set<LifecycleStage>
|
||||
stageDurations: Map<LifecycleStage, number>
|
||||
metadata: Record<string, any>
|
||||
}
|
||||
|
||||
/**
|
||||
* AI 请求上下文
|
||||
*/
|
||||
@@ -2,10 +2,68 @@ import type { LanguageModelV1Middleware } from 'ai'
|
||||
|
||||
/**
|
||||
* AI Provider 注册表
|
||||
* 统一管理所有 AI SDK Providers 的动态导入和工厂函数
|
||||
* 静态类型 + 动态导入模式:所有类型静态导入,所有实现动态导入
|
||||
*/
|
||||
|
||||
// Provider 配置接口(简化版)
|
||||
// 静态导入所有 AI SDK 类型
|
||||
import { type AmazonBedrockProviderSettings } from '@ai-sdk/amazon-bedrock'
|
||||
import { type AnthropicProviderSettings } from '@ai-sdk/anthropic'
|
||||
import { type AzureOpenAIProviderSettings } from '@ai-sdk/azure'
|
||||
import { type CerebrasProviderSettings } from '@ai-sdk/cerebras'
|
||||
import { type CohereProviderSettings } from '@ai-sdk/cohere'
|
||||
import { type DeepInfraProviderSettings } from '@ai-sdk/deepinfra'
|
||||
import { type DeepSeekProviderSettings } from '@ai-sdk/deepseek'
|
||||
import { type FalProviderSettings } from '@ai-sdk/fal'
|
||||
import { type FireworksProviderSettings } from '@ai-sdk/fireworks'
|
||||
import { type GoogleGenerativeAIProviderSettings } from '@ai-sdk/google'
|
||||
import { type GoogleVertexProviderSettings } from '@ai-sdk/google-vertex'
|
||||
import { type GroqProviderSettings } from '@ai-sdk/groq'
|
||||
import { type MistralProviderSettings } from '@ai-sdk/mistral'
|
||||
import { type OpenAIProviderSettings } from '@ai-sdk/openai'
|
||||
import { type OpenAICompatibleProviderSettings } from '@ai-sdk/openai-compatible'
|
||||
import { type PerplexityProviderSettings } from '@ai-sdk/perplexity'
|
||||
import { type ReplicateProviderSettings } from '@ai-sdk/replicate'
|
||||
import { type TogetherAIProviderSettings } from '@ai-sdk/togetherai'
|
||||
import { type VercelProviderSettings } from '@ai-sdk/vercel'
|
||||
import { type XaiProviderSettings } from '@ai-sdk/xai'
|
||||
import { type OpenRouterProviderSettings } from '@openrouter/ai-sdk-provider'
|
||||
import { type AnthropicVertexProviderSettings } from 'anthropic-vertex-ai'
|
||||
import { type OllamaProviderSettings } from 'ollama-ai-provider'
|
||||
import { type QwenProviderSettings } from 'qwen-ai-provider'
|
||||
import { type ZhipuProviderSettings } from 'zhipu-ai-provider'
|
||||
|
||||
// 类型安全的 Provider Settings 映射
|
||||
export type ProviderSettingsMap = {
|
||||
openai: OpenAIProviderSettings
|
||||
'openai-compatible': OpenAICompatibleProviderSettings
|
||||
anthropic: AnthropicProviderSettings
|
||||
google: GoogleGenerativeAIProviderSettings
|
||||
'google-vertex': GoogleVertexProviderSettings
|
||||
mistral: MistralProviderSettings
|
||||
xai: XaiProviderSettings
|
||||
azure: AzureOpenAIProviderSettings
|
||||
bedrock: AmazonBedrockProviderSettings
|
||||
cohere: CohereProviderSettings
|
||||
groq: GroqProviderSettings
|
||||
together: TogetherAIProviderSettings
|
||||
fireworks: FireworksProviderSettings
|
||||
deepseek: DeepSeekProviderSettings
|
||||
cerebras: CerebrasProviderSettings
|
||||
deepinfra: DeepInfraProviderSettings
|
||||
replicate: ReplicateProviderSettings
|
||||
perplexity: PerplexityProviderSettings
|
||||
fal: FalProviderSettings
|
||||
vercel: VercelProviderSettings
|
||||
ollama: OllamaProviderSettings
|
||||
qwen: QwenProviderSettings
|
||||
zhipu: ZhipuProviderSettings
|
||||
'anthropic-vertex': AnthropicVertexProviderSettings
|
||||
openrouter: OpenRouterProviderSettings
|
||||
}
|
||||
|
||||
export type ProviderId = keyof ProviderSettingsMap
|
||||
|
||||
// 统一的 Provider 配置接口(所有都使用动态导入)
|
||||
export interface ProviderConfig {
|
||||
id: string
|
||||
name: string
|
||||
@@ -44,7 +102,7 @@ export class AiProviderRegistry {
|
||||
*/
|
||||
private initializeProviders(): void {
|
||||
const providers: ProviderConfig[] = [
|
||||
// 核心 AI SDK Providers
|
||||
// 官方 AI SDK Providers (19个)
|
||||
{
|
||||
id: 'openai',
|
||||
name: 'OpenAI',
|
||||
@@ -181,13 +239,10 @@
|
||||
id: 'vercel',
|
||||
name: 'Vercel',
|
||||
import: () => import('@ai-sdk/vercel'),
|
||||
creatorFunctionName: 'createVercel',
|
||||
supportsImageGeneration: false
|
||||
}
|
||||
]
|
||||
creatorFunctionName: 'createVercel'
|
||||
},
|
||||
|
||||
// 社区提供的 Providers
|
||||
const communityProviders: ProviderConfig[] = [
|
||||
// 社区 Providers (5个)
|
||||
{
|
||||
id: 'ollama',
|
||||
name: 'Ollama',
|
||||
@@ -225,9 +280,8 @@
|
||||
}
|
||||
]
|
||||
|
||||
// 注册所有 providers(官方 + 社区)
|
||||
const allProviders = [...providers, ...communityProviders]
|
||||
allProviders.forEach((config) => {
|
||||
// 注册所有 providers (总计24个)
|
||||
providers.forEach((config) => {
|
||||
this.registry.set(config.id, config)
|
||||
})
|
||||
}
|
||||
@@ -295,3 +349,32 @@ export const registerProvider = (config: ProviderConfig) => aiProviderRegistry.r
|
||||
|
||||
// 兼容现有实现的导出
|
||||
export const PROVIDER_REGISTRY = aiProviderRegistry.getCompatibleRegistry()
|
||||
|
||||
// 重新导出所有类型供外部使用
|
||||
export type {
|
||||
AmazonBedrockProviderSettings,
|
||||
AnthropicProviderSettings,
|
||||
AnthropicVertexProviderSettings,
|
||||
AzureOpenAIProviderSettings,
|
||||
CerebrasProviderSettings,
|
||||
CohereProviderSettings,
|
||||
DeepInfraProviderSettings,
|
||||
DeepSeekProviderSettings,
|
||||
FalProviderSettings,
|
||||
FireworksProviderSettings,
|
||||
GoogleGenerativeAIProviderSettings,
|
||||
GoogleVertexProviderSettings,
|
||||
GroqProviderSettings,
|
||||
MistralProviderSettings,
|
||||
OllamaProviderSettings,
|
||||
OpenAICompatibleProviderSettings,
|
||||
OpenAIProviderSettings,
|
||||
OpenRouterProviderSettings,
|
||||
PerplexityProviderSettings,
|
||||
QwenProviderSettings,
|
||||
ReplicateProviderSettings,
|
||||
TogetherAIProviderSettings,
|
||||
VercelProviderSettings,
|
||||
XaiProviderSettings,
|
||||
ZhipuProviderSettings
|
||||
}
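
`ProviderSettingsMap` above keys every supported provider id to its settings type, which is what lets the factory and client overloads narrow `options` per id. A hedged sketch of that narrowing from the consumer side (the helper and the values are illustrative only):

```typescript
// Illustrative only: shows the per-id narrowing that ProviderSettingsMap above provides.
import type { ProviderId, ProviderSettingsMap } from '@cherry-studio/ai-core'

function describeProvider<T extends ProviderId>(id: T, settings: ProviderSettingsMap[T]): string {
  return `${id}: ${Object.keys(settings ?? {}).join(', ')}`
}

// Settings are checked against AnthropicProviderSettings for this id.
describeProvider('anthropic', { apiKey: 'sk-ant-placeholder' })

// A key that does not exist on OpenAIProviderSettings would be rejected here (illustrative):
// describeProvider('openai', { region: 'us-east-1' })
```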
|
||||
|
||||
@@ -1,66 +0,0 @@
|
||||
/**
|
||||
* AI Core 类型定义
|
||||
* 直接使用 Vercel AI SDK 的原生类型
|
||||
*/
|
||||
|
||||
// 直接重新导出 AI SDK 的类型,避免重复定义
|
||||
export type {
|
||||
// 通用类型
|
||||
CoreMessage,
|
||||
CoreTool,
|
||||
CoreToolChoice,
|
||||
// 其他有用的类型
|
||||
FinishReason,
|
||||
GenerateTextResult,
|
||||
LanguageModelV1,
|
||||
// 核心函数的参数和返回类型
|
||||
StreamTextResult,
|
||||
// 流式处理相关
|
||||
TextStreamPart,
|
||||
ToolSet
|
||||
} from 'ai'
|
||||
|
||||
/**
|
||||
* 生命周期阶段定义
|
||||
*/
|
||||
export enum LifecycleStage {
|
||||
PRE_REQUEST = 'pre-request', // 请求预处理
|
||||
REQUEST_EXECUTION = 'execution', // 请求执行
|
||||
STREAM_PROCESSING = 'stream', // 流式处理(仅流模式)
|
||||
POST_RESPONSE = 'post-response', // 响应后处理
|
||||
ERROR_HANDLING = 'error' // 错误处理
|
||||
}
|
||||
|
||||
/**
|
||||
* 生命周期上下文
|
||||
*/
|
||||
export interface LifecycleContext {
|
||||
currentStage: LifecycleStage
|
||||
startTime: number
|
||||
stageStartTime: number
|
||||
completedStages: Set<LifecycleStage>
|
||||
stageDurations: Map<LifecycleStage, number>
|
||||
metadata: Record<string, any>
|
||||
}
|
||||
|
||||
/**
|
||||
* 中间件执行上下文
|
||||
*/
|
||||
export interface AiRequestContext {
|
||||
// 生命周期信息
|
||||
lifecycle: LifecycleContext
|
||||
|
||||
// 请求信息
|
||||
method: 'streamText' | 'generateText'
|
||||
providerId: string
|
||||
originalParams: any // 使用 any,让 AI SDK 自己处理类型检查
|
||||
|
||||
// 可变状态
|
||||
state: {
|
||||
transformedParams?: any
|
||||
result?: any
|
||||
error?: Error
|
||||
aborted?: boolean
|
||||
metadata: Record<string, any>
|
||||
}
|
||||
}
|
||||
296
src/renderer/src/aiCore/AiSdkToChunkAdapter.ts
Normal file
@@ -0,0 +1,296 @@
|
||||
/**
|
||||
* AI SDK 到 Cherry Studio Chunk 适配器
|
||||
* 用于将 AI SDK 的 fullStream 转换为 Cherry Studio 的 chunk 格式
|
||||
*/
|
||||
|
||||
import { TextStreamPart } from '@cherry-studio/ai-core'
|
||||
import { Chunk, ChunkType } from '@renderer/types/chunk'
|
||||
|
||||
export interface CherryStudioChunk {
|
||||
type: 'text-delta' | 'text-complete' | 'tool-call' | 'tool-result' | 'finish' | 'error'
|
||||
text?: string
|
||||
toolCall?: any
|
||||
toolResult?: any
|
||||
finishReason?: string
|
||||
usage?: any
|
||||
error?: any
|
||||
}
|
||||
|
||||
/**
|
||||
* AI SDK 到 Cherry Studio Chunk 适配器类
|
||||
* 处理 fullStream 到 Cherry Studio chunk 的转换
|
||||
*/
|
||||
export class AiSdkToChunkAdapter {
|
||||
constructor(private onChunk: (chunk: Chunk) => void) {}
|
||||
|
||||
/**
|
||||
* 处理 AI SDK 流结果
|
||||
* @param aiSdkResult AI SDK 的流结果对象
|
||||
* @returns 最终的文本内容
|
||||
*/
|
||||
async processStream(aiSdkResult: any): Promise<string> {
|
||||
// 如果是流式且有 fullStream
|
||||
if (aiSdkResult.fullStream) {
|
||||
await this.readFullStream(aiSdkResult.fullStream)
|
||||
}
|
||||
|
||||
// 使用 streamResult.text 获取最终结果
|
||||
    return await aiSdkResult.text
  }

  /**
   * Read the fullStream and convert it into Cherry Studio chunks
   * @param fullStream the AI SDK fullStream (ReadableStream)
   */
  private async readFullStream(fullStream: ReadableStream<TextStreamPart<any>>) {
    const reader = fullStream.getReader()
    const final = {
      text: '',
      reasoning_content: ''
    }
    try {
      while (true) {
        const { done, value } = await reader.read()

        if (done) {
          break
        }

        // Convert and emit the chunk
        this.convertAndEmitChunk(value, final)
      }
    } finally {
      reader.releaseLock()
    }
  }

  /**
   * Convert an AI SDK chunk into a Cherry Studio chunk and invoke the callback
   * @param chunk the AI SDK chunk data
   */
  private convertAndEmitChunk(chunk: TextStreamPart<any>, final: { text: string; reasoning_content: string }) {
    console.log('AI SDK chunk type:', chunk.type, chunk)
    switch (chunk.type) {
      // === Text events ===
      case 'text-delta':
        final.text += chunk.textDelta || ''
        this.onChunk({
          type: ChunkType.TEXT_DELTA,
          text: chunk.textDelta || ''
        })
        if (final.reasoning_content) {
          this.onChunk({
            type: ChunkType.THINKING_COMPLETE,
            text: final.reasoning_content || ''
          })
          final.reasoning_content = ''
        }
        break

      // === Reasoning events ===
      case 'reasoning':
        final.reasoning_content += chunk.textDelta || ''
        this.onChunk({
          type: ChunkType.THINKING_DELTA,
          text: chunk.textDelta || ''
        })
        break

      case 'reasoning-signature':
        // The reasoning signature maps to thinking-complete
        this.onChunk({
          type: ChunkType.THINKING_COMPLETE,
          text: chunk.signature || ''
        })
        break

      case 'redacted-reasoning':
        // Redacted reasoning content also maps to thinking
        this.onChunk({
          type: ChunkType.THINKING_DELTA,
          text: chunk.data || ''
        })
        break

      // === Tool-call events ===
      case 'tool-call-streaming-start':
        // A streaming tool call has started
        this.onChunk({
          type: ChunkType.MCP_TOOL_CREATED,
          tool_calls: [
            {
              id: chunk.toolCallId,
              name: chunk.toolName,
              args: {}
            }
          ]
        })
        break

      case 'tool-call-delta':
        // Incremental update of the tool-call arguments
        this.onChunk({
          type: ChunkType.MCP_TOOL_IN_PROGRESS,
          responses: [
            {
              id: chunk.toolCallId,
              tool: {
                id: chunk.toolName,
                // TODO: serverId, serverName
                serverId: 'ai-sdk',
                serverName: 'AI SDK',
                name: chunk.toolName,
                description: '',
                inputSchema: {
                  type: 'object',
                  title: chunk.toolName,
                  properties: {}
                }
              },
              arguments: {},
              status: 'invoking',
              response: chunk.argsTextDelta,
              toolCallId: chunk.toolCallId
            }
          ]
        })
        break

      case 'tool-call':
        // A complete tool call
        this.onChunk({
          type: ChunkType.MCP_TOOL_CREATED,
          tool_calls: [
            {
              id: chunk.toolCallId,
              name: chunk.toolName,
              args: chunk.args
            }
          ]
        })
        break

      case 'tool-result':
        // Result of a tool call
        this.onChunk({
          type: ChunkType.MCP_TOOL_COMPLETE,
          responses: [
            {
              id: chunk.toolCallId,
              tool: {
                id: chunk.toolName,
                // TODO: serverId, serverName
                serverId: 'ai-sdk',
                serverName: 'AI SDK',
                name: chunk.toolName,
                description: '',
                inputSchema: {
                  type: 'object',
                  title: chunk.toolName,
                  properties: {}
                }
              },
              arguments: chunk.args || {},
              status: 'done',
              response: chunk.result,
              toolCallId: chunk.toolCallId
            }
          ]
        })
        break

      // === Step events ===
      // case 'step-start':
      //   this.onChunk({
      //     type: ChunkType.LLM_RESPONSE_CREATED
      //   })
      //   break
      case 'step-finish':
        this.onChunk({
          type: ChunkType.BLOCK_COMPLETE,
          response: {
            text: final.text || '',
            reasoning_content: final.reasoning_content || '',
            usage: {
              completion_tokens: chunk.usage.completionTokens || 0,
              prompt_tokens: chunk.usage.promptTokens || 0,
              total_tokens: chunk.usage.totalTokens || 0
            },
            metrics: chunk.usage
              ? {
                  completion_tokens: chunk.usage.completionTokens || 0,
                  time_completion_millsec: 0
                }
              : undefined
          }
        })
        break

      case 'finish':
        this.onChunk({
          type: ChunkType.TEXT_COMPLETE,
          text: final.text || '' // TEXT_COMPLETE requires a text field
        })
        this.onChunk({
          type: ChunkType.LLM_RESPONSE_COMPLETE,
          response: {
            text: final.text || '',
            reasoning_content: final.reasoning_content || '',
            usage: {
              completion_tokens: chunk.usage.completionTokens || 0,
              prompt_tokens: chunk.usage.promptTokens || 0,
              total_tokens: chunk.usage.totalTokens || 0
            },
            metrics: chunk.usage
              ? {
                  completion_tokens: chunk.usage.completionTokens || 0,
                  time_completion_millsec: 0
                }
              : undefined
          }
        })
        break

      // === Source and file events ===
      case 'source':
        // Source info maps to knowledge-search-complete
        this.onChunk({
          type: ChunkType.KNOWLEDGE_SEARCH_COMPLETE,
          knowledge: [
            {
              id: Number(chunk.source.id) || Date.now(),
              content: chunk.source.title || '',
              sourceUrl: chunk.source.url || '',
              type: 'url'
            }
          ]
        })
        break

      case 'file':
        // File events are most likely image generation
        this.onChunk({
          type: ChunkType.IMAGE_COMPLETE,
          image: {
            type: 'base64',
            images: [chunk.base64]
          }
        })
        break
      case 'error':
        this.onChunk({
          type: ChunkType.ERROR,
          error: {
            message: chunk.error || 'Unknown error'
          }
        })
        break

      default:
        // Other chunk types can be ignored or simply logged
        console.log('Unhandled AI SDK chunk type:', chunk.type, chunk)
    }
  }
}

export default AiSdkToChunkAdapter
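
For orientation, here is a minimal sketch of how this adapter is meant to be driven; it mirrors the call site added in index_new.ts further down in this diff. The `client` parameter, the helper name, and the logging callback are placeholders for illustration, not part of the commit:

```ts
import { ChunkType, type Chunk } from '@renderer/types/chunk'

import AiSdkToChunkAdapter from './AiSdkToChunkAdapter'

// `client` stands in for the AI SDK client created in index_new.ts; `modelId` and `params`
// are whatever buildStreamTextParams produced for the request.
async function streamToCherryChunks(client: any, modelId: string, params: any): Promise<string> {
  const onChunk = (chunk: Chunk) => {
    // Real code dispatches these chunks to the UI; logging is only for illustration.
    if (chunk.type === ChunkType.TEXT_DELTA) {
      console.log(chunk.text)
    }
  }
  const adapter = new AiSdkToChunkAdapter(onChunk)
  const streamResult = await client.streamText(modelId, params)
  // processStream consumes the fullStream via readFullStream above and resolves to the final text.
  return adapter.processStream(streamResult)
}
```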

230
src/renderer/src/aiCore/index_new.ts
Normal file
@ -0,0 +1,230 @@
/**
 * Cherry Studio AI Core - new entry point
 * Progressive refactor that integrates the @cherry-studio/ai-core package
 *
 * Approach: keep the implementation simple and focus on the core features
 * 1. Prefer the new AI SDK
 * 2. Fall back to the legacy implementation on failure
 * 3. Keep the interface compatible for now
 */

import {
  AiClient,
  AiCore,
  createClient,
  type OpenAICompatibleProviderSettings,
  type ProviderId
} from '@cherry-studio/ai-core'
import { isDedicatedImageGenerationModel } from '@renderer/config/models'
import type { GenerateImageParams, Model, Provider } from '@renderer/types'
import { Chunk, ChunkType } from '@renderer/types/chunk'
import { RequestOptions } from '@renderer/types/sdk'

// Import the adapter
import AiSdkToChunkAdapter from './AiSdkToChunkAdapter'
// Import the legacy AiProvider as a fallback
import LegacyAiProvider from './index'
import { CompletionsParams, CompletionsResult } from './middleware/schemas'
// Import the parameter transformation module
import { buildStreamTextParams } from './transformParameters'

/**
 * Map an existing Provider type to an AI SDK provider ID,
 * based on the support list in registry.ts
 */
function mapProviderTypeToAiSdkId(providerType: string): string {
  // Cherry Studio provider type -> AI SDK provider ID mapping table
  const typeMapping: Record<string, string> = {
    // Mappings that need conversion
    grok: 'xai', // grok -> xai
    'azure-openai': 'azure', // azure-openai -> azure
    gemini: 'google' // gemini -> google
  }

  return typeMapping[providerType]
}

/**
 * Convert a Provider config into the new AI SDK format
 */
function providerToAiSdkConfig(provider: Provider): {
  providerId: ProviderId | 'openai-compatible'
  options: any
} {
  console.log('provider', provider)
  // 1. Map the provider type to an AI SDK ID
  const mappedProviderId = mapProviderTypeToAiSdkId(provider.id)

  // 2. Check whether the mapped provider ID is in the AI SDK registry
  const isSupported = AiCore.isSupported(mappedProviderId)

  console.log(`Provider mapping: ${provider.type} -> ${mappedProviderId}, supported: ${isSupported}`)

  // 3. If the mapped provider is not supported, use openai-compatible
  if (isSupported) {
    return {
      providerId: mappedProviderId as ProviderId,
      options: {
        apiKey: provider.apiKey
      }
    }
  } else {
    console.log(`Using openai-compatible fallback for provider: ${provider.type}`)
    const compatibleConfig: OpenAICompatibleProviderSettings = {
      name: provider.name || provider.type,
      apiKey: provider.apiKey,
      baseURL: provider.apiHost
    }

    return {
      providerId: 'openai-compatible',
      options: compatibleConfig
    }
  }
}
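
To make the mapping above concrete, here is an illustrative sketch of the two shapes this function returns. The provider objects are hypothetical, and the sketch assumes `AiCore.isSupported('google')` is true while the custom id is not registered:

```ts
// Assuming 'google' is registered in the AI SDK registry and 'my-proxy' is not:
providerToAiSdkConfig({ id: 'gemini', apiKey: 'sk-...', apiHost: '' } as Provider)
// -> { providerId: 'google', options: { apiKey: 'sk-...' } }

providerToAiSdkConfig({
  id: 'my-proxy',
  name: 'My Proxy',
  apiKey: 'sk-...',
  apiHost: 'https://proxy.example.com/v1'
} as Provider)
// -> { providerId: 'openai-compatible',
//      options: { name: 'My Proxy', apiKey: 'sk-...', baseURL: 'https://proxy.example.com/v1' } }
```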

/**
 * Check whether the new AI SDK can be used
 */
function isModernSdkSupported(provider: Provider, model?: Model): boolean {
  // Only the major providers are supported for now
  const supportedProviders = ['openai', 'anthropic', 'gemini', 'azure-openai']

  // Check the provider type
  if (!supportedProviders.includes(provider.type)) {
    return false
  }

  // Dedicated image generation models are not supported yet
  if (model && isDedicatedImageGenerationModel(model)) {
    return false
  }

  return true
}

export default class ModernAiProvider {
  private modernClient?: AiClient
  private legacyProvider: LegacyAiProvider
  private provider: Provider

  constructor(provider: Provider) {
    this.provider = provider
    this.legacyProvider = new LegacyAiProvider(provider)

    const config = providerToAiSdkConfig(provider)
    this.modernClient = createClient(config.providerId, config.options)
  }

  public async completions(params: CompletionsParams, options?: RequestOptions): Promise<CompletionsResult> {
    // const model = params.assistant.model

    // Check whether the modern client should be used
    // if (this.modernClient && model && isModernSdkSupported(this.provider, model)) {
    //   try {
    return await this.modernCompletions(params, options)
    //   } catch (error) {
    //     console.warn('Modern client failed, falling back to legacy:', error)
    //     // fall back to the legacy implementation
    //   }
    // }

    // Use the legacy implementation
    // return this.legacyProvider.completions(params, options)
  }

  /**
   * Completions implementation backed by the modern AI SDK
   * Uses the AiSdkUtils helper module to build the parameters
   */
  private async modernCompletions(params: CompletionsParams, options?: RequestOptions): Promise<CompletionsResult> {
    if (!this.modernClient || !params.assistant.model) {
      throw new Error('Modern client not available')
    }

    console.log('Modern completions with params:', params, 'options:', options)

    const model = params.assistant.model
    const assistant = params.assistant

    // Check the messages type and convert
    const messages = Array.isArray(params.messages) ? params.messages : []
    if (typeof params.messages === 'string') {
      console.warn('Messages is string, using empty array')
    }

    // Build the parameters via the transformParameters module
    const aiSdkParams = await buildStreamTextParams(messages, assistant, model, {
      maxTokens: params.maxTokens,
      mcpTools: params.mcpTools
    })

    console.log('Built AI SDK params:', aiSdkParams)
    const chunks: Chunk[] = []

    try {
      if (params.streamOutput && params.onChunk) {
        // Streaming - use the adapter
        const adapter = new AiSdkToChunkAdapter(params.onChunk)
        const streamResult = await this.modernClient.streamText(model.id, aiSdkParams)
        const finalText = await adapter.processStream(streamResult)

        return {
          getText: () => finalText
        }
      } else if (params.streamOutput) {
        // Streaming without an onChunk callback
        const streamResult = await this.modernClient.streamText(model.id, aiSdkParams)
        const finalText = await streamResult.text

        return {
          getText: () => finalText
        }
      } else {
        // Non-streaming
        const result = await this.modernClient.generateText(model.id, aiSdkParams)

        const cherryChunk: Chunk = {
          type: ChunkType.TEXT_COMPLETE,
          text: result.text || ''
        }
        chunks.push(cherryChunk)

        if (params.onChunk) {
          params.onChunk(cherryChunk)
        }

        return {
          getText: () => result.text || ''
        }
      }
    } catch (error) {
      console.error('Modern AI SDK error:', error)
      throw error
    }
  }

  // Delegate the remaining methods to the legacy implementation
  public async models() {
    return this.legacyProvider.models()
  }

  public async getEmbeddingDimensions(model: Model): Promise<number> {
    return this.legacyProvider.getEmbeddingDimensions(model)
  }

  public async generateImage(params: GenerateImageParams): Promise<string[]> {
    return this.legacyProvider.generateImage(params)
  }

  public getBaseURL(): string {
    return this.legacyProvider.getBaseURL()
  }

  public getApiKey(): string {
    return this.legacyProvider.getApiKey()
  }
}

// Export a few helpers for easier debugging
export { isModernSdkSupported, providerToAiSdkConfig }
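
A minimal usage sketch of the new provider, matching the ApiService change later in this diff; `provider`, `assistant`, and `messages` are assumed to come from the calling code, and the import paths follow the aliases used elsewhere in this commit:

```ts
import AiProviderNew from '@renderer/aiCore/index_new'
import type { CompletionsParams } from '@renderer/aiCore/middleware/schemas'
import type { Assistant, Message, Provider } from '@renderer/types'

// Placeholder call site: the real values come from the assistant/provider stores.
async function ask(provider: Provider, assistant: Assistant, messages: Message[]): Promise<string> {
  const AI = new AiProviderNew(provider)
  const result = await AI.completions({
    assistant,
    messages,
    streamOutput: true,
    maxTokens: 1024,
    onChunk: (chunk) => console.log(chunk.type)
  } as CompletionsParams)
  return result.getText()
}
```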

269
src/renderer/src/aiCore/transformParameters.ts
Normal file
@ -0,0 +1,269 @@
/**
 * AI SDK parameter transformation module
 * Centralizes the parameter handling and conversion logic extracted from the individual apiClients
 */

import type { StreamTextParams } from '@cherry-studio/ai-core'
import { isNotSupportTemperatureAndTopP, isSupportedFlexServiceTier } from '@renderer/config/models'
import type { Assistant, MCPTool, Message, Model } from '@renderer/types'
import { FileTypes } from '@renderer/types'
import { findFileBlocks, findImageBlocks, getMainTextContent } from '@renderer/utils/messageUtils/find'
import { buildSystemPrompt } from '@renderer/utils/prompt'
import { defaultTimeout } from '@shared/config/constant'

/**
 * Get the temperature parameter
 */
export function getTemperature(assistant: Assistant, model: Model): number | undefined {
  return isNotSupportTemperatureAndTopP(model) ? undefined : assistant.settings?.temperature
}

/**
 * Get the topP parameter
 */
export function getTopP(assistant: Assistant, model: Model): number | undefined {
  return isNotSupportTemperatureAndTopP(model) ? undefined : assistant.settings?.topP
}

/**
 * Get the timeout setting
 */
export function getTimeout(model: Model): number {
  if (isSupportedFlexServiceTier(model)) {
    return 15 * 1000 * 60
  }
  return defaultTimeout
}

/**
 * Build the system prompt
 */
export async function buildSystemPromptWithTools(
  prompt: string,
  mcpTools?: MCPTool[],
  assistant?: Assistant
): Promise<string> {
  return await buildSystemPrompt(prompt, mcpTools, assistant)
}

// /**
//  * Convert MCP tools into the AI SDK tool format
//  * Note: this returns a generic format; actual usage needs per-provider conversion
//  * TODO: should use the ai-sdk MCP support
//  */
// export function convertMcpToolsToSdkTools(mcpTools: MCPTool[]): Pick<StreamTextParams, 'tools'> {
//   return mcpTools.map((tool) => ({
//     type: 'function',
//     function: {
//       name: tool.id,
//       description: tool.description,
//       parameters: tool.inputSchema || {}
//     }
//   }))
// }

/**
 * Extract file contents
 */
export async function extractFileContent(message: Message): Promise<string> {
  const fileBlocks = findFileBlocks(message)
  if (fileBlocks.length > 0) {
    const textFileBlocks = fileBlocks.filter(
      (fb) => fb.file && [FileTypes.TEXT, FileTypes.DOCUMENT].includes(fb.file.type)
    )

    if (textFileBlocks.length > 0) {
      let text = ''
      const divider = '\n\n---\n\n'

      for (const fileBlock of textFileBlocks) {
        const file = fileBlock.file
        const fileContent = (await window.api.file.read(file.id + file.ext)).trim()
        const fileNameRow = 'file: ' + file.origin_name + '\n\n'
        text = text + fileNameRow + fileContent + divider
      }

      return text
    }
  }

  return ''
}

/**
 * Convert a message into the AI SDK parameter format
 * Generic conversion based on the OpenAI format; supports text, images and files
 */
export async function convertMessageToSdkParam(message: Message, isVisionModel = false): Promise<any> {
  const content = getMainTextContent(message)
  const fileBlocks = findFileBlocks(message)
  const imageBlocks = findImageBlocks(message)

  // Simple message (no files, no images)
  if (fileBlocks.length === 0 && imageBlocks.length === 0) {
    return {
      role: message.role === 'system' ? 'user' : message.role,
      content
    }
  }

  // Complex message (contains files or images)
  const parts: any[] = []

  if (content) {
    parts.push({ type: 'text', text: content })
  }

  // Handle images (only for vision-capable models)
  if (isVisionModel) {
    for (const imageBlock of imageBlocks) {
      if (imageBlock.file) {
        try {
          const image = await window.api.file.base64Image(imageBlock.file.id + imageBlock.file.ext)
          parts.push({
            type: 'image_url',
            image_url: { url: image.data }
          })
        } catch (error) {
          console.warn('Failed to load image:', error)
        }
      } else if (imageBlock.url && imageBlock.url.startsWith('data:')) {
        parts.push({
          type: 'image_url',
          image_url: { url: imageBlock.url }
        })
      }
    }
  }

  // Handle files
  for (const fileBlock of fileBlocks) {
    const file = fileBlock.file
    if (!file) continue

    if ([FileTypes.TEXT, FileTypes.DOCUMENT].includes(file.type)) {
      try {
        const fileContent = await window.api.file.read(file.id + file.ext)
        parts.push({
          type: 'text',
          text: `${file.origin_name}\n${fileContent.trim()}`
        })
      } catch (error) {
        console.warn('Failed to read file:', error)
      }
    }
  }

  return {
    role: message.role === 'system' ? 'user' : message.role,
    content: parts.length === 1 && parts[0].type === 'text' ? parts[0].text : parts
  }
}

/**
 * Convert an array of Cherry Studio messages into AI SDK messages
 */
export async function convertMessagesToSdkMessages(
  messages: Message[],
  model: Model
): Promise<StreamTextParams['messages']> {
  const sdkMessages: StreamTextParams['messages'] = []
  const isVision = model.id.includes('vision') || model.id.includes('gpt-4') // naive vision-model detection

  for (const message of messages) {
    const sdkMessage = await convertMessageToSdkParam(message, isVision)
    sdkMessages.push(sdkMessage)
  }

  return sdkMessages
}

/**
 * Build the AI SDK streaming parameters
 * This is the main parameter builder that ties all of the conversions together
 */
export async function buildStreamTextParams(
  messages: Message[],
  assistant: Assistant,
  model: Model,
  options: {
    maxTokens?: number
    mcpTools?: MCPTool[]
    enableTools?: boolean
  } = {}
): Promise<StreamTextParams> {
  const { maxTokens, mcpTools, enableTools = false } = options

  // Convert the messages
  const sdkMessages = await convertMessagesToSdkMessages(messages, model)

  // Build the system prompt
  let systemPrompt = assistant.prompt || ''
  if (mcpTools && mcpTools.length > 0) {
    systemPrompt = await buildSystemPromptWithTools(systemPrompt, mcpTools, assistant)
  }

  // Build the base parameters
  const params: StreamTextParams = {
    messages: sdkMessages,
    maxTokens: maxTokens || 1000,
    temperature: getTemperature(assistant, model),
    topP: getTopP(assistant, model),
    system: systemPrompt || undefined,
    ...getCustomParameters(assistant)
  }

  // Add tools (if enabled and available)
  if (enableTools && mcpTools && mcpTools.length > 0) {
    // TODO: tool support is commented out until the typing issues are resolved
    // params.tools = convertMcpToolsToSdkTools(mcpTools)
  }

  return params
}

/**
 * Build the parameters for non-streaming generateText
 */
export async function buildGenerateTextParams(
  messages: Message[],
  assistant: Assistant,
  model: Model,
  options: {
    maxTokens?: number
    mcpTools?: MCPTool[]
    enableTools?: boolean
  } = {}
): Promise<any> {
  // Reuse the streaming parameter builder
  return await buildStreamTextParams(messages, assistant, model, options)
}

/**
 * Get custom parameters
 * Extract custom parameters from the assistant settings
 */
export function getCustomParameters(assistant: Assistant): Record<string, any> {
  return (
    assistant?.settings?.customParameters?.reduce((acc, param) => {
      if (!param.name?.trim()) {
        return acc
      }
      if (param.type === 'json') {
        const value = param.value as string
        if (value === 'undefined') {
          return { ...acc, [param.name]: undefined }
        }
        try {
          return { ...acc, [param.name]: JSON.parse(value) }
        } catch {
          return { ...acc, [param.name]: value }
        }
      }
      return {
        ...acc,
        [param.name]: param.value
      }
    }, {}) || {}
  )
}
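
A worked example of what this reducer produces, with a hypothetical `customParameters` list (the field names follow the code above; the values are invented):

```ts
const assistant = {
  settings: {
    customParameters: [
      { name: 'presence_penalty', type: 'number', value: 0.5 },
      { name: 'response_format', type: 'json', value: '{"type":"json_object"}' },
      { name: '   ', type: 'string', value: 'ignored' } // blank name -> skipped
    ]
  }
} as unknown as Assistant

getCustomParameters(assistant)
// -> { presence_penalty: 0.5, response_format: { type: 'json_object' } }
// buildStreamTextParams spreads these entries into the streamText params above.
```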

@ -37,6 +37,7 @@ import { findFileBlocks, getKnowledgeBaseIds, getMainTextContent } from '@render
import { findLast, isEmpty, takeRight } from 'lodash'

import AiProvider from '../aiCore'
import AiProviderNew from '../aiCore/index_new'
import {
  getAssistantProvider,
  getAssistantSettings,
@ -299,7 +300,7 @@ export async function fetchChatCompletion({
  console.log('fetchChatCompletion', messages, assistant)

  const provider = getAssistantProvider(assistant)
  const AI = new AiProvider(provider)
  const AI = new AiProviderNew(provider)

  // Make sure that 'Clear Context' works for all scenarios including external tool and normal chat.
  messages = filterContextMessages(messages)

@ -4,7 +4,8 @@
    "src/renderer/src/**/*",
    "src/preload/*.d.ts",
    "local/src/renderer/**/*",
    "packages/shared/**/*"
    "packages/shared/**/*",
    "packages/aiCore/src/**/*"
  ],
  "compilerOptions": {
    "composite": true,
@ -14,7 +15,8 @@
    "paths": {
      "@renderer/*": ["src/renderer/src/*"],
      "@shared/*": ["packages/shared/*"],
      "@types": ["src/renderer/src/types/index.ts"]
      "@types": ["src/renderer/src/types/index.ts"],
      "@cherry-studio/ai-core": ["packages/aiCore/src/"]
    }
  }
}
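
With the added `include` entry and the path mapping, renderer sources resolve the workspace package straight from its TypeScript sources; for example, the import used by index_new.ts above now type-checks as:

```ts
import { AiCore, createClient, type ProviderId } from '@cherry-studio/ai-core'
// -> packages/aiCore/src/index.ts, per the "@cherry-studio/ai-core" path mapping above
```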

499
yarn.lock
@ -241,7 +241,7 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/openai-compatible@npm:0.2.14":
|
||||
"@ai-sdk/openai-compatible@npm:0.2.14, @ai-sdk/openai-compatible@npm:^0.2.14":
|
||||
version: 0.2.14
|
||||
resolution: "@ai-sdk/openai-compatible@npm:0.2.14"
|
||||
dependencies:
|
||||
@ -311,6 +311,23 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/provider-utils@npm:2.1.5":
|
||||
version: 2.1.5
|
||||
resolution: "@ai-sdk/provider-utils@npm:2.1.5"
|
||||
dependencies:
|
||||
"@ai-sdk/provider": "npm:1.0.6"
|
||||
eventsource-parser: "npm:^3.0.0"
|
||||
nanoid: "npm:^3.3.8"
|
||||
secure-json-parse: "npm:^2.7.0"
|
||||
peerDependencies:
|
||||
zod: ^3.0.0
|
||||
peerDependenciesMeta:
|
||||
zod:
|
||||
optional: true
|
||||
checksum: 10c0/6ec33c1f9cc6bb38a7634cdb1b43f49d78ddcb92a8daa4b65193f3309b5a7ad35b134e2d5e82b8576917079a3f8e3b83b5505151cd259ec747ba27458db58356
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/provider-utils@npm:2.2.8, @ai-sdk/provider-utils@npm:^2.0.0, @ai-sdk/provider-utils@npm:^2.1.6":
|
||||
version: 2.2.8
|
||||
resolution: "@ai-sdk/provider-utils@npm:2.2.8"
|
||||
@ -333,6 +350,15 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/provider@npm:1.0.6":
|
||||
version: 1.0.6
|
||||
resolution: "@ai-sdk/provider@npm:1.0.6"
|
||||
dependencies:
|
||||
json-schema: "npm:^0.4.0"
|
||||
checksum: 10c0/251c8cd4fa53b89dcf751d0faba5482762d88fcc5ffe1cdb660327c14817a4d94206317e95e6e69fc4ae3071001191b5c418b4b9e1212d6a554a90114db216fc
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@ai-sdk/provider@npm:1.0.9":
|
||||
version: 1.0.9
|
||||
resolution: "@ai-sdk/provider@npm:1.0.9"
|
||||
@ -684,6 +710,19 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@babel/generator@npm:^7.27.5":
|
||||
version: 7.27.5
|
||||
resolution: "@babel/generator@npm:7.27.5"
|
||||
dependencies:
|
||||
"@babel/parser": "npm:^7.27.5"
|
||||
"@babel/types": "npm:^7.27.3"
|
||||
"@jridgewell/gen-mapping": "npm:^0.3.5"
|
||||
"@jridgewell/trace-mapping": "npm:^0.3.25"
|
||||
jsesc: "npm:^3.0.2"
|
||||
checksum: 10c0/8f649ef4cd81765c832bb11de4d6064b035ffebdecde668ba7abee68a7b0bce5c9feabb5dc5bb8aeba5bd9e5c2afa3899d852d2bd9ca77a711ba8c8379f416f0
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@babel/helper-compilation-targets@npm:^7.26.5":
|
||||
version: 7.27.0
|
||||
resolution: "@babel/helper-compilation-targets@npm:7.27.0"
|
||||
@ -794,6 +833,17 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@babel/parser@npm:^7.27.3, @babel/parser@npm:^7.27.5":
|
||||
version: 7.27.5
|
||||
resolution: "@babel/parser@npm:7.27.5"
|
||||
dependencies:
|
||||
"@babel/types": "npm:^7.27.3"
|
||||
bin:
|
||||
parser: ./bin/babel-parser.js
|
||||
checksum: 10c0/f7faaebf21cc1f25d9ca8ac02c447ed38ef3460ea95be7ea760916dcf529476340d72a5a6010c6641d9ed9d12ad827c8424840277ec2295c5b082ba0f291220a
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@babel/plugin-transform-arrow-functions@npm:^7.25.9":
|
||||
version: 7.25.9
|
||||
resolution: "@babel/plugin-transform-arrow-functions@npm:7.25.9"
|
||||
@ -867,6 +917,16 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@babel/types@npm:^7.27.3, @babel/types@npm:^7.27.6":
|
||||
version: 7.27.6
|
||||
resolution: "@babel/types@npm:7.27.6"
|
||||
dependencies:
|
||||
"@babel/helper-string-parser": "npm:^7.27.1"
|
||||
"@babel/helper-validator-identifier": "npm:^7.27.1"
|
||||
checksum: 10c0/39d556be114f2a6d874ea25ad39826a9e3a0e98de0233ae6d932f6d09a4b222923a90a7274c635ed61f1ba49bbd345329226678800900ad1c8d11afabd573aaf
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@bcoe/v8-coverage@npm:^1.0.2":
|
||||
version: 1.0.2
|
||||
resolution: "@bcoe/v8-coverage@npm:1.0.2"
|
||||
@ -900,7 +960,7 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@cherry-studio/ai-core@workspace:packages/aiCore":
|
||||
"@cherry-studio/ai-core@workspace:*, @cherry-studio/ai-core@workspace:packages/aiCore":
|
||||
version: 0.0.0-use.local
|
||||
resolution: "@cherry-studio/ai-core@workspace:packages/aiCore"
|
||||
dependencies:
|
||||
@ -918,15 +978,18 @@ __metadata:
|
||||
"@ai-sdk/groq": "npm:^1.2.9"
|
||||
"@ai-sdk/mistral": "npm:^1.2.8"
|
||||
"@ai-sdk/openai": "npm:^1.3.22"
|
||||
"@ai-sdk/openai-compatible": "npm:^0.2.14"
|
||||
"@ai-sdk/perplexity": "npm:^1.1.9"
|
||||
"@ai-sdk/replicate": "npm:^0.2.8"
|
||||
"@ai-sdk/togetherai": "npm:^0.2.14"
|
||||
"@ai-sdk/vercel": "npm:^0.0.1"
|
||||
"@ai-sdk/xai": "npm:^1.2.16"
|
||||
"@openrouter/ai-sdk-provider": "npm:^0.1.0"
|
||||
ai: "npm:^4.3.16"
|
||||
anthropic-vertex-ai: "npm:^1.0.2"
|
||||
ollama-ai-provider: "npm:^1.2.0"
|
||||
qwen-ai-provider: "npm:^0.1.0"
|
||||
tsdown: "npm:^0.12.8"
|
||||
typescript: "npm:^5.0.0"
|
||||
zhipu-ai-provider: "npm:^0.1.1"
|
||||
peerDependenciesMeta:
|
||||
@ -1869,6 +1932,34 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@emnapi/core@npm:^1.4.3":
|
||||
version: 1.4.3
|
||||
resolution: "@emnapi/core@npm:1.4.3"
|
||||
dependencies:
|
||||
"@emnapi/wasi-threads": "npm:1.0.2"
|
||||
tslib: "npm:^2.4.0"
|
||||
checksum: 10c0/e30101d16d37ef3283538a35cad60e22095aff2403fb9226a35330b932eb6740b81364d525537a94eb4fb51355e48ae9b10d779c0dd1cdcd55d71461fe4b45c7
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@emnapi/runtime@npm:^1.4.3":
|
||||
version: 1.4.3
|
||||
resolution: "@emnapi/runtime@npm:1.4.3"
|
||||
dependencies:
|
||||
tslib: "npm:^2.4.0"
|
||||
checksum: 10c0/3b7ab72d21cb4e034f07df80165265f85f445ef3f581d1bc87b67e5239428baa00200b68a7d5e37a0425c3a78320b541b07f76c5530f6f6f95336a6294ebf30b
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@emnapi/wasi-threads@npm:1.0.2":
|
||||
version: 1.0.2
|
||||
resolution: "@emnapi/wasi-threads@npm:1.0.2"
|
||||
dependencies:
|
||||
tslib: "npm:^2.4.0"
|
||||
checksum: 10c0/f0621b1fc715221bd2d8332c0ca922617bcd77cdb3050eae50a124eb8923c54fa425d23982dc8f29d505c8798a62d1049bace8b0686098ff9dd82270e06d772e
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@emotion/hash@npm:^0.8.0":
|
||||
version: 0.8.0
|
||||
resolution: "@emotion/hash@npm:0.8.0"
|
||||
@ -3531,6 +3622,17 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@napi-rs/wasm-runtime@npm:^0.2.10":
|
||||
version: 0.2.11
|
||||
resolution: "@napi-rs/wasm-runtime@npm:0.2.11"
|
||||
dependencies:
|
||||
"@emnapi/core": "npm:^1.4.3"
|
||||
"@emnapi/runtime": "npm:^1.4.3"
|
||||
"@tybys/wasm-util": "npm:^0.9.0"
|
||||
checksum: 10c0/049bd14c58b99fbe0967b95e9921c5503df196b59be22948d2155f17652eb305cff6728efd8685338b855da7e476dd2551fbe3a313fc2d810938f0717478441e
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@neon-rs/load@npm:^0.0.4":
|
||||
version: 0.0.4
|
||||
resolution: "@neon-rs/load@npm:0.0.4"
|
||||
@ -3646,6 +3748,18 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@openrouter/ai-sdk-provider@npm:^0.1.0":
|
||||
version: 0.1.0
|
||||
resolution: "@openrouter/ai-sdk-provider@npm:0.1.0"
|
||||
dependencies:
|
||||
"@ai-sdk/provider": "npm:1.0.6"
|
||||
"@ai-sdk/provider-utils": "npm:2.1.5"
|
||||
peerDependencies:
|
||||
zod: ^3.0.0
|
||||
checksum: 10c0/495c0349d6f42adc8e880651c215a183a4f041808019c6b2710df447ff3c81f5e69f951b589c1bb3c0cc1ce16c0d659b624d18f097b3344bce74eb178fb936a1
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@opentelemetry/api@npm:1.9.0":
|
||||
version: 1.9.0
|
||||
resolution: "@opentelemetry/api@npm:1.9.0"
|
||||
@ -3653,6 +3767,20 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@oxc-project/runtime@npm:=0.72.3":
|
||||
version: 0.72.3
|
||||
resolution: "@oxc-project/runtime@npm:0.72.3"
|
||||
checksum: 10c0/35d3be02bbb12252529585483c8fa64bb0e44fb55cf20d0763eb9cc9fe1b9033777125ba768fb7bce8e4776a48f5a175e9ef324575f0932048482c0477799ac8
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@oxc-project/types@npm:=0.72.3":
|
||||
version: 0.72.3
|
||||
resolution: "@oxc-project/types@npm:0.72.3"
|
||||
checksum: 10c0/8c1379671895b3ad3215d13a8194fc19150b16b35ad47b753c25963650055da9f11dade0225d6c522771f038451aff687e9fb9efdea5486ede5880631316c9f7
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@parcel/watcher-android-arm64@npm:2.5.1":
|
||||
version: 2.5.1
|
||||
resolution: "@parcel/watcher-android-arm64@npm:2.5.1"
|
||||
@ -3829,6 +3957,15 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@quansync/fs@npm:^0.1.1":
|
||||
version: 0.1.3
|
||||
resolution: "@quansync/fs@npm:0.1.3"
|
||||
dependencies:
|
||||
quansync: "npm:^0.2.10"
|
||||
checksum: 10c0/15d9914328d296df6626b6b2d5e9f455f618d5c8ffff09270ca3ce42c1bd21e4a91b53d6c1d857fbcae3be8c07b33ab82a83532870f2c5bf74904fe0ac60a3d1
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@rc-component/async-validator@npm:^5.0.3":
|
||||
version: 5.0.4
|
||||
resolution: "@rc-component/async-validator@npm:5.0.4"
|
||||
@ -4037,6 +4174,99 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@rolldown/binding-darwin-arm64@npm:1.0.0-beta.15":
|
||||
version: 1.0.0-beta.15
|
||||
resolution: "@rolldown/binding-darwin-arm64@npm:1.0.0-beta.15"
|
||||
conditions: os=darwin & cpu=arm64
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@rolldown/binding-darwin-x64@npm:1.0.0-beta.15":
|
||||
version: 1.0.0-beta.15
|
||||
resolution: "@rolldown/binding-darwin-x64@npm:1.0.0-beta.15"
|
||||
conditions: os=darwin & cpu=x64
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@rolldown/binding-freebsd-x64@npm:1.0.0-beta.15":
|
||||
version: 1.0.0-beta.15
|
||||
resolution: "@rolldown/binding-freebsd-x64@npm:1.0.0-beta.15"
|
||||
conditions: os=freebsd & cpu=x64
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@rolldown/binding-linux-arm-gnueabihf@npm:1.0.0-beta.15":
|
||||
version: 1.0.0-beta.15
|
||||
resolution: "@rolldown/binding-linux-arm-gnueabihf@npm:1.0.0-beta.15"
|
||||
conditions: os=linux & cpu=arm
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@rolldown/binding-linux-arm64-gnu@npm:1.0.0-beta.15":
|
||||
version: 1.0.0-beta.15
|
||||
resolution: "@rolldown/binding-linux-arm64-gnu@npm:1.0.0-beta.15"
|
||||
conditions: os=linux & cpu=arm64 & libc=glibc
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@rolldown/binding-linux-arm64-musl@npm:1.0.0-beta.15":
|
||||
version: 1.0.0-beta.15
|
||||
resolution: "@rolldown/binding-linux-arm64-musl@npm:1.0.0-beta.15"
|
||||
conditions: os=linux & cpu=arm64 & libc=musl
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@rolldown/binding-linux-x64-gnu@npm:1.0.0-beta.15":
|
||||
version: 1.0.0-beta.15
|
||||
resolution: "@rolldown/binding-linux-x64-gnu@npm:1.0.0-beta.15"
|
||||
conditions: os=linux & cpu=x64 & libc=glibc
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@rolldown/binding-linux-x64-musl@npm:1.0.0-beta.15":
|
||||
version: 1.0.0-beta.15
|
||||
resolution: "@rolldown/binding-linux-x64-musl@npm:1.0.0-beta.15"
|
||||
conditions: os=linux & cpu=x64 & libc=musl
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@rolldown/binding-wasm32-wasi@npm:1.0.0-beta.15":
|
||||
version: 1.0.0-beta.15
|
||||
resolution: "@rolldown/binding-wasm32-wasi@npm:1.0.0-beta.15"
|
||||
dependencies:
|
||||
"@napi-rs/wasm-runtime": "npm:^0.2.10"
|
||||
conditions: cpu=wasm32
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@rolldown/binding-win32-arm64-msvc@npm:1.0.0-beta.15":
|
||||
version: 1.0.0-beta.15
|
||||
resolution: "@rolldown/binding-win32-arm64-msvc@npm:1.0.0-beta.15"
|
||||
conditions: os=win32 & cpu=arm64
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@rolldown/binding-win32-ia32-msvc@npm:1.0.0-beta.15":
|
||||
version: 1.0.0-beta.15
|
||||
resolution: "@rolldown/binding-win32-ia32-msvc@npm:1.0.0-beta.15"
|
||||
conditions: os=win32 & cpu=ia32
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@rolldown/binding-win32-x64-msvc@npm:1.0.0-beta.15":
|
||||
version: 1.0.0-beta.15
|
||||
resolution: "@rolldown/binding-win32-x64-msvc@npm:1.0.0-beta.15"
|
||||
conditions: os=win32 & cpu=x64
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@rolldown/pluginutils@npm:1.0.0-beta.15":
|
||||
version: 1.0.0-beta.15
|
||||
resolution: "@rolldown/pluginutils@npm:1.0.0-beta.15"
|
||||
checksum: 10c0/9e6dad8d82a34db600ef78f8da243e9f00f2b873afef9e071fd2bc1a26d9eb5cb9da474f211c42caabb8f6fcaf9bfbecc7e69af113aa84419967f012eb3228f3
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@rollup/rollup-android-arm-eabi@npm:4.40.0":
|
||||
version: 4.40.0
|
||||
resolution: "@rollup/rollup-android-arm-eabi@npm:4.40.0"
|
||||
@ -4651,6 +4881,15 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@tybys/wasm-util@npm:^0.9.0":
|
||||
version: 0.9.0
|
||||
resolution: "@tybys/wasm-util@npm:0.9.0"
|
||||
dependencies:
|
||||
tslib: "npm:^2.4.0"
|
||||
checksum: 10c0/f9fde5c554455019f33af6c8215f1a1435028803dc2a2825b077d812bed4209a1a64444a4ca0ce2ea7e1175c8d88e2f9173a36a33c199e8a5c671aa31de8242d
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"@types/aria-query@npm:^5.0.1":
|
||||
version: 5.0.4
|
||||
resolution: "@types/aria-query@npm:5.0.4"
|
||||
@ -6139,6 +6378,7 @@ __metadata:
|
||||
"@agentic/tavily": "npm:^7.3.3"
|
||||
"@ant-design/v5-patch-for-react-19": "npm:^1.0.3"
|
||||
"@anthropic-ai/sdk": "npm:^0.41.0"
|
||||
"@cherry-studio/ai-core": "workspace:*"
|
||||
"@cherrystudio/embedjs": "npm:^0.1.31"
|
||||
"@cherrystudio/embedjs-libsql": "npm:^0.1.31"
|
||||
"@cherrystudio/embedjs-loader-csv": "npm:^0.1.31"
|
||||
@ -6508,6 +6748,13 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"ansis@npm:^4.0.0, ansis@npm:^4.1.0":
|
||||
version: 4.1.0
|
||||
resolution: "ansis@npm:4.1.0"
|
||||
checksum: 10c0/df62d017a7791babdaf45b93f930d2cfd6d1dab5568b610735c11434c9a5ef8f513740e7cfd80bcbc3530fc8bd892b88f8476f26621efc251230e53cbd1a2c24
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"antd@npm:^5.22.5":
|
||||
version: 5.24.7
|
||||
resolution: "antd@npm:5.24.7"
|
||||
@ -6769,6 +7016,16 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"ast-kit@npm:^2.1.0":
|
||||
version: 2.1.0
|
||||
resolution: "ast-kit@npm:2.1.0"
|
||||
dependencies:
|
||||
"@babel/parser": "npm:^7.27.3"
|
||||
pathe: "npm:^2.0.3"
|
||||
checksum: 10c0/67246f34745f40b6a5bee2467a1a00f7f006a051f80d7cda7e3b7fe5f7d7a1f262521b72643fcbffb21d33f36aa59868636229a769b2802355d687815ad7b13d
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"ast-types@npm:^0.13.4":
|
||||
version: 0.13.4
|
||||
resolution: "ast-types@npm:0.13.4"
|
||||
@ -6924,6 +7181,13 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"birpc@npm:^2.3.0":
|
||||
version: 2.4.0
|
||||
resolution: "birpc@npm:2.4.0"
|
||||
checksum: 10c0/6ecda217b540189221913f215055baf4f10f264a1a8f0000ef6db3ecb0ccc5e4fde135b5f0719c389f1a593e64af3041404019711225ab31badf23c2a98d7778
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"bl@npm:^1.0.0":
|
||||
version: 1.2.3
|
||||
resolution: "bl@npm:1.2.3"
|
||||
@ -7435,7 +7699,7 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"chokidar@npm:^4.0.0":
|
||||
"chokidar@npm:^4.0.0, chokidar@npm:^4.0.3":
|
||||
version: 4.0.3
|
||||
resolution: "chokidar@npm:4.0.3"
|
||||
dependencies:
|
||||
@ -8472,6 +8736,18 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"debug@npm:^4.4.1":
|
||||
version: 4.4.1
|
||||
resolution: "debug@npm:4.4.1"
|
||||
dependencies:
|
||||
ms: "npm:^2.1.3"
|
||||
peerDependenciesMeta:
|
||||
supports-color:
|
||||
optional: true
|
||||
checksum: 10c0/d2b44bc1afd912b49bb7ebb0d50a860dc93a4dd7d946e8de94abc957bb63726b7dd5aa48c18c2386c379ec024c46692e15ed3ed97d481729f929201e671fcd55
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"decamelize@npm:1.2.0":
|
||||
version: 1.2.0
|
||||
resolution: "decamelize@npm:1.2.0"
|
||||
@ -8661,6 +8937,13 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"defu@npm:^6.1.4":
|
||||
version: 6.1.4
|
||||
resolution: "defu@npm:6.1.4"
|
||||
checksum: 10c0/2d6cc366262dc0cb8096e429368e44052fdf43ed48e53ad84cc7c9407f890301aa5fcb80d0995abaaf842b3949f154d060be4160f7a46cb2bc2f7726c81526f5
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"degenerator@npm:^5.0.0":
|
||||
version: 5.0.1
|
||||
resolution: "degenerator@npm:5.0.1"
|
||||
@ -8787,6 +9070,13 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"diff@npm:^8.0.2":
|
||||
version: 8.0.2
|
||||
resolution: "diff@npm:8.0.2"
|
||||
checksum: 10c0/abfb387f033e089df3ec3be960205d17b54df8abf0924d982a7ced3a94c557a4e6cbff2e78b121f216b85f466b3d8d041673a386177c311aaea41459286cc9bc
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"dingbat-to-unicode@npm:^1.0.1":
|
||||
version: 1.0.1
|
||||
resolution: "dingbat-to-unicode@npm:1.0.1"
|
||||
@ -8964,6 +9254,18 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"dts-resolver@npm:^2.1.1":
|
||||
version: 2.1.1
|
||||
resolution: "dts-resolver@npm:2.1.1"
|
||||
peerDependencies:
|
||||
oxc-resolver: ">=11.0.0"
|
||||
peerDependenciesMeta:
|
||||
oxc-resolver:
|
||||
optional: true
|
||||
checksum: 10c0/bc36d71822d39f23cfe274b6781fae4b1729bd8b0a07e4a011fe243a73c5dbbb30ea067fb0d6248fdfedc29cf4dfc0ff19f0dd38950158444409d109c1c55b7e
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"duck@npm:^0.1.12":
|
||||
version: 0.1.12
|
||||
resolution: "duck@npm:0.1.12"
|
||||
@ -9187,6 +9489,13 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"empathic@npm:^1.1.0":
|
||||
version: 1.1.0
|
||||
resolution: "empathic@npm:1.1.0"
|
||||
checksum: 10c0/ed906c4ad6dabe1477ed00d6420f79eff8ac72e2eb580aab42406f50160fd34d66e8381e92b405e96d75a826a840706af261fd397c3e7db4d1a293d23e2e72f7
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"encodeurl@npm:^2.0.0":
|
||||
version: 2.0.0
|
||||
resolution: "encodeurl@npm:2.0.0"
|
||||
@ -10752,6 +11061,15 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"get-tsconfig@npm:^4.10.1":
|
||||
version: 4.10.1
|
||||
resolution: "get-tsconfig@npm:4.10.1"
|
||||
dependencies:
|
||||
resolve-pkg-maps: "npm:^1.0.0"
|
||||
checksum: 10c0/7f8e3dabc6a49b747920a800fb88e1952fef871cdf51b79e98db48275a5de6cdaf499c55ee67df5fa6fe7ce65f0063e26de0f2e53049b408c585aa74d39ffa21
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"get-uri@npm:^6.0.1":
|
||||
version: 6.0.4
|
||||
resolution: "get-uri@npm:6.0.4"
|
||||
@ -11230,6 +11548,13 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"hookable@npm:^5.5.3":
|
||||
version: 5.5.3
|
||||
resolution: "hookable@npm:5.5.3"
|
||||
checksum: 10c0/275f4cc84d27f8d48c5a5cd5685b6c0fea9291be9deea5bff0cfa72856ed566abde1dcd8cb1da0f9a70b4da3d7ec0d60dc3554c4edbba647058cc38816eced3d
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"hosted-git-info@npm:^4.1.0":
|
||||
version: 4.1.0
|
||||
resolution: "hosted-git-info@npm:4.1.0"
|
||||
@ -11963,6 +12288,15 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"jiti@npm:^2.4.2":
|
||||
version: 2.4.2
|
||||
resolution: "jiti@npm:2.4.2"
|
||||
bin:
|
||||
jiti: lib/jiti-cli.mjs
|
||||
checksum: 10c0/4ceac133a08c8faff7eac84aabb917e85e8257f5ad659e843004ce76e981c457c390a220881748ac67ba1b940b9b729b30fb85cbaf6e7989f04b6002c94da331
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"js-base64@npm:^3.7.5":
|
||||
version: 3.7.7
|
||||
resolution: "js-base64@npm:3.7.7"
|
||||
@ -15585,7 +15919,7 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"quansync@npm:^0.2.8":
|
||||
"quansync@npm:^0.2.10, quansync@npm:^0.2.8":
|
||||
version: 0.2.10
|
||||
resolution: "quansync@npm:0.2.10"
|
||||
checksum: 10c0/f86f1d644f812a3a7c42de79eb401c47a5a67af82a9adff8a8afb159325e03e00f77cebbf42af6340a0bd47bd0c1fbe999e7caf7e1bbb30d7acb00c8729b7530
|
||||
@ -16794,6 +17128,13 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"resolve-pkg-maps@npm:^1.0.0":
|
||||
version: 1.0.0
|
||||
resolution: "resolve-pkg-maps@npm:1.0.0"
|
||||
checksum: 10c0/fb8f7bbe2ca281a73b7ef423a1cbc786fb244bd7a95cbe5c3fba25b27d327150beca8ba02f622baea65919a57e061eb5005204daa5f93ed590d9b77463a567ab
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"responselike@npm:^2.0.0":
|
||||
version: 2.0.1
|
||||
resolution: "responselike@npm:2.0.1"
|
||||
@ -16903,6 +17244,85 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"rolldown-plugin-dts@npm:^0.13.11":
|
||||
version: 0.13.11
|
||||
resolution: "rolldown-plugin-dts@npm:0.13.11"
|
||||
dependencies:
|
||||
"@babel/generator": "npm:^7.27.5"
|
||||
"@babel/parser": "npm:^7.27.5"
|
||||
"@babel/types": "npm:^7.27.6"
|
||||
ast-kit: "npm:^2.1.0"
|
||||
birpc: "npm:^2.3.0"
|
||||
debug: "npm:^4.4.1"
|
||||
dts-resolver: "npm:^2.1.1"
|
||||
get-tsconfig: "npm:^4.10.1"
|
||||
peerDependencies:
|
||||
"@typescript/native-preview": ">=7.0.0-dev.20250601.1"
|
||||
rolldown: ^1.0.0-beta.9
|
||||
typescript: ^5.0.0
|
||||
vue-tsc: ~2.2.0
|
||||
peerDependenciesMeta:
|
||||
"@typescript/native-preview":
|
||||
optional: true
|
||||
typescript:
|
||||
optional: true
|
||||
vue-tsc:
|
||||
optional: true
|
||||
checksum: 10c0/026cda47fd0b4d79e51e5dad0aca02749092ab006ac9317d153a42620cfd7f613ed075da3828aa37f49bec585c7871b5c997ed699385b9f326314004bf5c71e0
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"rolldown@npm:1.0.0-beta.15":
|
||||
version: 1.0.0-beta.15
|
||||
resolution: "rolldown@npm:1.0.0-beta.15"
|
||||
dependencies:
|
||||
"@oxc-project/runtime": "npm:=0.72.3"
|
||||
"@oxc-project/types": "npm:=0.72.3"
|
||||
"@rolldown/binding-darwin-arm64": "npm:1.0.0-beta.15"
|
||||
"@rolldown/binding-darwin-x64": "npm:1.0.0-beta.15"
|
||||
"@rolldown/binding-freebsd-x64": "npm:1.0.0-beta.15"
|
||||
"@rolldown/binding-linux-arm-gnueabihf": "npm:1.0.0-beta.15"
|
||||
"@rolldown/binding-linux-arm64-gnu": "npm:1.0.0-beta.15"
|
||||
"@rolldown/binding-linux-arm64-musl": "npm:1.0.0-beta.15"
|
||||
"@rolldown/binding-linux-x64-gnu": "npm:1.0.0-beta.15"
|
||||
"@rolldown/binding-linux-x64-musl": "npm:1.0.0-beta.15"
|
||||
"@rolldown/binding-wasm32-wasi": "npm:1.0.0-beta.15"
|
||||
"@rolldown/binding-win32-arm64-msvc": "npm:1.0.0-beta.15"
|
||||
"@rolldown/binding-win32-ia32-msvc": "npm:1.0.0-beta.15"
|
||||
"@rolldown/binding-win32-x64-msvc": "npm:1.0.0-beta.15"
|
||||
"@rolldown/pluginutils": "npm:1.0.0-beta.15"
|
||||
ansis: "npm:^4.0.0"
|
||||
dependenciesMeta:
|
||||
"@rolldown/binding-darwin-arm64":
|
||||
optional: true
|
||||
"@rolldown/binding-darwin-x64":
|
||||
optional: true
|
||||
"@rolldown/binding-freebsd-x64":
|
||||
optional: true
|
||||
"@rolldown/binding-linux-arm-gnueabihf":
|
||||
optional: true
|
||||
"@rolldown/binding-linux-arm64-gnu":
|
||||
optional: true
|
||||
"@rolldown/binding-linux-arm64-musl":
|
||||
optional: true
|
||||
"@rolldown/binding-linux-x64-gnu":
|
||||
optional: true
|
||||
"@rolldown/binding-linux-x64-musl":
|
||||
optional: true
|
||||
"@rolldown/binding-wasm32-wasi":
|
||||
optional: true
|
||||
"@rolldown/binding-win32-arm64-msvc":
|
||||
optional: true
|
||||
"@rolldown/binding-win32-ia32-msvc":
|
||||
optional: true
|
||||
"@rolldown/binding-win32-x64-msvc":
|
||||
optional: true
|
||||
bin:
|
||||
rolldown: bin/cli.mjs
|
||||
checksum: 10c0/de2ea888c85ce458232707b75c9e8982ee128c497d0e5c992b4085c57bdd36bcc73d04f3fba3186ab70274288d6914c94f280ab380ce0b4347610b51b82e25e8
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"rollup-plugin-visualizer@npm:^5.12.0":
|
||||
version: 5.14.0
|
||||
resolution: "rollup-plugin-visualizer@npm:5.14.0"
|
||||
@ -17199,6 +17619,15 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"semver@npm:^7.7.2":
|
||||
version: 7.7.2
|
||||
resolution: "semver@npm:7.7.2"
|
||||
bin:
|
||||
semver: bin/semver.js
|
||||
checksum: 10c0/aca305edfbf2383c22571cb7714f48cadc7ac95371b4b52362fb8eeffdfbc0de0669368b82b2b15978f8848f01d7114da65697e56cd8c37b0dab8c58e543f9ea
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"send@npm:^1.1.0, send@npm:^1.2.0":
|
||||
version: 1.2.0
|
||||
resolution: "send@npm:1.2.0"
|
||||
@ -18163,6 +18592,16 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"tinyglobby@npm:^0.2.14":
|
||||
version: 0.2.14
|
||||
resolution: "tinyglobby@npm:0.2.14"
|
||||
dependencies:
|
||||
fdir: "npm:^6.4.4"
|
||||
picomatch: "npm:^4.0.2"
|
||||
checksum: 10c0/f789ed6c924287a9b7d3612056ed0cda67306cd2c80c249fd280cf1504742b12583a2089b61f4abbd24605f390809017240e250241f09938054c9b363e51c0a6
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"tinypool@npm:^1.0.2":
|
||||
version: 1.0.2
|
||||
resolution: "tinypool@npm:1.0.2"
|
||||
@ -18381,6 +18820,46 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"tsdown@npm:^0.12.8":
|
||||
version: 0.12.8
|
||||
resolution: "tsdown@npm:0.12.8"
|
||||
dependencies:
|
||||
ansis: "npm:^4.1.0"
|
||||
cac: "npm:^6.7.14"
|
||||
chokidar: "npm:^4.0.3"
|
||||
debug: "npm:^4.4.1"
|
||||
diff: "npm:^8.0.2"
|
||||
empathic: "npm:^1.1.0"
|
||||
hookable: "npm:^5.5.3"
|
||||
rolldown: "npm:1.0.0-beta.15"
|
||||
rolldown-plugin-dts: "npm:^0.13.11"
|
||||
semver: "npm:^7.7.2"
|
||||
tinyexec: "npm:^1.0.1"
|
||||
tinyglobby: "npm:^0.2.14"
|
||||
unconfig: "npm:^7.3.2"
|
||||
peerDependencies:
|
||||
"@arethetypeswrong/core": ^0.18.1
|
||||
publint: ^0.3.0
|
||||
typescript: ^5.0.0
|
||||
unplugin-lightningcss: ^0.4.0
|
||||
unplugin-unused: ^0.5.0
|
||||
peerDependenciesMeta:
|
||||
"@arethetypeswrong/core":
|
||||
optional: true
|
||||
publint:
|
||||
optional: true
|
||||
typescript:
|
||||
optional: true
|
||||
unplugin-lightningcss:
|
||||
optional: true
|
||||
unplugin-unused:
|
||||
optional: true
|
||||
bin:
|
||||
tsdown: dist/run.mjs
|
||||
checksum: 10c0/8b824da2cdabbd9a783ac9d5d71a70d17ee055b614cf95070180b061f4b4d422c66795c6e871da67aa0ce15355d82dc97cb86f126d38f8af5c088959107a8fc9
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"tslib@npm:2.6.2":
|
||||
version: 2.6.2
|
||||
resolution: "tslib@npm:2.6.2"
|
||||
@ -18512,6 +18991,18 @@ __metadata:
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"unconfig@npm:^7.3.2":
|
||||
version: 7.3.2
|
||||
resolution: "unconfig@npm:7.3.2"
|
||||
dependencies:
|
||||
"@quansync/fs": "npm:^0.1.1"
|
||||
defu: "npm:^6.1.4"
|
||||
jiti: "npm:^2.4.2"
|
||||
quansync: "npm:^0.2.8"
|
||||
checksum: 10c0/245a0add92413b9a04a0bad879c7ee4d6904e58c9d091dbb1ea89fb7491d22d0f2ad17bd561329e006cb1954b5ece00f4cd9f9300a72af5013a927dc7fd5d27b
|
||||
languageName: node
|
||||
linkType: hard
|
||||
|
||||
"underscore@npm:^1.13.1":
|
||||
version: 1.13.7
|
||||
resolution: "underscore@npm:1.13.7"
|
||||
|
||||