refactor: restructure AI Core architecture and enhance client functionality

- Updated the AI Core documentation to reflect the new architecture and design principles, emphasizing modularity and type safety.
- Refactored the client structure by removing obsolete files and consolidating client creation logic into a more streamlined format.
- Introduced a new core module for managing execution and middleware, improving the overall organization of the codebase.
- Enhanced the orchestration layer to provide a clearer API for users, integrating the creation and execution processes more effectively.
- Added comprehensive type definitions and utility functions for better type safety and usability across the SDK.
This commit is contained in:
MyPrototypeWhat 2025-06-23 19:51:40 +08:00
parent f08c444ffb
commit 2a588fdab2
39 changed files with 1538 additions and 1465 deletions

View File

@ -1,13 +1,14 @@
# Cherry Studio AI Core 基于 Vercel AI SDK 的技术架构
# AI Core 基于 Vercel AI SDK 的技术架构
## 1. 架构设计理念
### 1.1 设计目标
- **分层架构**orchestration编排层→ core核心层职责分离
- **统一接口**:使用 Vercel AI SDK 统一不同 AI Provider 的接口差异
- **动态导入**:通过动态导入实现按需加载,减少打包体积
- **最小包装**:直接使用 AI SDK 的类型和接口,避免重复定义
- **插件系统**:基于钩子的插件架构,支持请求全生命周期扩展
- **插件系统**:基于钩子的通用插件架构,支持请求全生命周期扩展
- **类型安全**:利用 TypeScript 和 AI SDK 的类型系统确保类型安全
- **轻量级**:专注核心功能,保持包的轻量和高效
- **包级独立**:作为独立包管理,便于复用和维护
@ -15,30 +16,57 @@
### 1.2 核心优势
- **标准化**AI SDK 提供统一的模型接口,减少适配工作
- **简化维护**:废弃复杂的 XxxApiClient统一为工厂函数模式
- **分层设计**:清晰的职责分离,便于维护和扩展
- **更好的开发体验**:完整的 TypeScript 支持和丰富的生态系统
- **性能优化**AI SDK 内置优化和最佳实践
- **模块化设计**:独立包结构,支持跨项目复用
- **可扩展插件**基于钩子的插件系统,支持灵活的功能扩展和流转换
- **可扩展插件**通用的流转换和参数处理插件系统
## 2. 整体架构图
```mermaid
graph TD
subgraph "Cherry Studio 主应用"
subgraph "用户应用 (如 Cherry Studio)"
UI["用户界面"]
Components["React 组件"]
Components["应用组件"]
end
subgraph "packages/aiCore (AI Core 包)"
ApiClientFactory["ApiClientFactory (工厂类)"]
UniversalClient["UniversalAiSdkClient (统一客户端)"]
ProviderRegistry["Provider 注册表"]
PluginManager["插件管理器"]
end
subgraph "Orchestration Layer (编排层)"
OrchAPI["api.ts (用户API)"]
OrchTypes["types.ts (编排类型)"]
end
subgraph "动态导入层"
DynamicImport["动态导入"]
subgraph "Core Layer (核心层)"
subgraph "Creation (创建层)"
ConfigManager["ConfigManager (配置管理)"]
ModelCreator["ModelCreator (模型创建)"]
ProviderCreator["ProviderCreator (提供商创建)"]
end
subgraph "Execution (执行层)"
AiExecutor["AiExecutor (执行器)"]
end
subgraph "Clients (客户端层)"
PluginClient["PluginEnabledAiClient (插件客户端)"]
end
subgraph "Middleware (中间件)"
MiddlewareManager["MiddlewareManager (中间件管理)"]
ModelWrapper["ModelWrapper (模型包装)"]
end
subgraph "Plugins (插件)"
PluginManager["PluginManager (插件管理)"]
StreamTransforms["Stream Transforms (流转换)"]
end
subgraph "Providers (提供商)"
Registry["Provider Registry (注册表)"]
Factory["Provider Factory (工厂)"]
end
end
end
subgraph "Vercel AI SDK"
@ -50,61 +78,78 @@ graph TD
Others["其他 19+ Providers"]
end
subgraph "插件生态"
FirstHooks["First Hooks (resolveModel, loadTemplate)"]
SequentialHooks["Sequential Hooks (transformParams, transformResult)"]
ParallelHooks["Parallel Hooks (onRequestStart, onRequestEnd, onError)"]
StreamHooks["Stream Hooks (transformStream)"]
end
UI --> OrchAPI
Components --> OrchAPI
OrchAPI --> AiExecutor
AiExecutor --> PluginClient
PluginClient --> PluginManager
PluginClient --> ConfigManager
ConfigManager --> ModelCreator
ModelCreator --> ProviderCreator
ModelCreator --> MiddlewareManager
MiddlewareManager --> ModelWrapper
ProviderCreator --> Registry
Registry --> Factory
Factory --> OpenAI
Factory --> Anthropic
Factory --> Google
Factory --> XAI
Factory --> Others
UI --> ApiClientFactory
Components --> ApiClientFactory
ApiClientFactory --> UniversalClient
UniversalClient --> PluginManager
PluginManager --> ProviderRegistry
ProviderRegistry --> DynamicImport
DynamicImport --> OpenAI
DynamicImport --> Anthropic
DynamicImport --> Google
DynamicImport --> XAI
DynamicImport --> Others
UniversalClient --> AICore
PluginClient --> AICore
AICore --> streamText
AICore --> generateText
AICore --> streamObject
AICore --> generateObject
PluginManager --> FirstHooks
PluginManager --> SequentialHooks
PluginManager --> ParallelHooks
PluginManager --> StreamHooks
PluginManager --> StreamTransforms
```
## 3. 包结构设计
### 3.1 包级文件结构(当前简化版 + 规划)
### 3.1 当前架构文件结构
```
packages/aiCore/
├── src/
│ ├── providers/
│ │ ├── registry.ts # Provider 注册表 ✅
│ │ └── types.ts # 核心类型定义 ✅
│ ├── clients/
│ │ ├── UniversalAiSdkClient.ts # 统一AI SDK客户端 ✅
│ │ └── ApiClientFactory.ts # 客户端工厂 ✅
│ ├── middleware/ # 插件系统 ✅
│ │ ├── types.ts # 插件类型定义 ✅
│ │ ├── manager.ts # 插件管理器 ✅
│ │ ├── examples/ # 示例插件 ✅
│ │ │ ├── example-plugins.ts # 示例插件实现 ✅
│ │ │ └── example-usage.ts # 使用示例 ✅
│ │ ├── README.md # 插件系统文档 ✅
│ │ └── index.ts # 插件模块入口 ✅
│ ├── services/ # 高级服务 (规划中)
│ │ ├── AiCoreService.ts # 统一服务入口
│ │ ├── CompletionsService.ts # 文本生成服务
│ │ ├── EmbeddingService.ts # 嵌入服务
│ │ └── ImageService.ts # 图像生成服务
│ ├── orchestration/ # 编排层 - 用户面向接口
│ │ ├── api.ts # 主要API函数 ✅
│ │ ├── types.ts # 编排类型定义 ✅
│ │ └── index.ts # 编排层导出 ✅
│ ├── core/ # 核心层 - 内部实现
│ │ ├── creation/ # 创建层
│ │ │ ├── types.ts # 创建类型定义 ✅
│ │ │ ├── ConfigManager.ts # 配置管理器 ✅
│ │ │ ├── ModelCreator.ts # 模型创建器 ✅
│ │ │ ├── ProviderCreator.ts # 提供商创建器 ✅
│ │ │ └── index.ts # 创建层导出 ✅
│ │ ├── execution/ # 执行层
│ │ │ ├── types.ts # 执行类型定义 ✅
│ │ │ ├── AiExecutor.ts # AI执行器 ✅
│ │ │ └── index.ts # 执行层导出 ✅
│ │ ├── clients/ # 客户端层
│ │ │ ├── PluginEnabledAiClient.ts # 插件客户端 ✅
│ │ │ └── index.ts # 客户端导出 ✅
│ │ ├── middleware/ # 中间件系统
│ │ │ ├── types.ts # 中间件类型 ✅
│ │ │ ├── MiddlewareManager.ts # 中间件管理器 ✅
│ │ │ ├── ModelWrapper.ts # 模型包装器 ✅
│ │ │ └── index.ts # 中间件导出 ✅
│ │ ├── plugins/ # 插件系统
│ │ │ ├── types.ts # 插件类型定义 ✅
│ │ │ ├── manager.ts # 插件管理器 ✅
│ │ │ ├── examples/ # 示例插件 ✅
│ │ │ │ ├── example-plugins.ts
│ │ │ │ └── example-usage.ts
│ │ │ ├── README.md # 插件文档 ✅
│ │ │ └── index.ts # 插件导出 ✅
│ │ ├── providers/ # 提供商管理
│ │ │ ├── registry.ts # 提供商注册表 ✅
│ │ │ ├── factory.ts # 提供商工厂 ✅
│ │ │ ├── types.ts # 提供商类型 ✅
│ │ │ └── utils.ts # 工具函数 ✅
│ │ └── index.ts # 核心层导出 ✅
│ ├── types.ts # 全局类型定义 ✅
│ └── index.ts # 包主入口文件 ✅
├── package.json # 包配置文件 ✅
├── tsconfig.json # TypeScript 配置 ✅
@ -112,511 +157,348 @@ packages/aiCore/
└── AI_SDK_ARCHITECTURE.md # 本文档 ✅
```
**图例:**
## 4. 架构分层详解
- ✅ 已实现
- 规划中:设计完成,待实现
### 4.1 Orchestration Layer (编排层)
## 4. 核心组件详解
**职责**面向用户的主要 API 接口提供简洁的使用体验
### 4.1 Provider 注册表 (`providers/registry.ts`)
**核心文件**
统一管理所有 AI Provider 的注册和动态导入。
- `api.ts`: 主要API函数 (`streamText`, `generateText`, `streamObject`, `generateObject`)
- `types.ts`: 编排配置类型定义
**主要功能:**
**设计特点**
- 动态导入 AI SDK providers
- 提供统一的 Provider 创建接口
- 支持 19+ 官方 AI SDK providers
- 类型安全的 Provider 配置
- 支持两种使用方式配置模式和直接AI SDK模式
- 统一的函数重载设计
- 自动处理执行器创建和调用
**核心 API**
**核心API**
```typescript
export interface ProviderConfig {
id: string
name: string
import: () => Promise<any>
creatorFunctionName: string
}
// 配置模式 - 推荐使用
export async function streamText<T extends ProviderId>(
config: OrchestrationConfig<T>,
modelId: string,
params: StreamTextParams
): Promise<ReturnType<typeof aiStreamText>>
export class AiProviderRegistry {
getProvider(id: string): ProviderConfig | undefined
getAllProviders(): ProviderConfig[]
isSupported(id: string): boolean
registerProvider(config: ProviderConfig): void
// 直接AI SDK模式 - 兼容原生使用
export async function streamText(params: Parameters<typeof aiStreamText>[0]): Promise<ReturnType<typeof aiStreamText>>
```
### 4.2 Core Layer (核心层)
#### 4.2.1 Creation Layer (创建层)
**职责**:负责配置解析、模型创建和提供商管理
**核心组件**
- `ConfigManager`: 配置解析和中间件收集
- `ModelCreator`: 高级模型创建逻辑
- `ProviderCreator`: 底层提供商导入和模型创建
**关键功能**
```typescript
// 配置管理
export function resolveConfig(
providerId: ProviderId,
modelId: string,
userOptions: ProviderSettingsMap[ProviderId],
plugins: AiPlugin[] = []
): ResolvedConfig
// 模型创建
export async function createModel(
providerId: ProviderId,
modelId: string,
userOptions: ProviderSettingsMap[ProviderId],
plugins: AiPlugin[] = []
): Promise<LanguageModel>
```
#### 4.2.2 Execution Layer (执行层)
**职责**AI执行引擎封装插件处理逻辑
**核心组件**
- `AiExecutor`: 主要执行器类
**设计特点**
- 构造时确定插件配置,运行时不可变更
- 内部使用 `PluginEnabledAiClient` 处理插件
- 提供类型安全的API接口
**核心API**
```typescript
export class AiExecutor<T extends ProviderId = any> {
static create<T extends ProviderId>(
providerId: T,
options: ProviderSettingsMap[T],
plugins: AiPlugin[] = []
): AiExecutor<T>
async streamText(modelId: string, params: StreamTextParams): Promise<StreamTextResult<any>>
async generateText(modelId: string, params: GenerateTextParams): Promise<GenerateTextResult<any>>
async streamObject(modelId: string, params: StreamObjectParams): Promise<StreamObjectResult<any>>
async generateObject(modelId: string, params: GenerateObjectParams): Promise<GenerateObjectResult<any>>
}
```
**支持的 Providers**
#### 4.2.3 Clients Layer (客户端层)
**职责**插件处理连接插件系统和AI SDK
**核心组件**
- `PluginEnabledAiClient`: 处理插件执行和AI SDK调用
**设计特点**
- 使用 core/creation 层创建模型
- 区分 streaming 和 non-streaming 插件处理
- 支持 `streamText` 的流转换和其他方法的常规插件处理
#### 4.2.4 Middleware Layer (中间件层)
**职责**AI SDK原生中间件支持
**核心组件**
- `MiddlewareManager`: 中间件管理 (函数式)
- `ModelWrapper`: 模型包装器 (函数式)
**设计哲学**
- 使用函数而非类,简化设计
- 直接使用AI SDK的 `wrapLanguageModel`
- 与插件系统分离,职责明确
#### 4.2.5 Plugins Layer (插件层)
**职责**:特定的插件功能
**核心组件**
- `PluginManager`: 插件管理器
- 流转换收集:`collectStreamTransforms`
**设计特点**
- 支持流转换 (`experimental_transform`)
- 与AI SDK中间件分离
- 专注于特定需求
#### 4.2.6 Providers Layer (提供商层)
**职责**AI Provider注册表和动态导入
**核心组件**
- `registry.ts`: 19+ Provider配置和类型
- `factory.ts`: Provider配置工厂
**支持的Providers**
- OpenAI, Anthropic, Google, XAI
- Azure OpenAI, Amazon Bedrock, Google Vertex
- Groq, Together.ai, Fireworks, DeepSeek
- Cerebras, DeepInfra, Replicate, Perplexity
- Cohere, Fal AI, Vercel (19+ providers)
### 4.2 统一AI SDK客户端 (`clients/UniversalAiSdkClient.ts`)
将不同 AI providers 包装为统一接口。
**主要功能:**
- 异步初始化和动态加载
- 统一的 stream() 和 generate() 方法
- 直接使用 AI SDK 的 streamText() 和 generateText()
- 配置验证和错误处理
**核心 API**
```typescript
export class UniversalAiSdkClient {
async initialize(): Promise<void>
isInitialized(): boolean
async stream(request: any): Promise<any>
async generate(request: any): Promise<any>
validateConfig(): boolean
getProviderInfo(): { id: string; name: string; isInitialized: boolean }
}
```
### 4.3 客户端工厂 (`clients/ApiClientFactory.ts`)
统一创建和管理 AI SDK 客户端。
**主要功能:**
- 统一的客户端创建接口
- 智能缓存和复用机制
- 批量创建和健康检查
- 错误处理和重试
**核心 API**
```typescript
export class ApiClientFactory {
static async createAiSdkClient(providerId: string, options: any): Promise<UniversalAiSdkClient>
static getCachedClient(providerId: string, options: any): UniversalAiSdkClient | undefined
static clearCache(): void
static async healthCheck(): Promise<HealthCheckResult>
static getSupportedProviders(): ProviderInfo[]
}
```
### 4.4 钩子风格插件系统 ✅
基于钩子机制的插件架构设计,提供灵活的扩展系统。
**钩子类型:**
1. **First Hooks**:执行到第一个有效结果就停止
2. **Sequential Hooks**:按序链式执行,可变换数据
3. **Parallel Hooks**:并发执行,用于副作用
4. **Stream Hooks**:流转换,直接传递给 AI SDK
**优先级系统:**
- `pre`:前置处理(-100 到 -1
- `normal`标准处理0 到 99
- `post`后置处理100 到 199
**核心钩子:**
**First Hooks (第一个有效结果)**
- `resolveModel`:模型解析,返回第一个匹配的模型
- `loadTemplate`:模板加载,返回第一个找到的模板
**Sequential Hooks (链式变换)**
- `transformParams`:参数转换,依次变换请求参数
- `transformResult`:结果转换,依次变换响应结果
**Parallel Hooks (并发副作用)**
- `onRequestStart`:请求开始时触发
- `onRequestEnd`:请求结束时触发
- `onError`:错误发生时触发
**Stream Hooks (流转换)**
- `transformStream`:流转换,返回 AI SDK 转换函数
**插件 API 设计:**
```typescript
export interface Plugin {
name: string
enforce?: 'pre' | 'normal' | 'post'
// First hooks - 执行到第一个有效结果
resolveModel?(params: ResolveModelParams): Promise<string | null>
loadTemplate?(params: LoadTemplateParams): Promise<Template | null>
// Sequential hooks - 链式变换
transformParams?(params: any, context: PluginContext): Promise<any>
transformResult?(result: any, context: PluginContext): Promise<any>
// Parallel hooks - 并发副作用
onRequestStart?(context: PluginContext): Promise<void>
onRequestEnd?(context: PluginContext): Promise<void>
onError?(error: Error, context: PluginContext): Promise<void>
// Stream hooks - AI SDK 流转换
transformStream?(context: PluginContext): Promise<(readable: ReadableStream) => ReadableStream>
}
export interface PluginContext {
request: any
response?: any
metadata: Record<string, any>
provider: string
model: string
}
export class PluginManager {
use(plugin: Plugin): this
executeFirstHook<T>(hookName: string, ...args: any[]): Promise<T | null>
executeSequentialHook<T>(hookName: string, initialValue: T, context: PluginContext): Promise<T>
executeParallelHook(hookName: string, ...args: any[]): Promise<void>
collectStreamTransforms(context: PluginContext): Promise<Array<(readable: ReadableStream) => ReadableStream>>
}
```
### 4.5 统一服务接口 (规划中)
作为包的主要对外接口,提供高级 AI 功能。
**服务方法:**
- `completions()`: 文本生成
- `streamCompletions()`: 流式文本生成
- `generateObject()`: 结构化数据生成
- `generateImage()`: 图像生成
- `embed()`: 文本嵌入
**API 设计:**
```typescript
export class AiCoreService {
constructor(middlewares?: Middleware[])
async completions(request: CompletionRequest): Promise<CompletionResponse>
async streamCompletions(request: CompletionRequest): Promise<StreamCompletionResponse>
async generateObject<T>(request: ObjectGenerationRequest): Promise<T>
async generateImage(request: ImageGenerationRequest): Promise<ImageResponse>
async embed(request: EmbeddingRequest): Promise<EmbeddingResponse>
use(middleware: Middleware): this
configure(config: AiCoreConfig): this
}
```
- 等19+ AI SDK官方支持的providers
## 5. 使用方式
### 5.1 多 Provider 支持
### 5.1 推荐使用方式 (Orchestration API)
```typescript
import { createAiSdkClient, AiCore } from '@cherrystudio/ai-core'
import { streamText, generateText } from '@cherrystudio/ai-core'
// 检查支持的 providers
const providers = AiCore.getSupportedProviders()
console.log(`支持 ${providers.length} 个 AI providers`)
// 创建多个 provider 客户端
const openai = await createAiSdkClient('openai', { apiKey: 'openai-key' })
const anthropic = await createAiSdkClient('anthropic', { apiKey: 'anthropic-key' })
const google = await createAiSdkClient('google', { apiKey: 'google-key' })
const xai = await createAiSdkClient('xai', { apiKey: 'xai-key' })
```
### 5.2 在 Cherry Studio 中集成
```typescript
// 替换现有的 XxxApiClient
// 之前:
// const openaiClient = new OpenAIApiClient(config)
// const anthropicClient = new AnthropicApiClient(config)
// 现在:
import { createAiSdkClient } from '@cherrystudio/ai-core'
const createProviderClient = async (provider: CherryProvider) => {
return await createAiSdkClient(provider.id, {
apiKey: provider.apiKey,
baseURL: provider.baseURL
})
}
```
### 5.6 完整的工作流示例 (规划中)
```typescript
import {
createAiSdkClient,
AiCoreService,
MiddlewareChain,
PreRequestMiddleware,
StreamProcessingMiddleware,
PostResponseMiddleware
} from '@cherrystudio/ai-core'
// 创建完整的工作流
const createEnhancedAiService = async () => {
// 创建中间件链
const middlewareChain = new MiddlewareChain()
.use(
new PreRequestMiddleware({
validateApiKey: true,
checkRateLimit: true
})
)
.use(
new StreamProcessingMiddleware({
enableProgressTracking: true,
chunkTransform: (chunk) => ({
...chunk,
timestamp: Date.now()
})
})
)
.use(
new PostResponseMiddleware({
saveToHistory: true,
calculateMetrics: true
})
)
// 创建服务实例
const service = new AiCoreService(middlewareChain.middlewares)
return service
// 配置模式使用
const config = {
providerId: 'openai',
options: { apiKey: 'your-api-key' },
plugins: [thinkingPlugin, toolPlugin]
}
// 使用增强服务
const enhancedService = await createEnhancedAiService()
// 流式文本生成
const stream = await streamText(config, 'gpt-4', {
messages: [{ role: 'user', content: 'Hello!' }]
})
const response = await enhancedService.completions({
provider: 'anthropic',
model: 'claude-3-sonnet',
messages: [{ role: 'user', content: 'Write a technical blog post about AI middleware' }],
options: {
temperature: 0.7,
maxTokens: 2000
},
middleware: {
// 中间件特定配置
thinking: { recordSteps: true },
cache: { enabled: true, ttl: 1800 },
logging: { level: 'debug' }
}
// 普通文本生成
const result = await generateText(config, 'gpt-4', {
messages: [{ role: 'user', content: 'Hello!' }]
})
```
## 6. 简化设计原则
### 6.1 最小包装原则
- 直接使用 AI SDK 的类型,不重复定义
- 避免过度抽象和复杂的中间层
- 保持与 AI SDK 原生 API 的一致性
### 6.2 动态导入优化
### 5.2 直接AI SDK模式 (兼容性)
```typescript
// 按需加载,减少打包体积
const module = await import('@ai-sdk/openai')
const createOpenAI = module.createOpenAI
import { streamText } from '@cherrystudio/ai-core'
import { openai } from '@ai-sdk/openai'
// 直接使用AI SDK模式
const stream = await streamText({
model: openai('gpt-4'),
messages: [{ role: 'user', content: 'Hello!' }]
})
```
### 6.3 类型安全
### 5.3 执行器模式 (高级用法)
```typescript
// 直接使用 AI SDK 类型
import { streamText, generateText } from 'ai'
import { AiExecutor } from '@cherrystudio/ai-core'
// 避免重复定义,直接传递参数
return streamText({ model, ...request })
// 创建执行器
const executor = AiExecutor.create('openai', { apiKey: 'your-api-key' }, [plugin1, plugin2])
// 使用执行器
const stream = await executor.streamText('gpt-4', {
messages: [{ role: 'user', content: 'Hello!' }]
})
```
### 6.4 配置简化
## 6. 插件系统详解
### 6.1 插件接口设计
```typescript
// 简化的 Provider 配置
interface ProviderConfig {
id: string // provider 标识
name: string // 显示名称
import: () => Promise<any> // 动态导入函数
creatorFunctionName: string // 创建函数名
export interface AiPlugin {
name: string
collectStreamTransforms?: (context: AiRequestContext) => StreamTransform[]
transformParams?: (params: any, context: AiRequestContext) => Promise<any>
transformResult?: (result: any, context: AiRequestContext) => Promise<any>
onRequest?: (context: AiRequestContext) => Promise<void>
onSuccess?: (result: any, context: AiRequestContext) => Promise<void>
onError?: (error: Error, context: AiRequestContext) => Promise<void>
}
```
## 7. 技术要点
### 6.2 流转换支持
### 7.1 动态导入策略
专门针对 `streamText` 的流转换功能:
- **按需加载**:只加载用户实际使用的 providers
- **缓存机制**:避免重复导入和初始化
- **错误处理**:优雅处理导入失败的情况
```typescript
// 插件收集流转换
const streamTransforms = pluginManager.collectStreamTransforms(context)
### 7.2 依赖管理策略
// 应用到AI SDK
const result = await streamText({
model,
...params,
experimental_transform: streamTransforms.length > 0 ? composeTransforms(streamTransforms) : undefined
})
```
- **核心依赖**`ai` 库作为必需依赖
- **可选依赖**:所有 `@ai-sdk/*` 包都是可选的
- **版本兼容**:支持 AI SDK v3-v5 版本
### 6.3 插件vs中间件
### 7.3 缓存策略
| 功能 | 插件 (Plugins) | 中间件 (Middleware) |
| -------- | -------------------------------- | ------------------- |
| 用途 | 应用特定功能 | AI SDK原生功能 |
| 流转换 | ✅ 支持 `experimental_transform` | ❌ 不支持 |
| 适用范围 | 所有AI方法 | 所有AI方法 |
| 应用时机 | 运行时 | 创建时 |
| 复杂度 | 简单 | 原生AI SDK |
- **客户端缓存**:基于 provider + options 的智能缓存
- **配置哈希**:安全的 API key 哈希处理
- **生命周期管理**:支持缓存清理和验证
## 7. 架构优势
## 8. 迁移策略
### 7.1 分层清晰
### 8.1 阶段一:包基础搭建 (Week 1) ✅ 已完成
- **Orchestration**: 用户友好的API
- **Core**: 模块化的内部实现
- **职责分离**: 每层专注自己的职责
1. ✅ 创建简化的包结构
2. ✅ 实现 Provider 注册表
3. ✅ 创建统一客户端和工厂
4. ✅ 配置构建和类型系统
### 7.2 函数式设计
### 8.2 阶段二:核心功能完善 (Week 2) ✅ 已完成
- 大部分模块使用函数而非类
- 更简洁的代码和更好的可测试性
- 避免不必要的面向对象复杂性
1. ✅ 支持 19+ 官方 AI SDK providers
2. ✅ 实现缓存和错误处理
3. ✅ 完善类型安全和 API 设计
4. ✅ 添加便捷函数和工具
### 7.3 类型安全
### 8.3 阶段三:集成测试 (Week 3) 🔄 进行中
- 统一使用 `types.ts` 中的类型定义
- 避免重复定义,提高维护性
- 完整的 TypeScript 支持
1. 在 Cherry Studio 中集成测试
2. 功能完整性验证
3. 性能基准测试
4. 兼容性问题修复
### 7.4 灵活扩展
### 8.4 阶段四:插件系统实现 ✅ 已完成
- 插件系统支持流转换
- 中间件系统支持AI SDK原生功能
- 模块化设计便于功能扩展
1. **插件核心架构**
## 8. 迁移状态
- 实现 `PluginManager``PluginContext`
- 创建钩子风格插件接口和类型系统
- 建立四种钩子类型执行机制
### 8.1 已完成 ✅
2. **钩子系统**
1. **架构重构** - 分层设计和职责分离
2. **类型系统** - 统一类型定义和复用
3. **函数式设计** - 从类转换为函数
4. **插件系统** - 流转换和通用插件功能
5. **Orchestration层** - 用户友好的API接口
6. **Core层完整实现** - 创建、执行、客户端、中间件、插件、提供商
- `First Hooks`:第一个有效结果执行
- `Sequential Hooks`:链式数据变换
- `Parallel Hooks`:并发副作用处理
- `Stream Hooks`AI SDK 流转换集成
### 8.2 进行中 🔄
3. **优先级和排序**
1. **集成测试** - 在实际项目中完整测试
2. **性能优化** - 确保无性能退化
3. **文档完善** - 使用指南和最佳实践
- `pre`/`normal`/`post` 优先级系统
- 插件注册顺序维护
- 错误处理和插件隔离
### 8.3 计划中 📋
4. **集成到现有架构**
- 在 `UniversalAiSdkClient` 中集成插件管理器
- 更新 `ApiClientFactory` 支持插件配置
- 创建示例插件和使用文档
1. **生态系统扩展** - 更多通用插件
2. **优化改进** - 基于使用反馈的持续改进
3. **社区贡献** - 开源发布和社区生态
### 8.5 阶段五:特性插件扩展 (规划中)
## 9. 技术决策记录
1. **Cherry Studio 特性插件**
### 9.1 为什么选择分层架构?
- `ThinkingPlugin`:思考过程记录和提取
- `ToolCallPlugin`:工具调用处理和增强
- `WebSearchPlugin`:网络搜索集成
- **用户体验**: Orchestration层提供简洁API
- **内部复杂性**: Core层处理复杂逻辑
- **维护性**: 清晰的职责分离
2. **高级功能**
- 插件组合和条件执行
- 动态插件加载系统
- 插件配置管理和持久化
### 9.2 为什么选择函数式设计?
### 8.6 阶段六:文档和发布 (Week 7) 📋 规划中
- **简洁性**: 避免不必要的类设计
- **可测试性**: 函数更容易测试
- **性能**: 减少对象创建开销
1. 完善使用文档和示例
2. 插件开发指南和最佳实践
3. 准备发布到 npm
4. 建立维护流程
### 9.3 为什么分离插件和中间件?
### 8.7 阶段七:生态系统扩展 (Week 8+) 🚀 未来规划
- **职责明确**: 插件处理应用特定需求
- **原生支持**: 中间件使用AI SDK原生功能
- **灵活性**: 两套系统可以独立演进
1. 社区插件生态系统
2. 可视化插件编排工具
3. 性能监控和分析
4. 高级缓存和优化策略
## 10. 总结
## 9. 预期收益
新的AI Core架构实现了
### 9.1 开发效率提升
### 10.1 设计目标
- **90%** 减少新 Provider 接入时间(只需添加注册表配置)
- **70%** 减少维护工作量
- **95%** 提升开发体验(统一接口 + 类型安全)
- **独立开发**:可以独立于主应用开发和测试
- ✅ **分层架构**: 清晰的编排层和核心层分离
- ✅ **函数式设计**: 简洁的函数式API
- ✅ **类型安全**: 统一的类型定义和复用
- ✅ **插件扩展**: 支持流转换的插件系统
- ✅ **易用性**: 多种使用模式满足不同需求
### 9.2 代码质量改善
### 10.2 核心价值
- 完整的 TypeScript 类型安全
- 统一的错误处理机制
- 标准化的 AI SDK 接口
- 更好的测试覆盖率
- **统一接口**: 一套API支持19+ AI providers
- **灵活使用**: 配置模式、AI SDK模式、执行器模式
- **强类型**: 完整的TypeScript支持
- **可扩展**: 插件和中间件双重扩展能力
- **高性能**: 最小化包装直接使用AI SDK
### 9.3 架构优势
### 10.3 未来发展
- **轻量级**:最小化的包装层
- **可复用**:其他项目可以直接使用
- **可维护**:独立版本管理和发布
- **可扩展**:新 provider 只需配置即可
这个架构提供了坚实的AI基础设施支持
### 9.4 生态系统价值
- 支持 AI SDK 的完整生态系统
- 可以独立发布到 npm
- 为开源社区贡献价值
- 建立统一的 AI 基础设施
## 10. 风险评估与应对
### 10.1 技术风险
- **AI SDK 版本兼容**:支持多版本兼容策略
- **依赖管理**:合理使用 peerDependencies
- **类型一致性**:直接使用 AI SDK 类型
- **性能影响**:最小化包装层开销
### 10.2 迁移风险
- **功能对等性**:确保所有现有功能都能实现
- **API 兼容性**:提供平滑的迁移路径
- **集成复杂度**:保持简单的集成方式
- **学习成本**:提供清晰的使用文档
## 11. 总结
简化的 AI Core 架构专注于核心价值:
### 11.1 核心价值
- **统一接口**:一套 API 支持 19+ AI providers
- **按需加载**:只打包用户实际使用的 providers
- **类型安全**:完整的 TypeScript 支持
- **轻量高效**:最小化的包装层
### 11.2 设计哲学
- **直接使用 AI SDK**:避免重复造轮子,充分利用原生能力
- **最小包装**:只在必要时添加抽象层,保持轻量高效
- **开发者友好**:简单易用的 API 设计,熟悉的钩子风格
- **生态兼容**:充分利用 AI SDK 生态系统和原生流转换
- **插件优先**:基于钩子的扩展模式,支持灵活组合
### 11.3 成功关键
1. **保持简单**:专注核心功能,避免过度设计
2. **充分测试**:确保功能完整性和稳定性
3. **渐进迁移**:平滑过渡,降低风险
4. **文档完善**:支持快速上手和深度使用
这个基于钩子的插件系统架构为 Cherry Studio 提供了一个轻量、高效、可维护的 AI 基础设施,通过熟悉的钩子模式和原生 AI SDK 集成,为开发者提供了强大而简洁的扩展能力,同时为社区贡献了一个高质量的开源包。
- 持续的功能扩展
- 良好的开发体验
- 社区生态建设
- 跨项目复用价值

View File

@ -1,223 +0,0 @@
/**
* API Client Factory
* API客户端工厂
*/
import type { ImageModelV1 } from '@ai-sdk/provider'
import { type LanguageModelV1, LanguageModelV1Middleware, wrapLanguageModel } from 'ai'
import { aiProviderRegistry } from '../providers/registry'
import { type ProviderId, type ProviderSettingsMap } from './types'
// 客户端配置接口
export interface ClientConfig {
providerId: string
options?: any
}
/**
 * Error raised when an AI SDK client cannot be created.
 *
 * Carries the provider id that failed (when known) and the underlying
 * cause so callers can inspect the root error.
 */
export class ClientFactoryError extends Error {
  constructor(
    message: string,
    public providerId?: string,
    public cause?: Error
  ) {
    super(message)
    // Give the error a stable, greppable name for logs and `instanceof`-less checks.
    this.name = 'ClientFactoryError'
  }
}
/**
 * ApiClientFactory
 *
 * Creates AI SDK language/image model instances for a registered provider by
 * dynamically importing the provider package listed in `aiProviderRegistry`.
 * Unknown providers fall back to the 'openai-compatible' creator.
 */
export class ApiClientFactory {
  /**
   * Create a language model client for the given provider and model id.
   *
   * Providers not present in the registry are treated as 'openai-compatible'.
   * Optional AI SDK middlewares are applied by wrapping the created model.
   *
   * @throws ClientFactoryError when the provider is unregistered, its creator
   *         function is missing, or the module exposes an unknown access pattern.
   */
  static async createClient<T extends ProviderId>(
    providerId: T,
    modelId: string,
    options: ProviderSettingsMap[T],
    middlewares?: LanguageModelV1Middleware[]
  ): Promise<LanguageModelV1>
  static async createClient(
    providerId: string,
    modelId: string,
    options: ProviderSettingsMap['openai-compatible'],
    middlewares?: LanguageModelV1Middleware[]
  ): Promise<LanguageModelV1>
  static async createClient(
    providerId: string,
    modelId: string = 'default',
    options: any,
    middlewares?: LanguageModelV1Middleware[]
  ): Promise<LanguageModelV1> {
    try {
      // Providers missing from the registry default to 'openai-compatible'.
      const effectiveProviderId = aiProviderRegistry.isSupported(providerId) ? providerId : 'openai-compatible'
      // Look up the provider's registry entry (import function + creator name).
      const providerConfig = aiProviderRegistry.getProvider(effectiveProviderId)
      if (!providerConfig) {
        throw new ClientFactoryError(`Provider "${effectiveProviderId}" is not registered`, providerId)
      }
      // Dynamically import the provider module (on-demand loading).
      const module = await providerConfig.import()
      // Resolve the creator function exported by the module.
      const creatorFunction = module[providerConfig.creatorFunctionName]
      if (typeof creatorFunction !== 'function') {
        throw new ClientFactoryError(
          `Creator function "${providerConfig.creatorFunctionName}" not found in the imported module for provider "${effectiveProviderId}"`
        )
      }
      // Instantiate the provider with the user-supplied options.
      const provider = creatorFunction(options)
      // AI SDK providers are callable: provider(modelId) yields the model.
      if (typeof provider === 'function') {
        let model: LanguageModelV1 = provider(modelId)
        // Apply AI SDK middlewares, if any, by wrapping the model.
        if (middlewares && middlewares.length > 0) {
          model = wrapLanguageModel({
            model: model,
            middleware: middlewares
          })
        }
        return model
      } else {
        throw new ClientFactoryError(`Unknown model access pattern for provider "${effectiveProviderId}"`)
      }
    } catch (error) {
      // Re-throw our own errors untouched; wrap anything else with context.
      if (error instanceof ClientFactoryError) {
        throw error
      }
      throw new ClientFactoryError(
        `Failed to create client for provider "${providerId}": ${error instanceof Error ? error.message : 'Unknown error'}`,
        providerId,
        error instanceof Error ? error : undefined
      )
    }
  }

  /**
   * Create an image-generation model client.
   *
   * Unlike createClient, this does NOT fall back to 'openai-compatible':
   * the provider must be registered and flagged with supportsImageGeneration.
   *
   * @throws ClientFactoryError on unsupported/unregistered providers or when
   *         the provider module lacks an `image` factory function.
   */
  static async createImageClient<T extends ProviderId>(
    providerId: T,
    modelId: string,
    options: ProviderSettingsMap[T]
  ): Promise<ImageModelV1>
  static async createImageClient(
    providerId: string,
    modelId: string,
    options: ProviderSettingsMap['openai-compatible']
  ): Promise<ImageModelV1>
  static async createImageClient(providerId: string, modelId: string = 'default', options: any): Promise<ImageModelV1> {
    try {
      if (!aiProviderRegistry.isSupported(providerId)) {
        throw new ClientFactoryError(`Provider "${providerId}" is not supported`, providerId)
      }
      const providerConfig = aiProviderRegistry.getProvider(providerId)
      if (!providerConfig) {
        throw new ClientFactoryError(`Provider "${providerId}" is not registered`, providerId)
      }
      if (!providerConfig.supportsImageGeneration) {
        throw new ClientFactoryError(`Provider "${providerId}" does not support image generation`, providerId)
      }
      const module = await providerConfig.import()
      const creatorFunction = module[providerConfig.creatorFunctionName]
      if (typeof creatorFunction !== 'function') {
        throw new ClientFactoryError(
          `Creator function "${providerConfig.creatorFunctionName}" not found in the imported module for provider "${providerId}"`
        )
      }
      const provider = creatorFunction(options)
      // Image models are exposed via provider.image(modelId).
      if (provider && typeof provider.image === 'function') {
        return provider.image(modelId)
      } else {
        throw new ClientFactoryError(`Image model function not found for provider "${providerId}"`)
      }
    } catch (error) {
      if (error instanceof ClientFactoryError) {
        throw error
      }
      throw new ClientFactoryError(
        `Failed to create image client for provider "${providerId}": ${error instanceof Error ? error.message : 'Unknown error'}`,
        providerId,
        error instanceof Error ? error : undefined
      )
    }
  }

  /**
   * List all providers registered in the registry (id + display name).
   */
  static getSupportedProviders(): Array<{
    id: string
    name: string
  }> {
    return aiProviderRegistry.getAllProviders().map((provider) => ({
      id: provider.id,
      name: provider.name
    }))
  }

  /**
   * Describe how a provider id will be handled: whether it is natively
   * supported and which effective provider ('openai-compatible' fallback
   * otherwise) will actually be used.
   */
  static getClientInfo(providerId: string): {
    id: string
    name: string
    isSupported: boolean
    effectiveProvider: string
  } {
    const effectiveProviderId = aiProviderRegistry.isSupported(providerId) ? providerId : 'openai-compatible'
    const provider = aiProviderRegistry.getProvider(effectiveProviderId)
    return {
      id: providerId,
      name: provider?.name || providerId,
      isSupported: aiProviderRegistry.isSupported(providerId),
      effectiveProvider: effectiveProviderId
    }
  }
}
// 便捷导出函数
export function createClient<T extends ProviderId>(
providerId: T,
modelId: string,
options: ProviderSettingsMap[T]
): Promise<LanguageModelV1>
export function createClient(
providerId: string,
modelId: string,
options: ProviderSettingsMap['openai-compatible']
): Promise<LanguageModelV1>
export function createClient(providerId: string, modelId: string = 'default', options: any): Promise<LanguageModelV1> {
return ApiClientFactory.createClient(providerId, modelId, options)
}
// Convenience export: create an image-generation model for a provider.
export const createImageClient = (providerId: string, modelId: string, options: any): Promise<ImageModelV1> =>
  ApiClientFactory.createImageClient(providerId, modelId, options)
// Convenience re-exports of the factory's static query helpers.
export const getSupportedProviders = () => ApiClientFactory.getSupportedProviders()
export const getClientInfo = (providerId: string) => ApiClientFactory.getClientInfo(providerId)

View File

@ -1,381 +0,0 @@
/**
 * AI Client - Cherry Studio AI Core
 * Plugin-enabled AI client
 *
 * ## Usage
*
* ```typescript
* import { AiClient } from '@cherrystudio/ai-core'
*
* // 创建客户端(默认带插件系统)
* const client = AiClient.create('openai', {
* name: 'openai',
* apiKey: process.env.OPENAI_API_KEY
* }, [LoggingPlugin, ContentFilterPlugin])
*
* // 使用方式与 UniversalAiSdkClient 完全相同
* const result = await client.generateText('gpt-4', {
* messages: [{ role: 'user', content: 'Hello!' }]
* })
* ```
*/
import { generateObject, generateText, LanguageModelV1Middleware, streamObject, streamText } from 'ai'
import { AiPlugin, createContext, PluginManager } from '../plugins'
import { isProviderSupported } from '../providers/registry'
import { ApiClientFactory } from './ApiClientFactory'
import { type ProviderId, type ProviderSettingsMap } from './types'
import { UniversalAiSdkClient } from './UniversalAiSdkClient'
/**
* Cherry Studio AI Core
* AI
*/
export class PluginEnabledAiClient<T extends ProviderId = ProviderId> {
private pluginManager: PluginManager
private baseClient: UniversalAiSdkClient<T>
private middlewares: LanguageModelV1Middleware[] = []
/**
 * Build a plugin-enabled client for one provider.
 *
 * @param providerId Provider identifier (fixed for the client's lifetime).
 * @param options    Provider settings (API key, base URL, ...).
 * @param plugins    Initial plugin list; more can be added via use()/usePlugins().
 */
constructor(
  private readonly providerId: T,
  private readonly options: ProviderSettingsMap[T],
  plugins: AiPlugin[] = []
) {
  this.pluginManager = new PluginManager(plugins)
  this.baseClient = UniversalAiSdkClient.create(providerId, options)
  // Derive the AI SDK middleware list from the registered plugins.
  this.updateMiddlewares()
}
/**
 * Register one plugin and refresh the derived middleware list.
 * Returns `this` so registrations can be chained.
 */
use(plugin: AiPlugin): this {
  this.pluginManager.use(plugin)
  this.updateMiddlewares()
  return this
}
/**
 * Register several plugins in one call, refreshing the derived middleware
 * list once afterwards. Returns `this` for chaining.
 */
usePlugins(plugins: AiPlugin[]): this {
  for (const plugin of plugins) {
    this.pluginManager.use(plugin)
  }
  this.updateMiddlewares()
  return this
}
/**
 * Unregister a plugin by name and refresh the derived middleware list.
 * Returns `this` for chaining.
 */
removePlugin(pluginName: string): this {
  this.pluginManager.remove(pluginName)
  this.updateMiddlewares()
  return this
}
/**
 * Recompute the AI SDK middleware list from the currently registered
 * plugins. Invoked after every plugin registration or removal so the
 * cached list never goes stale.
 */
private updateMiddlewares(): void {
  this.middlewares = this.pluginManager.collectAiSdkMiddlewares()
}
/**
 * Plugin usage statistics, as reported by the plugin manager.
 */
getPluginStats() {
  return this.pluginManager.getStats()
}
/**
 * The list of currently registered plugins.
 */
getPlugins() {
  return this.pluginManager.getPlugins()
}
/**
 * Shared non-streaming pipeline: wraps a concrete API call in the plugin
 * lifecycle (steps 1-7 below). Step order is significant.
 *
 * @param methodName Name of the AI method being executed.
 *                   NOTE(review): currently unused in the body — presumably
 *                   intended for context/telemetry; confirm before removing.
 * @param modelId    Model id as requested by the caller (may be an alias).
 * @param params     Caller-supplied parameters, before plugin transforms.
 * @param executor   Performs the actual AI SDK call with the resolved model
 *                   id and the transformed params.
 */
private async executeWithPlugins<TParams, TResult>(
  methodName: string,
  modelId: string,
  params: TParams,
  executor: (finalModelId: string, transformedParams: TParams) => Promise<TResult>
): Promise<TResult> {
  // Build the per-request plugin context.
  const context = createContext(this.providerId, modelId, params)
  try {
    // 1. Fire request-start hooks (parallel, side effects only).
    await this.pluginManager.executeParallel('onRequestStart', context)
    // 2. Resolve model aliases — first plugin returning a value wins;
    //    fall back to the caller's model id when no plugin resolves it.
    const resolvedModelId = await this.pluginManager.executeFirst<string>('resolveModel', modelId, context)
    const finalModelId = resolvedModelId || modelId
    // 3. Transform request parameters (sequential chain).
    const transformedParams = await this.pluginManager.executeSequential('transformParams', params, context)
    // 4. Execute the concrete API call.
    const result = await executor(finalModelId, transformedParams)
    // 5. Transform the result (non-streaming calls only).
    const transformedResult = await this.pluginManager.executeSequential('transformResult', result, context)
    // 6. Fire request-end hooks with the final result.
    await this.pluginManager.executeParallel('onRequestEnd', context, transformedResult)
    return transformedResult
  } catch (error) {
    // 7. Fire error hooks, then rethrow to the caller.
    await this.pluginManager.executeParallel('onError', context, undefined, error as Error)
    throw error
  }
}
/**
 * Shared streaming pipeline: like executeWithPlugins, but collects plugin
 * stream transforms instead of running result transforms, since the result
 * is a live stream rather than a final value.
 *
 * @param methodName Name of the streaming AI method being executed.
 *                   NOTE(review): currently unused in the body — confirm intent.
 * @param modelId    Model id as requested by the caller (may be an alias).
 * @param params     Caller-supplied parameters, before plugin transforms.
 * @param executor   Performs the streaming call with the resolved model id,
 *                   transformed params and collected stream transforms.
 */
private async executeStreamWithPlugins<TParams, TResult>(
  methodName: string,
  modelId: string,
  params: TParams,
  executor: (finalModelId: string, transformedParams: TParams, streamTransforms: any[]) => Promise<TResult>
): Promise<TResult> {
  // Build the per-request plugin context.
  const context = createContext(this.providerId, modelId, params)
  try {
    // 1. Fire request-start hooks.
    await this.pluginManager.executeParallel('onRequestStart', context)
    // 2. Resolve model aliases — first plugin returning a value wins.
    const resolvedModelId = await this.pluginManager.executeFirst<string>('resolveModel', modelId, context)
    const finalModelId = resolvedModelId || modelId
    // 3. Transform request parameters (sequential chain).
    const transformedParams = await this.pluginManager.executeSequential('transformParams', params, context)
    // 4. Collect the stream transforms contributed by plugins.
    const streamTransforms = this.pluginManager.collectStreamTransforms()
    // 5. Execute the streaming API call.
    const result = await executor(finalModelId, transformedParams, streamTransforms)
    // 6. Fire request-end hooks. For streaming calls this marks the START of
    //    the streamed response, not its completion.
    await this.pluginManager.executeParallel('onRequestEnd', context, { stream: true })
    return result
  } catch (error) {
    // 7. Fire error hooks, then rethrow.
    await this.pluginManager.executeParallel('onError', context, undefined, error as Error)
    throw error
  }
}
/**
 * Build an AI SDK model for `modelId`, applying any middlewares configured
 * on this client. Delegates to ApiClientFactory; the middleware argument is
 * omitted entirely when none are configured.
 */
private async getModelWithMiddlewares(modelId: string) {
  const mws = this.middlewares
  const maybeMiddlewares = mws.length ? mws : undefined
  return ApiClientFactory.createClient(this.providerId, modelId, this.options, maybeMiddlewares)
}
/**
 * Streaming text generation. Two call styles:
 *  - (modelId, params): the model is built internally (middlewares applied)
 *    and the call runs through the plugin pipeline; plugin-collected stream
 *    transforms are used unless the caller passes `experimental_transform`.
 *  - (params containing model): caller supplies a ready model (e.g. from an
 *    external registry); the plugin pipeline is bypassed entirely.
 */
async streamText(
  modelId: string,
  params: Omit<Parameters<typeof streamText>[0], 'model'>
): Promise<ReturnType<typeof streamText>>
async streamText(params: Parameters<typeof streamText>[0]): Promise<ReturnType<typeof streamText>>
async streamText(
  modelIdOrParams: string | Parameters<typeof streamText>[0],
  params?: Omit<Parameters<typeof streamText>[0], 'model'>
): Promise<ReturnType<typeof streamText>> {
  if (typeof modelIdOrParams === 'string') {
    // modelId form: plugin pipeline with internally-built model
    return this.executeStreamWithPlugins(
      'streamText',
      modelIdOrParams,
      params!,
      async (finalModelId, transformedParams, streamTransforms) => {
        const model = await this.getModelWithMiddlewares(finalModelId)
        // Caller-supplied transform wins over plugin-collected ones
        const experimental_transform =
          params?.experimental_transform ?? (streamTransforms.length > 0 ? streamTransforms : undefined)
        return await streamText({
          model,
          ...transformedParams,
          experimental_transform
        })
      }
    )
  } else {
    // params form: user-provided model, no plugin processing
    return streamText(modelIdOrParams)
  }
}
/**
 * Non-streaming text generation, run through the full plugin pipeline
 * (request hooks, model resolution, param/result transforms).
 */
async generateText(
  modelId: string,
  params: Omit<Parameters<typeof generateText>[0], 'model'>
): Promise<ReturnType<typeof generateText>> {
  const run = async (resolvedId: string, finalParams: typeof params) => {
    const model = await this.getModelWithMiddlewares(resolvedId)
    return generateText({ model, ...finalParams })
  }
  return this.executeWithPlugins('generateText', modelId, params, run)
}
/**
 * Structured object generation. Two call styles:
 *  - (modelId, params): model built internally, plugin pipeline applied.
 *  - (params containing model): caller-supplied model, plugins bypassed.
 */
async generateObject(
  modelId: string,
  params: Omit<Parameters<typeof generateObject>[0], 'model'>
): Promise<ReturnType<typeof generateObject>>
async generateObject(params: Parameters<typeof generateObject>[0]): Promise<ReturnType<typeof generateObject>>
async generateObject(
  modelIdOrParams: string | Parameters<typeof generateObject>[0],
  params?: Omit<Parameters<typeof generateObject>[0], 'model'>
): Promise<ReturnType<typeof generateObject>> {
  if (typeof modelIdOrParams === 'string') {
    // modelId form: plugin pipeline with internally-built model
    return this.executeWithPlugins(
      'generateObject',
      modelIdOrParams,
      params!,
      async (finalModelId, transformedParams) => {
        const model = await this.getModelWithMiddlewares(finalModelId)
        return await generateObject({ model, ...transformedParams })
      }
    )
  } else {
    // params form: user-provided model, no plugin processing
    return await generateObject(modelIdOrParams)
  }
}
/**
 * Streaming structured object generation. Two call styles:
 *  - (modelId, params): model built internally, plugin pipeline applied.
 *  - (params containing model): caller-supplied model, plugins bypassed.
 */
async streamObject(
  modelId: string,
  params: Omit<Parameters<typeof streamObject>[0], 'model'>
): Promise<ReturnType<typeof streamObject>>
async streamObject(params: Parameters<typeof streamObject>[0]): Promise<ReturnType<typeof streamObject>>
async streamObject(
  modelIdOrParams: string | Parameters<typeof streamObject>[0],
  params?: Omit<Parameters<typeof streamObject>[0], 'model'>
): Promise<ReturnType<typeof streamObject>> {
  if (typeof modelIdOrParams === 'string') {
    // modelId form: plugin pipeline with internally-built model.
    // FIX: this previously delegated to this.baseClient.streamObject, which
    // builds its model WITHOUT the configured middlewares — inconsistent
    // with generateText/generateObject above. Build the model the same way
    // here so middlewares apply to streamObject too.
    return this.executeWithPlugins(
      'streamObject',
      modelIdOrParams,
      params!,
      async (finalModelId, transformedParams) => {
        const model = await this.getModelWithMiddlewares(finalModelId)
        return await streamObject({ model, ...transformedParams })
      }
    )
  } else {
    // params form: user-provided model, no plugin processing
    return await streamObject(modelIdOrParams)
  }
}
/**
 * Info about the underlying client/provider (delegates to the base client).
 */
getClientInfo() {
  return this.baseClient.getClientInfo()
}
/**
 * Escape hatch: access the raw, plugin-free client for advanced use.
 */
getBaseClient(): UniversalAiSdkClient<T> {
  return this.baseClient
}
// === Static factory methods ===
/**
 * Build a plugin-enabled client for any OpenAI-compatible endpoint.
 */
static createOpenAICompatible(
  config: ProviderSettingsMap['openai-compatible'],
  plugins: AiPlugin[] = []
): PluginEnabledAiClient<'openai-compatible'> {
  return new PluginEnabledAiClient('openai-compatible', config, plugins)
}
/**
 * Build a plugin-enabled client for a named provider; ids missing from the
 * registry fall back to the 'openai-compatible' implementation.
 */
static create<T extends ProviderId>(
  providerId: T,
  options: ProviderSettingsMap[T],
  plugins?: AiPlugin[]
): PluginEnabledAiClient<T>
static create(
  providerId: string,
  options: ProviderSettingsMap['openai-compatible'],
  plugins?: AiPlugin[]
): PluginEnabledAiClient<'openai-compatible'>
static create(providerId: string, options: any, plugins: AiPlugin[] = []): PluginEnabledAiClient {
  if (isProviderSupported(providerId)) {
    return new PluginEnabledAiClient(providerId as ProviderId, options, plugins)
  } else {
    // Unknown providers are treated as OpenAI-compatible endpoints
    return new PluginEnabledAiClient('openai-compatible', options, plugins)
  }
}
}
/**
 * Convenience factory for a plugin-enabled AI client.
 * Unknown provider ids fall back to 'openai-compatible'.
 */
export function createClient<T extends ProviderId>(
  providerId: T,
  options: ProviderSettingsMap[T],
  plugins?: AiPlugin[]
): PluginEnabledAiClient<T>
export function createClient(
  providerId: string,
  options: ProviderSettingsMap['openai-compatible'],
  plugins?: AiPlugin[]
): PluginEnabledAiClient<'openai-compatible'>
export function createClient(providerId: string, options: any, plugins: AiPlugin[] = []): PluginEnabledAiClient {
  const client = PluginEnabledAiClient.create(providerId, options, plugins)
  return client
}
/**
 * Shorthand for building an OpenAI-compatible plugin-enabled client.
 */
export function createCompatibleClient(
  config: ProviderSettingsMap['openai-compatible'],
  plugins: AiPlugin[] = []
): PluginEnabledAiClient<'openai-compatible'> {
  const client = PluginEnabledAiClient.createOpenAICompatible(config, plugins)
  return client
}

View File

@ -1,216 +0,0 @@
/**
* Universal AI SDK Client
* AI SDK客户端实现
*
* ## 使
*
* ### 1.
* ```typescript
* import { UniversalAiSdkClient } from '@cherrystudio/ai-core'
*
* // OpenAI
* const openai = UniversalAiSdkClient.create('openai', {
* name: 'openai',
* apiHost: 'https://api.openai.com/v1',
* apiKey: process.env.OPENAI_API_KEY
* })
*
* // Anthropic
* const anthropic = UniversalAiSdkClient.create('anthropic', {
* name: 'anthropic',
* apiHost: 'https://api.anthropic.com',
* apiKey: process.env.ANTHROPIC_API_KEY
* })
* ```
*
* ### 2. OpenAI Compatible
* ```typescript
* // LM Studio (本地运行)
* const lmStudio = UniversalAiSdkClient.createOpenAICompatible({
* name: 'lm-studio',
* baseURL: 'http://localhost:1234/v1'
* })
*
* // Ollama (本地运行)
* const ollama = UniversalAiSdkClient.createOpenAICompatible({
* name: 'ollama',
* baseURL: 'http://localhost:11434/v1'
* })
*
* // 自定义第三方 API
* const customProvider = UniversalAiSdkClient.createOpenAICompatible({
* name: 'my-provider',
* apiKey: process.env.CUSTOM_API_KEY,
* baseURL: 'https://api.customprovider.com/v1',
* headers: {
* 'X-Custom-Header': 'value',
* 'User-Agent': 'MyApp/1.0'
* },
* queryParams: {
* 'api-version': '2024-01'
* }
* })
* ```
*
* ### 3. 使 AI
* ```typescript
* // 流式文本生成
* const stream = await client.streamText('gpt-4', {
* messages: [{ role: 'user', content: 'Hello!' }]
* })
*
* // 生成文本
* const { text } = await client.generateText('gpt-4', {
* messages: [{ role: 'user', content: 'Hello!' }]
* })
*
* // 生成结构化对象
* const { object } = await client.generateObject('gpt-4', {
* messages: [{ role: 'user', content: 'Generate a user profile' }],
* schema: z.object({
* name: z.string(),
* age: z.number()
* })
* })
* ```
*/
import { experimental_generateImage as generateImage, generateObject, generateText, streamObject, streamText } from 'ai'
import { ApiClientFactory } from './ApiClientFactory'
import { type ProviderId, type ProviderSettingsMap } from './types'
/**
 * Universal AI SDK client.
 * A minimal provider-agnostic wrapper: each call builds the model through
 * ApiClientFactory and then delegates directly to the corresponding AI SDK
 * function (streamText / generateText / generateObject / streamObject /
 * generateImage).
 */
export class UniversalAiSdkClient<T extends ProviderId = ProviderId> {
  constructor(
    private readonly providerId: T,
    private readonly options: ProviderSettingsMap[T]
  ) {}

  /**
   * Streaming text generation (AI SDK `streamText`).
   */
  async streamText(modelId: string, params: Omit<Parameters<typeof streamText>[0], 'model'>) {
    const model = await ApiClientFactory.createClient(this.providerId, modelId, this.options)
    return await streamText({
      model,
      ...params
    })
  }

  /**
   * Text generation (AI SDK `generateText`).
   */
  async generateText(modelId: string, params: Omit<Parameters<typeof generateText>[0], 'model'>) {
    const model = await ApiClientFactory.createClient(this.providerId, modelId, this.options)
    return await generateText({
      model,
      ...params
    })
  }

  /**
   * Structured object generation (AI SDK `generateObject`).
   */
  async generateObject(modelId: string, params: Omit<Parameters<typeof generateObject>[0], 'model'>) {
    const model = await ApiClientFactory.createClient(this.providerId, modelId, this.options)
    return await generateObject({
      model,
      ...params
    })
  }

  /**
   * Streaming structured object generation (AI SDK `streamObject`).
   */
  async streamObject(modelId: string, params: Omit<Parameters<typeof streamObject>[0], 'model'>) {
    const model = await ApiClientFactory.createClient(this.providerId, modelId, this.options)
    return await streamObject({
      model,
      ...params
    })
  }

  /**
   * Image generation (AI SDK experimental `generateImage`).
   */
  async generateImage(
    modelId: string,
    params: Omit<Parameters<typeof generateImage>[0], 'model'>
  ): Promise<ReturnType<typeof generateImage>> {
    const model = await ApiClientFactory.createImageClient(this.providerId, modelId, this.options)
    return generateImage({
      model,
      ...params
    })
  }

  /**
   * Info about the underlying provider.
   */
  getClientInfo() {
    return ApiClientFactory.getClientInfo(this.providerId)
  }

  // === Static factory methods ===

  /**
   * Build a client for any OpenAI-compatible endpoint.
   */
  static createOpenAICompatible(
    config: ProviderSettingsMap['openai-compatible']
  ): UniversalAiSdkClient<'openai-compatible'> {
    return new UniversalAiSdkClient('openai-compatible', config)
  }

  /**
   * Build a client for a named provider.
   */
  static create<T extends ProviderId>(providerId: T, options: ProviderSettingsMap[T]): UniversalAiSdkClient<T>
  static create(
    providerId: string,
    options: ProviderSettingsMap['openai-compatible']
  ): UniversalAiSdkClient<'openai-compatible'>
  static create(providerId: string, options: any): UniversalAiSdkClient {
    // FIX: the previous guard `providerId in ({} as ProviderSettingsMap)` was
    // always false at runtime — the cast is type-only and erased, leaving a
    // plain `{}` with no keys — so EVERY provider, supported or not, was
    // silently downgraded to 'openai-compatible'. Pass the id through
    // instead; the factory layer is expected to perform its own fallback for
    // unsupported ids (see createBaseModel) — confirm ApiClientFactory does.
    return new UniversalAiSdkClient(providerId as ProviderId, options)
  }
}
/**
 * Convenience factory for UniversalAiSdkClient.
 */
export function createUniversalClient<T extends ProviderId>(
  providerId: T,
  options: ProviderSettingsMap[T]
): UniversalAiSdkClient<T>
export function createUniversalClient(
  providerId: string,
  options: ProviderSettingsMap['openai-compatible']
): UniversalAiSdkClient<'openai-compatible'>
export function createUniversalClient(providerId: string, options: any): UniversalAiSdkClient {
  const client = UniversalAiSdkClient.create(providerId, options)
  return client
}
/**
 * Shorthand for an OpenAI-compatible UniversalAiSdkClient.
 */
export function createOpenAICompatibleClient(
  config: ProviderSettingsMap['openai-compatible']
): UniversalAiSdkClient<'openai-compatible'> {
  const client = UniversalAiSdkClient.createOpenAICompatible(config)
  return client
}

View File

@ -0,0 +1,3 @@
# @cherryStudio-aiCore

Core package of the AI SDK abstraction layer (providers, plugins, execution).

View File

@ -0,0 +1,238 @@
/**
* AI Client - Cherry Studio AI Core
* AI
*
* ## 使
*
* ```typescript
* import { AiClient } from '@cherrystudio/ai-core'
*
* // 创建客户端(默认带插件系统)
* const client = AiClient.create('openai', {
* name: 'openai',
* apiKey: process.env.OPENAI_API_KEY
* }, [LoggingPlugin, ContentFilterPlugin])
*
* // 使用方式与 UniversalAiSdkClient 完全相同
* const result = await client.generateText('gpt-4', {
* messages: [{ role: 'user', content: 'Hello!' }]
* })
* ```
*/
import { type ProviderId, type ProviderSettingsMap } from '../../types'
import { getProviderInfo } from '..'
import { type AiPlugin, createContext, PluginManager } from '../plugins'
import { isProviderSupported } from '../providers/registry'
/**
 * Plugin-enabled AI client.
 * Owns a PluginManager and runs each API call through the plugin lifecycle
 * hooks: request start/end, model resolution, parameter/result transforms,
 * and error reporting. Model construction itself now lives in the core
 * creation layer; this class no longer holds provider options.
 */
export class PluginEnabledAiClient<T extends ProviderId = ProviderId> {
  private pluginManager: PluginManager
  constructor(
    private readonly providerId: T,
    // private readonly options: ProviderSettingsMap[T],
    plugins: AiPlugin[] = []
  ) {
    this.pluginManager = new PluginManager(plugins)
  }
  /**
   * Register a single plugin (chainable).
   */
  use(plugin: AiPlugin): this {
    this.pluginManager.use(plugin)
    return this
  }
  /**
   * Register several plugins at once (chainable).
   */
  usePlugins(plugins: AiPlugin[]): this {
    plugins.forEach((plugin) => this.use(plugin))
    return this
  }
  /**
   * Remove a plugin by name (chainable).
   */
  removePlugin(pluginName: string): this {
    this.pluginManager.remove(pluginName)
    return this
  }
  /**
   * Statistics about registered plugins.
   */
  getPluginStats() {
    return this.pluginManager.getStats()
  }
  /**
   * Currently registered plugins.
   */
  getPlugins() {
    return this.pluginManager.getPlugins()
  }
  // /**
  //  * Create a middleware-wrapped model via the core module
  //  */
  // async createModelWithMiddlewares(modelId: string): Promise<any> {
  //   // Resolve config through the core module's resolveConfig
  //   const config = resolveConfig(this.providerId, modelId, this.options, this.pluginManager.getPlugins())
  //   // Create the wrapped model through the core module
  //   return createModelFromConfig(config)
  // }
  /**
   * Run a non-streaming call through the plugin pipeline:
   * onRequestStart → resolveModel → transformParams → executor →
   * transformResult → onRequestEnd (onError on failure, then rethrow).
   * Public so that AiExecutor can drive it with an externally-built model.
   *
   * @param methodName - not referenced in this body — TODO confirm intent
   */
  async executeWithPlugins<TParams, TResult>(
    methodName: string,
    modelId: string,
    params: TParams,
    executor: (finalModelId: string, transformedParams: TParams) => Promise<TResult>
  ): Promise<TResult> {
    // Per-request context shared by all plugin hooks
    const context = createContext(this.providerId, modelId, params)
    try {
      // 1. Fire request-start hooks
      await this.pluginManager.executeParallel('onRequestStart', context)
      // 2. Resolve model aliases; first plugin returning a value wins
      const resolvedModelId = await this.pluginManager.executeFirst<string>('resolveModel', modelId, context)
      const finalModelId = resolvedModelId || modelId
      // 3. Transform request params sequentially through each plugin
      const transformedParams = await this.pluginManager.executeSequential('transformParams', params, context)
      // 4. Perform the actual API call
      const result = await executor(finalModelId, transformedParams)
      // 5. Transform the result (non-streaming calls only)
      const transformedResult = await this.pluginManager.executeSequential('transformResult', result, context)
      // 6. Fire request-end hooks
      await this.pluginManager.executeParallel('onRequestEnd', context, transformedResult)
      return transformedResult
    } catch (error) {
      // 7. Fire error hooks, then propagate
      await this.pluginManager.executeParallel('onError', context, undefined, error as Error)
      throw error
    }
  }
  /**
   * Streaming variant of executeWithPlugins: no transformResult step;
   * plugin stream transforms are collected and handed to the executor.
   * Public so that AiExecutor can drive it with an externally-built model.
   *
   * @param methodName - not referenced in this body — TODO confirm intent
   */
  async executeStreamWithPlugins<TParams, TResult>(
    methodName: string,
    modelId: string,
    params: TParams,
    executor: (finalModelId: string, transformedParams: TParams, streamTransforms: any[]) => Promise<TResult>
  ): Promise<TResult> {
    // Per-request context shared by all plugin hooks
    const context = createContext(this.providerId, modelId, params)
    try {
      // 1. Fire request-start hooks
      await this.pluginManager.executeParallel('onRequestStart', context)
      // 2. Resolve model aliases; first plugin returning a value wins
      const resolvedModelId = await this.pluginManager.executeFirst<string>('resolveModel', modelId, context)
      const finalModelId = resolvedModelId || modelId
      // 3. Transform request params sequentially through each plugin
      const transformedParams = await this.pluginManager.executeSequential('transformParams', params, context)
      // 4. Collect stream transforms contributed by plugins
      const streamTransforms = this.pluginManager.collectStreamTransforms()
      // 5. Start the streaming API call
      const result = await executor(finalModelId, transformedParams, streamTransforms)
      // 6. Fire request-end hooks (for streams this marks stream START,
      //    not completion of the response)
      await this.pluginManager.executeParallel('onRequestEnd', context, { stream: true })
      return result
    } catch (error) {
      // 7. Fire error hooks, then propagate
      await this.pluginManager.executeParallel('onError', context, undefined, error as Error)
      throw error
    }
  }
  /**
   * Provider info for this client.
   */
  getClientInfo() {
    return getProviderInfo(this.providerId)
  }
  // /**
  //  * Access the underlying client instance (advanced use)
  //  */
  // getBaseClient(): UniversalAiSdkClient<T> {
  //   return this.baseClient
  // }
  // === Static factory methods ===
  /**
   * Build an OpenAI-compatible client.
   * NOTE(review): `config` is accepted but never used — the constructor no
   * longer takes provider options. Confirm whether options should be
   * threaded through or the parameter dropped.
   */
  static createOpenAICompatible(
    config: ProviderSettingsMap['openai-compatible'],
    plugins: AiPlugin[] = []
  ): PluginEnabledAiClient<'openai-compatible'> {
    return new PluginEnabledAiClient('openai-compatible', plugins)
  }
  /**
   * Build a client; unknown provider ids fall back to 'openai-compatible'.
   */
  static create<T extends ProviderId>(providerId: T, plugins?: AiPlugin[]): PluginEnabledAiClient<T>
  static create(providerId: string, plugins?: AiPlugin[]): PluginEnabledAiClient<'openai-compatible'>
  static create(providerId: string, plugins: AiPlugin[] = []): PluginEnabledAiClient {
    if (isProviderSupported(providerId)) {
      return new PluginEnabledAiClient(providerId as ProviderId, plugins)
    } else {
      // Unknown providers are treated as OpenAI-compatible
      return new PluginEnabledAiClient('openai-compatible', plugins)
    }
  }
}
/**
 * Factory for a plugin-enabled AI client.
 * @deprecated Prefer AiExecutor for new code.
 */
export function createClient<T extends ProviderId>(providerId: T, plugins?: AiPlugin[]): PluginEnabledAiClient<T>
export function createClient(providerId: string, plugins?: AiPlugin[]): PluginEnabledAiClient<'openai-compatible'>
export function createClient(providerId: string, plugins: AiPlugin[] = []): PluginEnabledAiClient {
  const client = PluginEnabledAiClient.create(providerId, plugins)
  return client
}
/**
 * Shorthand for an OpenAI-compatible plugin-enabled client.
 * NOTE(review): `config` is forwarded to createOpenAICompatible, which
 * currently does not use it — confirm intended behavior.
 * @deprecated Prefer AiExecutor for new code.
 */
export function createCompatibleClient(
  config: ProviderSettingsMap['openai-compatible'],
  plugins: AiPlugin[] = []
): PluginEnabledAiClient<'openai-compatible'> {
  const client = PluginEnabledAiClient.createOpenAICompatible(config, plugins)
  return client
}

View File

@ -0,0 +1,37 @@
/**
*
* optionspluginsmiddlewares等配置
*/
import { LanguageModelV1Middleware } from 'ai'
import { ProviderId, ProviderSettingsMap } from '../../types'
import { createMiddlewares } from '../middleware/MiddlewareManager'
import { AiPlugin } from '../plugins'
import { ResolvedConfig } from './types'
/**
 * Resolve the full execution configuration for one model invocation,
 * combining provider settings, plugins and middlewares into a single
 * ResolvedConfig object.
 */
export function resolveConfig(
  providerId: ProviderId,
  modelId: string,
  providerSettings: ProviderSettingsMap[ProviderId],
  plugins: AiPlugin[] = [],
  middlewares: LanguageModelV1Middleware[] = []
): ResolvedConfig {
  // The middleware list is normalized by the dedicated middleware manager
  const finalMiddlewares = createMiddlewares(middlewares)

  const config: ResolvedConfig = {
    provider: {
      id: providerId,
      options: providerSettings
    },
    model: {
      id: modelId
    },
    plugins,
    middlewares: finalMiddlewares
  }
  return config
}

View File

@ -0,0 +1,31 @@
/**
*
* model给用户
*/
import { LanguageModel } from 'ai'
import { wrapModelWithMiddlewares } from '../middleware'
import { createBaseModel } from './ProviderCreator'
import { ModelCreationRequest, ResolvedConfig } from './types'
/**
 * Create a middleware-wrapped model from a resolved configuration.
 * The raw (unwrapped) model never escapes this layer.
 */
export async function createModelFromConfig(config: ResolvedConfig): Promise<LanguageModel> {
  const { provider, model, middlewares } = config
  // Build the raw model first, then apply middlewares in this layer
  const base = await createBaseModel(provider.id, model.id, provider.options)
  return wrapModelWithMiddlewares(base, middlewares)
}
/**
 * Create a middleware-wrapped model from an explicit creation request.
 */
export async function createModel(request: ModelCreationRequest): Promise<LanguageModel> {
  // Build the raw model, then wrap with any requested middlewares
  const base = await createBaseModel(request.providerId, request.modelId, request.options)
  return wrapModelWithMiddlewares(base, request.middlewares ?? [])
}

View File

@ -0,0 +1,194 @@
/**
* Provider
* AI SDK providers
*/
import type { ImageModelV1 } from '@ai-sdk/provider'
import { type LanguageModelV1, LanguageModelV1Middleware, wrapLanguageModel } from 'ai'
import { type ProviderId, type ProviderSettingsMap } from '../../types'
import { aiProviderRegistry, type ProviderConfig } from '../providers/registry'
/**
 * Error thrown by the provider creation layer when a provider module,
 * creator function or model cannot be resolved.
 */
export class ProviderCreationError extends Error {
  public providerId?: string
  public cause?: Error

  constructor(message: string, providerId?: string, cause?: Error) {
    super(message)
    this.name = 'ProviderCreationError'
    this.providerId = providerId
    this.cause = cause
  }
}
/**
 * Create a base language model by dynamically importing the AI SDK provider
 * package. Provider ids missing from the registry silently fall back to
 * 'openai-compatible'. Optional AI SDK middlewares are applied by wrapping
 * the model before it is returned.
 * @throws ProviderCreationError when the provider module or its creator
 *         function cannot be resolved, or model construction fails.
 */
export async function createBaseModel<T extends ProviderId>(
  providerId: T,
  modelId: string,
  options: ProviderSettingsMap[T],
  middlewares?: LanguageModelV1Middleware[]
): Promise<LanguageModelV1>
export async function createBaseModel(
  providerId: string,
  modelId: string,
  options: ProviderSettingsMap['openai-compatible'],
  middlewares?: LanguageModelV1Middleware[]
): Promise<LanguageModelV1>
export async function createBaseModel(
  providerId: string,
  modelId: string = 'default',
  options: any,
  middlewares?: LanguageModelV1Middleware[]
): Promise<LanguageModelV1> {
  try {
    // Providers missing from the registry default to openai-compatible
    const effectiveProviderId = aiProviderRegistry.isSupported(providerId) ? providerId : 'openai-compatible'
    // Look up the provider's registration (import + creator function name)
    const providerConfig = aiProviderRegistry.getProvider(effectiveProviderId)
    if (!providerConfig) {
      throw new ProviderCreationError(`Provider "${effectiveProviderId}" is not registered`, providerId)
    }
    // Lazily import the provider module (keeps bundle size down)
    const module = await providerConfig.import()
    // Resolve the factory function exported by the module
    const creatorFunction = module[providerConfig.creatorFunctionName]
    if (typeof creatorFunction !== 'function') {
      throw new ProviderCreationError(
        `Creator function "${providerConfig.creatorFunctionName}" not found in the imported module for provider "${effectiveProviderId}"`
      )
    }
    // Instantiate the provider with the caller's options
    const provider = creatorFunction(options)
    // Providers expose models as callable factories: provider(modelId)
    if (typeof provider === 'function') {
      let model: LanguageModelV1 = provider(modelId)
      // Apply AI SDK middlewares, if any were supplied
      if (middlewares && middlewares.length > 0) {
        model = wrapLanguageModel({
          model: model,
          middleware: middlewares
        })
      }
      return model
    } else {
      throw new ProviderCreationError(`Unknown model access pattern for provider "${effectiveProviderId}"`)
    }
  } catch (error) {
    // Re-throw our own errors untouched; wrap anything else with context
    if (error instanceof ProviderCreationError) {
      throw error
    }
    throw new ProviderCreationError(
      `Failed to create base model for provider "${providerId}": ${error instanceof Error ? error.message : 'Unknown error'}`,
      providerId,
      error instanceof Error ? error : undefined
    )
  }
}
/**
 * Create an image model for a provider that supports image generation.
 * Unlike createBaseModel there is NO openai-compatible fallback: an
 * unsupported or non-image-capable provider raises ProviderCreationError.
 * @throws ProviderCreationError on unsupported provider, missing image
 *         support, or module/creator resolution failure.
 */
export async function createImageModel<T extends ProviderId>(
  providerId: T,
  modelId: string,
  options: ProviderSettingsMap[T]
): Promise<ImageModelV1>
export async function createImageModel(
  providerId: string,
  modelId: string,
  options: ProviderSettingsMap['openai-compatible']
): Promise<ImageModelV1>
export async function createImageModel(
  providerId: string,
  modelId: string = 'default',
  options: any
): Promise<ImageModelV1> {
  try {
    if (!aiProviderRegistry.isSupported(providerId)) {
      throw new ProviderCreationError(`Provider "${providerId}" is not supported`, providerId)
    }
    const providerConfig = aiProviderRegistry.getProvider(providerId)
    if (!providerConfig) {
      throw new ProviderCreationError(`Provider "${providerId}" is not registered`, providerId)
    }
    // Image support is opt-in per provider registration
    if (!providerConfig.supportsImageGeneration) {
      throw new ProviderCreationError(`Provider "${providerId}" does not support image generation`, providerId)
    }
    // Lazily import the provider module and resolve its creator function
    const module = await providerConfig.import()
    const creatorFunction = module[providerConfig.creatorFunctionName]
    if (typeof creatorFunction !== 'function') {
      throw new ProviderCreationError(
        `Creator function "${providerConfig.creatorFunctionName}" not found in the imported module for provider "${providerId}"`
      )
    }
    const provider = creatorFunction(options)
    // Image models are exposed via the provider's `.image(modelId)` method
    if (provider && typeof provider.image === 'function') {
      return provider.image(modelId)
    } else {
      throw new ProviderCreationError(`Image model function not found for provider "${providerId}"`)
    }
  } catch (error) {
    // Re-throw our own errors untouched; wrap anything else with context
    if (error instanceof ProviderCreationError) {
      throw error
    }
    throw new ProviderCreationError(
      `Failed to create image model for provider "${providerId}": ${error instanceof Error ? error.message : 'Unknown error'}`,
      providerId,
      error instanceof Error ? error : undefined
    )
  }
}
/**
 * List every registered provider as `{ id, name }` pairs.
 */
export function getSupportedProviders(): Array<{
  id: string
  name: string
}> {
  const providers = aiProviderRegistry.getAllProviders()
  return providers.map(({ id, name }: ProviderConfig) => ({ id, name }))
}
/**
 * Describe a provider id: display name, whether it is natively supported,
 * and which provider implementation will actually be used (unsupported ids
 * fall back to 'openai-compatible').
 */
export function getProviderInfo(providerId: string): {
  id: string
  name: string
  isSupported: boolean
  effectiveProvider: string
} {
  const supported = aiProviderRegistry.isSupported(providerId)
  const effectiveId = supported ? providerId : 'openai-compatible'
  const config = aiProviderRegistry.getProvider(effectiveId)
  return {
    id: providerId,
    name: config?.name || providerId,
    isSupported: supported,
    effectiveProvider: effectiveId
  }
}

View File

@ -0,0 +1,15 @@
/**
* Creation
*
*/
export { resolveConfig } from './ConfigManager'
export { createModel, createModelFromConfig } from './ModelCreator'
export {
createBaseModel,
createImageModel,
getProviderInfo,
getSupportedProviders,
ProviderCreationError
} from './ProviderCreator'
export type { ModelCreationRequest, ResolvedConfig } from './types'

View File

@ -0,0 +1,32 @@
/**
* Creation
*/
import { LanguageModelV1Middleware } from 'ai'
import { ProviderId, ProviderSettingsMap } from '../../types'
import { AiPlugin } from '../plugins'
/**
 * Request payload for creating a single model instance.
 */
export interface ModelCreationRequest {
  providerId: ProviderId
  modelId: string
  options: ProviderSettingsMap[ProviderId]
  // Optional AI SDK middlewares applied when wrapping the created model
  middlewares?: LanguageModelV1Middleware[]
}
/**
 * Fully resolved per-invocation configuration produced by resolveConfig().
 */
export interface ResolvedConfig {
  provider: {
    id: ProviderId
    options: ProviderSettingsMap[ProviderId]
  }
  model: {
    id: string
  }
  plugins: AiPlugin[]
  middlewares: LanguageModelV1Middleware[]
}

View File

@ -0,0 +1,174 @@
/**
* AI
* API入口AI调用
*/
import { generateObject, generateText, LanguageModelV1, streamObject, streamText } from 'ai'
import { type ProviderId } from '../../types'
import { PluginEnabledAiClient } from '../clients/PluginEnabledAiClient'
import { type AiPlugin } from '../plugins'
import { isProviderSupported } from '../providers/registry'
import { type ExecutorConfig, type GenericExecutorConfig } from './types'
/**
 * AI executor: the public entry point of the execution layer.
 * The caller supplies a ready-made AI SDK model; the executor runs the call
 * through the plugin pipeline of an internal PluginEnabledAiClient.
 */
export class AiExecutor<T extends ProviderId = ProviderId> {
  private pluginClient: PluginEnabledAiClient<T>

  constructor(config: ExecutorConfig<T>)
  constructor(config: GenericExecutorConfig)
  constructor(config: ExecutorConfig<T> | GenericExecutorConfig) {
    // FIX: config.plugins was previously discarded — the internal client was
    // constructed without them, so plugins passed to the executor never ran.
    const plugins = config.plugins ?? []
    if (isProviderSupported(config.providerId)) {
      this.pluginClient = new PluginEnabledAiClient(config.providerId as T, plugins)
    } else {
      // Unknown providers fall back to openai-compatible
      this.pluginClient = new PluginEnabledAiClient('openai-compatible' as T, plugins)
    }
  }

  /**
   * Streaming text generation through the plugin pipeline.
   * Plugin-collected stream transforms are applied unless the caller passes
   * `experimental_transform` explicitly.
   */
  async streamText(
    model: LanguageModelV1,
    params: Omit<Parameters<typeof streamText>[0], 'model'>
  ): Promise<ReturnType<typeof streamText>>
  async streamText(
    model: LanguageModelV1,
    params?: Omit<Parameters<typeof streamText>[0], 'model'>
  ): Promise<ReturnType<typeof streamText>> {
    return this.pluginClient.executeStreamWithPlugins(
      'streamText',
      model.modelId,
      params!,
      async (finalModelId, transformedParams, streamTransforms) => {
        // The model is supplied by the caller; finalModelId is informational
        const experimental_transform =
          params?.experimental_transform ?? (streamTransforms.length > 0 ? streamTransforms : undefined)

        return await streamText({
          model,
          ...transformedParams,
          experimental_transform
        })
      }
    )
  }

  /**
   * Non-streaming text generation through the plugin pipeline.
   */
  async generateText(
    model: LanguageModelV1,
    params: Omit<Parameters<typeof generateText>[0], 'model'>
  ): Promise<ReturnType<typeof generateText>>
  async generateText(
    model: LanguageModelV1,
    params?: Omit<Parameters<typeof generateText>[0], 'model'>
  ): Promise<ReturnType<typeof generateText>> {
    return this.pluginClient.executeWithPlugins(
      'generateText',
      model.modelId,
      params!,
      async (finalModelId, transformedParams) => {
        return await generateText({ model, ...transformedParams })
      }
    )
  }

  /**
   * Structured object generation through the plugin pipeline.
   */
  async generateObject(
    model: LanguageModelV1,
    params: Omit<Parameters<typeof generateObject>[0], 'model'>
  ): Promise<ReturnType<typeof generateObject>>
  async generateObject(
    model: LanguageModelV1,
    params?: Omit<Parameters<typeof generateObject>[0], 'model'>
  ): Promise<ReturnType<typeof generateObject>> {
    return this.pluginClient.executeWithPlugins(
      'generateObject',
      model.modelId,
      params!,
      async (finalModelId, transformedParams) => {
        return await generateObject({ model, ...transformedParams })
      }
    )
  }

  /**
   * Streaming structured object generation through the plugin pipeline.
   */
  async streamObject(
    model: LanguageModelV1,
    params: Omit<Parameters<typeof streamObject>[0], 'model'>
  ): Promise<ReturnType<typeof streamObject>>
  async streamObject(
    model: LanguageModelV1,
    params?: Omit<Parameters<typeof streamObject>[0], 'model'>
  ): Promise<ReturnType<typeof streamObject>> {
    return this.pluginClient.executeWithPlugins(
      'streamObject',
      model.modelId,
      params!,
      async (finalModelId, transformedParams) => {
        return await streamObject({ model, ...transformedParams })
      }
    )
  }

  /**
   * Plugin statistics of the underlying client.
   */
  getPluginStats() {
    return this.pluginClient.getPluginStats()
  }

  /**
   * Registered plugins of the underlying client.
   */
  getPlugins() {
    return this.pluginClient.getPlugins()
  }

  /**
   * Provider info of the underlying client.
   */
  getClientInfo() {
    return this.pluginClient.getClientInfo()
  }

  // === Static factory methods ===

  /**
   * Create an executor; known provider ids get full type safety.
   */
  static create<T extends ProviderId>(providerId: T, plugins?: AiPlugin[]): AiExecutor<T>
  static create(providerId: string, plugins?: AiPlugin[]): AiExecutor<any>
  static create(providerId: string, plugins: AiPlugin[] = []): AiExecutor {
    return new AiExecutor({
      providerId,
      plugins
    })
  }

  /**
   * Create an OpenAI-compatible executor.
   */
  static createOpenAICompatible(plugins: AiPlugin[] = []): AiExecutor<'openai-compatible'> {
    return new AiExecutor({
      providerId: 'openai-compatible',
      plugins
    })
  }
}

View File

@ -0,0 +1,47 @@
/**
* Execution
*
*/
// 主要执行器
export { AiExecutor } from './AiExecutor'
export type { ExecutionOptions, ExecutorConfig, GenericExecutorConfig } from './types'
// 便捷工厂函数
import { type ProviderId, type ProviderSettingsMap } from '../../types'
import { type AiPlugin } from '../plugins'
import { AiExecutor } from './AiExecutor'
/**
 * Create an AI executor for a known provider id (fully typed).
 */
export function createExecutor<T extends ProviderId>(
  providerId: T,
  options: ProviderSettingsMap[T],
  plugins?: AiPlugin[]
): AiExecutor<T>
/**
 * Create an AI executor for an arbitrary (string) provider id.
 */
export function createExecutor(providerId: string, options: any, plugins?: AiPlugin[]): AiExecutor<any>
export function createExecutor(providerId: string, options: any, plugins: AiPlugin[] = []): AiExecutor {
  // NOTE(review): `options` is accepted but never forwarded — AiExecutor
  // takes no provider options. Confirm whether options should be used here
  // or the parameter removed.
  return AiExecutor.create(providerId, plugins)
}
/**
 * Create an OpenAI-compatible AI executor.
 */
export function createOpenAICompatibleExecutor(
  options: ProviderSettingsMap['openai-compatible'],
  plugins: AiPlugin[] = []
): AiExecutor<'openai-compatible'> {
  // NOTE(review): `options` is accepted but never forwarded — confirm
  // whether provider options should reach the executor or be removed.
  return AiExecutor.createOpenAICompatible(plugins)
}
// 为了未来的agent功能预留目录结构
// 未来将在 ./agents/ 文件夹中添加:
// - AgentExecutor.ts
// - WorkflowManager.ts
// - ConversationManager.ts

View File

@ -0,0 +1,29 @@
/**
* Execution
*/
import { type ProviderId } from '../../types'
import { type AiPlugin } from '../plugins'
/**
 * Typed executor configuration for a known provider id.
 */
export interface ExecutorConfig<T extends ProviderId = ProviderId> {
  providerId: T
  plugins?: AiPlugin[]
}
/**
 * Executor configuration for an arbitrary (string) provider id.
 */
export interface GenericExecutorConfig {
  providerId: string
  plugins?: AiPlugin[]
}
/**
 * Per-call execution options.
 * Reserved for future use (timeouts, retry policy, etc.).
 */
export interface ExecutionOptions {
  // Intentionally empty for now
}

View File

@ -0,0 +1,25 @@
/**
* Core
* 使
*/
// 中间件系统
export type { NamedMiddleware } from './middleware'
export { MiddlewareManager, wrapModelWithMiddlewares } from './middleware'
// 创建管理
export type { ModelCreationRequest, ResolvedConfig } from './creation'
export {
createBaseModel,
createImageModel,
createModel,
createModelFromConfig,
getProviderInfo,
getSupportedProviders,
ProviderCreationError,
resolveConfig
} from './creation'
// 执行管理
export type { ExecutionOptions, ExecutorConfig, GenericExecutorConfig } from './execution'
export { AiExecutor } from './execution'

View File

@ -0,0 +1,16 @@
/**
*
* AI SDK
*/
import { LanguageModelV1Middleware } from 'ai'
/**
 * Build the final middleware chain.
 * There are currently no built-in defaults, so the result is simply the
 * user-supplied middlewares copied into a fresh array (safe to mutate).
 */
export function createMiddlewares(userMiddlewares: LanguageModelV1Middleware[] = []): LanguageModelV1Middleware[] {
  // Placeholder for future built-in middlewares, prepended before user ones
  const builtin: LanguageModelV1Middleware[] = []
  return builtin.concat(userMiddlewares)
}

View File

@ -0,0 +1,22 @@
/**
*
* LanguageModel上
*/
import { LanguageModel, LanguageModelV1Middleware, wrapLanguageModel } from 'ai'
/**
 * Wrap a language model with AI SDK middlewares.
 * Returns the model unchanged when no middlewares are supplied.
 */
export function wrapModelWithMiddlewares(
  model: LanguageModel,
  middlewares: LanguageModelV1Middleware[]
): LanguageModel {
  // Fast path: nothing to wrap
  if (!middlewares.length) {
    return model
  }
  return wrapLanguageModel({ model, middleware: middlewares })
}

View File

@ -0,0 +1,8 @@
/**
* Middleware
*
*/
export { MiddlewareManager } from './MiddlewareManager'
export { wrapModelWithMiddlewares } from './ModelWrapper'
export type { NamedMiddleware } from './types'

View File

@ -0,0 +1,12 @@
/**
*
*/
import { LanguageModelV1Middleware } from 'ai'
/**
 * A middleware paired with a stable name so it can be identified,
 * deduplicated or removed later.
 */
export interface NamedMiddleware {
  name: string
  middleware: LanguageModelV1Middleware
}

View File

@ -1,7 +1,7 @@
import { AnthropicProviderOptions } from '@ai-sdk/anthropic'
import { GoogleGenerativeAIProviderOptions } from '@ai-sdk/google'
import { OpenAIResponsesProviderOptions } from '@ai-sdk/openai'
import { LanguageModelV1ProviderMetadata } from '@ai-sdk/provider'
import { type AnthropicProviderOptions } from '@ai-sdk/anthropic'
import { type GoogleGenerativeAIProviderOptions } from '@ai-sdk/google'
import { type OpenAIResponsesProviderOptions } from '@ai-sdk/openai'
import { type LanguageModelV1ProviderMetadata } from '@ai-sdk/provider'
export type ProviderOptions<T extends keyof LanguageModelV1ProviderMetadata> = LanguageModelV1ProviderMetadata[T]

View File

@ -2,7 +2,7 @@ import { openai } from '@ai-sdk/openai'
import { streamText } from 'ai'
import { PluginEnabledAiClient } from '../../clients/PluginEnabledAiClient'
import { createContext, PluginManager } from '../'
import { createContext, PluginManager } from '..'
import { ContentFilterPlugin, LoggingPlugin } from './example-plugins'
/**

View File

@ -1,4 +1,4 @@
import type { LanguageModelV1Middleware, TextStreamPart, ToolSet } from 'ai'
import type { TextStreamPart, ToolSet } from 'ai'
import { AiPlugin, AiRequestContext } from './types'
@ -135,13 +135,6 @@ export class PluginManager {
>
}
/**
* AI SDK
*/
collectAiSdkMiddlewares(): LanguageModelV1Middleware[] {
return this.plugins.flatMap((plugin) => plugin.aiSdkMiddlewares || [])
}
/**
*
*/

View File

@ -1,4 +1,4 @@
import type { LanguageModelV1Middleware, TextStreamPart, ToolSet } from 'ai'
import type { TextStreamPart, ToolSet } from 'ai'
/**
*
@ -62,7 +62,7 @@ export interface AiPlugin {
}) => TransformStream<TextStreamPart<TOOLS>, TextStreamPart<TOOLS>>
// AI SDK 原生中间件
aiSdkMiddlewares?: LanguageModelV1Middleware[]
// aiSdkMiddlewares?: LanguageModelV1Middleware[]
}
/**

View File

@ -4,50 +4,57 @@
*/
// 导入内部使用的类和函数
import { ApiClientFactory } from './clients/ApiClientFactory'
import { createClient } from './clients/PluginEnabledAiClient'
import { type ProviderSettingsMap } from './clients/types'
import { createUniversalClient } from './clients/UniversalAiSdkClient'
import { aiProviderRegistry, isProviderSupported } from './providers/registry'
import { createClient } from './core/clients/PluginEnabledAiClient'
import {
getProviderInfo as factoryGetProviderInfo,
getSupportedProviders as factoryGetSupportedProviders
} from './core/creation'
import { AiExecutor } from './core/execution/AiExecutor'
import { aiProviderRegistry, isProviderSupported } from './core/providers/registry'
import { type ProviderSettingsMap } from './types'
// ==================== 主要客户端接口 ====================
// 默认使用集成插件系统的客户端
// ==================== 主要用户接口 ====================
// orchestration层 - 面向用户的主要API
export {
AiExecutor,
generateObject,
generateText,
type OrchestrationConfig,
streamObject,
streamText
} from './orchestration'
// 为了向后兼容保留AiClient别名内部使用PluginEnabledAiClient
export {
PluginEnabledAiClient as AiClient,
createClient,
createCompatibleClient
} from './clients/PluginEnabledAiClient'
// 为了向后兼容,也导出原名称
export { PluginEnabledAiClient } from './clients/PluginEnabledAiClient'
} from './core/clients/PluginEnabledAiClient'
// ==================== 插件系统 ====================
export type { AiPlugin, AiRequestContext, HookResult, HookType, PluginManagerConfig } from './plugins'
export { createContext, definePlugin, PluginManager } from './plugins'
// ==================== 底层客户端(高级用法) ====================
// 不带插件系统的基础客户端,用于需要绕过插件系统的场景
export {
createOpenAICompatibleClient as createBasicOpenAICompatibleClient,
createUniversalClient,
UniversalAiSdkClient
} from './clients/UniversalAiSdkClient'
export type { AiPlugin, AiRequestContext, HookResult, HookType, PluginManagerConfig } from './core/plugins'
export { createContext, definePlugin, PluginManager } from './core/plugins'
// ==================== 低级 API ====================
export { ApiClientFactory } from './clients/ApiClientFactory'
export { aiProviderRegistry } from './providers/registry'
export {
createBaseModel as createApiClient,
createImageModel,
getProviderInfo as getClientInfo,
getSupportedProviders,
ProviderCreationError
} from './core/creation'
export { aiProviderRegistry } from './core/providers/registry'
// ==================== 类型定义 ====================
export type { ClientFactoryError } from './clients/ApiClientFactory'
export type { ProviderConfig } from './core/providers/registry'
export type { ProviderError } from './core/providers/types'
export type {
GenerateObjectParams,
GenerateTextParams,
ProviderSettings,
StreamObjectParams,
StreamTextParams
} from './clients/types'
export type { ProviderConfig } from './providers/registry'
export type { ProviderError } from './providers/types'
} from './types'
export * as aiSdk from 'ai'
// ==================== AI SDK 常用类型导出 ====================
@ -110,7 +117,7 @@ export type {
VercelProviderSettings,
XaiProviderSettings,
ZhipuProviderSettings
} from './clients/types'
} from './types'
// ==================== 选项 ====================
export {
@ -121,11 +128,10 @@ export {
mergeProviderOptions,
type ProviderOptionsMap,
type TypedProviderOptions
} from './options'
} from './core/options'
// ==================== 工具函数 ====================
export { createClient as createApiClient, getClientInfo, getSupportedProviders } from './clients/ApiClientFactory'
export { getAllProviders, getProvider, isProviderSupported, registerProvider } from './providers/registry'
export { getAllProviders, getProvider, isProviderSupported, registerProvider } from './core/providers/registry'
// ==================== Provider 配置工厂 ====================
export {
@ -134,7 +140,7 @@ export {
type ProviderConfigBuilder,
providerConfigBuilder,
ProviderConfigFactory
} from './providers/factory'
} from './core/providers/factory'
// ==================== 包信息 ====================
export const AI_CORE_VERSION = '1.0.0'
@ -146,19 +152,19 @@ export const AiCore = {
version: AI_CORE_VERSION,
name: AI_CORE_NAME,
// 创建主要客户端(默认带插件系统
create(providerId: string, options: any = {}, plugins: any[] = []) {
return createClient(providerId, options, plugins)
// 创建主要执行器(推荐使用
create(providerId: string, plugins: any[] = []) {
return AiExecutor.create(providerId, plugins)
},
// 创建基础客户端(不带插件系统
createBasic(providerId: string, options: any = {}) {
return createUniversalClient(providerId, options)
// 创建底层客户端(高级用法
createClient(providerId: string, plugins: any[] = []) {
return createClient(providerId, plugins)
},
// 获取支持的providers
getSupportedProviders() {
return ApiClientFactory.getSupportedProviders()
return factoryGetSupportedProviders()
},
// 检查provider支持
@ -168,24 +174,42 @@ export const AiCore = {
// 获取客户端信息
getClientInfo(providerId: string) {
return ApiClientFactory.getClientInfo(providerId)
return factoryGetProviderInfo(providerId)
}
}
// Recommended executor factory functions (one per built-in provider).
// NOTE(review): the `options` parameter is accepted but never forwarded —
// AiExecutor.create only takes (providerId, plugins). Confirm whether provider
// settings are intended to be applied at model-creation time instead, or
// whether `options` should be dropped from these signatures.
export const createOpenAIExecutor = (options: ProviderSettingsMap['openai'], plugins?: any[]) => {
  return AiExecutor.create('openai', plugins)
}
export const createAnthropicExecutor = (options: ProviderSettingsMap['anthropic'], plugins?: any[]) => {
  return AiExecutor.create('anthropic', plugins)
}
export const createGoogleExecutor = (options: ProviderSettingsMap['google'], plugins?: any[]) => {
  return AiExecutor.create('google', plugins)
}
export const createXAIExecutor = (options: ProviderSettingsMap['xai'], plugins?: any[]) => {
  return AiExecutor.create('xai', plugins)
}
// 向后兼容的客户端创建函数
export const createOpenAIClient = (options: ProviderSettingsMap['openai'], plugins?: any[]) => {
return createClient('openai', options, plugins)
return createClient('openai', plugins)
}
export const createAnthropicClient = (options: ProviderSettingsMap['anthropic'], plugins?: any[]) => {
return createClient('anthropic', options, plugins)
return createClient('anthropic', plugins)
}
export const createGoogleClient = (options: ProviderSettingsMap['google'], plugins?: any[]) => {
return createClient('google', options, plugins)
return createClient('google', plugins)
}
export const createXAIClient = (options: ProviderSettingsMap['xai'], plugins?: any[]) => {
return createClient('xai', options, plugins)
return createClient('xai', plugins)
}
// ==================== 调试和开发工具 ====================

View File

@ -0,0 +1,148 @@
/**
* AI
* - APIcreation和execution层
*/
import {
generateObject as aiGenerateObject,
generateText as aiGenerateText,
streamObject as aiStreamObject,
streamText as aiStreamText
} from 'ai'
import { createModelFromConfig, resolveConfig } from '../core/creation'
import { AiExecutor } from '../core/execution/AiExecutor'
import {
type GenerateObjectParams,
type GenerateTextParams,
type StreamObjectParams,
type StreamTextParams
} from '../types'
import { type OrchestrationConfig } from './types'
/**
 * Shared orchestration pipeline: resolve the request configuration through the
 * creation layer, materialize the model, and pair it with an executor.
 *
 * Extracted because streamText / generateText / generateObject / streamObject
 * all performed this identical sequence inline. The pointless non-null
 * assertions (`modelId!`, `params!`) on required parameters were also dropped.
 *
 * @param modelId - Provider-specific model identifier.
 * @param config  - Orchestration config: provider id, provider options,
 *                  optional plugins and middlewares.
 */
async function prepareExecution(modelId: string, config: OrchestrationConfig) {
  // 1. Resolve provider settings, plugins and middlewares into a model config.
  const resolvedConfig = resolveConfig(
    config.providerId,
    modelId,
    config.options,
    config.plugins || [],
    config.middlewares || []
  )
  // 2. Create the language model from the resolved configuration.
  const model = await createModelFromConfig(resolvedConfig)
  // 3. Build the executor that runs the request through the plugin pipeline.
  const executor = AiExecutor.create(config.providerId, config.plugins)
  return { model, executor }
}

/**
 * Streams text from the configured provider.
 *
 * @param modelId - Provider-specific model identifier.
 * @param params  - AI SDK streamText parameters (excluding the model).
 * @param config  - Orchestration configuration.
 */
export async function streamText(
  modelId: string,
  params: StreamTextParams,
  config: OrchestrationConfig
): Promise<ReturnType<typeof aiStreamText>> {
  const { model, executor } = await prepareExecution(modelId, config)
  return executor.streamText(model, params)
}

/**
 * Generates text (non-streaming) from the configured provider.
 *
 * @param modelId - Provider-specific model identifier.
 * @param params  - AI SDK generateText parameters (excluding the model).
 * @param config  - Orchestration configuration.
 */
export async function generateText(
  modelId: string,
  params: GenerateTextParams,
  config: OrchestrationConfig
): Promise<ReturnType<typeof aiGenerateText>> {
  const { model, executor } = await prepareExecution(modelId, config)
  return executor.generateText(model, params)
}

/**
 * Generates a structured object (non-streaming) from the configured provider.
 *
 * @param modelId - Provider-specific model identifier.
 * @param params  - AI SDK generateObject parameters (excluding the model).
 * @param config  - Orchestration configuration.
 */
export async function generateObject(
  modelId: string,
  params: GenerateObjectParams,
  config: OrchestrationConfig
): Promise<ReturnType<typeof aiGenerateObject>> {
  const { model, executor } = await prepareExecution(modelId, config)
  return executor.generateObject(model, params)
}

/**
 * Streams a structured object from the configured provider.
 *
 * @param modelId - Provider-specific model identifier.
 * @param params  - AI SDK streamObject parameters (excluding the model).
 * @param config  - Orchestration configuration.
 */
export async function streamObject(
  modelId: string,
  params: StreamObjectParams,
  config: OrchestrationConfig
): Promise<ReturnType<typeof aiStreamObject>> {
  const { model, executor } = await prepareExecution(modelId, config)
  return executor.streamObject(model, params)
}

View File

@ -0,0 +1,13 @@
/**
 * Orchestration layer public entry point.
 * Re-exports the high-level API functions, their config type, and the executor.
 */
// Primary orchestration functions
export { generateObject, generateText, streamObject, streamText } from './api'
// Type definitions
export type { OrchestrationConfig } from './types'
// Re-export AiExecutor for backward compatibility
export { AiExecutor } from '../core/execution/AiExecutor'

View File

@ -0,0 +1,26 @@
/**
* Orchestration
*
*/
import { LanguageModelV1Middleware } from 'ai'
import { type AiPlugin } from '../core/plugins'
import { type ProviderId, type ProviderSettingsMap } from '../types'
/**
 * Configuration consumed by the orchestration API (streamText, generateText,
 * generateObject, streamObject). The provider id narrows the type of
 * `options` to that provider's settings.
 */
export interface OrchestrationConfig<T extends ProviderId = ProviderId> {
  // Which registered provider to use (e.g. 'openai', 'anthropic').
  providerId: T
  // Provider-specific settings, keyed by the provider id.
  options: ProviderSettingsMap[T]
  // Optional plugins passed to the executor's plugin pipeline.
  plugins?: AiPlugin[]
  // Optional AI SDK middlewares applied when the model is created.
  middlewares?: LanguageModelV1Middleware[]
}
/**
 * Orchestration-level options. Currently an intentional placeholder.
 */
export interface OrchestrationOptions {
  // Future orchestration-level options may go here,
  // e.g. retry policy, timeouts, log level.
}

View File

@ -1,6 +1,6 @@
import { generateObject, generateText, streamObject, streamText } from 'ai'
import type { ProviderSettingsMap } from '../providers/registry'
import type { ProviderSettingsMap } from './core/providers/registry'
// ProviderSettings 是所有 Provider Settings 的联合类型
export type ProviderSettings = ProviderSettingsMap[keyof ProviderSettingsMap]
@ -39,4 +39,4 @@ export type {
VercelProviderSettings,
XaiProviderSettings,
ZhipuProviderSettings
} from '../providers/registry'
} from './core/providers/registry'

View File

@ -18,7 +18,7 @@ import {
isWebSearchModel
} from '@renderer/config/models'
import { getAssistantSettings, getDefaultModel } from '@renderer/services/AssistantService'
import type { Assistant, MCPTool, MCPToolResponse, Message, Model } from '@renderer/types'
import type { Assistant, MCPTool, MCPToolInputSchema, MCPToolResponse, Message, Model } from '@renderer/types'
import { FileTypes } from '@renderer/types'
import { callMCPTool } from '@renderer/utils/mcp-tools'
import { findFileBlocks, findImageBlocks, getMainTextContent } from '@renderer/utils/messageUtils/find'
@ -221,11 +221,11 @@ export async function buildStreamTextParams(
(isSupportedDisableGenerationModel(model) ? assistant.enableGenerateImage || false : true)
// 构建系统提示
const systemPrompt = assistant.prompt || ''
let systemPrompt = assistant.prompt || ''
// TODO:根据调用类型判断是否添加systemPrompt
// if (mcpTools && mcpTools.length > 0) {
// systemPrompt = await buildSystemPromptWithTools(systemPrompt, mcpTools, assistant)
// }
if (mcpTools && mcpTools.length > 0 && assistant.settings?.toolUseMode === 'prompt') {
systemPrompt = await buildSystemPromptWithTools(systemPrompt, mcpTools, assistant)
}
// 构建真正的 providerOptions
const providerOptions = buildProviderOptions(assistant, model, {
@ -275,104 +275,28 @@ export async function buildGenerateTextParams(
}
/**
 * Converts an MCPToolInputSchema into a plain JSONSchema7-compatible object
 * accepted by the AI SDK's `jsonSchema()` helper as tool parameters.
 *
 * Only the top-level object shape is normalized: `properties` and `required`
 * fall back to empty defaults, and the optional `description` / `title`
 * fields are carried over when present. Property sub-schemas are passed
 * through unchanged. (The previous hand-rolled JSON-Schema-to-Zod conversion
 * was dead, fully commented-out code and has been removed.)
 */
function convertMcpSchemaToJsonSchema7(schema: MCPToolInputSchema): any {
  const jsonSchema7: Record<string, any> = {
    type: 'object',
    properties: schema.properties || {},
    required: schema.required || []
  }

  if (schema.description) {
    jsonSchema7.description = schema.description
  }

  if (schema.title) {
    jsonSchema7.title = schema.title
  }

  return jsonSchema7
}
/**
* MCPTool AI SDK
@ -384,7 +308,7 @@ export function convertMcpToolsToAiSdkTools(mcpTools: MCPTool[]): Record<string,
console.log('mcpTool', mcpTool.inputSchema)
tools[mcpTool.name] = tool({
description: mcpTool.description || `Tool from ${mcpTool.serverName}`,
parameters: jsonSchema<Record<string, object>>(mcpTool.inputSchema),
parameters: jsonSchema<Record<string, object>>(convertMcpSchemaToJsonSchema7(mcpTool.inputSchema)),
execute: async (params) => {
console.log('execute_params', params)
// 创建适配的 MCPToolResponse 对象