feat: enhance AI core functionality with smoothStream integration

- Added smoothStream to the middleware exports in index.ts for improved streaming capabilities.
- Updated PluginEnabledAiClient to conditionally apply middlewares, removing the default simulateStreamingMiddleware.
- Modified ModernAiProvider to utilize smoothStream in streamText, enhancing text processing with configurable chunking and delay options.
This commit is contained in:
suyao 2025-06-20 15:44:11 +08:00 committed by MyPrototypeWhat
parent 3771b24b52
commit 9318d9ffeb
3 changed files with 11 additions and 11 deletions

View File

@@ -19,14 +19,7 @@
* })
* ```
*/
import {
generateObject,
generateText,
LanguageModelV1Middleware,
simulateStreamingMiddleware,
streamObject,
streamText
} from 'ai'
import { generateObject, generateText, LanguageModelV1Middleware, streamObject, streamText } from 'ai'
import { AiPlugin, createContext, PluginManager } from '../plugins'
import { isProviderSupported } from '../providers/registry'
@@ -196,7 +189,7 @@ export class PluginEnabledAiClient<T extends ProviderId = ProviderId> {
this.providerId,
modelId,
this.options,
middlewares.length > 0 ? middlewares : [simulateStreamingMiddleware()] //TODO: 这里需要改成非流时调用simulateStreamingMiddleware(),这里先随便传一个
middlewares.length > 0 ? middlewares : undefined
)
}

View File

@@ -79,7 +79,7 @@ export type {
ToolExecutionError,
ToolResult
} from 'ai'
export { defaultSettingsMiddleware, extractReasoningMiddleware, simulateStreamingMiddleware } from 'ai'
export { defaultSettingsMiddleware, extractReasoningMiddleware, simulateStreamingMiddleware, smoothStream } from 'ai'
// 重新导出所有 Provider Settings 类型
export type {

View File

@@ -14,6 +14,7 @@ import {
createClient,
type OpenAICompatibleProviderSettings,
type ProviderId,
smoothStream,
StreamTextParams
} from '@cherrystudio/ai-core'
import { isDedicatedImageGenerationModel } from '@renderer/config/models'
@@ -176,7 +177,13 @@ export default class ModernAiProvider {
if (middlewareConfig.onChunk) {
// 流式处理 - 使用适配器
const adapter = new AiSdkToChunkAdapter(middlewareConfig.onChunk)
const streamResult = await clientWithMiddlewares.streamText(modelId, params)
const streamResult = await clientWithMiddlewares.streamText(modelId, {
...params,
experimental_transform: smoothStream({
delayInMs: 100,
chunking: 'word'
})
})
const finalText = await adapter.processStream(streamResult)
return {