Mirror of https://github.com/CherryHQ/cherry-studio.git, synced 2026-01-05 04:19:02 +08:00
feat: enhance AI core functionality with smoothStream integration
- Added smoothStream to the middleware exports in index.ts for improved streaming capabilities.
- Updated PluginEnabledAiClient to conditionally apply middlewares, removing the default simulateStreamingMiddleware.
- Modified ModernAiProvider to use smoothStream in streamText, giving text streaming configurable chunking and delay options (see the usage sketch below).
parent 3771b24b52
commit 9318d9ffeb
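
For the third bullet: smoothStream comes from the Vercel AI SDK ('ai' package) and is handed to streamText through experimental_transform, where it re-chunks the provider's raw text deltas into evenly paced pieces. A minimal standalone sketch of that pattern, assuming the @ai-sdk/openai provider, an API key in the environment, and an illustrative model and prompt (none of which are part of this commit):

import { openai } from '@ai-sdk/openai'
import { smoothStream, streamText } from 'ai'

// Re-chunk the provider's raw deltas into word-sized pieces released
// roughly every 100 ms, the same values this commit hardcodes.
const result = streamText({
  model: openai('gpt-4o-mini'),
  prompt: 'Explain token streaming in two sentences.',
  experimental_transform: smoothStream({ delayInMs: 100, chunking: 'word' })
})

for await (const textPart of result.textStream) {
  process.stdout.write(textPart)
}

Without the transform, chunks reach the UI exactly as the provider emits them; smoothStream trades a small delay for steadier rendering.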
@@ -19,14 +19,7 @@
  * })
  * ```
  */
-import {
-  generateObject,
-  generateText,
-  LanguageModelV1Middleware,
-  simulateStreamingMiddleware,
-  streamObject,
-  streamText
-} from 'ai'
+import { generateObject, generateText, LanguageModelV1Middleware, streamObject, streamText } from 'ai'
 
 import { AiPlugin, createContext, PluginManager } from '../plugins'
 import { isProviderSupported } from '../providers/registry'

@@ -196,7 +189,7 @@ export class PluginEnabledAiClient<T extends ProviderId = ProviderId> {
       this.providerId,
       modelId,
       this.options,
-      middlewares.length > 0 ? middlewares : [simulateStreamingMiddleware()] // TODO: change this to call simulateStreamingMiddleware() only for non-streaming requests; passing an arbitrary placeholder for now
+      middlewares.length > 0 ? middlewares : undefined
     )
   }
 
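
The change above drops the old fallback that always wrapped the model in a placeholder simulateStreamingMiddleware() when no middlewares were configured; passing undefined now leaves the model unwrapped. The sketch below shows one way such conditional wrapping can look with the AI SDK's wrapLanguageModel helper; buildModel and the openai model are illustrative, not part of this codebase:

import { openai } from '@ai-sdk/openai'
import { simulateStreamingMiddleware, wrapLanguageModel, type LanguageModelV1Middleware } from 'ai'

// Hypothetical helper: wrap the base model only when middlewares are configured,
// mirroring the `middlewares.length > 0 ? middlewares : undefined` fallback above.
function buildModel(middlewares: LanguageModelV1Middleware[]) {
  const base = openai('gpt-4o-mini')
  if (middlewares.length === 0) return base

  let model = wrapLanguageModel({ model: base, middleware: middlewares[0] })
  for (const middleware of middlewares.slice(1)) {
    model = wrapLanguageModel({ model, middleware })
  }
  return model
}

// A caller that knows its provider cannot stream can still opt in explicitly:
const simulatedStreamingModel = buildModel([simulateStreamingMiddleware()])

Whether PluginEnabledAiClient applies middlewares through the SDK wrapper this way is not visible in the hunk; the sketch only illustrates the conditional-application idea.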
@@ -79,7 +79,7 @@ export type {
   ToolExecutionError,
   ToolResult
 } from 'ai'
-export { defaultSettingsMiddleware, extractReasoningMiddleware, simulateStreamingMiddleware } from 'ai'
+export { defaultSettingsMiddleware, extractReasoningMiddleware, simulateStreamingMiddleware, smoothStream } from 'ai'
 
 // Re-export all Provider Settings types
 export type {
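
With smoothStream added to the re-exports above, downstream code such as ModernAiProvider (next hunk) can import it from '@cherrystudio/ai-core' instead of depending on 'ai' directly. A trivial consumer-side sketch; the variable name and option values are illustrative:

import { smoothStream } from '@cherrystudio/ai-core'

// The same transform the next hunk passes to streamText, built via the package re-export.
const wordPacedTransform = smoothStream({ delayInMs: 100, chunking: 'word' })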
@@ -14,6 +14,7 @@ import {
   createClient,
   type OpenAICompatibleProviderSettings,
   type ProviderId,
+  smoothStream,
   StreamTextParams
 } from '@cherrystudio/ai-core'
 import { isDedicatedImageGenerationModel } from '@renderer/config/models'

@@ -176,7 +177,13 @@ export default class ModernAiProvider {
     if (middlewareConfig.onChunk) {
       // Streaming - use the adapter
       const adapter = new AiSdkToChunkAdapter(middlewareConfig.onChunk)
-      const streamResult = await clientWithMiddlewares.streamText(modelId, params)
+      const streamResult = await clientWithMiddlewares.streamText(modelId, {
+        ...params,
+        experimental_transform: smoothStream({
+          delayInMs: 100,
+          chunking: 'word'
+        })
+      })
       const finalText = await adapter.processStream(streamResult)
 
       return {
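
The 100 ms delay and 'word' chunking above are hardcoded. smoothStream also accepts 'line' chunking for coarser pacing, so the transform could be swapped without touching the adapter; the 50 ms value below is illustrative, not from this commit:

import { smoothStream } from 'ai'

// Coarser pacing: release whole lines instead of words (illustrative values).
const linePacedTransform = smoothStream({ delayInMs: 50, chunking: 'line' })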