From 16e015420077dd4136daefe63d7df2fc5ef11cab Mon Sep 17 00:00:00 2001 From: lizhixuan Date: Sat, 12 Jul 2025 11:31:06 +0800 Subject: [PATCH] feat: enhance provider settings and model configuration - Updated `ModelConfig` to include a `mode` property for better differentiation between 'chat' and 'responses'. - Modified `createBaseModel` to conditionally set the provider based on the new `mode` property in `providerSettings`. - Refactored `RuntimeExecutor` to utilize the updated `ModelConfig` for improved type safety and clarity in provider settings. - Adjusted imports in `executor.ts` and `types.ts` to align with the new model configuration structure. --- packages/aiCore/src/core/models/ProviderCreator.ts | 6 +++++- packages/aiCore/src/core/models/types.ts | 3 ++- packages/aiCore/src/core/runtime/executor.ts | 7 ++++--- packages/aiCore/src/core/runtime/types.ts | 5 +++-- src/renderer/src/aiCore/index_new.ts | 8 +++++--- 5 files changed, 19 insertions(+), 10 deletions(-) diff --git a/packages/aiCore/src/core/models/ProviderCreator.ts b/packages/aiCore/src/core/models/ProviderCreator.ts index 6fc37b432f..811c5388e9 100644 --- a/packages/aiCore/src/core/models/ProviderCreator.ts +++ b/packages/aiCore/src/core/models/ProviderCreator.ts @@ -91,7 +91,11 @@ export async function createBaseModel({ // 加一个特判 if (providerConfig.id === 'openai') { - if (!isOpenAIChatCompletionOnlyModel(modelId)) { + if ( + 'mode' in providerSettings && + providerSettings.mode === 'responses' && + !isOpenAIChatCompletionOnlyModel(modelId) + ) { provider = provider.responses } else { provider = provider.chat diff --git a/packages/aiCore/src/core/models/types.ts b/packages/aiCore/src/core/models/types.ts index 47623a6359..c29e459557 100644 --- a/packages/aiCore/src/core/models/types.ts +++ b/packages/aiCore/src/core/models/types.ts @@ -8,7 +8,8 @@ import type { ProviderId, ProviderSettingsMap } from '../../types' export interface ModelConfig { providerId: T modelId: string - providerSettings: 
ProviderSettingsMap[T] + providerSettings: ProviderSettingsMap[T] & { mode: 'chat' | 'responses' } middlewares?: LanguageModelV2Middleware[] + // 额外模型参数 extraModelConfig?: Record } diff --git a/packages/aiCore/src/core/runtime/executor.ts b/packages/aiCore/src/core/runtime/executor.ts index 46ca3af2b5..a31e9e60b9 100644 --- a/packages/aiCore/src/core/runtime/executor.ts +++ b/packages/aiCore/src/core/runtime/executor.ts @@ -5,8 +5,9 @@ import { LanguageModelV2, LanguageModelV2Middleware } from '@ai-sdk/provider' import { generateObject, generateText, LanguageModel, streamObject, streamText } from 'ai' -import { type ProviderId, type ProviderSettingsMap } from '../../types' +import { type ProviderId } from '../../types' import { createModel, getProviderInfo } from '../models' +import { type ModelConfig } from '../models/types' import { type AiPlugin, type AiRequestContext, definePlugin } from '../plugins' import { PluginEngine } from './pluginEngine' import { type RuntimeConfig } from './types' @@ -256,7 +257,7 @@ export class RuntimeExecutor { */ static create( providerId: T, - options: ProviderSettingsMap[T], + options: ModelConfig['providerSettings'], plugins?: AiPlugin[] ): RuntimeExecutor { return new RuntimeExecutor({ @@ -270,7 +271,7 @@ export class RuntimeExecutor { * 创建OpenAI Compatible执行器 */ static createOpenAICompatible( - options: ProviderSettingsMap['openai-compatible'], + options: ModelConfig<'openai-compatible'>['providerSettings'], plugins: AiPlugin[] = [] ): RuntimeExecutor<'openai-compatible'> { return new RuntimeExecutor({ diff --git a/packages/aiCore/src/core/runtime/types.ts b/packages/aiCore/src/core/runtime/types.ts index c5cb6cc7ef..83bdd9ecdd 100644 --- a/packages/aiCore/src/core/runtime/types.ts +++ b/packages/aiCore/src/core/runtime/types.ts @@ -1,7 +1,8 @@ /** * Runtime 层类型定义 */ -import { type ProviderId, type ProviderSettingsMap } from '../../types' +import { type ProviderId } from '../../types' +import { type ModelConfig } from 
'../models' import { type AiPlugin } from '../plugins' /** @@ -9,7 +10,7 @@ import { type AiPlugin } from '../plugins' */ export interface RuntimeConfig { providerId: T - providerSettings: ProviderSettingsMap[T] + providerSettings: ModelConfig['providerSettings'] plugins?: AiPlugin[] } diff --git a/src/renderer/src/aiCore/index_new.ts b/src/renderer/src/aiCore/index_new.ts index 3a586e2fae..597dcd9835 100644 --- a/src/renderer/src/aiCore/index_new.ts +++ b/src/renderer/src/aiCore/index_new.ts @@ -71,14 +71,16 @@ function providerToAiSdkConfig(actualProvider: Provider): { const openaiResponseOptions = actualProviderId === 'openai' ? { - compatibility: 'strict' + mode: 'responses' } : aiSdkProviderId === 'openai' ? { - compatibility: 'compatible' + mode: 'chat' } : undefined - + console.log('openaiResponseOptions', openaiResponseOptions) + console.log('actualProvider', actualProvider) + console.log('aiSdkProviderId', aiSdkProviderId) if (AiCore.isSupported(aiSdkProviderId) && aiSdkProviderId !== 'openai-compatible') { const options = ProviderConfigFactory.fromProvider( aiSdkProviderId,