feat: enhance provider settings and model configuration

- Updated `ModelConfig` to include a `mode` property for better differentiation between 'chat' and 'responses'.
- Modified `createBaseModel` to conditionally set the provider based on the new `mode` property in `providerSettings`.
- Refactored `RuntimeExecutor` to utilize the updated `ModelConfig` for improved type safety and clarity in provider settings.
- Adjusted imports in `executor.ts` and `types.ts` to align with the new model configuration structure.
This commit is contained in:
lizhixuan 2025-07-12 11:31:06 +08:00
parent 3ab904e789
commit 16e0154200
5 changed files with 19 additions and 10 deletions

View File

@@ -91,7 +91,11 @@ export async function createBaseModel({
// 加一个特判
if (providerConfig.id === 'openai') {
if (!isOpenAIChatCompletionOnlyModel(modelId)) {
if (
'mode' in providerSettings &&
providerSettings.mode === 'response' &&
!isOpenAIChatCompletionOnlyModel(modelId)
) {
provider = provider.responses
} else {
provider = provider.chat

View File

@@ -8,7 +8,8 @@ import type { ProviderId, ProviderSettingsMap } from '../../types'
export interface ModelConfig<T extends ProviderId = ProviderId> {
providerId: T
modelId: string
providerSettings: ProviderSettingsMap[T]
providerSettings: ProviderSettingsMap[T] & { mode: 'chat' | 'responses' }
middlewares?: LanguageModelV2Middleware[]
// 额外模型参数
extraModelConfig?: Record<string, any>
}

View File

@@ -5,8 +5,9 @@
import { LanguageModelV2, LanguageModelV2Middleware } from '@ai-sdk/provider'
import { generateObject, generateText, LanguageModel, streamObject, streamText } from 'ai'
import { type ProviderId, type ProviderSettingsMap } from '../../types'
import { type ProviderId } from '../../types'
import { createModel, getProviderInfo } from '../models'
import { type ModelConfig } from '../models/types'
import { type AiPlugin, type AiRequestContext, definePlugin } from '../plugins'
import { PluginEngine } from './pluginEngine'
import { type RuntimeConfig } from './types'
@@ -256,7 +257,7 @@ export class RuntimeExecutor<T extends ProviderId = ProviderId> {
*/
static create<T extends ProviderId>(
providerId: T,
options: ProviderSettingsMap[T],
options: ModelConfig<T>['providerSettings'],
plugins?: AiPlugin[]
): RuntimeExecutor<T> {
return new RuntimeExecutor({
@@ -270,7 +271,7 @@ export class RuntimeExecutor<T extends ProviderId = ProviderId> {
* OpenAI Compatible执行器
*/
static createOpenAICompatible(
options: ProviderSettingsMap['openai-compatible'],
options: ModelConfig<'openai-compatible'>['providerSettings'],
plugins: AiPlugin[] = []
): RuntimeExecutor<'openai-compatible'> {
return new RuntimeExecutor({

View File

@@ -1,7 +1,8 @@
/**
* Runtime
*/
import { type ProviderId, type ProviderSettingsMap } from '../../types'
import { type ProviderId } from '../../types'
import { type ModelConfig } from '../models'
import { type AiPlugin } from '../plugins'
/**
@@ -9,7 +10,7 @@ import { type AiPlugin } from '../plugins'
*/
export interface RuntimeConfig<T extends ProviderId = ProviderId> {
providerId: T
providerSettings: ProviderSettingsMap[T]
providerSettings: ModelConfig<T>['providerSettings']
plugins?: AiPlugin[]
}

View File

@@ -71,14 +71,16 @@ function providerToAiSdkConfig(actualProvider: Provider): {
const openaiResponseOptions =
actualProviderId === 'openai'
? {
compatibility: 'strict'
mode: 'response'
}
: aiSdkProviderId === 'openai'
? {
compatibility: 'compatible'
mode: 'chat'
}
: undefined
console.log('openaiResponseOptions', openaiResponseOptions)
console.log('actualProvider', actualProvider)
console.log('aiSdkProviderId', aiSdkProviderId)
if (AiCore.isSupported(aiSdkProviderId) && aiSdkProviderId !== 'openai-compatible') {
const options = ProviderConfigFactory.fromProvider(
aiSdkProviderId,