diff --git a/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts b/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts
index 46e01d06d6..617637a7e1 100644
--- a/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts
+++ b/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts
@@ -27,7 +27,7 @@ import {
 import {
   isSupportArrayContentProvider,
   isSupportDeveloperRoleProvider,
-  isSupportQwen3EnableThinkingProvider,
+  isSupportEnableThinkingProvider,
   isSupportStreamOptionsProvider
 } from '@renderer/config/providers'
 import { processPostsuffixQwen3Model, processReqMessages } from '@renderer/services/ModelMessageService'
@@ -151,7 +151,10 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
       return { reasoning: { enabled: false, exclude: true } }
     }
 
-    if (isSupportedThinkingTokenQwenModel(model) || isSupportedThinkingTokenHunyuanModel(model)) {
+    if (
+      isSupportEnableThinkingProvider(this.provider) &&
+      (isSupportedThinkingTokenQwenModel(model) || isSupportedThinkingTokenHunyuanModel(model))
+    ) {
       return { enable_thinking: false }
     }
 
@@ -201,7 +204,8 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
     // Qwen models
     if (isQwenReasoningModel(model)) {
       const thinkConfig = {
-        enable_thinking: isQwenAlwaysThinkModel(model) ? undefined : true,
+        enable_thinking:
+          isQwenAlwaysThinkModel(model) || !isSupportEnableThinkingProvider(this.provider) ? undefined : true,
         thinking_budget: budgetTokens
       }
       if (this.provider.id === 'dashscope') {
@@ -214,7 +218,7 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
     }
 
     // Hunyuan models
-    if (isSupportedThinkingTokenHunyuanModel(model)) {
+    if (isSupportedThinkingTokenHunyuanModel(model) && isSupportEnableThinkingProvider(this.provider)) {
       return {
         enable_thinking: true
       }
@@ -544,7 +548,7 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
     if (
       lastUserMsg &&
       isSupportedThinkingTokenQwenModel(model) &&
-      !isSupportQwen3EnableThinkingProvider(this.provider)
+      !isSupportEnableThinkingProvider(this.provider)
     ) {
       const postsuffix = '/no_think'
       const qwenThinkModeEnabled = assistant.settings?.qwenThinkMode === true
diff --git a/src/renderer/src/config/providers.ts b/src/renderer/src/config/providers.ts
index 564b4663da..1a26e2c882 100644
--- a/src/renderer/src/config/providers.ts
+++ b/src/renderer/src/config/providers.ts
@@ -52,7 +52,7 @@ import VoyageAIProviderLogo from '@renderer/assets/images/providers/voyageai.png
 import XirangProviderLogo from '@renderer/assets/images/providers/xirang.png'
 import ZeroOneProviderLogo from '@renderer/assets/images/providers/zero-one.png'
 import ZhipuProviderLogo from '@renderer/assets/images/providers/zhipu.png'
-import { OpenAIServiceTiers, Provider, SystemProvider, SystemProviderId } from '@renderer/types'
+import { AtLeast, OpenAIServiceTiers, Provider, SystemProvider, SystemProviderId } from '@renderer/types'
 
 import { TOKENFLUX_HOST } from './constant'
 import { SYSTEM_MODELS } from './models'
@@ -593,7 +593,7 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
 
 export const SYSTEM_PROVIDERS: SystemProvider[] = Object.values(SYSTEM_PROVIDERS_CONFIG)
 
-const PROVIDER_LOGO_MAP = {
+const PROVIDER_LOGO_MAP: AtLeast<SystemProviderId, string> = {
   ph8: Ph8ProviderLogo,
   '302ai': Ai302ProviderLogo,
   openai: OpenAiProviderLogo,
@@ -656,8 +656,8 @@ export function getProviderLogo(providerId: string) {
 }
 
 // export const SUPPORTED_REANK_PROVIDERS = ['silicon', 'jina', 'voyageai', 'dashscope', 'aihubmix']
-export const NOT_SUPPORTED_REANK_PROVIDERS = ['ollama']
-export const ONLY_SUPPORTED_DIMENSION_PROVIDERS = ['ollama', 'infini']
+export const NOT_SUPPORTED_RERANK_PROVIDERS = ['ollama', 'lmstudio'] as const satisfies SystemProviderId[]
+export const ONLY_SUPPORTED_DIMENSION_PROVIDERS = ['ollama', 'infini'] as const satisfies SystemProviderId[]
 
 type ProviderUrls = {
   api: {
@@ -1247,7 +1247,7 @@ const NOT_SUPPORT_ARRAY_CONTENT_PROVIDERS = [
  */
 export const isSupportArrayContentProvider = (provider: Provider) => {
   return (
-    provider.isNotSupportArrayContent !== true &&
+    provider.apiOptions?.isNotSupportArrayContent !== true &&
     !NOT_SUPPORT_ARRAY_CONTENT_PROVIDERS.some((pid) => pid === provider.id)
   )
 }
@@ -1259,7 +1259,7 @@ const NOT_SUPPORT_DEVELOPER_ROLE_PROVIDERS = ['poe'] as const satisfies SystemPr
  */
 export const isSupportDeveloperRoleProvider = (provider: Provider) => {
   return (
-    provider.isNotSupportDeveloperRole !== true &&
+    provider.apiOptions?.isNotSupportDeveloperRole !== true &&
     !NOT_SUPPORT_DEVELOPER_ROLE_PROVIDERS.some((pid) => pid === provider.id)
   )
 }
@@ -1271,18 +1271,22 @@ const NOT_SUPPORT_STREAM_OPTIONS_PROVIDERS = ['mistral'] as const satisfies Syst
  */
 export const isSupportStreamOptionsProvider = (provider: Provider) => {
   return (
-    provider.isNotSupportStreamOptions !== true &&
+    provider.apiOptions?.isNotSupportStreamOptions !== true &&
     !NOT_SUPPORT_STREAM_OPTIONS_PROVIDERS.some((pid) => pid === provider.id)
   )
 }
 
-const SUPPORT_QWEN3_ENABLE_THINKING_PROVIDER = ['dashscope', 'modelscope'] as const satisfies SystemProviderId[]
+// NOTE: 暂时不知道哪些系统提供商不支持该参数,先默认都支持。出问题的时候可以先用自定义参数顶着
+const NOT_SUPPORT_QWEN3_ENABLE_THINKING_PROVIDER = [] as const satisfies SystemProviderId[]
 
 /**
- * 判断提供商是否支持使用enable_thinking参数来控制Qwen3系列模型的思考。 Only for OpenAI Chat Completions API.
+ * 判断提供商是否支持使用 enable_thinking 参数来控制 Qwen3 等模型的思考。 Only for OpenAI Chat Completions API.
  */
-export const isSupportQwen3EnableThinkingProvider = (provider: Provider) => {
-  return SUPPORT_QWEN3_ENABLE_THINKING_PROVIDER.some((pid) => pid === provider.id)
+export const isSupportEnableThinkingProvider = (provider: Provider) => {
+  return (
+    provider.apiOptions?.isNotSupportEnableThinking !== true &&
+    !NOT_SUPPORT_QWEN3_ENABLE_THINKING_PROVIDER.some((pid) => pid === provider.id)
+  )
 }
 
 const NOT_SUPPORT_SERVICE_TIER_PROVIDERS = ['github', 'copilot'] as const satisfies SystemProviderId[]
@@ -1292,6 +1296,7 @@ const NOT_SUPPORT_SERVICE_TIER_PROVIDERS = ['github', 'copilot'] as const satisf
  */
 export const isSupportServiceTierProviders = (provider: Provider) => {
   return (
-    provider.isNotSupportServiceTier !== true || !NOT_SUPPORT_SERVICE_TIER_PROVIDERS.some((pid) => pid === provider.id)
+    provider.apiOptions?.isNotSupportServiceTier !== true &&
+    !NOT_SUPPORT_SERVICE_TIER_PROVIDERS.some((pid) => pid === provider.id)
   )
 }
diff --git a/src/renderer/src/i18n/locales/en-us.json b/src/renderer/src/i18n/locales/en-us.json
index f189f1d3d1..9c98ec4b8a 100644
--- a/src/renderer/src/i18n/locales/en-us.json
+++ b/src/renderer/src/i18n/locales/en-us.json
@@ -3160,6 +3160,10 @@
         "help": "Does the provider support messages with role: \"developer\"?",
         "label": "Support Developer Message"
       },
+      "enable_thinking": {
+        "help": "Does the provider support controlling the reasoning of models like Qwen3 via the enable_thinking parameter?",
+        "label": "Support enable_thinking"
+      },
       "label": "API Settings",
       "service_tier": {
         "help": "Whether the provider supports configuring the service_tier parameter. When enabled, this parameter can be adjusted in the service tier settings on the chat page. (OpenAI models only)",
diff --git a/src/renderer/src/i18n/locales/ja-jp.json b/src/renderer/src/i18n/locales/ja-jp.json
index 7b6ebde903..3c76e68a4a 100644
--- a/src/renderer/src/i18n/locales/ja-jp.json
+++ b/src/renderer/src/i18n/locales/ja-jp.json
@@ -3160,6 +3160,10 @@
         "help": "このプロバイダーは role: \"developer\" のメッセージをサポートしていますか",
         "label": "Developer Message をサポート"
       },
+      "enable_thinking": {
+        "help": "このプロバイダーは、enable_thinking パラメータを使用して Qwen3 などのモデルの思考を制御することをサポートしていますか。",
+        "label": "enable_thinking をサポート"
+      },
       "label": "API設定",
       "service_tier": {
         "help": "このプロバイダーがservice_tierパラメータの設定をサポートしているかどうか。有効にすると、チャットページのサービスレベル設定でこのパラメータを調整できます。(OpenAIモデルのみ対象)",
diff --git a/src/renderer/src/i18n/locales/ru-ru.json b/src/renderer/src/i18n/locales/ru-ru.json
index 1b6ebbf3c4..13787ee5b8 100644
--- a/src/renderer/src/i18n/locales/ru-ru.json
+++ b/src/renderer/src/i18n/locales/ru-ru.json
@@ -3160,6 +3160,10 @@
         "help": "Предоставляет ли этот провайдер сообщения с ролью: \"разработчик\"",
         "label": "Поддержка сообщения разработчика"
       },
+      "enable_thinking": {
+        "help": "Поддерживает ли данный провайдер возможность управления мышлением моделей, таких как Qwen3, с помощью параметра enable_thinking",
+        "label": "Поддержка enable_thinking"
+      },
       "label": "API настройки",
       "service_tier": {
         "help": "Поддерживает ли этот провайдер настройку параметра service_tier? После включения параметр можно настроить в настройках уровня обслуживания на странице диалога. (Только для моделей OpenAI)",
diff --git a/src/renderer/src/i18n/locales/zh-cn.json b/src/renderer/src/i18n/locales/zh-cn.json
index 5c5cc2e6d7..165da9373c 100644
--- a/src/renderer/src/i18n/locales/zh-cn.json
+++ b/src/renderer/src/i18n/locales/zh-cn.json
@@ -3160,6 +3160,10 @@
         "help": "该提供商是否支持 role: \"developer\" 的消息",
         "label": "支持 Developer Message"
       },
+      "enable_thinking": {
+        "help": "该提供商是否支持通过 enable_thinking 参数控制 Qwen3 等模型的思考",
+        "label": "支持 enable_thinking"
+      },
       "label": "API 设置",
       "service_tier": {
         "help": "该提供商是否支持配置 service_tier 参数。开启后,可在对话页面的服务层级设置中调整该参数。(仅限OpenAI模型)",
diff --git a/src/renderer/src/i18n/locales/zh-tw.json b/src/renderer/src/i18n/locales/zh-tw.json
index 11545fe75f..ceed2351f9 100644
--- a/src/renderer/src/i18n/locales/zh-tw.json
+++ b/src/renderer/src/i18n/locales/zh-tw.json
@@ -3160,6 +3160,10 @@
         "help": "該提供商是否支援 role: \"developer\" 的訊息",
         "label": "支援開發人員訊息"
       },
+      "enable_thinking": {
+        "help": "該提供商是否支援透過 enable_thinking 參數控制 Qwen3 等模型的思考",
+        "label": "支援 enable_thinking"
+      },
       "label": "API 設定",
       "service_tier": {
         "help": "該提供商是否支援設定 service_tier 參數。啟用後,可在對話頁面的服務層級設定中調整此參數。(僅限 OpenAI 模型)",
diff --git a/src/renderer/src/i18n/translate/el-gr.json b/src/renderer/src/i18n/translate/el-gr.json
index 9367ffc311..9565519fb5 100644
--- a/src/renderer/src/i18n/translate/el-gr.json
+++ b/src/renderer/src/i18n/translate/el-gr.json
@@ -3160,6 +3160,10 @@
         "help": "Ο πάροχος υποστηρίζει μηνύματα με ρόλο: \"developer\";",
         "label": "Υποστήριξη μηνύματος προγραμματιστή"
       },
+      "enable_thinking": {
+        "help": "Ο πάροχος υποστηρίζει τον έλεγχο της σκέψης μοντέλων όπως το Qwen3 μέσω της παραμέτρου enable_thinking;",
+        "label": "Υποστήριξη enable_thinking"
+      },
       "label": "Ρυθμίσεις API",
       "service_tier": {
         "help": "Εάν ο πάροχος υποστηρίζει τη δυνατότητα διαμόρφωσης της παραμέτρου service_tier. Αν είναι ενεργοποιημένη, αυτή η παράμετρος μπορεί να ρυθμιστεί μέσω της ρύθμισης επιπέδου υπηρεσίας στη σελίδα διαλόγου. (Μόνο για μοντέλα OpenAI)",
diff --git a/src/renderer/src/i18n/translate/es-es.json b/src/renderer/src/i18n/translate/es-es.json
index 60f86861f3..d6637609c2 100644
--- a/src/renderer/src/i18n/translate/es-es.json
+++ b/src/renderer/src/i18n/translate/es-es.json
@@ -3160,6 +3160,10 @@
         "help": "¿Admite el proveedor mensajes con el rol: \"developer\"?",
         "label": "Mensajes para desarrolladores compatibles"
       },
+      "enable_thinking": {
+        "help": "¿Admite este proveedor el control del pensamiento de modelos como Qwen3 mediante el parámetro enable_thinking?",
+        "label": "Soporta enable_thinking"
+      },
       "label": "Configuración de la API",
       "service_tier": {
         "help": "Si el proveedor admite la configuración del parámetro service_tier. Al activarlo, se podrá ajustar este parámetro en la configuración del nivel de servicio en la página de conversación. (Solo para modelos OpenAI)",
diff --git a/src/renderer/src/i18n/translate/fr-fr.json b/src/renderer/src/i18n/translate/fr-fr.json
index 2a5aa868a9..d8f8c4d9fa 100644
--- a/src/renderer/src/i18n/translate/fr-fr.json
+++ b/src/renderer/src/i18n/translate/fr-fr.json
@@ -3160,6 +3160,10 @@
         "help": "Le fournisseur prend-il en charge les messages avec le rôle : « développeur » ?",
         "label": "Prise en charge du message développeur"
       },
+      "enable_thinking": {
+        "help": "Le fournisseur prend-il en charge le contrôle de la réflexion des modèles tels que Qwen3 via le paramètre enable_thinking ?",
+        "label": "Prise en charge de enable_thinking"
+      },
       "label": "Paramètres de l'API",
       "service_tier": {
         "help": "Le fournisseur prend-il en charge la configuration du paramètre service_tier ? Lorsqu'il est activé, ce paramètre peut être ajusté dans les paramètres de niveau de service sur la page de conversation. (Modèles OpenAI uniquement)",
diff --git a/src/renderer/src/i18n/translate/pt-pt.json b/src/renderer/src/i18n/translate/pt-pt.json
index 1de48e429f..649df406f6 100644
--- a/src/renderer/src/i18n/translate/pt-pt.json
+++ b/src/renderer/src/i18n/translate/pt-pt.json
@@ -3160,6 +3160,10 @@
         "help": "O fornecedor suporta mensagens com role: \"developer\"?",
         "label": "Mensagem de suporte ao programador"
       },
+      "enable_thinking": {
+        "help": "O fornecedor suporta o controlo do pensamento de modelos como o Qwen3 através do parâmetro enable_thinking?",
+        "label": "Apoiar enable_thinking"
+      },
       "label": "Definições da API",
       "service_tier": {
         "help": "Se o fornecedor suporta a configuração do parâmetro service_tier. Quando ativado, este parâmetro pode ser ajustado nas definições do nível de serviço na página de conversa. (Apenas para modelos OpenAI)",
diff --git a/src/renderer/src/pages/settings/ProviderSettings/ApiOptionsSettings.tsx b/src/renderer/src/pages/settings/ProviderSettings/ApiOptionsSettings.tsx
index 78b4982533..dcf80dbb56 100644
--- a/src/renderer/src/pages/settings/ProviderSettings/ApiOptionsSettings.tsx
+++ b/src/renderer/src/pages/settings/ProviderSettings/ApiOptionsSettings.tsx
@@ -38,36 +38,55 @@ const ApiOptionsSettings = ({ providerId }: Props) => {
         label: t('settings.provider.api.options.developer_role.label'),
         tip: t('settings.provider.api.options.developer_role.help'),
         onChange: (checked: boolean) => {
-          updateProviderTransition({ ...provider, isNotSupportDeveloperRole: !checked })
+          updateProviderTransition({
+            apiOptions: { ...provider.apiOptions, isNotSupportDeveloperRole: !checked }
+          })
         },
-        checked: !provider.isNotSupportDeveloperRole
+        checked: !provider.apiOptions?.isNotSupportDeveloperRole
       },
       {
         key: 'openai_stream_options',
         label: t('settings.provider.api.options.stream_options.label'),
         tip: t('settings.provider.api.options.stream_options.help'),
         onChange: (checked: boolean) => {
-          updateProviderTransition({ ...provider, isNotSupportStreamOptions: !checked })
+          updateProviderTransition({
+            apiOptions: { ...provider.apiOptions, isNotSupportStreamOptions: !checked }
+          })
         },
-        checked: !provider.isNotSupportStreamOptions
+        checked: !provider.apiOptions?.isNotSupportStreamOptions
       },
       {
         key: 'openai_array_content',
         label: t('settings.provider.api.options.array_content.label'),
         tip: t('settings.provider.api.options.array_content.help'),
         onChange: (checked: boolean) => {
-          updateProviderTransition({ ...provider, isNotSupportArrayContent: !checked })
+          updateProviderTransition({
+            apiOptions: { ...provider.apiOptions, isNotSupportArrayContent: !checked }
+          })
         },
-        checked: !provider.isNotSupportArrayContent
+        checked: !provider.apiOptions?.isNotSupportArrayContent
       },
       {
         key: 'openai_service_tier',
         label: t('settings.provider.api.options.service_tier.label'),
         tip: t('settings.provider.api.options.service_tier.help'),
         onChange: (checked: boolean) => {
-          updateProviderTransition({ ...provider, isNotSupportServiceTier: !checked })
+          updateProviderTransition({
+            apiOptions: { ...provider.apiOptions, isNotSupportServiceTier: !checked }
+          })
         },
-        checked: !provider.isNotSupportServiceTier
+        checked: !provider.apiOptions?.isNotSupportServiceTier
+      },
+      {
+        key: 'openai_enable_thinking',
+        label: t('settings.provider.api.options.enable_thinking.label'),
+        tip: t('settings.provider.api.options.enable_thinking.help'),
+        onChange: (checked: boolean) => {
+          updateProviderTransition({
+            apiOptions: { ...provider.apiOptions, isNotSupportEnableThinking: !checked }
+          })
+        },
+        checked: !provider.apiOptions?.isNotSupportEnableThinking
       }
     ],
     [t, provider, updateProviderTransition]
diff --git a/src/renderer/src/pages/settings/ToolSettings/WebSearchSettings/CompressionSettings/RagSettings.tsx b/src/renderer/src/pages/settings/ToolSettings/WebSearchSettings/CompressionSettings/RagSettings.tsx
index a4bdac7b3a..325915f21d 100644
--- a/src/renderer/src/pages/settings/ToolSettings/WebSearchSettings/CompressionSettings/RagSettings.tsx
+++ b/src/renderer/src/pages/settings/ToolSettings/WebSearchSettings/CompressionSettings/RagSettings.tsx
@@ -2,7 +2,7 @@ import InputEmbeddingDimension from '@renderer/components/InputEmbeddingDimensio
 import ModelSelector from '@renderer/components/ModelSelector'
 import { DEFAULT_WEBSEARCH_RAG_DOCUMENT_COUNT } from '@renderer/config/constant'
 import { isEmbeddingModel, isRerankModel } from '@renderer/config/models'
-import { NOT_SUPPORTED_REANK_PROVIDERS } from '@renderer/config/providers'
+import { NOT_SUPPORTED_RERANK_PROVIDERS } from '@renderer/config/providers'
 import { useProviders } from '@renderer/hooks/useProvider'
 import { useWebSearchSettings } from '@renderer/hooks/useWebSearchProviders'
 import { SettingDivider, SettingRow, SettingRowTitle } from '@renderer/pages/settings'
@@ -30,7 +30,7 @@ const RagSettings = () => {
   }, [providers])
 
   const rerankProviders = useMemo(() => {
-    return providers.filter((p) => !NOT_SUPPORTED_REANK_PROVIDERS.includes(p.id))
+    return providers.filter((p) => !NOT_SUPPORTED_RERANK_PROVIDERS.some((pid) => p.id === pid))
   }, [providers])
 
   const handleEmbeddingModelChange = (modelValue: string) => {
diff --git a/src/renderer/src/store/index.ts b/src/renderer/src/store/index.ts
index e41befe6b4..1789d2fdbc 100644
--- a/src/renderer/src/store/index.ts
+++ b/src/renderer/src/store/index.ts
@@ -60,7 +60,7 @@ const persistedReducer = persistReducer(
   {
     key: 'cherry-studio',
     storage,
-    version: 128,
+    version: 129,
     blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs'],
     migrate
   },
diff --git a/src/renderer/src/store/migrate.ts b/src/renderer/src/store/migrate.ts
index 918aad5878..cdabaddc7c 100644
--- a/src/renderer/src/store/migrate.ts
+++ b/src/renderer/src/store/migrate.ts
@@ -18,6 +18,7 @@ import {
   LanguageCode,
   Model,
   Provider,
+  ProviderApiOptions,
   SystemProviderIds,
   WebSearchProvider
 } from '@renderer/types'
@@ -2054,6 +2055,29 @@ const migrateConfig = {
       logger.error('migrate 128 error', error as Error)
       return state
     }
+  },
+  '129': (state: RootState) => {
+    try {
+      // 聚合 api options
+      state.llm.providers.forEach((p) => {
+        if (isSystemProvider(p)) {
+          updateProvider(state, p.id, { apiOptions: undefined })
+        } else {
+          const changes: ProviderApiOptions = {
+            isNotSupportArrayContent: p.isNotSupportArrayContent,
+            isNotSupportServiceTier: p.isNotSupportServiceTier,
+            isNotSupportDeveloperRole: p.isNotSupportDeveloperRole,
+            isNotSupportStreamOptions: p.isNotSupportStreamOptions
+          }
+          updateProvider(state, p.id, { apiOptions: changes })
+        }
+      })
+
+      return state
+    } catch (error) {
+      logger.error('migrate 129 error', error as Error)
+      return state
+    }
   }
 }
 
diff --git a/src/renderer/src/types/index.ts b/src/renderer/src/types/index.ts
index fe1d145b7c..82e93b7aa2 100644
--- a/src/renderer/src/types/index.ts
+++ b/src/renderer/src/types/index.ts
@@ -192,6 +192,20 @@ export type User = {
   email: string
 }
 
+// undefined 视为支持,默认支持
+export type ProviderApiOptions = {
+  /** 是否不支持 message 的 content 为数组类型 */
+  isNotSupportArrayContent?: boolean
+  /** 是否不支持 stream_options 参数 */
+  isNotSupportStreamOptions?: boolean
+  /** 是否不支持 message 的 role 为 developer */
+  isNotSupportDeveloperRole?: boolean
+  /** 是否不支持 service_tier 参数. Only for OpenAI Models. */
+  isNotSupportServiceTier?: boolean
+  /** 是否不支持 enable_thinking 参数 */
+  isNotSupportEnableThinking?: boolean
+}
+
 export type Provider = {
   id: string
   type: ProviderType
@@ -206,17 +220,18 @@ export type Provider = {
   rateLimit?: number
 
   // API options
-  // undefined 视为支持,默认支持
-  /** 是否不支持 message 的 content 为数组类型 */
-  isNotSupportArrayContent?: boolean
-  /** 是否不支持 stream_options 参数 */
-  isNotSupportStreamOptions?: boolean
-  /** 是否不支持 message 的 role 为 developer */
-  isNotSupportDeveloperRole?: boolean
-  /** 是否不支持 service_tier 参数. Only for OpenAI Models. */
-  isNotSupportServiceTier?: boolean
+  apiOptions?: ProviderApiOptions
   serviceTier?: ServiceTier
 
+  /** @deprecated */
+  isNotSupportArrayContent?: boolean
+  /** @deprecated */
+  isNotSupportStreamOptions?: boolean
+  /** @deprecated */
+  isNotSupportDeveloperRole?: boolean
+  /** @deprecated */
+  isNotSupportServiceTier?: boolean
+
   isVertex?: boolean
   notes?: string
   extra_headers?: Record<string, string>
@@ -286,6 +301,7 @@ export const isSystemProviderId = (id: string): id is SystemProviderId => {
 export type SystemProvider = Provider & {
   id: SystemProviderId
   isSystem: true
+  apiOptions?: never
 }
 
 /**
@@ -1065,3 +1081,20 @@ export interface MemoryListOptions extends MemoryEntity {
 export interface MemoryDeleteAllOptions extends MemoryEntity {}
 
 // ========================================================================
+
+/**
+ * 表示一个对象类型,该对象至少包含类型T中指定的所有键,这些键的值类型为U
+ * 同时也允许包含其他任意string类型的键,这些键的值类型也必须是U
+ * @template T - 必需包含的键的字面量字符串联合类型
+ * @template U - 所有键对应值的类型
+ * @example
+ * type Example = AtLeast<'a' | 'b', number>;
+ * // 结果类型允许:
+ * const obj1: Example = { a: 1, b: 2 }; // 只包含必需的键
+ * const obj2: Example = { a: 1, b: 2, c: 3 }; // 包含额外的键
+ */
+export type AtLeast<T extends string, U> = {
+  [K in T]: U
+} & {
+  [key: string]: U
+}
diff --git a/src/renderer/src/utils/__tests__/match.test.ts b/src/renderer/src/utils/__tests__/match.test.ts
index f813241f3f..223382ea01 100644
--- a/src/renderer/src/utils/__tests__/match.test.ts
+++ b/src/renderer/src/utils/__tests__/match.test.ts
@@ -4,7 +4,7 @@ import { describe, expect, it } from 'vitest'
 import { includeKeywords, matchKeywordsInModel, matchKeywordsInProvider, matchKeywordsInString } from '../match'
 
 describe('match', () => {
-  const provider: Provider = {
+  const provider = {
     id: '12345',
     type: 'openai',
     name: 'OpenAI',
@@ -12,13 +12,14 @@ describe('match', () => {
     apiHost: '',
     models: [],
     isSystem: false
-  }
+  } as const satisfies Provider
+
   const sysProvider: SystemProvider = {
     ...provider,
     id: 'dashscope',
     name: 'doesnt matter',
     isSystem: true
-  }
+  } as const
 
   describe('includeKeywords', () => {
     it('should return true if keywords is empty or blank', () => {
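For reference (not part of the patch): a minimal, self-contained sketch of how the new apiOptions.isNotSupportEnableThinking flag is read by isSupportEnableThinkingProvider — undefined is treated as supported, and only an explicit opt-out suppresses the enable_thinking parameter. The ProviderLike shape and the 'my-openai-compatible' id below are illustrative only, not the app's real types.

// Reference sketch — mirrors the check added in providers.ts above.
type ProviderApiOptions = { isNotSupportEnableThinking?: boolean }
type ProviderLike = { id: string; apiOptions?: ProviderApiOptions }

// Per the NOTE in the patch, no system provider is opted out by default.
const NOT_SUPPORT_ENABLE_THINKING_PROVIDERS: string[] = []

function isSupportEnableThinkingProvider(provider: ProviderLike): boolean {
  // undefined counts as "supported"; only an explicit `true` opts out
  return (
    provider.apiOptions?.isNotSupportEnableThinking !== true &&
    !NOT_SUPPORT_ENABLE_THINKING_PROVIDERS.some((pid) => pid === provider.id)
  )
}

console.log(isSupportEnableThinkingProvider({ id: 'dashscope' })) // true (default)
console.log(
  isSupportEnableThinkingProvider({
    id: 'my-openai-compatible', // hypothetical custom provider
    apiOptions: { isNotSupportEnableThinking: true } // user switched the new toggle off
  })
) // false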