diff --git a/packages/shared/utils.ts b/packages/shared/utils.ts
index e87e2f2bef..a14f78958d 100644
--- a/packages/shared/utils.ts
+++ b/packages/shared/utils.ts
@@ -4,3 +4,34 @@ export const defaultAppHeaders = () => {
     'X-Title': 'Cherry Studio'
   }
 }
+
+// The following two functions are not being used for now.
+// I may use them in the future, so I'm keeping them commented out. - by eurfelux
+
+/**
+ * Converts an `undefined` value to `null`, otherwise returns the value as-is.
+ * @param value - The value to check
+ * @returns `null` if the input is `undefined`; otherwise the input value
+ */
+
+// export function toNullIfUndefined<T>(value: T | undefined): T | null {
+//   if (value === undefined) {
+//     return null
+//   } else {
+//     return value
+//   }
+// }
+
+/**
+ * Converts a `null` value to `undefined`, otherwise returns the value as-is.
+ * @param value - The value to check
+ * @returns `undefined` if the input is `null`; otherwise the input value
+ */
+
+// export function toUndefinedIfNull<T>(value: T | null): T | undefined {
+//   if (value === null) {
+//     return undefined
+//   } else {
+//     return value
+//   }
+// }
diff --git a/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts b/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts
index f520162496..1caf483205 100644
--- a/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts
+++ b/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts
@@ -19,7 +19,6 @@ import type {
   MCPToolResponse,
   MemoryItem,
   Model,
-  OpenAIVerbosity,
   Provider,
   ToolCallResponse,
   WebSearchProviderResponse,
@@ -33,6 +32,7 @@ import {
   OpenAIServiceTiers,
   SystemProviderIds
 } from '@renderer/types'
+import type { OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
 import type { Message } from '@renderer/types/newMessage'
 import type {
   RequestOptions,
diff --git a/src/renderer/src/aiCore/prepareParams/parameterBuilder.ts b/src/renderer/src/aiCore/prepareParams/parameterBuilder.ts
index d55dd9d55e..6f8747a7c5 100644
--- a/src/renderer/src/aiCore/prepareParams/parameterBuilder.ts
+++ b/src/renderer/src/aiCore/prepareParams/parameterBuilder.ts
@@ -61,7 +61,7 @@ export async function buildStreamTextParams(
     timeout?: number
     headers?: Record<string, string>
   }
-  } = {}
+  }
 ): Promise<{
   params: StreamTextParams
   modelId: string
diff --git a/src/renderer/src/aiCore/utils/options.ts b/src/renderer/src/aiCore/utils/options.ts
index 7f4cd33608..2dc142cc46 100644
--- a/src/renderer/src/aiCore/utils/options.ts
+++ b/src/renderer/src/aiCore/utils/options.ts
@@ -1,3 +1,7 @@
+import type { AnthropicProviderOptions } from '@ai-sdk/anthropic'
+import type { GoogleGenerativeAIProviderOptions } from '@ai-sdk/google'
+import type { OpenAIResponsesProviderOptions } from '@ai-sdk/openai'
+import type { XaiProviderOptions } from '@ai-sdk/xai'
 import { baseProviderIdSchema, customProviderIdSchema } from '@cherrystudio/ai-core/provider'
 import { loggerService } from '@logger'
 import {
@@ -9,15 +13,28 @@ import {
 } from '@renderer/config/models'
 import { isSupportServiceTierProvider } from '@renderer/config/providers'
 import { mapLanguageToQwenMTModel } from '@renderer/config/translate'
-import type { Assistant, Model, Provider } from '@renderer/types'
+import { getStoreSetting } from '@renderer/hooks/useSettings'
+import type { RootState } from '@renderer/store'
+import type {
+  Assistant,
+  GroqServiceTier,
+  GroqSystemProvider,
+  Model,
+  NotGroqProvider,
+  OpenAIServiceTier,
+  Provider,
+  ServiceTier
+} from '@renderer/types'
 import {
   GroqServiceTiers,
   isGroqServiceTier,
+  isGroqSystemProvider,
   isOpenAIServiceTier,
   isTranslateAssistant,
-  OpenAIServiceTiers,
-  SystemProviderIds
+  OpenAIServiceTiers
 } from '@renderer/types'
+import type { OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
+import type { JSONValue } from 'ai'
 import { t } from 'i18next'

 import { getAiSdkProviderId } from '../provider/factory'
@@ -35,8 +52,31 @@ import { getWebSearchParams } from './websearch'

 const logger = loggerService.withContext('aiCore.utils.options')

-// copy from BaseApiClient.ts
-const getServiceTier = (model: Model, provider: Provider) => {
+function toOpenAIServiceTier(model: Model, serviceTier: ServiceTier): OpenAIServiceTier {
+  if (
+    !isOpenAIServiceTier(serviceTier) ||
+    (serviceTier === OpenAIServiceTiers.flex && !isSupportFlexServiceTierModel(model))
+  ) {
+    return undefined
+  } else {
+    return serviceTier
+  }
+}
+
+function toGroqServiceTier(model: Model, serviceTier: ServiceTier): GroqServiceTier {
+  if (
+    !isGroqServiceTier(serviceTier) ||
+    (serviceTier === GroqServiceTiers.flex && !isSupportFlexServiceTierModel(model))
+  ) {
+    return undefined
+  } else {
+    return serviceTier
+  }
+}
+
+function getServiceTier<T extends GroqSystemProvider>(model: Model, provider: T): GroqServiceTier
+function getServiceTier<T extends NotGroqProvider>(model: Model, provider: T): OpenAIServiceTier
+function getServiceTier<T extends Provider>(model: Model, provider: T): OpenAIServiceTier | GroqServiceTier {
   const serviceTierSetting = provider.serviceTier

   if (!isSupportServiceTierProvider(provider) || !isOpenAIModel(model) || !serviceTierSetting) {
@@ -44,24 +84,17 @@ const getServiceTier = (model: Model, provider: Provider) => {
   }

   // Handle providers that need to fall back to the default value
-  if (provider.id === SystemProviderIds.groq) {
-    if (
-      !isGroqServiceTier(serviceTierSetting) ||
-      (serviceTierSetting === GroqServiceTiers.flex && !isSupportFlexServiceTierModel(model))
-    ) {
-      return undefined
-    }
+  if (isGroqSystemProvider(provider)) {
+    return toGroqServiceTier(model, serviceTierSetting)
   } else {
     // Other OpenAI-style providers; assume their service tier settings match OpenAI's exactly
-    if (
-      !isOpenAIServiceTier(serviceTierSetting) ||
-      (serviceTierSetting === OpenAIServiceTiers.flex && !isSupportFlexServiceTierModel(model))
-    ) {
-      return undefined
-    }
+    return toOpenAIServiceTier(model, serviceTierSetting)
   }
+}

-  return serviceTierSetting
+function getVerbosity(): OpenAIVerbosity {
+  const openAI = getStoreSetting('openAI')
+  return openAI.verbosity
 }

 /**
@@ -78,13 +111,13 @@ export function buildProviderOptions(
     enableWebSearch: boolean
     enableGenerateImage: boolean
   }
-): Record<string, any> {
+): Record<string, Record<string, JSONValue>> {
   logger.debug('buildProviderOptions', { assistant, model, actualProvider, capabilities })
   const rawProviderId = getAiSdkProviderId(actualProvider)
   // Build provider-specific options
   let providerSpecificOptions: Record<string, any> = {}
-  const serviceTierSetting = getServiceTier(model, actualProvider)
-  providerSpecificOptions.serviceTier = serviceTierSetting
+  const serviceTier = getServiceTier(model, actualProvider)
+  const textVerbosity = getVerbosity()
   // Split the build logic by provider type
   const { data: baseProviderId, success } = baseProviderIdSchema.safeParse(rawProviderId)
   if (success) {
@@ -94,9 +127,14 @@
       case 'openai-chat':
       case 'azure':
       case 'azure-responses':
-        providerSpecificOptions = {
-          ...buildOpenAIProviderOptions(assistant, model, capabilities),
-          serviceTier: serviceTierSetting
+        {
+          const options: OpenAIResponsesProviderOptions = buildOpenAIProviderOptions(
+            assistant,
+            model,
+            capabilities,
+            serviceTier
+          )
+          providerSpecificOptions = options
         }
         break
       case 'anthropic':
@@ -116,12 +154,19 @@
         // For other providers, use the generic build logic
         providerSpecificOptions = {
           ...buildGenericProviderOptions(assistant, model, capabilities),
-          serviceTier: serviceTierSetting
+          serviceTier,
+          textVerbosity
         }
         break
       }
       case 'cherryin':
-        providerSpecificOptions = buildCherryInProviderOptions(assistant, model, capabilities, actualProvider)
+        providerSpecificOptions = buildCherryInProviderOptions(
+          assistant,
+          model,
+          capabilities,
+          actualProvider,
+          serviceTier
+        )
         break
       default:
         throw new Error(`Unsupported base provider ${baseProviderId}`)
@@ -142,13 +187,14 @@
         providerSpecificOptions = buildBedrockProviderOptions(assistant, model, capabilities)
         break
       case 'huggingface':
-        providerSpecificOptions = buildOpenAIProviderOptions(assistant, model, capabilities)
+        providerSpecificOptions = buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier)
         break
       default:
         // For other providers, use the generic build logic
         providerSpecificOptions = {
           ...buildGenericProviderOptions(assistant, model, capabilities),
-          serviceTier: serviceTierSetting
+          serviceTier,
+          textVerbosity
         }
     }
   } else {
@@ -189,10 +235,12 @@ function buildOpenAIProviderOptions(
     enableReasoning: boolean
     enableWebSearch: boolean
     enableGenerateImage: boolean
-  }
-): Record<string, any> {
+  },
+  serviceTier: OpenAIServiceTier
+): OpenAIResponsesProviderOptions {
   const { enableReasoning } = capabilities
   let providerOptions: Record<string, any> = {}
+
   // OpenAI reasoning parameters
   if (enableReasoning) {
     const reasoningParams = getOpenAIReasoningParams(assistant, model)
@@ -203,7 +251,7 @@
   }

   if (isSupportVerbosityModel(model)) {
-    const state = window.store?.getState()
+    const state: RootState = window.store?.getState()
     const userVerbosity = state?.settings?.openAI?.verbosity

     if (userVerbosity && ['low', 'medium', 'high'].includes(userVerbosity)) {
@@ -218,6 +266,11 @@
     }
   }

+  providerOptions = {
+    ...providerOptions,
+    serviceTier
+  }
+
   return providerOptions
 }

@@ -232,7 +285,7 @@ function buildAnthropicProviderOptions(
     enableWebSearch: boolean
     enableGenerateImage: boolean
   }
-): Record<string, any> {
+): AnthropicProviderOptions {
   const { enableReasoning } = capabilities
   let providerOptions: Record<string, any> = {}
@@ -259,7 +312,7 @@ function buildGeminiProviderOptions(
     enableWebSearch: boolean
     enableGenerateImage: boolean
   }
-): Record<string, any> {
+): GoogleGenerativeAIProviderOptions {
   const { enableReasoning, enableGenerateImage } = capabilities
   let providerOptions: Record<string, any> = {}
@@ -290,7 +343,7 @@ function buildXAIProviderOptions(
     enableWebSearch: boolean
     enableGenerateImage: boolean
   }
-): Record<string, any> {
+): XaiProviderOptions {
   const { enableReasoning } = capabilities
   let providerOptions: Record<string, any> = {}
@@ -313,16 +366,12 @@ function buildCherryInProviderOptions(
     enableWebSearch: boolean
     enableGenerateImage: boolean
   },
-  actualProvider: Provider
-): Record<string, any> {
-  const serviceTierSetting = getServiceTier(model, actualProvider)
-
+  actualProvider: Provider,
+  serviceTier: OpenAIServiceTier
+): OpenAIResponsesProviderOptions | AnthropicProviderOptions | GoogleGenerativeAIProviderOptions {
   switch (actualProvider.type) {
     case 'openai':
-      return {
-        ...buildOpenAIProviderOptions(assistant, model, capabilities),
-        serviceTier: serviceTierSetting
-      }
+      return buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier)
     case 'anthropic':
       return buildAnthropicProviderOptions(assistant, model, capabilities)
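Reviewer note: a minimal, self-contained TypeScript sketch of the overload pattern `getServiceTier` now uses, with the app's `Provider`/`Model` types stubbed out. `GroqProvider` here is an illustrative stand-in for the PR's `GroqSystemProvider`/`NotGroqProvider` pair, and the generic parameters are dropped for brevity; this is not the actual Cherry Studio code.

```ts
type GroqServiceTier = 'auto' | 'on_demand' | 'flex' | undefined | null
type OpenAIServiceTier = 'auto' | 'default' | 'flex' | 'priority' | undefined | null

interface Provider {
  id: string
  serviceTier?: OpenAIServiceTier | GroqServiceTier
}
type GroqProvider = Provider & { id: 'groq' }

function isGroqProvider(p: Provider): p is GroqProvider {
  return p.id === 'groq'
}

// Callers holding a provider already narrowed to Groq get GroqServiceTier back;
// everyone else gets OpenAIServiceTier. The implementation signature unites both.
function getServiceTier(provider: GroqProvider): GroqServiceTier
function getServiceTier(provider: Provider): OpenAIServiceTier
function getServiceTier(provider: Provider): OpenAIServiceTier | GroqServiceTier {
  if (isGroqProvider(provider)) {
    return provider.serviceTier as GroqServiceTier
  }
  return provider.serviceTier as OpenAIServiceTier
}

const groq: GroqProvider = { id: 'groq', serviceTier: 'on_demand' }
const tier = getServiceTier(groq) // statically typed as GroqServiceTier
console.log(tier)
```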
diff --git a/src/renderer/src/aiCore/utils/reasoning.ts b/src/renderer/src/aiCore/utils/reasoning.ts
index dfe084179c..f261f71a7a 100644
--- a/src/renderer/src/aiCore/utils/reasoning.ts
+++ b/src/renderer/src/aiCore/utils/reasoning.ts
@@ -1,6 +1,7 @@
 import type { BedrockProviderOptions } from '@ai-sdk/amazon-bedrock'
 import type { AnthropicProviderOptions } from '@ai-sdk/anthropic'
 import type { GoogleGenerativeAIProviderOptions } from '@ai-sdk/google'
+import type { OpenAIResponsesProviderOptions } from '@ai-sdk/openai'
 import type { XaiProviderOptions } from '@ai-sdk/xai'
 import { loggerService } from '@logger'
 import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
@@ -35,9 +36,9 @@ import {
 import { isSupportEnableThinkingProvider } from '@renderer/config/providers'
 import { getStoreSetting } from '@renderer/hooks/useSettings'
 import { getAssistantSettings, getProviderByModel } from '@renderer/services/AssistantService'
-import type { SettingsState } from '@renderer/store/settings'
 import type { Assistant, Model } from '@renderer/types'
 import { EFFORT_RATIO, isSystemProvider, SystemProviderIds } from '@renderer/types'
+import type { OpenAISummaryText } from '@renderer/types/aiCoreTypes'
 import type { ReasoningEffortOptionalParams } from '@renderer/types/sdk'
 import { toInteger } from 'lodash'

@@ -341,10 +342,14 @@
 }

 /**
- * 获取 OpenAI 推理参数
- * 从 OpenAIResponseAPIClient 和 OpenAIAPIClient 中提取的逻辑
+ * Get OpenAI reasoning parameters
+ * Extracted from OpenAIResponseAPIClient and OpenAIAPIClient logic
+ * For the official OpenAI provider only
 */
-export function getOpenAIReasoningParams(assistant: Assistant, model: Model): Record<string, any> {
+export function getOpenAIReasoningParams(
+  assistant: Assistant,
+  model: Model
+): Pick<OpenAIResponsesProviderOptions, 'reasoningEffort' | 'reasoningSummary'> {
   if (!isReasoningModel(model)) {
     return {}
   }
@@ -355,6 +360,10 @@
     return {}
   }

+  if (isOpenAIDeepResearchModel(model) || reasoningEffort === 'auto') {
+    reasoningEffort = 'medium'
+  }
+
   // Non-OpenAI models whose provider type is responses/azure openai
   if (!isOpenAIModel(model)) {
     return {
@@ -362,21 +371,17 @@
   }

-  const openAI = getStoreSetting('openAI') as SettingsState['openAI']
-  const summaryText = openAI?.summaryText || 'off'
+  const openAI = getStoreSetting('openAI')
+  const summaryText = openAI.summaryText

-  let reasoningSummary: string | undefined = undefined
+  let reasoningSummary: OpenAISummaryText = undefined

-  if (summaryText === 'off' || model.id.includes('o1-pro')) {
+  if (model.id.includes('o1-pro')) {
     reasoningSummary = undefined
   } else {
     reasoningSummary = summaryText
   }

-  if (isOpenAIDeepResearchModel(model)) {
-    reasoningEffort = 'medium'
-  }
-
   // OpenAI reasoning parameters
   if (isSupportedReasoningEffortOpenAIModel(model)) {
     return {
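Reviewer note: a standalone sketch of the falsy convention this hunk adopts — `undefined` now means "omit the field", and the `'off'` literal is gone. This is an approximation of the behavior, not the actual Cherry Studio code path.

```ts
type OpenAISummaryText = 'auto' | 'concise' | 'detailed' | undefined

function reasoningParams(summaryText: OpenAISummaryText, modelId: string) {
  // o1-pro does not accept a reasoning summary, so it is forced back to undefined
  const reasoningSummary = modelId.includes('o1-pro') ? undefined : summaryText
  return { reasoningEffort: 'medium', reasoningSummary }
}

// undefined properties disappear when the request body is serialized
console.log(JSON.stringify(reasoningParams(undefined, 'gpt-5')))
// -> {"reasoningEffort":"medium"}
console.log(JSON.stringify(reasoningParams('detailed', 'gpt-5')))
// -> {"reasoningEffort":"medium","reasoningSummary":"detailed"}
```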
diff --git a/src/renderer/src/components/Selector.tsx b/src/renderer/src/components/Selector.tsx
index e30bc64193..38567fc200 100644
--- a/src/renderer/src/components/Selector.tsx
+++ b/src/renderer/src/components/Selector.tsx
@@ -6,7 +6,7 @@ import { useEffect, useMemo, useRef, useState } from 'react'
 import { useTranslation } from 'react-i18next'
 import styled, { css } from 'styled-components'

-interface SelectorOption<V extends string = string> {
+interface SelectorOption<V = string> {
   label: string | ReactNode
   value: V
   type?: 'group'
@@ -14,7 +14,7 @@
   disabled?: boolean
 }

-interface BaseSelectorProps<V extends string = string> {
+interface BaseSelectorProps<V = string> {
   options: SelectorOption<V>[]
   placeholder?: string
   placement?: 'topLeft' | 'topCenter' | 'topRight' | 'bottomLeft' | 'bottomCenter' | 'bottomRight' | 'top' | 'bottom'
@@ -39,7 +39,7 @@ interface MultipleSelectorProps<V> extends BaseSelectorProps<V> {

 export type SelectorProps<V> = SingleSelectorProps<V> | MultipleSelectorProps<V>

-const Selector = <V extends string = string>({
+const Selector = <V = string>({
   options,
   value,
   onChange = () => {},
diff --git a/src/renderer/src/config/models/utils.ts b/src/renderer/src/config/models/utils.ts
index 7fb7c61362..6c75d49251 100644
--- a/src/renderer/src/config/models/utils.ts
+++ b/src/renderer/src/config/models/utils.ts
@@ -1,6 +1,7 @@
 import type OpenAI from '@cherrystudio/openai'
 import { isEmbeddingModel, isRerankModel } from '@renderer/config/models/embedding'
 import type { Model } from '@renderer/types'
+import type { OpenAIVerbosity, ValidOpenAIVerbosity } from '@renderer/types/aiCoreTypes'
 import { getLowerBaseModelName } from '@renderer/utils'

 import { WEB_SEARCH_PROMPT_FOR_OPENROUTER } from '../prompts'
@@ -242,17 +243,20 @@ export const isGPT51SeriesModel = (model: Model) => {

 // GPT-5 verbosity configuration
 // gpt-5-pro only supports 'high', other GPT-5 models support all levels
-export const MODEL_SUPPORTED_VERBOSITY: Record<string, ('low' | 'medium' | 'high')[]> = {
+export const MODEL_SUPPORTED_VERBOSITY: Record<string, ValidOpenAIVerbosity[]> = {
   'gpt-5-pro': ['high'],
   default: ['low', 'medium', 'high']
-}
+} as const

-export const getModelSupportedVerbosity = (model: Model): ('low' | 'medium' | 'high')[] => {
+export const getModelSupportedVerbosity = (model: Model): OpenAIVerbosity[] => {
   const modelId = getLowerBaseModelName(model.id)
+  let supportedValues: ValidOpenAIVerbosity[]
   if (modelId.includes('gpt-5-pro')) {
-    return MODEL_SUPPORTED_VERBOSITY['gpt-5-pro']
+    supportedValues = MODEL_SUPPORTED_VERBOSITY['gpt-5-pro']
+  } else {
+    supportedValues = MODEL_SUPPORTED_VERBOSITY.default
   }
-  return MODEL_SUPPORTED_VERBOSITY.default
+  return [undefined, ...supportedValues]
 }

 export const isGeminiModel = (model: Model) => {
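Reviewer note: a stubbed-down sketch of the reworked `getModelSupportedVerbosity`, showing why `undefined` (provider default) is prepended as a first-class option that every model supports. Illustrative only.

```ts
type OpenAIVerbosity = 'low' | 'medium' | 'high' | undefined
type ValidOpenAIVerbosity = Exclude<OpenAIVerbosity, undefined>

const MODEL_SUPPORTED_VERBOSITY: Record<string, ValidOpenAIVerbosity[]> = {
  'gpt-5-pro': ['high'],
  default: ['low', 'medium', 'high']
}

function getModelSupportedVerbosity(modelId: string): OpenAIVerbosity[] {
  const supported = modelId.includes('gpt-5-pro')
    ? MODEL_SUPPORTED_VERBOSITY['gpt-5-pro']
    : MODEL_SUPPORTED_VERBOSITY.default
  // undefined is prepended so the settings UI can always offer "Default"
  return [undefined, ...supported]
}

console.log(getModelSupportedVerbosity('gpt-5-pro')) // [undefined, 'high']
```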
diff --git a/src/renderer/src/i18n/locales/en-us.json b/src/renderer/src/i18n/locales/en-us.json
index ab3c8aa9a1..5b1de2a257 100644
--- a/src/renderer/src/i18n/locales/en-us.json
+++ b/src/renderer/src/i18n/locales/en-us.json
@@ -1158,6 +1158,7 @@
     "name": "Name",
     "no_results": "No results",
     "none": "None",
+    "off": "Off",
     "open": "Open",
     "paste": "Paste",
     "placeholders": {
@@ -4259,7 +4260,6 @@
       "default": "default",
       "flex": "flex",
      "on_demand": "on demand",
-      "performance": "performance",
       "priority": "priority",
       "tip": "Specifies the latency tier to use for processing the request",
       "title": "Service Tier"
@@ -4278,7 +4278,7 @@
       "low": "Low",
       "medium": "Medium",
       "tip": "Control the level of detail in the model's output",
-      "title": "Level of detail"
+      "title": "Verbosity"
     }
   },
   "privacy": {
diff --git a/src/renderer/src/i18n/locales/zh-cn.json b/src/renderer/src/i18n/locales/zh-cn.json
index ff724e1cc8..8d7073fcfd 100644
--- a/src/renderer/src/i18n/locales/zh-cn.json
+++ b/src/renderer/src/i18n/locales/zh-cn.json
@@ -1158,6 +1158,7 @@
     "name": "名称",
     "no_results": "无结果",
     "none": "无",
+    "off": "关闭",
     "open": "打开",
     "paste": "粘贴",
     "placeholders": {
@@ -4259,7 +4260,6 @@
       "default": "默认",
       "flex": "灵活",
       "on_demand": "按需",
-      "performance": "性能",
       "priority": "优先",
       "tip": "指定用于处理请求的延迟层级",
       "title": "服务层级"
diff --git a/src/renderer/src/i18n/locales/zh-tw.json b/src/renderer/src/i18n/locales/zh-tw.json
index 76e916a9a7..72eb71ea97 100644
--- a/src/renderer/src/i18n/locales/zh-tw.json
+++ b/src/renderer/src/i18n/locales/zh-tw.json
@@ -1158,6 +1158,7 @@
     "name": "名稱",
     "no_results": "沒有結果",
     "none": "無",
+    "off": "關閉",
     "open": "開啟",
     "paste": "貼上",
     "placeholders": {
@@ -4259,7 +4260,6 @@
       "default": "預設",
       "flex": "彈性",
       "on_demand": "按需",
-      "performance": "效能",
       "priority": "優先",
       "tip": "指定用於處理請求的延遲層級",
       "title": "服務層級"
diff --git a/src/renderer/src/i18n/translate/pt-pt.json b/src/renderer/src/i18n/translate/pt-pt.json
index 541a728946..1ca373f394 100644
--- a/src/renderer/src/i18n/translate/pt-pt.json
+++ b/src/renderer/src/i18n/translate/pt-pt.json
@@ -1158,6 +1158,7 @@
     "name": "Nome",
     "no_results": "Nenhum resultado",
     "none": "Nenhum",
+    "off": "Desligado",
     "open": "Abrir",
     "paste": "Colar",
     "placeholders": {
@@ -4223,7 +4224,6 @@
       "default": "Padrão",
       "flex": "Flexível",
       "on_demand": "sob demanda",
-      "performance": "desempenho",
       "priority": "prioridade",
       "tip": "Especifique o nível de latência usado para processar a solicitação",
       "title": "Nível de Serviço"
diff --git a/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx b/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx
index 2960724183..b6ecf88c72 100644
--- a/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx
+++ b/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx
@@ -12,9 +12,9 @@ import { CollapsibleSettingGroup } from '@renderer/pages/settings/SettingGroup'
 import type { RootState } from '@renderer/store'
 import { useAppDispatch } from '@renderer/store'
 import { setOpenAISummaryText, setOpenAIVerbosity } from '@renderer/store/settings'
-import type { Model, OpenAIServiceTier, OpenAISummaryText, ServiceTier } from '@renderer/types'
+import type { GroqServiceTier, Model, OpenAIServiceTier, ServiceTier } from '@renderer/types'
 import { GroqServiceTiers, OpenAIServiceTiers, SystemProviderIds } from '@renderer/types'
-import type { OpenAIVerbosity } from '@types'
+import type { OpenAISummaryText, OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
 import { Tooltip } from 'antd'
 import { CircleHelp } from 'lucide-react'
 import type { FC } from 'react'
@@ -22,6 +22,21 @@ import { useCallback, useEffect, useMemo } from 'react'
 import { useTranslation } from 'react-i18next'
 import { useSelector } from 'react-redux'

+type VerbosityOption = {
+  value: OpenAIVerbosity
+  label: string
+}
+
+type SummaryTextOption = {
+  value: OpenAISummaryText
+  label: string
+}
+
+type OpenAIServiceTierOption = { value: OpenAIServiceTier; label: string }
+type GroqServiceTierOption = { value: GroqServiceTier; label: string }
+
+type ServiceTierOptions = OpenAIServiceTierOption[] | GroqServiceTierOption[]
+
 interface Props {
   model: Model
   providerId: string
@@ -67,6 +82,10 @@
   )

   const summaryTextOptions = [
+    {
+      value: undefined,
+      label: t('common.default')
+    },
     {
       value: 'auto',
       label: t('settings.openai.summary_text_mode.auto')
@@ -76,13 +95,17 @@
       label: t('settings.openai.summary_text_mode.detailed')
     },
     {
-      value: 'off',
-      label: t('settings.openai.summary_text_mode.off')
+      value: 'concise',
+      label: t('settings.openai.summary_text_mode.concise')
     }
-  ]
+  ] as const satisfies SummaryTextOption[]

   const verbosityOptions = useMemo(() => {
     const allOptions = [
+      {
+        value: undefined,
+        label: t('common.default')
+      },
       {
         value: 'low',
         label: t('settings.openai.verbosity.low')
@@ -95,15 +118,23 @@
         value: 'high',
         label: t('settings.openai.verbosity.high')
       }
-    ]
+    ] as const satisfies VerbosityOption[]

     const supportedVerbosityLevels = getModelSupportedVerbosity(model)
-    return allOptions.filter((option) => supportedVerbosityLevels.includes(option.value as any))
+    return allOptions.filter((option) => supportedVerbosityLevels.includes(option.value))
   }, [model, t])

   const serviceTierOptions = useMemo(() => {
-    let baseOptions: { value: ServiceTier; label: string }[]
+    let options: ServiceTierOptions
     if (provider.id === SystemProviderIds.groq) {
-      baseOptions = [
+      options = [
+        {
+          value: null,
+          label: t('common.off')
+        },
+        {
+          value: undefined,
+          label: t('common.default')
+        },
         {
           value: 'auto',
           label: t('settings.openai.service_tier.auto')
@@ -115,15 +146,11 @@
         {
           value: 'flex',
           label: t('settings.openai.service_tier.flex')
-        },
-        {
-          value: 'performance',
-          label: t('settings.openai.service_tier.performance')
         }
-      ]
+      ] as const satisfies GroqServiceTierOption[]
     } else {
       // Other cases default to the same tiers as OpenAI
-      baseOptions = [
+      options = [
         {
           value: 'auto',
           label: t('settings.openai.service_tier.auto')
@@ -140,9 +167,9 @@
           value: 'priority',
           label: t('settings.openai.service_tier.priority')
         }
-      ]
+      ] as const satisfies OpenAIServiceTierOption[]
     }
-    return baseOptions.filter((option) => {
+    return options.filter((option) => {
       if (option.value === 'flex') {
         return isSupportedFlexServiceTier
       }
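Reviewer note: a generic JSON-serialization sketch of why the Groq tier selector now distinguishes `null` ("Off") from `undefined` ("Default") — only one of the two keeps the field out of the request body. The model name is illustrative, and this is not the app's actual request builder.

```ts
type GroqServiceTier = 'auto' | 'on_demand' | 'flex' | undefined | null

function buildBody(serviceTier: GroqServiceTier) {
  return { model: 'llama-3.3-70b-versatile', service_tier: serviceTier }
}

console.log(JSON.stringify(buildBody(undefined)))
// -> {"model":"llama-3.3-70b-versatile"}                       field omitted, server default
console.log(JSON.stringify(buildBody(null)))
// -> {"model":"llama-3.3-70b-versatile","service_tier":null}   explicitly off
console.log(JSON.stringify(buildBody('flex')))
// -> {"model":"llama-3.3-70b-versatile","service_tier":"flex"}
```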
diff --git a/src/renderer/src/services/ApiService.ts b/src/renderer/src/services/ApiService.ts
index 10e191bb38..f19c90b61f 100644
--- a/src/renderer/src/services/ApiService.ts
+++ b/src/renderer/src/services/ApiService.ts
@@ -83,7 +83,7 @@ export async function fetchChatCompletion({
   messages,
   prompt,
   assistant,
-  options,
+  requestOptions,
   onChunkReceived,
   topicId,
   uiMessages
@@ -124,7 +124,7 @@
   } = await buildStreamTextParams(messages, assistant, provider, {
     mcpTools: mcpTools,
     webSearchProviderId: assistant.webSearchProviderId,
-    requestOptions: options
+    requestOptions
   })

   // Safely fallback to prompt tool use when function calling is not supported by model.
diff --git a/src/renderer/src/services/OrchestrateService.ts b/src/renderer/src/services/OrchestrateService.ts
index 1f365b39b6..71f17d6804 100644
--- a/src/renderer/src/services/OrchestrateService.ts
+++ b/src/renderer/src/services/OrchestrateService.ts
@@ -48,7 +48,7 @@ export class OrchestrationService {
     await fetchChatCompletion({
       messages: modelMessages,
       assistant: assistant,
-      options: request.options,
+      requestOptions: request.options,
       onChunkReceived,
       topicId: request.topicId,
       uiMessages: uiMessages
@@ -80,7 +80,7 @@ export async function transformMessagesAndFetch(
     await fetchChatCompletion({
       messages: modelMessages,
       assistant: assistant,
-      options: request.options,
+      requestOptions: request.options,
       onChunkReceived,
       topicId: request.topicId,
       uiMessages
diff --git a/src/renderer/src/services/TranslateService.ts b/src/renderer/src/services/TranslateService.ts
index f7abfdb3b9..a5abb2baee 100644
--- a/src/renderer/src/services/TranslateService.ts
+++ b/src/renderer/src/services/TranslateService.ts
@@ -2,7 +2,7 @@ import { loggerService } from '@logger'
 import { db } from '@renderer/databases'
 import type {
   CustomTranslateLanguage,
-  FetchChatCompletionOptions,
+  FetchChatCompletionRequestOptions,
   TranslateHistory,
   TranslateLanguage,
   TranslateLanguageCode
@@ -56,15 +56,15 @@ export const translateText = async (
     onResponse?.(translatedText, completed)
   }

-  const options = {
+  const requestOptions = {
     signal
-  } satisfies FetchChatCompletionOptions
+  } satisfies FetchChatCompletionRequestOptions

   try {
     await fetchChatCompletion({
       prompt: assistant.content,
       assistant,
-      options,
+      requestOptions,
       onChunkReceived: onChunk
     })
   } catch (e) {
diff --git a/src/renderer/src/store/index.ts b/src/renderer/src/store/index.ts
index 16254dfaa8..2bb9079370 100644
--- a/src/renderer/src/store/index.ts
+++ b/src/renderer/src/store/index.ts
@@ -67,7 +67,7 @@ const persistedReducer = persistReducer(
   {
     key: 'cherry-studio',
     storage,
-    version: 176,
+    version: 177,
     blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs', 'toolPermissions'],
     migrate
   },
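Reviewer note: the `version: 177` bump above pairs with migration `'177'` in the next file. Here is a standalone sketch of the migration's effect, with the persisted state shape stubbed out (the real migration runs inside redux-persist against `RootState`).

```ts
interface PersistedOpenAISettings {
  summaryText: 'auto' | 'concise' | 'detailed' | 'off' | undefined
}

function migrate177(openAI: PersistedOpenAISettings): PersistedOpenAISettings {
  // 'off' no longer exists in the OpenAISummaryText union; fold it into 'auto',
  // which is also the new initial value in settings.ts.
  if (openAI.summaryText === 'off') {
    openAI.summaryText = 'auto'
  }
  return openAI
}

console.log(migrate177({ summaryText: 'off' })) // { summaryText: 'auto' }
```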
diff --git a/src/renderer/src/store/migrate.ts b/src/renderer/src/store/migrate.ts
index 8b8b00d20e..13755fdaf1 100644
--- a/src/renderer/src/store/migrate.ts
+++ b/src/renderer/src/store/migrate.ts
@@ -1499,6 +1499,7 @@ const migrateConfig = {
   '102': (state: RootState) => {
     try {
       state.settings.openAI = {
+        // @ts-expect-error 'off' was removed from the type; migrated in 177
         summaryText: 'off',
         serviceTier: 'auto',
         verbosity: 'medium'
@@ -1592,6 +1593,7 @@
       addMiniApp(state, 'google')
       if (!state.settings.openAI) {
         state.settings.openAI = {
+          // @ts-expect-error 'off' was removed from the type; migrated in 177
           summaryText: 'off',
           serviceTier: 'auto',
           verbosity: 'medium'
@@ -2856,6 +2858,19 @@
       logger.error('migrate 176 error', error as Error)
       return state
     }
+  },
+  '177': (state: RootState) => {
+    try {
+      // @ts-expect-error 'off' was removed from the type
+      if (state.settings.openAI.summaryText === 'off') {
+        state.settings.openAI.summaryText = 'auto'
+      }
+      logger.info('migrate 177 success')
+      return state
+    } catch (error) {
+      logger.error('migrate 177 error', error as Error)
+      return state
+    }
+  }
 }
diff --git a/src/renderer/src/store/settings.ts b/src/renderer/src/store/settings.ts
index 45f521b3df..cb871d37e6 100644
--- a/src/renderer/src/store/settings.ts
+++ b/src/renderer/src/store/settings.ts
@@ -10,16 +10,15 @@ import type {
   LanguageVarious,
   MathEngine,
   OpenAIServiceTier,
-  OpenAISummaryText,
   PaintingProvider,
   S3Config,
   SidebarIcon,
   TranslateLanguageCode
 } from '@renderer/types'
 import { ThemeMode } from '@renderer/types'
+import type { OpenAISummaryText, OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
 import { uuid } from '@renderer/utils'
 import { UpgradeChannel } from '@shared/config/constant'
-import type { OpenAIVerbosity } from '@types'

 import type { RemoteSyncState } from './backup'

@@ -375,7 +374,7 @@ export const initialState: SettingsState = {
   },
   // OpenAI
   openAI: {
-    summaryText: 'off',
+    summaryText: 'auto',
     serviceTier: 'auto',
     verbosity: 'medium'
   },
diff --git a/src/renderer/src/types/aiCoreTypes.ts b/src/renderer/src/types/aiCoreTypes.ts
index a2ff5a4cef..6327fe6835 100644
--- a/src/renderer/src/types/aiCoreTypes.ts
+++ b/src/renderer/src/types/aiCoreTypes.ts
@@ -1,3 +1,5 @@
+import type OpenAI from '@cherrystudio/openai'
+import type { NotNull, NotUndefined } from '@types'
 import type { ImageModel, LanguageModel } from 'ai'
 import type { generateObject, generateText, ModelMessage, streamObject, streamText } from 'ai'

@@ -27,3 +29,16 @@ export type StreamObjectParams = Omit<Parameters<typeof streamObject>[0], 'model'>
 export type GenerateObjectParams = Omit<Parameters<typeof generateObject>[0], 'model'>

 export type AiSdkModel = LanguageModel | ImageModel
+
+// The original type unites both undefined and null.
+// I pick undefined as the single falsy value, since the two seem to share the same meaning in the OpenAI API docs.
+// The parameter is not passed into the request when it is undefined.
+export type OpenAIVerbosity = NotNull<OpenAI.Responses.ResponseTextConfig['verbosity']>
+export type ValidOpenAIVerbosity = NotUndefined<OpenAIVerbosity>
+
+export type OpenAIReasoningEffort = OpenAI.ReasoningEffort
+
+// The original type unites both undefined and null.
+// I pick undefined as the single falsy value, since the two seem to share the same meaning in the OpenAI API docs.
+// The parameter is not passed into the request when it is undefined.
+export type OpenAISummaryText = NotNull<OpenAI.Reasoning['summary']>
diff --git a/src/renderer/src/types/index.ts b/src/renderer/src/types/index.ts
index 01d654fdb2..2ec88765fc 100644
--- a/src/renderer/src/types/index.ts
+++ b/src/renderer/src/types/index.ts
@@ -871,10 +871,6 @@ export interface StoreSyncAction {
   }
 }

-export type OpenAIVerbosity = 'high' | 'medium' | 'low'
-
-export type OpenAISummaryText = 'auto' | 'concise' | 'detailed' | 'off'
-
 export type S3Config = {
   endpoint: string
   region: string
@@ -1091,7 +1087,7 @@
   return /^#([0-9A-F]{3}){1,2}$/i.test(value)
 }

-export type FetchChatCompletionOptions = {
+export type FetchChatCompletionRequestOptions = {
   signal?: AbortSignal
   timeout?: number
   headers?: Record<string, string>
 }
@@ -1099,7 +1095,7 @@

 type BaseParams = {
   assistant: Assistant
-  options?: FetchChatCompletionOptions
+  requestOptions?: FetchChatCompletionRequestOptions
   onChunkReceived: (chunk: Chunk) => void
   topicId?: string // added topicId parameter
   uiMessages?: Message[]
@@ -1119,3 +1115,7 @@ type PromptParams = BaseParams & {
 }

 export type FetchChatCompletionParams = MessagesParams | PromptParams
+
+// More specific than NonNullable
+export type NotUndefined<T> = Exclude<T, undefined>
+export type NotNull<T> = Exclude<T, null>
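Reviewer note: a compile-time sketch of how the two new helpers relate to the built-in `NonNullable`, which strips both `null` and `undefined` at once. Standalone code, not from the PR.

```ts
type NotUndefined<T> = Exclude<T, undefined>
type NotNull<T> = Exclude<T, null>

type RawTier = 'auto' | 'default' | 'flex' | 'priority' | null | undefined

type A = NotNull<RawTier>      // 'auto' | 'default' | 'flex' | 'priority' | undefined
type B = NotUndefined<RawTier> // 'auto' | 'default' | 'flex' | 'priority' | null
type C = NonNullable<RawTier>  // 'auto' | 'default' | 'flex' | 'priority'

// Compile-time checks that the three differ as described
const a: A = undefined
const b: B = null
const c: C = 'auto'
console.log(a, b, c)
```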
diff --git a/src/renderer/src/types/provider.ts b/src/renderer/src/types/provider.ts
index 5bd605007e..05988f6a1f 100644
--- a/src/renderer/src/types/provider.ts
+++ b/src/renderer/src/types/provider.ts
@@ -1,6 +1,9 @@
+import type OpenAI from '@cherrystudio/openai'
 import type { Model } from '@types'
 import * as z from 'zod'

+import type { OpenAIVerbosity } from './aiCoreTypes'
+
 export const ProviderTypeSchema = z.enum([
   'openai',
   'openai-response',
@@ -41,36 +44,38 @@ export type ProviderApiOptions = {
   isNotSupportAPIVersion?: boolean
 }

+// 'scale' is not well supported for now; it even lacks documentation.
+// We treat undefined the same as default, and null as explicitly off.
+// It controls whether the response contains the serviceTier field, so undefined and null must stay distinct.
+export type OpenAIServiceTier = Exclude<OpenAI.Chat.ChatCompletionCreateParams['service_tier'], 'scale'>
+
 export const OpenAIServiceTiers = {
   auto: 'auto',
   default: 'default',
   flex: 'flex',
   priority: 'priority'
-} as const
+} as const satisfies Record<NonNullable<OpenAIServiceTier>, OpenAIServiceTier>

-export type OpenAIServiceTier = keyof typeof OpenAIServiceTiers
-
-export function isOpenAIServiceTier(tier: string): tier is OpenAIServiceTier {
-  return Object.hasOwn(OpenAIServiceTiers, tier)
+export function isOpenAIServiceTier(tier: string | null | undefined): tier is OpenAIServiceTier {
+  return tier === null || tier === undefined || Object.hasOwn(OpenAIServiceTiers, tier)
 }

+// https://console.groq.com/docs/api-reference#responses
+export type GroqServiceTier = 'auto' | 'on_demand' | 'flex' | undefined | null
+
 export const GroqServiceTiers = {
   auto: 'auto',
   on_demand: 'on_demand',
-  flex: 'flex',
-  performance: 'performance'
-} as const
+  flex: 'flex'
+} as const satisfies Record<NonNullable<GroqServiceTier>, GroqServiceTier>

-// Extract the type from the GroqServiceTiers object
-export type GroqServiceTier = keyof typeof GroqServiceTiers
-
-export function isGroqServiceTier(tier: string): tier is GroqServiceTier {
-  return Object.hasOwn(GroqServiceTiers, tier)
+export function isGroqServiceTier(tier: string | undefined | null): tier is GroqServiceTier {
+  return tier === null || tier === undefined || Object.hasOwn(GroqServiceTiers, tier)
 }

 export type ServiceTier = OpenAIServiceTier | GroqServiceTier

-export function isServiceTier(tier: string): tier is ServiceTier {
+export function isServiceTier(tier: string | null | undefined): tier is ServiceTier {
   return isGroqServiceTier(tier) || isOpenAIServiceTier(tier)
 }

@@ -103,6 +108,7 @@ export type Provider = {
   // API options
   apiOptions?: ProviderApiOptions
   serviceTier?: ServiceTier
+  verbosity?: OpenAIVerbosity

   /** @deprecated */
   isNotSupportArrayContent?: boolean
@@ -119,6 +125,75 @@
   extra_headers?: Record<string, string>
 }

+export const SystemProviderIdSchema = z.enum([
+  'cherryin',
+  'silicon',
+  'aihubmix',
+  'ocoolai',
+  'deepseek',
+  'ppio',
+  'alayanew',
+  'qiniu',
+  'dmxapi',
+  'burncloud',
+  'tokenflux',
+  '302ai',
+  'cephalon',
+  'lanyun',
+  'ph8',
+  'openrouter',
+  'ollama',
+  'ovms',
+  'new-api',
+  'lmstudio',
+  'anthropic',
+  'openai',
+  'azure-openai',
+  'gemini',
+  'vertexai',
+  'github',
+  'copilot',
+  'zhipu',
+  'yi',
+  'moonshot',
+  'baichuan',
+  'dashscope',
+  'stepfun',
+  'doubao',
+  'infini',
+  'minimax',
+  'groq',
+  'together',
+  'fireworks',
+  'nvidia',
+  'grok',
+  'hyperbolic',
+  'mistral',
+  'jina',
+  'perplexity',
+  'modelscope',
+  'xirang',
+  'hunyuan',
+  'tencent-cloud-ti',
+  'baidu-cloud',
+  'gpustack',
+  'voyageai',
+  'aws-bedrock',
+  'poe',
+  'aionly',
+  'longcat',
+  'huggingface',
+  'sophnet',
+  'ai-gateway',
+  'cerebras'
+])
+
+export type SystemProviderId = z.infer<typeof SystemProviderIdSchema>
+
+export const isSystemProviderId = (id: string): id is SystemProviderId => {
+  return SystemProviderIdSchema.safeParse(id).success
+}
+
 export const SystemProviderIds = {
   cherryin: 'cherryin',
   silicon: 'silicon',
@@ -180,13 +255,9 @@
   huggingface: 'huggingface',
   'ai-gateway': 'ai-gateway',
   cerebras: 'cerebras'
-} as const
+} as const satisfies Record<SystemProviderId, SystemProviderId>

-export type SystemProviderId = keyof typeof SystemProviderIds
-
-export const isSystemProviderId = (id: string): id is SystemProviderId => {
-  return Object.hasOwn(SystemProviderIds, id)
-}
+type SystemProviderIdTypeMap = typeof SystemProviderIds

 export type SystemProvider = Provider & {
   id: SystemProviderId
@@ -216,3 +287,16 @@ export type AzureOpenAIProvider = Provider & {
 export const isSystemProvider = (provider: Provider): provider is SystemProvider => {
   return isSystemProviderId(provider.id) && !!provider.isSystem
 }
+
+export type GroqSystemProvider = Provider & {
+  id: SystemProviderIdTypeMap['groq']
+  isSystem: true
+}
+
+export type NotGroqProvider = Provider & {
+  id: Exclude<string, SystemProviderIdTypeMap['groq']>
+}
+
+export const isGroqSystemProvider = (provider: Provider): provider is GroqSystemProvider => {
+  return provider.id === SystemProviderIds.groq
+}
diff --git a/src/renderer/src/windows/mini/home/HomeWindow.tsx b/src/renderer/src/windows/mini/home/HomeWindow.tsx
index bf01146015..a3da9d9a0b 100644
--- a/src/renderer/src/windows/mini/home/HomeWindow.tsx
+++ b/src/renderer/src/windows/mini/home/HomeWindow.tsx
@@ -283,7 +283,7 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
     await fetchChatCompletion({
       messages: modelMessages,
       assistant: newAssistant,
-      options: {},
+      requestOptions: {},
       topicId,
       uiMessages: uiMessages,
       onChunkReceived: (chunk: Chunk) => {
diff --git a/src/renderer/src/windows/selection/action/components/ActionUtils.ts b/src/renderer/src/windows/selection/action/components/ActionUtils.ts
index 16537f0e81..12f3881fe2 100644
--- a/src/renderer/src/windows/selection/action/components/ActionUtils.ts
+++ b/src/renderer/src/windows/selection/action/components/ActionUtils.ts
@@ -70,7 +70,7 @@ export const processMessages = async (
     await fetchChatCompletion({
       messages: modelMessages,
       assistant: newAssistant,
-      options: {},
+      requestOptions: {},
       uiMessages: uiMessages,
       onChunkReceived: (chunk: Chunk) => {
         if (finished) {
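Reviewer note: to close out, a sketch of a call site after the `options` -> `requestOptions` rename across the services above. The surrounding service API is stubbed; only the parameter shape (`signal`, `timeout`, `headers`) comes from the PR.

```ts
interface FetchChatCompletionRequestOptions {
  signal?: AbortSignal
  timeout?: number
  headers?: Record<string, string>
}

async function fetchChatCompletion(params: {
  prompt: string
  requestOptions?: FetchChatCompletionRequestOptions
}): Promise<void> {
  // stub: a real implementation would thread requestOptions into the SDK call
  console.log(params.prompt, params.requestOptions?.timeout)
}

const controller = new AbortController()
fetchChatCompletion({
  prompt: 'Translate this text',
  requestOptions: { signal: controller.signal, timeout: 30_000 }
}).catch(console.error)
```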