fix(openai): apply verbosity setting with type safety improvements (#10964)

* refactor(types): consolidate OpenAI types and improve type safety

- Move OpenAI-related types to aiCoreTypes.ts
- Rename FetchChatCompletionOptions to FetchChatCompletionRequestOptions
- Add proper type definitions for service tiers and verbosity
- Improve type guards for service tier checks
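
The widened guards referenced in the last bullet accept raw setting values directly; a minimal sketch of the final shape (distilled from the diff below, with the union spelled out instead of derived from the SDK type):

const OpenAIServiceTiers = {
  auto: 'auto',
  default: 'default',
  flex: 'flex',
  priority: 'priority'
} as const

// null = explicitly off, undefined = default; both are valid values of the type.
type OpenAIServiceTier = keyof typeof OpenAIServiceTiers | null | undefined

function isOpenAIServiceTier(tier: string | null | undefined): tier is OpenAIServiceTier {
  return tier === null || tier === undefined || Object.hasOwn(OpenAIServiceTiers, tier)
}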

* refactor(api): rename options parameter to requestOptions for consistency

Update parameter name across multiple files to use requestOptions instead of options for better clarity and consistency in API calls

* refactor(aiCore): simplify OpenAI summary text handling and improve type safety

- Remove 'off' option from OpenAISummaryText type and use null instead
- Add migration to convert 'off' values to null
- Add utility function to convert undefined to null
- Update Selector component to handle null/undefined values
- Improve type safety in provider options and reasoning params
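
The Selector change above makes null and undefined first-class option values; a sketch of what an options array can now look like (mirroring the Groq service tier options in the diff below):

const serviceTierOptions = [
  { value: null, label: 'Off' },          // explicitly off
  { value: undefined, label: 'Default' }, // provider default
  { value: 'auto', label: 'Auto' }
]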

* fix(i18n): Auto update translations for PR #10964

* feat(utils): add notNull function to convert null to undefined

* refactor(utils): move defined and notNull functions to shared package

Consolidate utility functions into the shared package to improve code organization and reuse

* Revert "fix(i18n): Auto update translations for PR #10964"

This reverts commit 68bd7eaac5.

* feat(i18n): add "off" translation and remove "performance" tier

Add "off" translation for multiple languages and remove "performance" service tier option from translations

* Apply suggestion from @EurFelux

* docs(types): clarify handling of undefined and null values

Add comments to explain that undefined is treated as default and null as explicitly off in OpenAIVerbosity and OpenAIServiceTier types. Also update type safety for OpenAIServiceTiers record.

* fix(migration): update migration version from 167 to 171 for removed type

* chore: update store version to 172

* fix(migrate): update migration version number from 171 to 172

* fix(i18n): Auto update translations for PR #10964

* refactor(types): improve type safety for verbosity handling

Add NotUndefined and NotNull utility types to better handle null/undefined cases.
Clarify verbosity types in aiCoreTypes and update related utility functions.
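
Both utilities are thin wrappers over Exclude, used to strip exactly one falsy member from an SDK union; a sketch of the definitions and their effect (assuming the SDK verbosity union is 'low' | 'medium' | 'high' | null | undefined):

// More specific than NonNullable, which strips null and undefined at once.
type NotUndefined<T> = Exclude<T, undefined>
type NotNull<T> = Exclude<T, null>

type SdkVerbosity = 'low' | 'medium' | 'high' | null | undefined
type OpenAIVerbosity = NotNull<SdkVerbosity>              // 'low' | 'medium' | 'high' | undefined
type ValidOpenAIVerbosity = NotUndefined<OpenAIVerbosity> // 'low' | 'medium' | 'high'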

* refactor(types): replace null with undefined for verbosity values

Standardize on undefined instead of null for verbosity values to align with OpenAI API docs and improve type consistency

* refactor(aiCore): update OpenAI provider options type import and usage

* fix(openai): change summaryText default from null to 'auto'

Update OpenAI settings to use 'auto' as default summaryText value instead of null for consistency with API behavior. Remove 'off' option and add 'concise' option while maintaining type safety.
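
A sketch of the resulting union; the real type is derived from OpenAI.Reasoning['summary'] with NotNull in the aiCoreTypes diff below:

type OpenAISummaryText = 'auto' | 'concise' | 'detailed' | undefined // undefined = provider default

const DEFAULT_SUMMARY_TEXT: OpenAISummaryText = 'auto' // was 'off'; migration 177 below rewrites stored 'off' values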

* refactor(OpenAISettingsGroup): extract service tier options type for better maintainability

* refactor(types): make SystemProviderIdTypeMap internal type

* docs(provider): clarify OpenAIServiceTier behavior for undefined vs null

Explain that undefined and null values for serviceTier should be treated differently since they affect whether the field appears in the response

* refactor(utils): rename utility functions for clarity

Rename `defined` to `toNullIfUndefined` and `notNull` to `toUndefinedIfNull` to better reflect their functionality
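
A self-contained sketch of one of the renamed helpers; the new names state the direction of the conversion, which `defined`/`notNull` left ambiguous:

function toUndefinedIfNull<T>(value: T | null): T | undefined {
  return value === null ? undefined : value
}

// null (explicitly off) becomes undefined, so the field is simply omitted from the request.
const verbosity = toUndefinedIfNull<'low' | 'high'>(null) // undefined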

* refactor(aiCore): extract service tier logic and improve type safety

Extract service tier validation logic into separate functions for better reusability
Add proper type annotations for provider options
Pass service tier parameter through provider option builders
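
A simplified sketch of the overload pattern used for getServiceTier (stand-in types; the real signatures key off GroqSystemProvider / NotGroqProvider in the diff below):

type GroqServiceTier = 'auto' | 'on_demand' | 'flex' | null | undefined
type OpenAIServiceTier = 'auto' | 'default' | 'flex' | 'priority' | null | undefined

interface GroqProvider { id: 'groq' }
interface OtherProvider { id: string }

function getServiceTier(provider: GroqProvider): GroqServiceTier
function getServiceTier(provider: OtherProvider): OpenAIServiceTier
function getServiceTier(provider: GroqProvider | OtherProvider): GroqServiceTier | OpenAIServiceTier {
  return provider.id === 'groq' ? 'on_demand' : 'priority'
}

const tier = getServiceTier({ id: 'groq' } as GroqProvider) // typed as GroqServiceTier, not the union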

* refactor(utils): comment out unused utility functions

Keep the commented-out utility functions for potential future use while cleaning up the current codebase

* fix(migration): update migration version number from 172 to 177

* docs(aiCoreTypes): clarify parameter passing behavior in OpenAI API

Update comments to consistently use 'undefined' instead of 'null' when describing parameter passing behavior in OpenAI API requests, as they share the same meaning in this context
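
The behavior those comments describe is plain JSON serialization: undefined properties are dropped from the request body while null is sent explicitly, so undefined is the natural way to express "do not pass this parameter":

const body = { model: 'gpt-5', service_tier: undefined, verbosity: null }

JSON.stringify(body)
// => '{"model":"gpt-5","verbosity":null}'
// service_tier is omitted entirely; verbosity is sent as an explicit null.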

---------

Co-authored-by: GitHub Action <action@github.com>
Phantom 2025-11-22 21:41:12 +08:00 committed by GitHub
parent a1ac3207f1
commit 0a72c613af
23 changed files with 356 additions and 127 deletions

View File

@ -4,3 +4,34 @@ export const defaultAppHeaders = () => {
'X-Title': 'Cherry Studio'
}
}
// The following two functions are not being used for now.
// I may use them in the future, so just keep them commented. - by eurfelux
/**
* Converts an `undefined` value to `null`, otherwise returns the value as-is.
* @param value - The value to check
* @returns `null` if the input is `undefined`; otherwise the input value
*/
// export function toNullIfUndefined<T>(value: T | undefined): T | null {
// if (value === undefined) {
// return null
// } else {
// return value
// }
// }
/**
* Converts a `null` value to `undefined`, otherwise returns the value as-is.
* @param value - The value to check
* @returns `undefined` if the input is `null`; otherwise the input value
*/
// export function toUndefinedIfNull<T>(value: T | null): T | undefined {
// if (value === null) {
// return undefined
// } else {
// return value
// }
// }

View File

@ -19,7 +19,6 @@ import type {
MCPToolResponse,
MemoryItem,
Model,
OpenAIVerbosity,
Provider,
ToolCallResponse,
WebSearchProviderResponse,
@ -33,6 +32,7 @@ import {
OpenAIServiceTiers,
SystemProviderIds
} from '@renderer/types'
import type { OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
import type { Message } from '@renderer/types/newMessage'
import type {
RequestOptions,

View File

@ -61,7 +61,7 @@ export async function buildStreamTextParams(
timeout?: number
headers?: Record<string, string>
}
} = {}
}
): Promise<{
params: StreamTextParams
modelId: string

View File

@ -1,3 +1,7 @@
import type { AnthropicProviderOptions } from '@ai-sdk/anthropic'
import type { GoogleGenerativeAIProviderOptions } from '@ai-sdk/google'
import type { OpenAIResponsesProviderOptions } from '@ai-sdk/openai'
import type { XaiProviderOptions } from '@ai-sdk/xai'
import { baseProviderIdSchema, customProviderIdSchema } from '@cherrystudio/ai-core/provider'
import { loggerService } from '@logger'
import {
@ -9,15 +13,28 @@ import {
} from '@renderer/config/models'
import { isSupportServiceTierProvider } from '@renderer/config/providers'
import { mapLanguageToQwenMTModel } from '@renderer/config/translate'
import type { Assistant, Model, Provider } from '@renderer/types'
import { getStoreSetting } from '@renderer/hooks/useSettings'
import type { RootState } from '@renderer/store'
import type {
Assistant,
GroqServiceTier,
GroqSystemProvider,
Model,
NotGroqProvider,
OpenAIServiceTier,
Provider,
ServiceTier
} from '@renderer/types'
import {
GroqServiceTiers,
isGroqServiceTier,
isGroqSystemProvider,
isOpenAIServiceTier,
isTranslateAssistant,
OpenAIServiceTiers,
SystemProviderIds
OpenAIServiceTiers
} from '@renderer/types'
import type { OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
import type { JSONValue } from 'ai'
import { t } from 'i18next'
import { getAiSdkProviderId } from '../provider/factory'
@ -35,8 +52,31 @@ import { getWebSearchParams } from './websearch'
const logger = loggerService.withContext('aiCore.utils.options')
// copy from BaseApiClient.ts
const getServiceTier = (model: Model, provider: Provider) => {
function toOpenAIServiceTier(model: Model, serviceTier: ServiceTier): OpenAIServiceTier {
if (
!isOpenAIServiceTier(serviceTier) ||
(serviceTier === OpenAIServiceTiers.flex && !isSupportFlexServiceTierModel(model))
) {
return undefined
} else {
return serviceTier
}
}
function toGroqServiceTier(model: Model, serviceTier: ServiceTier): GroqServiceTier {
if (
!isGroqServiceTier(serviceTier) ||
(serviceTier === GroqServiceTiers.flex && !isSupportFlexServiceTierModel(model))
) {
return undefined
} else {
return serviceTier
}
}
function getServiceTier<T extends GroqSystemProvider>(model: Model, provider: T): GroqServiceTier
function getServiceTier<T extends NotGroqProvider>(model: Model, provider: T): OpenAIServiceTier
function getServiceTier<T extends Provider>(model: Model, provider: T): OpenAIServiceTier | GroqServiceTier {
const serviceTierSetting = provider.serviceTier
if (!isSupportServiceTierProvider(provider) || !isOpenAIModel(model) || !serviceTierSetting) {
@ -44,24 +84,17 @@ const getServiceTier = (model: Model, provider: Provider) => {
}
// Handle cases where different providers need to fall back to the default value
if (provider.id === SystemProviderIds.groq) {
if (
!isGroqServiceTier(serviceTierSetting) ||
(serviceTierSetting === GroqServiceTiers.flex && !isSupportFlexServiceTierModel(model))
) {
return undefined
}
if (isGroqSystemProvider(provider)) {
return toGroqServiceTier(model, serviceTierSetting)
} else {
// Other OpenAI providers; assume their service tier settings are exactly the same as OpenAI's
if (
!isOpenAIServiceTier(serviceTierSetting) ||
(serviceTierSetting === OpenAIServiceTiers.flex && !isSupportFlexServiceTierModel(model))
) {
return undefined
}
return toOpenAIServiceTier(model, serviceTierSetting)
}
}
return serviceTierSetting
function getVerbosity(): OpenAIVerbosity {
const openAI = getStoreSetting('openAI')
return openAI.verbosity
}
/**
@ -78,13 +111,13 @@ export function buildProviderOptions(
enableWebSearch: boolean
enableGenerateImage: boolean
}
): Record<string, any> {
): Record<string, Record<string, JSONValue>> {
logger.debug('buildProviderOptions', { assistant, model, actualProvider, capabilities })
const rawProviderId = getAiSdkProviderId(actualProvider)
// Build provider-specific options
let providerSpecificOptions: Record<string, any> = {}
const serviceTierSetting = getServiceTier(model, actualProvider)
providerSpecificOptions.serviceTier = serviceTierSetting
const serviceTier = getServiceTier(model, actualProvider)
const textVerbosity = getVerbosity()
// Split the build logic by provider type
const { data: baseProviderId, success } = baseProviderIdSchema.safeParse(rawProviderId)
if (success) {
@ -94,9 +127,14 @@ export function buildProviderOptions(
case 'openai-chat':
case 'azure':
case 'azure-responses':
providerSpecificOptions = {
...buildOpenAIProviderOptions(assistant, model, capabilities),
serviceTier: serviceTierSetting
{
const options: OpenAIResponsesProviderOptions = buildOpenAIProviderOptions(
assistant,
model,
capabilities,
serviceTier
)
providerSpecificOptions = options
}
break
case 'anthropic':
@ -116,12 +154,19 @@ export function buildProviderOptions(
// For other providers, use the generic build logic
providerSpecificOptions = {
...buildGenericProviderOptions(assistant, model, capabilities),
serviceTier: serviceTierSetting
serviceTier,
textVerbosity
}
break
}
case 'cherryin':
providerSpecificOptions = buildCherryInProviderOptions(assistant, model, capabilities, actualProvider)
providerSpecificOptions = buildCherryInProviderOptions(
assistant,
model,
capabilities,
actualProvider,
serviceTier
)
break
default:
throw new Error(`Unsupported base provider ${baseProviderId}`)
@ -142,13 +187,14 @@ export function buildProviderOptions(
providerSpecificOptions = buildBedrockProviderOptions(assistant, model, capabilities)
break
case 'huggingface':
providerSpecificOptions = buildOpenAIProviderOptions(assistant, model, capabilities)
providerSpecificOptions = buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier)
break
default:
// For other providers, use the generic build logic
providerSpecificOptions = {
...buildGenericProviderOptions(assistant, model, capabilities),
serviceTier: serviceTierSetting
serviceTier,
textVerbosity
}
}
} else {
@ -189,10 +235,12 @@ function buildOpenAIProviderOptions(
enableReasoning: boolean
enableWebSearch: boolean
enableGenerateImage: boolean
}
): Record<string, any> {
},
serviceTier: OpenAIServiceTier
): OpenAIResponsesProviderOptions {
const { enableReasoning } = capabilities
let providerOptions: Record<string, any> = {}
// OpenAI reasoning parameters
if (enableReasoning) {
const reasoningParams = getOpenAIReasoningParams(assistant, model)
@ -203,7 +251,7 @@ function buildOpenAIProviderOptions(
}
if (isSupportVerbosityModel(model)) {
const state = window.store?.getState()
const state: RootState = window.store?.getState()
const userVerbosity = state?.settings?.openAI?.verbosity
if (userVerbosity && ['low', 'medium', 'high'].includes(userVerbosity)) {
@ -218,6 +266,11 @@ function buildOpenAIProviderOptions(
}
}
providerOptions = {
...providerOptions,
serviceTier
}
return providerOptions
}
@ -232,7 +285,7 @@ function buildAnthropicProviderOptions(
enableWebSearch: boolean
enableGenerateImage: boolean
}
): Record<string, any> {
): AnthropicProviderOptions {
const { enableReasoning } = capabilities
let providerOptions: Record<string, any> = {}
@ -259,7 +312,7 @@ function buildGeminiProviderOptions(
enableWebSearch: boolean
enableGenerateImage: boolean
}
): Record<string, any> {
): GoogleGenerativeAIProviderOptions {
const { enableReasoning, enableGenerateImage } = capabilities
let providerOptions: Record<string, any> = {}
@ -290,7 +343,7 @@ function buildXAIProviderOptions(
enableWebSearch: boolean
enableGenerateImage: boolean
}
): Record<string, any> {
): XaiProviderOptions {
const { enableReasoning } = capabilities
let providerOptions: Record<string, any> = {}
@ -313,16 +366,12 @@ function buildCherryInProviderOptions(
enableWebSearch: boolean
enableGenerateImage: boolean
},
actualProvider: Provider
): Record<string, any> {
const serviceTierSetting = getServiceTier(model, actualProvider)
actualProvider: Provider,
serviceTier: OpenAIServiceTier
): OpenAIResponsesProviderOptions | AnthropicProviderOptions | GoogleGenerativeAIProviderOptions {
switch (actualProvider.type) {
case 'openai':
return {
...buildOpenAIProviderOptions(assistant, model, capabilities),
serviceTier: serviceTierSetting
}
return buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier)
case 'anthropic':
return buildAnthropicProviderOptions(assistant, model, capabilities)

View File

@ -1,6 +1,7 @@
import type { BedrockProviderOptions } from '@ai-sdk/amazon-bedrock'
import type { AnthropicProviderOptions } from '@ai-sdk/anthropic'
import type { GoogleGenerativeAIProviderOptions } from '@ai-sdk/google'
import type { OpenAIResponsesProviderOptions } from '@ai-sdk/openai'
import type { XaiProviderOptions } from '@ai-sdk/xai'
import { loggerService } from '@logger'
import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
@ -35,9 +36,9 @@ import {
import { isSupportEnableThinkingProvider } from '@renderer/config/providers'
import { getStoreSetting } from '@renderer/hooks/useSettings'
import { getAssistantSettings, getProviderByModel } from '@renderer/services/AssistantService'
import type { SettingsState } from '@renderer/store/settings'
import type { Assistant, Model } from '@renderer/types'
import { EFFORT_RATIO, isSystemProvider, SystemProviderIds } from '@renderer/types'
import type { OpenAISummaryText } from '@renderer/types/aiCoreTypes'
import type { ReasoningEffortOptionalParams } from '@renderer/types/sdk'
import { toInteger } from 'lodash'
@ -341,10 +342,14 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
}
/**
* OpenAI
* OpenAIResponseAPIClient OpenAIAPIClient
* Get OpenAI reasoning parameters
* Extracted from OpenAIResponseAPIClient and OpenAIAPIClient logic
* For official OpenAI provider only
*/
export function getOpenAIReasoningParams(assistant: Assistant, model: Model): Record<string, any> {
export function getOpenAIReasoningParams(
assistant: Assistant,
model: Model
): Pick<OpenAIResponsesProviderOptions, 'reasoningEffort' | 'reasoningSummary'> {
if (!isReasoningModel(model)) {
return {}
}
@ -355,6 +360,10 @@ export function getOpenAIReasoningParams(assistant: Assistant, model: Model): Re
return {}
}
if (isOpenAIDeepResearchModel(model) || reasoningEffort === 'auto') {
reasoningEffort = 'medium'
}
// Non-OpenAI models whose provider type is responses/azure openai
if (!isOpenAIModel(model)) {
return {
@ -362,21 +371,17 @@ export function getOpenAIReasoningParams(assistant: Assistant, model: Model): Re
}
}
const openAI = getStoreSetting('openAI') as SettingsState['openAI']
const summaryText = openAI?.summaryText || 'off'
const openAI = getStoreSetting('openAI')
const summaryText = openAI.summaryText
let reasoningSummary: string | undefined = undefined
let reasoningSummary: OpenAISummaryText = undefined
if (summaryText === 'off' || model.id.includes('o1-pro')) {
if (model.id.includes('o1-pro')) {
reasoningSummary = undefined
} else {
reasoningSummary = summaryText
}
if (isOpenAIDeepResearchModel(model)) {
reasoningEffort = 'medium'
}
// OpenAI reasoning parameters
if (isSupportedReasoningEffortOpenAIModel(model)) {
return {

View File

@ -6,7 +6,7 @@ import { useEffect, useMemo, useRef, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled, { css } from 'styled-components'
interface SelectorOption<V = string | number> {
interface SelectorOption<V = string | number | undefined | null> {
label: string | ReactNode
value: V
type?: 'group'
@ -14,7 +14,7 @@ interface SelectorOption<V = string | number> {
disabled?: boolean
}
interface BaseSelectorProps<V = string | number> {
interface BaseSelectorProps<V = string | number | undefined | null> {
options: SelectorOption<V>[]
placeholder?: string
placement?: 'topLeft' | 'topCenter' | 'topRight' | 'bottomLeft' | 'bottomCenter' | 'bottomRight' | 'top' | 'bottom'
@ -39,7 +39,7 @@ interface MultipleSelectorProps<V> extends BaseSelectorProps<V> {
export type SelectorProps<V> = SingleSelectorProps<V> | MultipleSelectorProps<V>
const Selector = <V extends string | number>({
const Selector = <V extends string | number | undefined | null>({
options,
value,
onChange = () => {},

View File

@ -1,6 +1,7 @@
import type OpenAI from '@cherrystudio/openai'
import { isEmbeddingModel, isRerankModel } from '@renderer/config/models/embedding'
import type { Model } from '@renderer/types'
import type { OpenAIVerbosity, ValidOpenAIVerbosity } from '@renderer/types/aiCoreTypes'
import { getLowerBaseModelName } from '@renderer/utils'
import { WEB_SEARCH_PROMPT_FOR_OPENROUTER } from '../prompts'
@ -242,17 +243,20 @@ export const isGPT51SeriesModel = (model: Model) => {
// GPT-5 verbosity configuration
// gpt-5-pro only supports 'high', other GPT-5 models support all levels
export const MODEL_SUPPORTED_VERBOSITY: Record<string, ('low' | 'medium' | 'high')[]> = {
export const MODEL_SUPPORTED_VERBOSITY: Record<string, ValidOpenAIVerbosity[]> = {
'gpt-5-pro': ['high'],
default: ['low', 'medium', 'high']
}
} as const
export const getModelSupportedVerbosity = (model: Model): ('low' | 'medium' | 'high')[] => {
export const getModelSupportedVerbosity = (model: Model): OpenAIVerbosity[] => {
const modelId = getLowerBaseModelName(model.id)
let supportedValues: ValidOpenAIVerbosity[]
if (modelId.includes('gpt-5-pro')) {
return MODEL_SUPPORTED_VERBOSITY['gpt-5-pro']
supportedValues = MODEL_SUPPORTED_VERBOSITY['gpt-5-pro']
} else {
supportedValues = MODEL_SUPPORTED_VERBOSITY.default
}
return MODEL_SUPPORTED_VERBOSITY.default
return [undefined, ...supportedValues]
}
export const isGeminiModel = (model: Model) => {

View File

@ -1158,6 +1158,7 @@
"name": "Name",
"no_results": "No results",
"none": "None",
"off": "Off",
"open": "Open",
"paste": "Paste",
"placeholders": {
@ -4259,7 +4260,6 @@
"default": "default",
"flex": "flex",
"on_demand": "on demand",
"performance": "performance",
"priority": "priority",
"tip": "Specifies the latency tier to use for processing the request",
"title": "Service Tier"
@ -4278,7 +4278,7 @@
"low": "Low",
"medium": "Medium",
"tip": "Control the level of detail in the model's output",
"title": "Level of detail"
"title": "Verbosity"
}
},
"privacy": {

View File

@ -1158,6 +1158,7 @@
"name": "名称",
"no_results": "无结果",
"none": "无",
"off": "关闭",
"open": "打开",
"paste": "粘贴",
"placeholders": {
@ -4259,7 +4260,6 @@
"default": "默认",
"flex": "灵活",
"on_demand": "按需",
"performance": "性能",
"priority": "优先",
"tip": "指定用于处理请求的延迟层级",
"title": "服务层级"

View File

@ -1158,6 +1158,7 @@
"name": "名稱",
"no_results": "沒有結果",
"none": "無",
"off": "關閉",
"open": "開啟",
"paste": "貼上",
"placeholders": {
@ -4259,7 +4260,6 @@
"default": "預設",
"flex": "彈性",
"on_demand": "按需",
"performance": "效能",
"priority": "優先",
"tip": "指定用於處理請求的延遲層級",
"title": "服務層級"

View File

@ -1158,6 +1158,7 @@
"name": "Nome",
"no_results": "Nenhum resultado",
"none": "Nenhum",
"off": "Desligado",
"open": "Abrir",
"paste": "Colar",
"placeholders": {
@ -4223,7 +4224,6 @@
"default": "Padrão",
"flex": "Flexível",
"on_demand": "sob demanda",
"performance": "desempenho",
"priority": "prioridade",
"tip": "Especifique o nível de latência usado para processar a solicitação",
"title": "Nível de Serviço"

View File

@ -12,9 +12,9 @@ import { CollapsibleSettingGroup } from '@renderer/pages/settings/SettingGroup'
import type { RootState } from '@renderer/store'
import { useAppDispatch } from '@renderer/store'
import { setOpenAISummaryText, setOpenAIVerbosity } from '@renderer/store/settings'
import type { Model, OpenAIServiceTier, OpenAISummaryText, ServiceTier } from '@renderer/types'
import type { GroqServiceTier, Model, OpenAIServiceTier, ServiceTier } from '@renderer/types'
import { GroqServiceTiers, OpenAIServiceTiers, SystemProviderIds } from '@renderer/types'
import type { OpenAIVerbosity } from '@types'
import type { OpenAISummaryText, OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
import { Tooltip } from 'antd'
import { CircleHelp } from 'lucide-react'
import type { FC } from 'react'
@ -22,6 +22,21 @@ import { useCallback, useEffect, useMemo } from 'react'
import { useTranslation } from 'react-i18next'
import { useSelector } from 'react-redux'
type VerbosityOption = {
value: OpenAIVerbosity
label: string
}
type SummaryTextOption = {
value: OpenAISummaryText
label: string
}
type OpenAIServiceTierOption = { value: OpenAIServiceTier; label: string }
type GroqServiceTierOption = { value: GroqServiceTier; label: string }
type ServiceTierOptions = OpenAIServiceTierOption[] | GroqServiceTierOption[]
interface Props {
model: Model
providerId: string
@ -67,6 +82,10 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
)
const summaryTextOptions = [
{
value: undefined,
label: t('common.default')
},
{
value: 'auto',
label: t('settings.openai.summary_text_mode.auto')
@ -76,13 +95,17 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
label: t('settings.openai.summary_text_mode.detailed')
},
{
value: 'off',
label: t('settings.openai.summary_text_mode.off')
value: 'concise',
label: t('settings.openai.summary_text_mode.concise')
}
]
] as const satisfies SummaryTextOption[]
const verbosityOptions = useMemo(() => {
const allOptions = [
{
value: undefined,
label: t('common.default')
},
{
value: 'low',
label: t('settings.openai.verbosity.low')
@ -95,15 +118,23 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
value: 'high',
label: t('settings.openai.verbosity.high')
}
]
] as const satisfies VerbosityOption[]
const supportedVerbosityLevels = getModelSupportedVerbosity(model)
return allOptions.filter((option) => supportedVerbosityLevels.includes(option.value as any))
return allOptions.filter((option) => supportedVerbosityLevels.includes(option.value))
}, [model, t])
const serviceTierOptions = useMemo(() => {
let baseOptions: { value: ServiceTier; label: string }[]
let options: ServiceTierOptions
if (provider.id === SystemProviderIds.groq) {
baseOptions = [
options = [
{
value: null,
label: t('common.off')
},
{
value: undefined,
label: t('common.default')
},
{
value: 'auto',
label: t('settings.openai.service_tier.auto')
@ -115,15 +146,11 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
{
value: 'flex',
label: t('settings.openai.service_tier.flex')
},
{
value: 'performance',
label: t('settings.openai.service_tier.performance')
}
]
] as const satisfies GroqServiceTierOption[]
} else {
// Other cases default to the same tiers as OpenAI
baseOptions = [
options = [
{
value: 'auto',
label: t('settings.openai.service_tier.auto')
@ -140,9 +167,9 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
value: 'priority',
label: t('settings.openai.service_tier.priority')
}
]
] as const satisfies OpenAIServiceTierOption[]
}
return baseOptions.filter((option) => {
return options.filter((option) => {
if (option.value === 'flex') {
return isSupportedFlexServiceTier
}

View File

@ -83,7 +83,7 @@ export async function fetchChatCompletion({
messages,
prompt,
assistant,
options,
requestOptions,
onChunkReceived,
topicId,
uiMessages
@ -124,7 +124,7 @@ export async function fetchChatCompletion({
} = await buildStreamTextParams(messages, assistant, provider, {
mcpTools: mcpTools,
webSearchProviderId: assistant.webSearchProviderId,
requestOptions: options
requestOptions
})
// Safely fallback to prompt tool use when function calling is not supported by model.

View File

@ -48,7 +48,7 @@ export class OrchestrationService {
await fetchChatCompletion({
messages: modelMessages,
assistant: assistant,
options: request.options,
requestOptions: request.options,
onChunkReceived,
topicId: request.topicId,
uiMessages: uiMessages
@ -80,7 +80,7 @@ export async function transformMessagesAndFetch(
await fetchChatCompletion({
messages: modelMessages,
assistant: assistant,
options: request.options,
requestOptions: request.options,
onChunkReceived,
topicId: request.topicId,
uiMessages

View File

@ -2,7 +2,7 @@ import { loggerService } from '@logger'
import { db } from '@renderer/databases'
import type {
CustomTranslateLanguage,
FetchChatCompletionOptions,
FetchChatCompletionRequestOptions,
TranslateHistory,
TranslateLanguage,
TranslateLanguageCode
@ -56,15 +56,15 @@ export const translateText = async (
onResponse?.(translatedText, completed)
}
const options = {
const requestOptions = {
signal
} satisfies FetchChatCompletionOptions
} satisfies FetchChatCompletionRequestOptions
try {
await fetchChatCompletion({
prompt: assistant.content,
assistant,
options,
requestOptions,
onChunkReceived: onChunk
})
} catch (e) {

View File

@ -67,7 +67,7 @@ const persistedReducer = persistReducer(
{
key: 'cherry-studio',
storage,
version: 176,
version: 177,
blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs', 'toolPermissions'],
migrate
},

View File

@ -1499,6 +1499,7 @@ const migrateConfig = {
'102': (state: RootState) => {
try {
state.settings.openAI = {
// @ts-expect-error it's a removed type, migrated in 177
summaryText: 'off',
serviceTier: 'auto',
verbosity: 'medium'
@ -1592,6 +1593,7 @@ const migrateConfig = {
addMiniApp(state, 'google')
if (!state.settings.openAI) {
state.settings.openAI = {
// @ts-expect-error it's a removed type, migrated in 177
summaryText: 'off',
serviceTier: 'auto',
verbosity: 'medium'
@ -2856,6 +2858,19 @@ const migrateConfig = {
logger.error('migrate 176 error', error as Error)
return state
}
},
'177': (state: RootState) => {
try {
// @ts-expect-error it's a removed type
if (state.settings.openAI.summaryText === 'off') {
state.settings.openAI.summaryText = 'auto'
}
logger.info('migrate 177 success')
return state
} catch (error) {
logger.error('migrate 177 error', error as Error)
return state
}
}
}

View File

@ -10,16 +10,15 @@ import type {
LanguageVarious,
MathEngine,
OpenAIServiceTier,
OpenAISummaryText,
PaintingProvider,
S3Config,
SidebarIcon,
TranslateLanguageCode
} from '@renderer/types'
import { ThemeMode } from '@renderer/types'
import type { OpenAISummaryText, OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
import { uuid } from '@renderer/utils'
import { UpgradeChannel } from '@shared/config/constant'
import type { OpenAIVerbosity } from '@types'
import type { RemoteSyncState } from './backup'
@ -375,7 +374,7 @@ export const initialState: SettingsState = {
},
// OpenAI
openAI: {
summaryText: 'off',
summaryText: 'auto',
serviceTier: 'auto',
verbosity: 'medium'
},

View File

@ -1,3 +1,5 @@
import type OpenAI from '@cherrystudio/openai'
import type { NotNull, NotUndefined } from '@types'
import type { ImageModel, LanguageModel } from 'ai'
import type { generateObject, generateText, ModelMessage, streamObject, streamText } from 'ai'
@ -27,3 +29,16 @@ export type StreamObjectParams = Omit<Parameters<typeof streamObject>[0], 'model
export type GenerateObjectParams = Omit<Parameters<typeof generateObject>[0], 'model'>
export type AiSdkModel = LanguageModel | ImageModel
// The original type unites both undefined and null.
// I pick undefined as the single falsy type since they seem to share the same meaning according to the OpenAI API docs.
// The parameter is not passed into the request if it's undefined.
export type OpenAIVerbosity = NotNull<OpenAI.Responses.ResponseTextConfig['verbosity']>
export type ValidOpenAIVerbosity = NotUndefined<OpenAIVerbosity>
export type OpenAIReasoningEffort = OpenAI.ReasoningEffort
// The original type unites both undefined and null.
// I pick undefined as the single falsy type since they seem to share the same meaning according to the OpenAI API docs.
// The parameter is not passed into the request if it's undefined.
export type OpenAISummaryText = NotNull<OpenAI.Reasoning['summary']>

View File

@ -871,10 +871,6 @@ export interface StoreSyncAction {
}
}
export type OpenAIVerbosity = 'high' | 'medium' | 'low'
export type OpenAISummaryText = 'auto' | 'concise' | 'detailed' | 'off'
export type S3Config = {
endpoint: string
region: string
@ -1091,7 +1087,7 @@ export const isHexColor = (value: string): value is HexColor => {
return /^#([0-9A-F]{3}){1,2}$/i.test(value)
}
export type FetchChatCompletionOptions = {
export type FetchChatCompletionRequestOptions = {
signal?: AbortSignal
timeout?: number
headers?: Record<string, string>
@ -1099,7 +1095,7 @@ export type FetchChatCompletionOptions = {
type BaseParams = {
assistant: Assistant
options?: FetchChatCompletionOptions
requestOptions?: FetchChatCompletionRequestOptions
onChunkReceived: (chunk: Chunk) => void
topicId?: string // added topicId parameter
uiMessages?: Message[]
@ -1119,3 +1115,7 @@ type PromptParams = BaseParams & {
}
export type FetchChatCompletionParams = MessagesParams | PromptParams
// More specific than NonNullable
export type NotUndefined<T> = Exclude<T, undefined>
export type NotNull<T> = Exclude<T, null>

View File

@ -1,6 +1,9 @@
import type OpenAI from '@cherrystudio/openai'
import type { Model } from '@types'
import * as z from 'zod'
import type { OpenAIVerbosity } from './aiCoreTypes'
export const ProviderTypeSchema = z.enum([
'openai',
'openai-response',
@ -41,36 +44,38 @@ export type ProviderApiOptions = {
isNotSupportAPIVersion?: boolean
}
// scale is not well supported now; it even lacks docs.
// We treat undefined the same as default, and null the same as explicitly off.
// It controls whether the response contains the serviceTier field, so undefined and null must be kept separate.
export type OpenAIServiceTier = Exclude<OpenAI.Responses.ResponseCreateParams['service_tier'], 'scale'>
export const OpenAIServiceTiers = {
auto: 'auto',
default: 'default',
flex: 'flex',
priority: 'priority'
} as const
} as const satisfies Record<NonNullable<OpenAIServiceTier>, OpenAIServiceTier>
export type OpenAIServiceTier = keyof typeof OpenAIServiceTiers
export function isOpenAIServiceTier(tier: string): tier is OpenAIServiceTier {
return Object.hasOwn(OpenAIServiceTiers, tier)
export function isOpenAIServiceTier(tier: string | null | undefined): tier is OpenAIServiceTier {
return tier === null || tier === undefined || Object.hasOwn(OpenAIServiceTiers, tier)
}
// https://console.groq.com/docs/api-reference#responses
export type GroqServiceTier = 'auto' | 'on_demand' | 'flex' | undefined | null
export const GroqServiceTiers = {
auto: 'auto',
on_demand: 'on_demand',
flex: 'flex',
performance: 'performance'
} as const
flex: 'flex'
} as const satisfies Record<string, GroqServiceTier>
// Extract the type from the GroqServiceTiers object
export type GroqServiceTier = keyof typeof GroqServiceTiers
export function isGroqServiceTier(tier: string): tier is GroqServiceTier {
return Object.hasOwn(GroqServiceTiers, tier)
export function isGroqServiceTier(tier: string | undefined | null): tier is GroqServiceTier {
return tier === null || tier === undefined || Object.hasOwn(GroqServiceTiers, tier)
}
export type ServiceTier = OpenAIServiceTier | GroqServiceTier
export function isServiceTier(tier: string): tier is ServiceTier {
export function isServiceTier(tier: string | null | undefined): tier is ServiceTier {
return isGroqServiceTier(tier) || isOpenAIServiceTier(tier)
}
@ -103,6 +108,7 @@ export type Provider = {
// API options
apiOptions?: ProviderApiOptions
serviceTier?: ServiceTier
verbosity?: OpenAIVerbosity
/** @deprecated */
isNotSupportArrayContent?: boolean
@ -119,6 +125,75 @@ export type Provider = {
extra_headers?: Record<string, string>
}
export const SystemProviderIdSchema = z.enum([
'cherryin',
'silicon',
'aihubmix',
'ocoolai',
'deepseek',
'ppio',
'alayanew',
'qiniu',
'dmxapi',
'burncloud',
'tokenflux',
'302ai',
'cephalon',
'lanyun',
'ph8',
'openrouter',
'ollama',
'ovms',
'new-api',
'lmstudio',
'anthropic',
'openai',
'azure-openai',
'gemini',
'vertexai',
'github',
'copilot',
'zhipu',
'yi',
'moonshot',
'baichuan',
'dashscope',
'stepfun',
'doubao',
'infini',
'minimax',
'groq',
'together',
'fireworks',
'nvidia',
'grok',
'hyperbolic',
'mistral',
'jina',
'perplexity',
'modelscope',
'xirang',
'hunyuan',
'tencent-cloud-ti',
'baidu-cloud',
'gpustack',
'voyageai',
'aws-bedrock',
'poe',
'aionly',
'longcat',
'huggingface',
'sophnet',
'ai-gateway',
'cerebras'
])
export type SystemProviderId = z.infer<typeof SystemProviderIdSchema>
export const isSystemProviderId = (id: string): id is SystemProviderId => {
return SystemProviderIdSchema.safeParse(id).success
}
export const SystemProviderIds = {
cherryin: 'cherryin',
silicon: 'silicon',
@ -180,13 +255,9 @@ export const SystemProviderIds = {
huggingface: 'huggingface',
'ai-gateway': 'ai-gateway',
cerebras: 'cerebras'
} as const
} as const satisfies Record<SystemProviderId, SystemProviderId>
export type SystemProviderId = keyof typeof SystemProviderIds
export const isSystemProviderId = (id: string): id is SystemProviderId => {
return Object.hasOwn(SystemProviderIds, id)
}
type SystemProviderIdTypeMap = typeof SystemProviderIds
export type SystemProvider = Provider & {
id: SystemProviderId
@ -216,3 +287,16 @@ export type AzureOpenAIProvider = Provider & {
export const isSystemProvider = (provider: Provider): provider is SystemProvider => {
return isSystemProviderId(provider.id) && !!provider.isSystem
}
export type GroqSystemProvider = Provider & {
id: SystemProviderIdTypeMap['groq']
isSystem: true
}
export type NotGroqProvider = Provider & {
id: Exclude<string, SystemProviderIdTypeMap['groq']>
}
export const isGroqSystemProvider = (provider: Provider): provider is GroqSystemProvider => {
return provider.id === SystemProviderIds.groq
}

View File

@ -283,7 +283,7 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
await fetchChatCompletion({
messages: modelMessages,
assistant: newAssistant,
options: {},
requestOptions: {},
topicId,
uiMessages: uiMessages,
onChunkReceived: (chunk: Chunk) => {

View File

@ -70,7 +70,7 @@ export const processMessages = async (
await fetchChatCompletion({
messages: modelMessages,
assistant: newAssistant,
options: {},
requestOptions: {},
uiMessages: uiMessages,
onChunkReceived: (chunk: Chunk) => {
if (finished) {