diff --git a/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts b/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts index e755ce3f2..92f24b4ab 100644 --- a/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts +++ b/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts @@ -9,6 +9,7 @@ import { import { REFERENCE_PROMPT } from '@renderer/config/prompts' import { getLMStudioKeepAliveTime } from '@renderer/hooks/useLMStudio' import { getAssistantSettings } from '@renderer/services/AssistantService' +import type { RootState } from '@renderer/store' import type { Assistant, GenerateImageParams, @@ -245,23 +246,20 @@ export abstract class BaseApiClient< protected getVerbosity(model?: Model): OpenAIVerbosity { try { - const state = window.store?.getState() + const state = window.store?.getState() as RootState const verbosity = state?.settings?.openAI?.verbosity - if (verbosity && ['low', 'medium', 'high'].includes(verbosity)) { - // If model is provided, check if the verbosity is supported by the model - if (model) { - const supportedVerbosity = getModelSupportedVerbosity(model) - // Use user's verbosity if supported, otherwise use the first supported option - return supportedVerbosity.includes(verbosity) ? verbosity : supportedVerbosity[0] - } - return verbosity + // If model is provided, check if the verbosity is supported by the model + if (model) { + const supportedVerbosity = getModelSupportedVerbosity(model) + // Use user's verbosity if supported, otherwise use the first supported option + return supportedVerbosity.includes(verbosity) ? verbosity : supportedVerbosity[0] } + return verbosity } catch (error) { - logger.warn('Failed to get verbosity from state:', error as Error) + logger.warn('Failed to get verbosity from state. 
Fallback to undefined.', error as Error) + return undefined } - - return 'medium' } protected getTimeout(model: Model) { diff --git a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts b/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts index ea50680ea..cfc908754 100644 --- a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts +++ b/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts @@ -32,7 +32,6 @@ import { isSupportedThinkingTokenModel, isSupportedThinkingTokenQwenModel, isSupportedThinkingTokenZhipuModel, - isSupportVerbosityModel, isVisionModel, MODEL_SUPPORTED_REASONING_EFFORT, ZHIPU_RESULT_TOKENS @@ -714,13 +713,8 @@ export class OpenAIAPIClient extends OpenAIBaseClient< ...modalities, // groq 有不同的 service tier 配置,不符合 openai 接口类型 service_tier: this.getServiceTier(model) as OpenAIServiceTier, - ...(isSupportVerbosityModel(model) - ? { - text: { - verbosity: this.getVerbosity(model) - } - } - : {}), + // verbosity. getVerbosity ensures the returned value is valid. 
+ verbosity: this.getVerbosity(model), ...this.getProviderSpecificParameters(assistant, model), ...reasoningEffort, // ...getOpenAIWebSearchParams(model, enableWebSearch), diff --git a/src/renderer/src/config/models/__tests__/utils.test.ts b/src/renderer/src/config/models/__tests__/utils.test.ts index a163061ea..ae4e33875 100644 --- a/src/renderer/src/config/models/__tests__/utils.test.ts +++ b/src/renderer/src/config/models/__tests__/utils.test.ts @@ -222,18 +222,22 @@ describe('model utils', () => { describe('getModelSupportedVerbosity', () => { it('returns only "high" for GPT-5 Pro models', () => { - expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, 'high']) - expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([undefined, 'high']) + expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, null, 'high']) + expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([ + undefined, + null, + 'high' + ]) }) it('returns all levels for non-Pro GPT-5 models', () => { const previewModel = createModel({ id: 'gpt-5-preview' }) - expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, 'low', 'medium', 'high']) + expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, null, 'low', 'medium', 'high']) }) it('returns all levels for GPT-5.1 models', () => { const gpt51Model = createModel({ id: 'gpt-5.1-preview' }) - expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, 'low', 'medium', 'high']) + expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, null, 'low', 'medium', 'high']) }) it('returns only undefined for non-GPT-5 models', () => { diff --git a/src/renderer/src/config/models/utils.ts b/src/renderer/src/config/models/utils.ts index 25e802b25..accf85e2c 100644 --- a/src/renderer/src/config/models/utils.ts +++ b/src/renderer/src/config/models/utils.ts @@ -10,7 +10,8 @@ import { 
isGPT51SeriesModel, isOpenAIChatCompletionOnlyModel, isOpenAIOpenWeightModel, - isOpenAIReasoningModel + isOpenAIReasoningModel, + isSupportVerbosityModel } from './openai' import { isQwenMTModel } from './qwen' import { isGenerateImageModel, isTextToImageModel, isVisionModel } from './vision' @@ -154,10 +155,10 @@ const MODEL_SUPPORTED_VERBOSITY: readonly { * For GPT-5-pro, only 'high' is supported; for other GPT-5 models, 'low', 'medium', and 'high' are supported. * For GPT-5.1 series models, 'low', 'medium', and 'high' are supported. * @param model - The model to check - * @returns An array of supported verbosity levels, always including `undefined` as the first element + * @returns An array of supported verbosity levels, always including `undefined` as the first element and `null` when applicable */ export const getModelSupportedVerbosity = (model: Model | undefined | null): OpenAIVerbosity[] => { - if (!model) { + if (!model || !isSupportVerbosityModel(model)) { return [undefined] } @@ -165,7 +166,7 @@ export const getModelSupportedVerbosity = (model: Model | undefined | null): Ope for (const { validator, values } of MODEL_SUPPORTED_VERBOSITY) { if (validator(model)) { - supportedValues = [...values] + supportedValues = [null, ...values] break } } diff --git a/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx b/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx index bf3e1970d..35c943e21 100644 --- a/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx +++ b/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx @@ -24,12 +24,12 @@ import { useTranslation } from 'react-i18next' import { useSelector } from 'react-redux' type VerbosityOption = { - value: NonNullable | 'undefined' + value: NonNullable | 'undefined' | 'null' label: string } type SummaryTextOption = { - value: NonNullable | 'undefined' + value: NonNullable | 'undefined' | 'null' label: string } @@ -85,6 +85,10 @@ const 
OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti value: 'undefined', label: t('common.ignore') }, + { + value: 'null', + label: t('common.off') + }, { value: 'auto', label: t('settings.openai.summary_text_mode.auto') @@ -105,6 +109,10 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti value: 'undefined', label: t('common.ignore') }, + { + value: 'null', + label: t('common.off') + }, { value: 'low', label: t('settings.openai.verbosity.low') @@ -203,9 +211,9 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti { - setSummaryText(value as OpenAISummaryText) + setSummaryText(toRealValue(value)) }} options={summaryTextOptions} /> @@ -222,9 +230,9 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti { - setVerbosity(value as OpenAIVerbosity) + setVerbosity(toRealValue(value)) }} options={verbosityOptions} /> diff --git a/src/renderer/src/store/migrate.ts b/src/renderer/src/store/migrate.ts index 4b2e4cef8..b2993baf5 100644 --- a/src/renderer/src/store/migrate.ts +++ b/src/renderer/src/store/migrate.ts @@ -2906,6 +2906,23 @@ const migrateConfig = { logger.error('migrate 179 error', error as Error) return state } + }, + '180': (state: RootState) => { + try { + // @ts-expect-error + if (state.settings.openAI.summaryText === 'undefined') { + state.settings.openAI.summaryText = undefined + } + // @ts-expect-error + if (state.settings.openAI.verbosity === 'undefined') { + state.settings.openAI.verbosity = undefined + } + logger.info('migrate 180 success') + return state + } catch (error) { + logger.error('migrate 180 error', error as Error) + return state + } } } diff --git a/src/renderer/src/types/aiCoreTypes.ts b/src/renderer/src/types/aiCoreTypes.ts index 2e4c09348..6281905cb 100644 --- a/src/renderer/src/types/aiCoreTypes.ts +++ b/src/renderer/src/types/aiCoreTypes.ts @@ -1,5 +1,5 @@ import type OpenAI from '@cherrystudio/openai' -import type { NotNull, NotUndefined } from '@types' 
+import type { NotUndefined } from '@types' import type { ImageModel, LanguageModel } from 'ai' import type { generateObject, generateText, ModelMessage, streamObject, streamText } from 'ai' import * as z from 'zod' @@ -31,18 +31,26 @@ export type GenerateObjectParams = Omit[0], 'm export type AiSdkModel = LanguageModel | ImageModel -// The original type unite both undefined and null. -// I pick undefined as the unique falsy type since they seem like share the same meaning according to OpenAI API docs. -// Parameter would not be passed into request if it's undefined. -export type OpenAIVerbosity = NotNull +/** + * Constrains the verbosity of the model's response. Lower values will result in more concise responses, while higher values will result in more verbose responses. + * + * The original type unites both undefined and null. + * When undefined, the parameter is omitted from the request. + * When null, verbosity is explicitly disabled. + */ +export type OpenAIVerbosity = OpenAI.Responses.ResponseTextConfig['verbosity'] export type ValidOpenAIVerbosity = NotUndefined export type OpenAIReasoningEffort = OpenAI.ReasoningEffort -// The original type unite both undefined and null. -// I pick undefined as the unique falsy type since they seem like share the same meaning according to OpenAI API docs. -// Parameter would not be passed into request if it's undefined. -export type OpenAISummaryText = NotNull +/** + * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. + * + * The original type unites both undefined and null. + * When undefined, the parameter is omitted from the request. + * When null, the reasoning summary is explicitly disabled. 
+ */ +export type OpenAISummaryText = OpenAI.Reasoning['summary'] const AiSdkParamsSchema = z.enum([ 'maxOutputTokens', diff --git a/src/renderer/src/types/sdk.ts b/src/renderer/src/types/sdk.ts index bb76c8a63..2b51bee3f 100644 --- a/src/renderer/src/types/sdk.ts +++ b/src/renderer/src/types/sdk.ts @@ -128,10 +128,6 @@ export type OpenAIExtraBody = { source_lang: 'auto' target_lang: string } - // for gpt-5 series models verbosity control - text?: { - verbosity?: 'low' | 'medium' | 'high' - } } // image is for openrouter. audio is ignored for now export type OpenAIModality = OpenAI.ChatCompletionModality | 'image'