From 6696bcacb8bf9df724d8f4394332f89892287c2f Mon Sep 17 00:00:00 2001 From: Phantom Date: Wed, 3 Dec 2025 18:20:55 +0800 Subject: [PATCH] fix(settings): fix wrong type caused by as assertion in OpenAI settings (#11631) * fix(settings): fix wrong type caused by as assertion and migration Add migration step 180 to properly handle 'undefined' string values in OpenAI settings Update selector components to use value conversion helpers for summaryText and verbosity * feat(models): add null as supported verbosity level for OpenAI models Update model utils and types to include null as a valid verbosity level option alongside undefined. This provides more flexibility in controlling verbosity behavior, with null representing an explicit "off" state. Tests and UI components are updated to reflect this change. * fix(verbosity): fix wrong verbosity type definition and handling in #11281 * style: format * fix(store): correct verbosity check in migration config The condition was incorrectly checking for 'undefined' string instead of undefined value, and was assigning to summaryText instead of verbosity. This fixes the migration logic to properly handle the verbosity setting. 
* docs(aiCore): improve comments for verbosity and summary types Update type comments to better explain the behavior of verbosity and summary parameters in OpenAI API requests --- .../aiCore/legacy/clients/BaseApiClient.ts | 22 +++++++--------- .../legacy/clients/openai/OpenAIApiClient.ts | 10 ++----- .../src/config/models/__tests__/utils.test.ts | 12 ++++++--- src/renderer/src/config/models/utils.ts | 9 ++++--- .../Tabs/components/OpenAISettingsGroup.tsx | 20 +++++++++----- src/renderer/src/store/migrate.ts | 17 ++++++++++++ src/renderer/src/types/aiCoreTypes.ts | 26 ++++++++++++------- src/renderer/src/types/sdk.ts | 4 --- 8 files changed, 73 insertions(+), 47 deletions(-) diff --git a/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts b/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts index e755ce3f20..92f24b4abe 100644 --- a/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts +++ b/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts @@ -9,6 +9,7 @@ import { import { REFERENCE_PROMPT } from '@renderer/config/prompts' import { getLMStudioKeepAliveTime } from '@renderer/hooks/useLMStudio' import { getAssistantSettings } from '@renderer/services/AssistantService' +import type { RootState } from '@renderer/store' import type { Assistant, GenerateImageParams, @@ -245,23 +246,20 @@ export abstract class BaseApiClient< protected getVerbosity(model?: Model): OpenAIVerbosity { try { - const state = window.store?.getState() + const state = window.store?.getState() as RootState const verbosity = state?.settings?.openAI?.verbosity - if (verbosity && ['low', 'medium', 'high'].includes(verbosity)) { - // If model is provided, check if the verbosity is supported by the model - if (model) { - const supportedVerbosity = getModelSupportedVerbosity(model) - // Use user's verbosity if supported, otherwise use the first supported option - return supportedVerbosity.includes(verbosity) ? 
verbosity : supportedVerbosity[0] - } - return verbosity + // If model is provided, check if the verbosity is supported by the model + if (model) { + const supportedVerbosity = getModelSupportedVerbosity(model) + // Use user's verbosity if supported, otherwise use the first supported option + return supportedVerbosity.includes(verbosity) ? verbosity : supportedVerbosity[0] } + return verbosity } catch (error) { - logger.warn('Failed to get verbosity from state:', error as Error) + logger.warn('Failed to get verbosity from state. Fallback to undefined.', error as Error) + return undefined } - - return 'medium' } protected getTimeout(model: Model) { diff --git a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts b/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts index ea50680ea4..cfc9087545 100644 --- a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts +++ b/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts @@ -32,7 +32,6 @@ import { isSupportedThinkingTokenModel, isSupportedThinkingTokenQwenModel, isSupportedThinkingTokenZhipuModel, - isSupportVerbosityModel, isVisionModel, MODEL_SUPPORTED_REASONING_EFFORT, ZHIPU_RESULT_TOKENS @@ -714,13 +713,8 @@ export class OpenAIAPIClient extends OpenAIBaseClient< ...modalities, // groq 有不同的 service tier 配置,不符合 openai 接口类型 service_tier: this.getServiceTier(model) as OpenAIServiceTier, - ...(isSupportVerbosityModel(model) - ? { - text: { - verbosity: this.getVerbosity(model) - } - } - : {}), + // verbosity. getVerbosity ensures the returned value is valid. 
+ verbosity: this.getVerbosity(model), ...this.getProviderSpecificParameters(assistant, model), ...reasoningEffort, // ...getOpenAIWebSearchParams(model, enableWebSearch), diff --git a/src/renderer/src/config/models/__tests__/utils.test.ts b/src/renderer/src/config/models/__tests__/utils.test.ts index a163061ea1..ae4e33875c 100644 --- a/src/renderer/src/config/models/__tests__/utils.test.ts +++ b/src/renderer/src/config/models/__tests__/utils.test.ts @@ -222,18 +222,22 @@ describe('model utils', () => { describe('getModelSupportedVerbosity', () => { it('returns only "high" for GPT-5 Pro models', () => { - expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, 'high']) - expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([undefined, 'high']) + expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, null, 'high']) + expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([ + undefined, + null, + 'high' + ]) }) it('returns all levels for non-Pro GPT-5 models', () => { const previewModel = createModel({ id: 'gpt-5-preview' }) - expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, 'low', 'medium', 'high']) + expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, null, 'low', 'medium', 'high']) }) it('returns all levels for GPT-5.1 models', () => { const gpt51Model = createModel({ id: 'gpt-5.1-preview' }) - expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, 'low', 'medium', 'high']) + expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, null, 'low', 'medium', 'high']) }) it('returns only undefined for non-GPT-5 models', () => { diff --git a/src/renderer/src/config/models/utils.ts b/src/renderer/src/config/models/utils.ts index 25e802b257..accf85e2cd 100644 --- a/src/renderer/src/config/models/utils.ts +++ b/src/renderer/src/config/models/utils.ts @@ -10,7 +10,8 @@ import { 
isGPT51SeriesModel, isOpenAIChatCompletionOnlyModel, isOpenAIOpenWeightModel, - isOpenAIReasoningModel + isOpenAIReasoningModel, + isSupportVerbosityModel } from './openai' import { isQwenMTModel } from './qwen' import { isGenerateImageModel, isTextToImageModel, isVisionModel } from './vision' @@ -154,10 +155,10 @@ const MODEL_SUPPORTED_VERBOSITY: readonly { * For GPT-5-pro, only 'high' is supported; for other GPT-5 models, 'low', 'medium', and 'high' are supported. * For GPT-5.1 series models, 'low', 'medium', and 'high' are supported. * @param model - The model to check - * @returns An array of supported verbosity levels, always including `undefined` as the first element + * @returns An array of supported verbosity levels, always including `undefined` as the first element and `null` when applicable */ export const getModelSupportedVerbosity = (model: Model | undefined | null): OpenAIVerbosity[] => { - if (!model) { + if (!model || !isSupportVerbosityModel(model)) { return [undefined] } @@ -165,7 +166,7 @@ export const getModelSupportedVerbosity = (model: Model | undefined | null): Ope for (const { validator, values } of MODEL_SUPPORTED_VERBOSITY) { if (validator(model)) { - supportedValues = [...values] + supportedValues = [null, ...values] break } } diff --git a/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx b/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx index bf3e1970dc..35c943e21b 100644 --- a/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx +++ b/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx @@ -24,12 +24,12 @@ import { useTranslation } from 'react-i18next' import { useSelector } from 'react-redux' type VerbosityOption = { - value: NonNullable | 'undefined' + value: NonNullable | 'undefined' | 'null' label: string } type SummaryTextOption = { - value: NonNullable | 'undefined' + value: NonNullable | 'undefined' | 'null' label: string } @@ -85,6 +85,10 @@ const 
OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti value: 'undefined', label: t('common.ignore') }, + { + value: 'null', + label: t('common.off') + }, { value: 'auto', label: t('settings.openai.summary_text_mode.auto') @@ -105,6 +109,10 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti value: 'undefined', label: t('common.ignore') }, + { + value: 'null', + label: t('common.off') + }, { value: 'low', label: t('settings.openai.verbosity.low') @@ -203,9 +211,9 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti { - setSummaryText(value as OpenAISummaryText) + setSummaryText(toRealValue(value)) }} options={summaryTextOptions} /> @@ -222,9 +230,9 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti { - setVerbosity(value as OpenAIVerbosity) + setVerbosity(toRealValue(value)) }} options={verbosityOptions} /> diff --git a/src/renderer/src/store/migrate.ts b/src/renderer/src/store/migrate.ts index 4b2e4cef89..b2993baf5d 100644 --- a/src/renderer/src/store/migrate.ts +++ b/src/renderer/src/store/migrate.ts @@ -2906,6 +2906,23 @@ const migrateConfig = { logger.error('migrate 179 error', error as Error) return state } + }, + '180': (state: RootState) => { + try { + // @ts-expect-error + if (state.settings.openAI.summaryText === 'undefined') { + state.settings.openAI.summaryText = undefined + } + // @ts-expect-error + if (state.settings.openAI.verbosity === 'undefined') { + state.settings.openAI.verbosity = undefined + } + logger.info('migrate 180 success') + return state + } catch (error) { + logger.error('migrate 180 error', error as Error) + return state + } } } diff --git a/src/renderer/src/types/aiCoreTypes.ts b/src/renderer/src/types/aiCoreTypes.ts index 2e4c09348b..6281905cbb 100644 --- a/src/renderer/src/types/aiCoreTypes.ts +++ b/src/renderer/src/types/aiCoreTypes.ts @@ -1,5 +1,5 @@ import type OpenAI from '@cherrystudio/openai' -import type { NotNull, NotUndefined } from 
'@types' +import type { ImageModel, LanguageModel } from 'ai' import type { generateObject, generateText, ModelMessage, streamObject, streamText } from 'ai' import * as z from 'zod' @@ -31,18 +31,26 @@ export type GenerateObjectParams = Omit[0], 'm export type AiSdkModel = LanguageModel | ImageModel -// The original type unite both undefined and null. -// I pick undefined as the unique falsy type since they seem like share the same meaning according to OpenAI API docs. -// Parameter would not be passed into request if it's undefined. -export type OpenAIVerbosity = NotNull +/** + * Constrains the verbosity of the model's response. Lower values will result in more concise responses, while higher values will result in more verbose responses. + * + * The original type unites both undefined and null. + * When undefined, the parameter is omitted from the request. + * When null, verbosity is explicitly disabled. + */ +export type OpenAIVerbosity = OpenAI.Responses.ResponseTextConfig['verbosity'] export type ValidOpenAIVerbosity = NotUndefined export type OpenAIReasoningEffort = OpenAI.ReasoningEffort -// The original type unite both undefined and null. -// I pick undefined as the unique falsy type since they seem like share the same meaning according to OpenAI API docs. -// Parameter would not be passed into request if it's undefined. -export type OpenAISummaryText = NotNull +/** + * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. + * + * The original type unites both undefined and null. + * When undefined, the parameter is omitted from the request. + * When null, the reasoning summary is explicitly disabled. 
+ */ +export type OpenAISummaryText = OpenAI.Reasoning['summary'] const AiSdkParamsSchema = z.enum([ 'maxOutputTokens', diff --git a/src/renderer/src/types/sdk.ts b/src/renderer/src/types/sdk.ts index bb76c8a634..2b51bee3fd 100644 --- a/src/renderer/src/types/sdk.ts +++ b/src/renderer/src/types/sdk.ts @@ -128,10 +128,6 @@ export type OpenAIExtraBody = { source_lang: 'auto' target_lang: string } - // for gpt-5 series models verbosity control - text?: { - verbosity?: 'low' | 'medium' | 'high' - } } // image is for openrouter. audio is ignored for now export type OpenAIModality = OpenAI.ChatCompletionModality | 'image'