fix(settings): fix wrong type caused by as assertion in OpenAI settings (#11631)

* fix(settings): fix wrong type caused by as assertion and migration

Add migration step 180 to properly handle 'undefined' string values in OpenAI settings
Update selector components to use value conversion helpers for summaryText and verbosity

* feat(models): add null as supported verbosity level for OpenAI models

Update model utils and types to include null as a valid verbosity level option alongside undefined. This provides more flexibility in controlling verbosity behavior, with null representing an explicit "off" state. Tests and UI components are updated to reflect this change.

* fix(verbosity): fix wrong verbosity type definition and handling in #11281

* style: format

* fix(store): correct verbosity check in migration config

The condition was incorrectly checking for 'undefined' string instead of undefined value, and was assigning to summaryText instead of verbosity. This fixes the migration logic to properly handle the verbosity setting.

* docs(aiCore): improve comments for verbosity and summary types

Update type comments to better explain the behavior of verbosity and summary parameters in OpenAI API requests
This commit is contained in:
Phantom 2025-12-03 18:20:55 +08:00 committed by GitHub
parent a1e95b55f8
commit 6696bcacb8
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 73 additions and 47 deletions

View File

@ -9,6 +9,7 @@ import {
import { REFERENCE_PROMPT } from '@renderer/config/prompts'
import { getLMStudioKeepAliveTime } from '@renderer/hooks/useLMStudio'
import { getAssistantSettings } from '@renderer/services/AssistantService'
import type { RootState } from '@renderer/store'
import type {
Assistant,
GenerateImageParams,
@ -245,23 +246,20 @@ export abstract class BaseApiClient<
protected getVerbosity(model?: Model): OpenAIVerbosity {
try {
const state = window.store?.getState()
const state = window.store?.getState() as RootState
const verbosity = state?.settings?.openAI?.verbosity
if (verbosity && ['low', 'medium', 'high'].includes(verbosity)) {
// If model is provided, check if the verbosity is supported by the model
if (model) {
const supportedVerbosity = getModelSupportedVerbosity(model)
// Use user's verbosity if supported, otherwise use the first supported option
return supportedVerbosity.includes(verbosity) ? verbosity : supportedVerbosity[0]
}
return verbosity
// If model is provided, check if the verbosity is supported by the model
if (model) {
const supportedVerbosity = getModelSupportedVerbosity(model)
// Use user's verbosity if supported, otherwise use the first supported option
return supportedVerbosity.includes(verbosity) ? verbosity : supportedVerbosity[0]
}
return verbosity
} catch (error) {
logger.warn('Failed to get verbosity from state:', error as Error)
logger.warn('Failed to get verbosity from state. Fallback to undefined.', error as Error)
return undefined
}
return 'medium'
}
protected getTimeout(model: Model) {

View File

@ -32,7 +32,6 @@ import {
isSupportedThinkingTokenModel,
isSupportedThinkingTokenQwenModel,
isSupportedThinkingTokenZhipuModel,
isSupportVerbosityModel,
isVisionModel,
MODEL_SUPPORTED_REASONING_EFFORT,
ZHIPU_RESULT_TOKENS
@ -714,13 +713,8 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
...modalities,
// groq 有不同的 service tier 配置,不符合 openai 接口类型
service_tier: this.getServiceTier(model) as OpenAIServiceTier,
...(isSupportVerbosityModel(model)
? {
text: {
verbosity: this.getVerbosity(model)
}
}
: {}),
// Verbosity control. getVerbosity ensures the returned value is one the model supports.
verbosity: this.getVerbosity(model),
...this.getProviderSpecificParameters(assistant, model),
...reasoningEffort,
// ...getOpenAIWebSearchParams(model, enableWebSearch),

View File

@ -222,18 +222,22 @@ describe('model utils', () => {
describe('getModelSupportedVerbosity', () => {
it('returns only "high" for GPT-5 Pro models', () => {
expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, 'high'])
expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([undefined, 'high'])
expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, null, 'high'])
expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([
undefined,
null,
'high'
])
})
it('returns all levels for non-Pro GPT-5 models', () => {
const previewModel = createModel({ id: 'gpt-5-preview' })
expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, 'low', 'medium', 'high'])
expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, null, 'low', 'medium', 'high'])
})
it('returns all levels for GPT-5.1 models', () => {
const gpt51Model = createModel({ id: 'gpt-5.1-preview' })
expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, 'low', 'medium', 'high'])
expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, null, 'low', 'medium', 'high'])
})
it('returns only undefined for non-GPT-5 models', () => {

View File

@ -10,7 +10,8 @@ import {
isGPT51SeriesModel,
isOpenAIChatCompletionOnlyModel,
isOpenAIOpenWeightModel,
isOpenAIReasoningModel
isOpenAIReasoningModel,
isSupportVerbosityModel
} from './openai'
import { isQwenMTModel } from './qwen'
import { isGenerateImageModel, isTextToImageModel, isVisionModel } from './vision'
@ -154,10 +155,10 @@ const MODEL_SUPPORTED_VERBOSITY: readonly {
* For GPT-5-pro, only 'high' is supported; for other GPT-5 models, 'low', 'medium', and 'high' are supported.
* For GPT-5.1 series models, 'low', 'medium', and 'high' are supported.
* @param model - The model to check
* @returns An array of supported verbosity levels, always including `undefined` as the first element
* @returns An array of supported verbosity levels, always including `undefined` as the first element and `null` when applicable
*/
export const getModelSupportedVerbosity = (model: Model | undefined | null): OpenAIVerbosity[] => {
if (!model) {
if (!model || !isSupportVerbosityModel(model)) {
return [undefined]
}
@ -165,7 +166,7 @@ export const getModelSupportedVerbosity = (model: Model | undefined | null): Ope
for (const { validator, values } of MODEL_SUPPORTED_VERBOSITY) {
if (validator(model)) {
supportedValues = [...values]
supportedValues = [null, ...values]
break
}
}

View File

@ -24,12 +24,12 @@ import { useTranslation } from 'react-i18next'
import { useSelector } from 'react-redux'
type VerbosityOption = {
value: NonNullable<OpenAIVerbosity> | 'undefined'
value: NonNullable<OpenAIVerbosity> | 'undefined' | 'null'
label: string
}
type SummaryTextOption = {
value: NonNullable<OpenAISummaryText> | 'undefined'
value: NonNullable<OpenAISummaryText> | 'undefined' | 'null'
label: string
}
@ -85,6 +85,10 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
value: 'undefined',
label: t('common.ignore')
},
{
value: 'null',
label: t('common.off')
},
{
value: 'auto',
label: t('settings.openai.summary_text_mode.auto')
@ -105,6 +109,10 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
value: 'undefined',
label: t('common.ignore')
},
{
value: 'null',
label: t('common.off')
},
{
value: 'low',
label: t('settings.openai.verbosity.low')
@ -203,9 +211,9 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
</Tooltip>
</SettingRowTitleSmall>
<Selector
value={summaryText}
value={toOptionValue(summaryText)}
onChange={(value) => {
setSummaryText(value as OpenAISummaryText)
setSummaryText(toRealValue(value))
}}
options={summaryTextOptions}
/>
@ -222,9 +230,9 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
</Tooltip>
</SettingRowTitleSmall>
<Selector
value={verbosity}
value={toOptionValue(verbosity)}
onChange={(value) => {
setVerbosity(value as OpenAIVerbosity)
setVerbosity(toRealValue(value))
}}
options={verbosityOptions}
/>

View File

@ -2906,6 +2906,23 @@ const migrateConfig = {
logger.error('migrate 179 error', error as Error)
return state
}
},
'180': (state: RootState) => {
  try {
    // Earlier releases could persist the literal string 'undefined' for these
    // OpenAI settings (the settings selector used string option values);
    // normalize it back to a real `undefined` so the stored value matches the
    // declared settings type.
    // @ts-expect-error -- comparing against a legacy string the type no longer allows
    if (state.settings.openAI.summaryText === 'undefined') {
      state.settings.openAI.summaryText = undefined
    }
    // @ts-expect-error -- comparing against a legacy string the type no longer allows
    if (state.settings.openAI.verbosity === 'undefined') {
      state.settings.openAI.verbosity = undefined
    }
    logger.info('migrate 180 success')
    return state
  } catch (error) {
    // Never fail persistence rehydration on a migration error; keep the prior state.
    logger.error('migrate 180 error', error as Error)
    return state
  }
}
}

View File

@ -1,5 +1,5 @@
import type OpenAI from '@cherrystudio/openai'
import type { NotNull, NotUndefined } from '@types'
import type { NotUndefined } from '@types'
import type { ImageModel, LanguageModel } from 'ai'
import type { generateObject, generateText, ModelMessage, streamObject, streamText } from 'ai'
import * as z from 'zod'
@ -31,18 +31,26 @@ export type GenerateObjectParams = Omit<Parameters<typeof generateObject>[0], 'm
export type AiSdkModel = LanguageModel | ImageModel
// The original type unites both undefined and null.
// I pick undefined as the unique falsy type since they seem to share the same meaning according to the OpenAI API docs.
// The parameter is not passed into the request if it's undefined.
export type OpenAIVerbosity = NotNull<OpenAI.Responses.ResponseTextConfig['verbosity']>
/**
* Constrains the verbosity of the model's response. Lower values will result in more concise responses, while higher values will result in more verbose responses.
*
* The original type unites both undefined and null.
* When undefined, the parameter is omitted from the request.
* When null, verbosity is explicitly disabled.
*/
export type OpenAIVerbosity = OpenAI.Responses.ResponseTextConfig['verbosity']
export type ValidOpenAIVerbosity = NotUndefined<OpenAIVerbosity>
export type OpenAIReasoningEffort = OpenAI.ReasoningEffort
// The original type unites both undefined and null.
// I pick undefined as the unique falsy type since they seem to share the same meaning according to the OpenAI API docs.
// The parameter is not passed into the request if it's undefined.
export type OpenAISummaryText = NotNull<OpenAI.Reasoning['summary']>
/**
* A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process.
*
* The original type unites both undefined and null.
* When undefined, the parameter is omitted from the request.
 * When null, the reasoning summary is explicitly disabled.
*/
export type OpenAISummaryText = OpenAI.Reasoning['summary']
const AiSdkParamsSchema = z.enum([
'maxOutputTokens',

View File

@ -128,10 +128,6 @@ export type OpenAIExtraBody = {
source_lang: 'auto'
target_lang: string
}
// for gpt-5 series models verbosity control
text?: {
verbosity?: 'low' | 'medium' | 'high'
}
}
// image is for openrouter. audio is ignored for now
export type OpenAIModality = OpenAI.ChatCompletionModality | 'image'