refactor(OpenAIApiClient, models, ThinkingButton): streamline reasoning model checks and enhance support for Perplexity models (#8487)

- Removed the specific check for Grok models in OpenAIApiClient and consolidated it with the general reasoning effort model check.
- Added support for a new Perplexity model, 'sonar-deep-research', in the models configuration.
- Updated the reasoning model checks to include Perplexity models in the models.ts file.
- Enhanced the ThinkingButton component to recognize and handle Perplexity model options.
This commit is contained in:
SuYao 2025-07-26 23:48:45 +08:00 committed by GitHub
parent 4611e2c058
commit fd01653164
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 42 additions and 14 deletions

View File

@ -10,7 +10,6 @@ import {
isQwenMTModel,
isQwenReasoningModel,
isReasoningModel,
isSupportedReasoningEffortGrokModel,
isSupportedReasoningEffortModel,
isSupportedReasoningEffortOpenAIModel,
isSupportedThinkingTokenClaudeModel,
@ -199,15 +198,8 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
}
}
// Grok models
if (isSupportedReasoningEffortGrokModel(model)) {
return {
reasoning_effort: reasoningEffort
}
}
// OpenAI models
if (isSupportedReasoningEffortOpenAIModel(model)) {
// Grok models/Perplexity models/OpenAI models
if (isSupportedReasoningEffortModel(model)) {
return {
reasoning_effort: reasoningEffort
}

View File

@ -1975,6 +1975,12 @@ export const SYSTEM_MODELS: Record<string, Model[]> = {
provider: 'perplexity',
name: 'sonar',
group: 'Sonar'
},
{
id: 'sonar-deep-research',
provider: 'perplexity',
name: 'sonar-deep-research',
group: 'Sonar'
}
],
infini: [
@ -2406,7 +2412,13 @@ export const GEMINI_SEARCH_REGEX = new RegExp('gemini-2\\..*', 'i')
export const OPENAI_NO_SUPPORT_DEV_ROLE_MODELS = ['o1-preview', 'o1-mini']
export const PERPLEXITY_SEARCH_MODELS = ['sonar-pro', 'sonar', 'sonar-reasoning', 'sonar-reasoning-pro']
export const PERPLEXITY_SEARCH_MODELS = [
'sonar-pro',
'sonar',
'sonar-reasoning',
'sonar-reasoning-pro',
'sonar-deep-research'
]
export function isTextToImageModel(model: Model): boolean {
return TEXT_TO_IMAGE_REGEX.test(model.id)
@ -2547,7 +2559,11 @@ export function isSupportedReasoningEffortModel(model?: Model): boolean {
return false
}
return isSupportedReasoningEffortOpenAIModel(model) || isSupportedReasoningEffortGrokModel(model)
return (
isSupportedReasoningEffortOpenAIModel(model) ||
isSupportedReasoningEffortGrokModel(model) ||
isSupportedReasoningEffortPerplexityModel(model)
)
}
export function isGrokModel(model?: Model): boolean {
@ -2683,6 +2699,20 @@ export const isHunyuanReasoningModel = (model?: Model): boolean => {
return isSupportedThinkingTokenHunyuanModel(model) || model.id.toLowerCase().includes('hunyuan-t1')
}
/**
 * Whether a Perplexity model produces reasoning output.
 * True when the model supports a reasoning-effort parameter (sonar-deep-research)
 * or when its lowercased base name contains 'reasoning'
 * (e.g. sonar-reasoning, sonar-reasoning-pro).
 */
export const isPerplexityReasoningModel = (model?: Model): boolean => {
  if (!model) {
    return false
  }
  if (isSupportedReasoningEffortPerplexityModel(model)) {
    return true
  }
  return getLowerBaseModelName(model.id, '/').includes('reasoning')
}
/**
 * Whether a Perplexity model accepts a reasoning_effort parameter.
 * Matches only models whose base name contains 'sonar-deep-research'.
 *
 * @param model - model to check; undefined/null returns false, consistent with
 *   the other `isSupported*` guards in this file (e.g. isSupportedReasoningEffortModel).
 */
export const isSupportedReasoningEffortPerplexityModel = (model?: Model): boolean => {
  // Guard added for consistency with sibling checks; previously a non-optional
  // parameter, so accepting `undefined` here is backward compatible.
  if (!model) {
    return false
  }
  const baseName = getLowerBaseModelName(model.id, '/')
  return baseName.includes('sonar-deep-research')
}
export function isReasoningModel(model?: Model): boolean {
if (!model || isEmbeddingModel(model) || isRerankModel(model) || isTextToImageModel(model)) {
return false
@ -2708,6 +2738,7 @@ export function isReasoningModel(model?: Model): boolean {
isQwenReasoningModel(model) ||
isGrokReasoningModel(model) ||
isHunyuanReasoningModel(model) ||
isPerplexityReasoningModel(model) ||
model.id.toLowerCase().includes('glm-z1') ||
model.id.toLowerCase().includes('magistral') ||
model.id.toLowerCase().includes('minimax-m1')

View File

@ -10,6 +10,7 @@ import {
GEMINI_FLASH_MODEL_REGEX,
isDoubaoThinkingAutoModel,
isSupportedReasoningEffortGrokModel,
isSupportedReasoningEffortPerplexityModel,
isSupportedThinkingTokenDoubaoModel,
isSupportedThinkingTokenGeminiModel,
isSupportedThinkingTokenHunyuanModel,
@ -44,7 +45,8 @@ const MODEL_SUPPORTED_OPTIONS: Record<string, ThinkingOption[]> = {
qwen: ['off', 'low', 'medium', 'high'],
qwen_3235ba22b_thinking: ['low', 'medium', 'high'],
doubao: ['off', 'auto', 'high'],
hunyuan: ['off', 'auto']
hunyuan: ['off', 'auto'],
perplexity: ['low', 'medium', 'high']
}
// 选项转换映射表:当选项不支持时使用的替代选项
@ -68,6 +70,7 @@ const ThinkingButton: FC<Props> = ({ ref, model, assistant, ToolbarButton }): Re
const isQwen3235BA22BThinkingModel = model.id.includes('qwen3-235b-a22b-thinking')
const isDoubaoModel = isSupportedThinkingTokenDoubaoModel(model)
const isHunyuanModel = isSupportedThinkingTokenHunyuanModel(model)
const isPerplexityModel = isSupportedReasoningEffortPerplexityModel(model)
const currentReasoningEffort = useMemo(() => {
return assistant.settings?.reasoning_effort || 'off'
@ -91,14 +94,16 @@ const ThinkingButton: FC<Props> = ({ ref, model, assistant, ToolbarButton }): Re
}
if (isDoubaoModel) return 'doubao'
if (isHunyuanModel) return 'hunyuan'
if (isPerplexityModel) return 'perplexity'
return 'default'
}, [
isGeminiModel,
isGrokModel,
isQwenModel,
isDoubaoModel,
isHunyuanModel,
isGeminiFlashModel,
isHunyuanModel,
isPerplexityModel,
isQwen3235BA22BThinkingModel
])