Mirror of https://github.com/CherryHQ/cherry-studio.git, synced 2025-12-30 07:39:06 +08:00
refactor(OpenAIApiClient, models, ThinkingButton): streamline reasoning model checks and enhance support for Perplexity models (#8487)
- Removed the specific check for Grok models in OpenAIApiClient and consolidated it with the general reasoning effort model check (a simplified sketch of the consolidated branch follows the commit metadata below).
- Added support for a new Perplexity model, 'sonar-deep-research', in the models configuration.
- Updated the reasoning model checks to include Perplexity models in the models.ts file.
- Enhanced the ThinkingButton component to recognize and handle Perplexity model options.
Parent: 4611e2c058
Commit: fd01653164
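For orientation before the diff, here is a minimal sketch of the consolidated branch described in the first bullet. The types, function names, and ID patterns below are simplified assumptions for illustration only; the real helper lives in models.ts and delegates to per-provider predicates (see the hunks further down).

// Sketch only: `Model`, `ReasoningEffort`, and `buildReasoningParams` are simplified
// stand-ins, not the project's real types or method signature.
interface Model {
  id: string
  provider: string
}

type ReasoningEffort = 'low' | 'medium' | 'high'

// Simplified stand-in for the models.ts helper; the ID patterns are illustrative
// placeholders, not the repository's actual matching rules.
function isSupportedReasoningEffortModel(model?: Model): boolean {
  if (!model) return false
  const id = model.id.toLowerCase()
  return id.includes('grok-3-mini') || id.startsWith('o3') || id.includes('sonar-deep-research')
}

function buildReasoningParams(model: Model, reasoningEffort: ReasoningEffort): Record<string, unknown> {
  // Grok models/Perplexity models/OpenAI models now share one branch instead of
  // separate Grok and OpenAI checks.
  if (isSupportedReasoningEffortModel(model)) {
    return { reasoning_effort: reasoningEffort }
  }
  return {}
}

// Example: a Perplexity deep-research model now gets the same parameter as Grok/OpenAI models.
console.log(buildReasoningParams({ id: 'sonar-deep-research', provider: 'perplexity' }, 'high'))
// -> { reasoning_effort: 'high' }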
OpenAIApiClient:

@@ -10,7 +10,6 @@ import {
   isQwenMTModel,
   isQwenReasoningModel,
   isReasoningModel,
-  isSupportedReasoningEffortGrokModel,
   isSupportedReasoningEffortModel,
   isSupportedReasoningEffortOpenAIModel,
   isSupportedThinkingTokenClaudeModel,
@@ -199,15 +198,8 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
       }
     }

-    // Grok models
-    if (isSupportedReasoningEffortGrokModel(model)) {
-      return {
-        reasoning_effort: reasoningEffort
-      }
-    }
-
-    // OpenAI models
-    if (isSupportedReasoningEffortOpenAIModel(model)) {
+    // Grok models/Perplexity models/OpenAI models
+    if (isSupportedReasoningEffortModel(model)) {
       return {
         reasoning_effort: reasoningEffort
       }
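Design note: with the Grok- and OpenAI-specific branches collapsed behind isSupportedReasoningEffortModel, the client no longer enumerates which providers accept reasoning_effort; supporting Perplexity (or any future provider) only requires a new leaf predicate in models.ts, as the following hunks show.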
models.ts:

@@ -1975,6 +1975,12 @@ export const SYSTEM_MODELS: Record<string, Model[]> = {
       provider: 'perplexity',
       name: 'sonar',
       group: 'Sonar'
+    },
+    {
+      id: 'sonar-deep-research',
+      provider: 'perplexity',
+      name: 'sonar-deep-research',
+      group: 'Sonar'
     }
   ],
   infini: [
@@ -2406,7 +2412,13 @@ export const GEMINI_SEARCH_REGEX = new RegExp('gemini-2\\..*', 'i')

 export const OPENAI_NO_SUPPORT_DEV_ROLE_MODELS = ['o1-preview', 'o1-mini']

-export const PERPLEXITY_SEARCH_MODELS = ['sonar-pro', 'sonar', 'sonar-reasoning', 'sonar-reasoning-pro']
+export const PERPLEXITY_SEARCH_MODELS = [
+  'sonar-pro',
+  'sonar',
+  'sonar-reasoning',
+  'sonar-reasoning-pro',
+  'sonar-deep-research'
+]

 export function isTextToImageModel(model: Model): boolean {
   return TEXT_TO_IMAGE_REGEX.test(model.id)
@@ -2547,7 +2559,11 @@ export function isSupportedReasoningEffortModel(model?: Model): boolean {
     return false
   }

-  return isSupportedReasoningEffortOpenAIModel(model) || isSupportedReasoningEffortGrokModel(model)
+  return (
+    isSupportedReasoningEffortOpenAIModel(model) ||
+    isSupportedReasoningEffortGrokModel(model) ||
+    isSupportedReasoningEffortPerplexityModel(model)
+  )
 }

 export function isGrokModel(model?: Model): boolean {
@@ -2683,6 +2699,20 @@ export const isHunyuanReasoningModel = (model?: Model): boolean => {
   return isSupportedThinkingTokenHunyuanModel(model) || model.id.toLowerCase().includes('hunyuan-t1')
 }

+export const isPerplexityReasoningModel = (model?: Model): boolean => {
+  if (!model) {
+    return false
+  }
+
+  const baseName = getLowerBaseModelName(model.id, '/')
+  return isSupportedReasoningEffortPerplexityModel(model) || baseName.includes('reasoning')
+}
+
+export const isSupportedReasoningEffortPerplexityModel = (model: Model): boolean => {
+  const baseName = getLowerBaseModelName(model.id, '/')
+  return baseName.includes('sonar-deep-research')
+}
+
 export function isReasoningModel(model?: Model): boolean {
   if (!model || isEmbeddingModel(model) || isRerankModel(model) || isTextToImageModel(model)) {
     return false
@@ -2708,6 +2738,7 @@ export function isReasoningModel(model?: Model): boolean {
     isQwenReasoningModel(model) ||
     isGrokReasoningModel(model) ||
     isHunyuanReasoningModel(model) ||
+    isPerplexityReasoningModel(model) ||
     model.id.toLowerCase().includes('glm-z1') ||
     model.id.toLowerCase().includes('magistral') ||
     model.id.toLowerCase().includes('minimax-m1')
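A quick illustration of what the two new Perplexity predicates accept. getLowerBaseModelName is replaced by a simplified stand-in here (lowercase the ID and keep the segment after the last '/'); the real helper may normalize IDs differently.

interface Model {
  id: string
}

// Simplified stand-in for the project's getLowerBaseModelName helper (assumption).
const getLowerBaseModelName = (id: string, sep: string = '/'): string => {
  const parts = id.toLowerCase().split(sep)
  return parts[parts.length - 1]
}

const isSupportedReasoningEffortPerplexityModel = (model: Model): boolean =>
  getLowerBaseModelName(model.id, '/').includes('sonar-deep-research')

const isPerplexityReasoningModel = (model?: Model): boolean => {
  if (!model) return false
  const baseName = getLowerBaseModelName(model.id, '/')
  return isSupportedReasoningEffortPerplexityModel(model) || baseName.includes('reasoning')
}

console.log(isSupportedReasoningEffortPerplexityModel({ id: 'perplexity/sonar-deep-research' })) // true
console.log(isPerplexityReasoningModel({ id: 'sonar-reasoning-pro' }))                           // true
console.log(isSupportedReasoningEffortPerplexityModel({ id: 'sonar-reasoning-pro' }))            // false

So 'sonar-reasoning-pro' is still treated as a reasoning model, but only 'sonar-deep-research' is treated as accepting the reasoning_effort parameter.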
ThinkingButton:

@@ -10,6 +10,7 @@ import {
   GEMINI_FLASH_MODEL_REGEX,
   isDoubaoThinkingAutoModel,
   isSupportedReasoningEffortGrokModel,
+  isSupportedReasoningEffortPerplexityModel,
   isSupportedThinkingTokenDoubaoModel,
   isSupportedThinkingTokenGeminiModel,
   isSupportedThinkingTokenHunyuanModel,
@@ -44,7 +45,8 @@ const MODEL_SUPPORTED_OPTIONS: Record<string, ThinkingOption[]> = {
   qwen: ['off', 'low', 'medium', 'high'],
   qwen_3235ba22b_thinking: ['low', 'medium', 'high'],
   doubao: ['off', 'auto', 'high'],
-  hunyuan: ['off', 'auto']
+  hunyuan: ['off', 'auto'],
+  perplexity: ['low', 'medium', 'high']
 }

 // 选项转换映射表:当选项不支持时使用的替代选项
@@ -68,6 +70,7 @@ const ThinkingButton: FC<Props> = ({ ref, model, assistant, ToolbarButton }): Re
   const isQwen3235BA22BThinkingModel = model.id.includes('qwen3-235b-a22b-thinking')
   const isDoubaoModel = isSupportedThinkingTokenDoubaoModel(model)
   const isHunyuanModel = isSupportedThinkingTokenHunyuanModel(model)
+  const isPerplexityModel = isSupportedReasoningEffortPerplexityModel(model)

   const currentReasoningEffort = useMemo(() => {
     return assistant.settings?.reasoning_effort || 'off'
@@ -91,14 +94,16 @@ const ThinkingButton: FC<Props> = ({ ref, model, assistant, ToolbarButton }): Re
     }
     if (isDoubaoModel) return 'doubao'
     if (isHunyuanModel) return 'hunyuan'
+    if (isPerplexityModel) return 'perplexity'
     return 'default'
   }, [
     isGeminiModel,
     isGrokModel,
     isQwenModel,
     isDoubaoModel,
-    isHunyuanModel,
     isGeminiFlashModel,
+    isHunyuanModel,
+    isPerplexityModel,
     isQwen3235BA22BThinkingModel
   ])
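Finally, a small sketch of how the new 'perplexity' entry plays out in the button. The option table below is excerpted from the hunks above; supportedOptions is a hypothetical helper standing in for the component's useMemo-based modelType resolution.

type ThinkingOption = 'off' | 'auto' | 'low' | 'medium' | 'high'

// Excerpt of MODEL_SUPPORTED_OPTIONS as changed by this commit; other entries omitted.
const MODEL_SUPPORTED_OPTIONS: Record<string, ThinkingOption[]> = {
  doubao: ['off', 'auto', 'high'],
  hunyuan: ['off', 'auto'],
  perplexity: ['low', 'medium', 'high']
}

// Hypothetical helper mirroring the modelType resolution in the hunks above:
// a sonar-deep-research model resolves to 'perplexity'.
function supportedOptions(modelType: string): ThinkingOption[] {
  return MODEL_SUPPORTED_OPTIONS[modelType] ?? []
}

console.log(supportedOptions('perplexity')) // ['low', 'medium', 'high']

The absence of 'off' in the perplexity entry suggests the effort level for sonar-deep-research can be tuned but not disabled from this menu.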