fix: refine Qwen model reasoning checks in isQwenReasoningModel and isSupportedThinkingTokenQwenModel functions (#9966)

SuYao 2025-09-06 10:59:09 +08:00 committed by GitHub
parent c506ff6872
commit b5632b0097
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
12 changed files with 3390 additions and 3223 deletions


@@ -0,0 +1,90 @@
import {
isImageEnhancementModel,
isPureGenerateImageModel,
isQwenReasoningModel,
isSupportedThinkingTokenQwenModel,
isVisionModel,
isWebSearchModel
} from '@renderer/config/models'
import { Model } from '@renderer/types'
import { beforeEach, describe, expect, test, vi } from 'vitest'
// Suggested test cases
describe('Qwen Model Detection', () => {
beforeEach(() => {
vi.mock('@renderer/store/llm', () => ({
initialState: {}
}))
vi.mock('@renderer/services/AssistantService', () => ({
getProviderByModel: vi.fn().mockReturnValue({ id: 'cherryin' })
}))
})
test('isQwenReasoningModel', () => {
expect(isQwenReasoningModel({ id: 'qwen3-thinking' } as Model)).toBe(true)
expect(isQwenReasoningModel({ id: 'qwen3-instruct' } as Model)).toBe(false)
expect(isQwenReasoningModel({ id: 'qwen3-max' } as Model)).toBe(false)
expect(isQwenReasoningModel({ id: 'qwen3-8b' } as Model)).toBe(true)
expect(isQwenReasoningModel({ id: 'qwq-32b' } as Model)).toBe(true)
expect(isQwenReasoningModel({ id: 'qwen-plus' } as Model)).toBe(true)
expect(isQwenReasoningModel({ id: 'qwen3-coder' } as Model)).toBe(false)
})
test('isSupportedThinkingTokenQwenModel', () => {
expect(isSupportedThinkingTokenQwenModel({ id: 'qwen3-max' } as Model)).toBe(false)
expect(isSupportedThinkingTokenQwenModel({ id: 'qwen3-instruct' } as Model)).toBe(false)
expect(isSupportedThinkingTokenQwenModel({ id: 'qwen3-thinking' } as Model)).toBe(false)
expect(isSupportedThinkingTokenQwenModel({ id: 'qwen3-8b' } as Model)).toBe(true)
expect(isSupportedThinkingTokenQwenModel({ id: 'qwen3-235b-a22b-thinking-2507' } as Model)).toBe(false)
expect(isSupportedThinkingTokenQwenModel({ id: 'qwen-plus' } as Model)).toBe(true)
expect(isSupportedThinkingTokenQwenModel({ id: 'qwq-32b' } as Model)).toBe(false)
expect(isSupportedThinkingTokenQwenModel({ id: 'qwen3-coder' } as Model)).toBe(false)
})
test('isVisionModel', () => {
expect(isVisionModel({ id: 'qwen-vl-max' } as Model)).toBe(true)
expect(isVisionModel({ id: 'qwen-omni-turbo' } as Model)).toBe(true)
})
})
describe('Vision Model Detection', () => {
beforeEach(() => {
vi.mock('@renderer/store/llm', () => ({
initialState: {}
}))
vi.mock('@renderer/services/AssistantService', () => ({
getProviderByModel: vi.fn().mockReturnValue({ id: 'cherryin' })
}))
})
test('isVisionModel', () => {
expect(isVisionModel({ id: 'qwen-vl-max' } as Model)).toBe(true)
expect(isVisionModel({ id: 'qwen-omni-turbo' } as Model)).toBe(true)
})
test('isImageEnhancementModel', () => {
expect(isImageEnhancementModel({ id: 'gpt-image-1' } as Model)).toBe(true)
expect(isImageEnhancementModel({ id: 'gemini-2.5-flash-image-preview' } as Model)).toBe(true)
expect(isImageEnhancementModel({ id: 'gemini-2.0-flash-preview-image-generation' } as Model)).toBe(true)
expect(isImageEnhancementModel({ id: 'qwen-image-edit' } as Model)).toBe(true)
expect(isImageEnhancementModel({ id: 'grok-2-image-latest' } as Model)).toBe(true)
})
test('isPureGenerateImageModel', () => {
expect(isPureGenerateImageModel({ id: 'gpt-image-1' } as Model)).toBe(true)
expect(isPureGenerateImageModel({ id: 'gemini-2.5-flash-image-preview' } as Model)).toBe(true)
expect(isPureGenerateImageModel({ id: 'gemini-2.0-flash-preview-image-generation' } as Model)).toBe(true)
expect(isPureGenerateImageModel({ id: 'grok-2-image-latest' } as Model)).toBe(true)
expect(isPureGenerateImageModel({ id: 'gpt-4o' } as Model)).toBe(false)
})
})
describe('Web Search Model Detection', () => {
beforeEach(() => {
vi.mock('@renderer/store/llm', () => ({
initialState: {}
}))
vi.mock('@renderer/services/AssistantService', () => ({
getProviderByModel: vi.fn().mockReturnValue({ id: 'cherryin' })
}))
})
test('isWebSearchModel', () => {
expect(isWebSearchModel({ id: 'grok-2-image-latest' } as Model)).toBe(false)
})
})
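A sketch, not part of this commit and assuming the same imports as the test file above: the Qwen expectations could also be written table-driven with vitest's test.each.
test.each([
  ['qwen3-thinking', true],
  ['qwen3-instruct', false],
  ['qwen3-coder', false],
  ['qwq-32b', true]
])('isQwenReasoningModel(%s) -> %s', (id, expected) => {
  expect(isQwenReasoningModel({ id } as Model)).toBe(expected)
})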

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,38 @@
import { Model } from '@renderer/types'
import { getLowerBaseModelName, isUserSelectedModelType } from '@renderer/utils'
// Embedding models
export const EMBEDDING_REGEX =
/(?:^text-|embed|bge-|e5-|LLM2Vec|retrieval|uae-|gte-|jina-clip|jina-embeddings|voyage-)/i
// Rerank models
export const RERANKING_REGEX = /(?:rerank|re-rank|re-ranker|re-ranking|retrieval|retriever)/i
export function isEmbeddingModel(model: Model): boolean {
if (!model || isRerankModel(model)) {
return false
}
const modelId = getLowerBaseModelName(model.id)
if (isUserSelectedModelType(model, 'embedding') !== undefined) {
return isUserSelectedModelType(model, 'embedding')!
}
if (['anthropic'].includes(model?.provider)) {
return false
}
if (model.provider === 'doubao' || modelId.includes('doubao')) {
return EMBEDDING_REGEX.test(model.name)
}
return EMBEDDING_REGEX.test(modelId) || false
}
export function isRerankModel(model: Model): boolean {
if (isUserSelectedModelType(model, 'rerank') !== undefined) {
return isUserSelectedModelType(model, 'rerank')!
}
const modelId = getLowerBaseModelName(model.id)
return model ? RERANKING_REGEX.test(modelId) || false : false
}
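A quick usage sketch (illustrative model ids, not part of this commit):
isEmbeddingModel({ id: 'text-embedding-3-small', provider: 'openai' } as Model) // true, matches the 'text-' prefix in EMBEDDING_REGEX
isRerankModel({ id: 'bge-reranker-v2-m3', provider: 'silicon' } as Model) // true, matches 'rerank' in RERANKING_REGEX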


@@ -0,0 +1,8 @@
export * from './default'
export * from './embedding'
export * from './logo'
export * from './reasoning'
export * from './tooluse'
export * from './utils'
export * from './vision'
export * from './websearch'


@@ -0,0 +1,299 @@
import LongCatAppLogo from '@renderer/assets/images/apps/longcat.svg'
import Ai360ModelLogo from '@renderer/assets/images/models/360.png'
import Ai360ModelLogoDark from '@renderer/assets/images/models/360_dark.png'
import AdeptModelLogo from '@renderer/assets/images/models/adept.png'
import AdeptModelLogoDark from '@renderer/assets/images/models/adept_dark.png'
import Ai21ModelLogo from '@renderer/assets/images/models/ai21.png'
import Ai21ModelLogoDark from '@renderer/assets/images/models/ai21_dark.png'
import AimassModelLogo from '@renderer/assets/images/models/aimass.png'
import AimassModelLogoDark from '@renderer/assets/images/models/aimass_dark.png'
import AisingaporeModelLogo from '@renderer/assets/images/models/aisingapore.png'
import AisingaporeModelLogoDark from '@renderer/assets/images/models/aisingapore_dark.png'
import BaichuanModelLogo from '@renderer/assets/images/models/baichuan.png'
import BaichuanModelLogoDark from '@renderer/assets/images/models/baichuan_dark.png'
import BgeModelLogo from '@renderer/assets/images/models/bge.webp'
import BigcodeModelLogo from '@renderer/assets/images/models/bigcode.webp'
import BigcodeModelLogoDark from '@renderer/assets/images/models/bigcode_dark.webp'
import BytedanceModelLogo from '@renderer/assets/images/models/byte_dance.svg'
import ChatGLMModelLogo from '@renderer/assets/images/models/chatglm.png'
import ChatGLMModelLogoDark from '@renderer/assets/images/models/chatglm_dark.png'
import ChatGptModelLogo from '@renderer/assets/images/models/chatgpt.jpeg'
import ClaudeModelLogo from '@renderer/assets/images/models/claude.png'
import ClaudeModelLogoDark from '@renderer/assets/images/models/claude_dark.png'
import CodegeexModelLogo from '@renderer/assets/images/models/codegeex.png'
import CodegeexModelLogoDark from '@renderer/assets/images/models/codegeex_dark.png'
import CodestralModelLogo from '@renderer/assets/images/models/codestral.png'
import CohereModelLogo from '@renderer/assets/images/models/cohere.png'
import CohereModelLogoDark from '@renderer/assets/images/models/cohere_dark.png'
import CopilotModelLogo from '@renderer/assets/images/models/copilot.png'
import CopilotModelLogoDark from '@renderer/assets/images/models/copilot_dark.png'
import DalleModelLogo from '@renderer/assets/images/models/dalle.png'
import DalleModelLogoDark from '@renderer/assets/images/models/dalle_dark.png'
import DbrxModelLogo from '@renderer/assets/images/models/dbrx.png'
import DeepSeekModelLogo from '@renderer/assets/images/models/deepseek.png'
import DeepSeekModelLogoDark from '@renderer/assets/images/models/deepseek_dark.png'
import DianxinModelLogo from '@renderer/assets/images/models/dianxin.png'
import DianxinModelLogoDark from '@renderer/assets/images/models/dianxin_dark.png'
import DoubaoModelLogo from '@renderer/assets/images/models/doubao.png'
import DoubaoModelLogoDark from '@renderer/assets/images/models/doubao_dark.png'
import {
default as EmbeddingModelLogo,
default as EmbeddingModelLogoDark
} from '@renderer/assets/images/models/embedding.png'
import FlashaudioModelLogo from '@renderer/assets/images/models/flashaudio.png'
import FlashaudioModelLogoDark from '@renderer/assets/images/models/flashaudio_dark.png'
import FluxModelLogo from '@renderer/assets/images/models/flux.png'
import FluxModelLogoDark from '@renderer/assets/images/models/flux_dark.png'
import GeminiModelLogo from '@renderer/assets/images/models/gemini.png'
import GeminiModelLogoDark from '@renderer/assets/images/models/gemini_dark.png'
import GemmaModelLogo from '@renderer/assets/images/models/gemma.png'
import GemmaModelLogoDark from '@renderer/assets/images/models/gemma_dark.png'
import { default as GoogleModelLogo, default as GoogleModelLogoDark } from '@renderer/assets/images/models/google.png'
import ChatGPT35ModelLogo from '@renderer/assets/images/models/gpt_3.5.png'
import ChatGPT4ModelLogo from '@renderer/assets/images/models/gpt_4.png'
import {
default as ChatGPT4ModelLogoDark,
default as ChatGPT35ModelLogoDark,
default as ChatGptModelLogoDark,
default as ChatGPTo1ModelLogoDark
} from '@renderer/assets/images/models/gpt_dark.png'
import ChatGPTImageModelLogo from '@renderer/assets/images/models/gpt_image_1.png'
import ChatGPTo1ModelLogo from '@renderer/assets/images/models/gpt_o1.png'
import GPT5ModelLogo from '@renderer/assets/images/models/gpt-5.png'
import GPT5ChatModelLogo from '@renderer/assets/images/models/gpt-5-chat.png'
import GPT5MiniModelLogo from '@renderer/assets/images/models/gpt-5-mini.png'
import GPT5NanoModelLogo from '@renderer/assets/images/models/gpt-5-nano.png'
import GrokModelLogo from '@renderer/assets/images/models/grok.png'
import GrokModelLogoDark from '@renderer/assets/images/models/grok_dark.png'
import GrypheModelLogo from '@renderer/assets/images/models/gryphe.png'
import GrypheModelLogoDark from '@renderer/assets/images/models/gryphe_dark.png'
import HailuoModelLogo from '@renderer/assets/images/models/hailuo.png'
import HailuoModelLogoDark from '@renderer/assets/images/models/hailuo_dark.png'
import HuggingfaceModelLogo from '@renderer/assets/images/models/huggingface.png'
import HuggingfaceModelLogoDark from '@renderer/assets/images/models/huggingface_dark.png'
import HunyuanModelLogo from '@renderer/assets/images/models/hunyuan.png'
import HunyuanModelLogoDark from '@renderer/assets/images/models/hunyuan_dark.png'
import IbmModelLogo from '@renderer/assets/images/models/ibm.png'
import IbmModelLogoDark from '@renderer/assets/images/models/ibm_dark.png'
import IdeogramModelLogo from '@renderer/assets/images/models/ideogram.svg'
import InternlmModelLogo from '@renderer/assets/images/models/internlm.png'
import InternlmModelLogoDark from '@renderer/assets/images/models/internlm_dark.png'
import InternvlModelLogo from '@renderer/assets/images/models/internvl.png'
import JinaModelLogo from '@renderer/assets/images/models/jina.png'
import JinaModelLogoDark from '@renderer/assets/images/models/jina_dark.png'
import KeLingModelLogo from '@renderer/assets/images/models/keling.png'
import KeLingModelLogoDark from '@renderer/assets/images/models/keling_dark.png'
import LlamaModelLogo from '@renderer/assets/images/models/llama.png'
import LlamaModelLogoDark from '@renderer/assets/images/models/llama_dark.png'
import LLavaModelLogo from '@renderer/assets/images/models/llava.png'
import LLavaModelLogoDark from '@renderer/assets/images/models/llava_dark.png'
import LumaModelLogo from '@renderer/assets/images/models/luma.png'
import LumaModelLogoDark from '@renderer/assets/images/models/luma_dark.png'
import MagicModelLogo from '@renderer/assets/images/models/magic.png'
import MagicModelLogoDark from '@renderer/assets/images/models/magic_dark.png'
import MediatekModelLogo from '@renderer/assets/images/models/mediatek.png'
import MediatekModelLogoDark from '@renderer/assets/images/models/mediatek_dark.png'
import MicrosoftModelLogo from '@renderer/assets/images/models/microsoft.png'
import MicrosoftModelLogoDark from '@renderer/assets/images/models/microsoft_dark.png'
import MidjourneyModelLogo from '@renderer/assets/images/models/midjourney.png'
import MidjourneyModelLogoDark from '@renderer/assets/images/models/midjourney_dark.png'
import {
default as MinicpmModelLogo,
default as MinicpmModelLogoDark
} from '@renderer/assets/images/models/minicpm.webp'
import MinimaxModelLogo from '@renderer/assets/images/models/minimax.png'
import MinimaxModelLogoDark from '@renderer/assets/images/models/minimax_dark.png'
import MistralModelLogo from '@renderer/assets/images/models/mixtral.png'
import MistralModelLogoDark from '@renderer/assets/images/models/mixtral_dark.png'
import MoonshotModelLogo from '@renderer/assets/images/models/moonshot.png'
import MoonshotModelLogoDark from '@renderer/assets/images/models/moonshot_dark.png'
import {
default as NousResearchModelLogo,
default as NousResearchModelLogoDark
} from '@renderer/assets/images/models/nousresearch.png'
import NvidiaModelLogo from '@renderer/assets/images/models/nvidia.png'
import NvidiaModelLogoDark from '@renderer/assets/images/models/nvidia_dark.png'
import PalmModelLogo from '@renderer/assets/images/models/palm.png'
import PalmModelLogoDark from '@renderer/assets/images/models/palm_dark.png'
import PanguModelLogo from '@renderer/assets/images/models/pangu.svg'
import {
default as PerplexityModelLogo,
default as PerplexityModelLogoDark
} from '@renderer/assets/images/models/perplexity.png'
import PixtralModelLogo from '@renderer/assets/images/models/pixtral.png'
import PixtralModelLogoDark from '@renderer/assets/images/models/pixtral_dark.png'
import QwenModelLogo from '@renderer/assets/images/models/qwen.png'
import QwenModelLogoDark from '@renderer/assets/images/models/qwen_dark.png'
import RakutenaiModelLogo from '@renderer/assets/images/models/rakutenai.png'
import RakutenaiModelLogoDark from '@renderer/assets/images/models/rakutenai_dark.png'
import SparkDeskModelLogo from '@renderer/assets/images/models/sparkdesk.png'
import SparkDeskModelLogoDark from '@renderer/assets/images/models/sparkdesk_dark.png'
import StabilityModelLogo from '@renderer/assets/images/models/stability.png'
import StabilityModelLogoDark from '@renderer/assets/images/models/stability_dark.png'
import StepModelLogo from '@renderer/assets/images/models/step.png'
import StepModelLogoDark from '@renderer/assets/images/models/step_dark.png'
import SunoModelLogo from '@renderer/assets/images/models/suno.png'
import SunoModelLogoDark from '@renderer/assets/images/models/suno_dark.png'
import TeleModelLogo from '@renderer/assets/images/models/tele.png'
import TeleModelLogoDark from '@renderer/assets/images/models/tele_dark.png'
import TokenFluxModelLogo from '@renderer/assets/images/models/tokenflux.png'
import TokenFluxModelLogoDark from '@renderer/assets/images/models/tokenflux_dark.png'
import UpstageModelLogo from '@renderer/assets/images/models/upstage.png'
import UpstageModelLogoDark from '@renderer/assets/images/models/upstage_dark.png'
import ViduModelLogo from '@renderer/assets/images/models/vidu.png'
import ViduModelLogoDark from '@renderer/assets/images/models/vidu_dark.png'
import VoyageModelLogo from '@renderer/assets/images/models/voyageai.png'
import WenxinModelLogo from '@renderer/assets/images/models/wenxin.png'
import WenxinModelLogoDark from '@renderer/assets/images/models/wenxin_dark.png'
import XirangModelLogo from '@renderer/assets/images/models/xirang.png'
import XirangModelLogoDark from '@renderer/assets/images/models/xirang_dark.png'
import YiModelLogo from '@renderer/assets/images/models/yi.png'
import YiModelLogoDark from '@renderer/assets/images/models/yi_dark.png'
import ZhipuModelLogo from '@renderer/assets/images/models/zhipu.png'
import ZhipuModelLogoDark from '@renderer/assets/images/models/zhipu_dark.png'
import YoudaoLogo from '@renderer/assets/images/providers/netease-youdao.svg'
import NomicLogo from '@renderer/assets/images/providers/nomic.png'
import ZhipuProviderLogo from '@renderer/assets/images/providers/zhipu.png'
export function getModelLogo(modelId: string) {
const isLight = true
if (!modelId) {
return undefined
}
const logoMap = {
pixtral: isLight ? PixtralModelLogo : PixtralModelLogoDark,
jina: isLight ? JinaModelLogo : JinaModelLogoDark,
abab: isLight ? MinimaxModelLogo : MinimaxModelLogoDark,
minimax: isLight ? MinimaxModelLogo : MinimaxModelLogoDark,
veo: isLight ? GeminiModelLogo : GeminiModelLogoDark,
o1: isLight ? ChatGPTo1ModelLogo : ChatGPTo1ModelLogoDark,
o3: isLight ? ChatGPTo1ModelLogo : ChatGPTo1ModelLogoDark,
o4: isLight ? ChatGPTo1ModelLogo : ChatGPTo1ModelLogoDark,
'gpt-image': ChatGPTImageModelLogo,
'gpt-3': isLight ? ChatGPT35ModelLogo : ChatGPT35ModelLogoDark,
'gpt-4': isLight ? ChatGPT4ModelLogo : ChatGPT4ModelLogoDark,
'gpt-5-mini': GPT5MiniModelLogo,
'gpt-5-nano': GPT5NanoModelLogo,
'gpt-5-chat': GPT5ChatModelLogo,
'gpt-5': GPT5ModelLogo,
gpts: isLight ? ChatGPT4ModelLogo : ChatGPT4ModelLogoDark,
'gpt-oss(?:-[\\w-]+)': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
'text-moderation': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
'babbage-': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
'(sora-|sora_)': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
'(^|/)omni-': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
'Embedding-V1': isLight ? WenxinModelLogo : WenxinModelLogoDark,
'text-embedding-v': isLight ? QwenModelLogo : QwenModelLogoDark,
'text-embedding': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
'davinci-': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
glm: isLight ? ChatGLMModelLogo : ChatGLMModelLogoDark,
deepseek: isLight ? DeepSeekModelLogo : DeepSeekModelLogoDark,
'(qwen|qwq|qwq-|qvq-|wan-)': isLight ? QwenModelLogo : QwenModelLogoDark,
gemma: isLight ? GemmaModelLogo : GemmaModelLogoDark,
'yi-': isLight ? YiModelLogo : YiModelLogoDark,
llama: isLight ? LlamaModelLogo : LlamaModelLogoDark,
mixtral: isLight ? MistralModelLogo : MistralModelLogoDark,
mistral: isLight ? MistralModelLogo : MistralModelLogoDark,
codestral: CodestralModelLogo,
ministral: isLight ? MistralModelLogo : MistralModelLogoDark,
magistral: isLight ? MistralModelLogo : MistralModelLogoDark,
moonshot: isLight ? MoonshotModelLogo : MoonshotModelLogoDark,
kimi: isLight ? MoonshotModelLogo : MoonshotModelLogoDark,
phi: isLight ? MicrosoftModelLogo : MicrosoftModelLogoDark,
baichuan: isLight ? BaichuanModelLogo : BaichuanModelLogoDark,
'(claude|anthropic-)': isLight ? ClaudeModelLogo : ClaudeModelLogoDark,
gemini: isLight ? GeminiModelLogo : GeminiModelLogoDark,
bison: isLight ? PalmModelLogo : PalmModelLogoDark,
palm: isLight ? PalmModelLogo : PalmModelLogoDark,
step: isLight ? StepModelLogo : StepModelLogoDark,
hailuo: isLight ? HailuoModelLogo : HailuoModelLogoDark,
doubao: isLight ? DoubaoModelLogo : DoubaoModelLogoDark,
seedream: isLight ? DoubaoModelLogo : DoubaoModelLogoDark,
'ep-202': isLight ? DoubaoModelLogo : DoubaoModelLogoDark,
cohere: isLight ? CohereModelLogo : CohereModelLogoDark,
command: isLight ? CohereModelLogo : CohereModelLogoDark,
minicpm: isLight ? MinicpmModelLogo : MinicpmModelLogoDark,
'360': isLight ? Ai360ModelLogo : Ai360ModelLogoDark,
aimass: isLight ? AimassModelLogo : AimassModelLogoDark,
codegeex: isLight ? CodegeexModelLogo : CodegeexModelLogoDark,
copilot: isLight ? CopilotModelLogo : CopilotModelLogoDark,
creative: isLight ? CopilotModelLogo : CopilotModelLogoDark,
balanced: isLight ? CopilotModelLogo : CopilotModelLogoDark,
precise: isLight ? CopilotModelLogo : CopilotModelLogoDark,
dalle: isLight ? DalleModelLogo : DalleModelLogoDark,
'dall-e': isLight ? DalleModelLogo : DalleModelLogoDark,
dbrx: DbrxModelLogo,
flashaudio: isLight ? FlashaudioModelLogo : FlashaudioModelLogoDark,
flux: isLight ? FluxModelLogo : FluxModelLogoDark,
grok: isLight ? GrokModelLogo : GrokModelLogoDark,
hunyuan: isLight ? HunyuanModelLogo : HunyuanModelLogoDark,
internlm: isLight ? InternlmModelLogo : InternlmModelLogoDark,
internvl: InternvlModelLogo,
llava: isLight ? LLavaModelLogo : LLavaModelLogoDark,
magic: isLight ? MagicModelLogo : MagicModelLogoDark,
midjourney: isLight ? MidjourneyModelLogo : MidjourneyModelLogoDark,
'mj-': isLight ? MidjourneyModelLogo : MidjourneyModelLogoDark,
'tao-': isLight ? WenxinModelLogo : WenxinModelLogoDark,
'ernie-': isLight ? WenxinModelLogo : WenxinModelLogoDark,
voice: isLight ? FlashaudioModelLogo : FlashaudioModelLogoDark,
'tts-1': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
'whisper-': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
'stable-': isLight ? StabilityModelLogo : StabilityModelLogoDark,
sd2: isLight ? StabilityModelLogo : StabilityModelLogoDark,
sd3: isLight ? StabilityModelLogo : StabilityModelLogoDark,
sdxl: isLight ? StabilityModelLogo : StabilityModelLogoDark,
sparkdesk: isLight ? SparkDeskModelLogo : SparkDeskModelLogoDark,
generalv: isLight ? SparkDeskModelLogo : SparkDeskModelLogoDark,
wizardlm: isLight ? MicrosoftModelLogo : MicrosoftModelLogoDark,
microsoft: isLight ? MicrosoftModelLogo : MicrosoftModelLogoDark,
hermes: isLight ? NousResearchModelLogo : NousResearchModelLogoDark,
gryphe: isLight ? GrypheModelLogo : GrypheModelLogoDark,
suno: isLight ? SunoModelLogo : SunoModelLogoDark,
chirp: isLight ? SunoModelLogo : SunoModelLogoDark,
luma: isLight ? LumaModelLogo : LumaModelLogoDark,
keling: isLight ? KeLingModelLogo : KeLingModelLogoDark,
'vidu-': isLight ? ViduModelLogo : ViduModelLogoDark,
ai21: isLight ? Ai21ModelLogo : Ai21ModelLogoDark,
'jamba-': isLight ? Ai21ModelLogo : Ai21ModelLogoDark,
mythomax: isLight ? GrypheModelLogo : GrypheModelLogoDark,
nvidia: isLight ? NvidiaModelLogo : NvidiaModelLogoDark,
dianxin: isLight ? DianxinModelLogo : DianxinModelLogoDark,
tele: isLight ? TeleModelLogo : TeleModelLogoDark,
adept: isLight ? AdeptModelLogo : AdeptModelLogoDark,
aisingapore: isLight ? AisingaporeModelLogo : AisingaporeModelLogoDark,
bigcode: isLight ? BigcodeModelLogo : BigcodeModelLogoDark,
mediatek: isLight ? MediatekModelLogo : MediatekModelLogoDark,
upstage: isLight ? UpstageModelLogo : UpstageModelLogoDark,
rakutenai: isLight ? RakutenaiModelLogo : RakutenaiModelLogoDark,
ibm: isLight ? IbmModelLogo : IbmModelLogoDark,
'google/': isLight ? GoogleModelLogo : GoogleModelLogoDark,
xirang: isLight ? XirangModelLogo : XirangModelLogoDark,
hugging: isLight ? HuggingfaceModelLogo : HuggingfaceModelLogoDark,
youdao: YoudaoLogo,
'embedding-3': ZhipuProviderLogo,
embedding: isLight ? EmbeddingModelLogo : EmbeddingModelLogoDark,
perplexity: isLight ? PerplexityModelLogo : PerplexityModelLogoDark,
sonar: isLight ? PerplexityModelLogo : PerplexityModelLogoDark,
'bge-': BgeModelLogo,
'voyage-': VoyageModelLogo,
tokenflux: isLight ? TokenFluxModelLogo : TokenFluxModelLogoDark,
'nomic-': NomicLogo,
'pangu-': PanguModelLogo,
cogview: isLight ? ZhipuModelLogo : ZhipuModelLogoDark,
zhipu: isLight ? ZhipuModelLogo : ZhipuModelLogoDark,
longcat: LongCatAppLogo,
bytedance: BytedanceModelLogo,
'(V_1|V_1_TURBO|V_2|V_2A|V_2_TURBO|DESCRIBE|UPSCALE)': IdeogramModelLogo
}
for (const key in logoMap) {
const regex = new RegExp(key, 'i')
if (regex.test(modelId)) {
return logoMap[key]
}
}
return undefined
}
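For reference, a small sketch of how getModelLogo resolves (hypothetical ids, not part of this commit): the keys of logoMap are tried as case-insensitive regexes in declaration order and the first match wins.
getModelLogo('qwen3-235b-a22b') // matches '(qwen|qwq|qwq-|qvq-|wan-)' -> Qwen logo
getModelLogo('my-custom-model') // no key matches -> undefined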


@@ -0,0 +1,436 @@
import {
Model,
ReasoningEffortConfig,
SystemProviderId,
ThinkingModelType,
ThinkingOptionConfig
} from '@renderer/types'
import { getLowerBaseModelName, isUserSelectedModelType } from '@renderer/utils'
import { isEmbeddingModel, isRerankModel } from './embedding'
import { isGPT5SeriesModel } from './utils'
import { isTextToImageModel } from './vision'
import { GEMINI_FLASH_MODEL_REGEX } from './websearch'
// Reasoning models
export const REASONING_REGEX =
/^(o\d+(?:-[\w-]+)?|.*\b(?:reasoning|reasoner|thinking)\b.*|.*-[rR]\d+.*|.*\bqwq(?:-[\w-]+)?\b.*|.*\bhunyuan-t1(?:-[\w-]+)?\b.*|.*\bglm-zero-preview\b.*|.*\bgrok-(?:3-mini|4)(?:-[\w-]+)?\b.*)$/i
// Map from thinking model type to the reasoning_effort values it supports
// TODO: refactor this. too many identical options
export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
default: ['low', 'medium', 'high'] as const,
o: ['low', 'medium', 'high'] as const,
gpt5: ['minimal', 'low', 'medium', 'high'] as const,
grok: ['low', 'high'] as const,
gemini: ['low', 'medium', 'high', 'auto'] as const,
gemini_pro: ['low', 'medium', 'high', 'auto'] as const,
qwen: ['low', 'medium', 'high'] as const,
qwen_thinking: ['low', 'medium', 'high'] as const,
doubao: ['auto', 'high'] as const,
doubao_no_auto: ['high'] as const,
hunyuan: ['auto'] as const,
zhipu: ['auto'] as const,
perplexity: ['low', 'medium', 'high'] as const,
deepseek_hybrid: ['auto'] as const
} as const
// Map from thinking model type to the thinking options it supports
export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
default: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const,
o: MODEL_SUPPORTED_REASONING_EFFORT.o,
gpt5: [...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const,
grok: MODEL_SUPPORTED_REASONING_EFFORT.grok,
gemini: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
gemini_pro: MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro,
qwen: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const,
qwen_thinking: MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking,
doubao: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const,
doubao_no_auto: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_no_auto] as const,
hunyuan: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const,
zhipu: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const,
perplexity: MODEL_SUPPORTED_REASONING_EFFORT.perplexity,
deepseek_hybrid: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.deepseek_hybrid] as const
} as const
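// For example (illustrative, derived from the tables above):
//   MODEL_SUPPORTED_OPTIONS.qwen -> ['off', 'low', 'medium', 'high']
//   MODEL_SUPPORTED_OPTIONS.gpt5 -> ['minimal', 'low', 'medium', 'high']
//   MODEL_SUPPORTED_OPTIONS.o    -> ['low', 'medium', 'high'] (no 'off')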
export const getThinkModelType = (model: Model): ThinkingModelType => {
let thinkingModelType: ThinkingModelType = 'default'
if (isGPT5SeriesModel(model)) {
thinkingModelType = 'gpt5'
} else if (isSupportedReasoningEffortOpenAIModel(model)) {
thinkingModelType = 'o'
} else if (isSupportedThinkingTokenGeminiModel(model)) {
if (GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
thinkingModelType = 'gemini'
} else {
thinkingModelType = 'gemini_pro'
}
} else if (isSupportedReasoningEffortGrokModel(model)) thinkingModelType = 'grok'
else if (isSupportedThinkingTokenQwenModel(model)) {
if (isQwenAlwaysThinkModel(model)) {
thinkingModelType = 'qwen_thinking'
} else {
thinkingModelType = 'qwen'
}
} else if (isSupportedThinkingTokenDoubaoModel(model)) {
if (isDoubaoThinkingAutoModel(model)) {
thinkingModelType = 'doubao'
} else {
thinkingModelType = 'doubao_no_auto'
}
} else if (isSupportedThinkingTokenHunyuanModel(model)) thinkingModelType = 'hunyuan'
else if (isSupportedReasoningEffortPerplexityModel(model)) thinkingModelType = 'perplexity'
else if (isSupportedThinkingTokenZhipuModel(model)) thinkingModelType = 'zhipu'
else if (isDeepSeekHybridInferenceModel(model)) thinkingModelType = 'deepseek_hybrid'
return thinkingModelType
}
/** Whether the model supports controlling thinking, though not necessarily via reasoning_effort */
export function isSupportedThinkingTokenModel(model?: Model): boolean {
if (!model) {
return false
}
// Specifically for DeepSeek V3.1. White list for now
if (isDeepSeekHybridInferenceModel(model)) {
return (
['openrouter', 'dashscope', 'modelscope', 'doubao', 'silicon', 'nvidia', 'ppio'] satisfies SystemProviderId[]
).some((id) => id === model.provider)
}
return (
isSupportedThinkingTokenGeminiModel(model) ||
isSupportedThinkingTokenQwenModel(model) ||
isSupportedThinkingTokenClaudeModel(model) ||
isSupportedThinkingTokenDoubaoModel(model) ||
isSupportedThinkingTokenHunyuanModel(model) ||
isSupportedThinkingTokenZhipuModel(model)
)
}
export function isSupportedReasoningEffortModel(model?: Model): boolean {
if (!model) {
return false
}
return (
isSupportedReasoningEffortOpenAIModel(model) ||
isSupportedReasoningEffortGrokModel(model) ||
isSupportedReasoningEffortPerplexityModel(model)
)
}
export function isSupportedReasoningEffortGrokModel(model?: Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id)
if (modelId.includes('grok-3-mini')) {
return true
}
return false
}
export function isGrokReasoningModel(model?: Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id)
if (isSupportedReasoningEffortGrokModel(model) || modelId.includes('grok-4')) {
return true
}
return false
}
export function isGeminiReasoningModel(model?: Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id)
if (modelId.startsWith('gemini') && modelId.includes('thinking')) {
return true
}
if (isSupportedThinkingTokenGeminiModel(model)) {
return true
}
return false
}
export const isSupportedThinkingTokenGeminiModel = (model: Model): boolean => {
const modelId = getLowerBaseModelName(model.id, '/')
if (modelId.includes('gemini-2.5')) {
if (modelId.includes('image') || modelId.includes('tts')) {
return false
}
return true
} else {
return false
}
}
/** Whether the model is a Qwen reasoning model */
export function isQwenReasoningModel(model?: Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id, '/')
if (modelId.startsWith('qwen3')) {
if (modelId.includes('thinking')) {
return true
}
}
if (isSupportedThinkingTokenQwenModel(model)) {
return true
}
if (modelId.includes('qwq') || modelId.includes('qvq')) {
return true
}
return false
}
/** Whether the model is a Qwen3 reasoning model that supports thinking control */
export function isSupportedThinkingTokenQwenModel(model?: Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id, '/')
if (modelId.includes('coder')) {
return false
}
if (modelId.startsWith('qwen3')) {
// instruct variants never think and thinking variants always think; neither supports toggling thinking
if (modelId.includes('instruct') || modelId.includes('thinking') || modelId.includes('qwen3-max')) {
return false
}
return true
}
return [
'qwen-plus',
'qwen-plus-latest',
'qwen-plus-0428',
'qwen-plus-2025-04-28',
'qwen-plus-0714',
'qwen-plus-2025-07-14',
'qwen-turbo',
'qwen-turbo-latest',
'qwen-turbo-0428',
'qwen-turbo-2025-04-28',
'qwen-turbo-0715',
'qwen-turbo-2025-07-15',
'qwen-flash',
'qwen-flash-2025-07-28'
].includes(modelId)
}
/** Whether the model is a Qwen reasoning model that does not support thinking control (always thinks) */
export function isQwenAlwaysThinkModel(model?: Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id, '/')
return modelId.startsWith('qwen3') && modelId.includes('thinking')
}
// Regex for Doubao models that support thinking mode
export const DOUBAO_THINKING_MODEL_REGEX =
/doubao-(?:1[.-]5-thinking-vision-pro|1[.-]5-thinking-pro-m|seed-1[.-]6(?:-flash)?(?!-(?:thinking)(?:-|$)))(?:-[\w-]+)*/i
// Doubao models that support auto: doubao-seed-1.6-xxx, doubao-seed-1-6-xxx, doubao-1-5-thinking-pro-m-xxx
export const DOUBAO_THINKING_AUTO_MODEL_REGEX =
/doubao-(1-5-thinking-pro-m|seed-1[.-]6)(?!-(?:flash|thinking)(?:-|$))(?:-[\w-]+)*/i
export function isDoubaoThinkingAutoModel(model: Model): boolean {
const modelId = getLowerBaseModelName(model.id)
return DOUBAO_THINKING_AUTO_MODEL_REGEX.test(modelId) || DOUBAO_THINKING_AUTO_MODEL_REGEX.test(model.name)
}
export function isSupportedThinkingTokenDoubaoModel(model?: Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id, '/')
return DOUBAO_THINKING_MODEL_REGEX.test(modelId) || DOUBAO_THINKING_MODEL_REGEX.test(model.name)
}
export function isClaudeReasoningModel(model?: Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id, '/')
return (
modelId.includes('claude-3-7-sonnet') ||
modelId.includes('claude-3.7-sonnet') ||
modelId.includes('claude-sonnet-4') ||
modelId.includes('claude-opus-4')
)
}
export const isSupportedThinkingTokenClaudeModel = isClaudeReasoningModel
export const isSupportedThinkingTokenHunyuanModel = (model?: Model): boolean => {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id, '/')
return modelId.includes('hunyuan-a13b')
}
export const isHunyuanReasoningModel = (model?: Model): boolean => {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id, '/')
return isSupportedThinkingTokenHunyuanModel(model) || modelId.includes('hunyuan-t1')
}
export const isPerplexityReasoningModel = (model?: Model): boolean => {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id, '/')
return isSupportedReasoningEffortPerplexityModel(model) || modelId.includes('reasoning')
}
export const isSupportedReasoningEffortPerplexityModel = (model: Model): boolean => {
const modelId = getLowerBaseModelName(model.id, '/')
return modelId.includes('sonar-deep-research')
}
export const isSupportedThinkingTokenZhipuModel = (model: Model): boolean => {
const modelId = getLowerBaseModelName(model.id, '/')
return modelId.includes('glm-4.5')
}
export const isDeepSeekHybridInferenceModel = (model: Model) => {
const modelId = getLowerBaseModelName(model.id)
// DeepSeek's official API controls reasoning via separate chat/reasoner model ids; other providers need their own checks since ids may differ
// openrouter: deepseek/deepseek-chat-v3.1. Other providers might mirror DeepSeek and expose a non-thinking model under the same id, so this check carries some risk
return /deepseek-v3(?:\.1|-1-\d+)?/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
}
export const isSupportedThinkingTokenDeepSeekModel = isDeepSeekHybridInferenceModel
export const isZhipuReasoningModel = (model?: Model): boolean => {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id, '/')
return isSupportedThinkingTokenZhipuModel(model) || modelId.includes('glm-z1')
}
export const isStepReasoningModel = (model?: Model): boolean => {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id, '/')
return modelId.includes('step-3') || modelId.includes('step-r1-v-mini')
}
export function isReasoningModel(model?: Model): boolean {
if (!model || isEmbeddingModel(model) || isRerankModel(model) || isTextToImageModel(model)) {
return false
}
if (isUserSelectedModelType(model, 'reasoning') !== undefined) {
return isUserSelectedModelType(model, 'reasoning')!
}
const modelId = getLowerBaseModelName(model.id)
if (model.provider === 'doubao' || modelId.includes('doubao')) {
return (
REASONING_REGEX.test(modelId) ||
REASONING_REGEX.test(model.name) ||
isSupportedThinkingTokenDoubaoModel(model) ||
isDeepSeekHybridInferenceModel(model) ||
isDeepSeekHybridInferenceModel({ ...model, id: model.name }) ||
false
)
}
if (
isClaudeReasoningModel(model) ||
isOpenAIReasoningModel(model) ||
isGeminiReasoningModel(model) ||
isQwenReasoningModel(model) ||
isGrokReasoningModel(model) ||
isHunyuanReasoningModel(model) ||
isPerplexityReasoningModel(model) ||
isZhipuReasoningModel(model) ||
isStepReasoningModel(model) ||
isDeepSeekHybridInferenceModel(model) ||
modelId.includes('magistral') ||
modelId.includes('minimax-m1') ||
modelId.includes('pangu-pro-moe')
) {
return true
}
return REASONING_REGEX.test(modelId) || false
}
export function isOpenAIReasoningModel(model: Model): boolean {
const modelId = getLowerBaseModelName(model.id, '/')
return isSupportedReasoningEffortOpenAIModel(model) || modelId.includes('o1')
}
export function isSupportedReasoningEffortOpenAIModel(model: Model): boolean {
const modelId = getLowerBaseModelName(model.id)
return (
(modelId.includes('o1') && !(modelId.includes('o1-preview') || modelId.includes('o1-mini'))) ||
modelId.includes('o3') ||
modelId.includes('o4') ||
modelId.includes('gpt-oss') ||
(isGPT5SeriesModel(model) && !modelId.includes('chat'))
)
}
export const THINKING_TOKEN_MAP: Record<string, { min: number; max: number }> = {
// Gemini models
'gemini-2\\.5-flash-lite.*$': { min: 512, max: 24576 },
'gemini-.*-flash.*$': { min: 0, max: 24576 },
'gemini-.*-pro.*$': { min: 128, max: 32768 },
// Qwen models
'qwen3-235b-a22b-thinking-2507$': { min: 0, max: 81_920 },
'qwen3-30b-a3b-thinking-2507$': { min: 0, max: 81_920 },
'qwen-plus-2025-07-28$': { min: 0, max: 81_920 },
'qwen-plus-latest$': { min: 0, max: 81_920 },
'qwen3-1\\.7b$': { min: 0, max: 30_720 },
'qwen3-0\\.6b$': { min: 0, max: 30_720 },
'qwen-plus.*$': { min: 0, max: 38_912 },
'qwen-turbo.*$': { min: 0, max: 38_912 },
'qwen-flash.*$': { min: 0, max: 81_920 },
'qwen3-.*$': { min: 1024, max: 38_912 },
// Claude models
'claude-3[.-]7.*sonnet.*$': { min: 1024, max: 64000 },
'claude-(?:sonnet|opus)-4.*$': { min: 1024, max: 32000 }
}
export const findTokenLimit = (modelId: string): { min: number; max: number } | undefined => {
for (const [pattern, limits] of Object.entries(THINKING_TOKEN_MAP)) {
if (new RegExp(pattern, 'i').test(modelId)) {
return limits
}
}
return undefined
}
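A usage sketch (illustrative ids, not part of this commit): THINKING_TOKEN_MAP patterns are tried in declaration order and the first match wins.
findTokenLimit('qwen-plus-latest') // { min: 0, max: 81920 }
findTokenLimit('claude-3-7-sonnet-20250219') // { min: 1024, max: 64000 }
getThinkModelType({ id: 'qwen-plus' } as Model) // 'qwen'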


@@ -0,0 +1,92 @@
import { isSystemProviderId, Model } from '@renderer/types'
import { getLowerBaseModelName, isUserSelectedModelType } from '@renderer/utils'
import { isEmbeddingModel, isRerankModel } from './embedding'
import { isDeepSeekHybridInferenceModel } from './reasoning'
import { isPureGenerateImageModel, isTextToImageModel } from './vision'
// Tool calling models
export const FUNCTION_CALLING_MODELS = [
'gpt-4o',
'gpt-4o-mini',
'gpt-4',
'gpt-4.5',
'gpt-oss(?:-[\\w-]+)',
'gpt-5(?:-[0-9-]+)?',
'o(1|3|4)(?:-[\\w-]+)?',
'claude',
'qwen',
'qwen3',
'hunyuan',
'deepseek',
'glm-4(?:-[\\w-]+)?',
'glm-4.5(?:-[\\w-]+)?',
'learnlm(?:-[\\w-]+)?',
'gemini(?:-[\\w-]+)?', // Gemini embedding models are already excluded earlier
'grok-3(?:-[\\w-]+)?',
'doubao-seed-1[.-]6(?:-[\\w-]+)?',
'kimi-k2(?:-[\\w-]+)?'
]
const FUNCTION_CALLING_EXCLUDED_MODELS = [
'aqa(?:-[\\w-]+)?',
'imagen(?:-[\\w-]+)?',
'o1-mini',
'o1-preview',
'AIDC-AI/Marco-o1',
'gemini-1(?:\\.[\\w-]+)?',
'qwen-mt(?:-[\\w-]+)?',
'gpt-5-chat(?:-[\\w-]+)?',
'glm-4\\.5v'
]
export const FUNCTION_CALLING_REGEX = new RegExp(
`\\b(?!(?:${FUNCTION_CALLING_EXCLUDED_MODELS.join('|')})\\b)(?:${FUNCTION_CALLING_MODELS.join('|')})\\b`,
'i'
)
export function isFunctionCallingModel(model?: Model): boolean {
if (
!model ||
isEmbeddingModel(model) ||
isRerankModel(model) ||
isTextToImageModel(model) ||
isPureGenerateImageModel(model)
) {
return false
}
const modelId = getLowerBaseModelName(model.id)
if (isUserSelectedModelType(model, 'function_calling') !== undefined) {
return isUserSelectedModelType(model, 'function_calling')!
}
if (model.provider === 'qiniu') {
return ['deepseek-v3-tool', 'deepseek-v3-0324', 'qwq-32b', 'qwen2.5-72b-instruct'].includes(modelId)
}
if (model.provider === 'doubao' || modelId.includes('doubao')) {
return FUNCTION_CALLING_REGEX.test(modelId) || FUNCTION_CALLING_REGEX.test(model.name)
}
if (['deepseek', 'anthropic', 'kimi', 'moonshot'].includes(model.provider)) {
return true
}
// As of 2025/08/26, neither Bailian (dashscope) nor Volcengine (doubao) supports function calling for v3.1
// Assume support by default for other providers
if (isDeepSeekHybridInferenceModel(model)) {
if (isSystemProviderId(model.provider)) {
switch (model.provider) {
case 'dashscope':
case 'doubao':
// case 'nvidia': // the NVIDIA API is too unreliable to verify; assume it works for now
return false
}
}
return true
}
return FUNCTION_CALLING_REGEX.test(modelId)
}
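A quick sketch (hypothetical ids and providers, not part of this commit):
isFunctionCallingModel({ id: 'qwen-mt-turbo', provider: 'dashscope' } as Model) // false, 'qwen-mt' is in FUNCTION_CALLING_EXCLUDED_MODELS
isFunctionCallingModel({ id: 'claude-sonnet-4-20250514', provider: 'anthropic' } as Model) // true, provider is in the allow-list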


@@ -0,0 +1,243 @@
import { Model } from '@renderer/types'
import { getLowerBaseModelName } from '@renderer/utils'
import OpenAI from 'openai'
import { WEB_SEARCH_PROMPT_FOR_OPENROUTER } from '../prompts'
import { getWebSearchTools } from '../tools'
import { isOpenAIReasoningModel } from './reasoning'
import { isGenerateImageModel, isVisionModel } from './vision'
import { isOpenAIWebSearchChatCompletionOnlyModel } from './websearch'
export const NOT_SUPPORTED_REGEX = /(?:^tts|whisper|speech)/i
export const OPENAI_NO_SUPPORT_DEV_ROLE_MODELS = ['o1-preview', 'o1-mini']
export function isOpenAILLMModel(model: Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id)
if (modelId.includes('gpt-4o-image')) {
return false
}
if (isOpenAIReasoningModel(model)) {
return true
}
if (modelId.includes('gpt')) {
return true
}
return false
}
export function isOpenAIModel(model: Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id)
return modelId.includes('gpt') || isOpenAIReasoningModel(model)
}
export function isSupportFlexServiceTierModel(model: Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id)
return (
(modelId.includes('o3') && !modelId.includes('o3-mini')) || modelId.includes('o4-mini') || modelId.includes('gpt-5')
)
}
export function isSupportedFlexServiceTier(model: Model): boolean {
return isSupportFlexServiceTierModel(model)
}
export function isSupportVerbosityModel(model: Model): boolean {
const modelId = getLowerBaseModelName(model.id)
return isGPT5SeriesModel(model) && !modelId.includes('chat')
}
export function isOpenAIChatCompletionOnlyModel(model: Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id)
return (
modelId.includes('gpt-4o-search-preview') ||
modelId.includes('gpt-4o-mini-search-preview') ||
modelId.includes('o1-mini') ||
modelId.includes('o1-preview')
)
}
export function isGrokModel(model?: Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id)
return modelId.includes('grok')
}
export function isSupportedModel(model: OpenAI.Models.Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id)
return !NOT_SUPPORTED_REGEX.test(modelId)
}
export function isNotSupportTemperatureAndTopP(model: Model): boolean {
if (!model) {
return true
}
if (
(isOpenAIReasoningModel(model) && !isOpenAIOpenWeightModel(model)) ||
isOpenAIChatCompletionOnlyModel(model) ||
isQwenMTModel(model)
) {
return true
}
return false
}
export function getOpenAIWebSearchParams(model: Model, isEnableWebSearch?: boolean): Record<string, any> {
if (!isEnableWebSearch) {
return {}
}
const webSearchTools = getWebSearchTools(model)
if (model.provider === 'grok') {
return {
search_parameters: {
mode: 'auto',
return_citations: true,
sources: [{ type: 'web' }, { type: 'x' }, { type: 'news' }]
}
}
}
if (model.provider === 'hunyuan') {
return { enable_enhancement: true, citation: true, search_info: true }
}
if (model.provider === 'dashscope') {
return {
enable_search: true,
search_options: {
forced_search: true
}
}
}
if (isOpenAIWebSearchChatCompletionOnlyModel(model)) {
return {
web_search_options: {}
}
}
if (model.provider === 'openrouter') {
return {
plugins: [{ id: 'web', search_prompts: WEB_SEARCH_PROMPT_FOR_OPENROUTER }]
}
}
return {
tools: webSearchTools
}
}
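// Example shapes (illustrative): a model on the 'grok' provider yields
//   { search_parameters: { mode: 'auto', return_citations: true, sources: [...] } }
// while providers without a special case fall back to { tools: getWebSearchTools(model) }.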
export function isGemmaModel(model?: Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id)
return modelId.includes('gemma-') || model.group === 'Gemma'
}
export function isZhipuModel(model?: Model): boolean {
if (!model) {
return false
}
return model.provider === 'zhipu'
}
/**
* Group Qwen models by series prefix (e.g. qwen2.5, qwen-max).
* @param models models to group
* @returns a record keyed by series prefix (or the model's group as a fallback)
*/
export function groupQwenModels(models: Model[]): Record<string, Model[]> {
return models.reduce(
(groups, model) => {
const modelId = getLowerBaseModelName(model.id)
// Match the Qwen series model prefix
const prefixMatch = modelId.match(/^(qwen(?:\d+\.\d+|2(?:\.\d+)?|-\d+b|-(?:max|coder|vl)))/i)
// Matches qwen2.5, qwen2, qwen-7b, qwen-max, qwen-coder, etc.
const groupKey = prefixMatch ? prefixMatch[1] : model.group || '其他'
if (!groups[groupKey]) {
groups[groupKey] = []
}
groups[groupKey].push(model)
return groups
},
{} as Record<string, Model[]>
)
}
// Capability checks over collections of models
export const isVisionModels = (models: Model[]) => {
return models.every((model) => isVisionModel(model))
}
export const isGenerateImageModels = (models: Model[]) => {
return models.every((model) => isGenerateImageModel(model))
}
export const isAnthropicModel = (model?: Model): boolean => {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id)
return modelId.startsWith('claude')
}
export const isQwenMTModel = (model: Model): boolean => {
const modelId = getLowerBaseModelName(model.id)
return modelId.includes('qwen-mt')
}
export const isNotSupportedTextDelta = (model: Model): boolean => {
return isQwenMTModel(model)
}
export const isNotSupportSystemMessageModel = (model: Model): boolean => {
return isQwenMTModel(model) || isGemmaModel(model)
}
export const isGPT5SeriesModel = (model: Model) => {
const modelId = getLowerBaseModelName(model.id)
return modelId.includes('gpt-5')
}
export const isGeminiModel = (model: Model) => {
const modelId = getLowerBaseModelName(model.id)
return modelId.includes('gemini')
}
export const isOpenAIOpenWeightModel = (model: Model) => {
const modelId = getLowerBaseModelName(model.id)
return modelId.includes('gpt-oss')
}
// Zhipu vision reasoning models mark reasoning results with these special tokens
export const ZHIPU_RESULT_TOKENS = ['<|begin_of_box|>', '<|end_of_box|>'] as const
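A usage sketch for groupQwenModels (illustrative ids, not part of this commit): models are bucketed by the series prefix captured by the regex, falling back to model.group.
groupQwenModels([
  { id: 'qwen2.5-72b-instruct' } as Model,
  { id: 'qwen2.5-7b-instruct' } as Model,
  { id: 'qwen-max-latest' } as Model
])
// => { 'qwen2.5': [qwen2.5-72b-instruct, qwen2.5-7b-instruct], 'qwen-max': [qwen-max-latest] }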


@@ -0,0 +1,212 @@
import { getProviderByModel } from '@renderer/services/AssistantService'
import { Model } from '@renderer/types'
import { getLowerBaseModelName, isUserSelectedModelType } from '@renderer/utils'
import { isEmbeddingModel, isRerankModel } from './embedding'
// Vision models
const visionAllowedModels = [
'llava',
'moondream',
'minicpm',
'gemini-1\\.5',
'gemini-2\\.0',
'gemini-2\\.5',
'gemini-exp',
'claude-3',
'claude-sonnet-4',
'claude-opus-4',
'vision',
'glm-4(?:\\.\\d+)?v(?:-[\\w-]+)?',
'qwen-vl',
'qwen2-vl',
'qwen2.5-vl',
'qwen2.5-omni',
'qvq',
'internvl2',
'grok-vision-beta',
'grok-4(?:-[\\w-]+)?',
'pixtral',
'gpt-4(?:-[\\w-]+)',
'gpt-4.1(?:-[\\w-]+)?',
'gpt-4o(?:-[\\w-]+)?',
'gpt-4.5(?:-[\\w-]+)',
'gpt-5(?:-[\\w-]+)?',
'chatgpt-4o(?:-[\\w-]+)?',
'o1(?:-[\\w-]+)?',
'o3(?:-[\\w-]+)?',
'o4(?:-[\\w-]+)?',
'deepseek-vl(?:[\\w-]+)?',
'kimi-latest',
'gemma-3(?:-[\\w-]+)',
'doubao-seed-1[.-]6(?:-[\\w-]+)?',
'kimi-thinking-preview',
`gemma3(?:[-:\\w]+)?`,
'kimi-vl-a3b-thinking(?:-[\\w-]+)?',
'llama-guard-4(?:-[\\w-]+)?',
'llama-4(?:-[\\w-]+)?',
'step-1o(?:.*vision)?',
'step-1v(?:-[\\w-]+)?',
'qwen-omni(?:-[\\w-]+)?'
]
const visionExcludedModels = [
'gpt-4-\\d+-preview',
'gpt-4-turbo-preview',
'gpt-4-32k',
'gpt-4-\\d+',
'o1-mini',
'o3-mini',
'o1-preview',
'AIDC-AI/Marco-o1'
]
export const VISION_REGEX = new RegExp(
`\\b(?!(?:${visionExcludedModels.join('|')})\\b)(${visionAllowedModels.join('|')})\\b`,
'i'
)
// For middleware to identify models that must use the dedicated Image API
export const DEDICATED_IMAGE_MODELS = [
'grok-2-image',
'grok-2-image-1212',
'grok-2-image-latest',
'dall-e-3',
'dall-e-2',
'gpt-image-1'
]
export const IMAGE_ENHANCEMENT_MODELS = [
'grok-2-image(?:-[\\w-]+)?',
'qwen-image-edit',
'gpt-image-1',
'gemini-2.5-flash-image-preview',
'gemini-2.0-flash-preview-image-generation'
]
const IMAGE_ENHANCEMENT_MODELS_REGEX = new RegExp(IMAGE_ENHANCEMENT_MODELS.join('|'), 'i')
// Models that should auto-enable image generation button when selected
export const AUTO_ENABLE_IMAGE_MODELS = ['gemini-2.5-flash-image-preview', ...DEDICATED_IMAGE_MODELS]
export const OPENAI_TOOL_USE_IMAGE_GENERATION_MODELS = [
'o3',
'gpt-4o',
'gpt-4o-mini',
'gpt-4.1',
'gpt-4.1-mini',
'gpt-4.1-nano',
'gpt-5'
]
export const OPENAI_IMAGE_GENERATION_MODELS = [...OPENAI_TOOL_USE_IMAGE_GENERATION_MODELS, 'gpt-image-1']
export const GENERATE_IMAGE_MODELS = [
'gemini-2.0-flash-exp',
'gemini-2.0-flash-exp-image-generation',
'gemini-2.0-flash-preview-image-generation',
'gemini-2.5-flash-image-preview',
...DEDICATED_IMAGE_MODELS
]
export const isDedicatedImageGenerationModel = (model: Model): boolean => {
if (!model) return false
const modelId = getLowerBaseModelName(model.id)
return DEDICATED_IMAGE_MODELS.some((m) => modelId.includes(m))
}
export const isAutoEnableImageGenerationModel = (model: Model): boolean => {
if (!model) return false
const modelId = getLowerBaseModelName(model.id)
return AUTO_ENABLE_IMAGE_MODELS.some((m) => modelId.includes(m))
}
/**
* Whether the model can generate images (based on provider type and known model lists).
* @param model the model to check
* @returns true if the model supports image generation
*/
export function isGenerateImageModel(model: Model): boolean {
if (!model || isEmbeddingModel(model) || isRerankModel(model)) {
return false
}
const provider = getProviderByModel(model)
if (!provider) {
return false
}
const modelId = getLowerBaseModelName(model.id, '/')
if (provider.type === 'openai-response') {
return (
OPENAI_IMAGE_GENERATION_MODELS.some((imageModel) => modelId.includes(imageModel)) ||
GENERATE_IMAGE_MODELS.some((imageModel) => modelId.includes(imageModel))
)
}
return GENERATE_IMAGE_MODELS.some((imageModel) => modelId.includes(imageModel))
}
/**
* Whether the model only generates images, i.e. it is not usable via tool-use image generation.
* @param model the model to check
* @returns true if the model is a pure image generation model
*/
export function isPureGenerateImageModel(model: Model): boolean {
if (!isGenerateImageModel(model) || !isTextToImageModel(model)) {
return false
}
const modelId = getLowerBaseModelName(model.id)
return !OPENAI_TOOL_USE_IMAGE_GENERATION_MODELS.some((imageModel) => modelId.includes(imageModel))
}
// Text to image models
export const TEXT_TO_IMAGE_REGEX = /flux|diffusion|stabilityai|sd-|dall|cogview|janus|midjourney|mj-|image|gpt-image/i
export function isTextToImageModel(model: Model): boolean {
const modelId = getLowerBaseModelName(model.id)
return TEXT_TO_IMAGE_REGEX.test(modelId)
}
export function isNotSupportedImageSizeModel(model?: Model): boolean {
if (!model) {
return false
}
const baseName = getLowerBaseModelName(model.id, '/')
return baseName.includes('grok-2-image')
}
/**
* Whether the model supports image editing / enhancement.
* @param model the model to check
*/
export function isImageEnhancementModel(model: Model): boolean {
const modelId = getLowerBaseModelName(model.id)
return IMAGE_ENHANCEMENT_MODELS_REGEX.test(modelId)
}
export function isVisionModel(model: Model): boolean {
if (!model || isEmbeddingModel(model) || isRerankModel(model)) {
return false
}
// Vision is usable now that the copilot-vision-request field has been added
// if (model.provider === 'copilot') {
// return false
// }
if (isUserSelectedModelType(model, 'vision') !== undefined) {
return isUserSelectedModelType(model, 'vision')!
}
const modelId = getLowerBaseModelName(model.id)
if (model.provider === 'doubao' || modelId.includes('doubao')) {
return VISION_REGEX.test(model.name) || VISION_REGEX.test(modelId) || false
}
return VISION_REGEX.test(modelId) || IMAGE_ENHANCEMENT_MODELS_REGEX.test(modelId) || false
}
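A quick sketch (illustrative ids, not part of this commit):
isVisionModel({ id: 'qwen2.5-vl-72b-instruct' } as Model) // true, matches 'qwen2.5-vl' in VISION_REGEX
isImageEnhancementModel({ id: 'qwen-image-edit' } as Model) // true, listed in IMAGE_ENHANCEMENT_MODELS
isTextToImageModel({ id: 'stable-diffusion-3.5-large' } as Model) // true, matches TEXT_TO_IMAGE_REGEX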


@@ -0,0 +1,181 @@
import { getProviderByModel } from '@renderer/services/AssistantService'
import { Model } from '@renderer/types'
import { getLowerBaseModelName, isUserSelectedModelType } from '@renderer/utils'
import { isEmbeddingModel, isRerankModel } from './embedding'
import { isAnthropicModel } from './utils'
import { isPureGenerateImageModel, isTextToImageModel } from './vision'
export const CLAUDE_SUPPORTED_WEBSEARCH_REGEX = new RegExp(
`\\b(?:claude-3(-|\\.)(7|5)-sonnet(?:-[\\w-]+)|claude-3(-|\\.)5-haiku(?:-[\\w-]+)|claude-sonnet-4(?:-[\\w-]+)?|claude-opus-4(?:-[\\w-]+)?)\\b`,
'i'
)
export const GEMINI_FLASH_MODEL_REGEX = new RegExp('gemini-.*-flash.*$')
export const GEMINI_SEARCH_REGEX = new RegExp('gemini-2\\..*', 'i')
export const PERPLEXITY_SEARCH_MODELS = [
'sonar-pro',
'sonar',
'sonar-reasoning',
'sonar-reasoning-pro',
'sonar-deep-research'
]
export function isWebSearchModel(model: Model): boolean {
if (
!model ||
isEmbeddingModel(model) ||
isRerankModel(model) ||
isTextToImageModel(model) ||
isPureGenerateImageModel(model)
) {
return false
}
if (isUserSelectedModelType(model, 'web_search') !== undefined) {
return isUserSelectedModelType(model, 'web_search')!
}
const provider = getProviderByModel(model)
if (!provider) {
return false
}
const modelId = getLowerBaseModelName(model.id, '/')
// Checked for every provider
if (isAnthropicModel(model)) {
return CLAUDE_SUPPORTED_WEBSEARCH_REGEX.test(modelId)
}
if (provider.type === 'openai-response') {
if (isOpenAIWebSearchModel(model)) {
return true
}
return false
}
if (provider.id === 'perplexity') {
return PERPLEXITY_SEARCH_MODELS.includes(modelId)
}
if (provider.id === 'aihubmix') {
// modelId does not end with -search
if (!modelId.endsWith('-search') && GEMINI_SEARCH_REGEX.test(modelId)) {
return true
}
if (isOpenAIWebSearchModel(model)) {
return true
}
return false
}
if (provider?.type === 'openai') {
if (GEMINI_SEARCH_REGEX.test(modelId) || isOpenAIWebSearchModel(model)) {
return true
}
}
if (provider.id === 'gemini' || provider?.type === 'gemini' || provider.type === 'vertexai') {
return GEMINI_SEARCH_REGEX.test(modelId)
}
if (provider.id === 'hunyuan') {
return modelId !== 'hunyuan-lite'
}
if (provider.id === 'zhipu') {
return modelId?.startsWith('glm-4-')
}
if (provider.id === 'dashscope') {
const models = ['qwen-turbo', 'qwen-max', 'qwen-plus', 'qwq', 'qwen-flash']
// matches id like qwen-max-0919, qwen-max-latest
return models.some((i) => modelId.startsWith(i))
}
if (provider.id === 'openrouter') {
return true
}
if (provider.id === 'grok') {
return true
}
return false
}
export function isMandatoryWebSearchModel(model: Model): boolean {
if (!model) {
return false
}
const provider = getProviderByModel(model)
if (!provider) {
return false
}
const modelId = getLowerBaseModelName(model.id)
if (provider.id === 'perplexity' || provider.id === 'openrouter') {
return PERPLEXITY_SEARCH_MODELS.includes(modelId)
}
return false
}
export function isOpenRouterBuiltInWebSearchModel(model: Model): boolean {
if (!model) {
return false
}
const provider = getProviderByModel(model)
if (provider.id !== 'openrouter') {
return false
}
const modelId = getLowerBaseModelName(model.id)
return isOpenAIWebSearchChatCompletionOnlyModel(model) || modelId.includes('sonar')
}
export function isOpenAIWebSearchChatCompletionOnlyModel(model: Model): boolean {
const modelId = getLowerBaseModelName(model.id)
return modelId.includes('gpt-4o-search-preview') || modelId.includes('gpt-4o-mini-search-preview')
}
export function isOpenAIWebSearchModel(model: Model): boolean {
const modelId = getLowerBaseModelName(model.id)
return (
modelId.includes('gpt-4o-search-preview') ||
modelId.includes('gpt-4o-mini-search-preview') ||
(modelId.includes('gpt-4.1') && !modelId.includes('gpt-4.1-nano')) ||
(modelId.includes('gpt-4o') && !modelId.includes('gpt-4o-image')) ||
modelId.includes('o3') ||
modelId.includes('o4') ||
(modelId.includes('gpt-5') && !modelId.includes('chat'))
)
}
export function isHunyuanSearchModel(model?: Model): boolean {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id)
if (model.provider === 'hunyuan') {
return modelId !== 'hunyuan-lite'
}
return false
}
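A quick sketch (illustrative ids, not part of this commit):
isOpenAIWebSearchChatCompletionOnlyModel({ id: 'gpt-4o-search-preview' } as Model) // true
isOpenAIWebSearchModel({ id: 'gpt-4.1-mini' } as Model) // true
isOpenAIWebSearchModel({ id: 'gpt-4.1-nano' } as Model) // false, nano is excluded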


@@ -2356,11 +2356,14 @@ const migrateConfig = {
if (state.settings && state.note) {
const showWorkspaceValue = (state.settings as any)?.showWorkspace
if (showWorkspaceValue !== undefined) {
// @ts-ignore eslint-disable-next-line
state.note.settings.showWorkspace = showWorkspaceValue
// Remove from settings
delete (state.settings as any).showWorkspace
// @ts-ignore eslint-disable-next-line
} else if (state.note.settings.showWorkspace === undefined) {
// Set default value if not exists
// @ts-ignore eslint-disable-next-line
state.note.settings.showWorkspace = true
}
}