diff --git a/src/renderer/src/config/models/__tests__/reasoning.test.ts b/src/renderer/src/config/models/__tests__/reasoning.test.ts
index 0f58be4ef0..5173eed9f0 100644
--- a/src/renderer/src/config/models/__tests__/reasoning.test.ts
+++ b/src/renderer/src/config/models/__tests__/reasoning.test.ts
@@ -1368,7 +1368,9 @@ describe('findTokenLimit', () => {
     { modelId: 'qwen-plus-ultra', expected: { min: 0, max: 81_920 } },
     { modelId: 'qwen-turbo-pro', expected: { min: 0, max: 38_912 } },
     { modelId: 'qwen-flash-lite', expected: { min: 0, max: 81_920 } },
-    { modelId: 'qwen3-7b', expected: { min: 1_024, max: 38_912 } }
+    { modelId: 'qwen3-7b', expected: { min: 1_024, max: 38_912 } },
+    { modelId: 'Baichuan-M2', expected: { min: 0, max: 30_000 } },
+    { modelId: 'baichuan-m2', expected: { min: 0, max: 30_000 } }
   ]
 
   it.each(cases)('returns correct limits for $modelId', ({ modelId, expected }) => {
diff --git a/src/renderer/src/config/models/default.ts b/src/renderer/src/config/models/default.ts
index 408c047639..1c15064a11 100644
--- a/src/renderer/src/config/models/default.ts
+++ b/src/renderer/src/config/models/default.ts
@@ -713,6 +713,30 @@ export const SYSTEM_MODELS: Record =
       provider: 'baichuan',
       name: 'Baichuan3 Turbo 128k',
       group: 'Baichuan3'
+    },
+    {
+      id: 'Baichuan4-Turbo',
+      provider: 'baichuan',
+      name: 'Baichuan4 Turbo',
+      group: 'Baichuan4'
+    },
+    {
+      id: 'Baichuan4-Air',
+      provider: 'baichuan',
+      name: 'Baichuan4 Air',
+      group: 'Baichuan4'
+    },
+    {
+      id: 'Baichuan-M2',
+      provider: 'baichuan',
+      name: 'Baichuan M2',
+      group: 'Baichuan-M2'
+    },
+    {
+      id: 'Baichuan-M2-Plus',
+      provider: 'baichuan',
+      name: 'Baichuan M2 Plus',
+      group: 'Baichuan-M2'
     }
   ],
   modelscope: [
diff --git a/src/renderer/src/config/models/reasoning.ts b/src/renderer/src/config/models/reasoning.ts
index b2b6119b76..0b42ed0934 100644
--- a/src/renderer/src/config/models/reasoning.ts
+++ b/src/renderer/src/config/models/reasoning.ts
@@ -640,6 +640,16 @@ export const isMiniMaxReasoningModel = (model?: Model): boolean => {
   return (['minimax-m1', 'minimax-m2', 'minimax-m2.1'] as const).some((id) => modelId.includes(id))
 }
 
+export const isBaichuanReasoningModel = (model?: Model): boolean => {
+  if (!model) {
+    return false
+  }
+  const modelId = getLowerBaseModelName(model.id, '/')
+
+  // Only Baichuan-M2 is a reasoning model (note: M2-Plus is not a reasoning model)
+  return modelId.includes('baichuan-m2') && !modelId.includes('plus')
+}
+
 export function isReasoningModel(model?: Model): boolean {
   if (!model || isEmbeddingModel(model) || isRerankModel(model) || isTextToImageModel(model)) {
     return false
@@ -675,6 +685,7 @@ export function isReasoningModel(model?: Model): boolean {
     isLingReasoningModel(model) ||
     isMiniMaxReasoningModel(model) ||
     isMiMoReasoningModel(model) ||
+    isBaichuanReasoningModel(model) ||
     modelId.includes('magistral') ||
     modelId.includes('pangu-pro-moe') ||
     modelId.includes('seed-oss') ||
@@ -718,7 +729,10 @@ const THINKING_TOKEN_MAP: Record = {
   '(?:anthropic\\.)?claude-opus-4(?:[.-]0)?(?:[@-](?:\\d{4,}|[a-z][\\w-]*))?(?:-v\\d+:\\d+)?$': {
     min: 1024,
     max: 32_000
-  }
+  },
+
+  // Baichuan models
+  'baichuan-m2$': { min: 0, max: 30_000 }
 }
 
 export const findTokenLimit = (modelId: string): { min: number; max: number } | undefined => {
diff --git a/src/renderer/src/config/providers.ts b/src/renderer/src/config/providers.ts
index f49794aaa7..f38fd2b163 100644
--- a/src/renderer/src/config/providers.ts
+++ b/src/renderer/src/config/providers.ts
@@ -1025,7 +1025,7 @@ export const PROVIDER_URLS: Record = {
       official: 'https://www.baichuan-ai.com/',
       apiKey: 'https://platform.baichuan-ai.com/console/apikey',
       docs: 'https://platform.baichuan-ai.com/docs',
-      models: 'https://platform.baichuan-ai.com/price'
+      models: 'https://platform.baichuan-ai.com/prices'
     }
   },
   modelscope: {
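For reference, a minimal sketch (not part of this change) of how the new isBaichuanReasoningModel helper could be covered next to the existing findTokenLimit cases. It assumes vitest, the relative import path '../reasoning' from the test directory, a Model type at '@renderer/types', and a small asModel helper; all of these are assumptions for illustration only.

// Hypothetical companion test, assuming vitest and that casting a partial
// object to Model is acceptable in this suite.
import { describe, expect, it } from 'vitest'
import type { Model } from '@renderer/types' // assumed location of the Model type
import { isBaichuanReasoningModel } from '../reasoning'

// Assumed helper: builds a minimal Model-shaped object for the checks below.
const asModel = (id: string) => ({ id, provider: 'baichuan', name: id, group: 'Baichuan-M2' }) as Model

describe('isBaichuanReasoningModel', () => {
  it('flags Baichuan-M2 but not M2-Plus', () => {
    expect(isBaichuanReasoningModel(asModel('Baichuan-M2'))).toBe(true) // id normalizes to 'baichuan-m2'
    expect(isBaichuanReasoningModel(asModel('baichuan-m2'))).toBe(true)
    expect(isBaichuanReasoningModel(asModel('Baichuan-M2-Plus'))).toBe(false) // 'plus' is excluded
    expect(isBaichuanReasoningModel(undefined)).toBe(false)
  })
})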