diff --git a/src/main/services/MCPService.ts b/src/main/services/MCPService.ts
index cc6bbaa366..ebdc2247fc 100644
--- a/src/main/services/MCPService.ts
+++ b/src/main/services/MCPService.ts
@@ -249,6 +249,26 @@ class McpService {
StdioClientTransport | SSEClientTransport | InMemoryTransport | StreamableHTTPClientTransport
> => {
// Create appropriate transport based on configuration
+
+ // Special case for nowledgeMem: uses HTTP transport to the local app instead of the in-memory transport
+ if (isBuiltinMCPServer(server) && server.name === BuiltinMCPServerNames.nowledgeMem) {
+ const nowledgeMemUrl = 'http://127.0.0.1:14242/mcp'
+ const options: StreamableHTTPClientTransportOptions = {
+ fetch: async (url, init) => {
+ return net.fetch(typeof url === 'string' ? url : url.toString(), init)
+ },
+ requestInit: {
+ headers: {
+ ...defaultAppHeaders(),
+ APP: 'Cherry Studio'
+ }
+ },
+ authProvider
+ }
+ getServerLogger(server).debug(`Using StreamableHTTPClientTransport for ${server.name}`)
+ return new StreamableHTTPClientTransport(new URL(nowledgeMemUrl), options)
+ }
+
if (isBuiltinMCPServer(server) && server.name !== BuiltinMCPServerNames.mcpAutoInstall) {
getServerLogger(server).debug(`Using in-memory transport`)
const [clientTransport, serverTransport] = InMemoryTransport.createLinkedPair()
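Note: the nowledgeMem builtin is registered as an inMemory server in src/renderer/src/store/mcp.ts (further down in this diff), but this special case connects it over Streamable HTTP to the local Nowledge Mem app instead. As a rough sketch, a standalone client against the same endpoint could look like the following; it assumes the app is listening on its default 127.0.0.1:14242 address, and the client name and APP header value are illustrative:

```ts
import { Client } from '@modelcontextprotocol/sdk/client/index.js'
import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'

async function probeNowledgeMem(): Promise<void> {
  // Same endpoint and APP header as the special case above
  const transport = new StreamableHTTPClientTransport(new URL('http://127.0.0.1:14242/mcp'), {
    requestInit: { headers: { APP: 'Cherry Studio' } }
  })
  const client = new Client({ name: 'cherry-studio-probe', version: '0.0.1' })
  await client.connect(transport) // runs the MCP initialize handshake
  const { tools } = await client.listTools() // tools exposed by Nowledge Mem
  console.log(tools.map((t) => t.name))
  await client.close()
}
```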
diff --git a/src/renderer/src/aiCore/utils/__tests__/reasoning.test.ts b/src/renderer/src/aiCore/utils/__tests__/reasoning.test.ts
index e5561f6fcf..df7d69d0c2 100644
--- a/src/renderer/src/aiCore/utils/__tests__/reasoning.test.ts
+++ b/src/renderer/src/aiCore/utils/__tests__/reasoning.test.ts
@@ -11,6 +11,7 @@ import { beforeEach, describe, expect, it, vi } from 'vitest'
import {
getAnthropicReasoningParams,
+ getAnthropicThinkingBudget,
getBedrockReasoningParams,
getCustomParameters,
getGeminiReasoningParams,
@@ -89,7 +90,8 @@ vi.mock('@renderer/config/models', async (importOriginal) => {
isQwenAlwaysThinkModel: vi.fn(() => false),
isSupportedThinkingTokenHunyuanModel: vi.fn(() => false),
isSupportedThinkingTokenModel: vi.fn(() => false),
- isGPT51SeriesModel: vi.fn(() => false)
+ isGPT51SeriesModel: vi.fn(() => false),
+ findTokenLimit: vi.fn(actual.findTokenLimit)
}
})
@@ -649,7 +651,7 @@ describe('reasoning utils', () => {
expect(result).toEqual({
thinking: {
type: 'enabled',
- budgetTokens: 2048
+ budgetTokens: 4096
}
})
})
@@ -729,7 +731,7 @@ describe('reasoning utils', () => {
const result = getGeminiReasoningParams(assistant, model)
expect(result).toEqual({
thinkingConfig: {
- thinkingBudget: 16448,
+ thinkingBudget: expect.any(Number),
includeThoughts: true
}
})
@@ -893,7 +895,7 @@ describe('reasoning utils', () => {
expect(result).toEqual({
reasoningConfig: {
type: 'enabled',
- budgetTokens: 2048
+ budgetTokens: 4096
}
})
})
@@ -994,4 +996,89 @@ describe('reasoning utils', () => {
})
})
})
+
+ describe('getAnthropicThinkingBudget', () => {
+ it('should return undefined when reasoningEffort is undefined', async () => {
+ const result = getAnthropicThinkingBudget(4096, undefined, 'claude-3-7-sonnet')
+ expect(result).toBeUndefined()
+ })
+
+ it('should return undefined when reasoningEffort is none', async () => {
+ const result = getAnthropicThinkingBudget(4096, 'none', 'claude-3-7-sonnet')
+ expect(result).toBeUndefined()
+ })
+
+ it('should return undefined when tokenLimit is not found', async () => {
+ const { findTokenLimit } = await import('@renderer/config/models')
+ vi.mocked(findTokenLimit).mockReturnValue(undefined)
+
+ const result = getAnthropicThinkingBudget(4096, 'medium', 'unknown-model')
+ expect(result).toBeUndefined()
+ })
+
+ it('should calculate budget correctly when maxTokens is provided', async () => {
+ const { findTokenLimit } = await import('@renderer/config/models')
+ vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
+
+ const result = getAnthropicThinkingBudget(4096, 'medium', 'claude-3-7-sonnet')
+ // EFFORT_RATIO['medium'] = 0.5
+ // budget = Math.floor((32768 - 1024) * 0.5 + 1024)
+ // = Math.floor(31744 * 0.5 + 1024) = Math.floor(15872 + 1024) = 16896
+ // budgetTokens = Math.min(16896, 4096) = 4096
+ // result = Math.max(1024, 4096) = 4096
+ expect(result).toBe(4096)
+ })
+
+ it('should use tokenLimit.max when maxTokens is undefined', async () => {
+ const { findTokenLimit } = await import('@renderer/config/models')
+ vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
+
+ const result = getAnthropicThinkingBudget(undefined, 'medium', 'claude-3-7-sonnet')
+ // When maxTokens is undefined, budget is not constrained by maxTokens
+ // EFFORT_RATIO['medium'] = 0.5
+ // budget = Math.floor((32768 - 1024) * 0.5 + 1024)
+ // = Math.floor(31744 * 0.5 + 1024) = Math.floor(15872 + 1024) = 16896
+ // result = Math.max(1024, 16896) = 16896
+ expect(result).toBe(16896)
+ })
+
+ it('should enforce minimum budget of 1024', async () => {
+ const { findTokenLimit } = await import('@renderer/config/models')
+ vi.mocked(findTokenLimit).mockReturnValue({ min: 100, max: 1000 })
+
+ const result = getAnthropicThinkingBudget(500, 'low', 'claude-3-7-sonnet')
+ // EFFORT_RATIO['low'] = 0.05
+ // budget = Math.floor((1000 - 100) * 0.05 + 100)
+ // = Math.floor(900 * 0.05 + 100) = Math.floor(45 + 100) = 145
+ // budgetTokens = Math.min(145, 500) = 145
+ // result = Math.max(1024, 145) = 1024
+ expect(result).toBe(1024)
+ })
+
+ it('should respect effort ratio for high reasoning effort', async () => {
+ const { findTokenLimit } = await import('@renderer/config/models')
+ vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
+
+ const result = getAnthropicThinkingBudget(8192, 'high', 'claude-3-7-sonnet')
+ // EFFORT_RATIO['high'] = 0.8
+ // budget = Math.floor((32768 - 1024) * 0.8 + 1024)
+ // = Math.floor(31744 * 0.8 + 1024) = Math.floor(25395.2 + 1024) = 26419
+ // budgetTokens = Math.min(26419, 8192) = 8192
+ // result = Math.max(1024, 8192) = 8192
+ expect(result).toBe(8192)
+ })
+
+ it('should use full token limit when maxTokens is undefined and reasoning effort is high', async () => {
+ const { findTokenLimit } = await import('@renderer/config/models')
+ vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
+
+ const result = getAnthropicThinkingBudget(undefined, 'high', 'claude-3-7-sonnet')
+ // When maxTokens is undefined, budget is not constrained by maxTokens
+ // EFFORT_RATIO['high'] = 0.8
+ // budget = Math.floor((32768 - 1024) * 0.8 + 1024)
+ // = Math.floor(31744 * 0.8 + 1024) = Math.floor(25395.2 + 1024) = 26419
+ // result = Math.max(1024, 26419) = 26419
+ expect(result).toBe(26419)
+ })
+ })
})
diff --git a/src/renderer/src/aiCore/utils/reasoning.ts b/src/renderer/src/aiCore/utils/reasoning.ts
index 422e2d1bc8..8551f32b3c 100644
--- a/src/renderer/src/aiCore/utils/reasoning.ts
+++ b/src/renderer/src/aiCore/utils/reasoning.ts
@@ -10,6 +10,7 @@ import {
GEMINI_FLASH_MODEL_REGEX,
getModelSupportedReasoningEffortOptions,
isDeepSeekHybridInferenceModel,
+ isDoubaoSeed18Model,
isDoubaoSeedAfter251015,
isDoubaoThinkingAutoModel,
isGemini3ThinkingTokenModel,
@@ -28,6 +29,7 @@ import {
isSupportedThinkingTokenDoubaoModel,
isSupportedThinkingTokenGeminiModel,
isSupportedThinkingTokenHunyuanModel,
+ isSupportedThinkingTokenMiMoModel,
isSupportedThinkingTokenModel,
isSupportedThinkingTokenQwenModel,
isSupportedThinkingTokenZhipuModel
@@ -389,7 +391,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
// Use thinking, doubao, zhipu, etc.
if (isSupportedThinkingTokenDoubaoModel(model)) {
- if (isDoubaoSeedAfter251015(model)) {
+ if (isDoubaoSeedAfter251015(model) || isDoubaoSeed18Model(model)) {
return { reasoningEffort }
}
if (reasoningEffort === 'high') {
@@ -408,6 +410,12 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
return { thinking: { type: 'enabled' } }
}
+ if (isSupportedThinkingTokenMiMoModel(model)) {
+ return {
+ thinking: { type: 'enabled' }
+ }
+ }
+
// Default case: no special thinking settings
return {}
}
@@ -479,16 +487,14 @@ export function getAnthropicThinkingBudget(
return undefined
}
- const budgetTokens = Math.max(
- 1024,
- Math.floor(
- Math.min(
- (tokenLimit.max - tokenLimit.min) * effortRatio + tokenLimit.min,
- (maxTokens || DEFAULT_MAX_TOKENS) * effortRatio
- )
- )
- )
- return budgetTokens
+ const budget = Math.floor((tokenLimit.max - tokenLimit.min) * effortRatio + tokenLimit.min)
+
+ let budgetTokens = budget
+ if (maxTokens !== undefined) {
+ budgetTokens = Math.min(budget, maxTokens)
+ }
+
+ return Math.max(1024, budgetTokens)
}
/**
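The refactor changes the clamping rule: the effort-scaled budget is computed from the model's token-limit range first, and maxTokens only caps the result when it is actually set, whereas the old code always took the minimum against (maxTokens || DEFAULT_MAX_TOKENS) * effortRatio. A self-contained sketch of the new logic, using the effort ratios the tests above assume (low 0.05, medium 0.5, high 0.8; illustrative values, not the exported EFFORT_RATIO table itself):

```ts
const EFFORT_RATIO: Record<string, number> = { low: 0.05, medium: 0.5, high: 0.8 }

function thinkingBudget(
  maxTokens: number | undefined,
  effort: keyof typeof EFFORT_RATIO,
  tokenLimit: { min: number; max: number }
): number {
  // Scale within the model's [min, max] thinking-token range
  const budget = Math.floor((tokenLimit.max - tokenLimit.min) * EFFORT_RATIO[effort] + tokenLimit.min)
  // Only constrain by maxTokens when the user has set one
  const clamped = maxTokens !== undefined ? Math.min(budget, maxTokens) : budget
  // Anthropic's extended thinking requires a budget of at least 1024 tokens
  return Math.max(1024, clamped)
}

thinkingBudget(4096, 'medium', { min: 1024, max: 32768 }) // 4096 (clamped by maxTokens)
thinkingBudget(undefined, 'medium', { min: 1024, max: 32768 }) // 16896 (no maxTokens cap)
```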
diff --git a/src/renderer/src/assets/images/models/mimo.svg b/src/renderer/src/assets/images/models/mimo.svg
new file mode 100644
index 0000000000..82370fece3
--- /dev/null
+++ b/src/renderer/src/assets/images/models/mimo.svg
@@ -0,0 +1,17 @@
+
diff --git a/src/renderer/src/assets/images/providers/mimo.svg b/src/renderer/src/assets/images/providers/mimo.svg
new file mode 100644
index 0000000000..82370fece3
--- /dev/null
+++ b/src/renderer/src/assets/images/providers/mimo.svg
@@ -0,0 +1,17 @@
+
diff --git a/src/renderer/src/config/models/__tests__/reasoning.test.ts b/src/renderer/src/config/models/__tests__/reasoning.test.ts
index af9567b80e..a8d354c6d3 100644
--- a/src/renderer/src/config/models/__tests__/reasoning.test.ts
+++ b/src/renderer/src/config/models/__tests__/reasoning.test.ts
@@ -733,6 +733,11 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-6-lite-251015' }))).toBe('doubao_after_251015')
})
+ it('should return doubao_after_251015 for Doubao-Seed-1.8 models', () => {
+ expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251215' }))).toBe('doubao_after_251015')
+ expect(getThinkModelType(createModel({ id: 'doubao-seed-1.8' }))).toBe('doubao_after_251015')
+ })
+
it('should return doubao_no_auto for other Doubao thinking models', () => {
expect(getThinkModelType(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toBe('doubao_no_auto')
})
@@ -863,6 +868,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
// auto > after_251015 > no_auto
expect(getThinkModelType(createModel({ id: 'doubao-seed-1.6' }))).toBe('doubao')
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-6-251015' }))).toBe('doubao_after_251015')
+ expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251215' }))).toBe('doubao_after_251015')
expect(getThinkModelType(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toBe('doubao_no_auto')
})
diff --git a/src/renderer/src/config/models/default.ts b/src/renderer/src/config/models/default.ts
index d7b0e885d0..37854c5749 100644
--- a/src/renderer/src/config/models/default.ts
+++ b/src/renderer/src/config/models/default.ts
@@ -746,6 +746,12 @@ export const SYSTEM_MODELS: Record<string, Model[]> =
}
],
doubao: [
+ {
+ id: 'doubao-seed-1-8-251215',
+ provider: 'doubao',
+ name: 'Doubao-Seed-1.8',
+ group: 'Doubao-Seed-1.8'
+ },
{
id: 'doubao-1-5-vision-pro-32k-250115',
provider: 'doubao',
@@ -1785,5 +1791,13 @@ export const SYSTEM_MODELS: Record<string, Model[]> =
provider: 'cerebras',
group: 'qwen'
}
+ ],
+ mimo: [
+ {
+ id: 'mimo-v2-flash',
+ name: 'MiMo V2 Flash',
+ provider: 'mimo',
+ group: 'MiMo'
+ }
]
}
diff --git a/src/renderer/src/config/models/logo.ts b/src/renderer/src/config/models/logo.ts
index fe1a919c5e..75ad71f662 100644
--- a/src/renderer/src/config/models/logo.ts
+++ b/src/renderer/src/config/models/logo.ts
@@ -103,6 +103,7 @@ import MicrosoftModelLogo from '@renderer/assets/images/models/microsoft.png'
import MicrosoftModelLogoDark from '@renderer/assets/images/models/microsoft_dark.png'
import MidjourneyModelLogo from '@renderer/assets/images/models/midjourney.png'
import MidjourneyModelLogoDark from '@renderer/assets/images/models/midjourney_dark.png'
+import MiMoModelLogo from '@renderer/assets/images/models/mimo.svg'
import {
default as MinicpmModelLogo,
default as MinicpmModelLogoDark
@@ -301,7 +302,8 @@ export function getModelLogoById(modelId: string): string | undefined {
bytedance: BytedanceModelLogo,
ling: LingModelLogo,
ring: LingModelLogo,
- '(V_1|V_1_TURBO|V_2|V_2A|V_2_TURBO|DESCRIBE|UPSCALE)': IdeogramModelLogo
+ '(V_1|V_1_TURBO|V_2|V_2A|V_2_TURBO|DESCRIBE|UPSCALE)': IdeogramModelLogo,
+ mimo: MiMoModelLogo
} as const satisfies Record<string, string>
for (const key in logoMap) {
diff --git a/src/renderer/src/config/models/reasoning.ts b/src/renderer/src/config/models/reasoning.ts
index 0c4c201202..144afc52a7 100644
--- a/src/renderer/src/config/models/reasoning.ts
+++ b/src/renderer/src/config/models/reasoning.ts
@@ -53,6 +53,7 @@ export const MODEL_SUPPORTED_REASONING_EFFORT = {
doubao_no_auto: ['high'] as const,
doubao_after_251015: ['minimal', 'low', 'medium', 'high'] as const,
hunyuan: ['auto'] as const,
+ mimo: ['auto'] as const,
zhipu: ['auto'] as const,
perplexity: ['low', 'medium', 'high'] as const,
deepseek_hybrid: ['auto'] as const
@@ -82,6 +83,7 @@ export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
doubao: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const,
doubao_no_auto: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_no_auto] as const,
doubao_after_251015: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_after_251015] as const,
+ mimo: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.mimo] as const,
hunyuan: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const,
zhipu: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const,
perplexity: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.perplexity] as const,
@@ -149,7 +151,7 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
} else if (isSupportedThinkingTokenDoubaoModel(model)) {
if (isDoubaoThinkingAutoModel(model)) {
thinkingModelType = 'doubao'
- } else if (isDoubaoSeedAfter251015(model)) {
+ } else if (isDoubaoSeedAfter251015(model) || isDoubaoSeed18Model(model)) {
thinkingModelType = 'doubao_after_251015'
} else {
thinkingModelType = 'doubao_no_auto'
@@ -162,6 +164,8 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
thinkingModelType = 'zhipu'
} else if (isDeepSeekHybridInferenceModel(model)) {
thinkingModelType = 'deepseek_hybrid'
+ } else if (isSupportedThinkingTokenMiMoModel(model)) {
+ thinkingModelType = 'mimo'
}
return thinkingModelType
}
@@ -271,7 +275,8 @@ function _isSupportedThinkingTokenModel(model: Model): boolean {
isSupportedThinkingTokenClaudeModel(model) ||
isSupportedThinkingTokenDoubaoModel(model) ||
isSupportedThinkingTokenHunyuanModel(model) ||
- isSupportedThinkingTokenZhipuModel(model)
+ isSupportedThinkingTokenZhipuModel(model) ||
+ isSupportedThinkingTokenMiMoModel(model)
)
}
@@ -465,7 +470,7 @@ export function isQwenAlwaysThinkModel(model?: Model): boolean {
// Regex for Doubao models that support thinking mode
export const DOUBAO_THINKING_MODEL_REGEX =
- /doubao-(?:1[.-]5-thinking-vision-pro|1[.-]5-thinking-pro-m|seed-1[.-]6(?:-flash)?(?!-(?:thinking)(?:-|$))|seed-code(?:-preview)?(?:-\d+)?)(?:-[\w-]+)*/i
+ /doubao-(?:1[.-]5-thinking-vision-pro|1[.-]5-thinking-pro-m|seed-1[.-][68](?:-flash)?(?!-(?:thinking)(?:-|$))|seed-code(?:-preview)?(?:-\d+)?)(?:-[\w-]+)*/i
// Doubao models that support auto thinking: doubao-seed-1.6-xxx, doubao-seed-1-6-xxx, doubao-1-5-thinking-pro-m-xxx
// Auto thinking is no longer supported after version 251015, see https://console.volcengine.com/ark/region:ark+cn-beijing/model/detail?Id=doubao-seed-1-6
@@ -483,6 +488,11 @@ export function isDoubaoSeedAfter251015(model: Model): boolean {
return result
}
+export function isDoubaoSeed18Model(model: Model): boolean {
+ const pattern = /doubao-seed-1[.-]8(?:-[\w-]+)?/i
+ return pattern.test(model.id) || pattern.test(model.name)
+}
+
export function isSupportedThinkingTokenDoubaoModel(model?: Model): boolean {
if (!model) {
return false
@@ -564,6 +574,11 @@ export const isSupportedThinkingTokenZhipuModel = (model: Model): boolean => {
return ['glm-4.5', 'glm-4.6'].some((id) => modelId.includes(id))
}
+export const isSupportedThinkingTokenMiMoModel = (model: Model): boolean => {
+ const modelId = getLowerBaseModelName(model.id, '/')
+ return ['mimo-v2-flash'].some((id) => modelId.includes(id))
+}
+
export const isDeepSeekHybridInferenceModel = (model: Model) => {
const { idResult, nameResult } = withModelIdAndNameAsId(model, (model) => {
const modelId = getLowerBaseModelName(model.id)
@@ -602,6 +617,8 @@ export const isZhipuReasoningModel = (model?: Model): boolean => {
return isSupportedThinkingTokenZhipuModel(model) || modelId.includes('glm-z1')
}
+export const isMiMoReasoningModel = isSupportedThinkingTokenMiMoModel
+
export const isStepReasoningModel = (model?: Model): boolean => {
if (!model) {
return false
@@ -652,6 +669,7 @@ export function isReasoningModel(model?: Model): boolean {
isDeepSeekHybridInferenceModel(model) ||
isLingReasoningModel(model) ||
isMiniMaxReasoningModel(model) ||
+ isMiMoReasoningModel(model) ||
modelId.includes('magistral') ||
modelId.includes('pangu-pro-moe') ||
modelId.includes('seed-oss') ||
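The `1[.-][68]` widening makes every pattern that previously matched doubao-seed-1.6 ids also accept seed-1.8 ids (the same change recurs in tooluse.ts and vision.ts below), while the new isDoubaoSeed18Model routes 1.8 models to the doubao_after_251015 option set. A quick sketch of the resulting classification, with expectations mirroring the tests in this change:

```ts
const DOUBAO_THINKING_MODEL_REGEX =
  /doubao-(?:1[.-]5-thinking-vision-pro|1[.-]5-thinking-pro-m|seed-1[.-][68](?:-flash)?(?!-(?:thinking)(?:-|$))|seed-code(?:-preview)?(?:-\d+)?)(?:-[\w-]+)*/i
const SEED_18_REGEX = /doubao-seed-1[.-]8(?:-[\w-]+)?/i

for (const id of ['doubao-seed-1-8-251215', 'doubao-seed-1.8', 'doubao-seed-1.6']) {
  console.log(id, DOUBAO_THINKING_MODEL_REGEX.test(id), SEED_18_REGEX.test(id))
}
// doubao-seed-1-8-251215 -> true, true   ('doubao_after_251015')
// doubao-seed-1.8        -> true, true   ('doubao_after_251015')
// doubao-seed-1.6        -> true, false  ('doubao', auto thinking, per the existing tests)
```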
diff --git a/src/renderer/src/config/models/tooluse.ts b/src/renderer/src/config/models/tooluse.ts
index 50890aaf8d..54d371dfda 100644
--- a/src/renderer/src/config/models/tooluse.ts
+++ b/src/renderer/src/config/models/tooluse.ts
@@ -25,12 +25,13 @@ export const FUNCTION_CALLING_MODELS = [
'learnlm(?:-[\\w-]+)?',
'gemini(?:-[\\w-]+)?', // Gemini embedding models are excluded earlier
'grok-3(?:-[\\w-]+)?',
- 'doubao-seed-1[.-]6(?:-[\\w-]+)?',
+ 'doubao-seed-1[.-][68](?:-[\\w-]+)?',
'doubao-seed-code(?:-[\\w-]+)?',
'kimi-k2(?:-[\\w-]+)?',
'ling-\\w+(?:-[\\w-]+)?',
'ring-\\w+(?:-[\\w-]+)?',
- 'minimax-m2'
+ 'minimax-m2',
+ 'mimo-v2-flash'
] as const
const FUNCTION_CALLING_EXCLUDED_MODELS = [
diff --git a/src/renderer/src/config/models/vision.ts b/src/renderer/src/config/models/vision.ts
index 183ec99433..fe4bc9912c 100644
--- a/src/renderer/src/config/models/vision.ts
+++ b/src/renderer/src/config/models/vision.ts
@@ -45,7 +45,7 @@ const visionAllowedModels = [
'deepseek-vl(?:[\\w-]+)?',
'kimi-latest',
'gemma-3(?:-[\\w-]+)',
- 'doubao-seed-1[.-]6(?:-[\\w-]+)?',
+ 'doubao-seed-1[.-][68](?:-[\\w-]+)?',
'doubao-seed-code(?:-[\\w-]+)?',
'kimi-thinking-preview',
`gemma3(?:[-:\\w]+)?`,
diff --git a/src/renderer/src/config/providers.ts b/src/renderer/src/config/providers.ts
index bc32ef3490..1adeb58ad0 100644
--- a/src/renderer/src/config/providers.ts
+++ b/src/renderer/src/config/providers.ts
@@ -31,6 +31,7 @@ import JinaProviderLogo from '@renderer/assets/images/providers/jina.png'
import LanyunProviderLogo from '@renderer/assets/images/providers/lanyun.png'
import LMStudioProviderLogo from '@renderer/assets/images/providers/lmstudio.png'
import LongCatProviderLogo from '@renderer/assets/images/providers/longcat.png'
+import MiMoProviderLogo from '@renderer/assets/images/providers/mimo.svg'
import MinimaxProviderLogo from '@renderer/assets/images/providers/minimax.png'
import MistralProviderLogo from '@renderer/assets/images/providers/mistral.png'
import ModelScopeProviderLogo from '@renderer/assets/images/providers/modelscope.png'
@@ -695,6 +696,17 @@ export const SYSTEM_PROVIDERS_CONFIG: Record =
models: SYSTEM_MODELS.cerebras,
isSystem: true,
enabled: false
+ },
+ mimo: {
+ id: 'mimo',
+ name: 'Xiaomi MiMo',
+ type: 'openai',
+ apiKey: '',
+ apiHost: 'https://api.xiaomimimo.com',
+ anthropicApiHost: 'https://api.xiaomimimo.com/anthropic',
+ models: SYSTEM_MODELS.mimo,
+ isSystem: true,
+ enabled: false
}
} as const
@@ -763,7 +775,8 @@ export const PROVIDER_LOGO_MAP: AtLeast = {
huggingface: HuggingfaceProviderLogo,
sophnet: SophnetProviderLogo,
gateway: AIGatewayProviderLogo,
- cerebras: CerebrasProviderLogo
+ cerebras: CerebrasProviderLogo,
+ mimo: MiMoProviderLogo
} as const
export function getProviderLogo(providerId: string) {
@@ -1434,5 +1447,16 @@ export const PROVIDER_URLS: Record = {
docs: 'https://inference-docs.cerebras.ai/introduction',
models: 'https://inference-docs.cerebras.ai/models/overview'
}
+ },
+ mimo: {
+ api: {
+ url: 'https://api.xiaomimimo.com'
+ },
+ websites: {
+ official: 'https://platform.xiaomimimo.com/',
+ apiKey: 'https://platform.xiaomimimo.com/#/console/usage',
+ docs: 'https://platform.xiaomimimo.com/#/docs/welcome',
+ models: 'https://platform.xiaomimimo.com/'
+ }
}
}
diff --git a/src/renderer/src/i18n/label.ts b/src/renderer/src/i18n/label.ts
index 8e2600a681..2e6f84026e 100644
--- a/src/renderer/src/i18n/label.ts
+++ b/src/renderer/src/i18n/label.ts
@@ -88,7 +88,8 @@ const providerKeyMap = {
huggingface: 'provider.huggingface',
sophnet: 'provider.sophnet',
gateway: 'provider.ai-gateway',
- cerebras: 'provider.cerebras'
+ cerebras: 'provider.cerebras',
+ mimo: 'provider.mimo'
} as const
/**
@@ -330,7 +331,8 @@ const builtInMcpDescriptionKeyMap: Record<BuiltinMCPServerName, string> = {
[BuiltinMCPServerNames.difyKnowledge]: 'settings.mcp.builtinServersDescriptions.dify_knowledge',
[BuiltinMCPServerNames.python]: 'settings.mcp.builtinServersDescriptions.python',
[BuiltinMCPServerNames.didiMCP]: 'settings.mcp.builtinServersDescriptions.didi_mcp',
- [BuiltinMCPServerNames.browser]: 'settings.mcp.builtinServersDescriptions.browser'
+ [BuiltinMCPServerNames.browser]: 'settings.mcp.builtinServersDescriptions.browser',
+ [BuiltinMCPServerNames.nowledgeMem]: 'settings.mcp.builtinServersDescriptions.nowledge_mem'
} as const
export const getBuiltInMcpServerDescriptionLabel = (key: string): string => {
diff --git a/src/renderer/src/i18n/locales/en-us.json b/src/renderer/src/i18n/locales/en-us.json
index 0085cef491..f4012363e3 100644
--- a/src/renderer/src/i18n/locales/en-us.json
+++ b/src/renderer/src/i18n/locales/en-us.json
@@ -2643,6 +2643,7 @@
"lanyun": "LANYUN",
"lmstudio": "LM Studio",
"longcat": "LongCat AI",
+ "mimo": "Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope",
@@ -3939,6 +3940,7 @@
"mcp_auto_install": "Automatically install MCP service (beta)",
"memory": "Persistent memory implementation based on a local knowledge graph. This enables the model to remember user-related information across different conversations. Requires configuring the MEMORY_FILE_PATH environment variable.",
"no": "No description",
+ "nowledge_mem": "Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
"python": "Execute Python code in a secure sandbox environment. Run Python with Pyodide, supporting most standard libraries and scientific computing packages",
"sequentialthinking": "A MCP server implementation that provides tools for dynamic and reflective problem solving through structured thinking processes"
},
diff --git a/src/renderer/src/i18n/locales/zh-cn.json b/src/renderer/src/i18n/locales/zh-cn.json
index 49ba09b840..0e5b2f60e7 100644
--- a/src/renderer/src/i18n/locales/zh-cn.json
+++ b/src/renderer/src/i18n/locales/zh-cn.json
@@ -2643,6 +2643,7 @@
"lanyun": "蓝耘科技",
"lmstudio": "LM Studio",
"longcat": "龙猫",
+ "mimo": "Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope 魔搭",
@@ -3939,6 +3940,7 @@
"mcp_auto_install": "自动安装 MCP 服务(测试版)",
"memory": "基于本地知识图谱的持久性记忆基础实现。这使得模型能够在不同对话间记住用户的相关信息。需要配置 MEMORY_FILE_PATH 环境变量。",
"no": "无描述",
+ "nowledge_mem": "需要本地运行 Nowledge Mem 应用。将 AI 对话、工具、笔记、智能体和文件保存在本地计算机的私有记忆中。请从 https://mem.nowledge.co/ 下载",
"python": "在安全的沙盒环境中执行 Python 代码。使用 Pyodide 运行 Python,支持大多数标准库和科学计算包",
"sequentialthinking": "一个 MCP 服务器实现,提供了通过结构化思维过程进行动态和反思性问题解决的工具"
},
diff --git a/src/renderer/src/i18n/locales/zh-tw.json b/src/renderer/src/i18n/locales/zh-tw.json
index 67b56e9873..9625c68386 100644
--- a/src/renderer/src/i18n/locales/zh-tw.json
+++ b/src/renderer/src/i18n/locales/zh-tw.json
@@ -2643,6 +2643,7 @@
"lanyun": "藍耘",
"lmstudio": "LM Studio",
"longcat": "龍貓",
+ "mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope 魔搭",
@@ -3939,6 +3940,7 @@
"mcp_auto_install": "自動安裝 MCP 服務(測試版)",
"memory": "基於本機知識圖譜的持久性記憶基礎實做。這使得模型能夠在不同對話間記住使用者的相關資訊。需要設定 MEMORY_FILE_PATH 環境變數。",
"no": "無描述",
+ "nowledge_mem": "需要本機執行 Nowledge Mem 應用程式。將 AI 對話、工具、筆記、代理和檔案保存在電腦上的私人記憶體中。請從 https://mem.nowledge.co/ 下載",
"python": "在安全的沙盒環境中執行 Python 程式碼。使用 Pyodide 執行 Python,支援大多數標準函式庫和科學計算套件",
"sequentialthinking": "一個 MCP 伺服器實做,提供了透過結構化思維過程進行動態和反思性問題解決的工具"
},
diff --git a/src/renderer/src/i18n/translate/de-de.json b/src/renderer/src/i18n/translate/de-de.json
index 3fcd849548..b3acb49950 100644
--- a/src/renderer/src/i18n/translate/de-de.json
+++ b/src/renderer/src/i18n/translate/de-de.json
@@ -2643,6 +2643,7 @@
"lanyun": "Lanyun Technologie",
"lmstudio": "LM Studio",
"longcat": "Meißner Riesenhamster",
+ "mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope",
@@ -3939,6 +3940,7 @@
"mcp_auto_install": "MCP-Service automatisch installieren (Beta-Version)",
"memory": "MCP-Server mit persistenter Erinnerungsbasis auf lokalem Wissensgraphen, der Informationen über verschiedene Dialoge hinweg speichert. MEMORY_FILE_PATH-Umgebungsvariable muss konfiguriert werden",
"no": "Keine Beschreibung",
+ "nowledge_mem": "Erfordert lokal laufende Nowledge Mem App. Speichert KI-Chats, Tools, Notizen, Agenten und Dateien in einem privaten Speicher auf Ihrem Computer. Download unter https://mem.nowledge.co/",
"python": "Python-Code in einem sicheren Sandbox-Umgebung ausführen. Verwendung von Pyodide für Python, Unterstützung für die meisten Standardbibliotheken und wissenschaftliche Pakete",
"sequentialthinking": "MCP-Server-Implementierung mit strukturiertem Denkprozess, der dynamische und reflektierende Problemlösungen ermöglicht"
},
diff --git a/src/renderer/src/i18n/translate/el-gr.json b/src/renderer/src/i18n/translate/el-gr.json
index aed40bc2db..ae7b855646 100644
--- a/src/renderer/src/i18n/translate/el-gr.json
+++ b/src/renderer/src/i18n/translate/el-gr.json
@@ -2643,6 +2643,7 @@
"lanyun": "Λανιούν Τεχνολογία",
"lmstudio": "LM Studio",
"longcat": "Τσίρο",
+ "mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope Magpie",
@@ -3939,6 +3940,7 @@
"mcp_auto_install": "Αυτόματη εγκατάσταση υπηρεσίας MCP (προβολή)",
"memory": "Βασική υλοποίηση μόνιμης μνήμης με βάση τοπικό γράφημα γνώσης. Αυτό επιτρέπει στο μοντέλο να θυμάται πληροφορίες σχετικές με τον χρήστη ανάμεσα σε διαφορετικές συνομιλίες. Απαιτείται η ρύθμιση της μεταβλητής περιβάλλοντος MEMORY_FILE_PATH.",
"no": "Χωρίς περιγραφή",
+ "nowledge_mem": "[to be translated]:Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
"python": "Εκτελέστε κώδικα Python σε ένα ασφαλές περιβάλλον sandbox. Χρησιμοποιήστε το Pyodide για να εκτελέσετε Python, υποστηρίζοντας την πλειονότητα των βιβλιοθηκών της τυπικής βιβλιοθήκης και των πακέτων επιστημονικού υπολογισμού",
"sequentialthinking": "ένας εξυπηρετητής MCP που υλοποιείται, παρέχοντας εργαλεία για δυναμική και αναστοχαστική επίλυση προβλημάτων μέσω δομημένων διαδικασιών σκέψης"
},
diff --git a/src/renderer/src/i18n/translate/es-es.json b/src/renderer/src/i18n/translate/es-es.json
index b0f584b3b5..26b499cba2 100644
--- a/src/renderer/src/i18n/translate/es-es.json
+++ b/src/renderer/src/i18n/translate/es-es.json
@@ -2643,6 +2643,7 @@
"lanyun": "Tecnología Lanyun",
"lmstudio": "Estudio LM",
"longcat": "Totoro",
+ "mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "Minimax",
"mistral": "Mistral",
"modelscope": "ModelScope Módulo",
@@ -3939,6 +3940,7 @@
"mcp_auto_install": "Instalación automática del servicio MCP (versión beta)",
"memory": "Implementación básica de memoria persistente basada en un grafo de conocimiento local. Esto permite que el modelo recuerde información relevante del usuario entre diferentes conversaciones. Es necesario configurar la variable de entorno MEMORY_FILE_PATH.",
"no": "sin descripción",
+ "nowledge_mem": "[to be translated]:Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
"python": "Ejecuta código Python en un entorno sandbox seguro. Usa Pyodide para ejecutar Python, compatible con la mayoría de las bibliotecas estándar y paquetes de cálculo científico.",
"sequentialthinking": "Una implementación de servidor MCP que proporciona herramientas para la resolución dinámica y reflexiva de problemas mediante un proceso de pensamiento estructurado"
},
diff --git a/src/renderer/src/i18n/translate/fr-fr.json b/src/renderer/src/i18n/translate/fr-fr.json
index eea3ddb1c1..4dff56d7e9 100644
--- a/src/renderer/src/i18n/translate/fr-fr.json
+++ b/src/renderer/src/i18n/translate/fr-fr.json
@@ -2643,6 +2643,7 @@
"lanyun": "Technologie Lan Yun",
"lmstudio": "Studio LM",
"longcat": "Mon voisin Totoro",
+ "mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope MoDa",
@@ -3939,6 +3940,7 @@
"mcp_auto_install": "Installation automatique du service MCP (version bêta)",
"memory": "Implémentation de base de mémoire persistante basée sur un graphe de connaissances local. Cela permet au modèle de se souvenir des informations relatives à l'utilisateur entre différentes conversations. Nécessite la configuration de la variable d'environnement MEMORY_FILE_PATH.",
"no": "sans description",
+ "nowledge_mem": "[to be translated]:Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
"python": "Exécutez du code Python dans un environnement bac à sable sécurisé. Utilisez Pyodide pour exécuter Python, prenant en charge la plupart des bibliothèques standard et des packages de calcul scientifique.",
"sequentialthinking": "Un serveur MCP qui fournit des outils permettant une résolution dynamique et réflexive des problèmes à travers un processus de pensée structuré"
},
diff --git a/src/renderer/src/i18n/translate/ja-jp.json b/src/renderer/src/i18n/translate/ja-jp.json
index ec72e3a3ab..090a1927cd 100644
--- a/src/renderer/src/i18n/translate/ja-jp.json
+++ b/src/renderer/src/i18n/translate/ja-jp.json
@@ -2643,6 +2643,7 @@
"lanyun": "LANYUN",
"lmstudio": "LM Studio",
"longcat": "トトロ",
+ "mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope",
@@ -3939,6 +3940,7 @@
"mcp_auto_install": "MCPサービスの自動インストール(ベータ版)",
"memory": "ローカルのナレッジグラフに基づく永続的なメモリの基本的な実装です。これにより、モデルは異なる会話間でユーザーの関連情報を記憶できるようになります。MEMORY_FILE_PATH 環境変数の設定が必要です。",
"no": "説明なし",
+ "nowledge_mem": "Nowledge Mem アプリをローカルで実行する必要があります。AI チャット、ツール、ノート、エージェント、ファイルをコンピューター上のプライベートメモリに保存します。https://mem.nowledge.co/ からダウンロードしてください",
"python": "安全なサンドボックス環境でPythonコードを実行します。Pyodideを使用してPythonを実行し、ほとんどの標準ライブラリと科学計算パッケージをサポートしています。",
"sequentialthinking": "構造化された思考プロセスを通じて動的かつ反省的な問題解決を行うためのツールを提供するMCPサーバーの実装"
},
diff --git a/src/renderer/src/i18n/translate/pt-pt.json b/src/renderer/src/i18n/translate/pt-pt.json
index 5c3d2d1915..50cc4fae03 100644
--- a/src/renderer/src/i18n/translate/pt-pt.json
+++ b/src/renderer/src/i18n/translate/pt-pt.json
@@ -2643,6 +2643,7 @@
"lanyun": "Lanyun Tecnologia",
"lmstudio": "Estúdio LM",
"longcat": "Totoro",
+ "mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "Minimax",
"mistral": "Mistral",
"modelscope": "ModelScope MôDá",
@@ -3939,6 +3940,7 @@
"mcp_auto_install": "Instalação automática do serviço MCP (beta)",
"memory": "Implementação base de memória persistente baseada em grafos de conhecimento locais. Isso permite que o modelo lembre informações relevantes do utilizador entre diferentes conversas. É necessário configurar a variável de ambiente MEMORY_FILE_PATH.",
"no": "sem descrição",
+ "nowledge_mem": "Requer a aplicação Nowledge Mem em execução localmente. Mantém conversas de IA, ferramentas, notas, agentes e ficheiros numa memória privada no seu computador. Transfira de https://mem.nowledge.co/",
"python": "Executar código Python num ambiente sandbox seguro. Utilizar Pyodide para executar Python, suportando a maioria das bibliotecas padrão e pacotes de computação científica",
"sequentialthinking": "Uma implementação de servidor MCP que fornece ferramentas para resolução dinâmica e reflexiva de problemas através de um processo de pensamento estruturado"
},
diff --git a/src/renderer/src/i18n/translate/ru-ru.json b/src/renderer/src/i18n/translate/ru-ru.json
index cba8a1e5eb..8a6a781451 100644
--- a/src/renderer/src/i18n/translate/ru-ru.json
+++ b/src/renderer/src/i18n/translate/ru-ru.json
@@ -2643,6 +2643,7 @@
"lanyun": "LANYUN",
"lmstudio": "LM Studio",
"longcat": "Тоторо",
+ "mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope",
@@ -3939,6 +3940,7 @@
"mcp_auto_install": "Автоматическая установка службы MCP (бета-версия)",
"memory": "реализация постоянной памяти на основе локального графа знаний. Это позволяет модели запоминать информацию о пользователе между различными диалогами. Требуется настроить переменную среды MEMORY_FILE_PATH.",
"no": "без описания",
+ "nowledge_mem": "Требуется запущенное локально приложение Nowledge Mem. Хранит чаты ИИ, инструменты, заметки, агентов и файлы в приватной памяти на вашем компьютере. Скачать можно на https://mem.nowledge.co/",
"python": "Выполняйте код Python в безопасной песочнице. Запускайте Python с помощью Pyodide, поддерживается большинство стандартных библиотек и пакетов для научных вычислений",
"sequentialthinking": "MCP серверная реализация, предоставляющая инструменты для динамического и рефлексивного решения проблем посредством структурированного мыслительного процесса"
},
diff --git a/src/renderer/src/pages/settings/ProviderSettings/ProviderSetting.tsx b/src/renderer/src/pages/settings/ProviderSettings/ProviderSetting.tsx
index 85f54fce87..049c14c0d1 100644
--- a/src/renderer/src/pages/settings/ProviderSettings/ProviderSetting.tsx
+++ b/src/renderer/src/pages/settings/ProviderSettings/ProviderSetting.tsx
@@ -80,7 +80,8 @@ const ANTHROPIC_COMPATIBLE_PROVIDER_IDS = [
SystemProviderIds.minimax,
SystemProviderIds.silicon,
SystemProviderIds.qiniu,
- SystemProviderIds.dmxapi
+ SystemProviderIds.dmxapi,
+ SystemProviderIds.mimo
] as const
type AnthropicCompatibleProviderId = (typeof ANTHROPIC_COMPATIBLE_PROVIDER_IDS)[number]
diff --git a/src/renderer/src/store/mcp.ts b/src/renderer/src/store/mcp.ts
index ed7076bc1c..5b8d5bcdcf 100644
--- a/src/renderer/src/store/mcp.ts
+++ b/src/renderer/src/store/mcp.ts
@@ -183,6 +183,16 @@ export const builtinMCPServers: BuiltinMCPServer[] = [
provider: 'CherryAI',
installSource: 'builtin',
isTrusted: true
+ },
+ {
+ id: nanoid(),
+ name: BuiltinMCPServerNames.nowledgeMem,
+ reference: 'https://mem.nowledge.co/',
+ type: 'inMemory',
+ isActive: false,
+ provider: 'Nowledge',
+ installSource: 'builtin',
+ isTrusted: true
}
] as const
diff --git a/src/renderer/src/store/migrate.ts b/src/renderer/src/store/migrate.ts
index f085db2302..5fe1bc0901 100644
--- a/src/renderer/src/store/migrate.ts
+++ b/src/renderer/src/store/migrate.ts
@@ -3046,6 +3046,7 @@ const migrateConfig = {
assistant.settings.reasoning_effort = 'default'
}
})
+ addProvider(state, 'mimo')
logger.info('migrate 187 success')
return state
} catch (error) {
diff --git a/src/renderer/src/types/index.ts b/src/renderer/src/types/index.ts
index bc01e2da74..eefa380a66 100644
--- a/src/renderer/src/types/index.ts
+++ b/src/renderer/src/types/index.ts
@@ -103,6 +103,7 @@ const ThinkModelTypes = [
'doubao',
'doubao_no_auto',
'doubao_after_251015',
+ 'mimo',
'hunyuan',
'zhipu',
'perplexity',
@@ -752,7 +753,8 @@ export const BuiltinMCPServerNames = {
difyKnowledge: '@cherry/dify-knowledge',
python: '@cherry/python',
didiMCP: '@cherry/didi-mcp',
- browser: '@cherry/browser'
+ browser: '@cherry/browser',
+ nowledgeMem: '@cherry/nowledge-mem'
} as const
export type BuiltinMCPServerName = (typeof BuiltinMCPServerNames)[keyof typeof BuiltinMCPServerNames]
diff --git a/src/renderer/src/types/provider.ts b/src/renderer/src/types/provider.ts
index 4e3e34760c..edab3a7305 100644
--- a/src/renderer/src/types/provider.ts
+++ b/src/renderer/src/types/provider.ts
@@ -189,7 +189,8 @@ export const SystemProviderIdSchema = z.enum([
'huggingface',
'sophnet',
'gateway',
- 'cerebras'
+ 'cerebras',
+ 'mimo'
])
export type SystemProviderId = z.infer<typeof SystemProviderIdSchema>
@@ -258,7 +259,8 @@ export const SystemProviderIds = {
longcat: 'longcat',
huggingface: 'huggingface',
gateway: 'gateway',
- cerebras: 'cerebras'
+ cerebras: 'cerebras',
+ mimo: 'mimo'
} as const satisfies Record<SystemProviderId, SystemProviderId>
type SystemProviderIdTypeMap = typeof SystemProviderIds
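Adding a system provider id means extending the zod enum and the id map in lockstep; the satisfies clause turns a missing entry into a compile-time error. A reduced sketch of the pattern (two ids only; the value type of the real map is an assumption here):

```ts
import { z } from 'zod'

const SystemProviderIdSchema = z.enum(['cerebras', 'mimo'])
type SystemProviderId = z.infer<typeof SystemProviderIdSchema>

// Dropping the 'mimo' key would fail to compile once the enum declares it
const SystemProviderIds = {
  cerebras: 'cerebras',
  mimo: 'mimo'
} as const satisfies Record<SystemProviderId, SystemProviderId>
```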
diff --git a/yarn.lock b/yarn.lock
index aeb8b3f2de..08b33ec36b 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -11246,7 +11246,7 @@ __metadata:
languageName: node
linkType: hard
-"buffer-equal-constant-time@npm:1.0.1":
+"buffer-equal-constant-time@npm:^1.0.1":
version: 1.0.1
resolution: "buffer-equal-constant-time@npm:1.0.1"
checksum: 10c0/fb2294e64d23c573d0dd1f1e7a466c3e978fe94a4e0f8183937912ca374619773bef8e2aceb854129d2efecbbc515bbd0cc78d2734a3e3031edb0888531bbc8e
@@ -17233,24 +17233,24 @@ __metadata:
languageName: node
linkType: hard
-"jwa@npm:^2.0.0":
- version: 2.0.0
- resolution: "jwa@npm:2.0.0"
+"jwa@npm:^2.0.1":
+ version: 2.0.1
+ resolution: "jwa@npm:2.0.1"
dependencies:
- buffer-equal-constant-time: "npm:1.0.1"
+ buffer-equal-constant-time: "npm:^1.0.1"
ecdsa-sig-formatter: "npm:1.0.11"
safe-buffer: "npm:^5.0.1"
- checksum: 10c0/6baab823b93c038ba1d2a9e531984dcadbc04e9eb98d171f4901b7a40d2be15961a359335de1671d78cb6d987f07cbe5d350d8143255977a889160c4d90fcc3c
+ checksum: 10c0/ab3ebc6598e10dc11419d4ed675c9ca714a387481466b10e8a6f3f65d8d9c9237e2826f2505280a739cf4cbcf511cb288eeec22b5c9c63286fc5a2e4f97e78cf
languageName: node
linkType: hard
"jws@npm:^4.0.0":
- version: 4.0.0
- resolution: "jws@npm:4.0.0"
+ version: 4.0.1
+ resolution: "jws@npm:4.0.1"
dependencies:
- jwa: "npm:^2.0.0"
+ jwa: "npm:^2.0.1"
safe-buffer: "npm:^5.0.1"
- checksum: 10c0/f1ca77ea5451e8dc5ee219cb7053b8a4f1254a79cb22417a2e1043c1eb8a569ae118c68f24d72a589e8a3dd1824697f47d6bd4fb4bebb93a3bdf53545e721661
+ checksum: 10c0/6be1ed93023aef570ccc5ea8d162b065840f3ef12f0d1bb3114cade844de7a357d5dc558201d9a65101e70885a6fa56b17462f520e6b0d426195510618a154d0
languageName: node
linkType: hard