Merge branch 'main' into v2

fullex 2025-12-26 14:18:06 +08:00
commit f84a2588fd
8 changed files with 119 additions and 48 deletions

View File

@@ -135,38 +135,68 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
 releaseInfo:
 releaseNotes: |
 <!--LANG:en-->
-Cherry Studio 1.7.6 - New Models & MCP Enhancements
+Cherry Studio 1.7.7 - New Models & UI Improvements
-This release adds support for new AI models and includes a new MCP server for memory management.
+This release adds new AI model support, OpenRouter integration, and UI redesigns.
 ✨ New Features
-- [Models] Add support for Xiaomi MiMo model
-- [Models] Add support for Gemini 3 Flash and Pro model detection
-- [Models] Add support for Volcengine Doubao-Seed-1.8 model
-- [MCP] Add Nowledge Mem builtin MCP server for memory management
-- [Settings] Add default reasoning effort option to resolve confusion between undefined and none
+- [Models] Add GLM-4.7 and MiniMax-M2.1 model support
+- [Provider] Add OpenRouter provider support
+- [OVMS] Upgrade to 2025.4 with Qwen3-4B-int4-ov preset model
+- [OVMS] Close OVMS process when app quits
+- [Search] Show keyword-adjacent snippets in history search
+- [Painting] Add extend_params support for DMX painting
+- [UI] Add MCP logo and replace Hammer icon
+🎨 UI Improvements
+- [Notes] Move notes settings to popup in NotesPage for quick access
+- [WebSearch] Redesign settings with two-column layout and "Set as Default" button
+- [Display] Improve font selector for long font names
+- [Transfer] Rename LanDrop to LanTransfer
+🐛 Bug Fixes
+- [Azure] Restore deployment-based URLs for non-v1 apiVersion
+- [Translation] Disable reasoning mode for translation to improve efficiency
+- [Image] Update API path for image generation requests in OpenAIBaseClient
+- [Windows] Auto-discover and persist Git Bash path on Windows for scoop users
+- [API] Correct aihubmix Anthropic API path
+- [OpenRouter] Support GPT-5.1/5.2 reasoning effort 'none' and improve error handling
+- [Thinking] Fix interleaved thinking support
+- [Memory] Fix retrieval issues and enable database backup
+- [Settings] Update default assistant settings to disable temperature
+- [OpenAI] Add persistent server configuration support
+- [Azure] Normalize Azure endpoint
+- [MCP] Check system npx/uvx before falling back to bundled binaries
+- [Prompt] Improve language instruction clarity
+- [Models] Include GPT5.2 series in verbosity check
+- [URL] Enhance urlContext validation for supported providers and models
 <!--LANG:zh-CN-->
-Cherry Studio 1.7.6 - 新模型与 MCP 增强
+Cherry Studio 1.7.7 - 新模型与界面改进
-本次更新添加了多个新 AI 模型支持,并新增记忆管理 MCP 服务器。
+本次更新添加了新 AI 模型支持、OpenRouter 集成以及界面重新设计
 ✨ 新功能
-- [模型] 添加小米 MiMo 模型支持
-- [模型] 添加 Gemini 3 Flash 和 Pro 模型检测支持
-- [模型] 添加火山引擎 Doubao-Seed-1.8 模型支持
-- [MCP] 新增 Nowledge Mem 内置 MCP 服务器,用于记忆管理
-- [设置] 添加默认推理强度选项,解决 undefined 和 none 之间的混淆
+- [模型] 添加 GLM-4.7 和 MiniMax-M2.1 模型支持
+- [服务商] 添加 OpenRouter 服务商支持
+- [OVMS] 升级至 2025.4,新增 Qwen3-4B-int4-ov 预设模型
+- [OVMS] 应用退出时关闭 OVMS 进程
+- [搜索] 历史搜索显示关键词上下文片段
+- [绘图] DMX 绘图添加扩展参数支持
+- [界面] 添加 MCP 图标并替换锤子图标
+🎨 界面改进
+- [笔记] 将笔记设置移至笔记页弹窗,快速访问无需离开当前页面
+- [网页搜索] 采用两栏布局重新设计设置界面,添加"设为默认"按钮
+- [显示] 改进长字体名称的字体选择器
+- [传输] LanDrop 重命名为 LanTransfer
+🐛 问题修复
+- [Azure] 修复非 v1 apiVersion 的部署 URL 问题
+- [翻译] 禁用翻译时的推理模式以提高效率
+- [图像] 更新 OpenAIBaseClient 中图像生成请求的 API 路径
+- [Windows] 自动发现并保存 Windows scoop 用户的 Git Bash 路径
+- [API] 修复 aihubmix Anthropic API 路径
+- [OpenRouter] 支持 GPT-5.1/5.2 reasoning effort 'none' 并改进错误处理
+- [思考] 修复交错思考支持
+- [记忆] 修复检索问题并启用数据库备份
+- [设置] 更新默认助手设置禁用温度
+- [OpenAI] 添加持久化服务器配置支持
+- [Azure] 规范化 Azure 端点
+- [MCP] 优先检查系统 npx/uvx 再回退到内置二进制文件
+- [提示词] 改进语言指令清晰度
+- [模型] GPT5.2 系列添加到 verbosity 检查
+- [URL] 增强 urlContext 对支持的服务商和模型的验证
 <!--LANG:END-->
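
One release-note entry above, "[MCP] Check system npx/uvx before falling back to bundled binaries", names a lookup order without showing it. A minimal TypeScript sketch of that order, assuming a hypothetical resolveCommand helper and bundled-binary directory (none of these names come from the diff):

import { execFileSync } from 'node:child_process'
import * as fs from 'node:fs'
import * as path from 'node:path'

// Hypothetical helper: prefer a binary already on the user's PATH,
// fall back to a copy shipped with the app.
function resolveCommand(name: string, bundledDir: string): string {
  const probe = process.platform === 'win32' ? 'where' : 'which'
  try {
    // `which`/`where` print the resolved path and exit non-zero on a miss.
    const found = execFileSync(probe, [name], { encoding: 'utf8' }).split(/\r?\n/)[0].trim()
    if (found) return found
  } catch {
    // not on PATH; fall through to the bundled copy
  }
  const bundled = path.join(bundledDir, process.platform === 'win32' ? `${name}.cmd` : name)
  if (fs.existsSync(bundled)) return bundled
  throw new Error(`${name} not found on PATH or in ${bundledDir}`)
}

// e.g. resolveCommand('npx', '/opt/cherry-studio/resources/bin')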

View File

@@ -2,7 +2,7 @@ import { loggerService } from '@logger'
 import {
 checkName,
 getFilesDir,
-getFileType,
+getFileType as getFileTypeByExt,
 getName,
 getNotesDir,
 getTempDir,
@@ -11,13 +11,13 @@
 } from '@main/utils/file'
 import { documentExts, imageExts, KB, MB } from '@shared/config/constant'
 import type { FileMetadata, NotesTreeNode } from '@types'
+import { FileTypes } from '@types'
 import chardet from 'chardet'
 import type { FSWatcher } from 'chokidar'
 import chokidar from 'chokidar'
 import * as crypto from 'crypto'
 import type { OpenDialogOptions, OpenDialogReturnValue, SaveDialogOptions, SaveDialogReturnValue } from 'electron'
-import { app } from 'electron'
-import { dialog, net, shell } from 'electron'
+import { app, dialog, net, shell } from 'electron'
 import * as fs from 'fs'
 import { writeFileSync } from 'fs'
 import { readFile } from 'fs/promises'
@@ -185,7 +185,7 @@ class FileStorage {
 })
 }
-findDuplicateFile = async (filePath: string): Promise<FileMetadata | null> => {
+private findDuplicateFile = async (filePath: string): Promise<FileMetadata | null> => {
 const stats = fs.statSync(filePath)
 logger.debug(`stats: ${stats}, filePath: ${filePath}`)
 const fileSize = stats.size
@@ -204,6 +204,8 @@ class FileStorage {
 if (originalHash === storedHash) {
 const ext = path.extname(file)
 const id = path.basename(file, ext)
+const type = await this.getFileType(filePath)
 return {
 id,
 origin_name: file,
@@ -212,7 +214,7 @@ class FileStorage {
 created_at: storedStats.birthtime.toISOString(),
 size: storedStats.size,
 ext,
-type: getFileType(ext),
+type,
 count: 2
 }
 }
@@ -222,6 +224,13 @@ class FileStorage {
 return null
 }
+public getFileType = async (filePath: string): Promise<FileTypes> => {
+const ext = path.extname(filePath)
+const fileType = getFileTypeByExt(ext)
+return fileType === FileTypes.OTHER && (await this._isTextFile(filePath)) ? FileTypes.TEXT : fileType
+}
 public selectFile = async (
 _: Electron.IpcMainInvokeEvent,
 options?: OpenDialogOptions
@@ -241,7 +250,7 @@ class FileStorage {
 const fileMetadataPromises = result.filePaths.map(async (filePath) => {
 const stats = fs.statSync(filePath)
 const ext = path.extname(filePath)
-const fileType = getFileType(ext)
+const fileType = await this.getFileType(filePath)
 return {
 id: uuidv4(),
@@ -307,7 +316,7 @@ class FileStorage {
 }
 const stats = await fs.promises.stat(destPath)
-const fileType = getFileType(ext)
+const fileType = await this.getFileType(destPath)
 const fileMetadata: FileMetadata = {
 id: uuid,
@@ -332,8 +341,7 @@ class FileStorage {
 }
 const stats = fs.statSync(filePath)
-const ext = path.extname(filePath)
-const fileType = getFileType(ext)
+const fileType = await this.getFileType(filePath)
 return {
 id: uuidv4(),
@@ -342,7 +350,7 @@ class FileStorage {
 path: filePath,
 created_at: stats.birthtime.toISOString(),
 size: stats.size,
-ext: ext,
+ext: path.extname(filePath),
 type: fileType,
 count: 1
 }
@@ -690,7 +698,7 @@ class FileStorage {
 created_at: new Date().toISOString(),
 size: buffer.length,
 ext: ext.slice(1),
-type: getFileType(ext),
+type: getFileTypeByExt(ext),
 count: 1
 }
 } catch (error) {
@@ -740,7 +748,7 @@ class FileStorage {
 created_at: new Date().toISOString(),
 size: stats.size,
 ext: ext.slice(1),
-type: getFileType(ext),
+type: getFileTypeByExt(ext),
 count: 1
 }
 } catch (error) {
@@ -1317,7 +1325,7 @@ class FileStorage {
 await fs.promises.writeFile(destPath, buffer)
 const stats = await fs.promises.stat(destPath)
-const fileType = getFileType(ext)
+const fileType = await this.getFileType(destPath)
 return {
 id: uuid,
@@ -1604,6 +1612,10 @@ class FileStorage {
 }
 public isTextFile = async (_: Electron.IpcMainInvokeEvent, filePath: string): Promise<boolean> => {
+return this._isTextFile(filePath)
+}
+private _isTextFile = async (filePath: string): Promise<boolean> => {
 try {
 const isBinary = await isBinaryFile(filePath)
 if (isBinary) {
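
The last hunk above is truncated right after the isBinaryFile check, so here is a condensed, self-contained sketch of how the new pair composes, assuming the isbinaryfile package that the check appears to use; the stand-in enum, the getFileTypeByExt stub, and the catch behavior are illustrative assumptions:

import { isBinaryFile } from 'isbinaryfile'
import * as path from 'node:path'

// Stand-ins for the @types enum and the @main/utils/file helper.
enum FileTypes { TEXT = 'text', OTHER = 'other' }
const getFileTypeByExt = (ext: string): FileTypes =>
  ['.txt', '.md', '.json'].includes(ext.toLowerCase()) ? FileTypes.TEXT : FileTypes.OTHER

class FileTypeSketch {
  // Content sniff: treat a file as text when the binary heuristic rejects it.
  // Assumption: unreadable files count as non-text.
  private _isTextFile = async (filePath: string): Promise<boolean> => {
    try {
      return !(await isBinaryFile(filePath))
    } catch {
      return false
    }
  }

  // Extension lookup first; only files that map to OTHER pay for the content
  // sniff, which is what promotes extension-less text files (LICENSE,
  // Makefile) to TEXT instead of OTHER.
  public getFileType = async (filePath: string): Promise<FileTypes> => {
    const fileType = getFileTypeByExt(path.extname(filePath))
    return fileType === FileTypes.OTHER && (await this._isTextFile(filePath)) ? FileTypes.TEXT : fileType
  }
}

Note that the @@ -690 and @@ -740 hunks keep the synchronous getFileTypeByExt(ext); presumably deliberate, since those call sites write the file from a buffer whose extension is already trusted and skip the extra content read.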

View File

@@ -107,7 +107,7 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
 type: 'openai',
 apiKey: '',
 apiHost: 'https://aihubmix.com',
-anthropicApiHost: 'https://aihubmix.com/anthropic',
+anthropicApiHost: 'https://aihubmix.com',
 models: SYSTEM_MODELS.aihubmix,
 isSystem: true,
 enabled: false
@@ -289,7 +289,7 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
 ollama: {
 id: 'ollama',
 name: 'Ollama',
-type: 'openai',
+type: 'ollama',
 apiKey: '',
 apiHost: 'http://localhost:11434',
 models: SYSTEM_MODELS.ollama,
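
Two small config changes with larger footprints: aihubmix's anthropicApiHost loses its '/anthropic' suffix (pairing with the "[API] Correct aihubmix Anthropic API path" release note), and the built-in Ollama provider gets a dedicated 'ollama' type (picked up by migration 190 below). The suffix removal makes sense if the client now appends the Anthropic path segment itself; that is an assumption, as is the join helper in this sketch:

// Hypothetical join: the client owns the '/anthropic' path segment, so the
// configured host must stay bare.
const buildAnthropicUrl = (apiHost: string): string =>
  `${apiHost.replace(/\/+$/, '')}/anthropic/v1/messages`

buildAnthropicUrl('https://aihubmix.com')
// => 'https://aihubmix.com/anthropic/v1/messages'
buildAnthropicUrl('https://aihubmix.com/anthropic')
// => 'https://aihubmix.com/anthropic/anthropic/v1/messages' — the doubled
// segment the old config value would have produced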

View File

@@ -136,11 +136,14 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
 let model = ''
 let priceModel = ''
 let image_size = ''
+let extend_params = {}
 for (const provider of Object.keys(modelGroups)) {
 if (modelGroups[provider] && modelGroups[provider].length > 0) {
 model = modelGroups[provider][0].id
 priceModel = modelGroups[provider][0].price
 image_size = modelGroups[provider][0].image_sizes[0].value
+extend_params = modelGroups[provider][0].extend_params
 break
 }
 }
@@ -149,7 +152,8 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
 model,
 priceModel,
 image_size,
-modelGroups
+modelGroups,
+extend_params
 }
 }
@@ -158,7 +162,7 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
 const generationMode = params?.generationMode || painting?.generationMode || MODEOPTIONS[0].value
-const { model, priceModel, image_size, modelGroups } = getFirstModelInfo(generationMode)
+const { model, priceModel, image_size, modelGroups, extend_params } = getFirstModelInfo(generationMode)
 return {
 ...DEFAULT_PAINTING,
@@ -169,6 +173,7 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
 modelGroups,
 priceModel,
 image_size,
+extend_params,
 ...params
 }
 }
@@ -186,7 +191,12 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
 const onSelectModel = (modelId: string) => {
 const model = allModels.find((m) => m.id === modelId)
 if (model) {
-updatePaintingState({ model: modelId, priceModel: model.price, image_size: model.image_sizes[0].value })
+updatePaintingState({
+model: modelId,
+priceModel: model.price,
+image_size: model.image_sizes[0].value,
+extend_params: model.extend_params
+})
 }
 }
@@ -289,7 +299,7 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
 clearImages()
-const { model, priceModel, image_size, modelGroups } = getFirstModelInfo(v)
+const { model, priceModel, image_size, modelGroups, extend_params } = getFirstModelInfo(v)
 setModelOptions(modelGroups)
@@ -305,9 +315,10 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
 // 否则更新当前painting
 updatePaintingState({
 generationMode: v,
-model: model,
-image_size: image_size,
-priceModel: priceModel
+model,
+image_size,
+priceModel,
+extend_params
 })
 }
 }
@@ -351,7 +362,8 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
 const params = {
 prompt,
 model: painting.model,
-n: painting.n
+n: painting.n,
+...painting?.extend_params
 }
 const headerExpand = {
@@ -393,7 +405,8 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
 const params = {
 prompt,
 n: painting.n,
-model: painting.model
+model: painting.model,
+...painting?.extend_params
 }
 if (painting.image_size) {
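
The thread through this file: every model entry now carries an optional extend_params object, getFirstModelInfo and onSelectModel keep it in painting state next to model and image_size, and the request builders spread it last so model-specific keys ride along without per-model branching. A minimal standalone sketch of that flow (the DmxModel shape beyond the fields visible in the diff is an assumption):

// One entry from the fetched model-group JSON.
interface DmxModel {
  id: string
  price: string
  image_sizes: { value: string }[]
  extend_params?: Record<string, unknown>
}

// Fixed fields first, then the model's extend_params spread last so they can
// add keys (or override the fixed ones) without the builder knowing them.
function buildGenerationParams(prompt: string, n: number, model: DmxModel) {
  return {
    prompt,
    n,
    model: model.id,
    ...model.extend_params
  }
}

// A model whose JSON entry includes extend_params: { watermark: false }
// yields { prompt, n, model, watermark: false } with no code change here.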

View File

@@ -84,7 +84,7 @@ export const MODEOPTIONS = [
 // 获取模型分组数据
 export const GetModelGroup = async (): Promise<DMXApiModelGroups> => {
 try {
-const response = await fetch('https://dmxapi.cn/cherry_painting_models_v2.json')
+const response = await fetch('https://dmxapi.cn/cherry_painting_models_v3.json')
 if (response.ok) {
 const data = await response.json()

View File

@@ -71,7 +71,7 @@ const persistedReducer = persistReducer(
 {
 key: 'cherry-studio',
 storage,
-version: 189,
+version: 190,
 blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs', 'toolPermissions'],
 migrate
 },

View File

@@ -3103,6 +3103,21 @@ const migrateConfig = {
 logger.error('migrate 189 error', error as Error)
 return state
 }
-}
+},
+// 1.7.8
+'190': (state: RootState) => {
+try {
+state.llm.providers.forEach((provider) => {
+if (provider.id === SystemProviderIds.ollama) {
+provider.type = 'ollama'
+}
+})
+logger.info('migrate 190 success')
+return state
+} catch (error) {
+logger.error('migrate 190 error', error as Error)
+return state
+}
+}
 }
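
The version bump to 190 in the store diff above and this new migrateConfig entry travel together: redux-persist replays every migration keyed above the persisted version during rehydration, which is how an Ollama provider stored with type: 'openai' gets rewritten to match the new default. A standalone sketch of the mechanism, assuming redux-persist's createMigrate and a pared-down state shape:

import { createMigrate } from 'redux-persist'
import type { MigrationManifest } from 'redux-persist'

// Pared-down stand-in for the app's RootState.
interface LlmState {
  providers: { id: string; type: string }[]
}

const migrations: MigrationManifest = {
  // Keyed by the version that introduces the change. Without this, state
  // persisted under version 189 would keep shadowing the new provider default.
  190: (state: any) => {
    const llm = state.llm as LlmState
    llm.providers.forEach((provider) => {
      if (provider.id === 'ollama') provider.type = 'ollama'
    })
    return state
  }
}

// Wired into persistReducer alongside { version: 190 }; redux-persist runs
// every migration whose key exceeds the version found in storage.
const migrate = createMigrate(migrations, { debug: false })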

View File

@@ -395,6 +395,7 @@ export interface DmxapiPainting extends PaintingParams {
 autoCreate?: boolean
 generationMode?: generationModeType
 priceModel?: string
+extend_params?: Record<string, unknown>
 }
 export interface TokenFluxPainting extends PaintingParams {