Mirror of https://github.com/CherryHQ/cherry-studio.git, synced 2026-01-12 08:59:02 +08:00

Merge remote-tracking branch 'origin/main' into feat/mcp-hub

Commit b2041a2b2a
@@ -134,38 +134,68 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
 releaseInfo:
   releaseNotes: |
     <!--LANG:en-->
-    Cherry Studio 1.7.6 - New Models & MCP Enhancements
+    Cherry Studio 1.7.7 - New Models & UI Improvements

-    This release adds support for new AI models and includes a new MCP server for memory management.
+    This release adds new AI model support, OpenRouter integration, and UI redesigns.

     ✨ New Features
     - [Models] Add support for Xiaomi MiMo model
     - [Models] Add support for Gemini 3 Flash and Pro model detection
     - [Models] Add support for Volcengine Doubao-Seed-1.8 model
     - [MCP] Add Nowledge Mem builtin MCP server for memory management
     - [Settings] Add default reasoning effort option to resolve confusion between undefined and none
     - [Models] Add GLM-4.7 and MiniMax-M2.1 model support
     - [Provider] Add OpenRouter provider support
     - [OVMS] Upgrade to 2025.4 with Qwen3-4B-int4-ov preset model
     - [OVMS] Close OVMS process when app quits
     - [Search] Show keyword-adjacent snippets in history search
     - [Painting] Add extend_params support for DMX painting
     - [UI] Add MCP logo and replace Hammer icon

     🎨 UI Improvements
     - [Notes] Move notes settings to popup in NotesPage for quick access
     - [WebSearch] Redesign settings with two-column layout and "Set as Default" button
     - [Display] Improve font selector for long font names
     - [Transfer] Rename LanDrop to LanTransfer

     🐛 Bug Fixes
     - [Azure] Restore deployment-based URLs for non-v1 apiVersion
     - [Translation] Disable reasoning mode for translation to improve efficiency
     - [Image] Update API path for image generation requests in OpenAIBaseClient
     - [Windows] Auto-discover and persist Git Bash path on Windows for scoop users
     - [API] Correct aihubmix Anthropic API path
     - [OpenRouter] Support GPT-5.1/5.2 reasoning effort 'none' and improve error handling
     - [Thinking] Fix interleaved thinking support
     - [Memory] Fix retrieval issues and enable database backup
     - [Settings] Update default assistant settings to disable temperature
     - [OpenAI] Add persistent server configuration support
     - [Azure] Normalize Azure endpoint
     - [MCP] Check system npx/uvx before falling back to bundled binaries
     - [Prompt] Improve language instruction clarity
     - [Models] Include GPT5.2 series in verbosity check
     - [URL] Enhance urlContext validation for supported providers and models

     <!--LANG:zh-CN-->
-    Cherry Studio 1.7.6 - 新模型与 MCP 增强
+    Cherry Studio 1.7.7 - 新模型与界面改进

-    本次更新添加了多个新 AI 模型支持,并新增记忆管理 MCP 服务器。
+    本次更新添加了新 AI 模型支持、OpenRouter 集成以及界面重新设计。

     ✨ 新功能
     - [模型] 添加小米 MiMo 模型支持
     - [模型] 添加 Gemini 3 Flash 和 Pro 模型检测支持
     - [模型] 添加火山引擎 Doubao-Seed-1.8 模型支持
     - [MCP] 新增 Nowledge Mem 内置 MCP 服务器,用于记忆管理
     - [设置] 添加默认推理强度选项,解决 undefined 和 none 之间的混淆
     - [模型] 添加 GLM-4.7 和 MiniMax-M2.1 模型支持
     - [服务商] 添加 OpenRouter 服务商支持
     - [OVMS] 升级至 2025.4,新增 Qwen3-4B-int4-ov 预设模型
     - [OVMS] 应用退出时关闭 OVMS 进程
     - [搜索] 历史搜索显示关键词上下文片段
     - [绘图] DMX 绘图添加扩展参数支持
     - [界面] 添加 MCP 图标并替换锤子图标

     🎨 界面改进
     - [笔记] 将笔记设置移至笔记页弹窗,快速访问无需离开当前页面
     - [网页搜索] 采用两栏布局重新设计设置界面,添加"设为默认"按钮
     - [显示] 改进长字体名称的字体选择器
     - [传输] LanDrop 重命名为 LanTransfer

     🐛 问题修复
     - [Azure] 修复非 v1 apiVersion 的部署 URL 问题
     - [翻译] 禁用翻译时的推理模式以提高效率
     - [图像] 更新 OpenAIBaseClient 中图像生成请求的 API 路径
     - [Windows] 自动发现并保存 Windows scoop 用户的 Git Bash 路径
     - [API] 修复 aihubmix Anthropic API 路径
     - [OpenRouter] 支持 GPT-5.1/5.2 reasoning effort 'none' 并改进错误处理
     - [思考] 修复交错思考支持
     - [记忆] 修复检索问题并启用数据库备份
     - [设置] 更新默认助手设置禁用温度
     - [OpenAI] 添加持久化服务器配置支持
     - [Azure] 规范化 Azure 端点
     - [MCP] 优先检查系统 npx/uvx 再回退到内置二进制文件
     - [提示词] 改进语言指令清晰度
     - [模型] GPT5.2 系列添加到 verbosity 检查
     - [URL] 增强 urlContext 对支持的服务商和模型的验证
     <!--LANG:END-->
@@ -1,6 +1,6 @@
 {
   "name": "CherryStudio",
-  "version": "1.7.6",
+  "version": "1.7.7",
   "private": true,
   "description": "A powerful AI assistant for producer.",
   "main": "./out/main/index.js",
@@ -37,6 +37,7 @@ import { versionService } from './services/VersionService'
 import { windowService } from './services/WindowService'
 import { initWebviewHotkeys } from './services/WebviewService'
 import { runAsyncFunction } from './utils'
+import { ovmsManager } from './services/OvmsManager'

 const logger = loggerService.withContext('MainEntry')
@@ -247,12 +248,15 @@ if (!app.requestSingleInstanceLock()) {

 app.on('will-quit', async () => {
+  // Simple resource cleanup; must not block the quit flow
+  await ovmsManager.stopOvms()
+
   try {
     await mcpService.cleanup()
     await apiServerService.stop()
   } catch (error) {
     logger.warn('Error cleaning up MCP service:', error as Error)
   }

   // finish the logger
   logger.finish()
 })
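The new stopOvms() call above runs before the existing try/catch teardown and before the logger flush. As a point of reference, a minimal standalone sketch of this shutdown ordering — run each cleanup step in sequence, and let no single failure block the rest — with illustrative names only, not the project's API:

async function shutdown(
  steps: Array<{ name: string; run: () => Promise<void> }>,
  warn: (msg: string, err: Error) => void
): Promise<void> {
  for (const step of steps) {
    try {
      await step.run() // a failing step must not block the remaining cleanup
    } catch (error) {
      warn(`Error cleaning up ${step.name}:`, error as Error)
    }
  }
}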
@@ -59,7 +59,7 @@ import NotificationService from './services/NotificationService'
 import * as NutstoreService from './services/NutstoreService'
 import ObsidianVaultService from './services/ObsidianVaultService'
 import { ocrService } from './services/ocr/OcrService'
-import OvmsManager from './services/OvmsManager'
+import { ovmsManager } from './services/OvmsManager'
 import powerMonitorService from './services/PowerMonitorService'
 import { proxyManager } from './services/ProxyManager'
 import { pythonService } from './services/PythonService'
@@ -107,7 +107,6 @@ const obsidianVaultService = new ObsidianVaultService()
 const vertexAIService = VertexAIService.getInstance()
 const memoryService = MemoryService.getInstance()
 const dxtService = new DxtService()
-const ovmsManager = new OvmsManager()
 const pluginService = PluginService.getInstance()

 function normalizeError(error: unknown): Error {
@@ -102,32 +102,10 @@ class OvmsManager {
    */
   public async stopOvms(): Promise<{ success: boolean; message?: string }> {
     try {
-      // Check if OVMS process is running
-      const psCommand = `Get-Process -Name "ovms" -ErrorAction SilentlyContinue | Select-Object Id, Path | ConvertTo-Json`
-      const { stdout } = await execAsync(`powershell -Command "${psCommand}"`)
-
-      if (!stdout.trim()) {
-        logger.info('OVMS process is not running')
-        return { success: true, message: 'OVMS process is not running' }
-      }
-
-      const processes = JSON.parse(stdout)
-      const processList = Array.isArray(processes) ? processes : [processes]
-
-      if (processList.length === 0) {
-        logger.info('OVMS process is not running')
-        return { success: true, message: 'OVMS process is not running' }
-      }
-
-      // Terminate all OVMS processes using terminalProcess
-      for (const process of processList) {
-        const result = await this.terminalProcess(process.Id)
-        if (!result.success) {
-          logger.error(`Failed to terminate OVMS process with PID: ${process.Id}, ${result.message}`)
-          return { success: false, message: `Failed to terminate OVMS process: ${result.message}` }
-        }
-        logger.info(`Terminated OVMS process with PID: ${process.Id}`)
-      }
+      // close the OVMS process
+      await execAsync(
+        `powershell -Command "Get-WmiObject Win32_Process | Where-Object { $_.CommandLine -like 'ovms.exe*' } | ForEach-Object { Stop-Process -Id $_.ProcessId -Force }"`
+      )

       // Reset the ovms instance
       this.ovms = null
@@ -584,4 +562,5 @@ class OvmsManager {
   }
 }

 export default OvmsManager
+// Export singleton instance
+export const ovmsManager = new OvmsManager()
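For reference, the same PowerShell/WMI termination call as a self-contained sketch (execAsync is assumed to be the promisified child_process.exec, as elsewhere in this file; the command string is copied from the diff):

import { exec } from 'node:child_process'
import { promisify } from 'node:util'

const execAsync = promisify(exec)

// Windows-only: force-stop every process whose command line starts with "ovms.exe".
async function stopAllOvmsProcesses(): Promise<void> {
  await execAsync(
    `powershell -Command "Get-WmiObject Win32_Process | Where-Object { $_.CommandLine -like 'ovms.exe*' } | ForEach-Object { Stop-Process -Id $_.ProcessId -Force }"`
  )
}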
@@ -14,7 +14,6 @@ import {
   isDoubaoSeedAfter251015,
   isDoubaoThinkingAutoModel,
   isGemini3ThinkingTokenModel,
-  isGPT51SeriesModel,
   isGrok4FastReasoningModel,
   isOpenAIDeepResearchModel,
   isOpenAIModel,
@@ -32,7 +31,8 @@ import {
   isSupportedThinkingTokenMiMoModel,
   isSupportedThinkingTokenModel,
   isSupportedThinkingTokenQwenModel,
-  isSupportedThinkingTokenZhipuModel
+  isSupportedThinkingTokenZhipuModel,
+  isSupportNoneReasoningEffortModel
 } from '@renderer/config/models'
 import { getStoreSetting } from '@renderer/hooks/useSettings'
 import { getAssistantSettings, getProviderByModel } from '@renderer/services/AssistantService'
@@ -74,9 +74,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
   if (reasoningEffort === 'none') {
     // openrouter: use reasoning
     if (model.provider === SystemProviderIds.openrouter) {
-      // 'none' is not an available value for effort for now.
-      // I think they should resolve this issue soon, so I'll just go ahead and use this value.
-      if (isGPT51SeriesModel(model) && reasoningEffort === 'none') {
+      if (isSupportNoneReasoningEffortModel(model) && reasoningEffort === 'none') {
         return { reasoning: { effort: 'none' } }
       }
       return { reasoning: { enabled: false, exclude: true } }
@@ -120,8 +118,8 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
     return { thinking: { type: 'disabled' } }
   }

-  // Specially for GPT-5.1. Suppose this is a OpenAI Compatible provider
-  if (isGPT51SeriesModel(model)) {
+  // GPT 5.1, GPT 5.2, or newer
+  if (isSupportNoneReasoningEffortModel(model)) {
     return {
       reasoningEffort: 'none'
     }
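Taken together, these branches translate a user-selected effort of 'none' differently per provider. A condensed sketch of that decision table (the types and the thinking-disabled fallback are simplified from the surrounding code, not a drop-in replacement):

type NonePayload =
  | { reasoning: { effort: 'none' } }
  | { reasoning: { enabled: false; exclude: true } }
  | { reasoningEffort: 'none' }
  | { thinking: { type: 'disabled' } }

function mapNoneEffort(provider: string, supportsNone: boolean): NonePayload {
  if (provider === 'openrouter') {
    // OpenRouter accepts effort 'none' only on models that support it;
    // otherwise reasoning is disabled and excluded from the request.
    return supportsNone ? { reasoning: { effort: 'none' } } : { reasoning: { enabled: false, exclude: true } }
  }
  // OpenAI-compatible GPT-5.1/5.2-style models take reasoningEffort: 'none';
  // other cases fall back to an explicit thinking-disabled payload.
  return supportsNone ? { reasoningEffort: 'none' } : { thinking: { type: 'disabled' } }
}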
src/renderer/src/config/models/__tests__/openai.test.ts (new file, 139 lines)
@@ -0,0 +1,139 @@
import type { Model } from '@renderer/types'
import { describe, expect, it, vi } from 'vitest'

import { isSupportNoneReasoningEffortModel } from '../openai'

// Mock store and settings to avoid initialization issues
vi.mock('@renderer/store', () => ({
  __esModule: true,
  default: {
    getState: () => ({
      llm: { providers: [] },
      settings: {}
    })
  }
}))

vi.mock('@renderer/hooks/useStore', () => ({
  getStoreProviders: vi.fn(() => [])
}))

const createModel = (overrides: Partial<Model> = {}): Model => ({
  id: 'gpt-4o',
  name: 'gpt-4o',
  provider: 'openai',
  group: 'OpenAI',
  ...overrides
})

describe('OpenAI Model Detection', () => {
  describe('isSupportNoneReasoningEffortModel', () => {
    describe('should return true for GPT-5.1 and GPT-5.2 reasoning models', () => {
      it('returns true for GPT-5.1 base model', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1' }))).toBe(true)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.1' }))).toBe(true)
      })

      it('returns true for GPT-5.1 mini model', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-mini' }))).toBe(true)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-mini-preview' }))).toBe(true)
      })

      it('returns true for GPT-5.1 preview model', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-preview' }))).toBe(true)
      })

      it('returns true for GPT-5.2 base model', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2' }))).toBe(true)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.2' }))).toBe(true)
      })

      it('returns true for GPT-5.2 mini model', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2-mini' }))).toBe(true)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2-mini-preview' }))).toBe(true)
      })

      it('returns true for GPT-5.2 preview model', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2-preview' }))).toBe(true)
      })
    })

    describe('should return false for pro variants', () => {
      it('returns false for GPT-5.1-pro models', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-pro' }))).toBe(false)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.1-Pro' }))).toBe(false)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-pro-preview' }))).toBe(false)
      })

      it('returns false for GPT-5.2-pro models', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2-pro' }))).toBe(false)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.2-Pro' }))).toBe(false)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2-pro-preview' }))).toBe(false)
      })
    })

    describe('should return false for chat variants', () => {
      it('returns false for GPT-5.1-chat models', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-chat' }))).toBe(false)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.1-Chat' }))).toBe(false)
      })

      it('returns false for GPT-5.2-chat models', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2-chat' }))).toBe(false)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.2-Chat' }))).toBe(false)
      })
    })

    describe('should return false for GPT-5 series (non-5.1/5.2)', () => {
      it('returns false for GPT-5 base model', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5' }))).toBe(false)
      })

      it('returns false for GPT-5 pro model', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5-pro' }))).toBe(false)
      })

      it('returns false for GPT-5 preview model', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5-preview' }))).toBe(false)
      })
    })

    describe('should return false for other OpenAI models', () => {
      it('returns false for GPT-4 models', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-4o' }))).toBe(false)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-4-turbo' }))).toBe(false)
      })

      it('returns false for o1 models', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'o1' }))).toBe(false)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'o1-mini' }))).toBe(false)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'o1-preview' }))).toBe(false)
      })

      it('returns false for o3 models', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'o3' }))).toBe(false)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'o3-mini' }))).toBe(false)
      })
    })

    describe('edge cases', () => {
      it('handles models with version suffixes', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-2025-01-01' }))).toBe(true)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2-latest' }))).toBe(true)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-pro-2025-01-01' }))).toBe(false)
      })

      it('handles models with OpenRouter prefixes', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'openai/gpt-5.1' }))).toBe(true)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'openai/gpt-5.2-mini' }))).toBe(true)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'openai/gpt-5.1-pro' }))).toBe(false)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'openai/gpt-5.1-chat' }))).toBe(false)
      })

      it('handles mixed case with chat and pro', () => {
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.1-CHAT' }))).toBe(false)
        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.2-PRO' }))).toBe(false)
      })
    })
  })
})
@@ -77,6 +77,34 @@ export function isSupportVerbosityModel(model: Model): boolean {
   )
 }

+/**
+ * Determines if a model supports the "none" reasoning effort parameter.
+ *
+ * This applies to GPT-5.1 and GPT-5.2 series reasoning models (non-chat, non-pro variants).
+ * These models allow setting reasoning_effort to "none" to skip reasoning steps.
+ *
+ * @param model - The model to check
+ * @returns true if the model supports "none" reasoning effort, false otherwise
+ *
+ * @example
+ * ```ts
+ * // Returns true
+ * isSupportNoneReasoningEffortModel({ id: 'gpt-5.1', provider: 'openai' })
+ * isSupportNoneReasoningEffortModel({ id: 'gpt-5.2-mini', provider: 'openai' })
+ *
+ * // Returns false
+ * isSupportNoneReasoningEffortModel({ id: 'gpt-5.1-pro', provider: 'openai' })
+ * isSupportNoneReasoningEffortModel({ id: 'gpt-5.1-chat', provider: 'openai' })
+ * isSupportNoneReasoningEffortModel({ id: 'gpt-5-pro', provider: 'openai' })
+ * ```
+ */
+export function isSupportNoneReasoningEffortModel(model: Model): boolean {
+  const modelId = getLowerBaseModelName(model.id)
+  return (
+    (isGPT51SeriesModel(model) || isGPT52SeriesModel(model)) && !modelId.includes('chat') && !modelId.includes('pro')
+  )
+}
+
 export function isOpenAIChatCompletionOnlyModel(model: Model): boolean {
   if (!model) {
     return false
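A small usage sketch of the new predicate — building the effort options offered for a model. The option list here is illustrative; the real one lives in the settings UI:

import type { Model } from '@renderer/types'
import { isSupportNoneReasoningEffortModel } from '@renderer/config/models'

type Effort = 'none' | 'low' | 'medium' | 'high'

function availableEfforts(model: Model): Effort[] {
  const base: Effort[] = ['low', 'medium', 'high']
  // 'none' is only offered where the API accepts reasoning_effort: "none".
  return isSupportNoneReasoningEffortModel(model) ? ['none', ...base] : base
}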
@@ -107,7 +107,7 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
   type: 'openai',
   apiKey: '',
   apiHost: 'https://aihubmix.com',
-  anthropicApiHost: 'https://aihubmix.com/anthropic',
+  anthropicApiHost: 'https://aihubmix.com',
   models: SYSTEM_MODELS.aihubmix,
   isSystem: true,
   enabled: false
@@ -140,11 +140,14 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
     let model = ''
     let priceModel = ''
     let image_size = ''
+    let extend_params = {}

     for (const provider of Object.keys(modelGroups)) {
       if (modelGroups[provider] && modelGroups[provider].length > 0) {
         model = modelGroups[provider][0].id
         priceModel = modelGroups[provider][0].price
         image_size = modelGroups[provider][0].image_sizes[0].value
+        extend_params = modelGroups[provider][0].extend_params
         break
       }
     }
@@ -153,7 +156,8 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
       model,
       priceModel,
       image_size,
-      modelGroups
+      modelGroups,
+      extend_params
     }
   }
@@ -162,7 +166,7 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {

     const generationMode = params?.generationMode || painting?.generationMode || MODEOPTIONS[0].value

-    const { model, priceModel, image_size, modelGroups } = getFirstModelInfo(generationMode)
+    const { model, priceModel, image_size, modelGroups, extend_params } = getFirstModelInfo(generationMode)

     return {
       ...DEFAULT_PAINTING,
@@ -173,6 +177,7 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
       modelGroups,
       priceModel,
       image_size,
+      extend_params,
       ...params
     }
   }
@@ -190,7 +195,12 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
   const onSelectModel = (modelId: string) => {
     const model = allModels.find((m) => m.id === modelId)
     if (model) {
-      updatePaintingState({ model: modelId, priceModel: model.price, image_size: model.image_sizes[0].value })
+      updatePaintingState({
+        model: modelId,
+        priceModel: model.price,
+        image_size: model.image_sizes[0].value,
+        extend_params: model.extend_params
+      })
     }
   }
@@ -293,7 +303,7 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {

     clearImages()

-    const { model, priceModel, image_size, modelGroups } = getFirstModelInfo(v)
+    const { model, priceModel, image_size, modelGroups, extend_params } = getFirstModelInfo(v)

     setModelOptions(modelGroups)
@@ -309,9 +319,10 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
       // otherwise update the current painting
       updatePaintingState({
         generationMode: v,
-        model: model,
-        image_size: image_size,
-        priceModel: priceModel
+        model,
+        image_size,
+        priceModel,
+        extend_params
       })
     }
   }
@@ -355,7 +366,8 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
     const params = {
       prompt,
       model: painting.model,
-      n: painting.n
+      n: painting.n,
+      ...painting?.extend_params
     }

     const headerExpand = {
@@ -397,7 +409,8 @@ const DmxapiPage: FC<{ Options: string[] }> = ({ Options }) => {
     const params = {
       prompt,
       n: painting.n,
-      model: painting.model
+      model: painting.model,
+      ...painting?.extend_params
    }

     if (painting.image_size) {
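The net effect of these hunks is that model-specific extras ride along from the model metadata into the request body. A minimal sketch of that merge (field names other than extend_params are illustrative):

interface PaintingModel {
  id: string
  extend_params?: Record<string, unknown>
}

// extend_params is spread last so a model can contribute extra request
// fields (e.g. style or quality switches) without the page knowing them.
function buildRequestParams(prompt: string, n: number, model: PaintingModel) {
  return {
    prompt,
    n,
    model: model.id,
    ...model.extend_params
  }
}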
@@ -84,7 +84,7 @@ export const MODEOPTIONS = [
 // Fetch grouped model data
 export const GetModelGroup = async (): Promise<DMXApiModelGroups> => {
   try {
-    const response = await fetch('https://dmxapi.cn/cherry_painting_models_v2.json')
+    const response = await fetch('https://dmxapi.cn/cherry_painting_models_v3.json')

     if (response.ok) {
       const data = await response.json()
@@ -18,7 +18,7 @@ import {
   setSidebarIcons
 } from '@renderer/store/settings'
 import { ThemeMode } from '@renderer/types'
-import { Button, ColorPicker, Segmented, Select, Switch } from 'antd'
+import { Button, ColorPicker, Segmented, Select, Switch, Tooltip } from 'antd'
 import { Minus, Monitor, Moon, Plus, Sun } from 'lucide-react'
 import type { FC } from 'react'
 import { useCallback, useEffect, useMemo, useState } from 'react'
@@ -196,6 +196,21 @@ const DisplaySettings: FC = () => {
     [t]
   )

+  const renderFontOption = useCallback(
+    (font: string) => (
+      <Tooltip title={font} placement="left" mouseEnterDelay={0.5}>
+        <div
+          className="truncate"
+          style={{
+            fontFamily: font
+          }}>
+          {font}
+        </div>
+      </Tooltip>
+    ),
+    []
+  )
+
   return (
     <SettingContainer theme={theme}>
       <SettingGroup theme={theme}>
@@ -292,7 +307,7 @@ const DisplaySettings: FC = () => {
           <SettingRowTitle>{t('settings.display.font.global')}</SettingRowTitle>
           <SelectRow>
             <Select
-              style={{ width: 200 }}
+              style={{ width: 280 }}
               placeholder={t('settings.display.font.select')}
               options={[
                 {
@@ -303,7 +318,7 @@ const DisplaySettings: FC = () => {
                   ),
                   value: ''
                 },
-                ...fontList.map((font) => ({ label: <span style={{ fontFamily: font }}>{font}</span>, value: font }))
+                ...fontList.map((font) => ({ label: renderFontOption(font), value: font }))
               ]}
               value={userTheme.userFontFamily || ''}
               onChange={(font) => handleUserFontChange(font)}
@@ -324,7 +339,7 @@ const DisplaySettings: FC = () => {
           <SettingRowTitle>{t('settings.display.font.code')}</SettingRowTitle>
           <SelectRow>
             <Select
-              style={{ width: 200 }}
+              style={{ width: 280 }}
               placeholder={t('settings.display.font.select')}
               options={[
                 {
@@ -335,7 +350,7 @@ const DisplaySettings: FC = () => {
                   ),
                   value: ''
                 },
-                ...fontList.map((font) => ({ label: <span style={{ fontFamily: font }}>{font}</span>, value: font }))
+                ...fontList.map((font) => ({ label: renderFontOption(font), value: font }))
               ]}
               value={userTheme.userCodeFontFamily || ''}
               onChange={(font) => handleUserCodeFontChange(font)}
@@ -480,7 +495,7 @@ const SelectRow = styled.div`
   display: flex;
   align-items: center;
   justify-content: flex-end;
-  width: 300px;
+  width: 380px;
 `

 export default DisplaySettings
@@ -30,8 +30,7 @@ import {
 } from '@renderer/types'
 import { getFileExtension, isTextFile, runAsyncFunction, uuid } from '@renderer/utils'
 import { abortCompletion } from '@renderer/utils/abortController'
-import { isAbortError } from '@renderer/utils/error'
-import { formatErrorMessage } from '@renderer/utils/error'
+import { formatErrorMessageWithPrefix, isAbortError } from '@renderer/utils/error'
 import { getFilesFromDropEvent, getTextFromDropEvent } from '@renderer/utils/input'
 import {
   createInputScrollHandler,
@@ -181,7 +180,7 @@ const TranslatePage: FC = () => {
         window.toast.info(t('translate.info.aborted'))
       } else {
         logger.error('Failed to translate text', e as Error)
-        window.toast.error(t('translate.error.failed') + ': ' + formatErrorMessage(e))
+        window.toast.error(formatErrorMessageWithPrefix(e, t('translate.error.failed')))
       }
       setTranslating(false)
       return
@@ -202,11 +201,11 @@ const TranslatePage: FC = () => {
           await saveTranslateHistory(text, translated, actualSourceLanguage.langCode, actualTargetLanguage.langCode)
         } catch (e) {
           logger.error('Failed to save translate history', e as Error)
-          window.toast.error(t('translate.history.error.save') + ': ' + formatErrorMessage(e))
+          window.toast.error(formatErrorMessageWithPrefix(e, t('translate.history.error.save')))
         }
       } catch (e) {
         logger.error('Failed to translate', e as Error)
-        window.toast.error(t('translate.error.unknown') + ': ' + formatErrorMessage(e))
+        window.toast.error(formatErrorMessageWithPrefix(e, t('translate.error.unknown')))
       }
     },
     [autoCopy, copy, dispatch, setTimeoutTimer, setTranslatedContent, setTranslating, t, translating]
@@ -266,7 +265,7 @@ const TranslatePage: FC = () => {
       await translate(text, actualSourceLanguage, actualTargetLanguage)
     } catch (error) {
       logger.error('Translation error:', error as Error)
-      window.toast.error(t('translate.error.failed') + ': ' + formatErrorMessage(error))
+      window.toast.error(formatErrorMessageWithPrefix(error, t('translate.error.failed')))
       return
     } finally {
       setTranslating(false)
@@ -427,7 +426,7 @@ const TranslatePage: FC = () => {
       setAutoDetectionMethod(method)
     } catch (e) {
       logger.error('Failed to update auto detection method setting.', e as Error)
-      window.toast.error(t('translate.error.detect.update_setting') + formatErrorMessage(e))
+      window.toast.error(formatErrorMessageWithPrefix(e, t('translate.error.detect.update_setting')))
     }
   }
@@ -498,7 +497,7 @@ const TranslatePage: FC = () => {
         isText = await isTextFile(file.path)
       } catch (e) {
         logger.error('Failed to check file type.', e as Error)
-        window.toast.error(t('translate.files.error.check_type') + ': ' + formatErrorMessage(e))
+        window.toast.error(formatErrorMessageWithPrefix(e, t('translate.files.error.check_type')))
         return
       }
     } else {
@@ -530,11 +529,11 @@ const TranslatePage: FC = () => {
           setText(text + result)
         } catch (e) {
           logger.error('Failed to read file.', e as Error)
-          window.toast.error(t('translate.files.error.unknown') + ': ' + formatErrorMessage(e))
+          window.toast.error(formatErrorMessageWithPrefix(e, t('translate.files.error.unknown')))
         }
       } catch (e) {
         logger.error('Failed to read file.', e as Error)
-        window.toast.error(t('translate.files.error.unknown') + ': ' + formatErrorMessage(e))
+        window.toast.error(formatErrorMessageWithPrefix(e, t('translate.files.error.unknown')))
       }
     }
     const promise = _readFile()
@@ -578,7 +577,7 @@ const TranslatePage: FC = () => {
       await processFile(file)
     } catch (e) {
       logger.error('Unknown error when selecting file.', e as Error)
-      window.toast.error(t('translate.files.error.unknown') + ': ' + formatErrorMessage(e))
+      window.toast.error(formatErrorMessageWithPrefix(e, t('translate.files.error.unknown')))
     } finally {
       clearFiles()
       setIsProcessing(false)
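Each replacement above funnels the localized prefix into a single helper. Its body is not shown in this diff, so the following is only a plausible sketch of the contract inferred from the call sites; the real implementation in @renderer/utils/error may differ:

// Hypothetical: join a localized prefix with a formatted error message.
function formatErrorMessageWithPrefix(error: unknown, prefix: string): string {
  const message = error instanceof Error ? error.message : String(error)
  return message ? `${prefix}: ${message}` : prefix
}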
@@ -42,7 +42,7 @@ export const translateText = async (
   abortKey?: string,
   options?: TranslateOptions
 ) => {
-  let abortError
+  let error
   const assistantSettings: Partial<AssistantSettings> | undefined = options
     ? { reasoning_effort: options?.reasoningEffort }
     : undefined
@@ -58,8 +58,8 @@ export const translateText = async (
       } else if (chunk.type === ChunkType.TEXT_COMPLETE) {
         completed = true
       } else if (chunk.type === ChunkType.ERROR) {
+        error = chunk.error
         if (isAbortError(chunk.error)) {
-          abortError = chunk.error
           completed = true
         }
       }
@@ -84,8 +84,8 @@ export const translateText = async (
     }
   }

-  if (abortError) {
-    throw abortError
+  if (error !== undefined && !isAbortError(error)) {
+    throw error
   }

   const trimmedText = translatedText.trim()
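After this change, abort errors end the stream quietly while any other streamed error is rethrown once the loop finishes. A condensed sketch of that contract (the chunk shape and abort check are illustrative stand-ins, not the app's types):

type Chunk = { type: 'text' | 'error' | 'complete'; text?: string; error?: unknown }

const isAbortError = (e: unknown): boolean => e instanceof Error && e.name === 'AbortError'

function drain(chunks: Chunk[]): string {
  let text = ''
  let error: unknown
  for (const chunk of chunks) {
    if (chunk.type === 'text') text += chunk.text ?? ''
    if (chunk.type === 'error') error = chunk.error
  }
  // An abort is a user action, not a failure: swallow it, rethrow the rest.
  if (error !== undefined && !isAbortError(error)) {
    throw error
  }
  return text
}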
@@ -395,6 +395,7 @@ export interface DmxapiPainting extends PaintingParams {
   autoCreate?: boolean
   generationMode?: generationModeType
   priceModel?: string
+  extend_params?: Record<string, unknown>
 }

 export interface TokenFluxPainting extends PaintingParams {
@@ -1,3 +1,4 @@
+import { loggerService } from '@logger'
 import type { McpError } from '@modelcontextprotocol/sdk/types.js'
 import type { AgentServerError } from '@renderer/types'
 import { AgentServerErrorSchema } from '@renderer/types'
@@ -20,7 +21,7 @@ import { ZodError } from 'zod'
 import { parseJSON } from './json'
 import { safeSerialize } from './serialize'

-// const logger = loggerService.withContext('Utils:error')
+const logger = loggerService.withContext('Utils:error')

 export function getErrorDetails(err: any, seen = new WeakSet()): any {
   // Handle circular references
@@ -65,11 +66,16 @@ export function formatErrorMessage(error: unknown): string {
   delete detailedError?.stack
   delete detailedError?.request_id

-  const formattedJson = JSON.stringify(detailedError, null, 2)
-    .split('\n')
-    .map((line) => ` ${line}`)
-    .join('\n')
-  return detailedError.message ? detailedError.message : `Error Details:\n${formattedJson}`
+  if (detailedError) {
+    const formattedJson = JSON.stringify(detailedError, null, 2)
+      .split('\n')
+      .map((line) => ` ${line}`)
+      .join('\n')
+    return detailedError.message ? detailedError.message : `Error Details:\n${formattedJson}`
+  } else {
+    logger.warn('Get detailed error failed.')
+    return ''
+  }
 }

 export function getErrorMessage(error: unknown): string {
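The new guard matters when getErrorDetails yields nothing usable. A quick usage sketch of the intended behavior (toy inputs; assumes getErrorDetails surfaces .message for ordinary Error objects):

// formatErrorMessage(new Error('boom'))  -> 'boom'
// formatErrorMessage({ code: 500 })      -> 'Error Details:\n { "code": 500 ... }'
// formatErrorMessage(undefined)          -> '' plus a logger.warn, instead of a crash
const samples = [new Error('boom'), { code: 500 }, undefined]
const messages = samples.map((input) => formatErrorMessage(input))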