mirror of https://github.com/CherryHQ/cherry-studio.git
synced 2025-12-19 14:41:24 +08:00

Compare commits: 9804073bc3 ... 57874f8d98 (13 commits)

| SHA1 |
|---|
| 57874f8d98 |
| fd6986076a |
| 6309cc179d |
| c04529a23c |
| 0f1b3afa72 |
| 0cf0072b51 |
| 150bb3e3a0 |
| 739096deca |
| 1d5dafa325 |
| bdfda7afb1 |
| ef25eef0eb |
| c676a93595 |
| e85009fcd6 |
@ -12,8 +12,13 @@
; https://github.com/electron-userland/electron-builder/issues/1122
!ifndef BUILD_UNINSTALLER
  ; Check VC++ Redistributable based on architecture stored in $1
  Function checkVCRedist
    ReadRegDWORD $0 HKLM "SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\x64" "Installed"
    ${If} $1 == "arm64"
      ReadRegDWORD $0 HKLM "SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\ARM64" "Installed"
    ${Else}
      ReadRegDWORD $0 HKLM "SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\x64" "Installed"
    ${EndIf}
  FunctionEnd

  Function checkArchitectureCompatibility
@ -97,29 +102,47 @@

    Call checkVCRedist
    ${If} $0 != "1"
      MessageBox MB_YESNO "\
        NOTE: ${PRODUCT_NAME} requires $\r$\n\
        'Microsoft Visual C++ Redistributable'$\r$\n\
        to function properly.$\r$\n$\r$\n\
        Download and install now?" /SD IDYES IDYES InstallVCRedist IDNO DontInstall
      InstallVCRedist:
        inetc::get /CAPTION " " /BANNER "Downloading Microsoft Visual C++ Redistributable..." "https://aka.ms/vs/17/release/vc_redist.x64.exe" "$TEMP\vc_redist.x64.exe"
        ExecWait "$TEMP\vc_redist.x64.exe /install /norestart"
        ;IfErrors InstallError ContinueInstall ; vc_redist exit code is unreliable :(
        Call checkVCRedist
        ${If} $0 == "1"
          Goto ContinueInstall
        ${EndIf}
      ; VC++ is required - install automatically since declining would abort anyway
      ; Select download URL based on system architecture (stored in $1)
      ${If} $1 == "arm64"
        StrCpy $2 "https://aka.ms/vs/17/release/vc_redist.arm64.exe"
        StrCpy $3 "$TEMP\vc_redist.arm64.exe"
      ${Else}
        StrCpy $2 "https://aka.ms/vs/17/release/vc_redist.x64.exe"
        StrCpy $3 "$TEMP\vc_redist.x64.exe"
      ${EndIf}

      ;InstallError:
        MessageBox MB_ICONSTOP "\
          There was an unexpected error installing$\r$\n\
          Microsoft Visual C++ Redistributable.$\r$\n\
          The installation of ${PRODUCT_NAME} cannot continue."
      DontInstall:
      inetc::get /CAPTION " " /BANNER "Downloading Microsoft Visual C++ Redistributable..." \
        $2 $3 /END
      Pop $0 ; Get download status from inetc::get
      ${If} $0 != "OK"
        MessageBox MB_ICONSTOP|MB_YESNO "\
          Failed to download Microsoft Visual C++ Redistributable.$\r$\n$\r$\n\
          Error: $0$\r$\n$\r$\n\
          Would you like to open the download page in your browser?$\r$\n\
          $2" IDYES openDownloadUrl IDNO skipDownloadUrl
        openDownloadUrl:
          ExecShell "open" $2
        skipDownloadUrl:
          Abort
      ${EndIf}

      ExecWait "$3 /install /quiet /norestart"
      ; Note: vc_redist exit code is unreliable, verify via registry check instead

      Call checkVCRedist
      ${If} $0 != "1"
        MessageBox MB_ICONSTOP|MB_YESNO "\
          Microsoft Visual C++ Redistributable installation failed.$\r$\n$\r$\n\
          Would you like to open the download page in your browser?$\r$\n\
          $2$\r$\n$\r$\n\
          The installation of ${PRODUCT_NAME} cannot continue." IDYES openInstallUrl IDNO skipInstallUrl
        openInstallUrl:
          ExecShell "open" $2
        skipInstallUrl:
          Abort
      ${EndIf}
    ${EndIf}
    ContinueInstall:
    Pop $4
    Pop $3
    Pop $2

@ -134,54 +134,38 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
releaseInfo:
  releaseNotes: |
    <!--LANG:en-->
    Cherry Studio 1.7.4 - New Browser MCP & Model Updates
    Cherry Studio 1.7.5 - Filesystem MCP Overhaul & Topic Management

    This release adds a powerful browser automation MCP server, new web search provider, and model support updates.
    This release features a completely rewritten filesystem MCP server, new batch topic management, and improved assistant management.

    ✨ New Features
    - [MCP] Add @cherry/browser CDP MCP server with session management for browser automation
    - [Web Search] Add ExaMCP free web search provider (no API key required)
    - [Model] Support GPT 5.2 series models
    - [Model] Add capabilities support for Doubao Seed Code models (tool calling, reasoning, vision)

    🔧 Improvements
    - [Translate] Add reasoning effort option to translate service
    - [i18n] Improve zh-TW Traditional Chinese locale
    - [Settings] Update MCP Settings layout and styling
    - [MCP] Rewrite filesystem MCP server with improved tool set (glob, ls, grep, read, write, edit, delete)
    - [Topics] Add topic manage mode for batch delete and move operations with search functionality
    - [Assistants] Merge import/subscribe popups and add export to assistant management
    - [Knowledge] Use prompt injection for forced knowledge base search (faster response times)
    - [Settings] Add tool use mode setting (prompt/function) to default assistant settings

    🐛 Bug Fixes
    - [Chat] Fix line numbers being wrongly copied from code blocks
    - [Translate] Fix default to first supported reasoning effort when translating
    - [Chat] Fix preserve thinking block in assistant messages
    - [Web Search] Fix max search result limit
    - [Embedding] Fix embedding dimensions retrieval for ModernAiProvider
    - [Chat] Fix token calculation in prompt tool use plugin
    - [Model] Fix Ollama provider options for Qwen model support
    - [UI] Fix Chat component marginRight calculation for improved layout
    - [Model] Correct typo in Gemini 3 Pro Image Preview model name
    - [Installer] Auto-install VC++ Redistributable without user prompt
    - [Notes] Fix notes directory validation and default path reset for cross-platform restore
    - [OAuth] Bind OAuth callback server to localhost (127.0.0.1) for security

    <!--LANG:zh-CN-->
    Cherry Studio 1.7.4 - 新增浏览器 MCP 与模型更新
    Cherry Studio 1.7.5 - 文件系统 MCP 重构与话题管理

    本次更新新增强大的浏览器自动化 MCP 服务器、新的网页搜索提供商以及模型支持更新。
    本次更新完全重写了文件系统 MCP 服务器,新增批量话题管理功能,并改进了助手管理。

    ✨ 新功能
    - [MCP] 新增 @cherry/browser CDP MCP 服务器,支持会话管理的浏览器自动化
    - [网页搜索] 新增 ExaMCP 免费网页搜索提供商(无需 API 密钥)
    - [模型] 支持 GPT 5.2 系列模型
    - [模型] 为豆包 Seed Code 模型添加能力支持(工具调用、推理、视觉)

    🔧 功能改进
    - [翻译] 为翻译服务添加推理强度选项
    - [国际化] 改进繁体中文(zh-TW)本地化
    - [设置] 优化 MCP 设置布局和样式
    - [MCP] 重写文件系统 MCP 服务器,提供改进的工具集(glob、ls、grep、read、write、edit、delete)
    - [话题] 新增话题管理模式,支持批量删除和移动操作,带搜索功能
    - [助手] 合并导入/订阅弹窗,并在助手管理中添加导出功能
    - [知识库] 使用提示词注入进行强制知识库搜索(响应更快)
    - [设置] 在默认助手设置中添加工具使用模式设置(prompt/function)

    🐛 问题修复
    - [聊天] 修复代码块中行号被错误复制的问题
    - [翻译] 修复翻译时默认使用第一个支持的推理强度
    - [聊天] 修复助手消息中思考块的保留问题
    - [网页搜索] 修复最大搜索结果数限制
    - [嵌入] 修复 ModernAiProvider 嵌入维度获取问题
    - [聊天] 修复提示词工具使用插件的 token 计算问题
    - [模型] 修复 Ollama 提供商对 Qwen 模型的支持选项
    - [界面] 修复聊天组件右边距计算以改善布局
    - [模型] 修正 Gemini 3 Pro Image Preview 模型名称的拼写错误
    - [安装程序] 自动安装 VC++ 运行库,无需用户确认
    - [笔记] 修复跨平台恢复场景下的笔记目录验证和默认路径重置逻辑
    - [OAuth] 将 OAuth 回调服务器绑定到 localhost (127.0.0.1) 以提高安全性
    <!--LANG:END-->

@ -1,6 +1,6 @@
{
  "name": "CherryStudio",
  "version": "1.7.4",
  "version": "1.7.5",
  "private": true,
  "description": "A powerful AI assistant for producer.",
  "main": "./out/main/index.js",

@ -244,6 +244,7 @@ export enum IpcChannel {
  System_GetCpuName = 'system:getCpuName',
  System_CheckGitBash = 'system:checkGitBash',
  System_GetGitBashPath = 'system:getGitBashPath',
  System_GetGitBashPathInfo = 'system:getGitBashPathInfo',
  System_SetGitBashPath = 'system:setGitBashPath',

  // DevTools

@ -488,3 +488,11 @@ export const MACOS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [

// resources/scripts should be maintained manually
export const HOME_CHERRY_DIR = '.cherrystudio'

// Git Bash path configuration types
export type GitBashPathSource = 'manual' | 'auto'

export interface GitBashPathInfo {
  path: string | null
  source: GitBashPathSource | null
}
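
The `GitBashPathInfo` shape above is what the renderer receives over the new `System_GetGitBashPathInfo` channel registered further down. A minimal sketch of how a settings panel might consume it; the `invoke` parameter stands in for whatever preload bridge exposes `ipcRenderer.invoke` (that bridge name is an assumption, not taken from this diff):

```ts
// Sketch only: `invoke` is an assumed stand-in for the preload ipcRenderer bridge.
type GitBashPathSource = 'manual' | 'auto'

interface GitBashPathInfo {
  path: string | null
  source: GitBashPathSource | null
}

type Invoke = (channel: string, ...args: unknown[]) => Promise<unknown>

export async function describeGitBashSetting(invoke: Invoke): Promise<string> {
  // 'system:getGitBashPathInfo' is the channel value of IpcChannel.System_GetGitBashPathInfo
  const info = (await invoke('system:getGitBashPathInfo')) as GitBashPathInfo
  if (!info.path) return 'Git Bash: not configured'
  return info.source === 'manual' ? `Git Bash (manual): ${info.path}` : `Git Bash (auto-detected): ${info.path}`
}
```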
@ -6,7 +6,14 @@ import { loggerService } from '@logger'
import { isLinux, isMac, isPortable, isWin } from '@main/constant'
import { generateSignature } from '@main/integration/cherryai'
import anthropicService from '@main/services/AnthropicService'
import { findGitBash, getBinaryPath, isBinaryExists, runInstallScript, validateGitBashPath } from '@main/utils/process'
import {
  autoDiscoverGitBash,
  getBinaryPath,
  getGitBashPathInfo,
  isBinaryExists,
  runInstallScript,
  validateGitBashPath
} from '@main/utils/process'
import { handleZoomFactor } from '@main/utils/zoom'
import type { SpanEntity, TokenUsage } from '@mcp-trace/trace-core'
import type { UpgradeChannel } from '@shared/config/constant'
@ -499,9 +506,8 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
    }

    try {
      const customPath = configManager.get(ConfigKeys.GitBashPath) as string | undefined
      const bashPath = findGitBash(customPath)
      // Use autoDiscoverGitBash to handle auto-discovery and persistence
      const bashPath = autoDiscoverGitBash()
      if (bashPath) {
        logger.info('Git Bash is available', { path: bashPath })
        return true
@ -524,13 +530,22 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
    return customPath ?? null
  })

  // Returns { path, source } where source is 'manual' | 'auto' | null
  ipcMain.handle(IpcChannel.System_GetGitBashPathInfo, () => {
    return getGitBashPathInfo()
  })

  ipcMain.handle(IpcChannel.System_SetGitBashPath, (_, newPath: string | null) => {
    if (!isWin) {
      return false
    }

    if (!newPath) {
      // Clear manual setting and re-run auto-discovery
      configManager.set(ConfigKeys.GitBashPath, null)
      configManager.set(ConfigKeys.GitBashPathSource, null)
      // Re-run auto-discovery to restore auto-discovered path if available
      autoDiscoverGitBash()
      return true
    }

@ -539,7 +554,9 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
      return false
    }

    // Set path with 'manual' source
    configManager.set(ConfigKeys.GitBashPath, validated)
    configManager.set(ConfigKeys.GitBashPathSource, 'manual')
    return true
  })

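
A companion sketch of the renderer side of `System_SetGitBashPath`: passing a path stores it with source `'manual'` after validation, while passing `null` clears the override and the handler re-runs `autoDiscoverGitBash()`. As before, the `invoke` parameter is an assumed stand-in for the preload bridge:

```ts
// Sketch only: `invoke` stands in for the preload ipcRenderer bridge; paths are illustrative.
type Invoke = (channel: string, ...args: unknown[]) => Promise<unknown>

export async function setGitBashPath(invoke: Invoke, newPath: string | null): Promise<boolean> {
  // 'system:setGitBashPath' mirrors IpcChannel.System_SetGitBashPath in the main process.
  // null clears the manual setting; the handler then restores any auto-discovered path.
  return (await invoke('system:setGitBashPath', newPath)) as boolean
}

export async function demo(invoke: Invoke): Promise<void> {
  await setGitBashPath(invoke, 'D:\\PortableGit\\bin\\bash.exe') // hypothetical manual path
  await setGitBashPath(invoke, null) // revert to auto-discovery
}
```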
@ -36,7 +36,7 @@ export function createInMemoryMCPServer(
      return new FetchServer().server
    }
    case BuiltinMCPServerNames.filesystem: {
      return new FileSystemServer(args).server
      return new FileSystemServer(envs.WORKSPACE_ROOT).server
    }
    case BuiltinMCPServerNames.difyKnowledge: {
      const difyKey = envs.DIFY_KEY

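
The factory change means the rewritten filesystem server is rooted by a single `WORKSPACE_ROOT` environment value rather than a list of allowed directories passed as `args`. A hedged sketch of what a server configuration entry might look like under the new scheme; the config shape is illustrative, only `WORKSPACE_ROOT` and the fallback behavior come from this diff:

```ts
// Illustrative shape only; Cherry Studio's real MCP server config type has more fields.
interface InMemoryMCPServerConfig {
  name: string
  env?: Record<string, string>
}

// Before 1.7.5 the allowed directories travelled in `args`; now the root comes from env.
const filesystemServer: InMemoryMCPServerConfig = {
  name: 'filesystem', // BuiltinMCPServerNames.filesystem
  env: {
    // When WORKSPACE_ROOT is absent or not absolute, FileSystemServer falls back to
    // <userData>/Data/Workspace (see server.ts below).
    WORKSPACE_ROOT: '/home/alice/projects/demo' // hypothetical path
  }
}
```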
@ -1,652 +0,0 @@
|
||||
// port https://github.com/modelcontextprotocol/servers/blob/main/src/filesystem/index.ts
|
||||
|
||||
import { loggerService } from '@logger'
|
||||
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
|
||||
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
|
||||
import { createTwoFilesPatch } from 'diff'
|
||||
import fs from 'fs/promises'
|
||||
import { minimatch } from 'minimatch'
|
||||
import os from 'os'
|
||||
import path from 'path'
|
||||
import * as z from 'zod'
|
||||
|
||||
const logger = loggerService.withContext('MCP:FileSystemServer')
|
||||
|
||||
// Normalize all paths consistently
|
||||
function normalizePath(p: string): string {
|
||||
return path.normalize(p)
|
||||
}
|
||||
|
||||
function expandHome(filepath: string): string {
|
||||
if (filepath.startsWith('~/') || filepath === '~') {
|
||||
return path.join(os.homedir(), filepath.slice(1))
|
||||
}
|
||||
return filepath
|
||||
}
|
||||
|
||||
// Security utilities
|
||||
async function validatePath(allowedDirectories: string[], requestedPath: string): Promise<string> {
|
||||
const expandedPath = expandHome(requestedPath)
|
||||
const absolute = path.isAbsolute(expandedPath)
|
||||
? path.resolve(expandedPath)
|
||||
: path.resolve(process.cwd(), expandedPath)
|
||||
|
||||
const normalizedRequested = normalizePath(absolute)
|
||||
|
||||
// Check if path is within allowed directories
|
||||
const isAllowed = allowedDirectories.some((dir) => normalizedRequested.startsWith(dir))
|
||||
if (!isAllowed) {
|
||||
throw new Error(
|
||||
`Access denied - path outside allowed directories: ${absolute} not in ${allowedDirectories.join(', ')}`
|
||||
)
|
||||
}
|
||||
|
||||
// Handle symlinks by checking their real path
|
||||
try {
|
||||
const realPath = await fs.realpath(absolute)
|
||||
const normalizedReal = normalizePath(realPath)
|
||||
const isRealPathAllowed = allowedDirectories.some((dir) => normalizedReal.startsWith(dir))
|
||||
if (!isRealPathAllowed) {
|
||||
throw new Error('Access denied - symlink target outside allowed directories')
|
||||
}
|
||||
return realPath
|
||||
} catch (error) {
|
||||
// For new files that don't exist yet, verify parent directory
|
||||
const parentDir = path.dirname(absolute)
|
||||
try {
|
||||
const realParentPath = await fs.realpath(parentDir)
|
||||
const normalizedParent = normalizePath(realParentPath)
|
||||
const isParentAllowed = allowedDirectories.some((dir) => normalizedParent.startsWith(dir))
|
||||
if (!isParentAllowed) {
|
||||
throw new Error('Access denied - parent directory outside allowed directories')
|
||||
}
|
||||
return absolute
|
||||
} catch {
|
||||
throw new Error(`Parent directory does not exist: ${parentDir}`)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Schema definitions
|
||||
const ReadFileArgsSchema = z.object({
|
||||
path: z.string()
|
||||
})
|
||||
|
||||
const ReadMultipleFilesArgsSchema = z.object({
|
||||
paths: z.array(z.string())
|
||||
})
|
||||
|
||||
const WriteFileArgsSchema = z.object({
|
||||
path: z.string(),
|
||||
content: z.string()
|
||||
})
|
||||
|
||||
const EditOperation = z.object({
|
||||
oldText: z.string().describe('Text to search for - must match exactly'),
|
||||
newText: z.string().describe('Text to replace with')
|
||||
})
|
||||
|
||||
const EditFileArgsSchema = z.object({
|
||||
path: z.string(),
|
||||
edits: z.array(EditOperation),
|
||||
dryRun: z.boolean().default(false).describe('Preview changes using git-style diff format')
|
||||
})
|
||||
|
||||
const CreateDirectoryArgsSchema = z.object({
|
||||
path: z.string()
|
||||
})
|
||||
|
||||
const ListDirectoryArgsSchema = z.object({
|
||||
path: z.string()
|
||||
})
|
||||
|
||||
const DirectoryTreeArgsSchema = z.object({
|
||||
path: z.string()
|
||||
})
|
||||
|
||||
const MoveFileArgsSchema = z.object({
|
||||
source: z.string(),
|
||||
destination: z.string()
|
||||
})
|
||||
|
||||
const SearchFilesArgsSchema = z.object({
|
||||
path: z.string(),
|
||||
pattern: z.string(),
|
||||
excludePatterns: z.array(z.string()).optional().default([])
|
||||
})
|
||||
|
||||
const GetFileInfoArgsSchema = z.object({
|
||||
path: z.string()
|
||||
})
|
||||
|
||||
interface FileInfo {
|
||||
size: number
|
||||
created: Date
|
||||
modified: Date
|
||||
accessed: Date
|
||||
isDirectory: boolean
|
||||
isFile: boolean
|
||||
permissions: string
|
||||
}
|
||||
|
||||
// Tool implementations
|
||||
async function getFileStats(filePath: string): Promise<FileInfo> {
|
||||
const stats = await fs.stat(filePath)
|
||||
return {
|
||||
size: stats.size,
|
||||
created: stats.birthtime,
|
||||
modified: stats.mtime,
|
||||
accessed: stats.atime,
|
||||
isDirectory: stats.isDirectory(),
|
||||
isFile: stats.isFile(),
|
||||
permissions: stats.mode.toString(8).slice(-3)
|
||||
}
|
||||
}
|
||||
|
||||
async function searchFiles(
|
||||
allowedDirectories: string[],
|
||||
rootPath: string,
|
||||
pattern: string,
|
||||
excludePatterns: string[] = []
|
||||
): Promise<string[]> {
|
||||
const results: string[] = []
|
||||
|
||||
async function search(currentPath: string) {
|
||||
const entries = await fs.readdir(currentPath, { withFileTypes: true })
|
||||
|
||||
for (const entry of entries) {
|
||||
const fullPath = path.join(currentPath, entry.name)
|
||||
|
||||
try {
|
||||
// Validate each path before processing
|
||||
await validatePath(allowedDirectories, fullPath)
|
||||
|
||||
// Check if path matches any exclude pattern
|
||||
const relativePath = path.relative(rootPath, fullPath)
|
||||
const shouldExclude = excludePatterns.some((pattern) => {
|
||||
const globPattern = pattern.includes('*') ? pattern : `**/${pattern}/**`
|
||||
return minimatch(relativePath, globPattern, { dot: true })
|
||||
})
|
||||
|
||||
if (shouldExclude) {
|
||||
continue
|
||||
}
|
||||
|
||||
if (entry.name.toLowerCase().includes(pattern.toLowerCase())) {
|
||||
results.push(fullPath)
|
||||
}
|
||||
|
||||
if (entry.isDirectory()) {
|
||||
await search(fullPath)
|
||||
}
|
||||
} catch (error) {
|
||||
// Skip invalid paths during search
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
await search(rootPath)
|
||||
return results
|
||||
}
|
||||
|
||||
// file editing and diffing utilities
|
||||
function normalizeLineEndings(text: string): string {
|
||||
return text.replace(/\r\n/g, '\n')
|
||||
}
|
||||
|
||||
function createUnifiedDiff(originalContent: string, newContent: string, filepath: string = 'file'): string {
|
||||
// Ensure consistent line endings for diff
|
||||
const normalizedOriginal = normalizeLineEndings(originalContent)
|
||||
const normalizedNew = normalizeLineEndings(newContent)
|
||||
|
||||
return createTwoFilesPatch(filepath, filepath, normalizedOriginal, normalizedNew, 'original', 'modified')
|
||||
}
|
||||
|
||||
async function applyFileEdits(
|
||||
filePath: string,
|
||||
edits: Array<{ oldText: string; newText: string }>,
|
||||
dryRun = false
|
||||
): Promise<string> {
|
||||
// Read file content and normalize line endings
|
||||
const content = normalizeLineEndings(await fs.readFile(filePath, 'utf-8'))
|
||||
|
||||
// Apply edits sequentially
|
||||
let modifiedContent = content
|
||||
for (const edit of edits) {
|
||||
const normalizedOld = normalizeLineEndings(edit.oldText)
|
||||
const normalizedNew = normalizeLineEndings(edit.newText)
|
||||
|
||||
// If exact match exists, use it
|
||||
if (modifiedContent.includes(normalizedOld)) {
|
||||
modifiedContent = modifiedContent.replace(normalizedOld, normalizedNew)
|
||||
continue
|
||||
}
|
||||
|
||||
// Otherwise, try line-by-line matching with flexibility for whitespace
|
||||
const oldLines = normalizedOld.split('\n')
|
||||
const contentLines = modifiedContent.split('\n')
|
||||
let matchFound = false
|
||||
|
||||
for (let i = 0; i <= contentLines.length - oldLines.length; i++) {
|
||||
const potentialMatch = contentLines.slice(i, i + oldLines.length)
|
||||
|
||||
// Compare lines with normalized whitespace
|
||||
const isMatch = oldLines.every((oldLine, j) => {
|
||||
const contentLine = potentialMatch[j]
|
||||
return oldLine.trim() === contentLine.trim()
|
||||
})
|
||||
|
||||
if (isMatch) {
|
||||
// Preserve original indentation of first line
|
||||
const originalIndent = contentLines[i].match(/^\s*/)?.[0] || ''
|
||||
const newLines = normalizedNew.split('\n').map((line, j) => {
|
||||
if (j === 0) return originalIndent + line.trimStart()
|
||||
// For subsequent lines, try to preserve relative indentation
|
||||
const oldIndent = oldLines[j]?.match(/^\s*/)?.[0] || ''
|
||||
const newIndent = line.match(/^\s*/)?.[0] || ''
|
||||
if (oldIndent && newIndent) {
|
||||
const relativeIndent = newIndent.length - oldIndent.length
|
||||
return originalIndent + ' '.repeat(Math.max(0, relativeIndent)) + line.trimStart()
|
||||
}
|
||||
return line
|
||||
})
|
||||
|
||||
contentLines.splice(i, oldLines.length, ...newLines)
|
||||
modifiedContent = contentLines.join('\n')
|
||||
matchFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if (!matchFound) {
|
||||
throw new Error(`Could not find exact match for edit:\n${edit.oldText}`)
|
||||
}
|
||||
}
|
||||
|
||||
// Create unified diff
|
||||
const diff = createUnifiedDiff(content, modifiedContent, filePath)
|
||||
|
||||
// Format diff with appropriate number of backticks
|
||||
let numBackticks = 3
|
||||
while (diff.includes('`'.repeat(numBackticks))) {
|
||||
numBackticks++
|
||||
}
|
||||
const formattedDiff = `${'`'.repeat(numBackticks)}diff\n${diff}${'`'.repeat(numBackticks)}\n\n`
|
||||
|
||||
if (!dryRun) {
|
||||
await fs.writeFile(filePath, modifiedContent, 'utf-8')
|
||||
}
|
||||
|
||||
return formattedDiff
|
||||
}
|
||||
|
||||
class FileSystemServer {
|
||||
public server: Server
|
||||
private allowedDirectories: string[]
|
||||
constructor(allowedDirs: string[]) {
|
||||
if (!Array.isArray(allowedDirs) || allowedDirs.length === 0) {
|
||||
throw new Error('No allowed directories provided, please specify at least one directory in args')
|
||||
}
|
||||
|
||||
this.allowedDirectories = allowedDirs.map((dir) => normalizePath(path.resolve(expandHome(dir))))
|
||||
|
||||
// Validate that all directories exist and are accessible
|
||||
this.validateDirs().catch((error) => {
|
||||
logger.error('Error validating allowed directories:', error)
|
||||
throw new Error(`Error validating allowed directories: ${error}`)
|
||||
})
|
||||
|
||||
this.server = new Server(
|
||||
{
|
||||
name: 'secure-filesystem-server',
|
||||
version: '0.2.0'
|
||||
},
|
||||
{
|
||||
capabilities: {
|
||||
tools: {}
|
||||
}
|
||||
}
|
||||
)
|
||||
this.initialize()
|
||||
}
|
||||
|
||||
async validateDirs() {
|
||||
// Validate that all directories exist and are accessible
|
||||
await Promise.all(
|
||||
this.allowedDirectories.map(async (dir) => {
|
||||
try {
|
||||
const stats = await fs.stat(expandHome(dir))
|
||||
if (!stats.isDirectory()) {
|
||||
logger.error(`Error: ${dir} is not a directory`)
|
||||
throw new Error(`Error: ${dir} is not a directory`)
|
||||
}
|
||||
} catch (error: any) {
|
||||
logger.error(`Error accessing directory ${dir}:`, error)
|
||||
throw new Error(`Error accessing directory ${dir}:`, error)
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
initialize() {
|
||||
// Tool handlers
|
||||
this.server.setRequestHandler(ListToolsRequestSchema, async () => {
|
||||
return {
|
||||
tools: [
|
||||
{
|
||||
name: 'read_file',
|
||||
description:
|
||||
'Read the complete contents of a file from the file system. ' +
|
||||
'Handles various text encodings and provides detailed error messages ' +
|
||||
'if the file cannot be read. Use this tool when you need to examine ' +
|
||||
'the contents of a single file. Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(ReadFileArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'read_multiple_files',
|
||||
description:
|
||||
'Read the contents of multiple files simultaneously. This is more ' +
|
||||
'efficient than reading files one by one when you need to analyze ' +
|
||||
"or compare multiple files. Each file's content is returned with its " +
|
||||
"path as a reference. Failed reads for individual files won't stop " +
|
||||
'the entire operation. Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(ReadMultipleFilesArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'write_file',
|
||||
description:
|
||||
'Create a new file or completely overwrite an existing file with new content. ' +
|
||||
'Use with caution as it will overwrite existing files without warning. ' +
|
||||
'Handles text content with proper encoding. Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(WriteFileArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'edit_file',
|
||||
description:
|
||||
'Make line-based edits to a text file. Each edit replaces exact line sequences ' +
|
||||
'with new content. Returns a git-style diff showing the changes made. ' +
|
||||
'Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(EditFileArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'create_directory',
|
||||
description:
|
||||
'Create a new directory or ensure a directory exists. Can create multiple ' +
|
||||
'nested directories in one operation. If the directory already exists, ' +
|
||||
'this operation will succeed silently. Perfect for setting up directory ' +
|
||||
'structures for projects or ensuring required paths exist. Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(CreateDirectoryArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'list_directory',
|
||||
description:
|
||||
'Get a detailed listing of all files and directories in a specified path. ' +
|
||||
'Results clearly distinguish between files and directories with [FILE] and [DIR] ' +
|
||||
'prefixes. This tool is essential for understanding directory structure and ' +
|
||||
'finding specific files within a directory. Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(ListDirectoryArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'directory_tree',
|
||||
description:
|
||||
'Get a recursive tree view of files and directories as a JSON structure. ' +
|
||||
"Each entry includes 'name', 'type' (file/directory), and 'children' for directories. " +
|
||||
'Files have no children array, while directories always have a children array (which may be empty). ' +
|
||||
'The output is formatted with 2-space indentation for readability. Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(DirectoryTreeArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'move_file',
|
||||
description:
|
||||
'Move or rename files and directories. Can move files between directories ' +
|
||||
'and rename them in a single operation. If the destination exists, the ' +
|
||||
'operation will fail. Works across different directories and can be used ' +
|
||||
'for simple renaming within the same directory. Both source and destination must be within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(MoveFileArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'search_files',
|
||||
description:
|
||||
'Recursively search for files and directories matching a pattern. ' +
|
||||
'Searches through all subdirectories from the starting path. The search ' +
|
||||
'is case-insensitive and matches partial names. Returns full paths to all ' +
|
||||
"matching items. Great for finding files when you don't know their exact location. " +
|
||||
'Only searches within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(SearchFilesArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'get_file_info',
|
||||
description:
|
||||
'Retrieve detailed metadata about a file or directory. Returns comprehensive ' +
|
||||
'information including size, creation time, last modified time, permissions, ' +
|
||||
'and type. This tool is perfect for understanding file characteristics ' +
|
||||
'without reading the actual content. Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(GetFileInfoArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'list_allowed_directories',
|
||||
description:
|
||||
'Returns the list of directories that this server is allowed to access. ' +
|
||||
'Use this to understand which directories are available before trying to access files.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {},
|
||||
required: []
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
})
|
||||
|
||||
this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
||||
try {
|
||||
const { name, arguments: args } = request.params
|
||||
|
||||
switch (name) {
|
||||
case 'read_file': {
|
||||
const parsed = ReadFileArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for read_file: ${parsed.error}`)
|
||||
}
|
||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
||||
const content = await fs.readFile(validPath, 'utf-8')
|
||||
return {
|
||||
content: [{ type: 'text', text: content }]
|
||||
}
|
||||
}
|
||||
|
||||
case 'read_multiple_files': {
|
||||
const parsed = ReadMultipleFilesArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for read_multiple_files: ${parsed.error}`)
|
||||
}
|
||||
const results = await Promise.all(
|
||||
parsed.data.paths.map(async (filePath: string) => {
|
||||
try {
|
||||
const validPath = await validatePath(this.allowedDirectories, filePath)
|
||||
const content = await fs.readFile(validPath, 'utf-8')
|
||||
return `${filePath}:\n${content}\n`
|
||||
} catch (error) {
|
||||
const errorMessage = error instanceof Error ? error.message : String(error)
|
||||
return `${filePath}: Error - ${errorMessage}`
|
||||
}
|
||||
})
|
||||
)
|
||||
return {
|
||||
content: [{ type: 'text', text: results.join('\n---\n') }]
|
||||
}
|
||||
}
|
||||
|
||||
case 'write_file': {
|
||||
const parsed = WriteFileArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for write_file: ${parsed.error}`)
|
||||
}
|
||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
||||
await fs.writeFile(validPath, parsed.data.content, 'utf-8')
|
||||
return {
|
||||
content: [{ type: 'text', text: `Successfully wrote to ${parsed.data.path}` }]
|
||||
}
|
||||
}
|
||||
|
||||
case 'edit_file': {
|
||||
const parsed = EditFileArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for edit_file: ${parsed.error}`)
|
||||
}
|
||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
||||
const result = await applyFileEdits(validPath, parsed.data.edits, parsed.data.dryRun)
|
||||
return {
|
||||
content: [{ type: 'text', text: result }]
|
||||
}
|
||||
}
|
||||
|
||||
case 'create_directory': {
|
||||
const parsed = CreateDirectoryArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for create_directory: ${parsed.error}`)
|
||||
}
|
||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
||||
await fs.mkdir(validPath, { recursive: true })
|
||||
return {
|
||||
content: [{ type: 'text', text: `Successfully created directory ${parsed.data.path}` }]
|
||||
}
|
||||
}
|
||||
|
||||
case 'list_directory': {
|
||||
const parsed = ListDirectoryArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for list_directory: ${parsed.error}`)
|
||||
}
|
||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
||||
const entries = await fs.readdir(validPath, { withFileTypes: true })
|
||||
const formatted = entries
|
||||
.map((entry) => `${entry.isDirectory() ? '[DIR]' : '[FILE]'} ${entry.name}`)
|
||||
.join('\n')
|
||||
return {
|
||||
content: [{ type: 'text', text: formatted }]
|
||||
}
|
||||
}
|
||||
|
||||
case 'directory_tree': {
|
||||
const parsed = DirectoryTreeArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for directory_tree: ${parsed.error}`)
|
||||
}
|
||||
|
||||
interface TreeEntry {
|
||||
name: string
|
||||
type: 'file' | 'directory'
|
||||
children?: TreeEntry[]
|
||||
}
|
||||
|
||||
async function buildTree(allowedDirectories: string[], currentPath: string): Promise<TreeEntry[]> {
|
||||
const validPath = await validatePath(allowedDirectories, currentPath)
|
||||
const entries = await fs.readdir(validPath, { withFileTypes: true })
|
||||
const result: TreeEntry[] = []
|
||||
|
||||
for (const entry of entries) {
|
||||
const entryData: TreeEntry = {
|
||||
name: entry.name,
|
||||
type: entry.isDirectory() ? 'directory' : 'file'
|
||||
}
|
||||
|
||||
if (entry.isDirectory()) {
|
||||
const subPath = path.join(currentPath, entry.name)
|
||||
entryData.children = await buildTree(allowedDirectories, subPath)
|
||||
}
|
||||
|
||||
result.push(entryData)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
const treeData = await buildTree(this.allowedDirectories, parsed.data.path)
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(treeData, null, 2)
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
case 'move_file': {
|
||||
const parsed = MoveFileArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for move_file: ${parsed.error}`)
|
||||
}
|
||||
const validSourcePath = await validatePath(this.allowedDirectories, parsed.data.source)
|
||||
const validDestPath = await validatePath(this.allowedDirectories, parsed.data.destination)
|
||||
await fs.rename(validSourcePath, validDestPath)
|
||||
return {
|
||||
content: [
|
||||
{ type: 'text', text: `Successfully moved ${parsed.data.source} to ${parsed.data.destination}` }
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
case 'search_files': {
|
||||
const parsed = SearchFilesArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for search_files: ${parsed.error}`)
|
||||
}
|
||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
||||
const results = await searchFiles(
|
||||
this.allowedDirectories,
|
||||
validPath,
|
||||
parsed.data.pattern,
|
||||
parsed.data.excludePatterns
|
||||
)
|
||||
return {
|
||||
content: [{ type: 'text', text: results.length > 0 ? results.join('\n') : 'No matches found' }]
|
||||
}
|
||||
}
|
||||
|
||||
case 'get_file_info': {
|
||||
const parsed = GetFileInfoArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for get_file_info: ${parsed.error}`)
|
||||
}
|
||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
||||
const info = await getFileStats(validPath)
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: Object.entries(info)
|
||||
.map(([key, value]) => `${key}: ${value}`)
|
||||
.join('\n')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
case 'list_allowed_directories': {
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: `Allowed directories:\n${this.allowedDirectories.join('\n')}`
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
throw new Error(`Unknown tool: ${name}`)
|
||||
}
|
||||
} catch (error) {
|
||||
const errorMessage = error instanceof Error ? error.message : String(error)
|
||||
return {
|
||||
content: [{ type: 'text', text: `Error: ${errorMessage}` }],
|
||||
isError: true
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
export default FileSystemServer
|
||||
src/main/mcpServers/filesystem/index.ts (new file, 2 lines)
@ -0,0 +1,2 @@
|
||||
// Re-export FileSystemServer to maintain existing import pattern
|
||||
export { default, FileSystemServer } from './server'
|
||||
src/main/mcpServers/filesystem/server.ts (new file, 118 lines)
@ -0,0 +1,118 @@
|
||||
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
|
||||
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
|
||||
import { app } from 'electron'
|
||||
import fs from 'fs/promises'
|
||||
import path from 'path'
|
||||
|
||||
import {
|
||||
deleteToolDefinition,
|
||||
editToolDefinition,
|
||||
globToolDefinition,
|
||||
grepToolDefinition,
|
||||
handleDeleteTool,
|
||||
handleEditTool,
|
||||
handleGlobTool,
|
||||
handleGrepTool,
|
||||
handleLsTool,
|
||||
handleReadTool,
|
||||
handleWriteTool,
|
||||
lsToolDefinition,
|
||||
readToolDefinition,
|
||||
writeToolDefinition
|
||||
} from './tools'
|
||||
import { logger } from './types'
|
||||
|
||||
export class FileSystemServer {
|
||||
public server: Server
|
||||
private baseDir: string
|
||||
|
||||
constructor(baseDir?: string) {
|
||||
if (baseDir && path.isAbsolute(baseDir)) {
|
||||
this.baseDir = baseDir
|
||||
logger.info(`Using provided baseDir for filesystem MCP: ${baseDir}`)
|
||||
} else {
|
||||
const userData = app.getPath('userData')
|
||||
this.baseDir = path.join(userData, 'Data', 'Workspace')
|
||||
logger.info(`Using default workspace for filesystem MCP baseDir: ${this.baseDir}`)
|
||||
}
|
||||
|
||||
this.server = new Server(
|
||||
{
|
||||
name: 'filesystem-server',
|
||||
version: '2.0.0'
|
||||
},
|
||||
{
|
||||
capabilities: {
|
||||
tools: {}
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
this.initialize()
|
||||
}
|
||||
|
||||
async initialize() {
|
||||
try {
|
||||
await fs.mkdir(this.baseDir, { recursive: true })
|
||||
} catch (error) {
|
||||
logger.error('Failed to create filesystem MCP baseDir', { error, baseDir: this.baseDir })
|
||||
}
|
||||
|
||||
// Register tool list handler
|
||||
this.server.setRequestHandler(ListToolsRequestSchema, async () => {
|
||||
return {
|
||||
tools: [
|
||||
globToolDefinition,
|
||||
lsToolDefinition,
|
||||
grepToolDefinition,
|
||||
readToolDefinition,
|
||||
editToolDefinition,
|
||||
writeToolDefinition,
|
||||
deleteToolDefinition
|
||||
]
|
||||
}
|
||||
})
|
||||
|
||||
// Register tool call handler
|
||||
this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
||||
try {
|
||||
const { name, arguments: args } = request.params
|
||||
|
||||
switch (name) {
|
||||
case 'glob':
|
||||
return await handleGlobTool(args, this.baseDir)
|
||||
|
||||
case 'ls':
|
||||
return await handleLsTool(args, this.baseDir)
|
||||
|
||||
case 'grep':
|
||||
return await handleGrepTool(args, this.baseDir)
|
||||
|
||||
case 'read':
|
||||
return await handleReadTool(args, this.baseDir)
|
||||
|
||||
case 'edit':
|
||||
return await handleEditTool(args, this.baseDir)
|
||||
|
||||
case 'write':
|
||||
return await handleWriteTool(args, this.baseDir)
|
||||
|
||||
case 'delete':
|
||||
return await handleDeleteTool(args, this.baseDir)
|
||||
|
||||
default:
|
||||
throw new Error(`Unknown tool: ${name}`)
|
||||
}
|
||||
} catch (error) {
|
||||
const errorMessage = error instanceof Error ? error.message : String(error)
|
||||
logger.error(`Tool execution error for ${request.params.name}:`, { error })
|
||||
return {
|
||||
content: [{ type: 'text', text: `Error: ${errorMessage}` }],
|
||||
isError: true
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
export default FileSystemServer
|
||||
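
A minimal sketch of exercising the rewritten server in isolation. It assumes the MCP SDK's `InMemoryTransport.createLinkedPair()` and `Client` APIs and an Electron main-process context (the default `baseDir` comes from `app.getPath('userData')`); none of these test helpers are part of the diff itself:

```ts
// Sketch only: assumes @modelcontextprotocol/sdk's InMemoryTransport/Client APIs.
import { Client } from '@modelcontextprotocol/sdk/client/index.js'
import { InMemoryTransport } from '@modelcontextprotocol/sdk/inMemory.js'

import FileSystemServer from './server'

export async function listFilesystemTools(workspaceRoot?: string): Promise<string[]> {
  const fsServer = new FileSystemServer(workspaceRoot)
  const [clientTransport, serverTransport] = InMemoryTransport.createLinkedPair()

  await fsServer.server.connect(serverTransport)

  const client = new Client({ name: 'fs-smoke-test', version: '0.0.1' })
  await client.connect(clientTransport)

  const { tools } = await client.listTools()
  // Expected names: glob, ls, grep, read, edit, write, delete
  return tools.map((t) => t.name)
}
```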
src/main/mcpServers/filesystem/tools/delete.ts (new file, 93 lines)
@ -0,0 +1,93 @@
|
||||
import fs from 'fs/promises'
|
||||
import path from 'path'
|
||||
import * as z from 'zod'
|
||||
|
||||
import { logger, validatePath } from '../types'
|
||||
|
||||
// Schema definition
|
||||
export const DeleteToolSchema = z.object({
|
||||
path: z.string().describe('The path to the file or directory to delete'),
|
||||
recursive: z.boolean().optional().describe('For directories, whether to delete recursively (default: false)')
|
||||
})
|
||||
|
||||
// Tool definition with detailed description
|
||||
export const deleteToolDefinition = {
|
||||
name: 'delete',
|
||||
description: `Deletes a file or directory from the filesystem.
|
||||
|
||||
CAUTION: This operation cannot be undone!
|
||||
|
||||
- For files: simply provide the path
|
||||
- For empty directories: provide the path
|
||||
- For non-empty directories: set recursive=true
|
||||
- The path must be an absolute path, not a relative path
|
||||
- Always verify the path before deleting to avoid data loss`,
|
||||
inputSchema: z.toJSONSchema(DeleteToolSchema)
|
||||
}
|
||||
|
||||
// Handler implementation
|
||||
export async function handleDeleteTool(args: unknown, baseDir: string) {
|
||||
const parsed = DeleteToolSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for delete: ${parsed.error}`)
|
||||
}
|
||||
|
||||
const targetPath = parsed.data.path
|
||||
const validPath = await validatePath(targetPath, baseDir)
|
||||
const recursive = parsed.data.recursive || false
|
||||
|
||||
// Check if path exists and get stats
|
||||
let stats
|
||||
try {
|
||||
stats = await fs.stat(validPath)
|
||||
} catch (error: any) {
|
||||
if (error.code === 'ENOENT') {
|
||||
throw new Error(`Path not found: ${targetPath}`)
|
||||
}
|
||||
throw error
|
||||
}
|
||||
|
||||
const isDirectory = stats.isDirectory()
|
||||
const relativePath = path.relative(baseDir, validPath)
|
||||
|
||||
// Perform deletion
|
||||
try {
|
||||
if (isDirectory) {
|
||||
if (recursive) {
|
||||
// Delete directory recursively
|
||||
await fs.rm(validPath, { recursive: true, force: true })
|
||||
} else {
|
||||
// Try to delete empty directory
|
||||
await fs.rmdir(validPath)
|
||||
}
|
||||
} else {
|
||||
// Delete file
|
||||
await fs.unlink(validPath)
|
||||
}
|
||||
} catch (error: any) {
|
||||
if (error.code === 'ENOTEMPTY') {
|
||||
throw new Error(`Directory not empty: ${targetPath}. Use recursive=true to delete non-empty directories.`)
|
||||
}
|
||||
throw new Error(`Failed to delete: ${error.message}`)
|
||||
}
|
||||
|
||||
// Log the operation
|
||||
logger.info('Path deleted', {
|
||||
path: validPath,
|
||||
type: isDirectory ? 'directory' : 'file',
|
||||
recursive: isDirectory ? recursive : undefined
|
||||
})
|
||||
|
||||
// Format output
|
||||
const itemType = isDirectory ? 'Directory' : 'File'
|
||||
const recursiveNote = isDirectory && recursive ? ' (recursive)' : ''
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: `${itemType} deleted${recursiveNote}: ${relativePath}`
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
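
Each tool module pairs a schema/definition with a handler that can be called directly, which keeps unit testing simple. A small sketch of exercising `handleDeleteTool` against a throwaway workspace (the temp-directory setup is illustrative; the tool itself requires absolute paths inside `baseDir`):

```ts
// Sketch only: illustrative baseDir and paths.
import fs from 'fs/promises'
import os from 'os'
import path from 'path'

import { handleDeleteTool } from './delete'

export async function demoDelete(): Promise<void> {
  const baseDir = await fs.mkdtemp(path.join(os.tmpdir(), 'fs-mcp-'))
  const scratchDir = path.join(baseDir, 'scratch')
  await fs.mkdir(scratchDir)
  await fs.writeFile(path.join(scratchDir, 'notes.txt'), 'temporary', 'utf-8')

  // Non-empty directory: recursive=true is required, otherwise the handler reports ENOTEMPTY.
  const result = await handleDeleteTool({ path: scratchDir, recursive: true }, baseDir)
  console.log(result.content[0].text) // e.g. "Directory deleted (recursive): scratch"
}
```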
src/main/mcpServers/filesystem/tools/edit.ts (new file, 130 lines)
@ -0,0 +1,130 @@
|
||||
import fs from 'fs/promises'
|
||||
import path from 'path'
|
||||
import * as z from 'zod'
|
||||
|
||||
import { logger, replaceWithFuzzyMatch, validatePath } from '../types'
|
||||
|
||||
// Schema definition
|
||||
export const EditToolSchema = z.object({
|
||||
file_path: z.string().describe('The path to the file to modify'),
|
||||
old_string: z.string().describe('The text to replace'),
|
||||
new_string: z.string().describe('The text to replace it with'),
|
||||
replace_all: z.boolean().optional().default(false).describe('Replace all occurrences of old_string (default false)')
|
||||
})
|
||||
|
||||
// Tool definition with detailed description
|
||||
export const editToolDefinition = {
|
||||
name: 'edit',
|
||||
description: `Performs exact string replacements in files.
|
||||
|
||||
- You must use the 'read' tool at least once before editing
|
||||
- The file_path must be an absolute path, not a relative path
|
||||
- Preserve exact indentation from read output (after the line number prefix)
|
||||
- Never include line number prefixes in old_string or new_string
|
||||
- ALWAYS prefer editing existing files over creating new ones
|
||||
- The edit will FAIL if old_string is not found in the file
|
||||
- The edit will FAIL if old_string appears multiple times (provide more context or use replace_all)
|
||||
- The edit will FAIL if old_string equals new_string
|
||||
- Use replace_all to rename variables or replace all occurrences`,
|
||||
inputSchema: z.toJSONSchema(EditToolSchema)
|
||||
}
|
||||
|
||||
// Handler implementation
|
||||
export async function handleEditTool(args: unknown, baseDir: string) {
|
||||
const parsed = EditToolSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for edit: ${parsed.error}`)
|
||||
}
|
||||
|
||||
const { file_path: filePath, old_string: oldString, new_string: newString, replace_all: replaceAll } = parsed.data
|
||||
|
||||
// Validate path
|
||||
const validPath = await validatePath(filePath, baseDir)
|
||||
|
||||
// Check if file exists
|
||||
try {
|
||||
const stats = await fs.stat(validPath)
|
||||
if (!stats.isFile()) {
|
||||
throw new Error(`Path is not a file: ${filePath}`)
|
||||
}
|
||||
} catch (error: any) {
|
||||
if (error.code === 'ENOENT') {
|
||||
// If old_string is empty, this is a create new file operation
|
||||
if (oldString === '') {
|
||||
// Create parent directory if needed
|
||||
const parentDir = path.dirname(validPath)
|
||||
await fs.mkdir(parentDir, { recursive: true })
|
||||
|
||||
// Write the new content
|
||||
await fs.writeFile(validPath, newString, 'utf-8')
|
||||
|
||||
logger.info('File created', { path: validPath })
|
||||
|
||||
const relativePath = path.relative(baseDir, validPath)
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: `Created new file: ${relativePath}\nLines: ${newString.split('\n').length}`
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
throw new Error(`File not found: ${filePath}`)
|
||||
}
|
||||
throw error
|
||||
}
|
||||
|
||||
// Read current content
|
||||
const content = await fs.readFile(validPath, 'utf-8')
|
||||
|
||||
// Handle special case: old_string is empty (create file with content)
|
||||
if (oldString === '') {
|
||||
await fs.writeFile(validPath, newString, 'utf-8')
|
||||
|
||||
logger.info('File overwritten', { path: validPath })
|
||||
|
||||
const relativePath = path.relative(baseDir, validPath)
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: `Overwrote file: ${relativePath}\nLines: ${newString.split('\n').length}`
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
// Perform the replacement with fuzzy matching
|
||||
const newContent = replaceWithFuzzyMatch(content, oldString, newString, replaceAll)
|
||||
|
||||
// Write the modified content
|
||||
await fs.writeFile(validPath, newContent, 'utf-8')
|
||||
|
||||
logger.info('File edited', {
|
||||
path: validPath,
|
||||
replaceAll
|
||||
})
|
||||
|
||||
// Generate a simple diff summary
|
||||
const oldLines = content.split('\n').length
|
||||
const newLines = newContent.split('\n').length
|
||||
const lineDiff = newLines - oldLines
|
||||
|
||||
const relativePath = path.relative(baseDir, validPath)
|
||||
let diffSummary = `Edited: ${relativePath}`
|
||||
if (lineDiff > 0) {
|
||||
diffSummary += `\n+${lineDiff} lines`
|
||||
} else if (lineDiff < 0) {
|
||||
diffSummary += `\n${lineDiff} lines`
|
||||
}
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: diffSummary
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
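
One behavior worth calling out: when `old_string` is empty and the target file does not exist, the edit tool creates the file (and its parent directories) rather than failing. A short sketch under the same assumptions as above (handler called directly, illustrative absolute paths inside `baseDir`):

```ts
// Sketch only: baseDir and file paths are illustrative; file_path must be absolute.
import path from 'path'

import { handleEditTool } from './edit'

export async function demoEdit(baseDir: string): Promise<void> {
  const target = path.join(baseDir, 'src', 'hello.ts')

  // 1. old_string === '' and the file is missing -> the file (and src/) is created with new_string.
  await handleEditTool(
    { file_path: target, old_string: '', new_string: `export const greeting = 'hi'\n` },
    baseDir
  )

  // 2. Normal exact-string replacement; fails if old_string is absent or ambiguous (use replace_all).
  await handleEditTool(
    { file_path: target, old_string: `'hi'`, new_string: `'hello'`, replace_all: false },
    baseDir
  )
}
```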
src/main/mcpServers/filesystem/tools/glob.ts (new file, 149 lines)
@ -0,0 +1,149 @@
|
||||
import fs from 'fs/promises'
|
||||
import path from 'path'
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { FileInfo } from '../types'
|
||||
import { logger, MAX_FILES_LIMIT, runRipgrep, validatePath } from '../types'
|
||||
|
||||
// Schema definition
|
||||
export const GlobToolSchema = z.object({
|
||||
pattern: z.string().describe('The glob pattern to match files against'),
|
||||
path: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe('The directory to search in (must be absolute path). Defaults to the base directory')
|
||||
})
|
||||
|
||||
// Tool definition with detailed description
|
||||
export const globToolDefinition = {
|
||||
name: 'glob',
|
||||
description: `Fast file pattern matching tool that works with any codebase size.
|
||||
|
||||
- Supports glob patterns like "**/*.js" or "src/**/*.ts"
|
||||
- Returns matching absolute file paths sorted by modification time (newest first)
|
||||
- Use this when you need to find files by name patterns
|
||||
- Patterns without "/" (e.g., "*.txt") match files at ANY depth in the directory tree
|
||||
- Patterns with "/" (e.g., "src/*.ts") match relative to the search path
|
||||
- Pattern syntax: * (any chars), ** (any path), {a,b} (alternatives), ? (single char)
|
||||
- Results are limited to 100 files
|
||||
- The path parameter must be an absolute path if specified
|
||||
- If path is not specified, defaults to the base directory
|
||||
- IMPORTANT: Omit the path field for the default directory (don't use "undefined" or "null")`,
|
||||
inputSchema: z.toJSONSchema(GlobToolSchema)
|
||||
}
|
||||
|
||||
// Handler implementation
|
||||
export async function handleGlobTool(args: unknown, baseDir: string) {
|
||||
const parsed = GlobToolSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for glob: ${parsed.error}`)
|
||||
}
|
||||
|
||||
const searchPath = parsed.data.path || baseDir
|
||||
const validPath = await validatePath(searchPath, baseDir)
|
||||
|
||||
// Verify the search directory exists
|
||||
try {
|
||||
const stats = await fs.stat(validPath)
|
||||
if (!stats.isDirectory()) {
|
||||
throw new Error(`Path is not a directory: ${validPath}`)
|
||||
}
|
||||
} catch (error: unknown) {
|
||||
if (error && typeof error === 'object' && 'code' in error && error.code === 'ENOENT') {
|
||||
throw new Error(`Directory not found: ${validPath}`)
|
||||
}
|
||||
throw error
|
||||
}
|
||||
|
||||
// Validate pattern
|
||||
const pattern = parsed.data.pattern.trim()
|
||||
if (!pattern) {
|
||||
throw new Error('Pattern cannot be empty')
|
||||
}
|
||||
|
||||
const files: FileInfo[] = []
|
||||
let truncated = false
|
||||
|
||||
// Build ripgrep arguments for file listing using --glob=pattern format
|
||||
const rgArgs: string[] = [
|
||||
'--files',
|
||||
'--follow',
|
||||
'--hidden',
|
||||
`--glob=${pattern}`,
|
||||
'--glob=!.git/*',
|
||||
'--glob=!node_modules/*',
|
||||
'--glob=!dist/*',
|
||||
'--glob=!build/*',
|
||||
'--glob=!__pycache__/*',
|
||||
validPath
|
||||
]
|
||||
|
||||
// Use ripgrep for file listing
|
||||
logger.debug('Running ripgrep with args', { rgArgs })
|
||||
const rgResult = await runRipgrep(rgArgs)
|
||||
logger.debug('Ripgrep result', {
|
||||
ok: rgResult.ok,
|
||||
exitCode: rgResult.exitCode,
|
||||
stdoutLength: rgResult.stdout.length,
|
||||
stdoutPreview: rgResult.stdout.slice(0, 500)
|
||||
})
|
||||
|
||||
// Process results if we have stdout content
|
||||
// Exit code 2 can indicate partial errors (e.g., permission denied on some dirs) but still have valid results
|
||||
if (rgResult.ok && rgResult.stdout.length > 0) {
|
||||
const lines = rgResult.stdout.split('\n').filter(Boolean)
|
||||
logger.debug('Parsed lines from ripgrep', { lineCount: lines.length, lines })
|
||||
|
||||
for (const line of lines) {
|
||||
if (files.length >= MAX_FILES_LIMIT) {
|
||||
truncated = true
|
||||
break
|
||||
}
|
||||
|
||||
const filePath = line.trim()
|
||||
if (!filePath) continue
|
||||
|
||||
const absolutePath = path.isAbsolute(filePath) ? filePath : path.resolve(validPath, filePath)
|
||||
|
||||
try {
|
||||
const stats = await fs.stat(absolutePath)
|
||||
files.push({
|
||||
path: absolutePath,
|
||||
type: 'file', // ripgrep --files only returns files
|
||||
size: stats.size,
|
||||
modified: stats.mtime
|
||||
})
|
||||
} catch (error) {
|
||||
logger.debug('Failed to stat file from ripgrep output, skipping', { file: absolutePath, error })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sort by modification time (newest first)
|
||||
files.sort((a, b) => {
|
||||
const aTime = a.modified ? a.modified.getTime() : 0
|
||||
const bTime = b.modified ? b.modified.getTime() : 0
|
||||
return bTime - aTime
|
||||
})
|
||||
|
||||
// Format output - always use absolute paths
|
||||
const output: string[] = []
|
||||
if (files.length === 0) {
|
||||
output.push(`No files found matching pattern "${parsed.data.pattern}" in ${validPath}`)
|
||||
} else {
|
||||
output.push(...files.map((f) => f.path))
|
||||
if (truncated) {
|
||||
output.push('')
|
||||
output.push(`(Results truncated to ${MAX_FILES_LIMIT} files. Consider using a more specific pattern.)`)
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: output.join('\n')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
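
A short sketch of calling the glob handler directly, following the pattern semantics spelled out in its tool description: bare patterns like `*.md` match at any depth, patterns containing `/` are anchored to the search path, and the `path` field is simply omitted to search the base directory. The `baseDir` argument is illustrative:

```ts
// Sketch only: baseDir is an illustrative absolute path; results are capped at 100 files.
import { handleGlobTool } from './glob'

export async function demoGlob(baseDir: string): Promise<void> {
  // Markdown files at any depth under baseDir, newest first.
  const anywhere = await handleGlobTool({ pattern: '*.md' }, baseDir)
  console.log(anywhere.content[0].text)

  // Anchored pattern: only TypeScript sources directly under src/.
  const anchored = await handleGlobTool({ pattern: 'src/*.ts', path: baseDir }, baseDir)
  console.log(anchored.content[0].text)
}
```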
src/main/mcpServers/filesystem/tools/grep.ts (new file, 266 lines)
@ -0,0 +1,266 @@
|
||||
import fs from 'fs/promises'
|
||||
import path from 'path'
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { GrepMatch } from '../types'
|
||||
import { isBinaryFile, MAX_GREP_MATCHES, MAX_LINE_LENGTH, runRipgrep, validatePath } from '../types'
|
||||
|
||||
// Schema definition
|
||||
export const GrepToolSchema = z.object({
|
||||
pattern: z.string().describe('The regex pattern to search for in file contents'),
|
||||
path: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe('The directory to search in (must be absolute path). Defaults to the base directory'),
|
||||
include: z.string().optional().describe('File pattern to include in the search (e.g. "*.js", "*.{ts,tsx}")')
|
||||
})
|
||||
|
||||
// Tool definition with detailed description
|
||||
export const grepToolDefinition = {
|
||||
name: 'grep',
|
||||
description: `Fast content search tool that works with any codebase size.
|
||||
|
||||
- Searches file contents using regular expressions
|
||||
- Supports full regex syntax (e.g., "log.*Error", "function\\s+\\w+")
|
||||
- Filter files by pattern with include (e.g., "*.js", "*.{ts,tsx}")
|
||||
- Returns absolute file paths and line numbers with matching content
|
||||
- Results are limited to 100 matches
|
||||
- Binary files are automatically skipped
|
||||
- Common directories (node_modules, .git, dist) are excluded
|
||||
- The path parameter must be an absolute path if specified
|
||||
- If path is not specified, defaults to the base directory`,
|
||||
inputSchema: z.toJSONSchema(GrepToolSchema)
|
||||
}
|
||||
|
||||
// Handler implementation
|
||||
export async function handleGrepTool(args: unknown, baseDir: string) {
|
||||
const parsed = GrepToolSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for grep: ${parsed.error}`)
|
||||
}
|
||||
|
||||
const data = parsed.data
|
||||
|
||||
if (!data.pattern) {
|
||||
throw new Error('Pattern is required for grep')
|
||||
}
|
||||
|
||||
const searchPath = data.path || baseDir
|
||||
const validPath = await validatePath(searchPath, baseDir)
|
||||
|
||||
const matches: GrepMatch[] = []
|
||||
let truncated = false
|
||||
let regex: RegExp
|
||||
|
||||
// Build ripgrep arguments
|
||||
const rgArgs: string[] = [
|
||||
'--no-heading',
|
||||
'--line-number',
|
||||
'--color',
|
||||
'never',
|
||||
'--ignore-case',
|
||||
'--glob',
|
||||
'!.git/**',
|
||||
'--glob',
|
||||
'!node_modules/**',
|
||||
'--glob',
|
||||
'!dist/**',
|
||||
'--glob',
|
||||
'!build/**',
|
||||
'--glob',
|
||||
'!__pycache__/**'
|
||||
]
|
||||
|
||||
if (data.include) {
|
||||
for (const pat of data.include
|
||||
.split(',')
|
||||
.map((p) => p.trim())
|
||||
.filter(Boolean)) {
|
||||
rgArgs.push('--glob', pat)
|
||||
}
|
||||
}
|
||||
|
||||
rgArgs.push(data.pattern)
|
||||
rgArgs.push(validPath)
|
||||
|
||||
try {
|
||||
regex = new RegExp(data.pattern, 'gi')
|
||||
} catch (error) {
|
||||
throw new Error(`Invalid regex pattern: ${data.pattern}`)
|
||||
}
|
||||
|
||||
async function searchFile(filePath: string): Promise<void> {
|
||||
if (matches.length >= MAX_GREP_MATCHES) {
|
||||
truncated = true
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
// Skip binary files
|
||||
if (await isBinaryFile(filePath)) {
|
||||
return
|
||||
}
|
||||
|
||||
const content = await fs.readFile(filePath, 'utf-8')
|
||||
const lines = content.split('\n')
|
||||
|
||||
lines.forEach((line, index) => {
|
||||
if (matches.length >= MAX_GREP_MATCHES) {
|
||||
truncated = true
|
||||
return
|
||||
}
|
||||
|
||||
if (regex.test(line)) {
|
||||
// Truncate long lines
|
||||
const truncatedLine = line.length > MAX_LINE_LENGTH ? line.substring(0, MAX_LINE_LENGTH) + '...' : line
|
||||
|
||||
matches.push({
|
||||
file: filePath,
|
||||
line: index + 1,
|
||||
content: truncatedLine.trim()
|
||||
})
|
||||
}
|
||||
})
|
||||
} catch (error) {
|
||||
// Skip files we can't read
|
||||
}
|
||||
}
|
||||
|
||||
async function searchDirectory(dir: string): Promise<void> {
|
||||
if (matches.length >= MAX_GREP_MATCHES) {
|
||||
truncated = true
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
const entries = await fs.readdir(dir, { withFileTypes: true })
|
||||
|
||||
for (const entry of entries) {
|
||||
if (matches.length >= MAX_GREP_MATCHES) {
|
||||
truncated = true
|
||||
break
|
||||
}
|
||||
|
||||
const fullPath = path.join(dir, entry.name)
|
||||
|
||||
// Skip common ignore patterns
|
||||
if (entry.name.startsWith('.') && entry.name !== '.env.example') {
|
||||
continue
|
||||
}
|
||||
if (['node_modules', 'dist', 'build', '__pycache__', '.git'].includes(entry.name)) {
|
||||
continue
|
||||
}
|
||||
|
||||
if (entry.isFile()) {
|
||||
// Check if file matches include pattern
|
||||
if (data.include) {
|
||||
const includePatterns = data.include.split(',').map((p) => p.trim())
|
||||
const fileName = path.basename(fullPath)
|
||||
const matchesInclude = includePatterns.some((pattern) => {
|
||||
// Simple glob pattern matching
|
||||
const regexPattern = pattern
|
||||
.replace(/\*/g, '.*')
|
||||
.replace(/\?/g, '.')
|
||||
.replace(/\{([^}]+)\}/g, (_, group) => `(${group.split(',').join('|')})`)
|
||||
return new RegExp(`^${regexPattern}$`).test(fileName)
|
||||
})
|
||||
if (!matchesInclude) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
await searchFile(fullPath)
|
||||
} else if (entry.isDirectory()) {
|
||||
await searchDirectory(fullPath)
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
// Skip directories we can't read
|
||||
}
|
||||
}
|
||||
|
||||
// Perform the search
|
||||
let usedRipgrep = false
|
||||
try {
|
||||
const rgResult = await runRipgrep(rgArgs)
|
||||
if (rgResult.ok && rgResult.exitCode !== null && rgResult.exitCode !== 2) {
|
||||
usedRipgrep = true
|
||||
const lines = rgResult.stdout.split('\n').filter(Boolean)
|
||||
for (const line of lines) {
|
||||
if (matches.length >= MAX_GREP_MATCHES) {
|
||||
truncated = true
|
||||
break
|
||||
}
|
||||
|
||||
const firstColon = line.indexOf(':')
|
||||
const secondColon = line.indexOf(':', firstColon + 1)
|
||||
if (firstColon === -1 || secondColon === -1) continue
|
||||
|
||||
const filePart = line.slice(0, firstColon)
|
||||
const linePart = line.slice(firstColon + 1, secondColon)
|
||||
const contentPart = line.slice(secondColon + 1)
|
||||
const lineNum = Number.parseInt(linePart, 10)
|
||||
if (!Number.isFinite(lineNum)) continue
|
||||
|
||||
const absoluteFilePath = path.isAbsolute(filePart) ? filePart : path.resolve(baseDir, filePart)
|
||||
const truncatedLine =
|
||||
contentPart.length > MAX_LINE_LENGTH ? contentPart.substring(0, MAX_LINE_LENGTH) + '...' : contentPart
|
||||
|
||||
matches.push({
|
||||
file: absoluteFilePath,
|
||||
line: lineNum,
|
||||
content: truncatedLine.trim()
|
||||
})
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
usedRipgrep = false
|
||||
}
|
||||
|
||||
if (!usedRipgrep) {
|
||||
const stats = await fs.stat(validPath)
|
||||
if (stats.isFile()) {
|
||||
await searchFile(validPath)
|
||||
} else {
|
||||
await searchDirectory(validPath)
|
||||
}
|
||||
}
|
||||
|
||||
// Format output
|
||||
const output: string[] = []
|
||||
|
||||
if (matches.length === 0) {
|
||||
output.push('No matches found')
|
||||
} else {
|
||||
// Group matches by file
|
||||
const fileGroups = new Map<string, GrepMatch[]>()
|
||||
matches.forEach((match) => {
|
||||
if (!fileGroups.has(match.file)) {
|
||||
fileGroups.set(match.file, [])
|
||||
}
|
||||
fileGroups.get(match.file)!.push(match)
|
||||
})
|
||||
|
||||
// Format grouped matches - always use absolute paths
|
||||
fileGroups.forEach((fileMatches, filePath) => {
|
||||
output.push(`\n${filePath}:`)
|
||||
fileMatches.forEach((match) => {
|
||||
output.push(` ${match.line}: ${match.content}`)
|
||||
})
|
||||
})
|
||||
|
||||
if (truncated) {
|
||||
output.push('')
|
||||
output.push(`(Results truncated to ${MAX_GREP_MATCHES} matches. Consider using a more specific pattern or path.)`)
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: output.join('\n')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
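Illustrative only (not part of the diff): a minimal sketch of calling the grep handler defined above once it is wired up; the argument shape follows GrepToolSchema and the paths are placeholders.

import { handleGrepTool } from './grep'

const result = await handleGrepTool(
  { pattern: 'log.*Error', include: '*.ts,*.tsx' }, // fields defined by GrepToolSchema
  '/home/user/project' // baseDir, used because args.path is omitted
)
// Matches come back grouped by file, capped at MAX_GREP_MATCHES (100).
console.log(result.content[0].text)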
8
src/main/mcpServers/filesystem/tools/index.ts
Normal file
@ -0,0 +1,8 @@
// Export all tool definitions and handlers
export { deleteToolDefinition, handleDeleteTool } from './delete'
export { editToolDefinition, handleEditTool } from './edit'
export { globToolDefinition, handleGlobTool } from './glob'
export { grepToolDefinition, handleGrepTool } from './grep'
export { handleLsTool, lsToolDefinition } from './ls'
export { handleReadTool, readToolDefinition } from './read'
export { handleWriteTool, writeToolDefinition } from './write'
150
src/main/mcpServers/filesystem/tools/ls.ts
Normal file
@ -0,0 +1,150 @@
|
||||
import fs from 'fs/promises'
|
||||
import path from 'path'
|
||||
import * as z from 'zod'
|
||||
|
||||
import { MAX_FILES_LIMIT, validatePath } from '../types'
|
||||
|
||||
// Schema definition
|
||||
export const LsToolSchema = z.object({
|
||||
path: z.string().optional().describe('The directory to list (must be absolute path). Defaults to the base directory'),
|
||||
recursive: z.boolean().optional().describe('Whether to list directories recursively (default: false)')
|
||||
})
|
||||
|
||||
// Tool definition with detailed description
|
||||
export const lsToolDefinition = {
|
||||
name: 'ls',
|
||||
description: `Lists files and directories in a specified path.
|
||||
|
||||
- Returns a tree-like structure with icons (📁 directories, 📄 files)
|
||||
- Shows the absolute directory path in the header
|
||||
- Entries are sorted alphabetically with directories first
|
||||
- Can list recursively with recursive=true (up to 5 levels deep)
|
||||
- Common directories (node_modules, dist, .git) are excluded
|
||||
- Hidden files (starting with .) are excluded except .env.example
|
||||
- Results are limited to 100 entries
|
||||
- The path parameter must be an absolute path if specified
|
||||
- If path is not specified, defaults to the base directory`,
|
||||
inputSchema: z.toJSONSchema(LsToolSchema)
|
||||
}
|
||||
|
||||
// Handler implementation
|
||||
export async function handleLsTool(args: unknown, baseDir: string) {
|
||||
const parsed = LsToolSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for ls: ${parsed.error}`)
|
||||
}
|
||||
|
||||
const targetPath = parsed.data.path || baseDir
|
||||
const validPath = await validatePath(targetPath, baseDir)
|
||||
const recursive = parsed.data.recursive || false
|
||||
|
||||
interface TreeNode {
|
||||
name: string
|
||||
type: 'file' | 'directory'
|
||||
children?: TreeNode[]
|
||||
}
|
||||
|
||||
let fileCount = 0
|
||||
let truncated = false
|
||||
|
||||
async function buildTree(dirPath: string, depth: number = 0): Promise<TreeNode[]> {
|
||||
if (fileCount >= MAX_FILES_LIMIT) {
|
||||
truncated = true
|
||||
return []
|
||||
}
|
||||
|
||||
try {
|
||||
const entries = await fs.readdir(dirPath, { withFileTypes: true })
|
||||
const nodes: TreeNode[] = []
|
||||
|
||||
// Sort entries: directories first, then files, alphabetically
|
||||
entries.sort((a, b) => {
|
||||
if (a.isDirectory() && !b.isDirectory()) return -1
|
||||
if (!a.isDirectory() && b.isDirectory()) return 1
|
||||
return a.name.localeCompare(b.name)
|
||||
})
|
||||
|
||||
for (const entry of entries) {
|
||||
if (fileCount >= MAX_FILES_LIMIT) {
|
||||
truncated = true
|
||||
break
|
||||
}
|
||||
|
||||
// Skip hidden files and common ignore patterns
|
||||
if (entry.name.startsWith('.') && entry.name !== '.env.example') {
|
||||
continue
|
||||
}
|
||||
if (['node_modules', 'dist', 'build', '__pycache__'].includes(entry.name)) {
|
||||
continue
|
||||
}
|
||||
|
||||
fileCount++
|
||||
const node: TreeNode = {
|
||||
name: entry.name,
|
||||
type: entry.isDirectory() ? 'directory' : 'file'
|
||||
}
|
||||
|
||||
if (entry.isDirectory() && recursive && depth < 5) {
|
||||
// Limit depth to prevent infinite recursion
|
||||
const childPath = path.join(dirPath, entry.name)
|
||||
node.children = await buildTree(childPath, depth + 1)
|
||||
}
|
||||
|
||||
nodes.push(node)
|
||||
}
|
||||
|
||||
return nodes
|
||||
} catch (error) {
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
// Build the tree
|
||||
const tree = await buildTree(validPath)
|
||||
|
||||
// Format as text output
|
||||
function formatTree(nodes: TreeNode[], prefix: string = ''): string[] {
|
||||
const lines: string[] = []
|
||||
|
||||
nodes.forEach((node, index) => {
|
||||
const isLastNode = index === nodes.length - 1
|
||||
const connector = isLastNode ? '└── ' : '├── '
|
||||
const icon = node.type === 'directory' ? '📁 ' : '📄 '
|
||||
|
||||
lines.push(prefix + connector + icon + node.name)
|
||||
|
||||
if (node.children && node.children.length > 0) {
|
||||
const childPrefix = prefix + (isLastNode ? ' ' : '│ ')
|
||||
lines.push(...formatTree(node.children, childPrefix))
|
||||
}
|
||||
})
|
||||
|
||||
return lines
|
||||
}
|
||||
|
||||
// Generate output
|
||||
const output: string[] = []
|
||||
output.push(`Directory: ${validPath}`)
|
||||
output.push('')
|
||||
|
||||
if (tree.length === 0) {
|
||||
output.push('(empty directory)')
|
||||
} else {
|
||||
const treeLines = formatTree(tree, '')
|
||||
output.push(...treeLines)
|
||||
|
||||
if (truncated) {
|
||||
output.push('')
|
||||
output.push(`(Results truncated to ${MAX_FILES_LIMIT} files. Consider listing a more specific directory.)`)
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: output.join('\n')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
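A hedged illustration (not from the commit) of what the ls handler above returns for a small project; the path and file names are placeholders.

import { handleLsTool } from './ls'

const listing = await handleLsTool({ path: '/home/user/project', recursive: true }, '/home/user/project')
console.log(listing.content[0].text)
// Expected shape of the output:
// Directory: /home/user/project
//
// ├── 📁 src
// │   ├── 📄 index.ts
// │   └── 📄 utils.ts
// └── 📄 package.json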
101
src/main/mcpServers/filesystem/tools/read.ts
Normal file
@ -0,0 +1,101 @@
|
||||
import fs from 'fs/promises'
|
||||
import path from 'path'
|
||||
import * as z from 'zod'
|
||||
|
||||
import { DEFAULT_READ_LIMIT, isBinaryFile, MAX_LINE_LENGTH, validatePath } from '../types'
|
||||
|
||||
// Schema definition
|
||||
export const ReadToolSchema = z.object({
|
||||
file_path: z.string().describe('The path to the file to read'),
|
||||
offset: z.number().optional().describe('The line number to start reading from (1-based)'),
|
||||
limit: z.number().optional().describe('The number of lines to read (defaults to 2000)')
|
||||
})
|
||||
|
||||
// Tool definition with detailed description
|
||||
export const readToolDefinition = {
|
||||
name: 'read',
|
||||
description: `Reads a file from the local filesystem.
|
||||
|
||||
- Assumes this tool can read all files on the machine
|
||||
- The file_path parameter must be an absolute path, not a relative path
|
||||
- By default, reads up to 2000 lines starting from the beginning
|
||||
- You can optionally specify a line offset and limit for long files
|
||||
- Any lines longer than 2000 characters will be truncated
|
||||
- Results are returned with line numbers starting at 1
|
||||
- Binary files are detected and rejected with an error
|
||||
- Empty files return a warning`,
|
||||
inputSchema: z.toJSONSchema(ReadToolSchema)
|
||||
}
|
||||
|
||||
// Handler implementation
|
||||
export async function handleReadTool(args: unknown, baseDir: string) {
|
||||
const parsed = ReadToolSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for read: ${parsed.error}`)
|
||||
}
|
||||
|
||||
const filePath = parsed.data.file_path
|
||||
const validPath = await validatePath(filePath, baseDir)
|
||||
|
||||
// Check if file exists
|
||||
try {
|
||||
const stats = await fs.stat(validPath)
|
||||
if (!stats.isFile()) {
|
||||
throw new Error(`Path is not a file: ${filePath}`)
|
||||
}
|
||||
} catch (error: any) {
|
||||
if (error.code === 'ENOENT') {
|
||||
throw new Error(`File not found: ${filePath}`)
|
||||
}
|
||||
throw error
|
||||
}
|
||||
|
||||
// Check if file is binary
|
||||
if (await isBinaryFile(validPath)) {
|
||||
throw new Error(`Cannot read binary file: ${filePath}`)
|
||||
}
|
||||
|
||||
// Read file content
|
||||
const content = await fs.readFile(validPath, 'utf-8')
|
||||
const lines = content.split('\n')
|
||||
|
||||
// Apply offset and limit
|
||||
const offset = (parsed.data.offset || 1) - 1 // Convert to 0-based
|
||||
const limit = parsed.data.limit || DEFAULT_READ_LIMIT
|
||||
|
||||
if (offset < 0 || offset >= lines.length) {
|
||||
throw new Error(`Invalid offset: ${offset + 1}. File has ${lines.length} lines.`)
|
||||
}
|
||||
|
||||
const selectedLines = lines.slice(offset, offset + limit)
|
||||
|
||||
// Format output with line numbers and truncate long lines
|
||||
const output: string[] = []
|
||||
const relativePath = path.relative(baseDir, validPath)
|
||||
|
||||
output.push(`File: ${relativePath}`)
|
||||
if (offset > 0 || limit < lines.length) {
|
||||
output.push(`Lines ${offset + 1} to ${Math.min(offset + limit, lines.length)} of ${lines.length}`)
|
||||
}
|
||||
output.push('')
|
||||
|
||||
selectedLines.forEach((line, index) => {
|
||||
const lineNumber = offset + index + 1
|
||||
const truncatedLine = line.length > MAX_LINE_LENGTH ? line.substring(0, MAX_LINE_LENGTH) + '...' : line
|
||||
output.push(`${lineNumber.toString().padStart(6)}\t${truncatedLine}`)
|
||||
})
|
||||
|
||||
if (offset + limit < lines.length) {
|
||||
output.push('')
|
||||
output.push(`(${lines.length - (offset + limit)} more lines not shown)`)
|
||||
}
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: output.join('\n')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
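For illustration only: paging through a long file with the read handler above, using the 1-based offset and limit parameters from ReadToolSchema; the path is a placeholder and the file is assumed to have at least 2001 lines (otherwise the handler throws an invalid-offset error).

import { handleReadTool } from './read'

const page = await handleReadTool(
  { file_path: '/home/user/project/big.log', offset: 2001, limit: 2000 },
  '/home/user/project'
)
// Each output line is a right-padded line number, a tab, then the (possibly truncated) content,
// e.g. "  2001\t<line content>".
console.log(page.content[0].text)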
83
src/main/mcpServers/filesystem/tools/write.ts
Normal file
@ -0,0 +1,83 @@
|
||||
import fs from 'fs/promises'
|
||||
import path from 'path'
|
||||
import * as z from 'zod'
|
||||
|
||||
import { logger, validatePath } from '../types'
|
||||
|
||||
// Schema definition
|
||||
export const WriteToolSchema = z.object({
|
||||
file_path: z.string().describe('The path to the file to write'),
|
||||
content: z.string().describe('The content to write to the file')
|
||||
})
|
||||
|
||||
// Tool definition with detailed description
|
||||
export const writeToolDefinition = {
|
||||
name: 'write',
|
||||
description: `Writes a file to the local filesystem.
|
||||
|
||||
- This tool will overwrite the existing file if one exists at the path
|
||||
- You MUST use the read tool first to understand what you're overwriting
|
||||
- ALWAYS prefer using the 'edit' tool for existing files
|
||||
- NEVER proactively create documentation files unless explicitly requested
|
||||
- Parent directories will be created automatically if they don't exist
|
||||
- The file_path must be an absolute path, not a relative path`,
|
||||
inputSchema: z.toJSONSchema(WriteToolSchema)
|
||||
}
|
||||
|
||||
// Handler implementation
|
||||
export async function handleWriteTool(args: unknown, baseDir: string) {
|
||||
const parsed = WriteToolSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for write: ${parsed.error}`)
|
||||
}
|
||||
|
||||
const filePath = parsed.data.file_path
|
||||
const validPath = await validatePath(filePath, baseDir)
|
||||
|
||||
// Create parent directory if it doesn't exist
|
||||
const parentDir = path.dirname(validPath)
|
||||
try {
|
||||
await fs.mkdir(parentDir, { recursive: true })
|
||||
} catch (error: any) {
|
||||
if (error.code !== 'EEXIST') {
|
||||
throw new Error(`Failed to create parent directory: ${error.message}`)
|
||||
}
|
||||
}
|
||||
|
||||
// Check if file exists (for logging)
|
||||
let isOverwrite = false
|
||||
try {
|
||||
await fs.stat(validPath)
|
||||
isOverwrite = true
|
||||
} catch {
|
||||
// File doesn't exist, that's fine
|
||||
}
|
||||
|
||||
// Write the file
|
||||
try {
|
||||
await fs.writeFile(validPath, parsed.data.content, 'utf-8')
|
||||
} catch (error: any) {
|
||||
throw new Error(`Failed to write file: ${error.message}`)
|
||||
}
|
||||
|
||||
// Log the operation
|
||||
logger.info('File written', {
|
||||
path: validPath,
|
||||
overwrite: isOverwrite,
|
||||
size: parsed.data.content.length
|
||||
})
|
||||
|
||||
// Format output
|
||||
const relativePath = path.relative(baseDir, validPath)
|
||||
const action = isOverwrite ? 'Updated' : 'Created'
|
||||
const lines = parsed.data.content.split('\n').length
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: `${action} file: ${relativePath}\n` + `Size: ${parsed.data.content.length} bytes\n` + `Lines: ${lines}`
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
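Another illustrative sketch (not part of the diff): the write handler above overwrites existing files, so the read-before-write flow its description asks for looks roughly like this; the paths are placeholders.

import { handleReadTool } from './read'
import { handleWriteTool } from './write'

const baseDir = '/home/user/project' // placeholder
// Read first to see what would be overwritten, then write the new content.
await handleReadTool({ file_path: `${baseDir}/notes.txt` }, baseDir)
const res = await handleWriteTool({ file_path: `${baseDir}/notes.txt`, content: 'updated\n' }, baseDir)
console.log(res.content[0].text) // "Updated file: notes.txt", plus size and line count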
627
src/main/mcpServers/filesystem/types.ts
Normal file
@ -0,0 +1,627 @@
|
||||
import { loggerService } from '@logger'
|
||||
import { isMac, isWin } from '@main/constant'
|
||||
import { spawn } from 'child_process'
|
||||
import fs from 'fs/promises'
|
||||
import os from 'os'
|
||||
import path from 'path'
|
||||
|
||||
export const logger = loggerService.withContext('MCP:FileSystemServer')
|
||||
|
||||
// Constants
|
||||
export const MAX_LINE_LENGTH = 2000
|
||||
export const DEFAULT_READ_LIMIT = 2000
|
||||
export const MAX_FILES_LIMIT = 100
|
||||
export const MAX_GREP_MATCHES = 100
|
||||
|
||||
// Common types
|
||||
export interface FileInfo {
|
||||
path: string
|
||||
type: 'file' | 'directory'
|
||||
size?: number
|
||||
modified?: Date
|
||||
}
|
||||
|
||||
export interface GrepMatch {
|
||||
file: string
|
||||
line: number
|
||||
content: string
|
||||
}
|
||||
|
||||
// Utility functions for path handling
|
||||
export function normalizePath(p: string): string {
|
||||
return path.normalize(p)
|
||||
}
|
||||
|
||||
export function expandHome(filepath: string): string {
|
||||
if (filepath.startsWith('~/') || filepath === '~') {
|
||||
return path.join(os.homedir(), filepath.slice(1))
|
||||
}
|
||||
return filepath
|
||||
}
|
||||
|
||||
// Security validation
|
||||
export async function validatePath(requestedPath: string, baseDir?: string): Promise<string> {
|
||||
const expandedPath = expandHome(requestedPath)
|
||||
const root = baseDir ?? process.cwd()
|
||||
const absolute = path.isAbsolute(expandedPath) ? path.resolve(expandedPath) : path.resolve(root, expandedPath)
|
||||
|
||||
// Handle symlinks by checking their real path
|
||||
try {
|
||||
const realPath = await fs.realpath(absolute)
|
||||
return normalizePath(realPath)
|
||||
} catch (error) {
|
||||
// For new files that don't exist yet, verify parent directory
|
||||
const parentDir = path.dirname(absolute)
|
||||
try {
|
||||
const realParentPath = await fs.realpath(parentDir)
|
||||
normalizePath(realParentPath)
|
||||
return normalizePath(absolute)
|
||||
} catch {
|
||||
return normalizePath(absolute)
|
||||
}
|
||||
}
|
||||
}
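A short illustration of how the validation above resolves paths (values are placeholders for a POSIX machine whose home directory is /home/user): relative inputs resolve against baseDir, '~' expands via expandHome, and existing paths are resolved through symlinks.

import { validatePath } from './types'

await validatePath('src/index.ts', '/home/user/project') // → '/home/user/project/src/index.ts'
await validatePath('~/notes.txt')                        // → '/home/user/notes.txt'
await validatePath('/etc/hosts', '/home/user/project')   // → real path of /etc/hosts (absolute input wins)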
|
||||
|
||||
// ============================================================================
|
||||
// Edit Tool Utilities - Fuzzy matching replacers from opencode
|
||||
// ============================================================================
|
||||
|
||||
export type Replacer = (content: string, find: string) => Generator<string, void, unknown>
|
||||
|
||||
// Similarity thresholds for block anchor fallback matching
|
||||
const SINGLE_CANDIDATE_SIMILARITY_THRESHOLD = 0.0
|
||||
const MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD = 0.3
|
||||
|
||||
/**
|
||||
* Levenshtein distance algorithm implementation
|
||||
*/
|
||||
function levenshtein(a: string, b: string): number {
|
||||
if (a === '' || b === '') {
|
||||
return Math.max(a.length, b.length)
|
||||
}
|
||||
const matrix = Array.from({ length: a.length + 1 }, (_, i) =>
|
||||
Array.from({ length: b.length + 1 }, (_, j) => (i === 0 ? j : j === 0 ? i : 0))
|
||||
)
|
||||
|
||||
for (let i = 1; i <= a.length; i++) {
|
||||
for (let j = 1; j <= b.length; j++) {
|
||||
const cost = a[i - 1] === b[j - 1] ? 0 : 1
|
||||
matrix[i][j] = Math.min(matrix[i - 1][j] + 1, matrix[i][j - 1] + 1, matrix[i - 1][j - 1] + cost)
|
||||
}
|
||||
}
|
||||
return matrix[a.length][b.length]
|
||||
}
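Quick sanity checks (illustrative, not from the commit) that one could drop next to the helper above inside types.ts, since levenshtein is not exported:

console.assert(levenshtein('kitten', 'sitting') === 3) // k→s, e→i, insert g
console.assert(levenshtein('flaw', 'lawn') === 2)      // delete f, insert n
console.assert(levenshtein('', 'abc') === 3)           // empty-string early return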
|
||||
|
||||
export const SimpleReplacer: Replacer = function* (_content, find) {
|
||||
yield find
|
||||
}
|
||||
|
||||
export const LineTrimmedReplacer: Replacer = function* (content, find) {
|
||||
const originalLines = content.split('\n')
|
||||
const searchLines = find.split('\n')
|
||||
|
||||
if (searchLines[searchLines.length - 1] === '') {
|
||||
searchLines.pop()
|
||||
}
|
||||
|
||||
for (let i = 0; i <= originalLines.length - searchLines.length; i++) {
|
||||
let matches = true
|
||||
|
||||
for (let j = 0; j < searchLines.length; j++) {
|
||||
const originalTrimmed = originalLines[i + j].trim()
|
||||
const searchTrimmed = searchLines[j].trim()
|
||||
|
||||
if (originalTrimmed !== searchTrimmed) {
|
||||
matches = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if (matches) {
|
||||
let matchStartIndex = 0
|
||||
for (let k = 0; k < i; k++) {
|
||||
matchStartIndex += originalLines[k].length + 1
|
||||
}
|
||||
|
||||
let matchEndIndex = matchStartIndex
|
||||
for (let k = 0; k < searchLines.length; k++) {
|
||||
matchEndIndex += originalLines[i + k].length
|
||||
if (k < searchLines.length - 1) {
|
||||
matchEndIndex += 1
|
||||
}
|
||||
}
|
||||
|
||||
yield content.substring(matchStartIndex, matchEndIndex)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const BlockAnchorReplacer: Replacer = function* (content, find) {
|
||||
const originalLines = content.split('\n')
|
||||
const searchLines = find.split('\n')
|
||||
|
||||
if (searchLines.length < 3) {
|
||||
return
|
||||
}
|
||||
|
||||
if (searchLines[searchLines.length - 1] === '') {
|
||||
searchLines.pop()
|
||||
}
|
||||
|
||||
const firstLineSearch = searchLines[0].trim()
|
||||
const lastLineSearch = searchLines[searchLines.length - 1].trim()
|
||||
const searchBlockSize = searchLines.length
|
||||
|
||||
const candidates: Array<{ startLine: number; endLine: number }> = []
|
||||
for (let i = 0; i < originalLines.length; i++) {
|
||||
if (originalLines[i].trim() !== firstLineSearch) {
|
||||
continue
|
||||
}
|
||||
|
||||
for (let j = i + 2; j < originalLines.length; j++) {
|
||||
if (originalLines[j].trim() === lastLineSearch) {
|
||||
candidates.push({ startLine: i, endLine: j })
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (candidates.length === 0) {
|
||||
return
|
||||
}
|
||||
|
||||
if (candidates.length === 1) {
|
||||
const { startLine, endLine } = candidates[0]
|
||||
const actualBlockSize = endLine - startLine + 1
|
||||
|
||||
let similarity = 0
|
||||
const linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2)
|
||||
|
||||
if (linesToCheck > 0) {
|
||||
for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
|
||||
const originalLine = originalLines[startLine + j].trim()
|
||||
const searchLine = searchLines[j].trim()
|
||||
const maxLen = Math.max(originalLine.length, searchLine.length)
|
||||
if (maxLen === 0) {
|
||||
continue
|
||||
}
|
||||
const distance = levenshtein(originalLine, searchLine)
|
||||
similarity += (1 - distance / maxLen) / linesToCheck
|
||||
|
||||
if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
similarity = 1.0
|
||||
}
|
||||
|
||||
if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
|
||||
let matchStartIndex = 0
|
||||
for (let k = 0; k < startLine; k++) {
|
||||
matchStartIndex += originalLines[k].length + 1
|
||||
}
|
||||
let matchEndIndex = matchStartIndex
|
||||
for (let k = startLine; k <= endLine; k++) {
|
||||
matchEndIndex += originalLines[k].length
|
||||
if (k < endLine) {
|
||||
matchEndIndex += 1
|
||||
}
|
||||
}
|
||||
yield content.substring(matchStartIndex, matchEndIndex)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
let bestMatch: { startLine: number; endLine: number } | null = null
|
||||
let maxSimilarity = -1
|
||||
|
||||
for (const candidate of candidates) {
|
||||
const { startLine, endLine } = candidate
|
||||
const actualBlockSize = endLine - startLine + 1
|
||||
|
||||
let similarity = 0
|
||||
const linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2)
|
||||
|
||||
if (linesToCheck > 0) {
|
||||
for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
|
||||
const originalLine = originalLines[startLine + j].trim()
|
||||
const searchLine = searchLines[j].trim()
|
||||
const maxLen = Math.max(originalLine.length, searchLine.length)
|
||||
if (maxLen === 0) {
|
||||
continue
|
||||
}
|
||||
const distance = levenshtein(originalLine, searchLine)
|
||||
similarity += 1 - distance / maxLen
|
||||
}
|
||||
similarity /= linesToCheck
|
||||
} else {
|
||||
similarity = 1.0
|
||||
}
|
||||
|
||||
if (similarity > maxSimilarity) {
|
||||
maxSimilarity = similarity
|
||||
bestMatch = candidate
|
||||
}
|
||||
}
|
||||
|
||||
if (maxSimilarity >= MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD && bestMatch) {
|
||||
const { startLine, endLine } = bestMatch
|
||||
let matchStartIndex = 0
|
||||
for (let k = 0; k < startLine; k++) {
|
||||
matchStartIndex += originalLines[k].length + 1
|
||||
}
|
||||
let matchEndIndex = matchStartIndex
|
||||
for (let k = startLine; k <= endLine; k++) {
|
||||
matchEndIndex += originalLines[k].length
|
||||
if (k < endLine) {
|
||||
matchEndIndex += 1
|
||||
}
|
||||
}
|
||||
yield content.substring(matchStartIndex, matchEndIndex)
|
||||
}
|
||||
}
|
||||
|
||||
export const WhitespaceNormalizedReplacer: Replacer = function* (content, find) {
|
||||
const normalizeWhitespace = (text: string) => text.replace(/\s+/g, ' ').trim()
|
||||
const normalizedFind = normalizeWhitespace(find)
|
||||
|
||||
const lines = content.split('\n')
|
||||
for (let i = 0; i < lines.length; i++) {
|
||||
const line = lines[i]
|
||||
if (normalizeWhitespace(line) === normalizedFind) {
|
||||
yield line
|
||||
} else {
|
||||
const normalizedLine = normalizeWhitespace(line)
|
||||
if (normalizedLine.includes(normalizedFind)) {
|
||||
const words = find.trim().split(/\s+/)
|
||||
if (words.length > 0) {
|
||||
const pattern = words.map((word) => word.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')).join('\\s+')
|
||||
try {
|
||||
const regex = new RegExp(pattern)
|
||||
const match = line.match(regex)
|
||||
if (match) {
|
||||
yield match[0]
|
||||
}
|
||||
} catch {
|
||||
// Invalid regex pattern, skip
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const findLines = find.split('\n')
|
||||
if (findLines.length > 1) {
|
||||
for (let i = 0; i <= lines.length - findLines.length; i++) {
|
||||
const block = lines.slice(i, i + findLines.length)
|
||||
if (normalizeWhitespace(block.join('\n')) === normalizedFind) {
|
||||
yield block.join('\n')
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const IndentationFlexibleReplacer: Replacer = function* (content, find) {
|
||||
const removeIndentation = (text: string) => {
|
||||
const lines = text.split('\n')
|
||||
const nonEmptyLines = lines.filter((line) => line.trim().length > 0)
|
||||
if (nonEmptyLines.length === 0) return text
|
||||
|
||||
const minIndent = Math.min(
|
||||
...nonEmptyLines.map((line) => {
|
||||
const match = line.match(/^(\s*)/)
|
||||
return match ? match[1].length : 0
|
||||
})
|
||||
)
|
||||
|
||||
return lines.map((line) => (line.trim().length === 0 ? line : line.slice(minIndent))).join('\n')
|
||||
}
|
||||
|
||||
const normalizedFind = removeIndentation(find)
|
||||
const contentLines = content.split('\n')
|
||||
const findLines = find.split('\n')
|
||||
|
||||
for (let i = 0; i <= contentLines.length - findLines.length; i++) {
|
||||
const block = contentLines.slice(i, i + findLines.length).join('\n')
|
||||
if (removeIndentation(block) === normalizedFind) {
|
||||
yield block
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const EscapeNormalizedReplacer: Replacer = function* (content, find) {
|
||||
const unescapeString = (str: string): string => {
|
||||
return str.replace(/\\(n|t|r|'|"|`|\\|\n|\$)/g, (match, capturedChar) => {
|
||||
switch (capturedChar) {
|
||||
case 'n':
|
||||
return '\n'
|
||||
case 't':
|
||||
return '\t'
|
||||
case 'r':
|
||||
return '\r'
|
||||
case "'":
|
||||
return "'"
|
||||
case '"':
|
||||
return '"'
|
||||
case '`':
|
||||
return '`'
|
||||
case '\\':
|
||||
return '\\'
|
||||
case '\n':
|
||||
return '\n'
|
||||
case '$':
|
||||
return '$'
|
||||
default:
|
||||
return match
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
const unescapedFind = unescapeString(find)
|
||||
|
||||
if (content.includes(unescapedFind)) {
|
||||
yield unescapedFind
|
||||
}
|
||||
|
||||
const lines = content.split('\n')
|
||||
const findLines = unescapedFind.split('\n')
|
||||
|
||||
for (let i = 0; i <= lines.length - findLines.length; i++) {
|
||||
const block = lines.slice(i, i + findLines.length).join('\n')
|
||||
const unescapedBlock = unescapeString(block)
|
||||
|
||||
if (unescapedBlock === unescapedFind) {
|
||||
yield block
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const TrimmedBoundaryReplacer: Replacer = function* (content, find) {
|
||||
const trimmedFind = find.trim()
|
||||
|
||||
if (trimmedFind === find) {
|
||||
return
|
||||
}
|
||||
|
||||
if (content.includes(trimmedFind)) {
|
||||
yield trimmedFind
|
||||
}
|
||||
|
||||
const lines = content.split('\n')
|
||||
const findLines = find.split('\n')
|
||||
|
||||
for (let i = 0; i <= lines.length - findLines.length; i++) {
|
||||
const block = lines.slice(i, i + findLines.length).join('\n')
|
||||
|
||||
if (block.trim() === trimmedFind) {
|
||||
yield block
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const ContextAwareReplacer: Replacer = function* (content, find) {
|
||||
const findLines = find.split('\n')
|
||||
if (findLines.length < 3) {
|
||||
return
|
||||
}
|
||||
|
||||
if (findLines[findLines.length - 1] === '') {
|
||||
findLines.pop()
|
||||
}
|
||||
|
||||
const contentLines = content.split('\n')
|
||||
|
||||
const firstLine = findLines[0].trim()
|
||||
const lastLine = findLines[findLines.length - 1].trim()
|
||||
|
||||
for (let i = 0; i < contentLines.length; i++) {
|
||||
if (contentLines[i].trim() !== firstLine) continue
|
||||
|
||||
for (let j = i + 2; j < contentLines.length; j++) {
|
||||
if (contentLines[j].trim() === lastLine) {
|
||||
const blockLines = contentLines.slice(i, j + 1)
|
||||
const block = blockLines.join('\n')
|
||||
|
||||
if (blockLines.length === findLines.length) {
|
||||
let matchingLines = 0
|
||||
let totalNonEmptyLines = 0
|
||||
|
||||
for (let k = 1; k < blockLines.length - 1; k++) {
|
||||
const blockLine = blockLines[k].trim()
|
||||
const findLine = findLines[k].trim()
|
||||
|
||||
if (blockLine.length > 0 || findLine.length > 0) {
|
||||
totalNonEmptyLines++
|
||||
if (blockLine === findLine) {
|
||||
matchingLines++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (totalNonEmptyLines === 0 || matchingLines / totalNonEmptyLines >= 0.5) {
|
||||
yield block
|
||||
break
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const MultiOccurrenceReplacer: Replacer = function* (content, find) {
|
||||
let startIndex = 0
|
||||
|
||||
while (true) {
|
||||
const index = content.indexOf(find, startIndex)
|
||||
if (index === -1) break
|
||||
|
||||
yield find
|
||||
startIndex = index + find.length
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* All replacers in order of specificity
|
||||
*/
|
||||
export const ALL_REPLACERS: Replacer[] = [
|
||||
SimpleReplacer,
|
||||
LineTrimmedReplacer,
|
||||
BlockAnchorReplacer,
|
||||
WhitespaceNormalizedReplacer,
|
||||
IndentationFlexibleReplacer,
|
||||
EscapeNormalizedReplacer,
|
||||
TrimmedBoundaryReplacer,
|
||||
ContextAwareReplacer,
|
||||
MultiOccurrenceReplacer
|
||||
]
|
||||
|
||||
/**
|
||||
* Replace oldString with newString in content using fuzzy matching
|
||||
*/
|
||||
export function replaceWithFuzzyMatch(
|
||||
content: string,
|
||||
oldString: string,
|
||||
newString: string,
|
||||
replaceAll = false
|
||||
): string {
|
||||
if (oldString === newString) {
|
||||
throw new Error('old_string and new_string must be different')
|
||||
}
|
||||
|
||||
let notFound = true
|
||||
|
||||
for (const replacer of ALL_REPLACERS) {
|
||||
for (const search of replacer(content, oldString)) {
|
||||
const index = content.indexOf(search)
|
||||
if (index === -1) continue
|
||||
notFound = false
|
||||
if (replaceAll) {
|
||||
return content.replaceAll(search, newString)
|
||||
}
|
||||
const lastIndex = content.lastIndexOf(search)
|
||||
if (index !== lastIndex) continue
|
||||
return content.substring(0, index) + newString + content.substring(index + search.length)
|
||||
}
|
||||
}
|
||||
|
||||
if (notFound) {
|
||||
throw new Error('old_string not found in content')
|
||||
}
|
||||
throw new Error(
|
||||
'Found multiple matches for old_string. Provide more surrounding lines in old_string to identify the correct match.'
|
||||
)
|
||||
}
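Illustrative example (not from the commit) of the fuzzy-replacement entry point above: the search block uses different indentation than the file, so SimpleReplacer finds no exact match and the line-trimmed fallback locates the block instead.

import { replaceWithFuzzyMatch } from './types'

const file = ['function greet() {', '    console.log("hi")', '}'].join('\n')

const updated = replaceWithFuzzyMatch(
  file,
  'function greet() {\n  console.log("hi")\n}', // old_string, indented differently than the file
  'function greet() {\n    console.log("hello")\n}'
)
// updated now contains console.log("hello"); the block was matched despite the indentation drift.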
|
||||
|
||||
// ============================================================================
|
||||
// Binary File Detection
|
||||
// ============================================================================
|
||||
|
||||
// Check if a file is likely binary
|
||||
export async function isBinaryFile(filePath: string): Promise<boolean> {
|
||||
try {
|
||||
const buffer = Buffer.alloc(4096)
|
||||
const fd = await fs.open(filePath, 'r')
|
||||
const { bytesRead } = await fd.read(buffer, 0, buffer.length, 0)
|
||||
await fd.close()
|
||||
|
||||
if (bytesRead === 0) return false
|
||||
|
||||
const view = buffer.subarray(0, bytesRead)
|
||||
|
||||
let zeroBytes = 0
|
||||
let evenZeros = 0
|
||||
let oddZeros = 0
|
||||
let nonPrintable = 0
|
||||
|
||||
for (let i = 0; i < view.length; i++) {
|
||||
const b = view[i]
|
||||
|
||||
if (b === 0) {
|
||||
zeroBytes++
|
||||
if (i % 2 === 0) evenZeros++
|
||||
else oddZeros++
|
||||
continue
|
||||
}
|
||||
|
||||
// treat common whitespace as printable
|
||||
if (b === 9 || b === 10 || b === 13) continue
|
||||
|
||||
// basic ASCII printable range
|
||||
if (b >= 32 && b <= 126) continue
|
||||
|
||||
// bytes >= 128 are likely part of UTF-8 sequences; count as printable
|
||||
if (b >= 128) continue
|
||||
|
||||
nonPrintable++
|
||||
}
|
||||
|
||||
// If there are lots of null bytes, it's probably binary unless it looks like UTF-16 text.
|
||||
if (zeroBytes > 0) {
|
||||
const evenSlots = Math.ceil(view.length / 2)
|
||||
const oddSlots = Math.floor(view.length / 2)
|
||||
const evenZeroRatio = evenSlots > 0 ? evenZeros / evenSlots : 0
|
||||
const oddZeroRatio = oddSlots > 0 ? oddZeros / oddSlots : 0
|
||||
|
||||
// UTF-16LE/BE tends to have zeros on every other byte.
|
||||
if (evenZeroRatio > 0.7 || oddZeroRatio > 0.7) return false
|
||||
|
||||
if (zeroBytes / view.length > 0.05) return true
|
||||
}
|
||||
|
||||
// Heuristic: too many non-printable bytes => binary.
|
||||
return nonPrintable / view.length > 0.3
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
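A small experiment (illustrative only) that exercises the three branches of the heuristic above; file names and the temp directory are placeholders.

import fs from 'fs/promises'
import os from 'os'
import path from 'path'

import { isBinaryFile } from './types'

const dir = await fs.mkdtemp(path.join(os.tmpdir(), 'binchk-'))
await fs.writeFile(path.join(dir, 'ascii.txt'), 'hello world\n')                 // no null bytes
await fs.writeFile(path.join(dir, 'utf16.txt'), Buffer.from('hello', 'utf16le')) // nulls on alternating bytes
await fs.writeFile(path.join(dir, 'sparse.bin'), Buffer.from(Array.from({ length: 1024 }, (_, i) => (i % 4 === 0 ? 0 : 65))))

console.log(await isBinaryFile(path.join(dir, 'ascii.txt')))  // false — printable ASCII
console.log(await isBinaryFile(path.join(dir, 'utf16.txt')))  // false — looks like UTF-16 text
console.log(await isBinaryFile(path.join(dir, 'sparse.bin'))) // true  — >5% null bytes, not UTF-16-like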
|
||||
|
||||
// ============================================================================
|
||||
// Ripgrep Utilities
|
||||
// ============================================================================
|
||||
|
||||
export interface RipgrepResult {
|
||||
ok: boolean
|
||||
stdout: string
|
||||
exitCode: number | null
|
||||
}
|
||||
|
||||
export function getRipgrepAddonPath(): string {
|
||||
const pkgJsonPath = require.resolve('@anthropic-ai/claude-agent-sdk/package.json')
|
||||
const pkgRoot = path.dirname(pkgJsonPath)
|
||||
const platform = isMac ? 'darwin' : isWin ? 'win32' : 'linux'
|
||||
const arch = process.arch === 'arm64' ? 'arm64' : 'x64'
|
||||
return path.join(pkgRoot, 'vendor', 'ripgrep', `${arch}-${platform}`, 'ripgrep.node')
|
||||
}
|
||||
|
||||
export async function runRipgrep(args: string[]): Promise<RipgrepResult> {
|
||||
const addonPath = getRipgrepAddonPath()
|
||||
const childScript = `const { ripgrepMain } = require(process.env.RIPGREP_ADDON_PATH); process.exit(ripgrepMain(process.argv.slice(1)));`
|
||||
|
||||
return new Promise((resolve) => {
|
||||
const child = spawn(process.execPath, ['--eval', childScript, 'rg', ...args], {
|
||||
cwd: process.cwd(),
|
||||
env: {
|
||||
...process.env,
|
||||
ELECTRON_RUN_AS_NODE: '1',
|
||||
RIPGREP_ADDON_PATH: addonPath
|
||||
},
|
||||
stdio: ['ignore', 'pipe', 'pipe']
|
||||
})
|
||||
|
||||
let stdout = ''
|
||||
|
||||
child.stdout?.on('data', (chunk) => {
|
||||
stdout += chunk.toString('utf-8')
|
||||
})
|
||||
|
||||
child.on('error', () => {
|
||||
resolve({ ok: false, stdout: '', exitCode: null })
|
||||
})
|
||||
|
||||
child.on('close', (code) => {
|
||||
resolve({ ok: true, stdout, exitCode: code })
|
||||
})
|
||||
})
|
||||
}
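Illustrative only: calling the helper above with arguments in the same shape handleGrepTool builds, assuming the vendored ripgrep addon from @anthropic-ai/claude-agent-sdk is present; the search root is a placeholder.

import { runRipgrep } from './types'

const rg = await runRipgrep([
  '--no-heading',
  '--line-number',
  '--color',
  'never',
  'TODO', // pattern
  '/home/user/project' // search root (placeholder)
])
if (rg.ok && rg.exitCode !== 2) {
  // stdout lines look like "path:line:content", the format parsed back in handleGrepTool
  console.log(rg.stdout)
}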
|
||||
@ -32,7 +32,8 @@ export enum ConfigKeys {
Proxy = 'proxy',
EnableDeveloperMode = 'enableDeveloperMode',
ClientId = 'clientId',
GitBashPath = 'gitBashPath'
GitBashPath = 'gitBashPath',
GitBashPathSource = 'gitBashPathSource' // 'manual' | 'auto' | null
}

export class ConfigManager {
@ -249,6 +249,26 @@ class McpService {
|
||||
StdioClientTransport | SSEClientTransport | InMemoryTransport | StreamableHTTPClientTransport
|
||||
> => {
|
||||
// Create appropriate transport based on configuration
|
||||
|
||||
// Special case for nowledgeMem - uses HTTP transport instead of in-memory
|
||||
if (isBuiltinMCPServer(server) && server.name === BuiltinMCPServerNames.nowledgeMem) {
|
||||
const nowledgeMemUrl = 'http://127.0.0.1:14242/mcp'
|
||||
const options: StreamableHTTPClientTransportOptions = {
|
||||
fetch: async (url, init) => {
|
||||
return net.fetch(typeof url === 'string' ? url : url.toString(), init)
|
||||
},
|
||||
requestInit: {
|
||||
headers: {
|
||||
...defaultAppHeaders(),
|
||||
APP: 'Cherry Studio'
|
||||
}
|
||||
},
|
||||
authProvider
|
||||
}
|
||||
getServerLogger(server).debug(`Using StreamableHTTPClientTransport for ${server.name}`)
|
||||
return new StreamableHTTPClientTransport(new URL(nowledgeMemUrl), options)
|
||||
}
|
||||
|
||||
if (isBuiltinMCPServer(server) && server.name !== BuiltinMCPServerNames.mcpAutoInstall) {
|
||||
getServerLogger(server).debug(`Using in-memory transport`)
|
||||
const [clientTransport, serverTransport] = InMemoryTransport.createLinkedPair()
|
||||
|
||||
@ -15,8 +15,8 @@ import { query } from '@anthropic-ai/claude-agent-sdk'
|
||||
import { loggerService } from '@logger'
|
||||
import { config as apiConfigService } from '@main/apiServer/config'
|
||||
import { validateModelId } from '@main/apiServer/utils'
|
||||
import { ConfigKeys, configManager } from '@main/services/ConfigManager'
|
||||
import { validateGitBashPath } from '@main/utils/process'
|
||||
import { isWin } from '@main/constant'
|
||||
import { autoDiscoverGitBash } from '@main/utils/process'
|
||||
import getLoginShellEnvironment from '@main/utils/shell-env'
|
||||
import { app } from 'electron'
|
||||
|
||||
@ -109,7 +109,8 @@ class ClaudeCodeService implements AgentServiceInterface {
|
||||
Object.entries(loginShellEnv).filter(([key]) => !key.toLowerCase().endsWith('_proxy'))
|
||||
) as Record<string, string>
|
||||
|
||||
const customGitBashPath = validateGitBashPath(configManager.get(ConfigKeys.GitBashPath) as string | undefined)
|
||||
// Auto-discover Git Bash path on Windows (already logs internally)
|
||||
const customGitBashPath = isWin ? autoDiscoverGitBash() : null
|
||||
|
||||
const env = {
|
||||
...loginShellEnvWithoutProxies,
|
||||
|
||||
@ -1,9 +1,21 @@
|
||||
import { configManager } from '@main/services/ConfigManager'
|
||||
import { execFileSync } from 'child_process'
|
||||
import fs from 'fs'
|
||||
import path from 'path'
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import { findExecutable, findGitBash, validateGitBashPath } from '../process'
|
||||
import { autoDiscoverGitBash, findExecutable, findGitBash, validateGitBashPath } from '../process'
|
||||
|
||||
// Mock configManager
|
||||
vi.mock('@main/services/ConfigManager', () => ({
|
||||
ConfigKeys: {
|
||||
GitBashPath: 'gitBashPath'
|
||||
},
|
||||
configManager: {
|
||||
get: vi.fn(),
|
||||
set: vi.fn()
|
||||
}
|
||||
}))
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('child_process')
|
||||
@ -695,4 +707,284 @@ describe.skipIf(process.platform !== 'win32')('process utilities', () => {
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('autoDiscoverGitBash', () => {
|
||||
const originalEnvVar = process.env.CLAUDE_CODE_GIT_BASH_PATH
|
||||
|
||||
beforeEach(() => {
|
||||
vi.mocked(configManager.get).mockReset()
|
||||
vi.mocked(configManager.set).mockReset()
|
||||
delete process.env.CLAUDE_CODE_GIT_BASH_PATH
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
// Restore original environment variable
|
||||
if (originalEnvVar !== undefined) {
|
||||
process.env.CLAUDE_CODE_GIT_BASH_PATH = originalEnvVar
|
||||
} else {
|
||||
delete process.env.CLAUDE_CODE_GIT_BASH_PATH
|
||||
}
|
||||
})
|
||||
|
||||
/**
|
||||
* Helper to mock fs.existsSync with a set of valid paths
|
||||
*/
|
||||
const mockExistingPaths = (...validPaths: string[]) => {
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => validPaths.includes(p as string))
|
||||
}
|
||||
|
||||
describe('with no existing config path', () => {
|
||||
it('should discover and persist Git Bash path when not configured', () => {
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(undefined)
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
mockExistingPaths(gitPath, bashPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
|
||||
})
|
||||
|
||||
it('should return null and not persist when Git Bash is not found', () => {
|
||||
vi.mocked(configManager.get).mockReturnValue(undefined)
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false)
|
||||
vi.mocked(execFileSync).mockImplementation(() => {
|
||||
throw new Error('Not found')
|
||||
})
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBeNull()
|
||||
expect(configManager.set).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('environment variable precedence', () => {
|
||||
it('should use env var over valid config path', () => {
|
||||
const envPath = 'C:\\EnvGit\\bin\\bash.exe'
|
||||
const configPath = 'C:\\ConfigGit\\bin\\bash.exe'
|
||||
|
||||
process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
|
||||
vi.mocked(configManager.get).mockReturnValue(configPath)
|
||||
mockExistingPaths(envPath, configPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
// Env var should take precedence
|
||||
expect(result).toBe(envPath)
|
||||
// Should not persist env var path (it's a runtime override)
|
||||
expect(configManager.set).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should fall back to config path when env var is invalid', () => {
|
||||
const envPath = 'C:\\Invalid\\bash.exe'
|
||||
const configPath = 'C:\\ConfigGit\\bin\\bash.exe'
|
||||
|
||||
process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
|
||||
vi.mocked(configManager.get).mockReturnValue(configPath)
|
||||
// Env path is invalid (doesn't exist), only config path exists
|
||||
mockExistingPaths(configPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
// Should fall back to config path
|
||||
expect(result).toBe(configPath)
|
||||
expect(configManager.set).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should fall back to auto-discovery when both env var and config are invalid', () => {
|
||||
const envPath = 'C:\\InvalidEnv\\bash.exe'
|
||||
const configPath = 'C:\\InvalidConfig\\bash.exe'
|
||||
const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
vi.mocked(configManager.get).mockReturnValue(configPath)
|
||||
// Both env and config paths are invalid, only standard Git exists
|
||||
mockExistingPaths(gitPath, discoveredPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBe(discoveredPath)
|
||||
expect(configManager.set).toHaveBeenCalledWith('gitBashPath', discoveredPath)
|
||||
})
|
||||
})
|
||||
|
||||
describe('with valid existing config path', () => {
|
||||
it('should validate and return existing path without re-discovering', () => {
|
||||
const existingPath = 'C:\\CustomGit\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(existingPath)
|
||||
mockExistingPaths(existingPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBe(existingPath)
|
||||
// Should not call findGitBash or persist again
|
||||
expect(configManager.set).not.toHaveBeenCalled()
|
||||
// Should not call execFileSync (which findGitBash would use for discovery)
|
||||
expect(execFileSync).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should not override existing valid config with auto-discovery', () => {
|
||||
const existingPath = 'C:\\CustomGit\\bin\\bash.exe'
|
||||
const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(existingPath)
|
||||
mockExistingPaths(existingPath, discoveredPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBe(existingPath)
|
||||
expect(configManager.set).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('with invalid existing config path', () => {
|
||||
it('should attempt auto-discovery when existing path does not exist', () => {
|
||||
const existingPath = 'C:\\NonExistent\\bin\\bash.exe'
|
||||
const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(existingPath)
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
// Invalid path doesn't exist, but Git is installed at standard location
|
||||
mockExistingPaths(gitPath, discoveredPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
// Should discover and return the new path
|
||||
expect(result).toBe(discoveredPath)
|
||||
// Should persist the discovered path (overwrites invalid)
|
||||
expect(configManager.set).toHaveBeenCalledWith('gitBashPath', discoveredPath)
|
||||
})
|
||||
|
||||
it('should attempt auto-discovery when existing path is not bash.exe', () => {
|
||||
const existingPath = 'C:\\CustomGit\\bin\\git.exe'
|
||||
const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(existingPath)
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
// Invalid path exists but is not bash.exe (validation will fail)
|
||||
// Git is installed at standard location
|
||||
mockExistingPaths(existingPath, gitPath, discoveredPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
// Should discover and return the new path
|
||||
expect(result).toBe(discoveredPath)
|
||||
// Should persist the discovered path (overwrites invalid)
|
||||
expect(configManager.set).toHaveBeenCalledWith('gitBashPath', discoveredPath)
|
||||
})
|
||||
|
||||
it('should return null when existing path is invalid and discovery fails', () => {
|
||||
const existingPath = 'C:\\NonExistent\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(existingPath)
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false)
|
||||
vi.mocked(execFileSync).mockImplementation(() => {
|
||||
throw new Error('Not found')
|
||||
})
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
// Both validation and discovery failed
|
||||
expect(result).toBeNull()
|
||||
// Should not persist when discovery fails
|
||||
expect(configManager.set).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('config persistence verification', () => {
|
||||
it('should persist discovered path with correct config key', () => {
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(undefined)
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
mockExistingPaths(gitPath, bashPath)
|
||||
|
||||
autoDiscoverGitBash()
|
||||
|
||||
// Verify the exact call to configManager.set
|
||||
expect(configManager.set).toHaveBeenCalledTimes(1)
|
||||
expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
|
||||
})
|
||||
|
||||
it('should persist on each discovery when config remains undefined', () => {
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(undefined)
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
mockExistingPaths(gitPath, bashPath)
|
||||
|
||||
autoDiscoverGitBash()
|
||||
autoDiscoverGitBash()
|
||||
|
||||
// Each call discovers and persists since config remains undefined (mocked)
|
||||
expect(configManager.set).toHaveBeenCalledTimes(2)
|
||||
})
|
||||
})
|
||||
|
||||
describe('real-world scenarios', () => {
|
||||
it('should discover and persist standard Git for Windows installation', () => {
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(undefined)
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
mockExistingPaths(gitPath, bashPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
|
||||
})
|
||||
|
||||
it('should discover portable Git via where.exe and persist', () => {
|
||||
const gitPath = 'D:\\PortableApps\\Git\\bin\\git.exe'
|
||||
const bashPath = 'D:\\PortableApps\\Git\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(undefined)
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => {
|
||||
const pathStr = p?.toString() || ''
|
||||
// Common git paths don't exist
|
||||
if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
|
||||
if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
|
||||
// Portable bash path exists
|
||||
if (pathStr === bashPath) return true
|
||||
return false
|
||||
})
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(gitPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
|
||||
})
|
||||
|
||||
it('should respect user-configured path over auto-discovery', () => {
|
||||
const userConfiguredPath = 'D:\\MyGit\\bin\\bash.exe'
|
||||
const systemPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(userConfiguredPath)
|
||||
mockExistingPaths(userConfiguredPath, systemPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBe(userConfiguredPath)
|
||||
expect(configManager.set).not.toHaveBeenCalled()
|
||||
// Verify findGitBash was not called for discovery
|
||||
expect(execFileSync).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@ -1,4 +1,5 @@
|
||||
import { loggerService } from '@logger'
|
||||
import type { GitBashPathInfo, GitBashPathSource } from '@shared/config/constant'
|
||||
import { HOME_CHERRY_DIR } from '@shared/config/constant'
|
||||
import { execFileSync, spawn } from 'child_process'
|
||||
import fs from 'fs'
|
||||
@ -6,6 +7,7 @@ import os from 'os'
|
||||
import path from 'path'
|
||||
|
||||
import { isWin } from '../constant'
|
||||
import { ConfigKeys, configManager } from '../services/ConfigManager'
|
||||
import { getResourcePath } from '.'
|
||||
|
||||
const logger = loggerService.withContext('Utils:Process')
|
||||
@ -59,7 +61,7 @@ export async function getBinaryPath(name?: string): Promise<string> {
|
||||
|
||||
export async function isBinaryExists(name: string): Promise<boolean> {
|
||||
const cmd = await getBinaryPath(name)
|
||||
return await fs.existsSync(cmd)
|
||||
return fs.existsSync(cmd)
|
||||
}
|
||||
|
||||
/**
|
||||
@ -225,3 +227,77 @@ export function validateGitBashPath(customPath?: string | null): string | null {
|
||||
logger.debug('Validated custom Git Bash path', { path: resolved })
|
||||
return resolved
|
||||
}
|
||||
|
||||
/**
|
||||
* Auto-discover and persist Git Bash path if not already configured
|
||||
* Only called when Git Bash is actually needed
|
||||
*
|
||||
* Precedence order:
|
||||
* 1. CLAUDE_CODE_GIT_BASH_PATH environment variable (highest - runtime override)
|
||||
* 2. Configured path from settings (manual or auto)
|
||||
* 3. Auto-discovery via findGitBash (only if no valid config exists)
|
||||
*/
|
||||
export function autoDiscoverGitBash(): string | null {
|
||||
if (!isWin) {
|
||||
return null
|
||||
}
|
||||
|
||||
// 1. Check environment variable override first (highest priority)
|
||||
const envOverride = process.env.CLAUDE_CODE_GIT_BASH_PATH
|
||||
if (envOverride) {
|
||||
const validated = validateGitBashPath(envOverride)
|
||||
if (validated) {
|
||||
logger.debug('Using CLAUDE_CODE_GIT_BASH_PATH override', { path: validated })
|
||||
return validated
|
||||
}
|
||||
logger.warn('CLAUDE_CODE_GIT_BASH_PATH provided but path is invalid', { path: envOverride })
|
||||
}
|
||||
|
||||
// 2. Check if a path is already configured
|
||||
const existingPath = configManager.get<string | undefined>(ConfigKeys.GitBashPath)
|
||||
const existingSource = configManager.get<GitBashPathSource | undefined>(ConfigKeys.GitBashPathSource)
|
||||
|
||||
if (existingPath) {
|
||||
const validated = validateGitBashPath(existingPath)
|
||||
if (validated) {
|
||||
return validated
|
||||
}
|
||||
// Existing path is invalid, try to auto-discover
|
||||
logger.warn('Existing Git Bash path is invalid, attempting auto-discovery', {
|
||||
path: existingPath,
|
||||
source: existingSource
|
||||
})
|
||||
}
|
||||
|
||||
// 3. Try to find Git Bash via auto-discovery
|
||||
const discoveredPath = findGitBash()
|
||||
if (discoveredPath) {
|
||||
// Persist the discovered path with 'auto' source
|
||||
configManager.set(ConfigKeys.GitBashPath, discoveredPath)
|
||||
configManager.set(ConfigKeys.GitBashPathSource, 'auto')
|
||||
logger.info('Auto-discovered Git Bash path', { path: discoveredPath })
|
||||
}
|
||||
|
||||
return discoveredPath
|
||||
}
|
||||
|
||||
/**
|
||||
* Get Git Bash path info including source
|
||||
* If no path is configured, triggers auto-discovery first
|
||||
*/
|
||||
export function getGitBashPathInfo(): GitBashPathInfo {
|
||||
if (!isWin) {
|
||||
return { path: null, source: null }
|
||||
}
|
||||
|
||||
let path = configManager.get<string | null>(ConfigKeys.GitBashPath) ?? null
|
||||
let source = configManager.get<GitBashPathSource | null>(ConfigKeys.GitBashPathSource) ?? null
|
||||
|
||||
// If no path configured, trigger auto-discovery (handles upgrade from old versions)
|
||||
if (!path) {
|
||||
path = autoDiscoverGitBash()
|
||||
source = path ? 'auto' : null
|
||||
}
|
||||
|
||||
return { path, source }
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@ import type { PermissionUpdate } from '@anthropic-ai/claude-agent-sdk'
import { electronAPI } from '@electron-toolkit/preload'
import type { SpanEntity, TokenUsage } from '@mcp-trace/trace-core'
import type { SpanContext } from '@opentelemetry/api'
import type { TerminalConfig, UpgradeChannel } from '@shared/config/constant'
import type { GitBashPathInfo, TerminalConfig, UpgradeChannel } from '@shared/config/constant'
import type { LogLevel, LogSourceWithContext } from '@shared/config/logger'
import type { FileChangeEvent, WebviewKeyEvent } from '@shared/config/types'
import type { MCPServerLogEntry } from '@shared/config/types'
@@ -126,6 +126,7 @@ const api = {
    getCpuName: () => ipcRenderer.invoke(IpcChannel.System_GetCpuName),
    checkGitBash: (): Promise<boolean> => ipcRenderer.invoke(IpcChannel.System_CheckGitBash),
    getGitBashPath: (): Promise<string | null> => ipcRenderer.invoke(IpcChannel.System_GetGitBashPath),
    getGitBashPathInfo: (): Promise<GitBashPathInfo> => ipcRenderer.invoke(IpcChannel.System_GetGitBashPathInfo),
    setGitBashPath: (newPath: string | null): Promise<boolean> =>
      ipcRenderer.invoke(IpcChannel.System_SetGitBashPath, newPath)
  },
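A hedged renderer-side usage sketch (window.api.system is the surface this preload file exposes; the logging is illustrative only):

async function showGitBashStatus() {
  const info = await window.api.system.getGitBashPathInfo()
  if (info.path) {
    console.log(`Git Bash: ${info.path} (${info.source === 'manual' ? 'manually set' : 'auto-discovered'})`)
  } else {
    console.log('Git Bash is not configured')
  }
}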
@@ -142,6 +142,10 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
      return { thinking: { type: reasoningEffort ? 'enabled' : 'disabled' } }
    }

    if (reasoningEffort === 'default') {
      return {}
    }

    if (!reasoningEffort) {
      // DeepSeek hybrid inference models, v3.1 and maybe more in the future
      // Different providers expose different thinking controls; handle them uniformly here
@@ -303,7 +307,7 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
    // Grok models/Perplexity models/OpenAI models
    if (isSupportedReasoningEffortModel(model)) {
      // Check whether the model supports the selected option
      const supportedOptions = getModelSupportedReasoningEffortOptions(model)
      const supportedOptions = getModelSupportedReasoningEffortOptions(model)?.filter((option) => option !== 'default')
      if (supportedOptions?.includes(reasoningEffort)) {
        return {
          reasoning_effort: reasoningEffort
@ -7,7 +7,6 @@ import type { Chunk } from '@renderer/types/chunk'
|
||||
import { isOllamaProvider, isSupportEnableThinkingProvider } from '@renderer/utils/provider'
|
||||
import type { LanguageModelMiddleware } from 'ai'
|
||||
import { extractReasoningMiddleware, simulateStreamingMiddleware } from 'ai'
|
||||
import { isEmpty } from 'lodash'
|
||||
|
||||
import { getAiSdkProviderId } from '../provider/factory'
|
||||
import { isOpenRouterGeminiGenerateImageModel } from '../utils/image'
|
||||
@ -16,7 +15,6 @@ import { openrouterGenerateImageMiddleware } from './openrouterGenerateImageMidd
|
||||
import { openrouterReasoningMiddleware } from './openrouterReasoningMiddleware'
|
||||
import { qwenThinkingMiddleware } from './qwenThinkingMiddleware'
|
||||
import { skipGeminiThoughtSignatureMiddleware } from './skipGeminiThoughtSignatureMiddleware'
|
||||
import { toolChoiceMiddleware } from './toolChoiceMiddleware'
|
||||
|
||||
const logger = loggerService.withContext('AiSdkMiddlewareBuilder')
|
||||
|
||||
@ -136,15 +134,6 @@ export class AiSdkMiddlewareBuilder {
|
||||
export function buildAiSdkMiddlewares(config: AiSdkMiddlewareConfig): LanguageModelMiddleware[] {
|
||||
const builder = new AiSdkMiddlewareBuilder()
|
||||
|
||||
// 0. Forced knowledge-base middleware (must come first so the knowledge base is force-called on the first round)
|
||||
if (!isEmpty(config.assistant?.knowledge_bases?.map((base) => base.id)) && config.knowledgeRecognition !== 'on') {
|
||||
builder.add({
|
||||
name: 'force-knowledge-first',
|
||||
middleware: toolChoiceMiddleware('builtin_knowledge_search')
|
||||
})
|
||||
logger.debug('Added toolChoice middleware to force knowledge base search on first round')
|
||||
}
|
||||
|
||||
// 1. Add provider-specific middlewares
|
||||
if (config.provider) {
|
||||
addProviderSpecificMiddlewares(builder, config)
|
||||
|
||||
@ -31,7 +31,7 @@ import { webSearchToolWithPreExtractedKeywords } from '../tools/WebSearchTool'
|
||||
|
||||
const logger = loggerService.withContext('SearchOrchestrationPlugin')
|
||||
|
||||
const getMessageContent = (message: ModelMessage) => {
|
||||
export const getMessageContent = (message: ModelMessage) => {
|
||||
if (typeof message.content === 'string') return message.content
|
||||
return message.content.reduce((acc, part) => {
|
||||
if (part.type === 'text') {
|
||||
@ -266,14 +266,14 @@ export const searchOrchestrationPlugin = (assistant: Assistant, topicId: string)
|
||||
// Decide which kinds of search are needed
|
||||
const knowledgeBaseIds = assistant.knowledge_bases?.map((base) => base.id)
|
||||
const hasKnowledgeBase = !isEmpty(knowledgeBaseIds)
|
||||
const knowledgeRecognition = assistant.knowledgeRecognition || 'on'
|
||||
const knowledgeRecognition = assistant.knowledgeRecognition || 'off'
|
||||
const globalMemoryEnabled = selectGlobalMemoryEnabled(store.getState())
|
||||
const shouldWebSearch = !!assistant.webSearchProviderId
|
||||
const shouldKnowledgeSearch = hasKnowledgeBase && knowledgeRecognition === 'on'
|
||||
const shouldMemorySearch = globalMemoryEnabled && assistant.enableMemory
|
||||
|
||||
// Run intent analysis
|
||||
if (shouldWebSearch || hasKnowledgeBase) {
|
||||
if (shouldWebSearch || shouldKnowledgeSearch) {
|
||||
const analysisResult = await analyzeSearchIntent(lastUserMessage, assistant, {
|
||||
shouldWebSearch,
|
||||
shouldKnowledgeSearch,
|
||||
@ -330,41 +330,25 @@ export const searchOrchestrationPlugin = (assistant: Assistant, topicId: string)
|
||||
// 📚 Knowledge base search tool configuration
|
||||
const knowledgeBaseIds = assistant.knowledge_bases?.map((base) => base.id)
|
||||
const hasKnowledgeBase = !isEmpty(knowledgeBaseIds)
|
||||
const knowledgeRecognition = assistant.knowledgeRecognition || 'on'
|
||||
const knowledgeRecognition = assistant.knowledgeRecognition || 'off'
|
||||
const shouldKnowledgeSearch = hasKnowledgeBase && knowledgeRecognition === 'on'
|
||||
|
||||
if (hasKnowledgeBase) {
|
||||
if (knowledgeRecognition === 'off') {
|
||||
// off mode: add the knowledge search tool directly, using the user message as the search keywords
|
||||
if (shouldKnowledgeSearch) {
|
||||
// on mode: decide whether to add the tool based on the intent-recognition result
|
||||
const needsKnowledgeSearch =
|
||||
analysisResult?.knowledge &&
|
||||
analysisResult.knowledge.question &&
|
||||
analysisResult.knowledge.question[0] !== 'not_needed'
|
||||
|
||||
if (needsKnowledgeSearch && analysisResult.knowledge) {
|
||||
// logger.info('📚 Adding knowledge search tool (intent-based)')
|
||||
const userMessage = userMessages[context.requestId]
|
||||
const fallbackKeywords = {
|
||||
question: [getMessageContent(userMessage) || 'search'],
|
||||
rewrite: getMessageContent(userMessage) || 'search'
|
||||
}
|
||||
// logger.info('📚 Adding knowledge search tool (force mode)')
|
||||
params.tools['builtin_knowledge_search'] = knowledgeSearchTool(
|
||||
assistant,
|
||||
fallbackKeywords,
|
||||
analysisResult.knowledge,
|
||||
getMessageContent(userMessage),
|
||||
topicId
|
||||
)
|
||||
// params.toolChoice = { type: 'tool', toolName: 'builtin_knowledge_search' }
|
||||
} else {
|
||||
// on mode: decide whether to add the tool based on the intent-recognition result
|
||||
const needsKnowledgeSearch =
|
||||
analysisResult?.knowledge &&
|
||||
analysisResult.knowledge.question &&
|
||||
analysisResult.knowledge.question[0] !== 'not_needed'
|
||||
|
||||
if (needsKnowledgeSearch && analysisResult.knowledge) {
|
||||
// logger.info('📚 Adding knowledge search tool (intent-based)')
|
||||
const userMessage = userMessages[context.requestId]
|
||||
params.tools['builtin_knowledge_search'] = knowledgeSearchTool(
|
||||
assistant,
|
||||
analysisResult.knowledge,
|
||||
getMessageContent(userMessage),
|
||||
topicId
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
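In short, the plugin now defaults knowledgeRecognition to 'off', and intent analysis only runs when it is explicitly 'on'. A small sketch of the resulting split (the helper name and return labels are illustrative):

type KnowledgeRecognition = 'on' | 'off'

function resolveKnowledgeSearchMode(hasKnowledgeBase: boolean, recognition?: KnowledgeRecognition) {
  const knowledgeRecognition = recognition || 'off' // default flipped from 'on' to 'off' in this change
  if (!hasKnowledgeBase) return 'none'
  // 'on'  -> intent analysis decides whether builtin_knowledge_search is attached
  // 'off' -> the force-knowledge-first middleware added in AiSdkMiddlewareBuilder forces the tool on the first round
  return knowledgeRecognition === 'on' ? 'intent-based' : 'forced-first-round'
}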
@ -18,7 +18,7 @@ vi.mock('@renderer/services/AssistantService', () => ({
|
||||
toolUseMode: assistant.settings?.toolUseMode ?? 'prompt',
|
||||
defaultModel: assistant.defaultModel,
|
||||
customParameters: assistant.settings?.customParameters ?? [],
|
||||
reasoning_effort: assistant.settings?.reasoning_effort,
|
||||
reasoning_effort: assistant.settings?.reasoning_effort ?? 'default',
|
||||
reasoning_effort_cache: assistant.settings?.reasoning_effort_cache,
|
||||
qwenThinkMode: assistant.settings?.qwenThinkMode
|
||||
})
|
||||
|
||||
@ -11,6 +11,7 @@ import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import {
|
||||
getAnthropicReasoningParams,
|
||||
getAnthropicThinkingBudget,
|
||||
getBedrockReasoningParams,
|
||||
getCustomParameters,
|
||||
getGeminiReasoningParams,
|
||||
@ -89,7 +90,8 @@ vi.mock('@renderer/config/models', async (importOriginal) => {
|
||||
isQwenAlwaysThinkModel: vi.fn(() => false),
|
||||
isSupportedThinkingTokenHunyuanModel: vi.fn(() => false),
|
||||
isSupportedThinkingTokenModel: vi.fn(() => false),
|
||||
isGPT51SeriesModel: vi.fn(() => false)
|
||||
isGPT51SeriesModel: vi.fn(() => false),
|
||||
findTokenLimit: vi.fn(actual.findTokenLimit)
|
||||
}
|
||||
})
|
||||
|
||||
@ -596,7 +598,7 @@ describe('reasoning utils', () => {
|
||||
expect(result).toEqual({})
|
||||
})
|
||||
|
||||
it('should return disabled thinking when no reasoning effort', async () => {
|
||||
it('should return disabled thinking when reasoning effort is none', async () => {
|
||||
const { isReasoningModel, isSupportedThinkingTokenClaudeModel } = await import('@renderer/config/models')
|
||||
|
||||
vi.mocked(isReasoningModel).mockReturnValue(true)
|
||||
@ -611,7 +613,9 @@ describe('reasoning utils', () => {
|
||||
const assistant: Assistant = {
|
||||
id: 'test',
|
||||
name: 'Test',
|
||||
settings: {}
|
||||
settings: {
|
||||
reasoning_effort: 'none'
|
||||
}
|
||||
} as Assistant
|
||||
|
||||
const result = getAnthropicReasoningParams(assistant, model)
|
||||
@ -647,7 +651,7 @@ describe('reasoning utils', () => {
|
||||
expect(result).toEqual({
|
||||
thinking: {
|
||||
type: 'enabled',
|
||||
budgetTokens: 2048
|
||||
budgetTokens: 4096
|
||||
}
|
||||
})
|
||||
})
|
||||
@ -675,7 +679,7 @@ describe('reasoning utils', () => {
|
||||
expect(result).toEqual({})
|
||||
})
|
||||
|
||||
it('should disable thinking for Flash models without reasoning effort', async () => {
|
||||
it('should disable thinking for Flash models when reasoning effort is none', async () => {
|
||||
const { isReasoningModel, isSupportedThinkingTokenGeminiModel } = await import('@renderer/config/models')
|
||||
|
||||
vi.mocked(isReasoningModel).mockReturnValue(true)
|
||||
@ -690,7 +694,9 @@ describe('reasoning utils', () => {
|
||||
const assistant: Assistant = {
|
||||
id: 'test',
|
||||
name: 'Test',
|
||||
settings: {}
|
||||
settings: {
|
||||
reasoning_effort: 'none'
|
||||
}
|
||||
} as Assistant
|
||||
|
||||
const result = getGeminiReasoningParams(assistant, model)
|
||||
@ -725,7 +731,7 @@ describe('reasoning utils', () => {
|
||||
const result = getGeminiReasoningParams(assistant, model)
|
||||
expect(result).toEqual({
|
||||
thinkingConfig: {
|
||||
thinkingBudget: 16448,
|
||||
thinkingBudget: expect.any(Number),
|
||||
includeThoughts: true
|
||||
}
|
||||
})
|
||||
@ -889,7 +895,7 @@ describe('reasoning utils', () => {
|
||||
expect(result).toEqual({
|
||||
reasoningConfig: {
|
||||
type: 'enabled',
|
||||
budgetTokens: 2048
|
||||
budgetTokens: 4096
|
||||
}
|
||||
})
|
||||
})
|
||||
@ -990,4 +996,89 @@ describe('reasoning utils', () => {
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('getAnthropicThinkingBudget', () => {
|
||||
it('should return undefined when reasoningEffort is undefined', async () => {
|
||||
const result = getAnthropicThinkingBudget(4096, undefined, 'claude-3-7-sonnet')
|
||||
expect(result).toBeUndefined()
|
||||
})
|
||||
|
||||
it('should return undefined when reasoningEffort is none', async () => {
|
||||
const result = getAnthropicThinkingBudget(4096, 'none', 'claude-3-7-sonnet')
|
||||
expect(result).toBeUndefined()
|
||||
})
|
||||
|
||||
it('should return undefined when tokenLimit is not found', async () => {
|
||||
const { findTokenLimit } = await import('@renderer/config/models')
|
||||
vi.mocked(findTokenLimit).mockReturnValue(undefined)
|
||||
|
||||
const result = getAnthropicThinkingBudget(4096, 'medium', 'unknown-model')
|
||||
expect(result).toBeUndefined()
|
||||
})
|
||||
|
||||
it('should calculate budget correctly when maxTokens is provided', async () => {
|
||||
const { findTokenLimit } = await import('@renderer/config/models')
|
||||
vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
|
||||
|
||||
const result = getAnthropicThinkingBudget(4096, 'medium', 'claude-3-7-sonnet')
|
||||
// EFFORT_RATIO['medium'] = 0.5
|
||||
// budget = Math.floor((32768 - 1024) * 0.5 + 1024)
|
||||
// = Math.floor(31744 * 0.5 + 1024) = Math.floor(15872 + 1024) = 16896
|
||||
// budgetTokens = Math.min(16896, 4096) = 4096
|
||||
// result = Math.max(1024, 4096) = 4096
|
||||
expect(result).toBe(4096)
|
||||
})
|
||||
|
||||
it('should use tokenLimit.max when maxTokens is undefined', async () => {
|
||||
const { findTokenLimit } = await import('@renderer/config/models')
|
||||
vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
|
||||
|
||||
const result = getAnthropicThinkingBudget(undefined, 'medium', 'claude-3-7-sonnet')
|
||||
// When maxTokens is undefined, budget is not constrained by maxTokens
|
||||
// EFFORT_RATIO['medium'] = 0.5
|
||||
// budget = Math.floor((32768 - 1024) * 0.5 + 1024)
|
||||
// = Math.floor(31744 * 0.5 + 1024) = Math.floor(15872 + 1024) = 16896
|
||||
// result = Math.max(1024, 16896) = 16896
|
||||
expect(result).toBe(16896)
|
||||
})
|
||||
|
||||
it('should enforce minimum budget of 1024', async () => {
|
||||
const { findTokenLimit } = await import('@renderer/config/models')
|
||||
vi.mocked(findTokenLimit).mockReturnValue({ min: 100, max: 1000 })
|
||||
|
||||
const result = getAnthropicThinkingBudget(500, 'low', 'claude-3-7-sonnet')
|
||||
// EFFORT_RATIO['low'] = 0.05
|
||||
// budget = Math.floor((1000 - 100) * 0.05 + 100)
|
||||
// = Math.floor(900 * 0.05 + 100) = Math.floor(45 + 100) = 145
|
||||
// budgetTokens = Math.min(145, 500) = 145
|
||||
// result = Math.max(1024, 145) = 1024
|
||||
expect(result).toBe(1024)
|
||||
})
|
||||
|
||||
it('should respect effort ratio for high reasoning effort', async () => {
|
||||
const { findTokenLimit } = await import('@renderer/config/models')
|
||||
vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
|
||||
|
||||
const result = getAnthropicThinkingBudget(8192, 'high', 'claude-3-7-sonnet')
|
||||
// EFFORT_RATIO['high'] = 0.8
|
||||
// budget = Math.floor((32768 - 1024) * 0.8 + 1024)
|
||||
// = Math.floor(31744 * 0.8 + 1024) = Math.floor(25395.2 + 1024) = 26419
|
||||
// budgetTokens = Math.min(26419, 8192) = 8192
|
||||
// result = Math.max(1024, 8192) = 8192
|
||||
expect(result).toBe(8192)
|
||||
})
|
||||
|
||||
it('should use full token limit when maxTokens is undefined and reasoning effort is high', async () => {
|
||||
const { findTokenLimit } = await import('@renderer/config/models')
|
||||
vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
|
||||
|
||||
const result = getAnthropicThinkingBudget(undefined, 'high', 'claude-3-7-sonnet')
|
||||
// When maxTokens is undefined, budget is not constrained by maxTokens
|
||||
// EFFORT_RATIO['high'] = 0.8
|
||||
// budget = Math.floor((32768 - 1024) * 0.8 + 1024)
|
||||
// = Math.floor(31744 * 0.8 + 1024) = Math.floor(25395.2 + 1024) = 26419
|
||||
// result = Math.max(1024, 26419) = 26419
|
||||
expect(result).toBe(26419)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@ -10,6 +10,7 @@ import {
|
||||
GEMINI_FLASH_MODEL_REGEX,
|
||||
getModelSupportedReasoningEffortOptions,
|
||||
isDeepSeekHybridInferenceModel,
|
||||
isDoubaoSeed18Model,
|
||||
isDoubaoSeedAfter251015,
|
||||
isDoubaoThinkingAutoModel,
|
||||
isGemini3ThinkingTokenModel,
|
||||
@ -65,7 +66,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
// reasoningEffort is not set, so no extra reasoning settings are applied.
// Generally, every model that supports reasoning control has a defined reasoning effort;
// this branch covers reasoning models without reasoning control, such as DeepSeek Reasoner.
|
||||
if (!reasoningEffort) {
|
||||
if (!reasoningEffort || reasoningEffort === 'default') {
|
||||
return {}
|
||||
}
|
||||
|
||||
@ -330,7 +331,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
// Grok models/Perplexity models/OpenAI models, use reasoning_effort
|
||||
if (isSupportedReasoningEffortModel(model)) {
|
||||
// Check whether the model supports the selected option
|
||||
const supportedOptions = getModelSupportedReasoningEffortOptions(model)
|
||||
const supportedOptions = getModelSupportedReasoningEffortOptions(model)?.filter((option) => option !== 'default')
|
||||
if (supportedOptions?.includes(reasoningEffort)) {
|
||||
return {
|
||||
reasoningEffort
|
||||
@ -390,7 +391,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
|
||||
// Use thinking, doubao, zhipu, etc.
|
||||
if (isSupportedThinkingTokenDoubaoModel(model)) {
|
||||
if (isDoubaoSeedAfter251015(model)) {
|
||||
if (isDoubaoSeedAfter251015(model) || isDoubaoSeed18Model(model)) {
|
||||
return { reasoningEffort }
|
||||
}
|
||||
if (reasoningEffort === 'high') {
|
||||
@ -434,7 +435,7 @@ export function getOpenAIReasoningParams(
|
||||
|
||||
let reasoningEffort = assistant?.settings?.reasoning_effort
|
||||
|
||||
if (!reasoningEffort) {
|
||||
if (!reasoningEffort || reasoningEffort === 'default') {
|
||||
return {}
|
||||
}
|
||||
|
||||
@ -486,16 +487,14 @@ export function getAnthropicThinkingBudget(
|
||||
return undefined
|
||||
}
|
||||
|
||||
const budgetTokens = Math.max(
|
||||
1024,
|
||||
Math.floor(
|
||||
Math.min(
|
||||
(tokenLimit.max - tokenLimit.min) * effortRatio + tokenLimit.min,
|
||||
(maxTokens || DEFAULT_MAX_TOKENS) * effortRatio
|
||||
)
|
||||
)
|
||||
)
|
||||
return budgetTokens
|
||||
const budget = Math.floor((tokenLimit.max - tokenLimit.min) * effortRatio + tokenLimit.min)
|
||||
|
||||
let budgetTokens = budget
|
||||
if (maxTokens !== undefined) {
|
||||
budgetTokens = Math.min(budget, maxTokens)
|
||||
}
|
||||
|
||||
return Math.max(1024, budgetTokens)
|
||||
}
|
||||
|
||||
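The refactor also changes how maxTokens caps the budget: the old code scaled maxTokens by the effort ratio before taking the minimum, while the new code caps the computed budget at maxTokens directly. A worked example with the values from the updated unit tests above (EFFORT_RATIO.medium assumed to be 0.5, as the test comments state):

// tokenLimit = { min: 1024, max: 32768 }, maxTokens = 4096, effort = 'medium'
const budget = Math.floor((32768 - 1024) * 0.5 + 1024) // 16896
// old: Math.max(1024, Math.floor(Math.min(16896, 4096 * 0.5))) === 2048
// new: Math.max(1024, Math.min(16896, 4096))                   === 4096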
/**
|
||||
@ -512,7 +511,11 @@ export function getAnthropicReasoningParams(
|
||||
|
||||
const reasoningEffort = assistant?.settings?.reasoning_effort
|
||||
|
||||
if (reasoningEffort === undefined || reasoningEffort === 'none') {
|
||||
if (!reasoningEffort || reasoningEffort === 'default') {
|
||||
return {}
|
||||
}
|
||||
|
||||
if (reasoningEffort === 'none') {
|
||||
return {
|
||||
thinking: {
|
||||
type: 'disabled'
|
||||
@ -567,6 +570,10 @@ export function getGeminiReasoningParams(
|
||||
|
||||
const reasoningEffort = assistant?.settings?.reasoning_effort
|
||||
|
||||
if (!reasoningEffort || reasoningEffort === 'default') {
|
||||
return {}
|
||||
}
|
||||
|
||||
// Gemini reasoning parameters
|
||||
if (isSupportedThinkingTokenGeminiModel(model)) {
|
||||
if (reasoningEffort === undefined || reasoningEffort === 'none') {
|
||||
@ -627,10 +634,6 @@ export function getXAIReasoningParams(assistant: Assistant, model: Model): Pick<
|
||||
|
||||
const { reasoning_effort: reasoningEffort } = getAssistantSettings(assistant)
|
||||
|
||||
if (!reasoningEffort || reasoningEffort === 'none') {
|
||||
return {}
|
||||
}
|
||||
|
||||
switch (reasoningEffort) {
|
||||
case 'auto':
|
||||
case 'minimal':
|
||||
@ -641,6 +644,10 @@ export function getXAIReasoningParams(assistant: Assistant, model: Model): Pick<
|
||||
return { reasoningEffort }
|
||||
case 'xhigh':
|
||||
return { reasoningEffort: 'high' }
|
||||
case 'default':
|
||||
case 'none':
|
||||
default:
|
||||
return {}
|
||||
}
|
||||
}
|
||||
|
||||
@ -657,7 +664,7 @@ export function getBedrockReasoningParams(
|
||||
|
||||
const reasoningEffort = assistant?.settings?.reasoning_effort
|
||||
|
||||
if (reasoningEffort === undefined) {
|
||||
if (reasoningEffort === undefined || reasoningEffort === 'default') {
|
||||
return {}
|
||||
}
|
||||
|
||||
|
||||
@ -113,6 +113,18 @@ export function MdiLightbulbOn(props: SVGProps<SVGSVGElement>) {
|
||||
)
|
||||
}
|
||||
|
||||
export function MdiLightbulbQuestion(props: SVGProps<SVGSVGElement>) {
|
||||
// {/* Icon from Material Design Icons by Pictogrammers - https://github.com/Templarian/MaterialDesign/blob/master/LICENSE */}
|
||||
return (
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" viewBox="0 0 24 24" {...props}>
|
||||
<path
|
||||
fill="currentColor"
|
||||
d="M8 2C11.9 2 15 5.1 15 9C15 11.4 13.8 13.5 12 14.7V17C12 17.6 11.6 18 11 18H5C4.4 18 4 17.6 4 17V14.7C2.2 13.5 1 11.4 1 9C1 5.1 4.1 2 8 2M5 21V20H11V21C11 21.6 10.6 22 10 22H6C5.4 22 5 21.6 5 21M8 4C5.2 4 3 6.2 3 9C3 11.1 4.2 12.8 6 13.6V16H10V13.6C11.8 12.8 13 11.1 13 9C13 6.2 10.8 4 8 4M20.5 14.5V16H19V14.5H20.5M18.5 9.5H17V9C17 7.3 18.3 6 20 6S23 7.3 23 9C23 10 22.5 10.9 21.7 11.4L21.4 11.6C20.8 12 20.5 12.6 20.5 13.3V13.5H19V13.3C19 12.1 19.6 11 20.6 10.4L20.9 10.2C21.3 9.9 21.5 9.5 21.5 9C21.5 8.2 20.8 7.5 20 7.5S18.5 8.2 18.5 9V9.5Z"
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function BingLogo(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg
|
||||
|
||||
@ -3,6 +3,7 @@ import { ErrorBoundary } from '@renderer/components/ErrorBoundary'
|
||||
import { HelpTooltip } from '@renderer/components/TooltipIcons'
|
||||
import { TopView } from '@renderer/components/TopView'
|
||||
import { permissionModeCards } from '@renderer/config/agent'
|
||||
import { isWin } from '@renderer/config/constant'
|
||||
import { useAgents } from '@renderer/hooks/agents/useAgents'
|
||||
import { useUpdateAgent } from '@renderer/hooks/agents/useUpdateAgent'
|
||||
import SelectAgentBaseModelButton from '@renderer/pages/home/components/SelectAgentBaseModelButton'
|
||||
@ -16,7 +17,8 @@ import type {
|
||||
UpdateAgentForm
|
||||
} from '@renderer/types'
|
||||
import { AgentConfigurationSchema, isAgentType } from '@renderer/types'
|
||||
import { Alert, Button, Input, Modal, Select } from 'antd'
|
||||
import type { GitBashPathInfo } from '@shared/config/constant'
|
||||
import { Button, Input, Modal, Select } from 'antd'
|
||||
import { AlertTriangleIcon } from 'lucide-react'
|
||||
import type { ChangeEvent, FormEvent } from 'react'
|
||||
import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
|
||||
@ -59,8 +61,7 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
||||
const isEditing = (agent?: AgentWithTools) => agent !== undefined
|
||||
|
||||
const [form, setForm] = useState<BaseAgentForm>(() => buildAgentForm(agent))
|
||||
const [hasGitBash, setHasGitBash] = useState<boolean>(true)
|
||||
const [customGitBashPath, setCustomGitBashPath] = useState<string>('')
|
||||
const [gitBashPathInfo, setGitBashPathInfo] = useState<GitBashPathInfo>({ path: null, source: null })
|
||||
|
||||
useEffect(() => {
|
||||
if (open) {
|
||||
@ -68,29 +69,15 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
||||
}
|
||||
}, [agent, open])
|
||||
|
||||
const checkGitBash = useCallback(
|
||||
async (showToast = false) => {
|
||||
try {
|
||||
const [gitBashInstalled, savedPath] = await Promise.all([
|
||||
window.api.system.checkGitBash(),
|
||||
window.api.system.getGitBashPath().catch(() => null)
|
||||
])
|
||||
setCustomGitBashPath(savedPath ?? '')
|
||||
setHasGitBash(gitBashInstalled)
|
||||
if (showToast) {
|
||||
if (gitBashInstalled) {
|
||||
window.toast.success(t('agent.gitBash.success', 'Git Bash detected successfully!'))
|
||||
} else {
|
||||
window.toast.error(t('agent.gitBash.notFound', 'Git Bash not found. Please install it first.'))
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Failed to check Git Bash:', error as Error)
|
||||
setHasGitBash(true) // Default to true on error to avoid false warnings
|
||||
}
|
||||
},
|
||||
[t]
|
||||
)
|
||||
const checkGitBash = useCallback(async () => {
|
||||
if (!isWin) return
|
||||
try {
|
||||
const pathInfo = await window.api.system.getGitBashPathInfo()
|
||||
setGitBashPathInfo(pathInfo)
|
||||
} catch (error) {
|
||||
logger.error('Failed to check Git Bash:', error as Error)
|
||||
}
|
||||
}, [])
|
||||
|
||||
useEffect(() => {
|
||||
checkGitBash()
|
||||
@ -119,24 +106,22 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
||||
return
|
||||
}
|
||||
|
||||
setCustomGitBashPath(pickedPath)
|
||||
await checkGitBash(true)
|
||||
await checkGitBash()
|
||||
} catch (error) {
|
||||
logger.error('Failed to pick Git Bash path', error as Error)
|
||||
window.toast.error(t('agent.gitBash.pick.failed', 'Failed to set Git Bash path'))
|
||||
}
|
||||
}, [checkGitBash, t])
|
||||
|
||||
const handleClearGitBash = useCallback(async () => {
|
||||
const handleResetGitBash = useCallback(async () => {
|
||||
try {
|
||||
// Clear manual setting and re-run auto-discovery
|
||||
await window.api.system.setGitBashPath(null)
|
||||
setCustomGitBashPath('')
|
||||
await checkGitBash(true)
|
||||
await checkGitBash()
|
||||
} catch (error) {
|
||||
logger.error('Failed to clear Git Bash path', error as Error)
|
||||
window.toast.error(t('agent.gitBash.pick.failed', 'Failed to set Git Bash path'))
|
||||
logger.error('Failed to reset Git Bash path', error as Error)
|
||||
}
|
||||
}, [checkGitBash, t])
|
||||
}, [checkGitBash])
|
||||
|
||||
const onPermissionModeChange = useCallback((value: PermissionMode) => {
|
||||
setForm((prev) => {
|
||||
@ -268,6 +253,12 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
||||
return
|
||||
}
|
||||
|
||||
if (isWin && !gitBashPathInfo.path) {
|
||||
window.toast.error(t('agent.gitBash.error.required', 'Git Bash path is required on Windows'))
|
||||
loadingRef.current = false
|
||||
return
|
||||
}
|
||||
|
||||
if (isEditing(agent)) {
|
||||
if (!agent) {
|
||||
loadingRef.current = false
|
||||
@ -327,7 +318,8 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
||||
t,
|
||||
updateAgent,
|
||||
afterSubmit,
|
||||
addAgent
|
||||
addAgent,
|
||||
gitBashPathInfo.path
|
||||
]
|
||||
)
|
||||
|
||||
@ -346,66 +338,6 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
||||
footer={null}>
|
||||
<StyledForm onSubmit={onSubmit}>
|
||||
<FormContent>
|
||||
{!hasGitBash && (
|
||||
<Alert
|
||||
message={t('agent.gitBash.error.title', 'Git Bash Required')}
|
||||
description={
|
||||
<div>
|
||||
<div style={{ marginBottom: 8 }}>
|
||||
{t(
|
||||
'agent.gitBash.error.description',
|
||||
'Git Bash is required to run agents on Windows. The agent cannot function without it. Please install Git for Windows from'
|
||||
)}{' '}
|
||||
<a
|
||||
href="https://git-scm.com/download/win"
|
||||
onClick={(e) => {
|
||||
e.preventDefault()
|
||||
window.api.openWebsite('https://git-scm.com/download/win')
|
||||
}}
|
||||
style={{ textDecoration: 'underline' }}>
|
||||
git-scm.com
|
||||
</a>
|
||||
</div>
|
||||
<Button size="small" onClick={() => checkGitBash(true)}>
|
||||
{t('agent.gitBash.error.recheck', 'Recheck Git Bash Installation')}
|
||||
</Button>
|
||||
<Button size="small" style={{ marginLeft: 8 }} onClick={handlePickGitBash}>
|
||||
{t('agent.gitBash.pick.button', 'Select Git Bash Path')}
|
||||
</Button>
|
||||
</div>
|
||||
}
|
||||
type="error"
|
||||
showIcon
|
||||
style={{ marginBottom: 16 }}
|
||||
/>
|
||||
)}
|
||||
|
||||
{hasGitBash && customGitBashPath && (
|
||||
<Alert
|
||||
message={t('agent.gitBash.found.title', 'Git Bash configured')}
|
||||
description={
|
||||
<div style={{ display: 'flex', flexDirection: 'column', gap: 8 }}>
|
||||
<div>
|
||||
{t('agent.gitBash.customPath', {
|
||||
defaultValue: 'Using custom path: {{path}}',
|
||||
path: customGitBashPath
|
||||
})}
|
||||
</div>
|
||||
<div style={{ display: 'flex', gap: 8 }}>
|
||||
<Button size="small" onClick={handlePickGitBash}>
|
||||
{t('agent.gitBash.pick.button', 'Select Git Bash Path')}
|
||||
</Button>
|
||||
<Button size="small" onClick={handleClearGitBash}>
|
||||
{t('agent.gitBash.clear.button', 'Clear custom path')}
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
}
|
||||
type="success"
|
||||
showIcon
|
||||
style={{ marginBottom: 16 }}
|
||||
/>
|
||||
)}
|
||||
<FormRow>
|
||||
<FormItem style={{ flex: 1 }}>
|
||||
<Label>
|
||||
@ -439,6 +371,40 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
||||
/>
|
||||
</FormItem>
|
||||
|
||||
{isWin && (
|
||||
<FormItem>
|
||||
<div className="flex items-center gap-2">
|
||||
<Label>
|
||||
Git Bash <RequiredMark>*</RequiredMark>
|
||||
</Label>
|
||||
<HelpTooltip
|
||||
title={t(
|
||||
'agent.gitBash.tooltip',
|
||||
'Git Bash is required to run agents on Windows. Install from git-scm.com if not available.'
|
||||
)}
|
||||
/>
|
||||
</div>
|
||||
<GitBashInputWrapper>
|
||||
<Input
|
||||
value={gitBashPathInfo.path ?? ''}
|
||||
readOnly
|
||||
placeholder={t('agent.gitBash.placeholder', 'Select bash.exe path')}
|
||||
/>
|
||||
<Button size="small" onClick={handlePickGitBash}>
|
||||
{t('common.select', 'Select')}
|
||||
</Button>
|
||||
{gitBashPathInfo.source === 'manual' && (
|
||||
<Button size="small" onClick={handleResetGitBash}>
|
||||
{t('common.reset', 'Reset')}
|
||||
</Button>
|
||||
)}
|
||||
</GitBashInputWrapper>
|
||||
{gitBashPathInfo.path && gitBashPathInfo.source === 'auto' && (
|
||||
<SourceHint>{t('agent.gitBash.autoDiscoveredHint', 'Auto-discovered')}</SourceHint>
|
||||
)}
|
||||
</FormItem>
|
||||
)}
|
||||
|
||||
<FormItem>
|
||||
<Label>
|
||||
{t('agent.settings.tooling.permissionMode.title', 'Permission mode')} <RequiredMark>*</RequiredMark>
|
||||
@ -511,7 +477,11 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
||||
|
||||
<FormFooter>
|
||||
<Button onClick={onCancel}>{t('common.close')}</Button>
|
||||
<Button type="primary" htmlType="submit" loading={loadingRef.current} disabled={!hasGitBash}>
|
||||
<Button
|
||||
type="primary"
|
||||
htmlType="submit"
|
||||
loading={loadingRef.current}
|
||||
disabled={isWin && !gitBashPathInfo.path}>
|
||||
{isEditing(agent) ? t('common.confirm') : t('common.add')}
|
||||
</Button>
|
||||
</FormFooter>
|
||||
@ -582,6 +552,21 @@ const FormItem = styled.div`
|
||||
gap: 8px;
|
||||
`
|
||||
|
||||
const GitBashInputWrapper = styled.div`
|
||||
display: flex;
|
||||
gap: 8px;
|
||||
align-items: center;
|
||||
|
||||
input {
|
||||
flex: 1;
|
||||
}
|
||||
`
|
||||
|
||||
const SourceHint = styled.span`
|
||||
font-size: 12px;
|
||||
color: var(--color-text-3);
|
||||
`
|
||||
|
||||
const Label = styled.label`
|
||||
font-size: 14px;
|
||||
color: var(--color-text-1);
|
||||
|
||||
@ -631,7 +631,7 @@ describe('Reasoning option configuration', () => {
|
||||
|
||||
it('restricts GPT-5 Pro reasoning to high effort only', () => {
|
||||
expect(MODEL_SUPPORTED_REASONING_EFFORT.gpt5pro).toEqual(['high'])
|
||||
expect(MODEL_SUPPORTED_OPTIONS.gpt5pro).toEqual(['high'])
|
||||
expect(MODEL_SUPPORTED_OPTIONS.gpt5pro).toEqual(['default', 'high'])
|
||||
})
|
||||
})
|
||||
|
||||
@ -733,6 +733,11 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
|
||||
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-6-lite-251015' }))).toBe('doubao_after_251015')
|
||||
})
|
||||
|
||||
it('should return doubao_after_251015 for Doubao-Seed-1.8 models', () => {
|
||||
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251215' }))).toBe('doubao_after_251015')
|
||||
expect(getThinkModelType(createModel({ id: 'doubao-seed-1.8' }))).toBe('doubao_after_251015')
|
||||
})
|
||||
|
||||
it('should return doubao_no_auto for other Doubao thinking models', () => {
|
||||
expect(getThinkModelType(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toBe('doubao_no_auto')
|
||||
})
|
||||
@ -863,6 +868,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
|
||||
// auto > after_251015 > no_auto
|
||||
expect(getThinkModelType(createModel({ id: 'doubao-seed-1.6' }))).toBe('doubao')
|
||||
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-6-251015' }))).toBe('doubao_after_251015')
|
||||
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251215' }))).toBe('doubao_after_251015')
|
||||
expect(getThinkModelType(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toBe('doubao_no_auto')
|
||||
})
|
||||
|
||||
@ -1672,10 +1678,26 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
|
||||
describe('OpenAI models', () => {
|
||||
it('should return correct options for o-series models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3' }))).toEqual(['low', 'medium', 'high'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3-mini' }))).toEqual(['low', 'medium', 'high'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o4' }))).toEqual(['low', 'medium', 'high'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3' }))).toEqual([
|
||||
'default',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3-mini' }))).toEqual([
|
||||
'default',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o4' }))).toEqual([
|
||||
'default',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-oss-reasoning' }))).toEqual([
|
||||
'default',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
@ -1685,17 +1707,22 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
it('should return correct options for deep research models', () => {
|
||||
// Note: Deep research models need to be actual OpenAI reasoning models to be detected
|
||||
// 'sonar-deep-research' from Perplexity is the primary deep research model
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual(['medium'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual([
|
||||
'default',
|
||||
'medium'
|
||||
])
|
||||
})
|
||||
|
||||
it('should return correct options for GPT-5 models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5' }))).toEqual([
|
||||
'default',
|
||||
'minimal',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-preview' }))).toEqual([
|
||||
'default',
|
||||
'minimal',
|
||||
'low',
|
||||
'medium',
|
||||
@ -1704,17 +1731,22 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
})
|
||||
|
||||
it('should return correct options for GPT-5 Pro models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro' }))).toEqual(['high'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro-preview' }))).toEqual(['high'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro' }))).toEqual(['default', 'high'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro-preview' }))).toEqual([
|
||||
'default',
|
||||
'high'
|
||||
])
|
||||
})
|
||||
|
||||
it('should return correct options for GPT-5 Codex models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-codex' }))).toEqual([
|
||||
'default',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-codex-mini' }))).toEqual([
|
||||
'default',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
@ -1723,18 +1755,21 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
|
||||
it('should return correct options for GPT-5.1 models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-preview' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-mini' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
@ -1744,11 +1779,13 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
|
||||
it('should return correct options for GPT-5.1 Codex models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-codex' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-codex-mini' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'medium',
|
||||
'high'
|
||||
@ -1758,19 +1795,24 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
|
||||
describe('Grok models', () => {
|
||||
it('should return correct options for Grok 3 mini', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'grok-3-mini' }))).toEqual(['low', 'high'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'grok-3-mini' }))).toEqual([
|
||||
'default',
|
||||
'low',
|
||||
'high'
|
||||
])
|
||||
})
|
||||
|
||||
it('should return correct options for Grok 4 Fast', () => {
|
||||
expect(
|
||||
getModelSupportedReasoningEffortOptions(createModel({ id: 'grok-4-fast', provider: 'openrouter' }))
|
||||
).toEqual(['none', 'auto'])
|
||||
).toEqual(['default', 'none', 'auto'])
|
||||
})
|
||||
})
|
||||
|
||||
describe('Gemini models', () => {
|
||||
it('should return correct options for Gemini Flash models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-2.5-flash-latest' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
@ -1778,6 +1820,7 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
'auto'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-flash-latest' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
@ -1788,12 +1831,14 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
|
||||
it('should return correct options for Gemini Pro models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-2.5-pro-latest' }))).toEqual([
|
||||
'default',
|
||||
'low',
|
||||
'medium',
|
||||
'high',
|
||||
'auto'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-pro-latest' }))).toEqual([
|
||||
'default',
|
||||
'low',
|
||||
'medium',
|
||||
'high',
|
||||
@ -1803,11 +1848,13 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
|
||||
it('should return correct options for Gemini 3 models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-flash' }))).toEqual([
|
||||
'default',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-pro-preview' }))).toEqual([
|
||||
'default',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
@ -1818,24 +1865,28 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
describe('Qwen models', () => {
|
||||
it('should return correct options for controllable Qwen models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen-plus' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen-turbo' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen-flash' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen3-8b' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
@ -1853,11 +1904,13 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
describe('Doubao models', () => {
|
||||
it('should return correct options for auto-thinking Doubao models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-seed-1.6' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'auto',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-1-5-thinking-pro-m' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'auto',
|
||||
'high'
|
||||
@ -1866,12 +1919,14 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
|
||||
it('should return correct options for Doubao models after 251015', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-seed-1-6-251015' }))).toEqual([
|
||||
'default',
|
||||
'minimal',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-seed-1-6-lite-251015' }))).toEqual([
|
||||
'default',
|
||||
'minimal',
|
||||
'low',
|
||||
'medium',
|
||||
@ -1881,6 +1936,7 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
|
||||
it('should return correct options for other Doubao thinking models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'high'
|
||||
])
|
||||
@ -1889,28 +1945,43 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
|
||||
describe('Other providers', () => {
|
||||
it('should return correct options for Hunyuan models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'hunyuan-a13b' }))).toEqual(['none', 'auto'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'hunyuan-a13b' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'auto'
|
||||
])
|
||||
})
|
||||
|
||||
it('should return correct options for Zhipu models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.5' }))).toEqual(['none', 'auto'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.6' }))).toEqual(['none', 'auto'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.5' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'auto'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.6' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'auto'
|
||||
])
|
||||
})
|
||||
|
||||
it('should return correct options for Perplexity models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual(['medium'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual([
|
||||
'default',
|
||||
'medium'
|
||||
])
|
||||
})
|
||||
|
||||
it('should return correct options for DeepSeek hybrid models', () => {
|
||||
expect(
|
||||
getModelSupportedReasoningEffortOptions(createModel({ id: 'deepseek-v3.1', provider: 'deepseek' }))
|
||||
).toEqual(['none', 'auto'])
|
||||
).toEqual(['default', 'none', 'auto'])
|
||||
expect(
|
||||
getModelSupportedReasoningEffortOptions(createModel({ id: 'deepseek-v3.2', provider: 'openrouter' }))
|
||||
).toEqual(['none', 'auto'])
|
||||
).toEqual(['default', 'none', 'auto'])
|
||||
expect(
|
||||
getModelSupportedReasoningEffortOptions(createModel({ id: 'deepseek-chat', provider: 'deepseek' }))
|
||||
).toEqual(['none', 'auto'])
|
||||
).toEqual(['default', 'none', 'auto'])
|
||||
})
|
||||
})
|
||||
|
||||
@ -1925,7 +1996,7 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
provider: 'openrouter'
|
||||
})
|
||||
)
|
||||
).toEqual(['none', 'auto'])
|
||||
).toEqual(['default', 'none', 'auto'])
|
||||
|
||||
expect(
|
||||
getModelSupportedReasoningEffortOptions(
|
||||
@ -1934,7 +2005,7 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
name: 'gpt-5.1'
|
||||
})
|
||||
)
|
||||
).toEqual(['none', 'low', 'medium', 'high'])
|
||||
).toEqual(['default', 'none', 'low', 'medium', 'high'])
|
||||
|
||||
// Qwen models work well for name-based fallback
|
||||
expect(
|
||||
@ -1944,7 +2015,7 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
name: 'qwen-plus'
|
||||
})
|
||||
)
|
||||
).toEqual(['none', 'low', 'medium', 'high'])
|
||||
).toEqual(['default', 'none', 'low', 'medium', 'high'])
|
||||
})
|
||||
|
||||
it('should use id result when id matches', () => {
|
||||
@ -1955,7 +2026,7 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
name: 'Different Name'
|
||||
})
|
||||
)
|
||||
).toEqual(['none', 'low', 'medium', 'high'])
|
||||
).toEqual(['default', 'none', 'low', 'medium', 'high'])
|
||||
|
||||
expect(
|
||||
getModelSupportedReasoningEffortOptions(
|
||||
@ -1964,20 +2035,27 @@ describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
name: 'Some other name'
|
||||
})
|
||||
)
|
||||
).toEqual(['low', 'medium', 'high'])
|
||||
).toEqual(['default', 'low', 'medium', 'high'])
|
||||
})
|
||||
})
|
||||
|
||||
describe('Case sensitivity', () => {
|
||||
it('should handle case insensitive model IDs', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'GPT-5.1' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'O3-MINI' }))).toEqual(['low', 'medium', 'high'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'O3-MINI' }))).toEqual([
|
||||
'default',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'Gemini-2.5-Flash-Latest' }))).toEqual([
|
||||
'default',
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
|
||||
@ -362,7 +362,7 @@ export const SYSTEM_MODELS: Record<SystemProviderId | 'defaultModel', Model[]> =
|
||||
{
|
||||
id: 'gemini-3-pro-image-preview',
|
||||
provider: 'gemini',
|
||||
name: 'Gemini 3 Pro Image Privew',
|
||||
name: 'Gemini 3 Pro Image Preview',
|
||||
group: 'Gemini 3'
|
||||
},
|
||||
{
|
||||
@ -746,6 +746,12 @@ export const SYSTEM_MODELS: Record<SystemProviderId | 'defaultModel', Model[]> =
|
||||
}
|
||||
],
|
||||
doubao: [
|
||||
{
|
||||
id: 'doubao-seed-1-8-251215',
|
||||
provider: 'doubao',
|
||||
name: 'Doubao-Seed-1.8',
|
||||
group: 'Doubao-Seed-1.8'
|
||||
},
|
||||
{
|
||||
id: 'doubao-1-5-vision-pro-32k-250115',
|
||||
provider: 'doubao',
|
||||
|
||||
@ -60,32 +60,32 @@ export const MODEL_SUPPORTED_REASONING_EFFORT = {
|
||||
|
||||
// Mapping from model type to its supported options
|
||||
export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
|
||||
default: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const,
|
||||
o: MODEL_SUPPORTED_REASONING_EFFORT.o,
|
||||
openai_deep_research: MODEL_SUPPORTED_REASONING_EFFORT.openai_deep_research,
|
||||
gpt5: [...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const,
|
||||
gpt5pro: MODEL_SUPPORTED_REASONING_EFFORT.gpt5pro,
|
||||
gpt5_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_codex,
|
||||
gpt5_1: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1,
|
||||
gpt5_1_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex,
|
||||
gpt5_2: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_2,
|
||||
gpt5_1_codex_max: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex_max,
|
||||
gpt52pro: MODEL_SUPPORTED_REASONING_EFFORT.gpt52pro,
|
||||
grok: MODEL_SUPPORTED_REASONING_EFFORT.grok,
|
||||
grok4_fast: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
|
||||
gemini: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
|
||||
gemini_pro: MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro,
|
||||
gemini3: MODEL_SUPPORTED_REASONING_EFFORT.gemini3,
|
||||
qwen: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const,
|
||||
qwen_thinking: MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking,
|
||||
doubao: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const,
|
||||
doubao_no_auto: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_no_auto] as const,
|
||||
doubao_after_251015: MODEL_SUPPORTED_REASONING_EFFORT.doubao_after_251015,
|
||||
mimo: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.mimo] as const,
|
||||
hunyuan: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const,
|
||||
zhipu: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const,
|
||||
perplexity: MODEL_SUPPORTED_REASONING_EFFORT.perplexity,
|
||||
deepseek_hybrid: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.deepseek_hybrid] as const
|
||||
default: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const,
|
||||
o: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.o] as const,
|
||||
openai_deep_research: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.openai_deep_research] as const,
|
||||
gpt5: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const,
|
||||
gpt5pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5pro] as const,
|
||||
gpt5_codex: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_codex] as const,
|
||||
gpt5_1: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1] as const,
|
||||
gpt5_1_codex: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex] as const,
|
||||
gpt5_2: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_2] as const,
|
||||
gpt5_1_codex_max: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex_max] as const,
|
||||
gpt52pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt52pro] as const,
|
||||
grok: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.grok] as const,
|
||||
grok4_fast: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
|
||||
gemini: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
|
||||
gemini_pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro] as const,
|
||||
gemini3: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini3] as const,
|
||||
qwen: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const,
|
||||
qwen_thinking: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking] as const,
|
||||
doubao: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const,
|
||||
doubao_no_auto: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_no_auto] as const,
|
||||
doubao_after_251015: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_after_251015] as const,
|
||||
mimo: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.mimo] as const,
|
||||
hunyuan: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const,
|
||||
zhipu: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const,
|
||||
perplexity: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.perplexity] as const,
|
||||
deepseek_hybrid: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.deepseek_hybrid] as const
|
||||
} as const
|
||||
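Every option list now starts with 'default'. A short sketch of how callers are expected to treat it (illustrative, mirroring the filter added in OpenAIAPIClient and getReasoningEffort above):

const options = getModelSupportedReasoningEffortOptions(model) // e.g. ['default', 'low', 'medium', 'high']
// UI: render every option; 'default' means "leave the model's reasoning behaviour untouched".
// Request builders: drop 'default' before mapping to provider-specific parameters.
const providerOptions = options?.filter((option) => option !== 'default')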
|
||||
const withModelIdAndNameAsId = <T>(model: Model, fn: (model: Model) => T): { idResult: T; nameResult: T } => {
|
||||
@ -148,7 +148,7 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
|
||||
} else if (isSupportedThinkingTokenDoubaoModel(model)) {
|
||||
if (isDoubaoThinkingAutoModel(model)) {
|
||||
thinkingModelType = 'doubao'
|
||||
} else if (isDoubaoSeedAfter251015(model)) {
|
||||
} else if (isDoubaoSeedAfter251015(model) || isDoubaoSeed18Model(model)) {
|
||||
thinkingModelType = 'doubao_after_251015'
|
||||
} else {
|
||||
thinkingModelType = 'doubao_no_auto'
|
||||
@ -194,20 +194,28 @@ const _getModelSupportedReasoningEffortOptions = (model: Model): ReasoningEffort
|
||||
* - The model is null/undefined
|
||||
* - The model doesn't support reasoning effort or thinking tokens
|
||||
*
|
||||
* All reasoning models support the 'default' option (always the first element),
|
||||
* which represents no additional configuration for thinking behavior.
|
||||
*
|
||||
* @example
|
||||
* // OpenAI o-series models support low, medium, high
|
||||
* // OpenAI o-series models support default, low, medium, high
|
||||
* getModelSupportedReasoningEffortOptions({ id: 'o3-mini', ... })
|
||||
* // Returns: ['low', 'medium', 'high']
|
||||
* // Returns: ['default', 'low', 'medium', 'high']
|
||||
* // 'default' = no additional configuration for thinking behavior
|
||||
*
|
||||
* @example
|
||||
* // GPT-5.1 models support none, low, medium, high
|
||||
* // GPT-5.1 models support default, none, low, medium, high
|
||||
* getModelSupportedReasoningEffortOptions({ id: 'gpt-5.1', ... })
|
||||
* // Returns: ['none', 'low', 'medium', 'high']
|
||||
* // Returns: ['default', 'none', 'low', 'medium', 'high']
|
||||
* // 'default' = no additional configuration
|
||||
* // 'none' = explicitly disable reasoning
|
||||
*
|
||||
* @example
|
||||
* // Gemini Flash models support none, low, medium, high, auto
|
||||
* // Gemini Flash models support default, none, low, medium, high, auto
|
||||
* getModelSupportedReasoningEffortOptions({ id: 'gemini-2.5-flash-latest', ... })
|
||||
* // Returns: ['none', 'low', 'medium', 'high', 'auto']
|
||||
* // Returns: ['default', 'none', 'low', 'medium', 'high', 'auto']
|
||||
* // 'default' = no additional configuration
|
||||
* // 'auto' = let the model automatically decide
|
||||
*
|
||||
* @example
|
||||
* // Non-reasoning models return undefined
|
||||
@ -217,7 +225,7 @@ const _getModelSupportedReasoningEffortOptions = (model: Model): ReasoningEffort
|
||||
* @example
|
||||
* // Name fallback when id doesn't match
|
||||
* getModelSupportedReasoningEffortOptions({ id: 'custom-id', name: 'gpt-5.1', ... })
|
||||
* // Returns: ['none', 'low', 'medium', 'high']
|
||||
* // Returns: ['default', 'none', 'low', 'medium', 'high']
|
||||
*/
|
||||
export const getModelSupportedReasoningEffortOptions = (
|
||||
model: Model | undefined | null
|
||||
@ -453,7 +461,7 @@ export function isQwenAlwaysThinkModel(model?: Model): boolean {
|
||||
|
||||
// Regex for Doubao models that support thinking mode
|
||||
export const DOUBAO_THINKING_MODEL_REGEX =
|
||||
/doubao-(?:1[.-]5-thinking-vision-pro|1[.-]5-thinking-pro-m|seed-1[.-]6(?:-flash)?(?!-(?:thinking)(?:-|$))|seed-code(?:-preview)?(?:-\d+)?)(?:-[\w-]+)*/i
|
||||
/doubao-(?:1[.-]5-thinking-vision-pro|1[.-]5-thinking-pro-m|seed-1[.-][68](?:-flash)?(?!-(?:thinking)(?:-|$))|seed-code(?:-preview)?(?:-\d+)?)(?:-[\w-]+)*/i
|
||||
|
||||
// Doubao models that support auto thinking: doubao-seed-1.6-xxx, doubao-seed-1-6-xxx, doubao-1-5-thinking-pro-m-xxx
|
||||
// Auto thinking is no longer supported after version 251015, see https://console.volcengine.com/ark/region:ark+cn-beijing/model/detail?Id=doubao-seed-1-6
|
||||
@ -471,6 +479,11 @@ export function isDoubaoSeedAfter251015(model: Model): boolean {
|
||||
return result
|
||||
}
|
||||
|
||||
export function isDoubaoSeed18Model(model: Model): boolean {
|
||||
const pattern = /doubao-seed-1[.-]8(?:-[\w-]+)?/i
|
||||
return pattern.test(model.id) || pattern.test(model.name)
|
||||
}
|
||||
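A quick check of the widened matching (the example ids come from the tests and SYSTEM_MODELS entries elsewhere in this diff):

const seed18 = /doubao-seed-1[.-]8(?:-[\w-]+)?/i
seed18.test('doubao-seed-1-8-251215') // true
seed18.test('doubao-seed-1.8')        // true
seed18.test('doubao-seed-1-6-251015') // false (still covered by isDoubaoSeedAfter251015)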
|
||||
export function isSupportedThinkingTokenDoubaoModel(model?: Model): boolean {
|
||||
if (!model) {
|
||||
return false
|
||||
|
||||
@ -25,7 +25,7 @@ export const FUNCTION_CALLING_MODELS = [
|
||||
'learnlm(?:-[\\w-]+)?',
|
||||
'gemini(?:-[\\w-]+)?', // Gemini embedding models are already excluded earlier
|
||||
'grok-3(?:-[\\w-]+)?',
|
||||
'doubao-seed-1[.-]6(?:-[\\w-]+)?',
|
||||
'doubao-seed-1[.-][68](?:-[\\w-]+)?',
|
||||
'doubao-seed-code(?:-[\\w-]+)?',
|
||||
'kimi-k2(?:-[\\w-]+)?',
|
||||
'ling-\\w+(?:-[\\w-]+)?',
|
||||
|
||||
@ -45,7 +45,7 @@ const visionAllowedModels = [
|
||||
'deepseek-vl(?:[\\w-]+)?',
|
||||
'kimi-latest',
|
||||
'gemma-3(?:-[\\w-]+)',
|
||||
'doubao-seed-1[.-]6(?:-[\\w-]+)?',
|
||||
'doubao-seed-1[.-][68](?:-[\\w-]+)?',
|
||||
'doubao-seed-code(?:-[\\w-]+)?',
|
||||
'kimi-thinking-preview',
|
||||
`gemma3(?:[-:\\w]+)?`,
|
||||
|
||||
@ -5,7 +5,7 @@
*/

import { loggerService } from '@logger'
import type { AgentType, BuiltinMCPServerName, BuiltinOcrProviderId, ThinkingOption } from '@renderer/types'
import type { AgentType, BuiltinMCPServerName, BuiltinOcrProviderId } from '@renderer/types'
import { BuiltinMCPServerNames } from '@renderer/types'

import i18n from './index'
@ -311,20 +311,6 @@ export const getHttpMessageLabel = (key: string): string => {
return getLabel(httpMessageKeyMap, key)
}

const reasoningEffortOptionsKeyMap: Record<ThinkingOption, string> = {
none: 'assistants.settings.reasoning_effort.off',
minimal: 'assistants.settings.reasoning_effort.minimal',
high: 'assistants.settings.reasoning_effort.high',
low: 'assistants.settings.reasoning_effort.low',
medium: 'assistants.settings.reasoning_effort.medium',
auto: 'assistants.settings.reasoning_effort.default',
xhigh: 'assistants.settings.reasoning_effort.xhigh'
} as const

export const getReasoningEffortOptionsLabel = (key: string): string => {
return getLabel(reasoningEffortOptionsKeyMap, key)
}

const fileFieldKeyMap = {
created_at: 'files.created_at',
size: 'files.size',
@ -345,7 +331,8 @@ const builtInMcpDescriptionKeyMap: Record<BuiltinMCPServerName, string> = {
[BuiltinMCPServerNames.difyKnowledge]: 'settings.mcp.builtinServersDescriptions.dify_knowledge',
[BuiltinMCPServerNames.python]: 'settings.mcp.builtinServersDescriptions.python',
[BuiltinMCPServerNames.didiMCP]: 'settings.mcp.builtinServersDescriptions.didi_mcp',
[BuiltinMCPServerNames.browser]: 'settings.mcp.builtinServersDescriptions.browser'
[BuiltinMCPServerNames.browser]: 'settings.mcp.builtinServersDescriptions.browser',
[BuiltinMCPServerNames.nowledgeMem]: 'settings.mcp.builtinServersDescriptions.nowledge_mem'
} as const

export const getBuiltInMcpServerDescriptionLabel = (key: string): string => {
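For context, the removed reasoningEffortOptionsKeyMap/getReasoningEffortOptionsLabel pair (its labels now live inside the ThinkingButton component, shown further down) followed the same key-map-plus-getLabel pattern that the remaining maps in this file still use. A minimal sketch of that pattern; getLabel itself is defined elsewhere in the repo, so this version of it is an assumption made only to illustrate the lookup:

// Sketch only: getLabel's real implementation is not part of this diff.
function getLabel(keyMap: Record<string, string>, key: string): string {
  const i18nKey = keyMap[key] ?? key // fall back to the raw key when unmapped
  return i18n.t(i18nKey) // resolve through the i18n instance imported above
}

getBuiltInMcpServerDescriptionLabel(BuiltinMCPServerNames.nowledgeMem)
// -> localized text for 'settings.mcp.builtinServersDescriptions.nowledge_mem'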
@ -32,6 +32,7 @@
},
"gitBash": {
"autoDetected": "Using auto-detected Git Bash",
"autoDiscoveredHint": "Auto-discovered",
"clear": {
"button": "Clear custom path"
},
@ -39,6 +40,7 @@
"error": {
"description": "Git Bash is required to run agents on Windows. The agent cannot function without it. Please install Git for Windows from",
"recheck": "Recheck Git Bash Installation",
"required": "Git Bash path is required on Windows",
"title": "Git Bash Required"
},
"found": {
@ -51,7 +53,9 @@
"invalidPath": "Selected file is not a valid Git Bash executable (bash.exe).",
"title": "Select Git Bash executable"
},
"success": "Git Bash detected successfully!"
"placeholder": "Select bash.exe path",
"success": "Git Bash detected successfully!",
"tooltip": "Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
},
"input": {
"placeholder": "Enter your message here, send with {{key}} - @ select path, / select command"
@ -472,6 +476,7 @@
"button": "Import",
"error": {
"fetch_failed": "Failed to fetch from URL",
"file_required": "Please select a file first",
"invalid_format": "Invalid assistant format: missing required fields",
"url_required": "Please enter a URL"
},
@ -486,11 +491,14 @@
},
"manage": {
"batch_delete": {
"button": "Batch Delete",
"button": "Delete",
"confirm": "Are you sure you want to delete the selected {{count}} assistants?"
},
"batch_export": {
"button": "Export"
},
"mode": {
"delete": "Delete",
"manage": "Manage",
"sort": "Sort"
},
"title": "Manage Assistants"
@ -540,14 +548,23 @@
"more": "Assistant Settings",
"prompt": "Prompt Settings",
"reasoning_effort": {
"auto": "Auto",
"auto_description": "Flexibly determine reasoning effort",
"default": "Default",
"default_description": "Depend on the model's default behavior, without any configuration.",
"high": "High",
"high_description": "High level reasoning",
"label": "Reasoning effort",
"low": "Low",
"low_description": "Low level reasoning",
"medium": "Medium",
"medium_description": "Medium level reasoning",
"minimal": "Minimal",
"minimal_description": "Minimal reasoning",
"off": "Off",
"xhigh": "Extra High"
"off_description": "Disable reasoning",
"xhigh": "Extra High",
"xhigh_description": "Extra high level reasoning"
},
"regular_phrases": {
"add": "Add Phrase",
@ -1248,11 +1265,13 @@
}
},
"stop": "Stop",
"subscribe": "Subscribe",
"success": "Success",
"swap": "Swap",
"topics": "Topics",
"unknown": "Unknown",
"unnamed": "Unnamed",
"unsubscribe": "Unsubscribe",
"update_success": "Update successfully",
"upload_files": "Upload file",
"warning": "Warning",
@ -1747,7 +1766,7 @@
"import": {
"error": "Import failed"
},
"imported": "Imported successfully"
"imported": "Successfully imported {{count}} assistant(s)"
},
"api": {
"check": {
@ -3921,6 +3940,7 @@
"mcp_auto_install": "Automatically install MCP service (beta)",
"memory": "Persistent memory implementation based on a local knowledge graph. This enables the model to remember user-related information across different conversations. Requires configuring the MEMORY_FILE_PATH environment variable.",
"no": "No description",
"nowledge_mem": "Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
"python": "Execute Python code in a secure sandbox environment. Run Python with Pyodide, supporting most standard libraries and scientific computing packages",
"sequentialthinking": "A MCP server implementation that provides tools for dynamic and reflective problem solving through structured thinking processes"
},
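The updated message.agents.imported string now carries a {{count}} placeholder so the toast can report how many presets were imported; it is resolved through ordinary i18next interpolation. This is exactly the call the import popup makes later in this diff, with an illustrative resolution for the en-US string:

// Inside the popup, with t from useTranslation():
window.toast.success(t('message.agents.imported', { count: presets.length }))
// en-US -> "Successfully imported 3 assistant(s)" when presets.length === 3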
@ -32,6 +32,7 @@
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "使用自动检测的 Git Bash",
|
||||
"autoDiscoveredHint": "自动发现",
|
||||
"clear": {
|
||||
"button": "清除自定义路径"
|
||||
},
|
||||
@ -39,6 +40,7 @@
|
||||
"error": {
|
||||
"description": "在 Windows 上运行智能体需要 Git Bash。没有它智能体无法运行。请从以下地址安装 Git for Windows",
|
||||
"recheck": "重新检测 Git Bash 安装",
|
||||
"required": "在 Windows 上需要配置 Git Bash 路径",
|
||||
"title": "需要 Git Bash"
|
||||
},
|
||||
"found": {
|
||||
@ -51,7 +53,9 @@
|
||||
"invalidPath": "选择的文件不是有效的 Git Bash 可执行文件(bash.exe)。",
|
||||
"title": "选择 Git Bash 可执行文件"
|
||||
},
|
||||
"success": "成功检测到 Git Bash!"
|
||||
"placeholder": "选择 bash.exe 路径",
|
||||
"success": "成功检测到 Git Bash!",
|
||||
"tooltip": "在 Windows 上运行智能体需要 Git Bash。如果未安装,请从 git-scm.com 下载安装。"
|
||||
},
|
||||
"input": {
|
||||
"placeholder": "在这里输入消息,按 {{key}} 发送 - @ 选择路径, / 选择命令"
|
||||
@ -472,6 +476,7 @@
|
||||
"button": "导入",
|
||||
"error": {
|
||||
"fetch_failed": "从 URL 获取数据失败",
|
||||
"file_required": "请先选择文件",
|
||||
"invalid_format": "无效的助手格式:缺少必填字段",
|
||||
"url_required": "请输入 URL"
|
||||
},
|
||||
@ -486,11 +491,14 @@
|
||||
},
|
||||
"manage": {
|
||||
"batch_delete": {
|
||||
"button": "批量删除",
|
||||
"button": "删除",
|
||||
"confirm": "确定要删除选中的 {{count}} 个助手吗?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "导出"
|
||||
},
|
||||
"mode": {
|
||||
"delete": "删除",
|
||||
"manage": "管理",
|
||||
"sort": "排序"
|
||||
},
|
||||
"title": "管理助手"
|
||||
@ -540,14 +548,23 @@
|
||||
"more": "助手设置",
|
||||
"prompt": "提示词设置",
|
||||
"reasoning_effort": {
|
||||
"auto": "自动",
|
||||
"auto_description": "灵活决定推理力度",
|
||||
"default": "默认",
|
||||
"default_description": "依赖模型默认行为,不作任何配置",
|
||||
"high": "沉思",
|
||||
"high_description": "高强度推理",
|
||||
"label": "思维链长度",
|
||||
"low": "浮想",
|
||||
"low_description": "低强度推理",
|
||||
"medium": "斟酌",
|
||||
"medium_description": "中强度推理",
|
||||
"minimal": "微念",
|
||||
"minimal_description": "最小程度的思考",
|
||||
"off": "关闭",
|
||||
"xhigh": "穷究"
|
||||
"off_description": "禁用推理",
|
||||
"xhigh": "穷究",
|
||||
"xhigh_description": "超高强度推理"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "添加短语",
|
||||
@ -1248,11 +1265,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "停止",
|
||||
"subscribe": "订阅",
|
||||
"success": "成功",
|
||||
"swap": "交换",
|
||||
"topics": "话题",
|
||||
"unknown": "未知",
|
||||
"unnamed": "未命名",
|
||||
"unsubscribe": "退订",
|
||||
"update_success": "更新成功",
|
||||
"upload_files": "上传文件",
|
||||
"warning": "警告",
|
||||
@ -1747,7 +1766,7 @@
|
||||
"import": {
|
||||
"error": "导入失败"
|
||||
},
|
||||
"imported": "导入成功"
|
||||
"imported": "成功导入 {{count}} 个助手"
|
||||
},
|
||||
"api": {
|
||||
"check": {
|
||||
@ -3921,6 +3940,7 @@
|
||||
"mcp_auto_install": "自动安装 MCP 服务(测试版)",
|
||||
"memory": "基于本地知识图谱的持久性记忆基础实现。这使得模型能够在不同对话间记住用户的相关信息。需要配置 MEMORY_FILE_PATH 环境变量。",
|
||||
"no": "无描述",
|
||||
"nowledge_mem": "需要本地运行 Nowledge Mem 应用。将 AI 对话、工具、笔记、智能体和文件保存在本地计算机的私有记忆中。请从 https://mem.nowledge.co/ 下载",
|
||||
"python": "在安全的沙盒环境中执行 Python 代码。使用 Pyodide 运行 Python,支持大多数标准库和科学计算包",
|
||||
"sequentialthinking": "一个 MCP 服务器实现,提供了通过结构化思维过程进行动态和反思性问题解决的工具"
|
||||
},
|
||||
|
||||
@ -32,6 +32,7 @@
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "使用自動偵測的 Git Bash",
|
||||
"autoDiscoveredHint": "自動發現",
|
||||
"clear": {
|
||||
"button": "清除自訂路徑"
|
||||
},
|
||||
@ -39,6 +40,7 @@
|
||||
"error": {
|
||||
"description": "在 Windows 上執行 Agent 需要 Git Bash。沒有它 Agent 無法運作。請從以下網址安裝 Git for Windows",
|
||||
"recheck": "重新偵測 Git Bash 安裝",
|
||||
"required": "在 Windows 上需要設定 Git Bash 路徑",
|
||||
"title": "需要 Git Bash"
|
||||
},
|
||||
"found": {
|
||||
@ -51,7 +53,9 @@
|
||||
"invalidPath": "選擇的檔案不是有效的 Git Bash 可執行檔(bash.exe)。",
|
||||
"title": "選擇 Git Bash 可執行檔"
|
||||
},
|
||||
"success": "成功偵測到 Git Bash!"
|
||||
"placeholder": "選擇 bash.exe 路徑",
|
||||
"success": "成功偵測到 Git Bash!",
|
||||
"tooltip": "在 Windows 上執行 Agent 需要 Git Bash。如未安裝,請從 git-scm.com 下載安裝。"
|
||||
},
|
||||
"input": {
|
||||
"placeholder": "在這裡輸入您的訊息,使用 {{key}} 傳送 - @ 選擇路徑,/ 選擇命令"
|
||||
@ -472,6 +476,7 @@
|
||||
"button": "匯入",
|
||||
"error": {
|
||||
"fetch_failed": "從 URL 取得資料失敗",
|
||||
"file_required": "請先選擇一個檔案",
|
||||
"invalid_format": "無效的助手格式:缺少必填欄位",
|
||||
"url_required": "請輸入 URL"
|
||||
},
|
||||
@ -489,8 +494,11 @@
|
||||
"button": "批次刪除",
|
||||
"confirm": "確定要刪除所選的 {{count}} 個助手嗎?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "匯出"
|
||||
},
|
||||
"mode": {
|
||||
"delete": "刪除",
|
||||
"manage": "管理",
|
||||
"sort": "排序"
|
||||
},
|
||||
"title": "管理助手"
|
||||
@ -540,14 +548,23 @@
|
||||
"more": "助手設定",
|
||||
"prompt": "提示詞設定",
|
||||
"reasoning_effort": {
|
||||
"auto": "自動",
|
||||
"auto_description": "彈性決定推理投入的心力",
|
||||
"default": "預設",
|
||||
"default_description": "依賴模型的預設行為,無需任何配置。",
|
||||
"high": "盡力思考",
|
||||
"high_description": "高級推理",
|
||||
"label": "思維鏈長度",
|
||||
"low": "稍微思考",
|
||||
"low_description": "低階推理",
|
||||
"medium": "正常思考",
|
||||
"medium_description": "中等程度推理",
|
||||
"minimal": "最少思考",
|
||||
"minimal_description": "最少推理",
|
||||
"off": "關閉",
|
||||
"xhigh": "極力思考"
|
||||
"off_description": "禁用推理",
|
||||
"xhigh": "極力思考",
|
||||
"xhigh_description": "超高階推理"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "新增短語",
|
||||
@ -1248,11 +1265,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "停止",
|
||||
"subscribe": "訂閱",
|
||||
"success": "成功",
|
||||
"swap": "交換",
|
||||
"topics": "話題",
|
||||
"unknown": "未知",
|
||||
"unnamed": "未命名",
|
||||
"unsubscribe": "取消訂閱",
|
||||
"update_success": "更新成功",
|
||||
"upload_files": "上傳檔案",
|
||||
"warning": "警告",
|
||||
@ -3921,6 +3940,7 @@
|
||||
"mcp_auto_install": "自動安裝 MCP 服務(測試版)",
|
||||
"memory": "基於本機知識圖譜的持久性記憶基礎實做。這使得模型能夠在不同對話間記住使用者的相關資訊。需要設定 MEMORY_FILE_PATH 環境變數。",
|
||||
"no": "無描述",
|
||||
"nowledge_mem": "需要本機執行 Nowledge Mem 應用程式。將 AI 對話、工具、筆記、代理和檔案保存在電腦上的私人記憶體中。請從 https://mem.nowledge.co/ 下載",
|
||||
"python": "在安全的沙盒環境中執行 Python 程式碼。使用 Pyodide 執行 Python,支援大多數標準函式庫和科學計算套件",
|
||||
"sequentialthinking": "一個 MCP 伺服器實做,提供了透過結構化思維過程進行動態和反思性問題解決的工具"
|
||||
},
|
||||
|
||||
@ -32,6 +32,7 @@
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "Automatisch ermitteltes Git Bash wird verwendet",
|
||||
"autoDiscoveredHint": "[to be translated]:Auto-discovered",
|
||||
"clear": {
|
||||
"button": "Benutzerdefinierten Pfad löschen"
|
||||
},
|
||||
@ -39,6 +40,7 @@
|
||||
"error": {
|
||||
"description": "Git Bash ist erforderlich, um Agents unter Windows auszuführen. Der Agent kann ohne es nicht funktionieren. Bitte installieren Sie Git für Windows von",
|
||||
"recheck": "Überprüfe die Git Bash-Installation erneut",
|
||||
"required": "[to be translated]:Git Bash path is required on Windows",
|
||||
"title": "Git Bash erforderlich"
|
||||
},
|
||||
"found": {
|
||||
@ -51,7 +53,9 @@
|
||||
"invalidPath": "Die ausgewählte Datei ist keine gültige Git Bash ausführbare Datei (bash.exe).",
|
||||
"title": "Git Bash ausführbare Datei auswählen"
|
||||
},
|
||||
"success": "Git Bash erfolgreich erkannt!"
|
||||
"placeholder": "[to be translated]:Select bash.exe path",
|
||||
"success": "Git Bash erfolgreich erkannt!",
|
||||
"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
|
||||
},
|
||||
"input": {
|
||||
"placeholder": "Gib hier deine Nachricht ein, senden mit {{key}} – @ Pfad auswählen, / Befehl auswählen"
|
||||
@ -472,6 +476,7 @@
|
||||
"button": "Importieren",
|
||||
"error": {
|
||||
"fetch_failed": "Daten von URL abrufen fehlgeschlagen",
|
||||
"file_required": "Bitte wählen Sie zuerst eine Datei aus",
|
||||
"invalid_format": "Ungültiges Assistentenformat: Pflichtfelder fehlen",
|
||||
"url_required": "Bitte geben Sie eine URL ein"
|
||||
},
|
||||
@ -489,8 +494,11 @@
|
||||
"button": "Stapel löschen",
|
||||
"confirm": "Sind Sie sicher, dass Sie die ausgewählten {{count}} Assistenten löschen möchten?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "Exportieren"
|
||||
},
|
||||
"mode": {
|
||||
"delete": "Löschen",
|
||||
"manage": "Verwalten",
|
||||
"sort": "Sortieren"
|
||||
},
|
||||
"title": "Assistenten verwalten"
|
||||
@ -540,14 +548,23 @@
|
||||
"more": "Assistenteneinstellungen",
|
||||
"prompt": "Prompt-Einstellungen",
|
||||
"reasoning_effort": {
|
||||
"auto": "Auto",
|
||||
"auto_description": "Denkaufwand flexibel bestimmen",
|
||||
"default": "Standard",
|
||||
"default_description": "Vom Standardverhalten des Modells abhängen, ohne Konfiguration.",
|
||||
"high": "Tiefes Nachdenken",
|
||||
"high_description": "Ganzheitliches Denken",
|
||||
"label": "Gedankenkettenlänge",
|
||||
"low": "Spontan",
|
||||
"low_description": "Geringfügige Argumentation",
|
||||
"medium": "Überlegt",
|
||||
"medium_description": "Denken auf mittlerem Niveau",
|
||||
"minimal": "Minimal",
|
||||
"minimal_description": "Minimales Denken",
|
||||
"off": "Aus",
|
||||
"xhigh": "Extra hoch"
|
||||
"off_description": "Denken deaktivieren",
|
||||
"xhigh": "Extra hoch",
|
||||
"xhigh_description": "Extra hohes Denkvermögen"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "Phrase hinzufügen",
|
||||
@ -1248,11 +1265,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "Stoppen",
|
||||
"subscribe": "Abonnieren",
|
||||
"success": "Erfolgreich",
|
||||
"swap": "Tauschen",
|
||||
"topics": "Themen",
|
||||
"unknown": "Unbekannt",
|
||||
"unnamed": "Unbenannt",
|
||||
"unsubscribe": "Abmelden",
|
||||
"update_success": "Erfolgreich aktualisiert",
|
||||
"upload_files": "Dateien hochladen",
|
||||
"warning": "Warnung",
|
||||
@ -3921,6 +3940,7 @@
|
||||
"mcp_auto_install": "MCP-Service automatisch installieren (Beta-Version)",
|
||||
"memory": "MCP-Server mit persistenter Erinnerungsbasis auf lokalem Wissensgraphen, der Informationen über verschiedene Dialoge hinweg speichert. MEMORY_FILE_PATH-Umgebungsvariable muss konfiguriert werden",
|
||||
"no": "Keine Beschreibung",
|
||||
"nowledge_mem": "Erfordert lokal laufende Nowledge Mem App. Speichert KI-Chats, Tools, Notizen, Agenten und Dateien in einem privaten Speicher auf Ihrem Computer. Download unter https://mem.nowledge.co/",
|
||||
"python": "Python-Code in einem sicheren Sandbox-Umgebung ausführen. Verwendung von Pyodide für Python, Unterstützung für die meisten Standardbibliotheken und wissenschaftliche Pakete",
|
||||
"sequentialthinking": "MCP-Server-Implementierung mit strukturiertem Denkprozess, der dynamische und reflektierende Problemlösungen ermöglicht"
|
||||
},
|
||||
|
||||
@ -32,6 +32,7 @@
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "Χρησιμοποιείται αυτόματα εντοπισμένο Git Bash",
|
||||
"autoDiscoveredHint": "[to be translated]:Auto-discovered",
|
||||
"clear": {
|
||||
"button": "Διαγραφή προσαρμοσμένης διαδρομής"
|
||||
},
|
||||
@ -39,6 +40,7 @@
|
||||
"error": {
|
||||
"description": "Το Git Bash απαιτείται για την εκτέλεση πρακτόρων στα Windows. Ο πράκτορας δεν μπορεί να λειτουργήσει χωρίς αυτό. Παρακαλούμε εγκαταστήστε το Git για Windows από",
|
||||
"recheck": "Επανέλεγχος Εγκατάστασης του Git Bash",
|
||||
"required": "[to be translated]:Git Bash path is required on Windows",
|
||||
"title": "Απαιτείται Git Bash"
|
||||
},
|
||||
"found": {
|
||||
@ -51,7 +53,9 @@
|
||||
"invalidPath": "Το επιλεγμένο αρχείο δεν είναι έγκυρο εκτελέσιμο Git Bash (bash.exe).",
|
||||
"title": "Επιλογή εκτελέσιμου Git Bash"
|
||||
},
|
||||
"success": "Το Git Bash εντοπίστηκε με επιτυχία!"
|
||||
"placeholder": "[to be translated]:Select bash.exe path",
|
||||
"success": "Το Git Bash εντοπίστηκε με επιτυχία!",
|
||||
"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
|
||||
},
|
||||
"input": {
|
||||
"placeholder": "Εισάγετε το μήνυμά σας εδώ, στείλτε με {{key}} - @ επιλέξτε διαδρομή, / επιλέξτε εντολή"
|
||||
@ -472,6 +476,7 @@
|
||||
"button": "Εισαγωγή",
|
||||
"error": {
|
||||
"fetch_failed": "Αποτυχία λήψης δεδομένων από το URL",
|
||||
"file_required": "Παρακαλώ επιλέξτε πρώτα ένα αρχείο",
|
||||
"invalid_format": "Μη έγκυρη μορφή βοηθού: λείπουν υποχρεωτικά πεδία",
|
||||
"url_required": "Παρακαλώ εισάγετε ένα URL"
|
||||
},
|
||||
@ -489,8 +494,11 @@
|
||||
"button": "Μαζική Διαγραφή",
|
||||
"confirm": "Είστε βέβαιοι ότι θέλετε να διαγράψετε τους επιλεγμένους {{count}} βοηθούς;"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "Εξαγωγή"
|
||||
},
|
||||
"mode": {
|
||||
"delete": "Διαγραφή",
|
||||
"manage": "Διαχειριστείτε",
|
||||
"sort": "Ταξινόμηση"
|
||||
},
|
||||
"title": "Διαχείριση βοηθών"
|
||||
@ -540,14 +548,23 @@
|
||||
"more": "Ρυθμίσεις Βοηθού",
|
||||
"prompt": "Ρυθμίσεις προκαλύμματος",
|
||||
"reasoning_effort": {
|
||||
"auto": "Αυτοκίνητο",
|
||||
"auto_description": "Ευέλικτος καθορισμός της προσπάθειας συλλογισμού",
|
||||
"default": "Προεπιλογή",
|
||||
"default_description": "Εξαρτηθείτε από την προεπιλεγμένη συμπεριφορά του μοντέλου, χωρίς καμία διαμόρφωση.",
|
||||
"high": "Μεγάλο",
|
||||
"high_description": "Υψηλού επιπέδου συλλογισμός",
|
||||
"label": "Μήκος λογισμικού αλυσίδας",
|
||||
"low": "Μικρό",
|
||||
"low_description": "Χαμηλού επιπέδου συλλογιστική",
|
||||
"medium": "Μεσαίο",
|
||||
"medium_description": "Αιτιολόγηση μεσαίου επιπέδου",
|
||||
"minimal": "ελάχιστος",
|
||||
"minimal_description": "Ελάχιστος συλλογισμός",
|
||||
"off": "Απενεργοποίηση",
|
||||
"xhigh": "Εξαιρετικά Υψηλή"
|
||||
"off_description": "Απενεργοποίηση λογικής",
|
||||
"xhigh": "Εξαιρετικά Υψηλή",
|
||||
"xhigh_description": "Εξαιρετικά υψηλού επιπέδου συλλογισμός"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "Προσθήκη φράσης",
|
||||
@ -1248,11 +1265,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "σταματήστε",
|
||||
"subscribe": "Εγγραφείτε",
|
||||
"success": "Επιτυχία",
|
||||
"swap": "Εναλλαγή",
|
||||
"topics": "Θέματα",
|
||||
"unknown": "Άγνωστο",
|
||||
"unnamed": "Χωρίς όνομα",
|
||||
"unsubscribe": "Απεγγραφή",
|
||||
"update_success": "Επιτυχής ενημέρωση",
|
||||
"upload_files": "Ανέβασμα αρχείου",
|
||||
"warning": "Προσοχή",
|
||||
@ -3921,6 +3940,7 @@
|
||||
"mcp_auto_install": "Αυτόματη εγκατάσταση υπηρεσίας MCP (προβολή)",
|
||||
"memory": "Βασική υλοποίηση μόνιμης μνήμης με βάση τοπικό γράφημα γνώσης. Αυτό επιτρέπει στο μοντέλο να θυμάται πληροφορίες σχετικές με τον χρήστη ανάμεσα σε διαφορετικές συνομιλίες. Απαιτείται η ρύθμιση της μεταβλητής περιβάλλοντος MEMORY_FILE_PATH.",
|
||||
"no": "Χωρίς περιγραφή",
|
||||
"nowledge_mem": "[to be translated]:Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
|
||||
"python": "Εκτελέστε κώδικα Python σε ένα ασφαλές περιβάλλον sandbox. Χρησιμοποιήστε το Pyodide για να εκτελέσετε Python, υποστηρίζοντας την πλειονότητα των βιβλιοθηκών της τυπικής βιβλιοθήκης και των πακέτων επιστημονικού υπολογισμού",
|
||||
"sequentialthinking": "ένας εξυπηρετητής MCP που υλοποιείται, παρέχοντας εργαλεία για δυναμική και αναστοχαστική επίλυση προβλημάτων μέσω δομημένων διαδικασιών σκέψης"
|
||||
},
|
||||
|
||||
@ -32,6 +32,7 @@
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "Usando Git Bash detectado automáticamente",
|
||||
"autoDiscoveredHint": "[to be translated]:Auto-discovered",
|
||||
"clear": {
|
||||
"button": "Borrar ruta personalizada"
|
||||
},
|
||||
@ -39,6 +40,7 @@
|
||||
"error": {
|
||||
"description": "Se requiere Git Bash para ejecutar agentes en Windows. El agente no puede funcionar sin él. Instale Git para Windows desde",
|
||||
"recheck": "Volver a verificar la instalación de Git Bash",
|
||||
"required": "[to be translated]:Git Bash path is required on Windows",
|
||||
"title": "Git Bash Requerido"
|
||||
},
|
||||
"found": {
|
||||
@ -51,7 +53,9 @@
|
||||
"invalidPath": "El archivo seleccionado no es un ejecutable válido de Git Bash (bash.exe).",
|
||||
"title": "Seleccionar ejecutable de Git Bash"
|
||||
},
|
||||
"success": "¡Git Bash detectado con éxito!"
|
||||
"placeholder": "[to be translated]:Select bash.exe path",
|
||||
"success": "¡Git Bash detectado con éxito!",
|
||||
"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
|
||||
},
|
||||
"input": {
|
||||
"placeholder": "Introduce tu mensaje aquí, envía con {{key}} - @ seleccionar ruta, / seleccionar comando"
|
||||
@ -472,6 +476,7 @@
|
||||
"button": "Importar",
|
||||
"error": {
|
||||
"fetch_failed": "Error al obtener datos desde la URL",
|
||||
"file_required": "Por favor, selecciona primero un archivo",
|
||||
"invalid_format": "Formato de asistente inválido: faltan campos obligatorios",
|
||||
"url_required": "Por favor introduce una URL"
|
||||
},
|
||||
@ -489,8 +494,11 @@
|
||||
"button": "Eliminación por lotes",
|
||||
"confirm": "¿Estás seguro de que quieres eliminar los {{count}} asistentes seleccionados?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "Exportar"
|
||||
},
|
||||
"mode": {
|
||||
"delete": "Eliminar",
|
||||
"manage": "Gestionar",
|
||||
"sort": "Ordenar"
|
||||
},
|
||||
"title": "Gestionar asistentes"
|
||||
@ -540,14 +548,23 @@
|
||||
"more": "Configuración del Asistente",
|
||||
"prompt": "Configuración de Palabras Clave",
|
||||
"reasoning_effort": {
|
||||
"auto": "Automóvil",
|
||||
"auto_description": "Determinar flexiblemente el esfuerzo de razonamiento",
|
||||
"default": "Por defecto",
|
||||
"default_description": "Depender del comportamiento predeterminado del modelo, sin ninguna configuración.",
|
||||
"high": "Largo",
|
||||
"high_description": "Razonamiento de alto nivel",
|
||||
"label": "Longitud de Cadena de Razonamiento",
|
||||
"low": "Corto",
|
||||
"low_description": "Razonamiento de bajo nivel",
|
||||
"medium": "Medio",
|
||||
"medium_description": "Razonamiento de nivel medio",
|
||||
"minimal": "minimal",
|
||||
"minimal_description": "Razonamiento mínimo",
|
||||
"off": "Apagado",
|
||||
"xhigh": "Extra Alta"
|
||||
"off_description": "Deshabilitar razonamiento",
|
||||
"xhigh": "Extra Alta",
|
||||
"xhigh_description": "Razonamiento de extra alto nivel"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "Agregar frase",
|
||||
@ -1248,11 +1265,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "Detener",
|
||||
"subscribe": "Suscribirse",
|
||||
"success": "Éxito",
|
||||
"swap": "Intercambiar",
|
||||
"topics": "Temas",
|
||||
"unknown": "Desconocido",
|
||||
"unnamed": "Sin nombre",
|
||||
"unsubscribe": "Cancelar suscripción",
|
||||
"update_success": "Actualización exitosa",
|
||||
"upload_files": "Subir archivo",
|
||||
"warning": "Advertencia",
|
||||
@ -3921,6 +3940,7 @@
|
||||
"mcp_auto_install": "Instalación automática del servicio MCP (versión beta)",
|
||||
"memory": "Implementación básica de memoria persistente basada en un grafo de conocimiento local. Esto permite que el modelo recuerde información relevante del usuario entre diferentes conversaciones. Es necesario configurar la variable de entorno MEMORY_FILE_PATH.",
|
||||
"no": "sin descripción",
|
||||
"nowledge_mem": "[to be translated]:Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
|
||||
"python": "Ejecuta código Python en un entorno sandbox seguro. Usa Pyodide para ejecutar Python, compatible con la mayoría de las bibliotecas estándar y paquetes de cálculo científico.",
|
||||
"sequentialthinking": "Una implementación de servidor MCP que proporciona herramientas para la resolución dinámica y reflexiva de problemas mediante un proceso de pensamiento estructurado"
|
||||
},
|
||||
|
||||
@ -32,6 +32,7 @@
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "Utilisation de Git Bash détecté automatiquement",
|
||||
"autoDiscoveredHint": "[to be translated]:Auto-discovered",
|
||||
"clear": {
|
||||
"button": "Effacer le chemin personnalisé"
|
||||
},
|
||||
@ -39,6 +40,7 @@
|
||||
"error": {
|
||||
"description": "Git Bash est requis pour exécuter des agents sur Windows. L'agent ne peut pas fonctionner sans. Veuillez installer Git pour Windows depuis",
|
||||
"recheck": "Revérifier l'installation de Git Bash",
|
||||
"required": "[to be translated]:Git Bash path is required on Windows",
|
||||
"title": "Git Bash requis"
|
||||
},
|
||||
"found": {
|
||||
@ -51,7 +53,9 @@
|
||||
"invalidPath": "Le fichier sélectionné n'est pas un exécutable Git Bash valide (bash.exe).",
|
||||
"title": "Sélectionner l'exécutable Git Bash"
|
||||
},
|
||||
"success": "Git Bash détecté avec succès !"
|
||||
"placeholder": "[to be translated]:Select bash.exe path",
|
||||
"success": "Git Bash détecté avec succès !",
|
||||
"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
|
||||
},
|
||||
"input": {
|
||||
"placeholder": "Entrez votre message ici, envoyez avec {{key}} - @ sélectionner le chemin, / sélectionner la commande"
|
||||
@ -472,6 +476,7 @@
|
||||
"button": "Importer",
|
||||
"error": {
|
||||
"fetch_failed": "Échec de la récupération des données depuis l'URL",
|
||||
"file_required": "Veuillez d'abord sélectionner un fichier",
|
||||
"invalid_format": "Format d'assistant invalide : champs obligatoires manquants",
|
||||
"url_required": "Veuillez saisir une URL"
|
||||
},
|
||||
@ -489,8 +494,11 @@
|
||||
"button": "Suppression par lot",
|
||||
"confirm": "Êtes-vous sûr de vouloir supprimer les {{count}} assistants sélectionnés ?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "Exporter"
|
||||
},
|
||||
"mode": {
|
||||
"delete": "Supprimer",
|
||||
"manage": "Gérer",
|
||||
"sort": "Trier"
|
||||
},
|
||||
"title": "Gérer les assistants"
|
||||
@ -540,14 +548,23 @@
|
||||
"more": "Paramètres de l'assistant",
|
||||
"prompt": "Paramètres de l'invite",
|
||||
"reasoning_effort": {
|
||||
"auto": "Auto",
|
||||
"auto_description": "Déterminer de manière flexible l'effort de raisonnement",
|
||||
"default": "Par défaut",
|
||||
"default_description": "Dépendre du comportement par défaut du modèle, sans aucune configuration.",
|
||||
"high": "Long",
|
||||
"high_description": "Raisonnement de haut niveau",
|
||||
"label": "Longueur de la chaîne de raisonnement",
|
||||
"low": "Court",
|
||||
"low_description": "Raisonnement de bas niveau",
|
||||
"medium": "Moyen",
|
||||
"medium_description": "Raisonnement de niveau moyen",
|
||||
"minimal": "minimal",
|
||||
"minimal_description": "Réflexion minimale",
|
||||
"off": "Off",
|
||||
"xhigh": "Très élevée"
|
||||
"off_description": "Désactiver le raisonnement",
|
||||
"xhigh": "Très élevée",
|
||||
"xhigh_description": "Raisonnement de très haut niveau"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "Добавить фразу",
|
||||
@ -1248,11 +1265,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "Arrêter",
|
||||
"subscribe": "S'abonner",
|
||||
"success": "Succès",
|
||||
"swap": "Échanger",
|
||||
"topics": "Sujets",
|
||||
"unknown": "Inconnu",
|
||||
"unnamed": "Sans nom",
|
||||
"unsubscribe": "Se désabonner",
|
||||
"update_success": "Mise à jour réussie",
|
||||
"upload_files": "Uploader des fichiers",
|
||||
"warning": "Avertissement",
|
||||
@ -3921,6 +3940,7 @@
|
||||
"mcp_auto_install": "Installation automatique du service MCP (version bêta)",
|
||||
"memory": "Implémentation de base de mémoire persistante basée sur un graphe de connaissances local. Cela permet au modèle de se souvenir des informations relatives à l'utilisateur entre différentes conversations. Nécessite la configuration de la variable d'environnement MEMORY_FILE_PATH.",
|
||||
"no": "sans description",
|
||||
"nowledge_mem": "[to be translated]:Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
|
||||
"python": "Exécutez du code Python dans un environnement bac à sable sécurisé. Utilisez Pyodide pour exécuter Python, prenant en charge la plupart des bibliothèques standard et des packages de calcul scientifique.",
|
||||
"sequentialthinking": "Un serveur MCP qui fournit des outils permettant une résolution dynamique et réflexive des problèmes à travers un processus de pensée structuré"
|
||||
},
|
||||
|
||||
@ -32,6 +32,7 @@
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "自動検出されたGit Bashを使用中",
|
||||
"autoDiscoveredHint": "[to be translated]:Auto-discovered",
|
||||
"clear": {
|
||||
"button": "カスタムパスをクリア"
|
||||
},
|
||||
@ -39,6 +40,7 @@
|
||||
"error": {
|
||||
"description": "Windowsでエージェントを実行するにはGit Bashが必要です。これがないとエージェントは動作しません。以下からGit for Windowsをインストールしてください。",
|
||||
"recheck": "Git Bashのインストールを再確認してください",
|
||||
"required": "[to be translated]:Git Bash path is required on Windows",
|
||||
"title": "Git Bashが必要です"
|
||||
},
|
||||
"found": {
|
||||
@ -51,7 +53,9 @@
|
||||
"invalidPath": "選択されたファイルは有効なGit Bash実行ファイル(bash.exe)ではありません。",
|
||||
"title": "Git Bash実行ファイルを選択"
|
||||
},
|
||||
"success": "Git Bashが正常に検出されました!"
|
||||
"placeholder": "[to be translated]:Select bash.exe path",
|
||||
"success": "Git Bashが正常に検出されました!",
|
||||
"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
|
||||
},
|
||||
"input": {
|
||||
"placeholder": "メッセージをここに入力し、{{key}}で送信 - @でパスを選択、/でコマンドを選択"
|
||||
@ -472,6 +476,7 @@
|
||||
"button": "インポート",
|
||||
"error": {
|
||||
"fetch_failed": "URLからのデータ取得に失敗しました",
|
||||
"file_required": "まずファイルを選択してください",
|
||||
"invalid_format": "無効なアシスタント形式:必須フィールドが不足しています",
|
||||
"url_required": "URLを入力してください"
|
||||
},
|
||||
@ -489,8 +494,11 @@
|
||||
"button": "バッチ削除",
|
||||
"confirm": "選択した{{count}}件のアシスタントを削除してもよろしいですか?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "エクスポート"
|
||||
},
|
||||
"mode": {
|
||||
"delete": "削除",
|
||||
"manage": "管理",
|
||||
"sort": "並べ替え"
|
||||
},
|
||||
"title": "アシスタントを管理"
|
||||
@ -540,14 +548,23 @@
|
||||
"more": "アシスタント設定",
|
||||
"prompt": "プロンプト設定",
|
||||
"reasoning_effort": {
|
||||
"auto": "自動",
|
||||
"auto_description": "推論にかける労力を柔軟に調整する",
|
||||
"default": "デフォルト",
|
||||
"default_description": "設定なしで、モデルの既定の動作に依存する。",
|
||||
"high": "最大限の思考",
|
||||
"high_description": "高度な推論",
|
||||
"label": "思考連鎖の長さ",
|
||||
"low": "少しの思考",
|
||||
"low_description": "低レベル推論",
|
||||
"medium": "普通の思考",
|
||||
"medium_description": "中レベル推論",
|
||||
"minimal": "最小限の思考",
|
||||
"minimal_description": "最小限の推論",
|
||||
"off": "オフ",
|
||||
"xhigh": "超高"
|
||||
"off_description": "推論を無効にする",
|
||||
"xhigh": "超高",
|
||||
"xhigh_description": "超高度な推論"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "プロンプトを追加",
|
||||
@ -1248,11 +1265,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "停止",
|
||||
"subscribe": "購読",
|
||||
"success": "成功",
|
||||
"swap": "交換",
|
||||
"topics": "トピック",
|
||||
"unknown": "Unknown",
|
||||
"unnamed": "無題",
|
||||
"unsubscribe": "配信停止",
|
||||
"update_success": "更新成功",
|
||||
"upload_files": "ファイルをアップロードする",
|
||||
"warning": "警告",
|
||||
@ -3921,6 +3940,7 @@
|
||||
"mcp_auto_install": "MCPサービスの自動インストール(ベータ版)",
|
||||
"memory": "ローカルのナレッジグラフに基づく永続的なメモリの基本的な実装です。これにより、モデルは異なる会話間でユーザーの関連情報を記憶できるようになります。MEMORY_FILE_PATH 環境変数の設定が必要です。",
|
||||
"no": "説明なし",
|
||||
"nowledge_mem": "Nowledge Mem アプリをローカルで実行する必要があります。AI チャット、ツール、ノート、エージェント、ファイルをコンピューター上のプライベートメモリに保存します。https://mem.nowledge.co/ からダウンロードしてください",
|
||||
"python": "安全なサンドボックス環境でPythonコードを実行します。Pyodideを使用してPythonを実行し、ほとんどの標準ライブラリと科学計算パッケージをサポートしています。",
|
||||
"sequentialthinking": "構造化された思考プロセスを通じて動的かつ反省的な問題解決を行うためのツールを提供するMCPサーバーの実装"
|
||||
},
|
||||
|
||||
@ -32,6 +32,7 @@
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "Usando Git Bash detectado automaticamente",
|
||||
"autoDiscoveredHint": "[to be translated]:Auto-discovered",
|
||||
"clear": {
|
||||
"button": "Limpar caminho personalizado"
|
||||
},
|
||||
@ -39,6 +40,7 @@
|
||||
"error": {
|
||||
"description": "O Git Bash é necessário para executar agentes no Windows. O agente não pode funcionar sem ele. Por favor, instale o Git para Windows a partir de",
|
||||
"recheck": "Reverificar a Instalação do Git Bash",
|
||||
"required": "[to be translated]:Git Bash path is required on Windows",
|
||||
"title": "Git Bash Necessário"
|
||||
},
|
||||
"found": {
|
||||
@ -51,7 +53,9 @@
|
||||
"invalidPath": "O arquivo selecionado não é um executável válido do Git Bash (bash.exe).",
|
||||
"title": "Selecionar executável do Git Bash"
|
||||
},
|
||||
"success": "Git Bash detectado com sucesso!"
|
||||
"placeholder": "[to be translated]:Select bash.exe path",
|
||||
"success": "Git Bash detectado com sucesso!",
|
||||
"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
|
||||
},
|
||||
"input": {
|
||||
"placeholder": "Digite sua mensagem aqui, envie com {{key}} - @ selecionar caminho, / selecionar comando"
|
||||
@ -472,6 +476,7 @@
|
||||
"button": "Importar",
|
||||
"error": {
|
||||
"fetch_failed": "Falha ao obter dados do URL",
|
||||
"file_required": "Por favor, selecione um arquivo primeiro",
|
||||
"invalid_format": "Formato de assistente inválido: campos obrigatórios em falta",
|
||||
"url_required": "Por favor insere um URL"
|
||||
},
|
||||
@ -489,8 +494,11 @@
|
||||
"button": "Exclusão em Lote",
|
||||
"confirm": "Tem certeza de que deseja excluir os {{count}} assistentes selecionados?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "Exportar"
|
||||
},
|
||||
"mode": {
|
||||
"delete": "Excluir",
|
||||
"manage": "Gerenciar",
|
||||
"sort": "Ordenar"
|
||||
},
|
||||
"title": "Gerir assistentes"
|
||||
@ -540,14 +548,23 @@
|
||||
"more": "Configurações do Assistente",
|
||||
"prompt": "Configurações de Prompt",
|
||||
"reasoning_effort": {
|
||||
"auto": "Automóvel",
|
||||
"auto_description": "Determinar flexivelmente o esforço de raciocínio",
|
||||
"default": "Padrão",
|
||||
"default_description": "Depender do comportamento padrão do modelo, sem qualquer configuração.",
|
||||
"high": "Longo",
|
||||
"high_description": "Raciocínio de alto nível",
|
||||
"label": "Comprimento da Cadeia de Raciocínio",
|
||||
"low": "Curto",
|
||||
"low_description": "Raciocínio de baixo nível",
|
||||
"medium": "Médio",
|
||||
"medium_description": "Raciocínio de nível médio",
|
||||
"minimal": "mínimo",
|
||||
"minimal_description": "Raciocínio mínimo",
|
||||
"off": "Desligado",
|
||||
"xhigh": "Extra Alta"
|
||||
"off_description": "Desabilitar raciocínio",
|
||||
"xhigh": "Extra Alta",
|
||||
"xhigh_description": "Raciocínio de altíssimo nível"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "Adicionar Frase",
|
||||
@ -1248,11 +1265,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "Parar",
|
||||
"subscribe": "Subscrever",
|
||||
"success": "Sucesso",
|
||||
"swap": "Trocar",
|
||||
"topics": "Tópicos",
|
||||
"unknown": "Desconhecido",
|
||||
"unnamed": "Sem nome",
|
||||
"unsubscribe": "Cancelar inscrição",
|
||||
"update_success": "Atualização bem-sucedida",
|
||||
"upload_files": "Carregar arquivo",
|
||||
"warning": "Aviso",
|
||||
@ -3921,6 +3940,7 @@
|
||||
"mcp_auto_install": "Instalação automática do serviço MCP (beta)",
|
||||
"memory": "Implementação base de memória persistente baseada em grafos de conhecimento locais. Isso permite que o modelo lembre informações relevantes do utilizador entre diferentes conversas. É necessário configurar a variável de ambiente MEMORY_FILE_PATH.",
|
||||
"no": "sem descrição",
|
||||
"nowledge_mem": "Requer a aplicação Nowledge Mem em execução localmente. Mantém conversas de IA, ferramentas, notas, agentes e ficheiros numa memória privada no seu computador. Transfira de https://mem.nowledge.co/",
|
||||
"python": "Executar código Python num ambiente sandbox seguro. Utilizar Pyodide para executar Python, suportando a maioria das bibliotecas padrão e pacotes de computação científica",
|
||||
"sequentialthinking": "Uma implementação de servidor MCP que fornece ferramentas para resolução dinâmica e reflexiva de problemas através de um processo de pensamento estruturado"
|
||||
},
|
||||
|
||||
@ -32,6 +32,7 @@
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "Используется автоматически обнаруженный Git Bash",
|
||||
"autoDiscoveredHint": "[to be translated]:Auto-discovered",
|
||||
"clear": {
|
||||
"button": "Очистить пользовательский путь"
|
||||
},
|
||||
@ -39,6 +40,7 @@
|
||||
"error": {
|
||||
"description": "Для запуска агентов в Windows требуется Git Bash. Без него агент не может работать. Пожалуйста, установите Git для Windows с",
|
||||
"recheck": "Повторная проверка установки Git Bash",
|
||||
"required": "[to be translated]:Git Bash path is required on Windows",
|
||||
"title": "Требуется Git Bash"
|
||||
},
|
||||
"found": {
|
||||
@ -51,7 +53,9 @@
|
||||
"invalidPath": "Выбранный файл не является допустимым исполняемым файлом Git Bash (bash.exe).",
|
||||
"title": "Выберите исполняемый файл Git Bash"
|
||||
},
|
||||
"success": "Git Bash успешно обнаружен!"
|
||||
"placeholder": "[to be translated]:Select bash.exe path",
|
||||
"success": "Git Bash успешно обнаружен!",
|
||||
"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
|
||||
},
|
||||
"input": {
|
||||
"placeholder": "Введите ваше сообщение здесь, отправьте с помощью {{key}} — @ выбрать путь, / выбрать команду"
|
||||
@ -472,6 +476,7 @@
|
||||
"button": "Импортировать",
|
||||
"error": {
|
||||
"fetch_failed": "Ошибка получения данных с URL",
|
||||
"file_required": "Сначала выберите файл",
|
||||
"invalid_format": "Неверный формат помощника: отсутствуют обязательные поля",
|
||||
"url_required": "Пожалуйста, введите URL"
|
||||
},
|
||||
@ -489,8 +494,11 @@
|
||||
"button": "Массовое удаление",
|
||||
"confirm": "Вы уверены, что хотите удалить выбранных {{count}} ассистентов?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "Экспорт"
|
||||
},
|
||||
"mode": {
|
||||
"delete": "Удалить",
|
||||
"manage": "Управлять",
|
||||
"sort": "Сортировать"
|
||||
},
|
||||
"title": "Управление помощниками"
|
||||
@ -540,14 +548,23 @@
|
||||
"more": "Настройки ассистента",
|
||||
"prompt": "Настройки промптов",
|
||||
"reasoning_effort": {
|
||||
"auto": "Авто",
|
||||
"auto_description": "Гибко определяйте усилие на рассуждение",
|
||||
"default": "По умолчанию",
|
||||
"default_description": "Полагаться на поведение модели по умолчанию, без какой-либо конфигурации.",
|
||||
"high": "Стараюсь думать",
|
||||
"high_description": "Высокоуровневое рассуждение",
|
||||
"label": "Настройки размышлений",
|
||||
"low": "Меньше думать",
|
||||
"low_description": "Низкоуровневое рассуждение",
|
||||
"medium": "Среднее",
|
||||
"medium_description": "Средний уровень рассуждения",
|
||||
"minimal": "минимальный",
|
||||
"minimal_description": "Минимальное рассуждение",
|
||||
"off": "Выключить",
|
||||
"xhigh": "Сверхвысокое"
|
||||
"off_description": "Отключить рассуждение",
|
||||
"xhigh": "Сверхвысокое",
|
||||
"xhigh_description": "Высочайший уровень рассуждений"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "Добавить подсказку",
|
||||
@ -1248,11 +1265,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "остановить",
|
||||
"subscribe": "Подписаться",
|
||||
"success": "Успешно",
|
||||
"swap": "Поменять местами",
|
||||
"topics": "Топики",
|
||||
"unknown": "Неизвестно",
|
||||
"unnamed": "Без имени",
|
||||
"unsubscribe": "Отписаться",
|
||||
"update_success": "Обновление выполнено успешно",
|
||||
"upload_files": "Загрузить файл",
|
||||
"warning": "Предупреждение",
|
||||
@ -3921,6 +3940,7 @@
|
||||
"mcp_auto_install": "Автоматическая установка службы MCP (бета-версия)",
|
||||
"memory": "реализация постоянной памяти на основе локального графа знаний. Это позволяет модели запоминать информацию о пользователе между различными диалогами. Требуется настроить переменную среды MEMORY_FILE_PATH.",
|
||||
"no": "без описания",
|
||||
"nowledge_mem": "Требуется запущенное локально приложение Nowledge Mem. Хранит чаты ИИ, инструменты, заметки, агентов и файлы в приватной памяти на вашем компьютере. Скачать можно на https://mem.nowledge.co/",
|
||||
"python": "Выполняйте код Python в безопасной песочнице. Запускайте Python с помощью Pyodide, поддерживается большинство стандартных библиотек и пакетов для научных вычислений",
|
||||
"sequentialthinking": "MCP серверная реализация, предоставляющая инструменты для динамического и рефлексивного решения проблем посредством структурированного мыслительного процесса"
|
||||
},
|
||||
|
||||
@ -6,7 +6,8 @@ import {
|
||||
MdiLightbulbOn30,
|
||||
MdiLightbulbOn50,
|
||||
MdiLightbulbOn80,
|
||||
MdiLightbulbOn90
|
||||
MdiLightbulbOn90,
|
||||
MdiLightbulbQuestion
|
||||
} from '@renderer/components/Icons/SVGIcon'
|
||||
import { QuickPanelReservedSymbol, useQuickPanel } from '@renderer/components/QuickPanel'
|
||||
import {
|
||||
@ -18,7 +19,6 @@ import {
|
||||
MODEL_SUPPORTED_OPTIONS
|
||||
} from '@renderer/config/models'
|
||||
import { useAssistant } from '@renderer/hooks/useAssistant'
|
||||
import { getReasoningEffortOptionsLabel } from '@renderer/i18n/label'
|
||||
import type { ToolQuickPanelApi } from '@renderer/pages/home/Inputbar/types'
|
||||
import type { Model, ThinkingOption } from '@renderer/types'
|
||||
import { Tooltip } from 'antd'
|
||||
@ -88,19 +88,48 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
|
||||
[updateAssistantSettings, assistant.enableWebSearch, model, t]
|
||||
)
|
||||
|
||||
const reasoningEffortOptionLabelMap = {
|
||||
default: t('assistants.settings.reasoning_effort.default'),
|
||||
none: t('assistants.settings.reasoning_effort.off'),
|
||||
minimal: t('assistants.settings.reasoning_effort.minimal'),
|
||||
high: t('assistants.settings.reasoning_effort.high'),
|
||||
low: t('assistants.settings.reasoning_effort.low'),
|
||||
medium: t('assistants.settings.reasoning_effort.medium'),
|
||||
auto: t('assistants.settings.reasoning_effort.auto'),
|
||||
xhigh: t('assistants.settings.reasoning_effort.xhigh')
|
||||
} as const satisfies Record<ThinkingOption, string>
|
||||
|
||||
const reasoningEffortDescriptionMap = {
|
||||
default: t('assistants.settings.reasoning_effort.default_description'),
|
||||
none: t('assistants.settings.reasoning_effort.off_description'),
|
||||
minimal: t('assistants.settings.reasoning_effort.minimal_description'),
|
||||
low: t('assistants.settings.reasoning_effort.low_description'),
|
||||
medium: t('assistants.settings.reasoning_effort.medium_description'),
|
||||
high: t('assistants.settings.reasoning_effort.high_description'),
|
||||
xhigh: t('assistants.settings.reasoning_effort.xhigh_description'),
|
||||
auto: t('assistants.settings.reasoning_effort.auto_description')
|
||||
} as const satisfies Record<ThinkingOption, string>
|
||||
|
||||
const panelItems = useMemo(() => {
|
||||
// 使用表中定义的选项创建UI选项
|
||||
return supportedOptions.map((option) => ({
|
||||
level: option,
|
||||
label: getReasoningEffortOptionsLabel(option),
|
||||
description: '',
|
||||
label: reasoningEffortOptionLabelMap[option],
|
||||
description: reasoningEffortDescriptionMap[option],
|
||||
icon: ThinkingIcon({ option }),
|
||||
isSelected: currentReasoningEffort === option,
|
||||
action: () => onThinkingChange(option)
|
||||
}))
|
||||
}, [currentReasoningEffort, supportedOptions, onThinkingChange])
|
||||
}, [
|
||||
supportedOptions,
|
||||
reasoningEffortOptionLabelMap,
|
||||
reasoningEffortDescriptionMap,
|
||||
currentReasoningEffort,
|
||||
onThinkingChange
|
||||
])
|
||||
|
||||
const isThinkingEnabled = currentReasoningEffort !== undefined && currentReasoningEffort !== 'none'
|
||||
const isThinkingEnabled =
|
||||
currentReasoningEffort !== undefined && currentReasoningEffort !== 'none' && currentReasoningEffort !== 'default'
|
||||
|
||||
const disableThinking = useCallback(() => {
|
||||
onThinkingChange('none')
|
||||
@ -197,8 +226,9 @@ const ThinkingIcon = (props: { option?: ThinkingOption; isFixedReasoning?: boole
|
||||
case 'none':
|
||||
IconComponent = MdiLightbulbOffOutline
|
||||
break
|
||||
case 'default':
|
||||
default:
|
||||
IconComponent = MdiLightbulbOffOutline
|
||||
IconComponent = MdiLightbulbQuestion
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
@ -61,9 +61,14 @@ const BuiltinMCPServerList: FC = () => {
|
||||
{getMcpTypeLabel(server.type ?? 'stdio')}
|
||||
</Tag>
|
||||
{server?.shouldConfig && (
|
||||
<Tag color="warning" style={{ borderRadius: 20, margin: 0, fontWeight: 500 }}>
|
||||
{t('settings.mcp.requiresConfig')}
|
||||
</Tag>
|
||||
<a
|
||||
href="https://docs.cherry-ai.com/advanced-basic/mcp/buildin"
|
||||
target="_blank"
|
||||
rel="noopener noreferrer">
|
||||
<Tag color="warning" style={{ borderRadius: 20, margin: 0, fontWeight: 500 }}>
|
||||
{t('settings.mcp.requiresConfig')}
|
||||
</Tag>
|
||||
</a>
|
||||
)}
|
||||
</ServerFooter>
|
||||
</ServerCard>
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
import { Navbar, NavbarCenter } from '@renderer/components/app/Navbar'
|
||||
import { HStack } from '@renderer/components/Layout'
|
||||
import ListItem from '@renderer/components/ListItem'
|
||||
import GeneralPopup from '@renderer/components/Popups/GeneralPopup'
|
||||
import Scrollbar from '@renderer/components/Scrollbar'
|
||||
import CustomTag from '@renderer/components/Tags/CustomTag'
|
||||
import { useAssistantPresets } from '@renderer/hooks/useAssistantPresets'
|
||||
@ -11,7 +10,7 @@ import type { AssistantPreset } from '@renderer/types'
|
||||
import { uuid } from '@renderer/utils'
|
||||
import { Button, Empty, Flex, Input } from 'antd'
|
||||
import { omit } from 'lodash'
|
||||
import { Import, Plus, Rss, Search, Settings2 } from 'lucide-react'
|
||||
import { Import, Plus, Search, Settings2 } from 'lucide-react'
|
||||
import type { FC } from 'react'
|
||||
import { useCallback, useEffect, useMemo, useState } from 'react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
@ -23,7 +22,6 @@ import { groupTranslations } from './assistantPresetGroupTranslations'
|
||||
import AddAssistantPresetPopup from './components/AddAssistantPresetPopup'
|
||||
import AssistantPresetCard from './components/AssistantPresetCard'
|
||||
import { AssistantPresetGroupIcon } from './components/AssistantPresetGroupIcon'
|
||||
import AssistantsSubscribeUrlSettings from './components/AssistantsSubscribeUrlSettings'
|
||||
import ImportAssistantPresetPopup from './components/ImportAssistantPresetPopup'
|
||||
import ManageAssistantPresetsPopup from './components/ManageAssistantPresetsPopup'
|
||||
|
||||
@ -177,15 +175,6 @@ const AssistantPresetsPage: FC = () => {
|
||||
}
|
||||
}
|
||||
|
||||
const handleSubscribeSettings = () => {
|
||||
GeneralPopup.show({
|
||||
title: t('assistants.presets.settings.title'),
|
||||
content: <AssistantsSubscribeUrlSettings />,
|
||||
footer: null,
|
||||
width: 600
|
||||
})
|
||||
}
|
||||
|
||||
const handleManageAgents = () => {
|
||||
ManageAssistantPresetsPopup.show()
|
||||
}
|
||||
@ -292,9 +281,6 @@ const AssistantPresetsPage: FC = () => {
|
||||
<Button type="text" onClick={handleImportAgent} icon={<Import size={18} color="var(--color-icon)" />}>
|
||||
{t('assistants.presets.import.title')}
|
||||
</Button>
|
||||
<Button type="text" onClick={handleSubscribeSettings} icon={<Rss size={18} color="var(--color-icon)" />}>
|
||||
{t('assistants.presets.settings.title')}
|
||||
</Button>
|
||||
<Button type="text" onClick={handleManageAgents} icon={<Settings2 size={18} color="var(--color-icon)" />}>
|
||||
{t('assistants.presets.manage.title')}
|
||||
</Button>
|
||||
|
||||
@ -1,58 +0,0 @@
|
||||
import { HStack } from '@renderer/components/Layout'
|
||||
import { useTheme } from '@renderer/context/ThemeProvider'
|
||||
import { useSettings } from '@renderer/hooks/useSettings'
|
||||
import { SettingDivider, SettingGroup, SettingRow, SettingRowTitle, SettingTitle } from '@renderer/pages/settings'
|
||||
import { useAppDispatch } from '@renderer/store'
|
||||
import { setAgentssubscribeUrl } from '@renderer/store/settings'
|
||||
import Input from 'antd/es/input/Input'
|
||||
import { HelpCircle } from 'lucide-react'
|
||||
import type { FC } from 'react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
|
||||
const AssistantsSubscribeUrlSettings: FC = () => {
|
||||
const { t } = useTranslation()
|
||||
const { theme } = useTheme()
|
||||
const dispatch = useAppDispatch()
|
||||
|
||||
const { agentssubscribeUrl } = useSettings()
|
||||
|
||||
const handleAgentChange = (e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
dispatch(setAgentssubscribeUrl(e.target.value))
|
||||
}
|
||||
|
||||
const handleHelpClick = () => {
|
||||
window.open('https://docs.cherry-ai.com/data-settings/assistants-subscribe', '_blank')
|
||||
}
|
||||
|
||||
return (
|
||||
<SettingGroup theme={theme}>
|
||||
<HStack alignItems="center" gap="8px">
|
||||
<SettingTitle>
|
||||
{t('assistants.presets.tag.agent')}
|
||||
{t('settings.tool.websearch.subscribe_add')}
|
||||
</SettingTitle>
|
||||
<HelpCircle
|
||||
size={16}
|
||||
color="var(--color-icon)"
|
||||
onClick={handleHelpClick}
|
||||
className="hover:!text-[var(--color-primary)] cursor-pointer transition-colors"
|
||||
/>
|
||||
</HStack>
|
||||
<SettingDivider />
|
||||
<SettingRow>
|
||||
<SettingRowTitle>{t('settings.tool.websearch.subscribe_url')}</SettingRowTitle>
|
||||
<HStack alignItems="center" gap="5px" style={{ width: 315 }}>
|
||||
<Input
|
||||
type="text"
|
||||
value={agentssubscribeUrl || ''}
|
||||
onChange={handleAgentChange}
|
||||
style={{ width: 315 }}
|
||||
placeholder={t('settings.tool.websearch.subscribe_url')}
|
||||
/>
|
||||
</HStack>
|
||||
</SettingRow>
|
||||
</SettingGroup>
|
||||
)
|
||||
}
|
||||
|
||||
export default AssistantsSubscribeUrlSettings
|
||||
@ -1,11 +1,15 @@
|
||||
import { TopView } from '@renderer/components/TopView'
|
||||
import { useAssistantPresets } from '@renderer/hooks/useAssistantPresets'
|
||||
import { useSettings } from '@renderer/hooks/useSettings'
|
||||
import { useTimer } from '@renderer/hooks/useTimer'
|
||||
import { getDefaultModel } from '@renderer/services/AssistantService'
|
||||
import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService'
|
||||
import { useAppDispatch } from '@renderer/store'
|
||||
import { setAgentssubscribeUrl } from '@renderer/store/settings'
|
||||
import type { AssistantPreset } from '@renderer/types'
|
||||
import { uuid } from '@renderer/utils'
|
||||
import { Button, Flex, Form, Input, Modal, Radio } from 'antd'
|
||||
import { Button, Divider, Flex, Form, Input, Modal, Radio, Typography } from 'antd'
|
||||
import { HelpCircle } from 'lucide-react'
|
||||
import { useState } from 'react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
|
||||
@ -20,35 +24,53 @@ const PopupContainer: React.FC<Props> = ({ resolve }) => {
|
||||
const { addAssistantPreset } = useAssistantPresets()
|
||||
const [importType, setImportType] = useState<'url' | 'file'>('url')
|
||||
const [loading, setLoading] = useState(false)
|
||||
const [subscribeLoading, setSubscribeLoading] = useState(false)
|
||||
const { setTimeoutTimer } = useTimer()
|
||||
const dispatch = useAppDispatch()
|
||||
const { agentssubscribeUrl } = useSettings()
|
||||
const [subscribeUrl, setSubscribeUrl] = useState(agentssubscribeUrl || '')
|
||||
const [selectedFile, setSelectedFile] = useState<{ name: string; content: Uint8Array } | null>(null)
|
||||
const [urlValue, setUrlValue] = useState('')
|
||||
|
||||
const isImportDisabled = importType === 'url' ? !urlValue.trim() : !selectedFile
|
||||
const isSubscribed = !!agentssubscribeUrl
|
||||
|
||||
const handleSelectFile = async () => {
|
||||
const result = await window.api.file.open({
|
||||
filters: [{ name: t('assistants.presets.import.file_filter'), extensions: ['json'] }]
|
||||
})
|
||||
|
||||
if (result) {
|
||||
setSelectedFile({ name: result.fileName, content: result.content })
|
||||
}
|
||||
}
|
||||
|
||||
const onFinish = async () => {
|
||||
// Validate before setting loading
|
||||
if (importType === 'url' && !urlValue.trim()) {
|
||||
window.toast.error(t('assistants.presets.import.error.url_required'))
|
||||
return
|
||||
}
|
||||
if (importType === 'file' && !selectedFile) {
|
||||
window.toast.error(t('assistants.presets.import.error.file_required'))
|
||||
return
|
||||
}
|
||||
|
||||
const onFinish = async (values: { url?: string }) => {
|
||||
setLoading(true)
|
||||
try {
|
||||
let presets: AssistantPreset[] = []
|
||||
|
||||
if (importType === 'url') {
|
||||
if (!values.url) {
|
||||
throw new Error(t('assistants.presets.import.error.url_required'))
|
||||
}
|
||||
const response = await fetch(values.url)
|
||||
const response = await fetch(urlValue.trim())
|
||||
if (!response.ok) {
|
||||
throw new Error(t('assistants.presets.import.error.fetch_failed'))
|
||||
}
|
||||
const data = await response.json()
|
||||
presets = Array.isArray(data) ? data : [data]
|
||||
} else {
|
||||
const result = await window.api.file.open({
|
||||
filters: [{ name: t('assistants.presets.import.file_filter'), extensions: ['json'] }]
|
||||
})
|
||||
|
||||
if (result) {
|
||||
presets = JSON.parse(new TextDecoder('utf-8').decode(result.content))
|
||||
if (!Array.isArray(presets)) {
|
||||
presets = [presets]
|
||||
}
|
||||
} else {
|
||||
return
|
||||
presets = JSON.parse(new TextDecoder('utf-8').decode(selectedFile!.content))
|
||||
if (!Array.isArray(presets)) {
|
||||
presets = [presets]
|
||||
}
|
||||
}
|
||||
|
||||
@ -74,7 +96,7 @@ const PopupContainer: React.FC<Props> = ({ resolve }) => {
|
||||
addAssistantPreset(newPreset)
|
||||
}
|
||||
|
||||
window.toast.success(t('message.agents.imported'))
|
||||
window.toast.success(t('message.agents.imported', { count: presets.length }))
|
||||
|
||||
setTimeoutTimer('onFinish', () => EventEmitter.emit(EVENT_NAMES.SHOW_ASSISTANTS), 0)
|
||||
setOpen(false)
|
||||
@ -88,7 +110,42 @@ const PopupContainer: React.FC<Props> = ({ resolve }) => {
|
||||
|
||||
const onCancel = () => {
|
||||
setOpen(false)
|
||||
resolve(null)
|
||||
}
|
||||
|
||||
const handleSubscribeUrlChange = (e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
setSubscribeUrl(e.target.value)
|
||||
}
|
||||
|
||||
const handleSubscribe = async () => {
|
||||
// If already subscribed, unsubscribe
|
||||
if (isSubscribed) {
|
||||
dispatch(setAgentssubscribeUrl(''))
|
||||
setSubscribeUrl('')
|
||||
window.location.reload()
|
||||
return
|
||||
}
|
||||
|
||||
if (!subscribeUrl.trim()) {
|
||||
return
|
||||
}
|
||||
|
||||
setSubscribeLoading(true)
|
||||
try {
|
||||
const response = await fetch(subscribeUrl)
|
||||
if (!response.ok) {
|
||||
throw new Error(t('assistants.presets.import.error.fetch_failed'))
|
||||
}
|
||||
dispatch(setAgentssubscribeUrl(subscribeUrl))
|
||||
window.location.reload()
|
||||
} catch (error) {
|
||||
window.toast.error(error instanceof Error ? error.message : t('message.agents.import.error'))
|
||||
} finally {
|
||||
setSubscribeLoading(false)
|
||||
}
|
||||
}
|
||||
|
||||
const handleHelpClick = () => {
|
||||
window.open('https://docs.cherry-ai.com/data-settings/assistants-subscribe', '_blank')
|
||||
}
|
||||
|
||||
return (
|
||||
@ -96,39 +153,79 @@ const PopupContainer: React.FC<Props> = ({ resolve }) => {
      title={t('assistants.presets.import.title')}
      open={open}
      onCancel={onCancel}
      maskClosable={false}
      footer={
        <Flex justify="end" gap={8}>
          <Button onClick={onCancel}>{t('common.cancel')}</Button>
          <Button type="primary" onClick={() => form.submit()} loading={loading}>
            {t('assistants.presets.import.button')}
          </Button>
        </Flex>
      }
      afterClose={() => resolve(null)}
      footer={null}
      transitionName="animation-move-down"
      styles={{ body: { padding: '16px' } }}
      centered>
      <Form form={form} onFinish={onFinish} layout="vertical">
        <Form.Item>
          <Radio.Group value={importType} onChange={(e) => setImportType(e.target.value)}>
            <Radio.Button value="url">{t('assistants.presets.import.type.url')}</Radio.Button>
            <Radio.Button value="file">{t('assistants.presets.import.type.file')}</Radio.Button>
          </Radio.Group>
        <Form.Item style={{ marginBottom: 0 }}>
          <Flex align="center" gap={12} style={{ width: '100%' }}>
            <Radio.Group value={importType} onChange={(e) => setImportType(e.target.value)}>
              <Radio.Button value="url">{t('assistants.presets.import.type.url')}</Radio.Button>
              <Radio.Button value="file">{t('assistants.presets.import.type.file')}</Radio.Button>
            </Radio.Group>

            {importType === 'url' && (
              <Form.Item
                name="url"
                rules={[{ required: true, message: t('assistants.presets.import.error.url_required') }]}
                style={{ flex: 1, marginBottom: 0 }}>
                <Input
                  placeholder={t('assistants.presets.import.url_placeholder')}
                  value={urlValue}
                  onChange={(e) => setUrlValue(e.target.value)}
                />
              </Form.Item>
            )}

            {importType === 'file' && (
              <>
                <Button onClick={handleSelectFile}>{t('assistants.presets.import.select_file')}</Button>
                {selectedFile && (
                  <Typography.Text type="secondary" ellipsis style={{ maxWidth: 200 }}>
                    {selectedFile.name}
                  </Typography.Text>
                )}
                <div style={{ flex: 1 }} />
              </>
            )}

            <Button type="primary" onClick={onFinish} loading={loading} disabled={isImportDisabled}>
              {t('assistants.presets.import.button')}
            </Button>
          </Flex>
        </Form.Item>

        {importType === 'url' && (
          <Form.Item
            name="url"
            rules={[{ required: true, message: t('assistants.presets.import.error.url_required') }]}>
            <Input placeholder={t('assistants.presets.import.url_placeholder')} />
          </Form.Item>
        )}

        {importType === 'file' && (
          <Form.Item>
            <Button onClick={() => form.submit()}>{t('assistants.presets.import.select_file')}</Button>
          </Form.Item>
        )}
      </Form>

      <Divider style={{ margin: '16px 0' }} />

      <Flex align="center" gap={4}>
        <Typography.Text strong style={{ flexShrink: 0, fontSize: 16 }}>
          {t('assistants.presets.tag.agent')}
          {t('settings.tool.websearch.subscribe_add')}
        </Typography.Text>
        <HelpCircle
          size={16}
          color="var(--color-icon)"
          onClick={handleHelpClick}
          className="hover:!text-[var(--color-primary)] cursor-pointer transition-colors"
          style={{ flexShrink: 0 }}
        />
      </Flex>

      <Flex align="center" gap={12} style={{ marginTop: 10 }}>
        <Input
          type="text"
          value={subscribeUrl}
          onChange={handleSubscribeUrlChange}
          style={{ flex: 1 }}
          placeholder={t('settings.tool.websearch.subscribe_url')}
        />
        <Button type="primary" onClick={handleSubscribe} loading={subscribeLoading} disabled={!subscribeUrl.trim()}>
          {isSubscribed ? t('common.unsubscribe') : t('common.subscribe')}
        </Button>
      </Flex>
    </Modal>
  )
}

@ -1,4 +1,4 @@
import { MenuOutlined } from '@ant-design/icons'
import { ExportOutlined, MenuOutlined } from '@ant-design/icons'
import { DraggableList } from '@renderer/components/DraggableList'
import { DeleteIcon } from '@renderer/components/Icons'
import { Box, HStack } from '@renderer/components/Layout'
@ -10,13 +10,13 @@ import { useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'

type Mode = 'sort' | 'delete'
type Mode = 'sort' | 'manage'

const PopupContainer: React.FC = () => {
  const [open, setOpen] = useState(true)
  const { t } = useTranslation()
  const { presets, setAssistantPresets } = useAssistantPresets()
  const [mode, setMode] = useState<Mode>(() => (presets.length > 50 ? 'delete' : 'sort'))
  const [mode, setMode] = useState<Mode>('manage')
  const [selectedIds, setSelectedIds] = useState<Set<string>>(new Set())

  const onCancel = () => {
@ -88,6 +88,23 @@ const PopupContainer: React.FC = () => {
    })
  }

  const handleBatchExport = async () => {
    if (selectedIds.size === 0) return

    const selectedPresets = presets.filter((p) => selectedIds.has(p.id))
    const exportData = selectedPresets.map((p) => ({
      name: p.name,
      emoji: p.emoji,
      prompt: p.prompt,
      description: p.description,
      group: p.group
    }))

    const fileName = selectedIds.size === 1 ? `${selectedPresets[0].name}.json` : `assistants_${selectedIds.size}.json`

    await window.api.file.save(fileName, JSON.stringify(exportData, null, 2))
  }

  const isAllSelected = presets.length > 0 && selectedIds.size === presets.length
  const isIndeterminate = selectedIds.size > 0 && selectedIds.size < presets.length

@ -98,13 +115,14 @@ const PopupContainer: React.FC = () => {
      onCancel={onCancel}
      afterClose={onClose}
      footer={null}
      width={600}
      transitionName="animation-move-down"
      centered>
      <Container>
        {presets.length > 0 && (
          <>
            <ActionBar>
              {mode === 'delete' ? (
              {mode === 'manage' ? (
                <HStack alignItems="center">
                  <Checkbox checked={isAllSelected} indeterminate={isIndeterminate} onChange={handleSelectAll}>
                    {t('common.select_all')}
@ -119,15 +137,24 @@ const PopupContainer: React.FC = () => {
                <div />
              )}
              <HStack gap="8px" alignItems="center">
                {mode === 'delete' && (
                  <Button
                    danger
                    type="text"
                    icon={<DeleteIcon size={14} />}
                    disabled={selectedIds.size === 0}
                    onClick={handleBatchDelete}>
                    {t('assistants.presets.manage.batch_delete.button')} ({selectedIds.size})
                  </Button>
                {mode === 'manage' && (
                  <>
                    <Button
                      type="text"
                      icon={<ExportOutlined />}
                      disabled={selectedIds.size === 0}
                      onClick={handleBatchExport}>
                      {t('assistants.presets.manage.batch_export.button')} ({selectedIds.size})
                    </Button>
                    <Button
                      danger
                      type="text"
                      icon={<DeleteIcon size={14} />}
                      disabled={selectedIds.size === 0}
                      onClick={handleBatchDelete}>
                      {t('assistants.presets.manage.batch_delete.button')} ({selectedIds.size})
                    </Button>
                  </>
                )}
                <Segmented
                  size="small"
@ -135,7 +162,7 @@ const PopupContainer: React.FC = () => {
                  onChange={(value) => handleModeChange(value as Mode)}
                  options={[
                    { label: t('assistants.presets.manage.mode.sort'), value: 'sort' },
                    { label: t('assistants.presets.manage.mode.delete'), value: 'delete' }
                    { label: t('assistants.presets.manage.mode.manage'), value: 'manage' }
                  ]}
                />
              </HStack>

@ -34,6 +34,10 @@ import {
  getProviderByModel,
  getQuickModel
} from './AssistantService'
import { ConversationService } from './ConversationService'
import { injectUserMessageWithKnowledgeSearchPrompt } from './KnowledgeService'
import type { BlockManager } from './messageStreaming'
import type { StreamProcessorCallbacks } from './StreamProcessingService'
// import { processKnowledgeSearch } from './KnowledgeService'
// import {
//   filterContextMessages,
@ -79,6 +83,59 @@ export async function fetchMcpTools(assistant: Assistant) {
  return mcpTools
}

/**
 * Convert user messages into a format the LLM can understand and send the request
 * @param request - Request object containing the message content and assistant info
 * @param onChunkReceived - Callback that receives streamed response chunks
 */
// Written as a function for now; switch back to a class later if one is needed
export async function transformMessagesAndFetch(
  request: {
    messages: Message[]
    assistant: Assistant
    blockManager: BlockManager
    assistantMsgId: string
    callbacks: StreamProcessorCallbacks
    topicId?: string // topicId is used for tracing
    options: {
      signal?: AbortSignal
      timeout?: number
      headers?: Record<string, string>
    }
  },
  onChunkReceived: (chunk: Chunk) => void
) {
  const { messages, assistant } = request

  try {
    const { modelMessages, uiMessages } = await ConversationService.prepareMessagesForModel(messages, assistant)

    // replace prompt variables
    assistant.prompt = await replacePromptVariables(assistant.prompt, assistant.model?.name)

    // inject knowledge search prompt into model messages
    await injectUserMessageWithKnowledgeSearchPrompt({
      modelMessages,
      assistant,
      assistantMsgId: request.assistantMsgId,
      topicId: request.topicId,
      blockManager: request.blockManager,
      setCitationBlockId: request.callbacks.setCitationBlockId!
    })

    await fetchChatCompletion({
      messages: modelMessages,
      assistant: assistant,
      topicId: request.topicId,
      requestOptions: request.options,
      uiMessages,
      onChunkReceived
    })
  } catch (error: any) {
    onChunkReceived({ type: ChunkType.ERROR, error })
  }
}

export async function fetchChatCompletion({
  messages,
  prompt,

@ -38,7 +38,8 @@ export const DEFAULT_ASSISTANT_SETTINGS = {
  enableTopP: false,
  // It would gracefully fallback to prompt if not supported by model.
  toolUseMode: 'function',
  customParameters: []
  customParameters: [],
  reasoning_effort: 'default'
} as const satisfies AssistantSettings

export function getDefaultAssistant(): Assistant {
@ -186,7 +187,7 @@ export const getAssistantSettings = (assistant: Assistant): AssistantSettings =>
    streamOutput: assistant?.settings?.streamOutput ?? true,
    toolUseMode: assistant?.settings?.toolUseMode ?? 'function',
    defaultModel: assistant?.defaultModel ?? undefined,
    reasoning_effort: assistant?.settings?.reasoning_effort ?? undefined,
    reasoning_effort: assistant?.settings?.reasoning_effort ?? 'default',
    customParameters: assistant?.settings?.customParameters ?? []
  }
}

@ -2,10 +2,13 @@ import { loggerService } from '@logger'
import type { Span } from '@opentelemetry/api'
import { ModernAiProvider } from '@renderer/aiCore'
import AiProvider from '@renderer/aiCore/legacy'
import { getMessageContent } from '@renderer/aiCore/plugins/searchOrchestrationPlugin'
import { DEFAULT_KNOWLEDGE_DOCUMENT_COUNT, DEFAULT_KNOWLEDGE_THRESHOLD } from '@renderer/config/constant'
import { getEmbeddingMaxContext } from '@renderer/config/embedings'
import { REFERENCE_PROMPT } from '@renderer/config/prompts'
import { addSpan, endSpan } from '@renderer/services/SpanManagerService'
import store from '@renderer/store'
import type { Assistant } from '@renderer/types'
import {
  type FileMetadata,
  type KnowledgeBase,
@ -16,13 +19,17 @@ import {
} from '@renderer/types'
import type { Chunk } from '@renderer/types/chunk'
import { ChunkType } from '@renderer/types/chunk'
import { MessageBlockStatus, MessageBlockType } from '@renderer/types/newMessage'
import { routeToEndpoint } from '@renderer/utils'
import type { ExtractResults } from '@renderer/utils/extract'
import { createCitationBlock } from '@renderer/utils/messageUtils/create'
import { isAzureOpenAIProvider, isGeminiProvider } from '@renderer/utils/provider'
import type { ModelMessage, UserModelMessage } from 'ai'
import { isEmpty } from 'lodash'

import { getProviderByModel } from './AssistantService'
import FileManager from './FileManager'
import type { BlockManager } from './messageStreaming'

const logger = loggerService.withContext('RendererKnowledgeService')

@ -338,3 +345,128 @@ export function processKnowledgeReferences(
    }
  }
}

export const injectUserMessageWithKnowledgeSearchPrompt = async ({
  modelMessages,
  assistant,
  assistantMsgId,
  topicId,
  blockManager,
  setCitationBlockId
}: {
  modelMessages: ModelMessage[]
  assistant: Assistant
  assistantMsgId: string
  topicId?: string
  blockManager: BlockManager
  setCitationBlockId: (blockId: string) => void
}) => {
  if (assistant.knowledge_bases?.length && modelMessages.length > 0) {
    const lastUserMessage = modelMessages[modelMessages.length - 1]
    const isUserMessage = lastUserMessage.role === 'user'

    if (!isUserMessage) {
      return
    }

    const knowledgeReferences = await getKnowledgeReferences({
      assistant,
      lastUserMessage,
      topicId: topicId
    })

    if (knowledgeReferences.length === 0) {
      return
    }

    await createKnowledgeReferencesBlock({
      assistantMsgId,
      knowledgeReferences,
      blockManager,
      setCitationBlockId
    })

    const question = getMessageContent(lastUserMessage) || ''
    const references = JSON.stringify(knowledgeReferences, null, 2)

    const knowledgeSearchPrompt = REFERENCE_PROMPT.replace('{question}', question).replace('{references}', references)

    if (typeof lastUserMessage.content === 'string') {
      lastUserMessage.content = knowledgeSearchPrompt
    } else if (Array.isArray(lastUserMessage.content)) {
      const textPart = lastUserMessage.content.find((part) => part.type === 'text')
      if (textPart) {
        textPart.text = knowledgeSearchPrompt
      } else {
        lastUserMessage.content.push({
          type: 'text',
          text: knowledgeSearchPrompt
        })
      }
    }
  }
}

export const getKnowledgeReferences = async ({
  assistant,
  lastUserMessage,
  topicId
}: {
  assistant: Assistant
  lastUserMessage: UserModelMessage
  topicId?: string
}) => {
  // Return an empty result if the assistant has no knowledge base
  if (!assistant || isEmpty(assistant.knowledge_bases)) {
    return []
  }

  // Get the knowledge base IDs
  const knowledgeBaseIds = assistant.knowledge_bases?.map((base) => base.id)

  // Get the user message content
  const question = getMessageContent(lastUserMessage) || ''

  // Get the knowledge base references
  const knowledgeReferences = await processKnowledgeSearch(
    {
      knowledge: {
        question: [question],
        rewrite: ''
      }
    },
    knowledgeBaseIds,
    topicId!
  )

  // Return the references
  return knowledgeReferences
}

export const createKnowledgeReferencesBlock = async ({
  assistantMsgId,
  knowledgeReferences,
  blockManager,
  setCitationBlockId
}: {
  assistantMsgId: string
  knowledgeReferences: KnowledgeReference[]
  blockManager: BlockManager
  setCitationBlockId: (blockId: string) => void
}) => {
  // Create the citation block
  const citationBlock = createCitationBlock(
    assistantMsgId,
    { knowledge: knowledgeReferences },
    { status: MessageBlockStatus.SUCCESS }
  )

  // Process the citation block
  blockManager.handleBlockTransition(citationBlock, MessageBlockType.CITATION)

  // Set the citation block ID
  setCitationBlockId(citationBlock.id)

  // Return the citation block
  return citationBlock
}

@ -1,91 +0,0 @@
import type { Assistant, Message } from '@renderer/types'
import type { Chunk } from '@renderer/types/chunk'
import { ChunkType } from '@renderer/types/chunk'
import { replacePromptVariables } from '@renderer/utils/prompt'

import { fetchChatCompletion } from './ApiService'
import { ConversationService } from './ConversationService'

/**
 * The request object for handling a user message.
 */
export interface OrchestrationRequest {
  messages: Message[]
  assistant: Assistant
  options: {
    signal?: AbortSignal
    timeout?: number
    headers?: Record<string, string>
  }
  topicId?: string // topicId is used for tracing
}

/**
 * The OrchestrationService is responsible for orchestrating the different services
 * to handle a user's message. It contains the core logic of the application.
 */
// NOTE: this class is currently unused
export class OrchestrationService {
  constructor() {
    // In the future, this could be a singleton, but for now, a new instance is fine.
    // this.conversationService = new ConversationService()
  }

  /**
   * This is the core method to handle user messages.
   * It takes the message context and an events object for callbacks,
   * and orchestrates the call to the LLM.
   * The logic is moved from `messageThunk.ts`.
   * @param request The orchestration request containing messages and assistant info.
   * @param events A set of callbacks to report progress and results to the UI layer.
   */
  async transformMessagesAndFetch(request: OrchestrationRequest, onChunkReceived: (chunk: Chunk) => void) {
    const { messages, assistant } = request

    try {
      const { modelMessages, uiMessages } = await ConversationService.prepareMessagesForModel(messages, assistant)

      await fetchChatCompletion({
        messages: modelMessages,
        assistant: assistant,
        requestOptions: request.options,
        onChunkReceived,
        topicId: request.topicId,
        uiMessages: uiMessages
      })
    } catch (error: any) {
      onChunkReceived({ type: ChunkType.ERROR, error })
    }
  }
}

/**
 * Convert user messages into a format the LLM can understand and send the request
 * @param request - Request object containing the message content and assistant info
 * @param onChunkReceived - Callback that receives streamed response chunks
 */
// Written as a function for now; switch back to a class later if one is needed
export async function transformMessagesAndFetch(
  request: OrchestrationRequest,
  onChunkReceived: (chunk: Chunk) => void
) {
  const { messages, assistant } = request

  try {
    const { modelMessages, uiMessages } = await ConversationService.prepareMessagesForModel(messages, assistant)

    // replace prompt variables
    assistant.prompt = await replacePromptVariables(assistant.prompt, assistant.model?.name)

    await fetchChatCompletion({
      messages: modelMessages,
      assistant: assistant,
      requestOptions: request.options,
      onChunkReceived,
      topicId: request.topicId,
      uiMessages
    })
  } catch (error: any) {
    onChunkReceived({ type: ChunkType.ERROR, error })
  }
}
@ -34,6 +34,10 @@ export interface StreamProcessorCallbacks {
  onLLMWebSearchInProgress?: () => void
  // LLM Web search complete
  onLLMWebSearchComplete?: (llmWebSearchResult: WebSearchResponse) => void
  // Get citation block ID
  getCitationBlockId?: () => string | null
  // Set citation block ID
  setCitationBlockId?: (blockId: string) => void
  // Image generation chunk received
  onImageCreated?: () => void
  onImageDelta?: (imageData: GenerateImageResponse) => void

@ -121,6 +121,11 @@ export const createCitationCallbacks = (deps: CitationCallbacksDependencies) =>
    },

    // Exposed so that textCallbacks can read the citationBlockId
    getCitationBlockId: () => citationBlockId
    getCitationBlockId: () => citationBlockId,

    // Exposed so that KnowledgeService can set the citationBlockId
    setCitationBlockId: (blockId: string) => {
      citationBlockId = blockId
    }
  }
}

@ -67,7 +67,7 @@ const persistedReducer = persistReducer(
  {
    key: 'cherry-studio',
    storage,
    version: 186,
    version: 187,
    blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs', 'toolPermissions'],
    migrate
  },

@ -183,6 +183,16 @@ export const builtinMCPServers: BuiltinMCPServer[] = [
    provider: 'CherryAI',
    installSource: 'builtin',
    isTrusted: true
  },
  {
    id: nanoid(),
    name: BuiltinMCPServerNames.nowledgeMem,
    reference: 'https://mem.nowledge.co/',
    type: 'inMemory',
    isActive: false,
    provider: 'Nowledge',
    installSource: 'builtin',
    isTrusted: true
  }
] as const

@ -3032,13 +3032,27 @@ const migrateConfig = {
        provider.type = 'ollama'
        }
      })
      addProvider(state, 'mimo')
      logger.info('migrate 186 success')
      return state
    } catch (error) {
      logger.error('migrate 186 error', error as Error)
      return state
    }
  },
  '187': (state: RootState) => {
    try {
      state.assistants.assistants.forEach((assistant) => {
        if (assistant.settings && assistant.settings.reasoning_effort === undefined) {
          assistant.settings.reasoning_effort = 'default'
        }
      })
      addProvider(state, 'mimo')
      logger.info('migrate 187 success')
      return state
    } catch (error) {
      logger.error('migrate 187 error', error as Error)
      return state
    }
  }
}

@ -2,12 +2,11 @@ import { loggerService } from '@logger'
import { AiSdkToChunkAdapter } from '@renderer/aiCore/chunk/AiSdkToChunkAdapter'
import { AgentApiClient } from '@renderer/api/agent'
import db from '@renderer/databases'
import { fetchMessagesSummary } from '@renderer/services/ApiService'
import { fetchMessagesSummary, transformMessagesAndFetch } from '@renderer/services/ApiService'
import { DbService } from '@renderer/services/db/DbService'
import FileManager from '@renderer/services/FileManager'
import { BlockManager } from '@renderer/services/messageStreaming/BlockManager'
import { createCallbacks } from '@renderer/services/messageStreaming/callbacks'
import { transformMessagesAndFetch } from '@renderer/services/OrchestrateService'
import { endSpan } from '@renderer/services/SpanManagerService'
import { createStreamProcessor, type StreamProcessorCallbacks } from '@renderer/services/StreamProcessingService'
import store from '@renderer/store'
@ -814,6 +813,9 @@ const fetchAndProcessAssistantResponseImpl = async (
      messages: messagesForContext,
      assistant,
      topicId,
      blockManager,
      assistantMsgId,
      callbacks,
      options: {
        signal: abortController.signal,
        timeout: 30000,

@ -109,7 +109,7 @@ const ThinkModelTypes = [
  'deepseek_hybrid'
] as const

export type ReasoningEffortOption = NonNullable<OpenAI.ReasoningEffort> | 'auto'
export type ReasoningEffortOption = NonNullable<OpenAI.ReasoningEffort> | 'auto' | 'default'
export type ThinkingOption = ReasoningEffortOption
export type ThinkingModelType = (typeof ThinkModelTypes)[number]
export type ThinkingOptionConfig = Record<ThinkingModelType, ThinkingOption[]>
@ -121,6 +121,8 @@ export function isThinkModelType(type: string): type is ThinkingModelType {
}

export const EFFORT_RATIO: EffortRatio = {
  // 'default' is not expected to be used.
  default: 0,
  none: 0.01,
  minimal: 0.05,
  low: 0.05,
@ -141,12 +143,11 @@ export type AssistantSettings = {
  streamOutput: boolean
  defaultModel?: Model
  customParameters?: AssistantSettingCustomParameters[]
  reasoning_effort?: ReasoningEffortOption
  /** Preserve the reasoning effort from the last time a thinking model was used, and restore it when switching from a non-thinking model back to a thinking model.
   *
   * TODO: reasoning_effort === undefined currently carries two meanings: in some cases it explicitly disables thinking, in others the parameter is simply not passed.
   * Thinking control should be refactored in the future to separate enabling/disabling thinking from the thinking option, so this cache is no longer needed.
   *
  reasoning_effort: ReasoningEffortOption
  /**
   * Preserve the effective reasoning effort (not 'default') from the last use of a thinking model which supports thinking control,
   * and restore it when switching back from a non-thinking or fixed reasoning model.
   * FIXME: It should be managed by external cache service instead of being stored in the assistant
   */
  reasoning_effort_cache?: ReasoningEffortOption
  qwenThinkMode?: boolean
@ -751,7 +752,8 @@ export const BuiltinMCPServerNames = {
  difyKnowledge: '@cherry/dify-knowledge',
  python: '@cherry/python',
  didiMCP: '@cherry/didi-mcp',
  browser: '@cherry/browser'
  browser: '@cherry/browser',
  nowledgeMem: '@cherry/nowledge-mem'
} as const

export type BuiltinMCPServerName = (typeof BuiltinMCPServerNames)[keyof typeof BuiltinMCPServerNames]

yarn.lock
@ -11246,7 +11246,7 @@ __metadata:
  languageName: node
  linkType: hard

"buffer-equal-constant-time@npm:1.0.1":
"buffer-equal-constant-time@npm:^1.0.1":
  version: 1.0.1
  resolution: "buffer-equal-constant-time@npm:1.0.1"
  checksum: 10c0/fb2294e64d23c573d0dd1f1e7a466c3e978fe94a4e0f8183937912ca374619773bef8e2aceb854129d2efecbbc515bbd0cc78d2734a3e3031edb0888531bbc8e
@ -17178,24 +17178,24 @@ __metadata:
  languageName: node
  linkType: hard

"jwa@npm:^2.0.0":
  version: 2.0.0
  resolution: "jwa@npm:2.0.0"
"jwa@npm:^2.0.1":
  version: 2.0.1
  resolution: "jwa@npm:2.0.1"
  dependencies:
    buffer-equal-constant-time: "npm:1.0.1"
    buffer-equal-constant-time: "npm:^1.0.1"
    ecdsa-sig-formatter: "npm:1.0.11"
    safe-buffer: "npm:^5.0.1"
  checksum: 10c0/6baab823b93c038ba1d2a9e531984dcadbc04e9eb98d171f4901b7a40d2be15961a359335de1671d78cb6d987f07cbe5d350d8143255977a889160c4d90fcc3c
  checksum: 10c0/ab3ebc6598e10dc11419d4ed675c9ca714a387481466b10e8a6f3f65d8d9c9237e2826f2505280a739cf4cbcf511cb288eeec22b5c9c63286fc5a2e4f97e78cf
  languageName: node
  linkType: hard

"jws@npm:^4.0.0":
  version: 4.0.0
  resolution: "jws@npm:4.0.0"
  version: 4.0.1
  resolution: "jws@npm:4.0.1"
  dependencies:
    jwa: "npm:^2.0.0"
    jwa: "npm:^2.0.1"
    safe-buffer: "npm:^5.0.1"
  checksum: 10c0/f1ca77ea5451e8dc5ee219cb7053b8a4f1254a79cb22417a2e1043c1eb8a569ae118c68f24d72a589e8a3dd1824697f47d6bd4fb4bebb93a3bdf53545e721661
  checksum: 10c0/6be1ed93023aef570ccc5ea8d162b065840f3ef12f0d1bb3114cade844de7a357d5dc558201d9a65101e70885a6fa56b17462f520e6b0d426195510618a154d0
  languageName: node
  linkType: hard