mirror of
https://github.com/CherryHQ/cherry-studio.git
synced 2026-01-03 11:19:10 +08:00
Merge branch 'main' of github.com:CherryHQ/cherry-studio into v2
This commit is contained in:
commit
799db1f8d1
129
docs/en/references/fuzzy-search.md
Normal file
129
docs/en/references/fuzzy-search.md
Normal file
@ -0,0 +1,129 @@
|
||||
# Fuzzy Search for File List
|
||||
|
||||
This document describes the fuzzy search implementation for file listing in Cherry Studio.
|
||||
|
||||
## Overview
|
||||
|
||||
The fuzzy search feature allows users to find files by typing partial or approximate file names/paths. It uses a two-tier file filtering strategy (ripgrep glob pre-filtering with greedy substring fallback) combined with subsequence-based scoring for optimal performance and flexibility.
|
||||
|
||||
## Features
|
||||
|
||||
- **Ripgrep Glob Pre-filtering**: Primary filtering using glob patterns for fast native-level filtering
|
||||
- **Greedy Substring Matching**: Fallback file filtering strategy when ripgrep glob pre-filtering returns no results
|
||||
- **Subsequence-based Segment Scoring**: During scoring, path segments gain additional weight when query characters appear in order
|
||||
- **Relevance Scoring**: Results are sorted by a relevance score derived from multiple factors
|
||||
|
||||
## Matching Strategies
|
||||
|
||||
### 1. Ripgrep Glob Pre-filtering (Primary)
|
||||
|
||||
The query is converted to a glob pattern for ripgrep to do initial filtering:
|
||||
|
||||
```
|
||||
Query: "updater"
|
||||
Glob: "*u*p*d*a*t*e*r*"
|
||||
```
|
||||
|
||||
This leverages ripgrep's native performance for the initial file filtering.
|
||||
|
||||
### 2. Greedy Substring Matching (Fallback)
|
||||
|
||||
When the glob pre-filter returns no results, the system falls back to greedy substring matching. This allows more flexible matching:
|
||||
|
||||
```
|
||||
Query: "updatercontroller"
|
||||
File: "packages/update/src/node/updateController.ts"
|
||||
|
||||
Matching process:
|
||||
1. Find "update" (longest match from start)
|
||||
2. Remaining "rcontroller" → find "r" then "controller"
|
||||
3. All parts matched → Success
|
||||
```
|
||||
|
||||
## Scoring Algorithm
|
||||
|
||||
Results are ranked by a relevance score based on named constants defined in `FileStorage.ts`:
|
||||
|
||||
| Constant | Value | Description |
|
||||
|----------|-------|-------------|
|
||||
| `SCORE_FILENAME_STARTS` | 100 | Filename starts with query (highest priority) |
|
||||
| `SCORE_FILENAME_CONTAINS` | 80 | Filename contains exact query substring |
|
||||
| `SCORE_SEGMENT_MATCH` | 60 | Per path segment that matches query |
|
||||
| `SCORE_WORD_BOUNDARY` | 20 | Query matches start of a word |
|
||||
| `SCORE_CONSECUTIVE_CHAR` | 15 | Per consecutive character match |
|
||||
| `PATH_LENGTH_PENALTY_FACTOR` | 4 | Logarithmic penalty for longer paths |
|
||||
|
||||
### Scoring Strategy
|
||||
|
||||
The scoring prioritizes:
|
||||
1. **Filename matches** (highest): Files where the query appears in the filename are most relevant
|
||||
2. **Path segment matches**: Multiple matching segments indicate stronger relevance
|
||||
3. **Word boundaries**: Matching at word starts (e.g., "upd" matching "update") is preferred
|
||||
4. **Consecutive matches**: Longer consecutive character sequences score higher
|
||||
5. **Path length**: Shorter paths are preferred (logarithmic penalty prevents long paths from dominating)
|
||||
|
||||
### Example Scoring
|
||||
|
||||
For query `updater`:
|
||||
|
||||
| File | Score Factors |
|
||||
|------|---------------|
|
||||
| `RCUpdater.js` | Short path + filename contains "updater" |
|
||||
| `updateController.ts` | Multiple segment matches |
|
||||
| `UpdaterHelper.plist` | Long path penalty |
|
||||
|
||||
## Configuration
|
||||
|
||||
### DirectoryListOptions
|
||||
|
||||
```typescript
|
||||
interface DirectoryListOptions {
|
||||
recursive?: boolean // Default: true
|
||||
maxDepth?: number // Default: 10
|
||||
includeHidden?: boolean // Default: false
|
||||
includeFiles?: boolean // Default: true
|
||||
includeDirectories?: boolean // Default: true
|
||||
maxEntries?: number // Default: 20
|
||||
searchPattern?: string // Default: '.'
|
||||
fuzzy?: boolean // Default: true
|
||||
}
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```typescript
|
||||
// Basic fuzzy search
|
||||
const files = await window.api.file.listDirectory(dirPath, {
|
||||
searchPattern: 'updater',
|
||||
fuzzy: true,
|
||||
maxEntries: 20
|
||||
})
|
||||
|
||||
// Disable fuzzy search (exact glob matching)
|
||||
const files = await window.api.file.listDirectory(dirPath, {
|
||||
searchPattern: 'update',
|
||||
fuzzy: false
|
||||
})
|
||||
```
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
1. **Ripgrep Pre-filtering**: Most queries are handled by ripgrep's native glob matching, which is extremely fast
|
||||
2. **Fallback Only When Needed**: Greedy substring matching (which loads all files) only runs when glob matching returns empty results
|
||||
3. **Result Limiting**: Only top 20 results are returned by default
|
||||
4. **Excluded Directories**: Common large directories are automatically excluded:
|
||||
- `node_modules`
|
||||
- `.git`
|
||||
- `dist`, `build`
|
||||
- `.next`, `.nuxt`
|
||||
- `coverage`, `.cache`
|
||||
|
||||
## Implementation Details
|
||||
|
||||
The implementation is located in `src/main/services/FileStorage.ts`:
|
||||
|
||||
- `queryToGlobPattern()`: Converts query to ripgrep glob pattern
|
||||
- `isFuzzyMatch()`: Subsequence matching algorithm
|
||||
- `isGreedySubstringMatch()`: Greedy substring matching fallback
|
||||
- `getFuzzyMatchScore()`: Calculates relevance score
|
||||
- `listDirectoryWithRipgrep()`: Main search orchestration
|
||||
129
docs/zh/references/fuzzy-search.md
Normal file
129
docs/zh/references/fuzzy-search.md
Normal file
@ -0,0 +1,129 @@
|
||||
# 文件列表模糊搜索
|
||||
|
||||
本文档描述了 Cherry Studio 中文件列表的模糊搜索实现。
|
||||
|
||||
## 概述
|
||||
|
||||
模糊搜索功能允许用户通过输入部分或近似的文件名/路径来查找文件。它使用两层文件过滤策略(ripgrep glob 预过滤 + 贪婪子串匹配回退),结合基于子序列的评分,以获得最佳性能和灵活性。
|
||||
|
||||
## 功能特性
|
||||
|
||||
- **Ripgrep Glob 预过滤**:使用 glob 模式进行快速原生级过滤的主要过滤策略
|
||||
- **贪婪子串匹配**:当 ripgrep glob 预过滤无结果时的回退文件过滤策略
|
||||
- **基于子序列的段评分**:评分时,当查询字符按顺序出现时,路径段获得额外权重
|
||||
- **相关性评分**:结果按多因素相关性分数排序
|
||||
|
||||
## 匹配策略
|
||||
|
||||
### 1. Ripgrep Glob 预过滤(主要)
|
||||
|
||||
查询被转换为 glob 模式供 ripgrep 进行初始过滤:
|
||||
|
||||
```
|
||||
查询: "updater"
|
||||
Glob: "*u*p*d*a*t*e*r*"
|
||||
```
|
||||
|
||||
这利用了 ripgrep 的原生性能进行初始文件过滤。
|
||||
|
||||
### 2. 贪婪子串匹配(回退)
|
||||
|
||||
当 glob 预过滤无结果时,系统回退到贪婪子串匹配。这允许更灵活的匹配:
|
||||
|
||||
```
|
||||
查询: "updatercontroller"
|
||||
文件: "packages/update/src/node/updateController.ts"
|
||||
|
||||
匹配过程:
|
||||
1. 找到 "update"(从开头的最长匹配)
|
||||
2. 剩余 "rcontroller" → 找到 "r" 然后 "controller"
|
||||
3. 所有部分都匹配 → 成功
|
||||
```
|
||||
|
||||
## 评分算法
|
||||
|
||||
结果根据 `FileStorage.ts` 中定义的命名常量进行相关性分数排名:
|
||||
|
||||
| 常量 | 值 | 描述 |
|
||||
|------|-----|------|
|
||||
| `SCORE_FILENAME_STARTS` | 100 | 文件名以查询开头(最高优先级)|
|
||||
| `SCORE_FILENAME_CONTAINS` | 80 | 文件名包含精确查询子串 |
|
||||
| `SCORE_SEGMENT_MATCH` | 60 | 每个匹配查询的路径段 |
|
||||
| `SCORE_WORD_BOUNDARY` | 20 | 查询匹配单词开头 |
|
||||
| `SCORE_CONSECUTIVE_CHAR` | 15 | 每个连续字符匹配 |
|
||||
| `PATH_LENGTH_PENALTY_FACTOR` | 4 | 较长路径的对数惩罚 |
|
||||
|
||||
### 评分策略
|
||||
|
||||
评分优先级:
|
||||
1. **文件名匹配**(最高):查询出现在文件名中的文件最相关
|
||||
2. **路径段匹配**:多个匹配段表示更强的相关性
|
||||
3. **词边界**:在单词开头匹配(如 "upd" 匹配 "update")更优先
|
||||
4. **连续匹配**:更长的连续字符序列得分更高
|
||||
5. **路径长度**:较短路径更优先(对数惩罚防止长路径主导评分)
|
||||
|
||||
### 评分示例
|
||||
|
||||
对于查询 `updater`:
|
||||
|
||||
| 文件 | 评分因素 |
|
||||
|------|----------|
|
||||
| `RCUpdater.js` | 短路径 + 文件名包含 "updater" |
|
||||
| `updateController.ts` | 多个路径段匹配 |
|
||||
| `UpdaterHelper.plist` | 长路径惩罚 |
|
||||
|
||||
## 配置
|
||||
|
||||
### DirectoryListOptions
|
||||
|
||||
```typescript
|
||||
interface DirectoryListOptions {
|
||||
recursive?: boolean // 默认: true
|
||||
maxDepth?: number // 默认: 10
|
||||
includeHidden?: boolean // 默认: false
|
||||
includeFiles?: boolean // 默认: true
|
||||
includeDirectories?: boolean // 默认: true
|
||||
maxEntries?: number // 默认: 20
|
||||
searchPattern?: string // 默认: '.'
|
||||
fuzzy?: boolean // 默认: true
|
||||
}
|
||||
```
|
||||
|
||||
## 使用方法
|
||||
|
||||
```typescript
|
||||
// 基本模糊搜索
|
||||
const files = await window.api.file.listDirectory(dirPath, {
|
||||
searchPattern: 'updater',
|
||||
fuzzy: true,
|
||||
maxEntries: 20
|
||||
})
|
||||
|
||||
// 禁用模糊搜索(精确 glob 匹配)
|
||||
const files = await window.api.file.listDirectory(dirPath, {
|
||||
searchPattern: 'update',
|
||||
fuzzy: false
|
||||
})
|
||||
```
|
||||
|
||||
## 性能考虑
|
||||
|
||||
1. **Ripgrep 预过滤**:大多数查询由 ripgrep 的原生 glob 匹配处理,速度极快
|
||||
2. **仅在需要时回退**:贪婪子串匹配(加载所有文件)仅在 glob 匹配返回空结果时运行
|
||||
3. **结果限制**:默认只返回前 20 个结果
|
||||
4. **排除目录**:自动排除常见的大型目录:
|
||||
- `node_modules`
|
||||
- `.git`
|
||||
- `dist`、`build`
|
||||
- `.next`、`.nuxt`
|
||||
- `coverage`、`.cache`
|
||||
|
||||
## 实现细节
|
||||
|
||||
实现位于 `src/main/services/FileStorage.ts`:
|
||||
|
||||
- `queryToGlobPattern()`:将查询转换为 ripgrep glob 模式
|
||||
- `isFuzzyMatch()`:子序列匹配算法
|
||||
- `isGreedySubstringMatch()`:贪婪子串匹配回退
|
||||
- `getFuzzyMatchScore()`:计算相关性分数
|
||||
- `listDirectoryWithRipgrep()`:主搜索协调
|
||||
@ -395,6 +395,7 @@ export enum IpcChannel {
|
||||
OCR_ListProviders = 'ocr:list-providers',
|
||||
|
||||
// OVMS
|
||||
Ovms_IsSupported = 'ovms:is-supported',
|
||||
Ovms_AddModel = 'ovms:add-model',
|
||||
Ovms_StopAddModel = 'ovms:stop-addmodel',
|
||||
Ovms_GetModels = 'ovms:get-models',
|
||||
|
||||
@ -47,7 +47,7 @@ import { dataApiService } from '@data/DataApiService'
|
||||
import { cacheService } from '@data/CacheService'
|
||||
import { initWebviewHotkeys } from './services/WebviewService'
|
||||
import { runAsyncFunction } from './utils'
|
||||
import { ovmsManager } from './services/OvmsManager'
|
||||
import { isOvmsSupported } from './services/OvmsManager'
|
||||
|
||||
const logger = loggerService.withContext('MainEntry')
|
||||
|
||||
@ -259,7 +259,7 @@ if (!app.requestSingleInstanceLock()) {
|
||||
})
|
||||
|
||||
registerShortcuts(mainWindow)
|
||||
registerIpc(mainWindow, app)
|
||||
await registerIpc(mainWindow, app)
|
||||
localTransferService.startDiscovery({ resetList: true })
|
||||
|
||||
replaceDevtoolsFont(mainWindow)
|
||||
@ -349,7 +349,14 @@ if (!app.requestSingleInstanceLock()) {
|
||||
|
||||
app.on('will-quit', async () => {
|
||||
// 简单的资源清理,不阻塞退出流程
|
||||
await ovmsManager.stopOvms()
|
||||
if (isOvmsSupported) {
|
||||
const { ovmsManager } = await import('./services/OvmsManager')
|
||||
if (ovmsManager) {
|
||||
await ovmsManager.stopOvms()
|
||||
} else {
|
||||
logger.warn('Unexpected behavior: undefined ovmsManager, but OVMS should be supported.')
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
await mcpService.cleanup()
|
||||
|
||||
@ -60,7 +60,7 @@ import NotificationService from './services/NotificationService'
|
||||
import * as NutstoreService from './services/NutstoreService'
|
||||
import ObsidianVaultService from './services/ObsidianVaultService'
|
||||
import { ocrService } from './services/ocr/OcrService'
|
||||
import { ovmsManager } from './services/OvmsManager'
|
||||
import { isOvmsSupported } from './services/OvmsManager'
|
||||
import powerMonitorService from './services/PowerMonitorService'
|
||||
import { proxyManager } from './services/ProxyManager'
|
||||
import { pythonService } from './services/PythonService'
|
||||
@ -97,6 +97,7 @@ import {
|
||||
untildify
|
||||
} from './utils/file'
|
||||
import { updateAppDataConfig } from './utils/init'
|
||||
import { getCpuName, getDeviceType, getHostname } from './utils/system'
|
||||
import { compress, decompress } from './utils/zip'
|
||||
|
||||
const logger = loggerService.withContext('IPC')
|
||||
@ -120,7 +121,7 @@ function extractPluginError(error: unknown): PluginError | null {
|
||||
return null
|
||||
}
|
||||
|
||||
export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
export async function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
const appUpdater = new AppUpdater()
|
||||
const notificationService = new NotificationService()
|
||||
|
||||
@ -499,9 +500,9 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
ipcMain.handle(IpcChannel.Zip_Decompress, (_, text: Buffer) => decompress(text))
|
||||
|
||||
// system
|
||||
ipcMain.handle(IpcChannel.System_GetDeviceType, () => (isMac ? 'mac' : isWin ? 'windows' : 'linux'))
|
||||
ipcMain.handle(IpcChannel.System_GetHostname, () => require('os').hostname())
|
||||
ipcMain.handle(IpcChannel.System_GetCpuName, () => require('os').cpus()[0].model)
|
||||
ipcMain.handle(IpcChannel.System_GetDeviceType, getDeviceType)
|
||||
ipcMain.handle(IpcChannel.System_GetHostname, getHostname)
|
||||
ipcMain.handle(IpcChannel.System_GetCpuName, getCpuName)
|
||||
ipcMain.handle(IpcChannel.System_CheckGitBash, () => {
|
||||
if (!isWin) {
|
||||
return true // Non-Windows systems don't need Git Bash
|
||||
@ -975,15 +976,36 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
ipcMain.handle(IpcChannel.OCR_ListProviders, () => ocrService.listProviderIds())
|
||||
|
||||
// OVMS
|
||||
ipcMain.handle(IpcChannel.Ovms_AddModel, (_, modelName: string, modelId: string, modelSource: string, task: string) =>
|
||||
ovmsManager.addModel(modelName, modelId, modelSource, task)
|
||||
)
|
||||
ipcMain.handle(IpcChannel.Ovms_StopAddModel, () => ovmsManager.stopAddModel())
|
||||
ipcMain.handle(IpcChannel.Ovms_GetModels, () => ovmsManager.getModels())
|
||||
ipcMain.handle(IpcChannel.Ovms_IsRunning, () => ovmsManager.initializeOvms())
|
||||
ipcMain.handle(IpcChannel.Ovms_GetStatus, () => ovmsManager.getOvmsStatus())
|
||||
ipcMain.handle(IpcChannel.Ovms_RunOVMS, () => ovmsManager.runOvms())
|
||||
ipcMain.handle(IpcChannel.Ovms_StopOVMS, () => ovmsManager.stopOvms())
|
||||
ipcMain.handle(IpcChannel.Ovms_IsSupported, () => isOvmsSupported)
|
||||
if (isOvmsSupported) {
|
||||
const { ovmsManager } = await import('./services/OvmsManager')
|
||||
if (ovmsManager) {
|
||||
ipcMain.handle(
|
||||
IpcChannel.Ovms_AddModel,
|
||||
(_, modelName: string, modelId: string, modelSource: string, task: string) =>
|
||||
ovmsManager.addModel(modelName, modelId, modelSource, task)
|
||||
)
|
||||
ipcMain.handle(IpcChannel.Ovms_StopAddModel, () => ovmsManager.stopAddModel())
|
||||
ipcMain.handle(IpcChannel.Ovms_GetModels, () => ovmsManager.getModels())
|
||||
ipcMain.handle(IpcChannel.Ovms_IsRunning, () => ovmsManager.initializeOvms())
|
||||
ipcMain.handle(IpcChannel.Ovms_GetStatus, () => ovmsManager.getOvmsStatus())
|
||||
ipcMain.handle(IpcChannel.Ovms_RunOVMS, () => ovmsManager.runOvms())
|
||||
ipcMain.handle(IpcChannel.Ovms_StopOVMS, () => ovmsManager.stopOvms())
|
||||
} else {
|
||||
logger.error('Unexpected behavior: undefined ovmsManager, but OVMS should be supported.')
|
||||
}
|
||||
} else {
|
||||
const fallback = () => {
|
||||
throw new Error('OVMS is only supported on Windows with intel CPU.')
|
||||
}
|
||||
ipcMain.handle(IpcChannel.Ovms_AddModel, fallback)
|
||||
ipcMain.handle(IpcChannel.Ovms_StopAddModel, fallback)
|
||||
ipcMain.handle(IpcChannel.Ovms_GetModels, fallback)
|
||||
ipcMain.handle(IpcChannel.Ovms_IsRunning, fallback)
|
||||
ipcMain.handle(IpcChannel.Ovms_GetStatus, fallback)
|
||||
ipcMain.handle(IpcChannel.Ovms_RunOVMS, fallback)
|
||||
ipcMain.handle(IpcChannel.Ovms_StopOVMS, fallback)
|
||||
}
|
||||
|
||||
// CherryAI
|
||||
ipcMain.handle(IpcChannel.Cherryai_GetSignature, (_, params) => generateSignature(params))
|
||||
|
||||
@ -130,16 +130,18 @@ interface DirectoryListOptions {
|
||||
includeDirectories?: boolean
|
||||
maxEntries?: number
|
||||
searchPattern?: string
|
||||
fuzzy?: boolean
|
||||
}
|
||||
|
||||
const DEFAULT_DIRECTORY_LIST_OPTIONS: Required<DirectoryListOptions> = {
|
||||
recursive: true,
|
||||
maxDepth: 3,
|
||||
maxDepth: 10,
|
||||
includeHidden: false,
|
||||
includeFiles: true,
|
||||
includeDirectories: true,
|
||||
maxEntries: 10,
|
||||
searchPattern: '.'
|
||||
maxEntries: 20,
|
||||
searchPattern: '.',
|
||||
fuzzy: true
|
||||
}
|
||||
|
||||
class FileStorage {
|
||||
@ -1046,10 +1048,226 @@ class FileStorage {
|
||||
}
|
||||
|
||||
/**
|
||||
* Search files by content pattern
|
||||
* Fuzzy match: checks if all characters in query appear in text in order (case-insensitive)
|
||||
* Example: "updater" matches "packages/update/src/node/updateController.ts"
|
||||
*/
|
||||
private async searchByContent(resolvedPath: string, options: Required<DirectoryListOptions>): Promise<string[]> {
|
||||
const args: string[] = ['-l']
|
||||
private isFuzzyMatch(text: string, query: string): boolean {
|
||||
let i = 0 // text index
|
||||
let j = 0 // query index
|
||||
const textLower = text.toLowerCase()
|
||||
const queryLower = query.toLowerCase()
|
||||
|
||||
while (i < textLower.length && j < queryLower.length) {
|
||||
if (textLower[i] === queryLower[j]) {
|
||||
j++
|
||||
}
|
||||
i++
|
||||
}
|
||||
return j === queryLower.length
|
||||
}
|
||||
|
||||
/**
|
||||
* Scoring constants for fuzzy match relevance ranking
|
||||
* Higher values = higher priority in search results
|
||||
*/
|
||||
private static readonly SCORE_SEGMENT_MATCH = 60 // Per path segment that matches query
|
||||
private static readonly SCORE_FILENAME_CONTAINS = 80 // Filename contains exact query substring
|
||||
private static readonly SCORE_FILENAME_STARTS = 100 // Filename starts with query (highest priority)
|
||||
private static readonly SCORE_CONSECUTIVE_CHAR = 15 // Per consecutive character match
|
||||
private static readonly SCORE_WORD_BOUNDARY = 20 // Query matches start of a word
|
||||
private static readonly PATH_LENGTH_PENALTY_FACTOR = 4 // Logarithmic penalty multiplier for longer paths
|
||||
|
||||
/**
|
||||
* Calculate fuzzy match score (higher is better)
|
||||
* Scoring factors:
|
||||
* - Consecutive character matches (bonus)
|
||||
* - Match at word boundaries (bonus)
|
||||
* - Shorter path length (bonus)
|
||||
* - Match in filename vs directory (bonus)
|
||||
*/
|
||||
private getFuzzyMatchScore(filePath: string, query: string): number {
|
||||
const pathLower = filePath.toLowerCase()
|
||||
const queryLower = query.toLowerCase()
|
||||
const fileName = filePath.split('/').pop() || ''
|
||||
const fileNameLower = fileName.toLowerCase()
|
||||
|
||||
let score = 0
|
||||
|
||||
// Count how many times query-related words appear in path segments
|
||||
const pathSegments = pathLower.split(/[/\\]/)
|
||||
let segmentMatchCount = 0
|
||||
for (const segment of pathSegments) {
|
||||
if (this.isFuzzyMatch(segment, queryLower)) {
|
||||
segmentMatchCount++
|
||||
}
|
||||
}
|
||||
score += segmentMatchCount * FileStorage.SCORE_SEGMENT_MATCH
|
||||
|
||||
// Bonus for filename starting with query (stronger than generic "contains")
|
||||
if (fileNameLower.startsWith(queryLower)) {
|
||||
score += FileStorage.SCORE_FILENAME_STARTS
|
||||
} else if (fileNameLower.includes(queryLower)) {
|
||||
// Bonus for exact substring match in filename (e.g., "updater" in "RCUpdater.js")
|
||||
score += FileStorage.SCORE_FILENAME_CONTAINS
|
||||
}
|
||||
|
||||
// Calculate consecutive match bonus
|
||||
let i = 0
|
||||
let j = 0
|
||||
let consecutiveCount = 0
|
||||
let maxConsecutive = 0
|
||||
|
||||
while (i < pathLower.length && j < queryLower.length) {
|
||||
if (pathLower[i] === queryLower[j]) {
|
||||
consecutiveCount++
|
||||
maxConsecutive = Math.max(maxConsecutive, consecutiveCount)
|
||||
j++
|
||||
} else {
|
||||
consecutiveCount = 0
|
||||
}
|
||||
i++
|
||||
}
|
||||
score += maxConsecutive * FileStorage.SCORE_CONSECUTIVE_CHAR
|
||||
|
||||
// Bonus for word boundary matches (e.g., "upd" matches start of "update")
|
||||
// Only count once to avoid inflating scores for paths with repeated patterns
|
||||
const boundaryPrefix = queryLower.slice(0, Math.min(3, queryLower.length))
|
||||
const words = pathLower.split(/[/\\._-]/)
|
||||
for (const word of words) {
|
||||
if (word.startsWith(boundaryPrefix)) {
|
||||
score += FileStorage.SCORE_WORD_BOUNDARY
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Penalty for longer paths (prefer shorter, more specific matches)
|
||||
// Use logarithmic scaling to prevent long paths from dominating the score
|
||||
// A 50-char path gets ~-16 penalty, 100-char gets ~-18, 200-char gets ~-21
|
||||
score -= Math.log(filePath.length + 1) * FileStorage.PATH_LENGTH_PENALTY_FACTOR
|
||||
|
||||
return score
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert query to glob pattern for ripgrep pre-filtering
|
||||
* e.g., "updater" -> "*u*p*d*a*t*e*r*"
|
||||
*/
|
||||
private queryToGlobPattern(query: string): string {
|
||||
// Escape special glob characters (including ! for negation)
|
||||
const escaped = query.replace(/[[\]{}()*+?.,\\^$|#!]/g, '\\$&')
|
||||
// Convert to fuzzy glob: each char separated by *
|
||||
return '*' + escaped.split('').join('*') + '*'
|
||||
}
|
||||
|
||||
/**
|
||||
* Greedy substring match: check if all characters in query can be matched
|
||||
* by finding consecutive substrings in text (not necessarily single chars)
|
||||
* e.g., "updatercontroller" matches "updateController" by:
|
||||
* "update" + "r" (from Controller) + "controller"
|
||||
*/
|
||||
private isGreedySubstringMatch(text: string, query: string): boolean {
|
||||
const textLower = text.toLowerCase()
|
||||
const queryLower = query.toLowerCase()
|
||||
|
||||
let queryIndex = 0
|
||||
let searchStart = 0
|
||||
|
||||
while (queryIndex < queryLower.length) {
|
||||
// Try to find the longest matching substring starting at queryIndex
|
||||
let bestMatchLen = 0
|
||||
let bestMatchPos = -1
|
||||
|
||||
for (let len = queryLower.length - queryIndex; len >= 1; len--) {
|
||||
const substr = queryLower.slice(queryIndex, queryIndex + len)
|
||||
const foundAt = textLower.indexOf(substr, searchStart)
|
||||
if (foundAt !== -1) {
|
||||
bestMatchLen = len
|
||||
bestMatchPos = foundAt
|
||||
break // Found longest possible match
|
||||
}
|
||||
}
|
||||
|
||||
if (bestMatchLen === 0) {
|
||||
// No substring match found, query cannot be matched
|
||||
return false
|
||||
}
|
||||
|
||||
queryIndex += bestMatchLen
|
||||
searchStart = bestMatchPos + bestMatchLen
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate greedy substring match score (higher is better)
|
||||
* Rewards: fewer match fragments, shorter match span, matches in filename
|
||||
*/
|
||||
private getGreedyMatchScore(filePath: string, query: string): number {
|
||||
const textLower = filePath.toLowerCase()
|
||||
const queryLower = query.toLowerCase()
|
||||
const fileName = filePath.split('/').pop() || ''
|
||||
const fileNameLower = fileName.toLowerCase()
|
||||
|
||||
let queryIndex = 0
|
||||
let searchStart = 0
|
||||
let fragmentCount = 0
|
||||
let firstMatchPos = -1
|
||||
let lastMatchEnd = 0
|
||||
|
||||
while (queryIndex < queryLower.length) {
|
||||
let bestMatchLen = 0
|
||||
let bestMatchPos = -1
|
||||
|
||||
for (let len = queryLower.length - queryIndex; len >= 1; len--) {
|
||||
const substr = queryLower.slice(queryIndex, queryIndex + len)
|
||||
const foundAt = textLower.indexOf(substr, searchStart)
|
||||
if (foundAt !== -1) {
|
||||
bestMatchLen = len
|
||||
bestMatchPos = foundAt
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if (bestMatchLen === 0) {
|
||||
return -Infinity // No match
|
||||
}
|
||||
|
||||
fragmentCount++
|
||||
if (firstMatchPos === -1) firstMatchPos = bestMatchPos
|
||||
lastMatchEnd = bestMatchPos + bestMatchLen
|
||||
queryIndex += bestMatchLen
|
||||
searchStart = lastMatchEnd
|
||||
}
|
||||
|
||||
const matchSpan = lastMatchEnd - firstMatchPos
|
||||
let score = 0
|
||||
|
||||
// Fewer fragments = better (single continuous match is best)
|
||||
// Max bonus when fragmentCount=1, decreases as fragments increase
|
||||
score += Math.max(0, 100 - (fragmentCount - 1) * 30)
|
||||
|
||||
// Shorter span relative to query length = better (tighter match)
|
||||
// Perfect match: span equals query length
|
||||
const spanRatio = queryLower.length / matchSpan
|
||||
score += spanRatio * 50
|
||||
|
||||
// Bonus for match in filename
|
||||
if (this.isGreedySubstringMatch(fileNameLower, queryLower)) {
|
||||
score += 80
|
||||
}
|
||||
|
||||
// Penalty for longer paths
|
||||
score -= Math.log(filePath.length + 1) * 4
|
||||
|
||||
return score
|
||||
}
|
||||
|
||||
/**
|
||||
* Build common ripgrep arguments for file listing
|
||||
*/
|
||||
private buildRipgrepBaseArgs(options: Required<DirectoryListOptions>, resolvedPath: string): string[] {
|
||||
const args: string[] = ['--files']
|
||||
|
||||
// Handle hidden files
|
||||
if (!options.includeHidden) {
|
||||
@ -1076,82 +1294,74 @@ class FileStorage {
|
||||
args.push('--max-depth', options.maxDepth.toString())
|
||||
}
|
||||
|
||||
// Handle max count
|
||||
if (options.maxEntries > 0) {
|
||||
args.push('--max-count', options.maxEntries.toString())
|
||||
}
|
||||
|
||||
// Add search pattern (search in content)
|
||||
args.push(options.searchPattern)
|
||||
|
||||
// Add the directory path
|
||||
args.push(resolvedPath)
|
||||
|
||||
const { exitCode, output } = await executeRipgrep(args)
|
||||
|
||||
// Exit code 0 means files found, 1 means no files found (still success), 2+ means error
|
||||
if (exitCode >= 2) {
|
||||
throw new Error(`Ripgrep failed with exit code ${exitCode}: ${output}`)
|
||||
}
|
||||
|
||||
// Parse ripgrep output (already sorted by relevance)
|
||||
const results = output
|
||||
.split('\n')
|
||||
.filter((line) => line.trim())
|
||||
.map((line) => line.replace(/\\/g, '/'))
|
||||
.slice(0, options.maxEntries)
|
||||
|
||||
return results
|
||||
return args
|
||||
}
|
||||
|
||||
private async listDirectoryWithRipgrep(
|
||||
resolvedPath: string,
|
||||
options: Required<DirectoryListOptions>
|
||||
): Promise<string[]> {
|
||||
const maxEntries = options.maxEntries
|
||||
// Fuzzy search mode: use ripgrep glob for pre-filtering, then score in JS
|
||||
if (options.fuzzy && options.searchPattern && options.searchPattern !== '.') {
|
||||
const args = this.buildRipgrepBaseArgs(options, resolvedPath)
|
||||
|
||||
// Step 1: Search by filename first
|
||||
// Insert glob pattern before the path (last element)
|
||||
const globPattern = this.queryToGlobPattern(options.searchPattern)
|
||||
args.splice(args.length - 1, 0, '--iglob', globPattern)
|
||||
|
||||
const { exitCode, output } = await executeRipgrep(args)
|
||||
|
||||
if (exitCode >= 2) {
|
||||
throw new Error(`Ripgrep failed with exit code ${exitCode}: ${output}`)
|
||||
}
|
||||
|
||||
const filteredFiles = output
|
||||
.split('\n')
|
||||
.filter((line) => line.trim())
|
||||
.map((line) => line.replace(/\\/g, '/'))
|
||||
|
||||
// If fuzzy glob found results, validate fuzzy match, sort and return
|
||||
if (filteredFiles.length > 0) {
|
||||
return filteredFiles
|
||||
.filter((file) => this.isFuzzyMatch(file, options.searchPattern))
|
||||
.map((file) => ({ file, score: this.getFuzzyMatchScore(file, options.searchPattern) }))
|
||||
.sort((a, b) => b.score - a.score)
|
||||
.slice(0, options.maxEntries)
|
||||
.map((item) => item.file)
|
||||
}
|
||||
|
||||
// Fallback: if no results, try greedy substring match on all files
|
||||
logger.debug('Fuzzy glob returned no results, falling back to greedy substring match')
|
||||
const fallbackArgs = this.buildRipgrepBaseArgs(options, resolvedPath)
|
||||
|
||||
const fallbackResult = await executeRipgrep(fallbackArgs)
|
||||
|
||||
if (fallbackResult.exitCode >= 2) {
|
||||
return []
|
||||
}
|
||||
|
||||
const allFiles = fallbackResult.output
|
||||
.split('\n')
|
||||
.filter((line) => line.trim())
|
||||
.map((line) => line.replace(/\\/g, '/'))
|
||||
|
||||
const greedyMatched = allFiles.filter((file) => this.isGreedySubstringMatch(file, options.searchPattern))
|
||||
|
||||
return greedyMatched
|
||||
.map((file) => ({ file, score: this.getGreedyMatchScore(file, options.searchPattern) }))
|
||||
.sort((a, b) => b.score - a.score)
|
||||
.slice(0, options.maxEntries)
|
||||
.map((item) => item.file)
|
||||
}
|
||||
|
||||
// Fallback: search by filename only (non-fuzzy mode)
|
||||
logger.debug('Searching by filename pattern', { pattern: options.searchPattern, path: resolvedPath })
|
||||
const filenameResults = await this.searchByFilename(resolvedPath, options)
|
||||
|
||||
logger.debug('Found matches by filename', { count: filenameResults.length })
|
||||
|
||||
// If we have enough filename matches, return them
|
||||
if (filenameResults.length >= maxEntries) {
|
||||
return filenameResults.slice(0, maxEntries)
|
||||
}
|
||||
|
||||
// Step 2: If filename matches are less than maxEntries, search by content to fill up
|
||||
logger.debug('Filename matches insufficient, searching by content to fill up', {
|
||||
filenameCount: filenameResults.length,
|
||||
needed: maxEntries - filenameResults.length
|
||||
})
|
||||
|
||||
// Adjust maxEntries for content search to get enough results
|
||||
const contentOptions = {
|
||||
...options,
|
||||
maxEntries: maxEntries - filenameResults.length + 20 // Request extra to account for duplicates
|
||||
}
|
||||
|
||||
const contentResults = await this.searchByContent(resolvedPath, contentOptions)
|
||||
|
||||
logger.debug('Found matches by content', { count: contentResults.length })
|
||||
|
||||
// Combine results: filename matches first, then content matches (deduplicated)
|
||||
const combined = [...filenameResults]
|
||||
const filenameSet = new Set(filenameResults)
|
||||
|
||||
for (const filePath of contentResults) {
|
||||
if (!filenameSet.has(filePath)) {
|
||||
combined.push(filePath)
|
||||
if (combined.length >= maxEntries) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logger.debug('Combined results', { total: combined.length, filenameCount: filenameResults.length })
|
||||
return combined.slice(0, maxEntries)
|
||||
return filenameResults.slice(0, options.maxEntries)
|
||||
}
|
||||
|
||||
public validateNotesDirectory = async (_: Electron.IpcMainInvokeEvent, dirPath: string): Promise<boolean> => {
|
||||
|
||||
@ -785,7 +785,7 @@ class McpService {
|
||||
...tool,
|
||||
inputSchema: z.parse(MCPToolInputSchema, tool.inputSchema),
|
||||
outputSchema: tool.outputSchema ? z.parse(MCPToolOutputSchema, tool.outputSchema) : undefined,
|
||||
id: buildFunctionCallToolName(server.name, tool.name, server.id),
|
||||
id: buildFunctionCallToolName(server.name, tool.name),
|
||||
serverId: server.id,
|
||||
serverName: server.name,
|
||||
type: 'mcp'
|
||||
|
||||
@ -3,6 +3,8 @@ import { homedir } from 'node:os'
|
||||
import { promisify } from 'node:util'
|
||||
|
||||
import { loggerService } from '@logger'
|
||||
import { isWin } from '@main/constant'
|
||||
import { getCpuName } from '@main/utils/system'
|
||||
import { HOME_CHERRY_DIR } from '@shared/config/constant'
|
||||
import * as fs from 'fs-extra'
|
||||
import * as path from 'path'
|
||||
@ -11,6 +13,8 @@ const logger = loggerService.withContext('OvmsManager')
|
||||
|
||||
const execAsync = promisify(exec)
|
||||
|
||||
export const isOvmsSupported = isWin && getCpuName().toLowerCase().includes('intel')
|
||||
|
||||
interface OvmsProcess {
|
||||
pid: number
|
||||
path: string
|
||||
@ -29,6 +33,12 @@ interface OvmsConfig {
|
||||
class OvmsManager {
|
||||
private ovms: OvmsProcess | null = null
|
||||
|
||||
constructor() {
|
||||
if (!isOvmsSupported) {
|
||||
throw new Error('OVMS Manager is only supported on Windows platform with Intel CPU.')
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Recursively terminate a process and all its child processes
|
||||
* @param pid Process ID to terminate
|
||||
@ -563,4 +573,4 @@ class OvmsManager {
|
||||
}
|
||||
|
||||
// Export singleton instance
|
||||
export const ovmsManager = new OvmsManager()
|
||||
export const ovmsManager = isOvmsSupported ? new OvmsManager() : undefined
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
import { loggerService } from '@logger'
|
||||
import { mcpApiService } from '@main/apiServer/services/mcp'
|
||||
import { type ModelValidationError, validateModelId } from '@main/apiServer/utils'
|
||||
import { buildFunctionCallToolName } from '@main/utils/mcp'
|
||||
import type { AgentType, MCPTool, SlashCommand, Tool } from '@types'
|
||||
import { objectKeys } from '@types'
|
||||
import fs from 'fs'
|
||||
@ -12,6 +13,17 @@ import { builtinSlashCommands } from './services/claudecode/commands'
|
||||
import { builtinTools } from './services/claudecode/tools'
|
||||
|
||||
const logger = loggerService.withContext('BaseService')
|
||||
const MCP_TOOL_ID_PREFIX = 'mcp__'
|
||||
const MCP_TOOL_LEGACY_PREFIX = 'mcp_'
|
||||
|
||||
const buildMcpToolId = (serverId: string, toolName: string) => `${MCP_TOOL_ID_PREFIX}${serverId}__${toolName}`
|
||||
const toLegacyMcpToolId = (toolId: string) => {
|
||||
if (!toolId.startsWith(MCP_TOOL_ID_PREFIX)) {
|
||||
return null
|
||||
}
|
||||
const rawId = toolId.slice(MCP_TOOL_ID_PREFIX.length)
|
||||
return `${MCP_TOOL_LEGACY_PREFIX}${rawId.replace(/__/g, '_')}`
|
||||
}
|
||||
|
||||
/**
|
||||
* Base service class providing shared utilities for all agent-related services.
|
||||
@ -33,8 +45,12 @@ export abstract class BaseService {
|
||||
'slash_commands'
|
||||
]
|
||||
|
||||
public async listMcpTools(agentType: AgentType, ids?: string[]): Promise<Tool[]> {
|
||||
public async listMcpTools(
|
||||
agentType: AgentType,
|
||||
ids?: string[]
|
||||
): Promise<{ tools: Tool[]; legacyIdMap: Map<string, string> }> {
|
||||
const tools: Tool[] = []
|
||||
const legacyIdMap = new Map<string, string>()
|
||||
if (agentType === 'claude-code') {
|
||||
tools.push(...builtinTools)
|
||||
}
|
||||
@ -44,13 +60,21 @@ export abstract class BaseService {
|
||||
const server = await mcpApiService.getServerInfo(id)
|
||||
if (server) {
|
||||
server.tools.forEach((tool: MCPTool) => {
|
||||
const canonicalId = buildFunctionCallToolName(server.name, tool.name)
|
||||
const serverIdBasedId = buildMcpToolId(id, tool.name)
|
||||
const legacyId = toLegacyMcpToolId(serverIdBasedId)
|
||||
|
||||
tools.push({
|
||||
id: `mcp_${id}_${tool.name}`,
|
||||
id: canonicalId,
|
||||
name: tool.name,
|
||||
type: 'mcp',
|
||||
description: tool.description || '',
|
||||
requirePermissions: true
|
||||
})
|
||||
legacyIdMap.set(serverIdBasedId, canonicalId)
|
||||
if (legacyId) {
|
||||
legacyIdMap.set(legacyId, canonicalId)
|
||||
}
|
||||
})
|
||||
}
|
||||
} catch (error) {
|
||||
@ -62,7 +86,53 @@ export abstract class BaseService {
|
||||
}
|
||||
}
|
||||
|
||||
return tools
|
||||
return { tools, legacyIdMap }
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize MCP tool IDs in allowed_tools to the current format.
|
||||
*
|
||||
* Legacy formats:
|
||||
* - "mcp__<serverId>__<toolName>" (double underscore separators, server ID based)
|
||||
* - "mcp_<serverId>_<toolName>" (single underscore separators)
|
||||
* Current format: "mcp__<serverName>__<toolName>" (double underscore separators).
|
||||
*
|
||||
* This keeps persisted data compatible without requiring a database migration.
|
||||
*/
|
||||
protected normalizeAllowedTools(
|
||||
allowedTools: string[] | undefined,
|
||||
tools: Tool[],
|
||||
legacyIdMap?: Map<string, string>
|
||||
): string[] | undefined {
|
||||
if (!allowedTools || allowedTools.length === 0) {
|
||||
return allowedTools
|
||||
}
|
||||
|
||||
const resolvedLegacyIdMap = new Map<string, string>()
|
||||
|
||||
if (legacyIdMap) {
|
||||
for (const [legacyId, canonicalId] of legacyIdMap) {
|
||||
resolvedLegacyIdMap.set(legacyId, canonicalId)
|
||||
}
|
||||
}
|
||||
|
||||
for (const tool of tools) {
|
||||
if (tool.type !== 'mcp') {
|
||||
continue
|
||||
}
|
||||
const legacyId = toLegacyMcpToolId(tool.id)
|
||||
if (!legacyId) {
|
||||
continue
|
||||
}
|
||||
resolvedLegacyIdMap.set(legacyId, tool.id)
|
||||
}
|
||||
|
||||
if (resolvedLegacyIdMap.size === 0) {
|
||||
return allowedTools
|
||||
}
|
||||
|
||||
const normalized = allowedTools.map((toolId) => resolvedLegacyIdMap.get(toolId) ?? toolId)
|
||||
return Array.from(new Set(normalized))
|
||||
}
|
||||
|
||||
public async listSlashCommands(agentType: AgentType): Promise<SlashCommand[]> {
|
||||
|
||||
@ -89,7 +89,9 @@ export class AgentService extends BaseService {
|
||||
}
|
||||
|
||||
const agent = this.deserializeJsonFields(result[0]) as GetAgentResponse
|
||||
agent.tools = await this.listMcpTools(agent.type, agent.mcps)
|
||||
const { tools, legacyIdMap } = await this.listMcpTools(agent.type, agent.mcps)
|
||||
agent.tools = tools
|
||||
agent.allowed_tools = this.normalizeAllowedTools(agent.allowed_tools, agent.tools, legacyIdMap)
|
||||
|
||||
// Load installed_plugins from cache file instead of database
|
||||
const workdir = agent.accessible_paths?.[0]
|
||||
@ -134,7 +136,9 @@ export class AgentService extends BaseService {
|
||||
const agents = result.map((row) => this.deserializeJsonFields(row)) as GetAgentResponse[]
|
||||
|
||||
for (const agent of agents) {
|
||||
agent.tools = await this.listMcpTools(agent.type, agent.mcps)
|
||||
const { tools, legacyIdMap } = await this.listMcpTools(agent.type, agent.mcps)
|
||||
agent.tools = tools
|
||||
agent.allowed_tools = this.normalizeAllowedTools(agent.allowed_tools, agent.tools, legacyIdMap)
|
||||
}
|
||||
|
||||
return { agents, total: totalResult[0].count }
|
||||
|
||||
@ -157,7 +157,9 @@ export class SessionService extends BaseService {
|
||||
}
|
||||
|
||||
const session = this.deserializeJsonFields(result[0]) as GetAgentSessionResponse
|
||||
session.tools = await this.listMcpTools(session.agent_type, session.mcps)
|
||||
const { tools, legacyIdMap } = await this.listMcpTools(session.agent_type, session.mcps)
|
||||
session.tools = tools
|
||||
session.allowed_tools = this.normalizeAllowedTools(session.allowed_tools, session.tools, legacyIdMap)
|
||||
|
||||
// If slash_commands is not in database yet (e.g., first invoke before init message),
|
||||
// fall back to builtin + local commands. Otherwise, use the merged commands from database.
|
||||
@ -203,6 +205,12 @@ export class SessionService extends BaseService {
|
||||
|
||||
const sessions = result.map((row) => this.deserializeJsonFields(row)) as GetAgentSessionResponse[]
|
||||
|
||||
for (const session of sessions) {
|
||||
const { tools, legacyIdMap } = await this.listMcpTools(session.agent_type, session.mcps)
|
||||
session.tools = tools
|
||||
session.allowed_tools = this.normalizeAllowedTools(session.allowed_tools, session.tools, legacyIdMap)
|
||||
}
|
||||
|
||||
return { sessions, total }
|
||||
}
|
||||
|
||||
|
||||
91
src/main/services/agents/tests/BaseService.test.ts
Normal file
91
src/main/services/agents/tests/BaseService.test.ts
Normal file
@ -0,0 +1,91 @@
|
||||
import type { Tool } from '@types'
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
|
||||
vi.mock('@main/apiServer/services/mcp', () => ({
|
||||
mcpApiService: {
|
||||
getServerInfo: vi.fn()
|
||||
}
|
||||
}))
|
||||
|
||||
vi.mock('@main/apiServer/utils', () => ({
|
||||
validateModelId: vi.fn()
|
||||
}))
|
||||
|
||||
import { BaseService } from '../BaseService'
|
||||
|
||||
class TestBaseService extends BaseService {
|
||||
public normalize(
|
||||
allowedTools: string[] | undefined,
|
||||
tools: Tool[],
|
||||
legacyIdMap?: Map<string, string>
|
||||
): string[] | undefined {
|
||||
return this.normalizeAllowedTools(allowedTools, tools, legacyIdMap)
|
||||
}
|
||||
}
|
||||
|
||||
const buildMcpTool = (id: string): Tool => ({
|
||||
id,
|
||||
name: id,
|
||||
type: 'mcp',
|
||||
description: 'test tool',
|
||||
requirePermissions: true
|
||||
})
|
||||
|
||||
describe('BaseService.normalizeAllowedTools', () => {
|
||||
const service = new TestBaseService()
|
||||
|
||||
it('returns undefined or empty inputs unchanged', () => {
|
||||
expect(service.normalize(undefined, [])).toBeUndefined()
|
||||
expect(service.normalize([], [])).toEqual([])
|
||||
})
|
||||
|
||||
it('normalizes legacy MCP tool IDs and deduplicates entries', () => {
|
||||
const tools: Tool[] = [
|
||||
buildMcpTool('mcp__server_one__tool_one'),
|
||||
buildMcpTool('mcp__server_two__tool_two'),
|
||||
{ id: 'custom_tool', name: 'custom_tool', type: 'custom' }
|
||||
]
|
||||
|
||||
const legacyIdMap = new Map<string, string>([
|
||||
['mcp__server-1__tool-one', 'mcp__server_one__tool_one'],
|
||||
['mcp_server-1_tool-one', 'mcp__server_one__tool_one'],
|
||||
['mcp__server-2__tool-two', 'mcp__server_two__tool_two']
|
||||
])
|
||||
|
||||
const allowedTools = [
|
||||
'mcp__server-1__tool-one',
|
||||
'mcp_server-1_tool-one',
|
||||
'mcp_server_one_tool_one',
|
||||
'mcp__server_one__tool_one',
|
||||
'custom_tool',
|
||||
'mcp__server_two__tool_two',
|
||||
'mcp_server_two_tool_two',
|
||||
'mcp__server-2__tool-two'
|
||||
]
|
||||
|
||||
expect(service.normalize(allowedTools, tools, legacyIdMap)).toEqual([
|
||||
'mcp__server_one__tool_one',
|
||||
'custom_tool',
|
||||
'mcp__server_two__tool_two'
|
||||
])
|
||||
})
|
||||
|
||||
it('keeps legacy IDs when no matching MCP tool exists', () => {
|
||||
const tools: Tool[] = [buildMcpTool('mcp__server_one__tool_one')]
|
||||
const legacyIdMap = new Map<string, string>([['mcp__server-1__tool-one', 'mcp__server_one__tool_one']])
|
||||
|
||||
const allowedTools = ['mcp__unknown__tool', 'mcp__server_one__tool_one']
|
||||
|
||||
expect(service.normalize(allowedTools, tools, legacyIdMap)).toEqual([
|
||||
'mcp__unknown__tool',
|
||||
'mcp__server_one__tool_one'
|
||||
])
|
||||
})
|
||||
|
||||
it('returns allowed tools unchanged when no MCP tools are available', () => {
|
||||
const allowedTools = ['custom_tool', 'builtin_tool']
|
||||
const tools: Tool[] = [{ id: 'custom_tool', name: 'custom_tool', type: 'custom' }]
|
||||
|
||||
expect(service.normalize(allowedTools, tools)).toEqual(allowedTools)
|
||||
})
|
||||
})
|
||||
@ -3,194 +3,223 @@ import { describe, expect, it } from 'vitest'
|
||||
import { buildFunctionCallToolName } from '../mcp'
|
||||
|
||||
describe('buildFunctionCallToolName', () => {
|
||||
describe('basic functionality', () => {
|
||||
it('should combine server name and tool name', () => {
|
||||
describe('basic format', () => {
|
||||
it('should return format mcp__{server}__{tool}', () => {
|
||||
const result = buildFunctionCallToolName('github', 'search_issues')
|
||||
expect(result).toContain('github')
|
||||
expect(result).toContain('search')
|
||||
expect(result).toBe('mcp__github__search_issues')
|
||||
})
|
||||
|
||||
it('should sanitize names by replacing dashes with underscores', () => {
|
||||
const result = buildFunctionCallToolName('my-server', 'my-tool')
|
||||
// Input dashes are replaced, but the separator between server and tool is a dash
|
||||
expect(result).toBe('my_serv-my_tool')
|
||||
expect(result).toContain('_')
|
||||
})
|
||||
|
||||
it('should handle empty server names gracefully', () => {
|
||||
const result = buildFunctionCallToolName('', 'tool')
|
||||
expect(result).toBeTruthy()
|
||||
it('should handle simple server and tool names', () => {
|
||||
expect(buildFunctionCallToolName('fetch', 'get_page')).toBe('mcp__fetch__get_page')
|
||||
expect(buildFunctionCallToolName('database', 'query')).toBe('mcp__database__query')
|
||||
expect(buildFunctionCallToolName('cherry_studio', 'search')).toBe('mcp__cherry_studio__search')
|
||||
})
|
||||
})
|
||||
|
||||
describe('uniqueness with serverId', () => {
|
||||
it('should generate different IDs for same server name but different serverIds', () => {
|
||||
const serverId1 = 'server-id-123456'
|
||||
const serverId2 = 'server-id-789012'
|
||||
const serverName = 'github'
|
||||
const toolName = 'search_repos'
|
||||
|
||||
const result1 = buildFunctionCallToolName(serverName, toolName, serverId1)
|
||||
const result2 = buildFunctionCallToolName(serverName, toolName, serverId2)
|
||||
|
||||
expect(result1).not.toBe(result2)
|
||||
expect(result1).toContain('123456')
|
||||
expect(result2).toContain('789012')
|
||||
describe('valid JavaScript identifier', () => {
|
||||
it('should always start with mcp__ prefix (valid JS identifier start)', () => {
|
||||
const result = buildFunctionCallToolName('123server', '456tool')
|
||||
expect(result).toMatch(/^mcp__/)
|
||||
expect(result).toBe('mcp__123server__456tool')
|
||||
})
|
||||
|
||||
it('should generate same ID when serverId is not provided', () => {
|
||||
it('should only contain alphanumeric chars and underscores', () => {
|
||||
const result = buildFunctionCallToolName('my-server', 'my-tool')
|
||||
expect(result).toBe('mcp__my_server__my_tool')
|
||||
expect(result).toMatch(/^[a-zA-Z][a-zA-Z0-9_]*$/)
|
||||
})
|
||||
|
||||
it('should be a valid JavaScript identifier', () => {
|
||||
const testCases = [
|
||||
['github', 'create_issue'],
|
||||
['my-server', 'fetch-data'],
|
||||
['test@server', 'tool#name'],
|
||||
['server.name', 'tool.action'],
|
||||
['123abc', 'def456']
|
||||
]
|
||||
|
||||
for (const [server, tool] of testCases) {
|
||||
const result = buildFunctionCallToolName(server, tool)
|
||||
// Valid JS identifiers match this pattern
|
||||
expect(result).toMatch(/^[a-zA-Z_][a-zA-Z0-9_]*$/)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
describe('character sanitization', () => {
|
||||
it('should replace dashes with underscores', () => {
|
||||
const result = buildFunctionCallToolName('my-server', 'my-tool-name')
|
||||
expect(result).toBe('mcp__my_server__my_tool_name')
|
||||
})
|
||||
|
||||
it('should replace special characters with underscores', () => {
|
||||
const result = buildFunctionCallToolName('test@server!', 'tool#name$')
|
||||
expect(result).toBe('mcp__test_server__tool_name')
|
||||
})
|
||||
|
||||
it('should replace dots with underscores', () => {
|
||||
const result = buildFunctionCallToolName('server.name', 'tool.action')
|
||||
expect(result).toBe('mcp__server_name__tool_action')
|
||||
})
|
||||
|
||||
it('should replace spaces with underscores', () => {
|
||||
const result = buildFunctionCallToolName('my server', 'my tool')
|
||||
expect(result).toBe('mcp__my_server__my_tool')
|
||||
})
|
||||
|
||||
it('should collapse consecutive underscores', () => {
|
||||
const result = buildFunctionCallToolName('my--server', 'my___tool')
|
||||
expect(result).toBe('mcp__my_server__my_tool')
|
||||
expect(result).not.toMatch(/_{3,}/)
|
||||
})
|
||||
|
||||
it('should trim leading and trailing underscores from parts', () => {
|
||||
const result = buildFunctionCallToolName('_server_', '_tool_')
|
||||
expect(result).toBe('mcp__server__tool')
|
||||
})
|
||||
|
||||
it('should handle names with only special characters', () => {
|
||||
const result = buildFunctionCallToolName('---', '###')
|
||||
expect(result).toBe('mcp____')
|
||||
})
|
||||
})
|
||||
|
||||
describe('length constraints', () => {
|
||||
it('should not exceed 63 characters', () => {
|
||||
const longServerName = 'a'.repeat(50)
|
||||
const longToolName = 'b'.repeat(50)
|
||||
const result = buildFunctionCallToolName(longServerName, longToolName)
|
||||
|
||||
expect(result.length).toBeLessThanOrEqual(63)
|
||||
})
|
||||
|
||||
it('should truncate server name to max 20 chars', () => {
|
||||
const longServerName = 'abcdefghijklmnopqrstuvwxyz' // 26 chars
|
||||
const result = buildFunctionCallToolName(longServerName, 'tool')
|
||||
|
||||
expect(result).toBe('mcp__abcdefghijklmnopqrst__tool')
|
||||
expect(result).toContain('abcdefghijklmnopqrst') // First 20 chars
|
||||
expect(result).not.toContain('uvwxyz') // Truncated
|
||||
})
|
||||
|
||||
it('should truncate tool name to max 35 chars', () => {
|
||||
const longToolName = 'a'.repeat(40)
|
||||
const result = buildFunctionCallToolName('server', longToolName)
|
||||
|
||||
const expectedTool = 'a'.repeat(35)
|
||||
expect(result).toBe(`mcp__server__${expectedTool}`)
|
||||
})
|
||||
|
||||
it('should not end with underscores after truncation', () => {
|
||||
// Create a name that would end with underscores after truncation
|
||||
const longServerName = 'a'.repeat(20)
|
||||
const longToolName = 'b'.repeat(35) + '___extra'
|
||||
const result = buildFunctionCallToolName(longServerName, longToolName)
|
||||
|
||||
expect(result).not.toMatch(/_+$/)
|
||||
expect(result.length).toBeLessThanOrEqual(63)
|
||||
})
|
||||
|
||||
it('should handle max length edge case exactly', () => {
|
||||
// mcp__ (5) + server (20) + __ (2) + tool (35) = 62 chars
|
||||
const server = 'a'.repeat(20)
|
||||
const tool = 'b'.repeat(35)
|
||||
const result = buildFunctionCallToolName(server, tool)
|
||||
|
||||
expect(result.length).toBe(62)
|
||||
expect(result).toBe(`mcp__${'a'.repeat(20)}__${'b'.repeat(35)}`)
|
||||
})
|
||||
})
|
||||
|
||||
describe('edge cases', () => {
|
||||
it('should handle empty server name', () => {
|
||||
const result = buildFunctionCallToolName('', 'tool')
|
||||
expect(result).toBe('mcp____tool')
|
||||
})
|
||||
|
||||
it('should handle empty tool name', () => {
|
||||
const result = buildFunctionCallToolName('server', '')
|
||||
expect(result).toBe('mcp__server__')
|
||||
})
|
||||
|
||||
it('should handle both empty names', () => {
|
||||
const result = buildFunctionCallToolName('', '')
|
||||
expect(result).toBe('mcp____')
|
||||
})
|
||||
|
||||
it('should handle whitespace-only names', () => {
|
||||
const result = buildFunctionCallToolName(' ', ' ')
|
||||
expect(result).toBe('mcp____')
|
||||
})
|
||||
|
||||
it('should trim whitespace from names', () => {
|
||||
const result = buildFunctionCallToolName(' server ', ' tool ')
|
||||
expect(result).toBe('mcp__server__tool')
|
||||
})
|
||||
|
||||
it('should handle unicode characters', () => {
|
||||
const result = buildFunctionCallToolName('服务器', '工具')
|
||||
// Unicode chars are replaced with underscores, then collapsed
|
||||
expect(result).toMatch(/^mcp__/)
|
||||
})
|
||||
|
||||
it('should handle mixed case', () => {
|
||||
const result = buildFunctionCallToolName('MyServer', 'MyTool')
|
||||
expect(result).toBe('mcp__MyServer__MyTool')
|
||||
})
|
||||
})
|
||||
|
||||
describe('deterministic output', () => {
|
||||
it('should produce consistent results for same input', () => {
|
||||
const serverName = 'github'
|
||||
const toolName = 'search_repos'
|
||||
|
||||
const result1 = buildFunctionCallToolName(serverName, toolName)
|
||||
const result2 = buildFunctionCallToolName(serverName, toolName)
|
||||
const result3 = buildFunctionCallToolName(serverName, toolName)
|
||||
|
||||
expect(result1).toBe(result2)
|
||||
expect(result2).toBe(result3)
|
||||
})
|
||||
|
||||
it('should include serverId suffix when provided', () => {
|
||||
const serverId = 'abc123def456'
|
||||
const result = buildFunctionCallToolName('server', 'tool', serverId)
|
||||
it('should produce different results for different inputs', () => {
|
||||
const result1 = buildFunctionCallToolName('server1', 'tool')
|
||||
const result2 = buildFunctionCallToolName('server2', 'tool')
|
||||
const result3 = buildFunctionCallToolName('server', 'tool1')
|
||||
const result4 = buildFunctionCallToolName('server', 'tool2')
|
||||
|
||||
// Should include last 6 chars of serverId
|
||||
expect(result).toContain('ef456')
|
||||
})
|
||||
})
|
||||
|
||||
describe('character sanitization', () => {
|
||||
it('should replace invalid characters with underscores', () => {
|
||||
const result = buildFunctionCallToolName('test@server', 'tool#name')
|
||||
expect(result).not.toMatch(/[@#]/)
|
||||
expect(result).toMatch(/^[a-zA-Z0-9_-]+$/)
|
||||
})
|
||||
|
||||
it('should ensure name starts with a letter', () => {
|
||||
const result = buildFunctionCallToolName('123server', '456tool')
|
||||
expect(result).toMatch(/^[a-zA-Z]/)
|
||||
})
|
||||
|
||||
it('should handle consecutive underscores/dashes', () => {
|
||||
const result = buildFunctionCallToolName('my--server', 'my__tool')
|
||||
expect(result).not.toMatch(/[_-]{2,}/)
|
||||
})
|
||||
})
|
||||
|
||||
describe('length constraints', () => {
|
||||
it('should truncate names longer than 63 characters', () => {
|
||||
const longServerName = 'a'.repeat(50)
|
||||
const longToolName = 'b'.repeat(50)
|
||||
const result = buildFunctionCallToolName(longServerName, longToolName, 'id123456')
|
||||
|
||||
expect(result.length).toBeLessThanOrEqual(63)
|
||||
})
|
||||
|
||||
it('should not end with underscore or dash after truncation', () => {
|
||||
const longServerName = 'a'.repeat(50)
|
||||
const longToolName = 'b'.repeat(50)
|
||||
const result = buildFunctionCallToolName(longServerName, longToolName, 'id123456')
|
||||
|
||||
expect(result).not.toMatch(/[_-]$/)
|
||||
})
|
||||
|
||||
it('should preserve serverId suffix even with long server/tool names', () => {
|
||||
const longServerName = 'a'.repeat(50)
|
||||
const longToolName = 'b'.repeat(50)
|
||||
const serverId = 'server-id-xyz789'
|
||||
|
||||
const result = buildFunctionCallToolName(longServerName, longToolName, serverId)
|
||||
|
||||
// The suffix should be preserved and not truncated
|
||||
expect(result).toContain('xyz789')
|
||||
expect(result.length).toBeLessThanOrEqual(63)
|
||||
})
|
||||
|
||||
it('should ensure two long-named servers with different IDs produce different results', () => {
|
||||
const longServerName = 'a'.repeat(50)
|
||||
const longToolName = 'b'.repeat(50)
|
||||
const serverId1 = 'server-id-abc123'
|
||||
const serverId2 = 'server-id-def456'
|
||||
|
||||
const result1 = buildFunctionCallToolName(longServerName, longToolName, serverId1)
|
||||
const result2 = buildFunctionCallToolName(longServerName, longToolName, serverId2)
|
||||
|
||||
// Both should be within limit
|
||||
expect(result1.length).toBeLessThanOrEqual(63)
|
||||
expect(result2.length).toBeLessThanOrEqual(63)
|
||||
|
||||
// They should be different due to preserved suffix
|
||||
expect(result1).not.toBe(result2)
|
||||
})
|
||||
})
|
||||
|
||||
describe('edge cases with serverId', () => {
|
||||
it('should handle serverId with only non-alphanumeric characters', () => {
|
||||
const serverId = '------' // All dashes
|
||||
const result = buildFunctionCallToolName('server', 'tool', serverId)
|
||||
|
||||
// Should still produce a valid unique suffix via fallback hash
|
||||
expect(result).toBeTruthy()
|
||||
expect(result.length).toBeLessThanOrEqual(63)
|
||||
expect(result).toMatch(/^[a-zA-Z][a-zA-Z0-9_-]*$/)
|
||||
// Should have a suffix (underscore followed by something)
|
||||
expect(result).toMatch(/_[a-z0-9]+$/)
|
||||
})
|
||||
|
||||
it('should produce different results for different non-alphanumeric serverIds', () => {
|
||||
const serverId1 = '------'
|
||||
const serverId2 = '!!!!!!'
|
||||
|
||||
const result1 = buildFunctionCallToolName('server', 'tool', serverId1)
|
||||
const result2 = buildFunctionCallToolName('server', 'tool', serverId2)
|
||||
|
||||
// Should be different because the hash fallback produces different values
|
||||
expect(result1).not.toBe(result2)
|
||||
})
|
||||
|
||||
it('should handle empty string serverId differently from undefined', () => {
|
||||
const resultWithEmpty = buildFunctionCallToolName('server', 'tool', '')
|
||||
const resultWithUndefined = buildFunctionCallToolName('server', 'tool', undefined)
|
||||
|
||||
// Empty string is falsy, so both should behave the same (no suffix)
|
||||
expect(resultWithEmpty).toBe(resultWithUndefined)
|
||||
})
|
||||
|
||||
it('should handle serverId with mixed alphanumeric and special chars', () => {
|
||||
const serverId = 'ab@#cd' // Mixed chars, last 6 chars contain some alphanumeric
|
||||
const result = buildFunctionCallToolName('server', 'tool', serverId)
|
||||
|
||||
// Should extract alphanumeric chars: 'abcd' from 'ab@#cd'
|
||||
expect(result).toContain('abcd')
|
||||
expect(result3).not.toBe(result4)
|
||||
})
|
||||
})

  describe('real-world scenarios', () => {
    it('should handle GitHub MCP server instances correctly', () => {
      const serverName = 'github'
      const toolName = 'search_repositories'

      const githubComId = 'server-github-com-abc123'
      const gheId = 'server-ghe-internal-xyz789'

      const tool1 = buildFunctionCallToolName(serverName, toolName, githubComId)
      const tool2 = buildFunctionCallToolName(serverName, toolName, gheId)

      // Should be different
      expect(tool1).not.toBe(tool2)

      // Both should be valid identifiers
      expect(tool1).toMatch(/^[a-zA-Z][a-zA-Z0-9_-]*$/)
      expect(tool2).toMatch(/^[a-zA-Z][a-zA-Z0-9_-]*$/)

      // Both should be <= 63 chars
      expect(tool1.length).toBeLessThanOrEqual(63)
      expect(tool2.length).toBeLessThanOrEqual(63)
    })

    it('should handle GitHub MCP server', () => {
      expect(buildFunctionCallToolName('github', 'create_issue')).toBe('mcp__github__create_issue')
      expect(buildFunctionCallToolName('github', 'search_repositories')).toBe('mcp__github__search_repositories')
      expect(buildFunctionCallToolName('github', 'get_pull_request')).toBe('mcp__github__get_pull_request')
    })

    it('should handle tool names that already include server name prefix', () => {
      const result = buildFunctionCallToolName('github', 'github_search_repos')
      expect(result).toBeTruthy()
      // Should not double the server name
      expect(result.split('github').length - 1).toBeLessThanOrEqual(2)
    })

    it('should handle filesystem MCP server', () => {
      expect(buildFunctionCallToolName('filesystem', 'read_file')).toBe('mcp__filesystem__read_file')
      expect(buildFunctionCallToolName('filesystem', 'write_file')).toBe('mcp__filesystem__write_file')
      expect(buildFunctionCallToolName('filesystem', 'list_directory')).toBe('mcp__filesystem__list_directory')
    })

    it('should handle hyphenated server names (common in npm packages)', () => {
      expect(buildFunctionCallToolName('cherry-fetch', 'get_page')).toBe('mcp__cherry_fetch__get_page')
      expect(buildFunctionCallToolName('mcp-server-github', 'search')).toBe('mcp__mcp_server_github__search')
    })

    it('should handle scoped npm package style names', () => {
      const result = buildFunctionCallToolName('@anthropic/mcp-server', 'chat')
      expect(result).toBe('mcp__anthropic_mcp_server__chat')
    })

    it('should handle tools with long descriptive names', () => {
      const result = buildFunctionCallToolName('github', 'search_repositories_by_language_and_stars')
      expect(result.length).toBeLessThanOrEqual(63)
      expect(result).toMatch(/^mcp__github__search_repositories_by_lan/)
    })
  })
})

@ -1,56 +1,28 @@
export function buildFunctionCallToolName(serverName: string, toolName: string, serverId?: string) {
  const sanitizedServer = serverName.trim().replace(/-/g, '_')
  const sanitizedTool = toolName.trim().replace(/-/g, '_')

  // Calculate suffix first to reserve space for it
  // Suffix format: "_" + 6 alphanumeric chars = 7 chars total
  let serverIdSuffix = ''
  if (serverId) {
    // Take the last 6 characters of the serverId for brevity
    serverIdSuffix = serverId.slice(-6).replace(/[^a-zA-Z0-9]/g, '')

    // Fallback: if suffix becomes empty (all non-alphanumeric chars), use a simple hash
    if (!serverIdSuffix) {
      const hash = serverId.split('').reduce((acc, char) => acc + char.charCodeAt(0), 0)
      serverIdSuffix = hash.toString(36).slice(-6) || 'x'
    }
  }

  // Reserve space for suffix when calculating max base name length
  const SUFFIX_LENGTH = serverIdSuffix ? serverIdSuffix.length + 1 : 0 // +1 for underscore
  const MAX_BASE_LENGTH = 63 - SUFFIX_LENGTH

  // Combine server name and tool name
  let name = sanitizedTool
  if (!sanitizedTool.includes(sanitizedServer.slice(0, 7))) {
    name = `${sanitizedServer.slice(0, 7) || ''}-${sanitizedTool || ''}`
  }

  // Replace invalid characters with underscores or dashes
  // Keep a-z, A-Z, 0-9, underscores and dashes
  name = name.replace(/[^a-zA-Z0-9_-]/g, '_')

  // Ensure name starts with a letter or underscore (for valid JavaScript identifier)
  if (!/^[a-zA-Z]/.test(name)) {
    name = `tool-${name}`
  }

  // Remove consecutive underscores/dashes (optional improvement)
  name = name.replace(/[_-]{2,}/g, '_')

  // Truncate base name BEFORE adding suffix to ensure suffix is never cut off
  if (name.length > MAX_BASE_LENGTH) {
    name = name.slice(0, MAX_BASE_LENGTH)
  }

  // Handle edge case: ensure we still have a valid name if truncation left invalid chars at edges
  if (name.endsWith('_') || name.endsWith('-')) {
    name = name.slice(0, -1)
  }

  // Now append the suffix - it will always fit within 63 chars
  if (serverIdSuffix) {
    name = `${name}_${serverIdSuffix}`
  }

  return name
}

/**
 * Builds a valid JavaScript function name for MCP tool calls.
 * Format: mcp__{server_name}__{tool_name}
 *
 * @param serverName - The MCP server name
 * @param toolName - The tool name from the server
 * @returns A valid JS identifier in format mcp__{server}__{tool}, max 63 chars
 */
export function buildFunctionCallToolName(serverName: string, toolName: string): string {
  // Sanitize to valid JS identifier chars (alphanumeric + underscore only)
  const sanitize = (str: string): string =>
    str
      .trim()
      .replace(/[^a-zA-Z0-9]/g, '_') // Replace all non-alphanumeric with underscore
      .replace(/_{2,}/g, '_') // Collapse multiple underscores
      .replace(/^_+|_+$/g, '') // Trim leading/trailing underscores

  const server = sanitize(serverName).slice(0, 20) // Keep server name short
  const tool = sanitize(toolName).slice(0, 35) // More room for tool name

  let name = `mcp__${server}__${tool}`

  // Ensure max 63 chars and clean trailing underscores
  if (name.length > 63) {
    name = name.slice(0, 63).replace(/_+$/, '')
  }

  return name
}
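
A quick sanity check on the length budget in the rewritten helper (a minimal sketch, not part of the diff; the constant names below are illustrative): the server segment is capped at 20 characters, the tool segment at 35, and the fixed `mcp__` prefix plus `__` separator add 7 more, so the worst case is 62 characters and the final 63-character clamp only has to catch unusual inputs.

```ts
// Illustrative only: mirrors the caps used in the new buildFunctionCallToolName.
const MAX_SERVER_CHARS = 20
const MAX_TOOL_CHARS = 35
const FIXED_CHARS = 'mcp__'.length + '__'.length // 7

console.log(MAX_SERVER_CHARS + MAX_TOOL_CHARS + FIXED_CHARS) // 62 <= 63, matching the assertions in the tests above
```
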
19
src/main/utils/system.ts
Normal file
@ -0,0 +1,19 @@
import os from 'node:os'

import { isMac, isWin } from '@main/constant'

export const getDeviceType = () => (isMac ? 'mac' : isWin ? 'windows' : 'linux')

export const getHostname = () => os.hostname()

export const getCpuName = () => {
  try {
    const cpus = os.cpus()
    if (!cpus || cpus.length === 0 || !cpus[0].model) {
      return 'Unknown CPU'
    }
    return cpus[0].model
  } catch {
    return 'Unknown CPU'
  }
}
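
These helpers appear to back the `Ovms_IsSupported` IPC channel exposed by the preload layer below; the platform check removed from the renderer's `ProviderList` (Windows plus an Intel CPU) presumably moves into a main-process handler along these lines. That handler is not part of this diff, so the following is only a sketch under that assumption, and the import paths and actual registration may differ.

```ts
import { ipcMain } from 'electron'

// Assumed import paths; IpcChannel.Ovms_IsSupported is referenced in the preload hunk below.
import { IpcChannel } from '@shared/IpcChannel'
import { getCpuName, getDeviceType } from '@main/utils/system'

// Hypothetical handler mirroring the old renderer-side check.
ipcMain.handle(IpcChannel.Ovms_IsSupported, () => {
  return getDeviceType() === 'windows' && getCpuName().toLowerCase().includes('intel')
})
```
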
@ -346,6 +346,7 @@ const api = {
    ipcRenderer.invoke(IpcChannel.VertexAI_ClearAuthCache, projectId, clientEmail)
  },
  ovms: {
    isSupported: (): Promise<boolean> => ipcRenderer.invoke(IpcChannel.Ovms_IsSupported),
    addModel: (modelName: string, modelId: string, modelSource: string, task: string) =>
      ipcRenderer.invoke(IpcChannel.Ovms_AddModel, modelName, modelId, modelSource, task),
    stopAddModel: () => ipcRenderer.invoke(IpcChannel.Ovms_StopAddModel),

@ -65,6 +65,11 @@ export class ZhipuAPIClient extends OpenAIAPIClient {

  public async listModels(): Promise<OpenAI.Models.Model[]> {
    const models = [
      'glm-4.7',
      'glm-4.6',
      'glm-4.6v',
      'glm-4.6v-flash',
      'glm-4.6v-flashx',
      'glm-4.5',
      'glm-4.5-x',
      'glm-4.5-air',

@ -31,7 +31,8 @@ const STATIC_PROVIDER_MAPPING: Record<string, ProviderId> = {
  'azure-openai': 'azure', // Azure OpenAI -> azure
  'openai-response': 'openai', // OpenAI Responses -> openai
  grok: 'xai', // Grok -> xai
  copilot: 'github-copilot-openai-compatible'
  copilot: 'github-copilot-openai-compatible',
  tokenflux: 'openrouter' // TokenFlux -> openrouter (fully compatible)
}

/**

@ -617,6 +617,24 @@ export const SYSTEM_MODELS: Record<SystemProviderId | 'defaultModel', Model[]> =
    name: 'GLM-4.6',
    group: 'GLM-4.6'
  },
  {
    id: 'glm-4.6v',
    provider: 'zhipu',
    name: 'GLM-4.6V',
    group: 'GLM-4.6V'
  },
  {
    id: 'glm-4.6v-flash',
    provider: 'zhipu',
    name: 'GLM-4.6V-Flash',
    group: 'GLM-4.6V'
  },
  {
    id: 'glm-4.6v-flashx',
    provider: 'zhipu',
    name: 'GLM-4.6V-FlashX',
    group: 'GLM-4.6V'
  },
  {
    id: 'glm-4.7',
    provider: 'zhipu',

@ -200,7 +200,8 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
    name: 'TokenFlux',
    type: 'openai',
    apiKey: '',
    apiHost: 'https://tokenflux.ai',
    apiHost: 'https://api.tokenflux.ai/openai/v1',
    anthropicApiHost: 'https://api.tokenflux.ai/anthropic',
    models: SYSTEM_MODELS.tokenflux,
    isSystem: true,
    enabled: false

@ -13,6 +13,7 @@ import { useAppDispatch } from '@renderer/store'
import { useAppSelector } from '@renderer/store'
import { handleSaveData } from '@renderer/store'
import { selectMemoryConfig } from '@renderer/store/memory'
import { setIsOvmsSupported } from '@renderer/store/runtime'
import {
  type ToolPermissionRequestPayload,
  type ToolPermissionResultPayload,
@ -274,4 +275,17 @@ export function useAppInit() {
  useEffect(() => {
    checkDataLimit()
  }, [])

  useEffect(() => {
    // Check once when initing
    window.api.ovms
      .isSupported()
      .then((result) => {
        dispatch(setIsOvmsSupported(result))
      })
      .catch((e) => {
        logger.error('Failed to check isOvmsSupported. Fallback to false.', e as Error)
        dispatch(setIsOvmsSupported(false))
      })
  }, [dispatch])
}

@ -9,6 +9,7 @@ import { useTranslation } from 'react-i18next'

const logger = loggerService.withContext('useActivityDirectoryPanel')
const MAX_FILE_RESULTS = 500
const MAX_SEARCH_RESULTS = 20
const areFileListsEqual = (prev: string[], next: string[]) => {
  if (prev === next) return true
  if (prev.length !== next.length) return false
@ -193,11 +194,11 @@ export const useActivityDirectoryPanel = (params: Params, role: 'button' | 'mana
    try {
      const files = await window.api.file.listDirectory(dirPath, {
        recursive: true,
        maxDepth: 4,
        maxDepth: 10,
        includeHidden: false,
        includeFiles: true,
        includeDirectories: true,
        maxEntries: MAX_FILE_RESULTS,
        maxEntries: MAX_SEARCH_RESULTS,
        searchPattern: searchPattern || '.'
      })

@ -4,6 +4,7 @@ import { useTemporaryValue } from '@renderer/hooks/useTemporaryValue'
import store from '@renderer/store'
import { messageBlocksSelectors } from '@renderer/store/messageBlock'
import { Check } from 'lucide-react'
import MarkdownIt from 'markdown-it'
import React, { memo, useCallback } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
@ -22,18 +23,26 @@ const Table: React.FC<Props> = ({ children, node, blockId }) => {
  const { t } = useTranslation()
  const [copied, setCopied] = useTemporaryValue(false, 2000)

  const handleCopyTable = useCallback(() => {
  const handleCopyTable = useCallback(async () => {
    const tableMarkdown = extractTableMarkdown(blockId ?? '', node?.position)
    if (!tableMarkdown) return

    navigator.clipboard
      .writeText(tableMarkdown)
      .then(() => {
        setCopied(true)
      })
      .catch((error) => {
        window.toast?.error(`${t('message.copy.failed')}: ${error}`)
      })
    try {
      const tableHtml = convertMarkdownTableToHtml(tableMarkdown)

      if (navigator.clipboard && window.ClipboardItem) {
        const clipboardItem = new ClipboardItem({
          'text/plain': new Blob([tableMarkdown], { type: 'text/plain' }),
          'text/html': new Blob([tableHtml], { type: 'text/html' })
        })
        await navigator.clipboard.write([clipboardItem])
      } else {
        await navigator.clipboard.writeText(tableMarkdown)
      }
      setCopied(true)
    } catch (error) {
      window.toast?.error(`${t('message.copy.failed')}: ${error}`)
    }
  }, [blockId, node?.position, setCopied, t])

  return (
@ -60,7 +69,6 @@ export function extractTableMarkdown(blockId: string, position: any): string {
  if (!position || !blockId) return ''

  const block = messageBlocksSelectors.selectById(store.getState(), blockId)

  if (!block || !('content' in block) || typeof block.content !== 'string') return ''

  const { start, end } = position
@ -71,6 +79,16 @@ export function extractTableMarkdown(blockId: string, position: any): string {
  return tableLines.join('\n').trim()
}

function convertMarkdownTableToHtml(markdownTable: string): string {
  const md = new MarkdownIt({
    html: true,
    breaks: false,
    linkify: false
  })

  return md.render(markdownTable)
}

const TableWrapper = styled.div`
  position: relative;

@ -9,6 +9,7 @@ import {
import { DeleteIcon, EditIcon } from '@renderer/components/Icons'
import { ProviderAvatar } from '@renderer/components/ProviderAvatar'
import { useAllProviders, useProviders } from '@renderer/hooks/useProvider'
import { useRuntime } from '@renderer/hooks/useRuntime'
import { useTimer } from '@renderer/hooks/useTimer'
import ImageStorage from '@renderer/services/ImageStorage'
import type { Provider, ProviderType } from '@renderer/types'
@ -31,8 +32,6 @@ import UrlSchemaInfoPopup from './UrlSchemaInfoPopup'
const logger = loggerService.withContext('ProviderList')

const BUTTON_WRAPPER_HEIGHT = 50
const systemType = await window.api.system.getDeviceType()
const cpuName = await window.api.system.getCpuName()

const ProviderList: FC = () => {
  const [searchParams, setSearchParams] = useSearchParams()
@ -45,6 +44,7 @@ const ProviderList: FC = () => {
  const [dragging, setDragging] = useState(false)
  const [providerLogos, setProviderLogos] = useState<Record<string, string>>({})
  const listRef = useRef<DraggableVirtualListRef>(null)
  const { isOvmsSupported } = useRuntime()

  const setSelectedProvider = useCallback((provider: Provider) => {
    startTransition(() => _setSelectedProvider(provider))
@ -279,7 +279,7 @@ const ProviderList: FC = () => {
  }

  const filteredProviders = providers.filter((provider) => {
    if (provider.id === 'ovms' && (systemType !== 'windows' || !cpuName.toLowerCase().includes('intel'))) {
    if (provider.id === 'ovms' && !isOvmsSupported) {
      return false
    }

@ -82,7 +82,8 @@ const ANTHROPIC_COMPATIBLE_PROVIDER_IDS = [
  SystemProviderIds.qiniu,
  SystemProviderIds.dmxapi,
  SystemProviderIds.mimo,
  SystemProviderIds.openrouter
  SystemProviderIds.openrouter,
  SystemProviderIds.tokenflux
] as const
type AnthropicCompatibleProviderId = (typeof ANTHROPIC_COMPATIBLE_PROVIDER_IDS)[number]

@ -83,7 +83,7 @@ const persistedReducer = persistReducer(
  {
    key: 'cherry-studio',
    storage,
    version: 190,
    version: 191,
    blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs', 'toolPermissions'],
    migrate
  },

@ -3131,6 +3131,21 @@ const migrateConfig = {
      logger.error('migrate 190 error', error as Error)
      return state
    }
  },
  '191': (state: RootState) => {
    try {
      state.llm.providers.forEach((provider) => {
        if (provider.id === 'tokenflux') {
          provider.apiHost = 'https://api.tokenflux.ai/openai/v1'
          provider.anthropicApiHost = 'https://api.tokenflux.ai/anthropic'
        }
      })
      logger.info('migrate 191 success')
      return state
    } catch (error) {
      logger.error('migrate 191 error', error as Error)
      return state
    }
  }
}
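
For context, a minimal sketch of how a migration map like `migrateConfig` is typically handed to redux-persist (the actual `migrate` definition sits outside this hunk, so the exact call is an assumption): bumping `version` from 190 to 191 is what causes the new `'191'` entry to run for any persisted state still at an older version.

```ts
import { createMigrate } from 'redux-persist'

// Assumed wiring: redux-persist compares the persisted version against `version: 191`
// above and replays every migration entry newer than the stored version, so existing
// TokenFlux users pick up the new apiHost/anthropicApiHost on next launch.
const migrate = createMigrate(migrateConfig as any, { debug: false })
```
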
@ -72,6 +72,7 @@ export interface RuntimeState {
  // chat: ChatState
  // websearch: WebSearchState
  placeHolder: string
  isOvmsSupported: boolean | undefined
}

// export interface ExportState {
@ -115,7 +116,8 @@ const initialState: RuntimeState = {
  // websearch: {
  //   activeSearches: {}
  // },
  placeHolder: ''
  placeHolder: '',
  isOvmsSupported: undefined
}

const runtimeSlice = createSlice({
@ -161,6 +163,9 @@ const runtimeSlice = createSlice({
  // setExportState: (state, action: PayloadAction<Partial<ExportState>>) => {
  //   state.export = { ...state.export, ...action.payload }
  // },
  setIsOvmsSupported: (state, action: PayloadAction<boolean>) => {
    state.isOvmsSupported = action.payload
  },
  // // Chat related actions
  // toggleMultiSelectMode: (state, action: PayloadAction<boolean>) => {
  //   state.chat.isMultiSelectMode = action.payload
@ -227,6 +232,7 @@ export const {
  // setResourcesPath,
  // setUpdateState,
  // setExportState,
  setIsOvmsSupported,
  // // Chat related actions
  // toggleMultiSelectMode,
  // setSelectedMessageIds,