mirror of https://github.com/CherryHQ/cherry-studio.git
synced 2025-12-19 14:41:24 +08:00
refactor: rewrite filesystem MCP server with improved tool set (#11937)
* refactor: rewrite filesystem MCP server with new tool set

- Replace existing filesystem MCP with modular architecture
- Implement 6 new tools: glob, ls, grep, read, write, delete
- Add comprehensive TypeScript types and Zod schemas
- Maintain security with path validation and allowed directories
- Improve error handling and user feedback
- Add result limits for performance (100 files/matches max)
- Format output with clear, helpful messages
- Keep backward compatibility with existing import patterns

BREAKING CHANGE: Tools renamed from snake_case to lowercase
- read_file → read
- write_file → write
- list_directory → ls
- search_files → glob
- New tools: grep, delete
- Removed: edit_file, create_directory, directory_tree, move_file, get_file_info

* 🐛 fix: remove filesystem allowed directories restriction

* 🐛 fix: relax binary detection for text files

* ✨ feat: add edit tool with fuzzy matching to filesystem MCP server

- Add edit tool with 9 fallback replacers from opencode for robust string replacement (SimpleReplacer, LineTrimmedReplacer, BlockAnchorReplacer, WhitespaceNormalizedReplacer, etc.)
- Add Levenshtein distance algorithm for similarity matching
- Improve descriptions for all tools (read, write, glob, grep, ls, delete) following opencode patterns for better LLM guidance
- Register edit tool in server and export from tools index

* ♻️ refactor: replace allowedDirectories with baseDir in filesystem MCP server

- Change server to use single baseDir (from WORKSPACE_ROOT env or userData/workspace default)
- Remove list_allowed_directories tool as restriction mechanism is removed
- Add ripgrep integration for faster grep searches with JS fallback
- Simplify validatePath() by removing allowlist checks
- Display paths relative to baseDir in tool outputs

* 📝 docs: standardize filesystem MCP server tool descriptions

- Unify description format to bullet-point style across all tools
- Add absolute path requirement to ls, glob, grep schemas and descriptions
- Update glob and grep to output absolute paths instead of relative paths
- Add missing error case documentation for edit tool (old_string === new_string)
- Standardize optional path parameter descriptions

* ♻️ refactor: use ripgrep for glob tool and extract shared utilities

- Extract shared ripgrep utilities (runRipgrep, getRipgrepAddonPath) to types.ts
- Rewrite glob tool to use `rg --files --glob` for reliable file matching
- Update grep tool to import shared ripgrep utilities

* 🐛 fix: handle ripgrep exit code 2 with valid results in glob tool

- Process ripgrep stdout when content exists, regardless of exit code
- Exit code 2 can indicate partial errors while still returning valid results
- Remove fallback directory listing (had buggy regex for root-level files)
- Update tool description to clarify patterns without "/" match at any depth

* 🔥 chore: remove filesystem.ts.backup file

Remove unnecessary backup file from mcpServers directory

* 🐛 fix: use correct default workspace path in filesystem MCP server

Change default baseDir from userData/workspace to userData/Data/Workspace to match the app's data storage convention (Data/Files, Data/Notes, etc.)

Addresses PR #11937 review feedback.

* 🐛 fix: pass WORKSPACE_ROOT to FileSystemServer constructor

The envs object passed to createInMemoryMCPServer was not being used for the filesystem server. Now WORKSPACE_ROOT is passed as a constructor parameter, following the same pattern as other MCP servers.

* feat: add link to documentation for MCP server configuration requirement

Wrap the configuration requirement tag in a link to the documentation for better user guidance on MCP server settings.

---------

Co-authored-by: kangfenmao <kangfenmao@qq.com>
This commit is contained in:
parent bdfda7afb1
commit 1d5dafa325
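
The commit message above cites a Levenshtein distance algorithm behind the edit tool's similarity matching. That helper sits in types.ts, which is not part of the diff below, so the following is only a minimal sketch of the standard dynamic-programming algorithm it refers to (the function name is an assumption):

// Minimal sketch, not the project's actual implementation: single-row
// dynamic-programming Levenshtein distance, as referenced by the commit message.
function levenshtein(a: string, b: string): number {
  const dp: number[] = Array.from({ length: b.length + 1 }, (_, j) => j)
  for (let i = 1; i <= a.length; i++) {
    let prev = dp[0] // holds dp[i-1][j-1]
    dp[0] = i
    for (let j = 1; j <= b.length; j++) {
      const tmp = dp[j]
      dp[j] = Math.min(
        dp[j] + 1, // delete a[i-1]
        dp[j - 1] + 1, // insert b[j-1]
        prev + (a[i - 1] === b[j - 1] ? 0 : 1) // substitute
      )
      prev = tmp
    }
  }
  return dp[b.length]
}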
@@ -36,7 +36,7 @@ export function createInMemoryMCPServer(
       return new FetchServer().server
     }
     case BuiltinMCPServerNames.filesystem: {
-      return new FileSystemServer(args).server
+      return new FileSystemServer(envs.WORKSPACE_ROOT).server
     }
     case BuiltinMCPServerNames.difyKnowledge: {
       const difyKey = envs.DIFY_KEY
@@ -1,652 +0,0 @@
// port https://github.com/modelcontextprotocol/servers/blob/main/src/filesystem/index.ts

import { loggerService } from '@logger'
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
import { createTwoFilesPatch } from 'diff'
import fs from 'fs/promises'
import { minimatch } from 'minimatch'
import os from 'os'
import path from 'path'
import * as z from 'zod'

const logger = loggerService.withContext('MCP:FileSystemServer')

// Normalize all paths consistently
function normalizePath(p: string): string {
  return path.normalize(p)
}

function expandHome(filepath: string): string {
  if (filepath.startsWith('~/') || filepath === '~') {
    return path.join(os.homedir(), filepath.slice(1))
  }
  return filepath
}

// Security utilities
async function validatePath(allowedDirectories: string[], requestedPath: string): Promise<string> {
  const expandedPath = expandHome(requestedPath)
  const absolute = path.isAbsolute(expandedPath)
    ? path.resolve(expandedPath)
    : path.resolve(process.cwd(), expandedPath)

  const normalizedRequested = normalizePath(absolute)

  // Check if path is within allowed directories
  const isAllowed = allowedDirectories.some((dir) => normalizedRequested.startsWith(dir))
  if (!isAllowed) {
    throw new Error(
      `Access denied - path outside allowed directories: ${absolute} not in ${allowedDirectories.join(', ')}`
    )
  }

  // Handle symlinks by checking their real path
  try {
    const realPath = await fs.realpath(absolute)
    const normalizedReal = normalizePath(realPath)
    const isRealPathAllowed = allowedDirectories.some((dir) => normalizedReal.startsWith(dir))
    if (!isRealPathAllowed) {
      throw new Error('Access denied - symlink target outside allowed directories')
    }
    return realPath
  } catch (error) {
    // For new files that don't exist yet, verify parent directory
    const parentDir = path.dirname(absolute)
    try {
      const realParentPath = await fs.realpath(parentDir)
      const normalizedParent = normalizePath(realParentPath)
      const isParentAllowed = allowedDirectories.some((dir) => normalizedParent.startsWith(dir))
      if (!isParentAllowed) {
        throw new Error('Access denied - parent directory outside allowed directories')
      }
      return absolute
    } catch {
      throw new Error(`Parent directory does not exist: ${parentDir}`)
    }
  }
}

// Schema definitions
const ReadFileArgsSchema = z.object({
  path: z.string()
})

const ReadMultipleFilesArgsSchema = z.object({
  paths: z.array(z.string())
})

const WriteFileArgsSchema = z.object({
  path: z.string(),
  content: z.string()
})

const EditOperation = z.object({
  oldText: z.string().describe('Text to search for - must match exactly'),
  newText: z.string().describe('Text to replace with')
})

const EditFileArgsSchema = z.object({
  path: z.string(),
  edits: z.array(EditOperation),
  dryRun: z.boolean().default(false).describe('Preview changes using git-style diff format')
})

const CreateDirectoryArgsSchema = z.object({
  path: z.string()
})

const ListDirectoryArgsSchema = z.object({
  path: z.string()
})

const DirectoryTreeArgsSchema = z.object({
  path: z.string()
})

const MoveFileArgsSchema = z.object({
  source: z.string(),
  destination: z.string()
})

const SearchFilesArgsSchema = z.object({
  path: z.string(),
  pattern: z.string(),
  excludePatterns: z.array(z.string()).optional().default([])
})

const GetFileInfoArgsSchema = z.object({
  path: z.string()
})

interface FileInfo {
  size: number
  created: Date
  modified: Date
  accessed: Date
  isDirectory: boolean
  isFile: boolean
  permissions: string
}

// Tool implementations
async function getFileStats(filePath: string): Promise<FileInfo> {
  const stats = await fs.stat(filePath)
  return {
    size: stats.size,
    created: stats.birthtime,
    modified: stats.mtime,
    accessed: stats.atime,
    isDirectory: stats.isDirectory(),
    isFile: stats.isFile(),
    permissions: stats.mode.toString(8).slice(-3)
  }
}

async function searchFiles(
  allowedDirectories: string[],
  rootPath: string,
  pattern: string,
  excludePatterns: string[] = []
): Promise<string[]> {
  const results: string[] = []

  async function search(currentPath: string) {
    const entries = await fs.readdir(currentPath, { withFileTypes: true })

    for (const entry of entries) {
      const fullPath = path.join(currentPath, entry.name)

      try {
        // Validate each path before processing
        await validatePath(allowedDirectories, fullPath)

        // Check if path matches any exclude pattern
        const relativePath = path.relative(rootPath, fullPath)
        const shouldExclude = excludePatterns.some((pattern) => {
          const globPattern = pattern.includes('*') ? pattern : `**/${pattern}/**`
          return minimatch(relativePath, globPattern, { dot: true })
        })

        if (shouldExclude) {
          continue
        }

        if (entry.name.toLowerCase().includes(pattern.toLowerCase())) {
          results.push(fullPath)
        }

        if (entry.isDirectory()) {
          await search(fullPath)
        }
      } catch (error) {
        // Skip invalid paths during search
      }
    }
  }

  await search(rootPath)
  return results
}

// file editing and diffing utilities
function normalizeLineEndings(text: string): string {
  return text.replace(/\r\n/g, '\n')
}

function createUnifiedDiff(originalContent: string, newContent: string, filepath: string = 'file'): string {
  // Ensure consistent line endings for diff
  const normalizedOriginal = normalizeLineEndings(originalContent)
  const normalizedNew = normalizeLineEndings(newContent)

  return createTwoFilesPatch(filepath, filepath, normalizedOriginal, normalizedNew, 'original', 'modified')
}

async function applyFileEdits(
  filePath: string,
  edits: Array<{ oldText: string; newText: string }>,
  dryRun = false
): Promise<string> {
  // Read file content and normalize line endings
  const content = normalizeLineEndings(await fs.readFile(filePath, 'utf-8'))

  // Apply edits sequentially
  let modifiedContent = content
  for (const edit of edits) {
    const normalizedOld = normalizeLineEndings(edit.oldText)
    const normalizedNew = normalizeLineEndings(edit.newText)

    // If exact match exists, use it
    if (modifiedContent.includes(normalizedOld)) {
      modifiedContent = modifiedContent.replace(normalizedOld, normalizedNew)
      continue
    }

    // Otherwise, try line-by-line matching with flexibility for whitespace
    const oldLines = normalizedOld.split('\n')
    const contentLines = modifiedContent.split('\n')
    let matchFound = false

    for (let i = 0; i <= contentLines.length - oldLines.length; i++) {
      const potentialMatch = contentLines.slice(i, i + oldLines.length)

      // Compare lines with normalized whitespace
      const isMatch = oldLines.every((oldLine, j) => {
        const contentLine = potentialMatch[j]
        return oldLine.trim() === contentLine.trim()
      })

      if (isMatch) {
        // Preserve original indentation of first line
        const originalIndent = contentLines[i].match(/^\s*/)?.[0] || ''
        const newLines = normalizedNew.split('\n').map((line, j) => {
          if (j === 0) return originalIndent + line.trimStart()
          // For subsequent lines, try to preserve relative indentation
          const oldIndent = oldLines[j]?.match(/^\s*/)?.[0] || ''
          const newIndent = line.match(/^\s*/)?.[0] || ''
          if (oldIndent && newIndent) {
            const relativeIndent = newIndent.length - oldIndent.length
            return originalIndent + ' '.repeat(Math.max(0, relativeIndent)) + line.trimStart()
          }
          return line
        })

        contentLines.splice(i, oldLines.length, ...newLines)
        modifiedContent = contentLines.join('\n')
        matchFound = true
        break
      }
    }

    if (!matchFound) {
      throw new Error(`Could not find exact match for edit:\n${edit.oldText}`)
    }
  }

  // Create unified diff
  const diff = createUnifiedDiff(content, modifiedContent, filePath)

  // Format diff with appropriate number of backticks
  let numBackticks = 3
  while (diff.includes('`'.repeat(numBackticks))) {
    numBackticks++
  }
  const formattedDiff = `${'`'.repeat(numBackticks)}diff\n${diff}${'`'.repeat(numBackticks)}\n\n`

  if (!dryRun) {
    await fs.writeFile(filePath, modifiedContent, 'utf-8')
  }

  return formattedDiff
}

class FileSystemServer {
  public server: Server
  private allowedDirectories: string[]

  constructor(allowedDirs: string[]) {
    if (!Array.isArray(allowedDirs) || allowedDirs.length === 0) {
      throw new Error('No allowed directories provided, please specify at least one directory in args')
    }

    this.allowedDirectories = allowedDirs.map((dir) => normalizePath(path.resolve(expandHome(dir))))

    // Validate that all directories exist and are accessible
    this.validateDirs().catch((error) => {
      logger.error('Error validating allowed directories:', error)
      throw new Error(`Error validating allowed directories: ${error}`)
    })

    this.server = new Server(
      {
        name: 'secure-filesystem-server',
        version: '0.2.0'
      },
      {
        capabilities: {
          tools: {}
        }
      }
    )
    this.initialize()
  }

  async validateDirs() {
    // Validate that all directories exist and are accessible
    await Promise.all(
      this.allowedDirectories.map(async (dir) => {
        try {
          const stats = await fs.stat(expandHome(dir))
          if (!stats.isDirectory()) {
            logger.error(`Error: ${dir} is not a directory`)
            throw new Error(`Error: ${dir} is not a directory`)
          }
        } catch (error: any) {
          logger.error(`Error accessing directory ${dir}:`, error)
          throw new Error(`Error accessing directory ${dir}:`, error)
        }
      })
    )
  }

  initialize() {
    // Tool handlers
    this.server.setRequestHandler(ListToolsRequestSchema, async () => {
      return {
        tools: [
          {
            name: 'read_file',
            description:
              'Read the complete contents of a file from the file system. ' +
              'Handles various text encodings and provides detailed error messages ' +
              'if the file cannot be read. Use this tool when you need to examine ' +
              'the contents of a single file. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(ReadFileArgsSchema)
          },
          {
            name: 'read_multiple_files',
            description:
              'Read the contents of multiple files simultaneously. This is more ' +
              'efficient than reading files one by one when you need to analyze ' +
              "or compare multiple files. Each file's content is returned with its " +
              "path as a reference. Failed reads for individual files won't stop " +
              'the entire operation. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(ReadMultipleFilesArgsSchema)
          },
          {
            name: 'write_file',
            description:
              'Create a new file or completely overwrite an existing file with new content. ' +
              'Use with caution as it will overwrite existing files without warning. ' +
              'Handles text content with proper encoding. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(WriteFileArgsSchema)
          },
          {
            name: 'edit_file',
            description:
              'Make line-based edits to a text file. Each edit replaces exact line sequences ' +
              'with new content. Returns a git-style diff showing the changes made. ' +
              'Only works within allowed directories.',
            inputSchema: z.toJSONSchema(EditFileArgsSchema)
          },
          {
            name: 'create_directory',
            description:
              'Create a new directory or ensure a directory exists. Can create multiple ' +
              'nested directories in one operation. If the directory already exists, ' +
              'this operation will succeed silently. Perfect for setting up directory ' +
              'structures for projects or ensuring required paths exist. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(CreateDirectoryArgsSchema)
          },
          {
            name: 'list_directory',
            description:
              'Get a detailed listing of all files and directories in a specified path. ' +
              'Results clearly distinguish between files and directories with [FILE] and [DIR] ' +
              'prefixes. This tool is essential for understanding directory structure and ' +
              'finding specific files within a directory. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(ListDirectoryArgsSchema)
          },
          {
            name: 'directory_tree',
            description:
              'Get a recursive tree view of files and directories as a JSON structure. ' +
              "Each entry includes 'name', 'type' (file/directory), and 'children' for directories. " +
              'Files have no children array, while directories always have a children array (which may be empty). ' +
              'The output is formatted with 2-space indentation for readability. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(DirectoryTreeArgsSchema)
          },
          {
            name: 'move_file',
            description:
              'Move or rename files and directories. Can move files between directories ' +
              'and rename them in a single operation. If the destination exists, the ' +
              'operation will fail. Works across different directories and can be used ' +
              'for simple renaming within the same directory. Both source and destination must be within allowed directories.',
            inputSchema: z.toJSONSchema(MoveFileArgsSchema)
          },
          {
            name: 'search_files',
            description:
              'Recursively search for files and directories matching a pattern. ' +
              'Searches through all subdirectories from the starting path. The search ' +
              'is case-insensitive and matches partial names. Returns full paths to all ' +
              "matching items. Great for finding files when you don't know their exact location. " +
              'Only searches within allowed directories.',
            inputSchema: z.toJSONSchema(SearchFilesArgsSchema)
          },
          {
            name: 'get_file_info',
            description:
              'Retrieve detailed metadata about a file or directory. Returns comprehensive ' +
              'information including size, creation time, last modified time, permissions, ' +
              'and type. This tool is perfect for understanding file characteristics ' +
              'without reading the actual content. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(GetFileInfoArgsSchema)
          },
          {
            name: 'list_allowed_directories',
            description:
              'Returns the list of directories that this server is allowed to access. ' +
              'Use this to understand which directories are available before trying to access files.',
            inputSchema: {
              type: 'object',
              properties: {},
              required: []
            }
          }
        ]
      }
    })

    this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
      try {
        const { name, arguments: args } = request.params

        switch (name) {
          case 'read_file': {
            const parsed = ReadFileArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for read_file: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            const content = await fs.readFile(validPath, 'utf-8')
            return {
              content: [{ type: 'text', text: content }]
            }
          }

          case 'read_multiple_files': {
            const parsed = ReadMultipleFilesArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for read_multiple_files: ${parsed.error}`)
            }
            const results = await Promise.all(
              parsed.data.paths.map(async (filePath: string) => {
                try {
                  const validPath = await validatePath(this.allowedDirectories, filePath)
                  const content = await fs.readFile(validPath, 'utf-8')
                  return `${filePath}:\n${content}\n`
                } catch (error) {
                  const errorMessage = error instanceof Error ? error.message : String(error)
                  return `${filePath}: Error - ${errorMessage}`
                }
              })
            )
            return {
              content: [{ type: 'text', text: results.join('\n---\n') }]
            }
          }

          case 'write_file': {
            const parsed = WriteFileArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for write_file: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            await fs.writeFile(validPath, parsed.data.content, 'utf-8')
            return {
              content: [{ type: 'text', text: `Successfully wrote to ${parsed.data.path}` }]
            }
          }

          case 'edit_file': {
            const parsed = EditFileArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for edit_file: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            const result = await applyFileEdits(validPath, parsed.data.edits, parsed.data.dryRun)
            return {
              content: [{ type: 'text', text: result }]
            }
          }

          case 'create_directory': {
            const parsed = CreateDirectoryArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for create_directory: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            await fs.mkdir(validPath, { recursive: true })
            return {
              content: [{ type: 'text', text: `Successfully created directory ${parsed.data.path}` }]
            }
          }

          case 'list_directory': {
            const parsed = ListDirectoryArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for list_directory: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            const entries = await fs.readdir(validPath, { withFileTypes: true })
            const formatted = entries
              .map((entry) => `${entry.isDirectory() ? '[DIR]' : '[FILE]'} ${entry.name}`)
              .join('\n')
            return {
              content: [{ type: 'text', text: formatted }]
            }
          }

          case 'directory_tree': {
            const parsed = DirectoryTreeArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for directory_tree: ${parsed.error}`)
            }

            interface TreeEntry {
              name: string
              type: 'file' | 'directory'
              children?: TreeEntry[]
            }

            async function buildTree(allowedDirectories: string[], currentPath: string): Promise<TreeEntry[]> {
              const validPath = await validatePath(allowedDirectories, currentPath)
              const entries = await fs.readdir(validPath, { withFileTypes: true })
              const result: TreeEntry[] = []

              for (const entry of entries) {
                const entryData: TreeEntry = {
                  name: entry.name,
                  type: entry.isDirectory() ? 'directory' : 'file'
                }

                if (entry.isDirectory()) {
                  const subPath = path.join(currentPath, entry.name)
                  entryData.children = await buildTree(allowedDirectories, subPath)
                }

                result.push(entryData)
              }

              return result
            }

            const treeData = await buildTree(this.allowedDirectories, parsed.data.path)
            return {
              content: [
                {
                  type: 'text',
                  text: JSON.stringify(treeData, null, 2)
                }
              ]
            }
          }

          case 'move_file': {
            const parsed = MoveFileArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for move_file: ${parsed.error}`)
            }
            const validSourcePath = await validatePath(this.allowedDirectories, parsed.data.source)
            const validDestPath = await validatePath(this.allowedDirectories, parsed.data.destination)
            await fs.rename(validSourcePath, validDestPath)
            return {
              content: [
                { type: 'text', text: `Successfully moved ${parsed.data.source} to ${parsed.data.destination}` }
              ]
            }
          }

          case 'search_files': {
            const parsed = SearchFilesArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for search_files: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            const results = await searchFiles(
              this.allowedDirectories,
              validPath,
              parsed.data.pattern,
              parsed.data.excludePatterns
            )
            return {
              content: [{ type: 'text', text: results.length > 0 ? results.join('\n') : 'No matches found' }]
            }
          }

          case 'get_file_info': {
            const parsed = GetFileInfoArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for get_file_info: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            const info = await getFileStats(validPath)
            return {
              content: [
                {
                  type: 'text',
                  text: Object.entries(info)
                    .map(([key, value]) => `${key}: ${value}`)
                    .join('\n')
                }
              ]
            }
          }

          case 'list_allowed_directories': {
            return {
              content: [
                {
                  type: 'text',
                  text: `Allowed directories:\n${this.allowedDirectories.join('\n')}`
                }
              ]
            }
          }

          default:
            throw new Error(`Unknown tool: ${name}`)
        }
      } catch (error) {
        const errorMessage = error instanceof Error ? error.message : String(error)
        return {
          content: [{ type: 'text', text: `Error: ${errorMessage}` }],
          isError: true
        }
      }
    })
  }
}

export default FileSystemServer
2  src/main/mcpServers/filesystem/index.ts  Normal file
@@ -0,0 +1,2 @@
// Re-export FileSystemServer to maintain existing import pattern
export { default, FileSystemServer } from './server'
118  src/main/mcpServers/filesystem/server.ts  Normal file
@@ -0,0 +1,118 @@
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
import { app } from 'electron'
import fs from 'fs/promises'
import path from 'path'

import {
  deleteToolDefinition,
  editToolDefinition,
  globToolDefinition,
  grepToolDefinition,
  handleDeleteTool,
  handleEditTool,
  handleGlobTool,
  handleGrepTool,
  handleLsTool,
  handleReadTool,
  handleWriteTool,
  lsToolDefinition,
  readToolDefinition,
  writeToolDefinition
} from './tools'
import { logger } from './types'

export class FileSystemServer {
  public server: Server
  private baseDir: string

  constructor(baseDir?: string) {
    if (baseDir && path.isAbsolute(baseDir)) {
      this.baseDir = baseDir
      logger.info(`Using provided baseDir for filesystem MCP: ${baseDir}`)
    } else {
      const userData = app.getPath('userData')
      this.baseDir = path.join(userData, 'Data', 'Workspace')
      logger.info(`Using default workspace for filesystem MCP baseDir: ${this.baseDir}`)
    }

    this.server = new Server(
      {
        name: 'filesystem-server',
        version: '2.0.0'
      },
      {
        capabilities: {
          tools: {}
        }
      }
    )

    this.initialize()
  }

  async initialize() {
    try {
      await fs.mkdir(this.baseDir, { recursive: true })
    } catch (error) {
      logger.error('Failed to create filesystem MCP baseDir', { error, baseDir: this.baseDir })
    }

    // Register tool list handler
    this.server.setRequestHandler(ListToolsRequestSchema, async () => {
      return {
        tools: [
          globToolDefinition,
          lsToolDefinition,
          grepToolDefinition,
          readToolDefinition,
          editToolDefinition,
          writeToolDefinition,
          deleteToolDefinition
        ]
      }
    })

    // Register tool call handler
    this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
      try {
        const { name, arguments: args } = request.params

        switch (name) {
          case 'glob':
            return await handleGlobTool(args, this.baseDir)

          case 'ls':
            return await handleLsTool(args, this.baseDir)

          case 'grep':
            return await handleGrepTool(args, this.baseDir)

          case 'read':
            return await handleReadTool(args, this.baseDir)

          case 'edit':
            return await handleEditTool(args, this.baseDir)

          case 'write':
            return await handleWriteTool(args, this.baseDir)

          case 'delete':
            return await handleDeleteTool(args, this.baseDir)

          default:
            throw new Error(`Unknown tool: ${name}`)
        }
      } catch (error) {
        const errorMessage = error instanceof Error ? error.message : String(error)
        logger.error(`Tool execution error for ${request.params.name}:`, { error })
        return {
          content: [{ type: 'text', text: `Error: ${errorMessage}` }],
          isError: true
        }
      }
    })
  }
}

export default FileSystemServer
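
Every tool below validates its path through validatePath(requestedPath, baseDir) from ../types; types.ts itself is not included in this diff. Judging from the call sites (awaited, returning a string) and the commit note that allowlist checks were removed, a plausible sketch of its shape (names and behavior are assumptions, not confirmed by the source) is:

// Hypothetical sketch of validatePath from '../types' (the real file is not in this diff).
// Call sites show the signature: (requestedPath, baseDir) => Promise<string>.
import path from 'path'

export async function validatePath(requestedPath: string, baseDir: string): Promise<string> {
  // Resolve relative inputs against the single baseDir; the commit history says the
  // old allowed-directories restriction was removed, so no allowlist check here.
  const absolute = path.isAbsolute(requestedPath) ? requestedPath : path.resolve(baseDir, requestedPath)
  return path.normalize(absolute)
}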
93  src/main/mcpServers/filesystem/tools/delete.ts  Normal file
@@ -0,0 +1,93 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { logger, validatePath } from '../types'

// Schema definition
export const DeleteToolSchema = z.object({
  path: z.string().describe('The path to the file or directory to delete'),
  recursive: z.boolean().optional().describe('For directories, whether to delete recursively (default: false)')
})

// Tool definition with detailed description
export const deleteToolDefinition = {
  name: 'delete',
  description: `Deletes a file or directory from the filesystem.

CAUTION: This operation cannot be undone!

- For files: simply provide the path
- For empty directories: provide the path
- For non-empty directories: set recursive=true
- The path must be an absolute path, not a relative path
- Always verify the path before deleting to avoid data loss`,
  inputSchema: z.toJSONSchema(DeleteToolSchema)
}

// Handler implementation
export async function handleDeleteTool(args: unknown, baseDir: string) {
  const parsed = DeleteToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for delete: ${parsed.error}`)
  }

  const targetPath = parsed.data.path
  const validPath = await validatePath(targetPath, baseDir)
  const recursive = parsed.data.recursive || false

  // Check if path exists and get stats
  let stats
  try {
    stats = await fs.stat(validPath)
  } catch (error: any) {
    if (error.code === 'ENOENT') {
      throw new Error(`Path not found: ${targetPath}`)
    }
    throw error
  }

  const isDirectory = stats.isDirectory()
  const relativePath = path.relative(baseDir, validPath)

  // Perform deletion
  try {
    if (isDirectory) {
      if (recursive) {
        // Delete directory recursively
        await fs.rm(validPath, { recursive: true, force: true })
      } else {
        // Try to delete empty directory
        await fs.rmdir(validPath)
      }
    } else {
      // Delete file
      await fs.unlink(validPath)
    }
  } catch (error: any) {
    if (error.code === 'ENOTEMPTY') {
      throw new Error(`Directory not empty: ${targetPath}. Use recursive=true to delete non-empty directories.`)
    }
    throw new Error(`Failed to delete: ${error.message}`)
  }

  // Log the operation
  logger.info('Path deleted', {
    path: validPath,
    type: isDirectory ? 'directory' : 'file',
    recursive: isDirectory ? recursive : undefined
  })

  // Format output
  const itemType = isDirectory ? 'Directory' : 'File'
  const recursiveNote = isDirectory && recursive ? ' (recursive)' : ''

  return {
    content: [
      {
        type: 'text',
        text: `${itemType} deleted${recursiveNote}: ${relativePath}`
      }
    ]
  }
}
130  src/main/mcpServers/filesystem/tools/edit.ts  Normal file
@@ -0,0 +1,130 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { logger, replaceWithFuzzyMatch, validatePath } from '../types'

// Schema definition
export const EditToolSchema = z.object({
  file_path: z.string().describe('The path to the file to modify'),
  old_string: z.string().describe('The text to replace'),
  new_string: z.string().describe('The text to replace it with'),
  replace_all: z.boolean().optional().default(false).describe('Replace all occurrences of old_string (default false)')
})

// Tool definition with detailed description
export const editToolDefinition = {
  name: 'edit',
  description: `Performs exact string replacements in files.

- You must use the 'read' tool at least once before editing
- The file_path must be an absolute path, not a relative path
- Preserve exact indentation from read output (after the line number prefix)
- Never include line number prefixes in old_string or new_string
- ALWAYS prefer editing existing files over creating new ones
- The edit will FAIL if old_string is not found in the file
- The edit will FAIL if old_string appears multiple times (provide more context or use replace_all)
- The edit will FAIL if old_string equals new_string
- Use replace_all to rename variables or replace all occurrences`,
  inputSchema: z.toJSONSchema(EditToolSchema)
}

// Handler implementation
export async function handleEditTool(args: unknown, baseDir: string) {
  const parsed = EditToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for edit: ${parsed.error}`)
  }

  const { file_path: filePath, old_string: oldString, new_string: newString, replace_all: replaceAll } = parsed.data

  // Validate path
  const validPath = await validatePath(filePath, baseDir)

  // Check if file exists
  try {
    const stats = await fs.stat(validPath)
    if (!stats.isFile()) {
      throw new Error(`Path is not a file: ${filePath}`)
    }
  } catch (error: any) {
    if (error.code === 'ENOENT') {
      // If old_string is empty, this is a create new file operation
      if (oldString === '') {
        // Create parent directory if needed
        const parentDir = path.dirname(validPath)
        await fs.mkdir(parentDir, { recursive: true })

        // Write the new content
        await fs.writeFile(validPath, newString, 'utf-8')

        logger.info('File created', { path: validPath })

        const relativePath = path.relative(baseDir, validPath)
        return {
          content: [
            {
              type: 'text',
              text: `Created new file: ${relativePath}\nLines: ${newString.split('\n').length}`
            }
          ]
        }
      }
      throw new Error(`File not found: ${filePath}`)
    }
    throw error
  }

  // Read current content
  const content = await fs.readFile(validPath, 'utf-8')

  // Handle special case: old_string is empty (create file with content)
  if (oldString === '') {
    await fs.writeFile(validPath, newString, 'utf-8')

    logger.info('File overwritten', { path: validPath })

    const relativePath = path.relative(baseDir, validPath)
    return {
      content: [
        {
          type: 'text',
          text: `Overwrote file: ${relativePath}\nLines: ${newString.split('\n').length}`
        }
      ]
    }
  }

  // Perform the replacement with fuzzy matching
  const newContent = replaceWithFuzzyMatch(content, oldString, newString, replaceAll)

  // Write the modified content
  await fs.writeFile(validPath, newContent, 'utf-8')

  logger.info('File edited', {
    path: validPath,
    replaceAll
  })

  // Generate a simple diff summary
  const oldLines = content.split('\n').length
  const newLines = newContent.split('\n').length
  const lineDiff = newLines - oldLines

  const relativePath = path.relative(baseDir, validPath)
  let diffSummary = `Edited: ${relativePath}`
  if (lineDiff > 0) {
    diffSummary += `\n+${lineDiff} lines`
  } else if (lineDiff < 0) {
    diffSummary += `\n${lineDiff} lines`
  }

  return {
    content: [
      {
        type: 'text',
        text: diffSummary
      }
    ]
  }
}
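
The actual replacement logic is delegated to replaceWithFuzzyMatch from ../types, which the commit message describes as a cascade of nine fallback replacers ported from opencode (SimpleReplacer, LineTrimmedReplacer, BlockAnchorReplacer, WhitespaceNormalizedReplacer, etc.). That file is not in this diff; a compressed sketch of the cascade idea, with only two illustrative replacers and assumed names, might look like:

// Illustrative sketch only; the real replaceWithFuzzyMatch in types.ts has nine
// replacers and also fails when old_string matches more than once without replace_all.
type Replacer = (content: string, find: string) => string | null // returns the matched span, or null

// First attempt: exact substring match
const simpleReplacer: Replacer = (content, find) => (content.includes(find) ? find : null)

// Fallback: match line-by-line while ignoring leading/trailing whitespace per line
const lineTrimmedReplacer: Replacer = (content, find) => {
  const findLines = find.split('\n').map((l) => l.trim())
  const contentLines = content.split('\n')
  for (let i = 0; i <= contentLines.length - findLines.length; i++) {
    const window = contentLines.slice(i, i + findLines.length)
    if (window.every((line, j) => line.trim() === findLines[j])) return window.join('\n')
  }
  return null
}

export function replaceWithFuzzyMatch(content: string, oldString: string, newString: string, replaceAll = false): string {
  for (const replacer of [simpleReplacer, lineTrimmedReplacer]) {
    const match = replacer(content, oldString)
    if (match !== null) {
      return replaceAll ? content.split(match).join(newString) : content.replace(match, newString)
    }
  }
  throw new Error('old_string not found in file')
}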
149  src/main/mcpServers/filesystem/tools/glob.ts  Normal file
@@ -0,0 +1,149 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import type { FileInfo } from '../types'
import { logger, MAX_FILES_LIMIT, runRipgrep, validatePath } from '../types'

// Schema definition
export const GlobToolSchema = z.object({
  pattern: z.string().describe('The glob pattern to match files against'),
  path: z
    .string()
    .optional()
    .describe('The directory to search in (must be absolute path). Defaults to the base directory')
})

// Tool definition with detailed description
export const globToolDefinition = {
  name: 'glob',
  description: `Fast file pattern matching tool that works with any codebase size.

- Supports glob patterns like "**/*.js" or "src/**/*.ts"
- Returns matching absolute file paths sorted by modification time (newest first)
- Use this when you need to find files by name patterns
- Patterns without "/" (e.g., "*.txt") match files at ANY depth in the directory tree
- Patterns with "/" (e.g., "src/*.ts") match relative to the search path
- Pattern syntax: * (any chars), ** (any path), {a,b} (alternatives), ? (single char)
- Results are limited to 100 files
- The path parameter must be an absolute path if specified
- If path is not specified, defaults to the base directory
- IMPORTANT: Omit the path field for the default directory (don't use "undefined" or "null")`,
  inputSchema: z.toJSONSchema(GlobToolSchema)
}

// Handler implementation
export async function handleGlobTool(args: unknown, baseDir: string) {
  const parsed = GlobToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for glob: ${parsed.error}`)
  }

  const searchPath = parsed.data.path || baseDir
  const validPath = await validatePath(searchPath, baseDir)

  // Verify the search directory exists
  try {
    const stats = await fs.stat(validPath)
    if (!stats.isDirectory()) {
      throw new Error(`Path is not a directory: ${validPath}`)
    }
  } catch (error: unknown) {
    if (error && typeof error === 'object' && 'code' in error && error.code === 'ENOENT') {
      throw new Error(`Directory not found: ${validPath}`)
    }
    throw error
  }

  // Validate pattern
  const pattern = parsed.data.pattern.trim()
  if (!pattern) {
    throw new Error('Pattern cannot be empty')
  }

  const files: FileInfo[] = []
  let truncated = false

  // Build ripgrep arguments for file listing using --glob=pattern format
  const rgArgs: string[] = [
    '--files',
    '--follow',
    '--hidden',
    `--glob=${pattern}`,
    '--glob=!.git/*',
    '--glob=!node_modules/*',
    '--glob=!dist/*',
    '--glob=!build/*',
    '--glob=!__pycache__/*',
    validPath
  ]

  // Use ripgrep for file listing
  logger.debug('Running ripgrep with args', { rgArgs })
  const rgResult = await runRipgrep(rgArgs)
  logger.debug('Ripgrep result', {
    ok: rgResult.ok,
    exitCode: rgResult.exitCode,
    stdoutLength: rgResult.stdout.length,
    stdoutPreview: rgResult.stdout.slice(0, 500)
  })

  // Process results if we have stdout content
  // Exit code 2 can indicate partial errors (e.g., permission denied on some dirs) but still have valid results
  if (rgResult.ok && rgResult.stdout.length > 0) {
    const lines = rgResult.stdout.split('\n').filter(Boolean)
    logger.debug('Parsed lines from ripgrep', { lineCount: lines.length, lines })

    for (const line of lines) {
      if (files.length >= MAX_FILES_LIMIT) {
        truncated = true
        break
      }

      const filePath = line.trim()
      if (!filePath) continue

      const absolutePath = path.isAbsolute(filePath) ? filePath : path.resolve(validPath, filePath)

      try {
        const stats = await fs.stat(absolutePath)
        files.push({
          path: absolutePath,
          type: 'file', // ripgrep --files only returns files
          size: stats.size,
          modified: stats.mtime
        })
      } catch (error) {
        logger.debug('Failed to stat file from ripgrep output, skipping', { file: absolutePath, error })
      }
    }
  }

  // Sort by modification time (newest first)
  files.sort((a, b) => {
    const aTime = a.modified ? a.modified.getTime() : 0
    const bTime = b.modified ? b.modified.getTime() : 0
    return bTime - aTime
  })

  // Format output - always use absolute paths
  const output: string[] = []
  if (files.length === 0) {
    output.push(`No files found matching pattern "${parsed.data.pattern}" in ${validPath}`)
  } else {
    output.push(...files.map((f) => f.path))
    if (truncated) {
      output.push('')
      output.push(`(Results truncated to ${MAX_FILES_LIMIT} files. Consider using a more specific pattern.)`)
    }
  }

  return {
    content: [
      {
        type: 'text',
        text: output.join('\n')
      }
    ]
  }
}
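
Concretely, for a pattern like "**/*.ts" the handler above ends up asking ripgrep for a plain file listing. A usage sketch of the same call through runRipgrep ('/ws' is an illustrative workspace path, not a real default):

// Usage sketch: what handleGlobTool effectively runs for pattern "**/*.ts".
import { runRipgrep } from '../types'

const rgResult = await runRipgrep([
  '--files', '--follow', '--hidden',
  '--glob=**/*.ts',
  '--glob=!.git/*', '--glob=!node_modules/*', '--glob=!dist/*', '--glob=!build/*', '--glob=!__pycache__/*',
  '/ws' // illustrative search root
])
// rgResult.stdout is a newline-separated list of matching file paths;
// the handler stats each path and sorts by mtime, newest first.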
266
src/main/mcpServers/filesystem/tools/grep.ts
Normal file
266
src/main/mcpServers/filesystem/tools/grep.ts
Normal file
@ -0,0 +1,266 @@
|
|||||||
|
import fs from 'fs/promises'
|
||||||
|
import path from 'path'
|
||||||
|
import * as z from 'zod'
|
||||||
|
|
||||||
|
import type { GrepMatch } from '../types'
|
||||||
|
import { isBinaryFile, MAX_GREP_MATCHES, MAX_LINE_LENGTH, runRipgrep, validatePath } from '../types'
|
||||||
|
|
||||||
|
// Schema definition
|
||||||
|
export const GrepToolSchema = z.object({
|
||||||
|
pattern: z.string().describe('The regex pattern to search for in file contents'),
|
||||||
|
path: z
|
||||||
|
.string()
|
||||||
|
.optional()
|
||||||
|
.describe('The directory to search in (must be absolute path). Defaults to the base directory'),
|
||||||
|
include: z.string().optional().describe('File pattern to include in the search (e.g. "*.js", "*.{ts,tsx}")')
|
||||||
|
})
|
||||||
|
|
||||||
|
// Tool definition with detailed description
|
||||||
|
export const grepToolDefinition = {
|
||||||
|
name: 'grep',
|
||||||
|
description: `Fast content search tool that works with any codebase size.
|
||||||
|
|
||||||
|
- Searches file contents using regular expressions
|
||||||
|
- Supports full regex syntax (e.g., "log.*Error", "function\\s+\\w+")
|
||||||
|
- Filter files by pattern with include (e.g., "*.js", "*.{ts,tsx}")
|
||||||
|
- Returns absolute file paths and line numbers with matching content
|
||||||
|
- Results are limited to 100 matches
|
||||||
|
- Binary files are automatically skipped
|
||||||
|
- Common directories (node_modules, .git, dist) are excluded
|
||||||
|
- The path parameter must be an absolute path if specified
|
||||||
|
- If path is not specified, defaults to the base directory`,
|
||||||
|
inputSchema: z.toJSONSchema(GrepToolSchema)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handler implementation
|
||||||
|
export async function handleGrepTool(args: unknown, baseDir: string) {
|
||||||
|
const parsed = GrepToolSchema.safeParse(args)
|
||||||
|
if (!parsed.success) {
|
||||||
|
throw new Error(`Invalid arguments for grep: ${parsed.error}`)
|
||||||
|
}
|
||||||
|
|
||||||
|
const data = parsed.data
|
||||||
|
|
||||||
|
if (!data.pattern) {
|
||||||
|
throw new Error('Pattern is required for grep')
|
||||||
|
}
|
||||||
|
|
||||||
|
const searchPath = data.path || baseDir
|
||||||
|
const validPath = await validatePath(searchPath, baseDir)
|
||||||
|
|
||||||
|
const matches: GrepMatch[] = []
|
||||||
|
let truncated = false
|
||||||
|
let regex: RegExp
|
||||||
|
|
||||||
|
// Build ripgrep arguments
|
||||||
|
const rgArgs: string[] = [
|
||||||
|
'--no-heading',
|
||||||
|
'--line-number',
|
||||||
|
'--color',
|
||||||
|
'never',
|
||||||
|
'--ignore-case',
|
||||||
|
'--glob',
|
||||||
|
'!.git/**',
|
||||||
|
'--glob',
|
||||||
|
'!node_modules/**',
|
||||||
|
'--glob',
|
||||||
|
'!dist/**',
|
||||||
|
'--glob',
|
||||||
|
'!build/**',
|
||||||
|
'--glob',
|
||||||
|
'!__pycache__/**'
|
||||||
|
]
|
||||||
|
|
||||||
|
if (data.include) {
|
||||||
|
for (const pat of data.include
|
||||||
|
.split(',')
|
||||||
|
.map((p) => p.trim())
|
||||||
|
.filter(Boolean)) {
|
||||||
|
rgArgs.push('--glob', pat)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
  rgArgs.push(data.pattern)
  rgArgs.push(validPath)

  try {
    regex = new RegExp(data.pattern, 'gi')
  } catch (error) {
    throw new Error(`Invalid regex pattern: ${data.pattern}`)
  }

  async function searchFile(filePath: string): Promise<void> {
    if (matches.length >= MAX_GREP_MATCHES) {
      truncated = true
      return
    }

    try {
      // Skip binary files
      if (await isBinaryFile(filePath)) {
        return
      }

      const content = await fs.readFile(filePath, 'utf-8')
      const lines = content.split('\n')

      lines.forEach((line, index) => {
        if (matches.length >= MAX_GREP_MATCHES) {
          truncated = true
          return
        }

        // Reset lastIndex: the 'g' flag makes test() stateful, which would
        // otherwise silently skip matches on subsequent lines
        regex.lastIndex = 0
        if (regex.test(line)) {
          // Truncate long lines
          const truncatedLine = line.length > MAX_LINE_LENGTH ? line.substring(0, MAX_LINE_LENGTH) + '...' : line

          matches.push({
            file: filePath,
            line: index + 1,
            content: truncatedLine.trim()
          })
        }
      })
    } catch (error) {
      // Skip files we can't read
    }
  }

  async function searchDirectory(dir: string): Promise<void> {
    if (matches.length >= MAX_GREP_MATCHES) {
      truncated = true
      return
    }

    try {
      const entries = await fs.readdir(dir, { withFileTypes: true })

      for (const entry of entries) {
        if (matches.length >= MAX_GREP_MATCHES) {
          truncated = true
          break
        }

        const fullPath = path.join(dir, entry.name)

        // Skip common ignore patterns
        if (entry.name.startsWith('.') && entry.name !== '.env.example') {
          continue
        }
        if (['node_modules', 'dist', 'build', '__pycache__', '.git'].includes(entry.name)) {
          continue
        }

        if (entry.isFile()) {
          // Check if file matches include pattern
          if (data.include) {
            const includePatterns = data.include.split(',').map((p) => p.trim())
            const fileName = path.basename(fullPath)
            const matchesInclude = includePatterns.some((pattern) => {
              // Simple glob pattern matching
              const regexPattern = pattern
                .replace(/\*/g, '.*')
                .replace(/\?/g, '.')
                .replace(/\{([^}]+)\}/g, (_, group) => `(${group.split(',').join('|')})`)
              return new RegExp(`^${regexPattern}$`).test(fileName)
            })
            if (!matchesInclude) {
              continue
            }
          }

          await searchFile(fullPath)
        } else if (entry.isDirectory()) {
          await searchDirectory(fullPath)
        }
      }
    } catch (error) {
      // Skip directories we can't read
    }
  }

  // Perform the search: try ripgrep first, fall back to the JS implementation
  let usedRipgrep = false
  try {
    const rgResult = await runRipgrep(rgArgs)
    if (rgResult.ok && rgResult.exitCode !== null && rgResult.exitCode !== 2) {
      usedRipgrep = true
      const lines = rgResult.stdout.split('\n').filter(Boolean)
      for (const line of lines) {
        if (matches.length >= MAX_GREP_MATCHES) {
          truncated = true
          break
        }

        // Parse "<file>:<line>:<content>"; the backtracking regex keeps a
        // Windows drive-letter colon ("C:\...") inside the file part
        const parsedLine = /^(.*?):(\d+):(.*)$/.exec(line)
        if (!parsedLine) continue

        const [, filePart, linePart, contentPart] = parsedLine
        const lineNum = Number.parseInt(linePart, 10)
        if (!Number.isFinite(lineNum)) continue

        const absoluteFilePath = path.isAbsolute(filePart) ? filePart : path.resolve(baseDir, filePart)
        const truncatedLine =
          contentPart.length > MAX_LINE_LENGTH ? contentPart.substring(0, MAX_LINE_LENGTH) + '...' : contentPart

        matches.push({
          file: absoluteFilePath,
          line: lineNum,
          content: truncatedLine.trim()
        })
      }
    }
  } catch {
    usedRipgrep = false
  }

  if (!usedRipgrep) {
    const stats = await fs.stat(validPath)
    if (stats.isFile()) {
      await searchFile(validPath)
    } else {
      await searchDirectory(validPath)
    }
  }

  // Format output
  const output: string[] = []

  if (matches.length === 0) {
    output.push('No matches found')
  } else {
    // Group matches by file
    const fileGroups = new Map<string, GrepMatch[]>()
    matches.forEach((match) => {
      if (!fileGroups.has(match.file)) {
        fileGroups.set(match.file, [])
      }
      fileGroups.get(match.file)!.push(match)
    })

    // Format grouped matches - always use absolute paths
    fileGroups.forEach((fileMatches, filePath) => {
      output.push(`\n${filePath}:`)
      fileMatches.forEach((match) => {
        output.push(`  ${match.line}: ${match.content}`)
      })
    })

    if (truncated) {
      output.push('')
      output.push(`(Results truncated to ${MAX_GREP_MATCHES} matches. Consider using a more specific pattern or path.)`)
    }
  }

  return {
    content: [
      {
        type: 'text',
        text: output.join('\n')
      }
    ]
  }
}
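
For reference, the grouped output this handler produces looks like the sketch below (paths and matches are hypothetical; the truncation footer is only appended once the 100-match cap is hit):

    /workspace/src/api.ts:
      12: // TODO: add retry logic
      48: return fetchWithRetry(url) // TODO

    /workspace/src/utils.ts:
      7: // TODO: remove after migration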

src/main/mcpServers/filesystem/tools/index.ts (new file, 8 lines)
@@ -0,0 +1,8 @@
// Export all tool definitions and handlers
export { deleteToolDefinition, handleDeleteTool } from './delete'
export { editToolDefinition, handleEditTool } from './edit'
export { globToolDefinition, handleGlobTool } from './glob'
export { grepToolDefinition, handleGrepTool } from './grep'
export { handleLsTool, lsToolDefinition } from './ls'
export { handleReadTool, readToolDefinition } from './read'
export { handleWriteTool, writeToolDefinition } from './write'

src/main/mcpServers/filesystem/tools/ls.ts (new file, 150 lines)
@@ -0,0 +1,150 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { MAX_FILES_LIMIT, validatePath } from '../types'

// Schema definition
export const LsToolSchema = z.object({
  path: z.string().optional().describe('The directory to list (must be absolute path). Defaults to the base directory'),
  recursive: z.boolean().optional().describe('Whether to list directories recursively (default: false)')
})

// Tool definition with detailed description
export const lsToolDefinition = {
  name: 'ls',
  description: `Lists files and directories in a specified path.

- Returns a tree-like structure with icons (📁 directories, 📄 files)
- Shows the absolute directory path in the header
- Entries are sorted alphabetically with directories first
- Can list recursively with recursive=true (up to 5 levels deep)
- Common directories (node_modules, dist, .git) are excluded
- Hidden files (starting with .) are excluded except .env.example
- Results are limited to 100 entries
- The path parameter must be an absolute path if specified
- If path is not specified, defaults to the base directory`,
  inputSchema: z.toJSONSchema(LsToolSchema)
}

// Handler implementation
export async function handleLsTool(args: unknown, baseDir: string) {
  const parsed = LsToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for ls: ${parsed.error}`)
  }

  const targetPath = parsed.data.path || baseDir
  const validPath = await validatePath(targetPath, baseDir)
  const recursive = parsed.data.recursive || false

  interface TreeNode {
    name: string
    type: 'file' | 'directory'
    children?: TreeNode[]
  }

  let fileCount = 0
  let truncated = false

  async function buildTree(dirPath: string, depth: number = 0): Promise<TreeNode[]> {
    if (fileCount >= MAX_FILES_LIMIT) {
      truncated = true
      return []
    }

    try {
      const entries = await fs.readdir(dirPath, { withFileTypes: true })
      const nodes: TreeNode[] = []

      // Sort entries: directories first, then files, alphabetically
      entries.sort((a, b) => {
        if (a.isDirectory() && !b.isDirectory()) return -1
        if (!a.isDirectory() && b.isDirectory()) return 1
        return a.name.localeCompare(b.name)
      })

      for (const entry of entries) {
        if (fileCount >= MAX_FILES_LIMIT) {
          truncated = true
          break
        }

        // Skip hidden files and common ignore patterns
        if (entry.name.startsWith('.') && entry.name !== '.env.example') {
          continue
        }
        if (['node_modules', 'dist', 'build', '__pycache__'].includes(entry.name)) {
          continue
        }

        fileCount++
        const node: TreeNode = {
          name: entry.name,
          type: entry.isDirectory() ? 'directory' : 'file'
        }

        if (entry.isDirectory() && recursive && depth < 5) {
          // Limit depth to prevent runaway recursion
          const childPath = path.join(dirPath, entry.name)
          node.children = await buildTree(childPath, depth + 1)
        }

        nodes.push(node)
      }

      return nodes
    } catch (error) {
      return []
    }
  }

  // Build the tree
  const tree = await buildTree(validPath)

  // Format as text output
  function formatTree(nodes: TreeNode[], prefix: string = ''): string[] {
    const lines: string[] = []

    nodes.forEach((node, index) => {
      const isLastNode = index === nodes.length - 1
      const connector = isLastNode ? '└── ' : '├── '
      const icon = node.type === 'directory' ? '📁 ' : '📄 '

      lines.push(prefix + connector + icon + node.name)

      if (node.children && node.children.length > 0) {
        const childPrefix = prefix + (isLastNode ? '    ' : '│   ')
        lines.push(...formatTree(node.children, childPrefix))
      }
    })

    return lines
  }

  // Generate output
  const output: string[] = []
  output.push(`Directory: ${validPath}`)
  output.push('')

  if (tree.length === 0) {
    output.push('(empty directory)')
  } else {
    const treeLines = formatTree(tree, '')
    output.push(...treeLines)

    if (truncated) {
      output.push('')
      output.push(`(Results truncated to ${MAX_FILES_LIMIT} files. Consider listing a more specific directory.)`)
    }
  }

  return {
    content: [
      {
        type: 'text',
        text: output.join('\n')
      }
    ]
  }
}
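
For reference, a sketch of the output format for a hypothetical workspace listed with recursive=true:

    Directory: /workspace/project

    ├── 📁 src
    │   ├── 📄 index.ts
    │   └── 📄 types.ts
    └── 📄 package.json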

src/main/mcpServers/filesystem/tools/read.ts (new file, 101 lines)
@@ -0,0 +1,101 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { DEFAULT_READ_LIMIT, isBinaryFile, MAX_LINE_LENGTH, validatePath } from '../types'

// Schema definition
export const ReadToolSchema = z.object({
  file_path: z.string().describe('The path to the file to read'),
  offset: z.number().optional().describe('The line number to start reading from (1-based)'),
  limit: z.number().optional().describe('The number of lines to read (defaults to 2000)')
})

// Tool definition with detailed description
export const readToolDefinition = {
  name: 'read',
  description: `Reads a file from the local filesystem.

- Assumes this tool can read all files on the machine
- The file_path parameter must be an absolute path, not a relative path
- By default, reads up to 2000 lines starting from the beginning
- You can optionally specify a line offset and limit for long files
- Any lines longer than 2000 characters will be truncated
- Results are returned with line numbers starting at 1
- Binary files are detected and rejected with an error
- Empty files return a warning`,
  inputSchema: z.toJSONSchema(ReadToolSchema)
}

// Handler implementation
export async function handleReadTool(args: unknown, baseDir: string) {
  const parsed = ReadToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for read: ${parsed.error}`)
  }

  const filePath = parsed.data.file_path
  const validPath = await validatePath(filePath, baseDir)

  // Check if file exists
  try {
    const stats = await fs.stat(validPath)
    if (!stats.isFile()) {
      throw new Error(`Path is not a file: ${filePath}`)
    }
  } catch (error: any) {
    if (error.code === 'ENOENT') {
      throw new Error(`File not found: ${filePath}`)
    }
    throw error
  }

  // Check if file is binary
  if (await isBinaryFile(validPath)) {
    throw new Error(`Cannot read binary file: ${filePath}`)
  }

  // Read file content
  const content = await fs.readFile(validPath, 'utf-8')
  const lines = content.split('\n')

  // Apply offset and limit
  const offset = (parsed.data.offset || 1) - 1 // Convert to 0-based
  const limit = parsed.data.limit || DEFAULT_READ_LIMIT

  if (offset < 0 || offset >= lines.length) {
    throw new Error(`Invalid offset: ${offset + 1}. File has ${lines.length} lines.`)
  }

  const selectedLines = lines.slice(offset, offset + limit)

  // Format output with line numbers and truncate long lines
  const output: string[] = []
  const relativePath = path.relative(baseDir, validPath)

  output.push(`File: ${relativePath}`)
  if (offset > 0 || limit < lines.length) {
    output.push(`Lines ${offset + 1} to ${Math.min(offset + limit, lines.length)} of ${lines.length}`)
  }
  output.push('')

  selectedLines.forEach((line, index) => {
    const lineNumber = offset + index + 1
    const truncatedLine = line.length > MAX_LINE_LENGTH ? line.substring(0, MAX_LINE_LENGTH) + '...' : line
    output.push(`${lineNumber.toString().padStart(6)}\t${truncatedLine}`)
  })

  if (offset + limit < lines.length) {
    output.push('')
    output.push(`(${lines.length - (offset + limit)} more lines not shown)`)
  }

  return {
    content: [
      {
        type: 'text',
        text: output.join('\n')
      }
    ]
  }
}
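
For reference, a sketch of the read output for a hypothetical three-line file requested with offset=2 (the gutter is the line number right-aligned to six characters, followed by a tab):

    File: notes/todo.md
    Lines 2 to 3 of 3

         2    - buy milk
         3    - ship the release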

src/main/mcpServers/filesystem/tools/write.ts (new file, 83 lines)
@@ -0,0 +1,83 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { logger, validatePath } from '../types'

// Schema definition
export const WriteToolSchema = z.object({
  file_path: z.string().describe('The path to the file to write'),
  content: z.string().describe('The content to write to the file')
})

// Tool definition with detailed description
export const writeToolDefinition = {
  name: 'write',
  description: `Writes a file to the local filesystem.

- This tool will overwrite the existing file if one exists at the path
- You MUST use the read tool first to understand what you're overwriting
- ALWAYS prefer using the 'edit' tool for existing files
- NEVER proactively create documentation files unless explicitly requested
- Parent directories will be created automatically if they don't exist
- The file_path must be an absolute path, not a relative path`,
  inputSchema: z.toJSONSchema(WriteToolSchema)
}

// Handler implementation
export async function handleWriteTool(args: unknown, baseDir: string) {
  const parsed = WriteToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for write: ${parsed.error}`)
  }

  const filePath = parsed.data.file_path
  const validPath = await validatePath(filePath, baseDir)

  // Create parent directory if it doesn't exist
  const parentDir = path.dirname(validPath)
  try {
    await fs.mkdir(parentDir, { recursive: true })
  } catch (error: any) {
    if (error.code !== 'EEXIST') {
      throw new Error(`Failed to create parent directory: ${error.message}`)
    }
  }

  // Check if file exists (for logging)
  let isOverwrite = false
  try {
    await fs.stat(validPath)
    isOverwrite = true
  } catch {
    // File doesn't exist, that's fine
  }

  // Write the file
  try {
    await fs.writeFile(validPath, parsed.data.content, 'utf-8')
  } catch (error: any) {
    throw new Error(`Failed to write file: ${error.message}`)
  }

  // Report the UTF-8 byte length rather than the JS string length
  // (they differ for non-ASCII content)
  const byteSize = Buffer.byteLength(parsed.data.content, 'utf-8')

  // Log the operation
  logger.info('File written', {
    path: validPath,
    overwrite: isOverwrite,
    size: byteSize
  })

  // Format output
  const relativePath = path.relative(baseDir, validPath)
  const action = isOverwrite ? 'Updated' : 'Created'
  const lines = parsed.data.content.split('\n').length

  return {
    content: [
      {
        type: 'text',
        text: `${action} file: ${relativePath}\n` + `Size: ${byteSize} bytes\n` + `Lines: ${lines}`
      }
    ]
  }
}
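
For reference, a sketch of the confirmation text the write handler returns (hypothetical file and sizes):

    Created file: notes/todo.md
    Size: 34 bytes
    Lines: 2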

src/main/mcpServers/filesystem/types.ts (new file, 627 lines)
@@ -0,0 +1,627 @@
import { loggerService } from '@logger'
import { isMac, isWin } from '@main/constant'
import { spawn } from 'child_process'
import fs from 'fs/promises'
import os from 'os'
import path from 'path'

export const logger = loggerService.withContext('MCP:FileSystemServer')

// Constants
export const MAX_LINE_LENGTH = 2000
export const DEFAULT_READ_LIMIT = 2000
export const MAX_FILES_LIMIT = 100
export const MAX_GREP_MATCHES = 100

// Common types
export interface FileInfo {
  path: string
  type: 'file' | 'directory'
  size?: number
  modified?: Date
}

export interface GrepMatch {
  file: string
  line: number
  content: string
}

// Utility functions for path handling
export function normalizePath(p: string): string {
  return path.normalize(p)
}

export function expandHome(filepath: string): string {
  if (filepath.startsWith('~/') || filepath === '~') {
    return path.join(os.homedir(), filepath.slice(1))
  }
  return filepath
}

// Security validation
export async function validatePath(requestedPath: string, baseDir?: string): Promise<string> {
  const expandedPath = expandHome(requestedPath)
  const root = baseDir ?? process.cwd()
  const absolute = path.isAbsolute(expandedPath) ? path.resolve(expandedPath) : path.resolve(root, expandedPath)

  // Handle symlinks by checking their real path
  try {
    const realPath = await fs.realpath(absolute)
    return normalizePath(realPath)
  } catch (error) {
    // For new files that don't exist yet, verify parent directory
    const parentDir = path.dirname(absolute)
    try {
      const realParentPath = await fs.realpath(parentDir)
      normalizePath(realParentPath)
      return normalizePath(absolute)
    } catch {
      return normalizePath(absolute)
    }
  }
}
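
// Illustrative behavior (hypothetical paths): validatePath('notes/a.md', '/data/ws')
// resolves to '/data/ws/notes/a.md', and validatePath('~/a.md') expands against the
// home directory first. Existing paths go through fs.realpath, so the returned
// path reflects the physical location of any symlink target.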

// ============================================================================
// Edit Tool Utilities - Fuzzy matching replacers from opencode
// ============================================================================

export type Replacer = (content: string, find: string) => Generator<string, void, unknown>

// Similarity thresholds for block anchor fallback matching
const SINGLE_CANDIDATE_SIMILARITY_THRESHOLD = 0.0
const MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD = 0.3

/**
 * Levenshtein distance algorithm implementation
 */
function levenshtein(a: string, b: string): number {
  if (a === '' || b === '') {
    return Math.max(a.length, b.length)
  }
  const matrix = Array.from({ length: a.length + 1 }, (_, i) =>
    Array.from({ length: b.length + 1 }, (_, j) => (i === 0 ? j : j === 0 ? i : 0))
  )

  for (let i = 1; i <= a.length; i++) {
    for (let j = 1; j <= b.length; j++) {
      const cost = a[i - 1] === b[j - 1] ? 0 : 1
      matrix[i][j] = Math.min(matrix[i - 1][j] + 1, matrix[i][j - 1] + 1, matrix[i - 1][j - 1] + cost)
    }
  }
  return matrix[a.length][b.length]
}
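
// Worked example (illustrative): levenshtein counts the minimum number of
// single-character insertions, deletions and substitutions, e.g.
//   levenshtein('kitten', 'sitting') === 3  // k->s, e->i, append g
//   levenshtein('', 'abc') === 3            // three insertions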

export const SimpleReplacer: Replacer = function* (_content, find) {
  yield find
}

export const LineTrimmedReplacer: Replacer = function* (content, find) {
  const originalLines = content.split('\n')
  const searchLines = find.split('\n')

  if (searchLines[searchLines.length - 1] === '') {
    searchLines.pop()
  }

  for (let i = 0; i <= originalLines.length - searchLines.length; i++) {
    let matches = true

    for (let j = 0; j < searchLines.length; j++) {
      const originalTrimmed = originalLines[i + j].trim()
      const searchTrimmed = searchLines[j].trim()

      if (originalTrimmed !== searchTrimmed) {
        matches = false
        break
      }
    }

    if (matches) {
      let matchStartIndex = 0
      for (let k = 0; k < i; k++) {
        matchStartIndex += originalLines[k].length + 1
      }

      let matchEndIndex = matchStartIndex
      for (let k = 0; k < searchLines.length; k++) {
        matchEndIndex += originalLines[i + k].length
        if (k < searchLines.length - 1) {
          matchEndIndex += 1
        }
      }

      yield content.substring(matchStartIndex, matchEndIndex)
    }
  }
}

export const BlockAnchorReplacer: Replacer = function* (content, find) {
  const originalLines = content.split('\n')
  const searchLines = find.split('\n')

  if (searchLines.length < 3) {
    return
  }

  if (searchLines[searchLines.length - 1] === '') {
    searchLines.pop()
  }

  const firstLineSearch = searchLines[0].trim()
  const lastLineSearch = searchLines[searchLines.length - 1].trim()
  const searchBlockSize = searchLines.length

  const candidates: Array<{ startLine: number; endLine: number }> = []
  for (let i = 0; i < originalLines.length; i++) {
    if (originalLines[i].trim() !== firstLineSearch) {
      continue
    }

    for (let j = i + 2; j < originalLines.length; j++) {
      if (originalLines[j].trim() === lastLineSearch) {
        candidates.push({ startLine: i, endLine: j })
        break
      }
    }
  }

  if (candidates.length === 0) {
    return
  }

  if (candidates.length === 1) {
    const { startLine, endLine } = candidates[0]
    const actualBlockSize = endLine - startLine + 1

    let similarity = 0
    const linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2)

    if (linesToCheck > 0) {
      for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
        const originalLine = originalLines[startLine + j].trim()
        const searchLine = searchLines[j].trim()
        const maxLen = Math.max(originalLine.length, searchLine.length)
        if (maxLen === 0) {
          continue
        }
        const distance = levenshtein(originalLine, searchLine)
        similarity += (1 - distance / maxLen) / linesToCheck

        if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
          break
        }
      }
    } else {
      similarity = 1.0
    }

    if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
      let matchStartIndex = 0
      for (let k = 0; k < startLine; k++) {
        matchStartIndex += originalLines[k].length + 1
      }
      let matchEndIndex = matchStartIndex
      for (let k = startLine; k <= endLine; k++) {
        matchEndIndex += originalLines[k].length
        if (k < endLine) {
          matchEndIndex += 1
        }
      }
      yield content.substring(matchStartIndex, matchEndIndex)
    }
    return
  }

  let bestMatch: { startLine: number; endLine: number } | null = null
  let maxSimilarity = -1

  for (const candidate of candidates) {
    const { startLine, endLine } = candidate
    const actualBlockSize = endLine - startLine + 1

    let similarity = 0
    const linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2)

    if (linesToCheck > 0) {
      for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
        const originalLine = originalLines[startLine + j].trim()
        const searchLine = searchLines[j].trim()
        const maxLen = Math.max(originalLine.length, searchLine.length)
        if (maxLen === 0) {
          continue
        }
        const distance = levenshtein(originalLine, searchLine)
        similarity += 1 - distance / maxLen
      }
      similarity /= linesToCheck
    } else {
      similarity = 1.0
    }

    if (similarity > maxSimilarity) {
      maxSimilarity = similarity
      bestMatch = candidate
    }
  }

  if (maxSimilarity >= MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD && bestMatch) {
    const { startLine, endLine } = bestMatch
    let matchStartIndex = 0
    for (let k = 0; k < startLine; k++) {
      matchStartIndex += originalLines[k].length + 1
    }
    let matchEndIndex = matchStartIndex
    for (let k = startLine; k <= endLine; k++) {
      matchEndIndex += originalLines[k].length
      if (k < endLine) {
        matchEndIndex += 1
      }
    }
    yield content.substring(matchStartIndex, matchEndIndex)
  }
}

export const WhitespaceNormalizedReplacer: Replacer = function* (content, find) {
  const normalizeWhitespace = (text: string) => text.replace(/\s+/g, ' ').trim()
  const normalizedFind = normalizeWhitespace(find)

  const lines = content.split('\n')
  for (let i = 0; i < lines.length; i++) {
    const line = lines[i]
    if (normalizeWhitespace(line) === normalizedFind) {
      yield line
    } else {
      const normalizedLine = normalizeWhitespace(line)
      if (normalizedLine.includes(normalizedFind)) {
        const words = find.trim().split(/\s+/)
        if (words.length > 0) {
          const pattern = words.map((word) => word.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')).join('\\s+')
          try {
            const regex = new RegExp(pattern)
            const match = line.match(regex)
            if (match) {
              yield match[0]
            }
          } catch {
            // Invalid regex pattern, skip
          }
        }
      }
    }
  }

  const findLines = find.split('\n')
  if (findLines.length > 1) {
    for (let i = 0; i <= lines.length - findLines.length; i++) {
      const block = lines.slice(i, i + findLines.length)
      if (normalizeWhitespace(block.join('\n')) === normalizedFind) {
        yield block.join('\n')
      }
    }
  }
}

export const IndentationFlexibleReplacer: Replacer = function* (content, find) {
  const removeIndentation = (text: string) => {
    const lines = text.split('\n')
    const nonEmptyLines = lines.filter((line) => line.trim().length > 0)
    if (nonEmptyLines.length === 0) return text

    const minIndent = Math.min(
      ...nonEmptyLines.map((line) => {
        const match = line.match(/^(\s*)/)
        return match ? match[1].length : 0
      })
    )

    return lines.map((line) => (line.trim().length === 0 ? line : line.slice(minIndent))).join('\n')
  }

  const normalizedFind = removeIndentation(find)
  const contentLines = content.split('\n')
  const findLines = find.split('\n')

  for (let i = 0; i <= contentLines.length - findLines.length; i++) {
    const block = contentLines.slice(i, i + findLines.length).join('\n')
    if (removeIndentation(block) === normalizedFind) {
      yield block
    }
  }
}

export const EscapeNormalizedReplacer: Replacer = function* (content, find) {
  const unescapeString = (str: string): string => {
    return str.replace(/\\(n|t|r|'|"|`|\\|\n|\$)/g, (match, capturedChar) => {
      switch (capturedChar) {
        case 'n':
          return '\n'
        case 't':
          return '\t'
        case 'r':
          return '\r'
        case "'":
          return "'"
        case '"':
          return '"'
        case '`':
          return '`'
        case '\\':
          return '\\'
        case '\n':
          return '\n'
        case '$':
          return '$'
        default:
          return match
      }
    })
  }

  const unescapedFind = unescapeString(find)

  if (content.includes(unescapedFind)) {
    yield unescapedFind
  }

  const lines = content.split('\n')
  const findLines = unescapedFind.split('\n')

  for (let i = 0; i <= lines.length - findLines.length; i++) {
    const block = lines.slice(i, i + findLines.length).join('\n')
    const unescapedBlock = unescapeString(block)

    if (unescapedBlock === unescapedFind) {
      yield block
    }
  }
}

export const TrimmedBoundaryReplacer: Replacer = function* (content, find) {
  const trimmedFind = find.trim()

  if (trimmedFind === find) {
    return
  }

  if (content.includes(trimmedFind)) {
    yield trimmedFind
  }

  const lines = content.split('\n')
  const findLines = find.split('\n')

  for (let i = 0; i <= lines.length - findLines.length; i++) {
    const block = lines.slice(i, i + findLines.length).join('\n')

    if (block.trim() === trimmedFind) {
      yield block
    }
  }
}

export const ContextAwareReplacer: Replacer = function* (content, find) {
  const findLines = find.split('\n')
  if (findLines.length < 3) {
    return
  }

  if (findLines[findLines.length - 1] === '') {
    findLines.pop()
  }

  const contentLines = content.split('\n')

  const firstLine = findLines[0].trim()
  const lastLine = findLines[findLines.length - 1].trim()

  for (let i = 0; i < contentLines.length; i++) {
    if (contentLines[i].trim() !== firstLine) continue

    for (let j = i + 2; j < contentLines.length; j++) {
      if (contentLines[j].trim() === lastLine) {
        const blockLines = contentLines.slice(i, j + 1)
        const block = blockLines.join('\n')

        if (blockLines.length === findLines.length) {
          let matchingLines = 0
          let totalNonEmptyLines = 0

          for (let k = 1; k < blockLines.length - 1; k++) {
            const blockLine = blockLines[k].trim()
            const findLine = findLines[k].trim()

            if (blockLine.length > 0 || findLine.length > 0) {
              totalNonEmptyLines++
              if (blockLine === findLine) {
                matchingLines++
              }
            }
          }

          if (totalNonEmptyLines === 0 || matchingLines / totalNonEmptyLines >= 0.5) {
            yield block
            break
          }
        }
        break
      }
    }
  }
}

export const MultiOccurrenceReplacer: Replacer = function* (content, find) {
  let startIndex = 0

  while (true) {
    const index = content.indexOf(find, startIndex)
    if (index === -1) break

    yield find
    startIndex = index + find.length
  }
}

/**
 * All replacers in order of specificity
 */
export const ALL_REPLACERS: Replacer[] = [
  SimpleReplacer,
  LineTrimmedReplacer,
  BlockAnchorReplacer,
  WhitespaceNormalizedReplacer,
  IndentationFlexibleReplacer,
  EscapeNormalizedReplacer,
  TrimmedBoundaryReplacer,
  ContextAwareReplacer,
  MultiOccurrenceReplacer
]

/**
 * Replace oldString with newString in content using fuzzy matching
 */
export function replaceWithFuzzyMatch(
  content: string,
  oldString: string,
  newString: string,
  replaceAll = false
): string {
  if (oldString === newString) {
    throw new Error('old_string and new_string must be different')
  }

  let notFound = true

  for (const replacer of ALL_REPLACERS) {
    for (const search of replacer(content, oldString)) {
      const index = content.indexOf(search)
      if (index === -1) continue
      notFound = false
      if (replaceAll) {
        return content.replaceAll(search, newString)
      }
      const lastIndex = content.lastIndexOf(search)
      if (index !== lastIndex) continue
      return content.substring(0, index) + newString + content.substring(index + search.length)
    }
  }

  if (notFound) {
    throw new Error('old_string not found in content')
  }
  throw new Error(
    'Found multiple matches for old_string. Provide more surrounding lines in old_string to identify the correct match.'
  )
}
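
// Worked example (illustrative): with content "const x = 1;\nconst y = 2;",
// replaceWithFuzzyMatch(content, 'const  x = 1;', 'const x = 42;') succeeds even
// though the doubled space defeats a literal indexOf: WhitespaceNormalizedReplacer
// yields the actual line 'const x = 1;', which occurs once, so it is replaced.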

// ============================================================================
// Binary File Detection
// ============================================================================

// Check if a file is likely binary
export async function isBinaryFile(filePath: string): Promise<boolean> {
  try {
    const buffer = Buffer.alloc(4096)
    const fd = await fs.open(filePath, 'r')
    const { bytesRead } = await fd.read(buffer, 0, buffer.length, 0)
    await fd.close()

    if (bytesRead === 0) return false

    const view = buffer.subarray(0, bytesRead)

    let zeroBytes = 0
    let evenZeros = 0
    let oddZeros = 0
    let nonPrintable = 0

    for (let i = 0; i < view.length; i++) {
      const b = view[i]

      if (b === 0) {
        zeroBytes++
        if (i % 2 === 0) evenZeros++
        else oddZeros++
        continue
      }

      // treat common whitespace as printable
      if (b === 9 || b === 10 || b === 13) continue

      // basic ASCII printable range
      if (b >= 32 && b <= 126) continue

      // bytes >= 128 are likely part of UTF-8 sequences; count as printable
      if (b >= 128) continue

      nonPrintable++
    }

    // If there are lots of null bytes, it's probably binary unless it looks like UTF-16 text.
    if (zeroBytes > 0) {
      const evenSlots = Math.ceil(view.length / 2)
      const oddSlots = Math.floor(view.length / 2)
      const evenZeroRatio = evenSlots > 0 ? evenZeros / evenSlots : 0
      const oddZeroRatio = oddSlots > 0 ? oddZeros / oddSlots : 0

      // UTF-16LE/BE tends to have zeros on every other byte.
      if (evenZeroRatio > 0.7 || oddZeroRatio > 0.7) return false

      if (zeroBytes / view.length > 0.05) return true
    }

    // Heuristic: too many non-printable bytes => binary.
    return nonPrintable / view.length > 0.3
  } catch {
    return false
  }
}
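
// Illustrative behavior: a PNG (signature 0x89 'PNG' plus chunk data full of
// 0x00 bytes) typically exceeds the 5% null-byte ratio and is reported binary,
// while UTF-16LE text ("h\0e\0l\0l\0o\0") has zeros only on alternating offsets
// and is let through as text by the even/odd zero-ratio check.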

// ============================================================================
// Ripgrep Utilities
// ============================================================================

export interface RipgrepResult {
  ok: boolean
  stdout: string
  exitCode: number | null
}

export function getRipgrepAddonPath(): string {
  const pkgJsonPath = require.resolve('@anthropic-ai/claude-agent-sdk/package.json')
  const pkgRoot = path.dirname(pkgJsonPath)
  const platform = isMac ? 'darwin' : isWin ? 'win32' : 'linux'
  const arch = process.arch === 'arm64' ? 'arm64' : 'x64'
  return path.join(pkgRoot, 'vendor', 'ripgrep', `${arch}-${platform}`, 'ripgrep.node')
}

export async function runRipgrep(args: string[]): Promise<RipgrepResult> {
  const addonPath = getRipgrepAddonPath()
  const childScript = `const { ripgrepMain } = require(process.env.RIPGREP_ADDON_PATH); process.exit(ripgrepMain(process.argv.slice(1)));`

  return new Promise((resolve) => {
    const child = spawn(process.execPath, ['--eval', childScript, 'rg', ...args], {
      cwd: process.cwd(),
      env: {
        ...process.env,
        ELECTRON_RUN_AS_NODE: '1',
        RIPGREP_ADDON_PATH: addonPath
      },
      stdio: ['ignore', 'pipe', 'pipe']
    })

    let stdout = ''

    child.stdout?.on('data', (chunk) => {
      stdout += chunk.toString('utf-8')
    })

    child.on('error', () => {
      resolve({ ok: false, stdout: '', exitCode: null })
    })

    child.on('close', (code) => {
      resolve({ ok: true, stdout, exitCode: code })
    })
  })
}
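
// Usage sketch (illustrative): list TypeScript files under a directory.
//   const result = await runRipgrep(['--files', '--glob', '**/*.ts', '/abs/dir'])
//   if (result.ok) console.log(result.stdout)
// ripgrep exits 0 on matches, 1 on no matches, and 2 on errors; exit code 2
// can indicate partial errors while still producing valid results on stdout.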

@@ -61,9 +61,14 @@ const BuiltinMCPServerList: FC = () => {
           {getMcpTypeLabel(server.type ?? 'stdio')}
         </Tag>
         {server?.shouldConfig && (
-          <Tag color="warning" style={{ borderRadius: 20, margin: 0, fontWeight: 500 }}>
-            {t('settings.mcp.requiresConfig')}
-          </Tag>
+          <a
+            href="https://docs.cherry-ai.com/advanced-basic/mcp/buildin"
+            target="_blank"
+            rel="noopener noreferrer">
+            <Tag color="warning" style={{ borderRadius: 20, margin: 0, fontWeight: 500 }}>
+              {t('settings.mcp.requiresConfig')}
+            </Tag>
+          </a>
         )}
       </ServerFooter>
     </ServerCard>