Merge branch 'v2' of github.com:CherryHQ/cherry-studio into v2-toast

Commit: 1c3866aec0
.github/workflows/auto-i18n.yml (29 lines changed)
@@ -32,38 +32,37 @@ jobs:
with:
node-version: 22

- name: 📦 Install corepack
run: corepack enable && corepack prepare yarn@4.9.1 --activate
- name: 📦 Install pnpm
uses: pnpm/action-setup@v4

- name: 📂 Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT
- name: 📂 Get pnpm store directory
id: pnpm-cache
shell: bash
run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_OUTPUT

- name: 💾 Cache yarn dependencies
- name: 💾 Cache pnpm dependencies
uses: actions/cache@v4
with:
path: |
${{ steps.yarn-cache-dir-path.outputs.dir }}
node_modules
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-yarn-
${{ runner.os }}-pnpm-

- name: 📦 Install dependencies
run: |
yarn install
pnpm install

- name: 🏃♀️ Translate
run: yarn i18n:sync && yarn i18n:translate
run: pnpm i18n:sync && pnpm i18n:translate

- name: 🔍 Format
run: yarn format
run: pnpm format

- name: 🔍 Check for changes
id: git_status
run: |
# Check if there are any uncommitted changes
git reset -- package.json yarn.lock # 不提交 package.json 和 yarn.lock 的更改
git reset -- package.json pnpm-lock.yaml # 不提交 package.json 和 pnpm-lock.yaml 的更改
git diff --exit-code --quiet || echo "::set-output name=has_changes::true"
git status --porcelain
.github/workflows/nightly-build.yml (29 lines changed)
@@ -65,25 +65,24 @@ jobs:
run: |
brew install python-setuptools

- name: Install corepack
run: corepack enable && corepack prepare yarn@4.9.1 --activate
- name: Install pnpm
uses: pnpm/action-setup@v4

- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT
- name: Get pnpm store directory
id: pnpm-cache
shell: bash
run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_OUTPUT

- name: Cache yarn dependencies
- name: Cache pnpm dependencies
uses: actions/cache@v4
with:
path: |
${{ steps.yarn-cache-dir-path.outputs.dir }}
node_modules
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-yarn-
${{ runner.os }}-pnpm-

- name: Install Dependencies
run: yarn install
run: pnpm install

- name: Generate date tag
id: date

@@ -94,7 +93,7 @@ jobs:
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt-get install -y rpm
yarn build:linux
pnpm build:linux
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
NODE_OPTIONS: --max-old-space-size=8192

@@ -106,7 +105,7 @@ jobs:
- name: Build Mac
if: matrix.os == 'macos-latest'
run: |
yarn build:mac
pnpm build:mac
env:
CSC_LINK: ${{ secrets.CSC_LINK }}
CSC_KEY_PASSWORD: ${{ secrets.CSC_KEY_PASSWORD }}

@@ -123,7 +122,7 @@ jobs:
- name: Build Windows
if: matrix.os == 'windows-latest'
run: |
yarn build:win
pnpm build:win
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
NODE_OPTIONS: --max-old-space-size=8192
.github/workflows/pr-ci.yml (33 lines changed)
@@ -28,37 +28,36 @@ jobs:
with:
node-version: 22

- name: Install corepack
run: corepack enable && corepack prepare yarn@4.9.1 --activate
- name: Install pnpm
uses: pnpm/action-setup@v4

- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT
- name: Get pnpm store directory
id: pnpm-cache
shell: bash
run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_OUTPUT

- name: Cache yarn dependencies
- name: Cache pnpm dependencies
uses: actions/cache@v4
with:
path: |
${{ steps.yarn-cache-dir-path.outputs.dir }}
node_modules
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-yarn-
${{ runner.os }}-pnpm-

- name: Install Dependencies
run: yarn install
run: pnpm install

- name: Lint Check
run: yarn test:lint
run: pnpm test:lint

- name: Format Check
run: yarn format:check
run: pnpm format:check

- name: Type Check
run: yarn typecheck
run: pnpm typecheck

- name: i18n Check
run: yarn i18n:check
run: pnpm i18n:check

- name: Test
run: yarn test
run: pnpm test
.github/workflows/release.yml (29 lines changed)
@@ -56,31 +56,30 @@ jobs:
run: |
brew install python-setuptools

- name: Install corepack
run: corepack enable && corepack prepare yarn@4.9.1 --activate
- name: Install pnpm
uses: pnpm/action-setup@v4

- name: Get yarn cache directory path
id: yarn-cache-dir-path
run: echo "dir=$(yarn config get cacheFolder)" >> $GITHUB_OUTPUT
- name: Get pnpm store directory
id: pnpm-cache
shell: bash
run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_OUTPUT

- name: Cache yarn dependencies
- name: Cache pnpm dependencies
uses: actions/cache@v4
with:
path: |
${{ steps.yarn-cache-dir-path.outputs.dir }}
node_modules
key: ${{ runner.os }}-yarn-${{ hashFiles('**/yarn.lock') }}
path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-yarn-
${{ runner.os }}-pnpm-

- name: Install Dependencies
run: yarn install
run: pnpm install

- name: Build Linux
if: matrix.os == 'ubuntu-latest'
run: |
sudo apt-get install -y rpm
yarn build:linux
pnpm build:linux

env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

@@ -94,7 +93,7 @@ jobs:
if: matrix.os == 'macos-latest'
run: |
sudo -H pip install setuptools
yarn build:mac
pnpm build:mac
env:
CSC_LINK: ${{ secrets.CSC_LINK }}
CSC_KEY_PASSWORD: ${{ secrets.CSC_KEY_PASSWORD }}

@@ -111,7 +110,7 @@ jobs:
- name: Build Windows
if: matrix.os == 'windows-latest'
run: |
yarn build:win
pnpm build:win
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
NODE_OPTIONS: --max-old-space-size=8192
.github/workflows/sync-to-gitcode.yml (9 lines changed)
@@ -48,9 +48,8 @@ jobs:
with:
node-version: 22

- name: Install corepack
shell: bash
run: corepack enable && corepack prepare yarn@4.9.1 --activate
- name: Install pnpm
uses: pnpm/action-setup@v4

- name: Clean node_modules
if: ${{ github.event.inputs.clean == 'true' }}

@@ -59,11 +58,11 @@

- name: Install Dependencies
shell: bash
run: yarn install
run: pnpm install

- name: Build Windows with code signing
shell: bash
run: yarn build:win
run: pnpm build:win
env:
WIN_SIGN: true
CHERRY_CERT_PATH: ${{ secrets.CHERRY_CERT_PATH }}
@@ -154,14 +154,15 @@ jobs:
with:
node-version: 22

- name: Enable Corepack
- name: Enable corepack
if: steps.check.outputs.should_run == 'true'
run: corepack enable && corepack prepare yarn@4.9.1 --activate
working-directory: main
run: corepack enable pnpm

- name: Install dependencies
if: steps.check.outputs.should_run == 'true'
working-directory: main
run: yarn install --immutable
run: pnpm install --frozen-lockfile

- name: Update upgrade config
if: steps.check.outputs.should_run == 'true'

@@ -170,7 +171,7 @@ jobs:
RELEASE_TAG: ${{ steps.meta.outputs.tag }}
IS_PRERELEASE: ${{ steps.check.outputs.is_prerelease }}
run: |
yarn tsx scripts/update-app-upgrade-config.ts \
pnpm tsx scripts/update-app-upgrade-config.ts \
--tag "$RELEASE_TAG" \
--config ../cs/app-upgrade-config.json \
--is-prerelease "$IS_PRERELEASE"

@@ -1 +1 @@
yarn lint-staged
pnpm lint-staged
.npmrc (2 lines changed)
@@ -1 +1 @@
electron_mirror=https://npmmirror.com/mirrors/electron/
electron_mirror=https://npmmirror.com/mirrors/electron/
@@ -1,35 +0,0 @@
diff --git a/sdk.mjs b/sdk.mjs
index dea7766a3432a1e809f12d6daba4f2834a219689..e0b02ef73da177ba32b903887d7bbbeaa08cc6d3 100755
--- a/sdk.mjs
+++ b/sdk.mjs
@@ -6250,7 +6250,7 @@ function createAbortController(maxListeners = DEFAULT_MAX_LISTENERS) {
}

// ../src/transport/ProcessTransport.ts
-import { spawn } from "child_process";
+import { fork } from "child_process";
import { createInterface } from "readline";

// ../src/utils/fsOperations.ts
@@ -6644,18 +6644,11 @@ class ProcessTransport {
const errorMessage = isNativeBinary(pathToClaudeCodeExecutable) ? `Claude Code native binary not found at ${pathToClaudeCodeExecutable}. Please ensure Claude Code is installed via native installer or specify a valid path with options.pathToClaudeCodeExecutable.` : `Claude Code executable not found at ${pathToClaudeCodeExecutable}. Is options.pathToClaudeCodeExecutable set?`;
throw new ReferenceError(errorMessage);
}
- const isNative = isNativeBinary(pathToClaudeCodeExecutable);
- const spawnCommand = isNative ? pathToClaudeCodeExecutable : executable;
- const spawnArgs = isNative ? [...executableArgs, ...args] : [...executableArgs, pathToClaudeCodeExecutable, ...args];
- const spawnMessage = isNative ? `Spawning Claude Code native binary: ${spawnCommand} ${spawnArgs.join(" ")}` : `Spawning Claude Code process: ${spawnCommand} ${spawnArgs.join(" ")}`;
- logForSdkDebugging(spawnMessage);
- if (stderr) {
- stderr(spawnMessage);
- }
+ logForSdkDebugging(`Forking Claude Code Node.js process: ${pathToClaudeCodeExecutable} ${args.join(" ")}`);
const stderrMode = env.DEBUG_CLAUDE_AGENT_SDK || stderr ? "pipe" : "ignore";
- this.child = spawn(spawnCommand, spawnArgs, {
+ this.child = fork(pathToClaudeCodeExecutable, args, {
cwd,
- stdio: ["pipe", "pipe", stderrMode],
+ stdio: stderrMode === "pipe" ? ["pipe", "pipe", "pipe", "ipc"] : ["pipe", "pipe", "ignore", "ipc"],
signal: this.abortController.signal,
env
});
.yarn/releases/yarn-4.9.1.cjs (binary file not shown)
@@ -1,9 +0,0 @@
enableImmutableInstalls: false

httpTimeout: 300000

nodeLinker: node-modules

yarnPath: .yarn/releases/yarn-4.9.1.cjs
npmRegistryServer: https://registry.npmjs.org
npmPublishRegistry: https://registry.npmjs.org
CLAUDE.md (24 lines changed)
@@ -11,7 +11,7 @@ This file provides guidance to AI coding assistants when working with code in th
- **Log centrally**: Route all logging through `loggerService` with the right context—no `console.log`.
- **Research via subagent**: Lean on `subagent` for external docs, APIs, news, and references.
- **Always propose before executing**: Before making any changes, clearly explain your planned approach and wait for explicit user approval to ensure alignment and prevent unwanted modifications.
- **Lint, test, and format before completion**: Coding tasks are only complete after running `yarn lint`, `yarn test`, and `yarn format` successfully.
- **Lint, test, and format before completion**: Coding tasks are only complete after running `pnpm lint`, `pnpm test`, and `pnpm format` successfully.
- **Write conventional commits**: Commit small, focused changes using Conventional Commit messages (e.g., `feat:`, `fix:`, `refactor:`, `docs:`).

## Pull Request Workflow (CRITICAL)

@@ -25,18 +25,18 @@ When creating a Pull Request, you MUST:

## Development Commands

- **Install**: `yarn install` - Install all project dependencies
- **Development**: `yarn dev` - Runs Electron app in development mode with hot reload
- **Debug**: `yarn debug` - Starts with debugging enabled, use `chrome://inspect` to attach debugger
- **Build Check**: `yarn build:check` - **REQUIRED** before commits (lint + test + typecheck)
- If having i18n sort issues, run `yarn i18n:sync` first to sync template
- If having formatting issues, run `yarn format` first
- **Test**: `yarn test` - Run all tests (Vitest) across main and renderer processes
- **Install**: `pnpm install` - Install all project dependencies
- **Development**: `pnpm dev` - Runs Electron app in development mode with hot reload
- **Debug**: `pnpm debug` - Starts with debugging enabled, use `chrome://inspect` to attach debugger
- **Build Check**: `pnpm build:check` - **REQUIRED** before commits (lint + test + typecheck)
- If having i18n sort issues, run `pnpm i18n:sync` first to sync template
- If having formatting issues, run `pnpm format` first
- **Test**: `pnpm test` - Run all tests (Vitest) across main and renderer processes
- **Single Test**:
- `yarn test:main` - Run tests for main process only
- `yarn test:renderer` - Run tests for renderer process only
- **Lint**: `yarn lint` - Fix linting issues and run TypeScript type checking
- **Format**: `yarn format` - Auto-format code using Biome
- `pnpm test:main` - Run tests for main process only
- `pnpm test:renderer` - Run tests for renderer process only
- **Lint**: `pnpm lint` - Fix linting issues and run TypeScript type checking
- **Format**: `pnpm format` - Auto-format code using Biome

## Project Architecture
@@ -51,7 +51,9 @@
"!*.json",
"!src/main/integration/**",
"!**/tailwind.css",
"!**/package.json"
"!**/package.json",
"!src/renderer/src/routeTree.gen.ts",
"!.zed/**"
],
"indentStyle": "space",
"indentWidth": 2,

@@ -81,7 +83,7 @@
},
"linter": {
"enabled": true,
"includes": ["!**/tailwind.css", "src/renderer/**/*.{tsx,ts}"],
"includes": ["!**/tailwind.css", "!src/renderer/src/routeTree.gen.ts", "src/renderer/**/*.{tsx,ts}"],
// only enable sorted tailwind css rule. used as formatter instead of linter
"rules": {
"nursery": {
@@ -11,7 +11,7 @@
### Install

```bash
yarn
pnpm install
```

### Development

@@ -20,17 +20,17 @@ yarn

Download and install [Node.js v22.x.x](https://nodejs.org/en/download)

### Setup Yarn
### Setup pnpm

```bash
corepack enable
corepack prepare yarn@4.9.1 --activate
corepack prepare pnpm@10.27.0 --activate
```

### Install Dependencies

```bash
yarn install
pnpm install
```

### ENV

@@ -42,13 +42,13 @@ cp .env.example .env
### Start

```bash
yarn dev
pnpm dev
```

### Debug

```bash
yarn debug
pnpm debug
```

Then input chrome://inspect in browser

@@ -56,18 +56,18 @@ Then input chrome://inspect in browser
### Test

```bash
yarn test
pnpm test
```

### Build

```bash
# For windows
$ yarn build:win
$ pnpm build:win

# For macOS
$ yarn build:mac
$ pnpm build:mac

# For Linux
$ yarn build:linux
$ pnpm build:linux
```

@@ -116,7 +116,7 @@ This script checks:
- Whether keys are properly sorted

```bash
yarn i18n:check
pnpm i18n:check
```

### `i18n:sync` - Synchronize JSON Structure and Sort Order

@@ -128,7 +128,7 @@ This script uses `zh-cn.json` as the source of truth to sync structure across al
3. Sorting keys automatically

```bash
yarn i18n:sync
pnpm i18n:sync
```

### `i18n:translate` - Automatically Translate Pending Texts

@@ -148,20 +148,20 @@ MODEL="qwen-plus-latest"
Alternatively, add these variables directly to your `.env` file.

```bash
yarn i18n:translate
pnpm i18n:translate
```

### Workflow

1. During development, first add the required text in `zh-cn.json`
2. Confirm it displays correctly in the Chinese environment
3. Run `yarn i18n:sync` to propagate the keys to other language files
4. Run `yarn i18n:translate` to perform machine translation
3. Run `pnpm i18n:sync` to propagate the keys to other language files
4. Run `pnpm i18n:translate` to perform machine translation
5. Grab a coffee and let the magic happen!

## Best Practices

1. **Use Chinese as Source Language**: All development starts in Chinese, then translates to other languages.
2. **Run Check Script Before Commit**: Use `yarn i18n:check` to catch i18n issues early.
2. **Run Check Script Before Commit**: Use `pnpm i18n:check` to catch i18n issues early.
3. **Translate in Small Increments**: Avoid accumulating a large backlog of untranslated content.
4. **Keep Keys Semantically Clear**: Keys should clearly express their purpose, e.g., `user.profile.avatar.upload.error`

@@ -37,8 +37,8 @@ The `x-files/app-upgrade-config/app-upgrade-config.json` file is synchronized by

1. **Guard + metadata preparation** – the `Check if should proceed` and `Prepare metadata` steps compute the target tag, prerelease flag, whether the tag is the newest release, and a `safe_tag` slug used for branch names. When any rule fails, the workflow stops without touching the config.
2. **Checkout source branches** – the default branch is checked out into `main/`, while the long-lived `x-files/app-upgrade-config` branch lives in `cs/`. All modifications happen in the latter directory.
3. **Install toolchain** – Node.js 22, Corepack, and frozen Yarn dependencies are installed inside `main/`.
4. **Run the update script** – `yarn tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json --is-prerelease <flag>` updates the JSON in-place.
3. **Install toolchain** – Node.js 22, Corepack, and frozen pnpm dependencies are installed inside `main/`.
4. **Run the update script** – `pnpm tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json --is-prerelease <flag>` updates the JSON in-place.
- The script normalizes the tag (e.g., strips `v` prefix), detects the release channel (`latest`, `rc`, `beta`), and loads segment rules from `config/app-upgrade-segments.json`.
- It validates that prerelease flags and semantic suffixes agree, enforces locked segments, builds mirror feed URLs, and performs release-availability checks (GitHub HEAD request for every channel; GitCode GET for latest channels, falling back to `https://releases.cherry-ai.com` when gitcode is delayed).
- After updating the relevant channel entry, the script rewrites the config with semver-sort order and a new `lastUpdated` timestamp.

@@ -223,10 +223,10 @@ interface ChannelConfig {
Starting from this change, `.github/workflows/update-app-upgrade-config.yml` listens to GitHub release events (published + prerelease). The workflow:

1. Checks out the default branch (for scripts) and the `x-files/app-upgrade-config` branch (where the config is hosted).
2. Runs `yarn tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json` to regenerate the config directly inside the `x-files/app-upgrade-config` working tree.
2. Runs `pnpm tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json` to regenerate the config directly inside the `x-files/app-upgrade-config` working tree.
3. If the file changed, it opens a PR against `x-files/app-upgrade-config` via `peter-evans/create-pull-request`, with the generated diff limited to `app-upgrade-config.json`.

You can run the same script locally via `yarn update:upgrade-config --tag v2.1.6 --config ../cs/app-upgrade-config.json` (add `--dry-run` to preview) to reproduce or debug whatever the workflow does. Passing `--skip-release-checks` along with `--dry-run` lets you bypass the release-page existence check (useful when the GitHub/GitCode pages aren't published yet). Running without `--config` continues to update the copy in your current working directory (main branch) for documentation purposes.
You can run the same script locally via `pnpm update:upgrade-config --tag v2.1.6 --config ../cs/app-upgrade-config.json` (add `--dry-run` to preview) to reproduce or debug whatever the workflow does. Passing `--skip-release-checks` along with `--dry-run` lets you bypass the release-page existence check (useful when the GitHub/GitCode pages aren't published yet). Running without `--config` continues to update the copy in your current working directory (main branch) for documentation purposes.

## Version Matching Logic
@@ -1,5 +1,35 @@
# Database Schema Guidelines

## Schema File Organization

### Principles

| Scenario | Approach |
| -------------------------------------- | ------------------- |
| Strongly related tables in same domain | Merge into one file |
| Core tables / Complex business logic | One file per table |
| Tables that may cross multiple domains | One file per table |

### Decision Criteria

**Merge when:**

- Tables have strong foreign key relationships (e.g., many-to-many)
- Tables belong to the same business domain
- Tables are unlikely to evolve independently

**Separate (one file per table) when:**

- Core table with many fields and complex logic
- Has a dedicated Service layer counterpart
- May expand independently in the future

### File Naming

- **Single-table files**: named after the table export name (`message.ts` for `messageTable`, `topic.ts` for `topicTable`)
- **Multi-table files**: lowercase, named by domain (`tagging.ts` for `tagTable` + `entityTagTable`)
- **Helper utilities**: underscore prefix (`_columnHelpers.ts`) to indicate non-table definitions
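For illustration, a minimal sketch of what such a merged domain file could look like. Only the file and table names (`tagging.ts`, `tagTable`, `entityTagTable`) come from the guidelines above; the column set and the junction-table layout are assumptions.

```typescript
// tagging.ts (hypothetical): two strongly related tables kept in one domain file.
import { primaryKey, sqliteTable, text } from 'drizzle-orm/sqlite-core'

import { createUpdateTimestamps, uuidPrimaryKey } from './_columnHelpers'

export const tagTable = sqliteTable('tag', {
  id: uuidPrimaryKey(),
  name: text(),
  ...createUpdateTimestamps
})

// Many-to-many junction table; it cannot evolve independently of tagTable,
// so it lives in the same file.
export const entityTagTable = sqliteTable(
  'entity_tag',
  {
    entityId: text(),
    tagId: text().references(() => tagTable.id, { onDelete: 'cascade' }),
    ...createUpdateTimestamps
  },
  (t) => [primaryKey({ columns: [t.entityId, t.tagId] })]
)
```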
## Naming Conventions

- **Table names**: Use **singular** form with snake_case (e.g., `topic`, `message`, `app_state`)

@@ -8,19 +38,19 @@

## Column Helpers

All helpers are exported from `./schemas/columnHelpers.ts`.
All helpers are exported from `./schemas/_columnHelpers.ts`.

### Primary Keys

| Helper | UUID Version | Use Case |
|--------|--------------|----------|
| `uuidPrimaryKey()` | v4 (random) | General purpose tables |
| Helper | UUID Version | Use Case |
| ------------------------- | ----------------- | ------------------------------------ |
| `uuidPrimaryKey()` | v4 (random) | General purpose tables |
| `uuidPrimaryKeyOrdered()` | v7 (time-ordered) | Large tables with time-based queries |

**Usage:**

```typescript
import { uuidPrimaryKey, uuidPrimaryKeyOrdered } from './columnHelpers'
import { uuidPrimaryKey, uuidPrimaryKeyOrdered } from './_columnHelpers'

// General purpose table
export const topicTable = sqliteTable('topic', {

@@ -45,29 +75,32 @@ export const messageTable = sqliteTable('message', {

### Timestamps

| Helper | Fields | Use Case |
|--------|--------|----------|
| `createUpdateTimestamps` | `createdAt`, `updatedAt` | Tables without soft delete |
| `createUpdateDeleteTimestamps` | `createdAt`, `updatedAt`, `deletedAt` | Tables with soft delete |
| Helper | Fields | Use Case |
| ------------------------------ | ------------------------------------- | -------------------------- |
| `createUpdateTimestamps` | `createdAt`, `updatedAt` | Tables without soft delete |
| `createUpdateDeleteTimestamps` | `createdAt`, `updatedAt`, `deletedAt` | Tables with soft delete |

**Usage:**

```typescript
import { createUpdateTimestamps, createUpdateDeleteTimestamps } from './columnHelpers'
import {
createUpdateTimestamps,
createUpdateDeleteTimestamps,
} from "./_columnHelpers";

// Without soft delete
export const tagTable = sqliteTable('tag', {
export const tagTable = sqliteTable("tag", {
id: uuidPrimaryKey(),
name: text(),
...createUpdateTimestamps
})
...createUpdateTimestamps,
});

// With soft delete
export const topicTable = sqliteTable('topic', {
export const topicTable = sqliteTable("topic", {
id: uuidPrimaryKey(),
name: text(),
...createUpdateDeleteTimestamps
})
...createUpdateDeleteTimestamps,
});
```

**Behavior:**

@@ -81,7 +114,7 @@ export const topicTable = sqliteTable('topic', {

For JSON column support, use `{ mode: 'json' }`:

```typescript
data: text({ mode: 'json' }).$type<MyDataType>()
data: text({ mode: "json" }).$type<MyDataType>();
```

Drizzle handles JSON serialization/deserialization automatically.
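As a small usage sketch (the `appStateTable` shape and the `MyDataType` fields are assumptions for illustration; `db` is the project's Drizzle instance, as in the later examples):

```typescript
// Hypothetical table with a typed JSON column.
type MyDataType = { theme: string; fontSize: number }

export const appStateTable = sqliteTable('app_state', {
  id: uuidPrimaryKey(),
  data: text({ mode: 'json' }).$type<MyDataType>(),
  ...createUpdateTimestamps
})

// Drizzle serializes on write and deserializes on read:
await db.insert(appStateTable).values({ data: { theme: 'dark', fontSize: 14 } })
const rows = await db.select().from(appStateTable)
// rows[0].data is typed as MyDataType | null; no manual JSON.parse needed.
```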
@@ -92,10 +125,10 @@ Drizzle handles JSON serialization/deserialization automatically.

```typescript
// SET NULL: preserve record when referenced record is deleted
groupId: text().references(() => groupTable.id, { onDelete: 'set null' })
groupId: text().references(() => groupTable.id, { onDelete: "set null" });

// CASCADE: delete record when referenced record is deleted
topicId: text().references(() => topicTable.id, { onDelete: 'cascade' })
topicId: text().references(() => topicTable.id, { onDelete: "cascade" });
```

### Self-Referencing Foreign Keys

@@ -103,23 +136,26 @@ topicId: text().references(() => topicTable.id, { onDelete: 'cascade' })

For self-referencing foreign keys (e.g., tree structures with parentId), **always use the `foreignKey` operator** in the table's third parameter:

```typescript
import { foreignKey, sqliteTable, text } from 'drizzle-orm/sqlite-core'
import { foreignKey, sqliteTable, text } from "drizzle-orm/sqlite-core";

export const messageTable = sqliteTable(
'message',
"message",
{
id: uuidPrimaryKeyOrdered(),
parentId: text(), // Do NOT use .references() here
parentId: text(), // Do NOT use .references() here
// ...other fields
},
(t) => [
// Use foreignKey operator for self-referencing
foreignKey({ columns: [t.parentId], foreignColumns: [t.id] }).onDelete('set null')
foreignKey({ columns: [t.parentId], foreignColumns: [t.id] }).onDelete(
"set null"
),
]
)
);
```

**Why this approach:**

- Avoids TypeScript circular reference issues (no need for `AnySQLiteColumn` type annotation)
- More explicit and readable
- Allows chaining `.onDelete()` / `.onUpdate()` actions

@@ -142,21 +178,22 @@ If you encounter a scenario that seems to require circular references:

```typescript
// ✅ GOOD: Break the cycle by handling one side at application layer
export const topicTable = sqliteTable('topic', {
export const topicTable = sqliteTable("topic", {
id: uuidPrimaryKey(),
// Application-managed reference (no FK constraint)
// Validated by TopicService.setCurrentMessage()
currentMessageId: text(),
})
});

export const messageTable = sqliteTable('message', {
export const messageTable = sqliteTable("message", {
id: uuidPrimaryKeyOrdered(),
// Database-enforced FK
topicId: text().references(() => topicTable.id, { onDelete: 'cascade' }),
})
topicId: text().references(() => topicTable.id, { onDelete: "cascade" }),
});
```

**Why soft references for SQLite:**

- SQLite does not support `DEFERRABLE` constraints (unlike PostgreSQL/Oracle)
- Application-layer validation provides equivalent data integrity
- Simplifies insert/update operations without transaction ordering concerns
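A rough sketch of that application-layer check, for reference: the guidelines above only name `TopicService.setCurrentMessage()`, so the body below is an assumption rather than the project's actual implementation (`db` is the Drizzle instance).

```typescript
import { and, eq } from 'drizzle-orm'

export class TopicService {
  async setCurrentMessage(topicId: string, messageId: string): Promise<void> {
    // currentMessageId has no FK constraint, so validate here:
    // the message must exist and must belong to this topic.
    const [message] = await db
      .select({ id: messageTable.id })
      .from(messageTable)
      .where(and(eq(messageTable.id, messageId), eq(messageTable.topicId, topicId)))
      .limit(1)

    if (!message) {
      throw new Error(`Message ${messageId} not found in topic ${topicId}`)
    }

    await db.update(topicTable).set({ currentMessageId: messageId }).where(eq(topicTable.id, topicId))
  }
}
```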
@@ -185,12 +222,12 @@ Always use `.returning()` to get inserted/updated data instead of re-querying:

```typescript
// Good: Use returning()
const [row] = await db.insert(table).values(data).returning()
return rowToEntity(row)
const [row] = await db.insert(table).values(data).returning();
return rowToEntity(row);

// Avoid: Re-query after insert (unnecessary database round-trip)
await db.insert(table).values({ id, ...data })
return this.getById(id)
await db.insert(table).values({ id, ...data });
return this.getById(id);
```

### Soft delete support
@@ -11,7 +11,7 @@
### Install

```bash
yarn
pnpm install
```

### Development

@@ -20,17 +20,17 @@ yarn

Download and install [Node.js v22.x.x](https://nodejs.org/en/download)

### Setup Yarn
### Setup pnpm

```bash
corepack enable
corepack prepare yarn@4.9.1 --activate
corepack prepare pnpm@10.27.0 --activate
```

### Install Dependencies

```bash
yarn install
pnpm install
```

### ENV

@@ -42,13 +42,13 @@ cp .env.example .env
### Start

```bash
yarn dev
pnpm dev
```

### Debug

```bash
yarn debug
pnpm debug
```

Then input chrome://inspect in browser

@@ -56,18 +56,18 @@ Then input chrome://inspect in browser
### Test

```bash
yarn test
pnpm test
```

### Build

```bash
# For windows
$ yarn build:win
$ pnpm build:win

# For macOS
$ yarn build:mac
$ pnpm build:mac

# For Linux
$ yarn build:linux
$ pnpm build:linux
```

@@ -111,7 +111,7 @@ export const getThemeModeLabel = (key: string): string => {
- 是否已经有序

```bash
yarn i18n:check
pnpm i18n:check
```

### `i18n:sync` - 同步 json 结构与排序

@@ -123,7 +123,7 @@ yarn i18n:check
3. 自动排序

```bash
yarn i18n:sync
pnpm i18n:sync
```

### `i18n:translate` - 自动翻译待翻译文本

@@ -143,19 +143,19 @@ MODEL="qwen-plus-latest"
你也可以通过直接编辑`.env`文件来添加环境变量。

```bash
yarn i18n:translate
pnpm i18n:translate
```

### 工作流

1. 开发阶段,先在`zh-cn.json`中添加所需文案
2. 确认在中文环境下显示无误后,使用`yarn i18n:sync`将文案同步到其他语言文件
3. 使用`yarn i18n:translate`进行自动翻译
2. 确认在中文环境下显示无误后,使用`pnpm i18n:sync`将文案同步到其他语言文件
3. 使用`pnpm i18n:translate`进行自动翻译
4. 喝杯咖啡,等翻译完成吧!

## 最佳实践

1. **以中文为源语言**:所有开发首先使用中文,再翻译为其他语言
2. **提交前运行检查脚本**:使用`yarn i18n:check`检查 i18n 是否有问题
2. **提交前运行检查脚本**:使用`pnpm i18n:check`检查 i18n 是否有问题
3. **小步提交翻译**:避免积累大量未翻译文本
4. **保持 key 语义明确**:key 应能清晰表达其用途,如`user.profile.avatar.upload.error`

@@ -37,8 +37,8 @@

1. **检查与元数据准备**:`Check if should proceed` 和 `Prepare metadata` 步骤会计算 tag、prerelease 标志、是否最新版本以及用于分支名的 `safe_tag`。若任意校验失败,工作流立即退出。
2. **检出分支**:默认分支被检出到 `main/`,长期维护的 `x-files/app-upgrade-config` 分支则在 `cs/` 中,所有改动都发生在 `cs/`。
3. **安装工具链**:安装 Node.js 22、启用 Corepack,并在 `main/` 目录执行 `yarn install --immutable`。
4. **运行更新脚本**:执行 `yarn tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json --is-prerelease <flag>`。
3. **安装工具链**:安装 Node.js 22、启用 Corepack,并在 `main/` 目录执行 `pnpm install --frozen-lockfile`。
4. **运行更新脚本**:执行 `pnpm tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json --is-prerelease <flag>`。
- 脚本会标准化 tag(去掉 `v` 前缀等)、识别渠道、加载 `config/app-upgrade-segments.json` 中的分段规则。
- 校验 prerelease 标志与语义后缀是否匹配、强制锁定的 segment 是否满足、生成镜像的下载地址,并检查 release 是否已经在 GitHub/GitCode 可用(latest 渠道在 GitCode 不可用时会回退到 `https://releases.cherry-ai.com`)。
- 更新对应的渠道配置后,脚本会按 semver 排序写回 JSON,并刷新 `lastUpdated`。

@@ -223,10 +223,10 @@ interface ChannelConfig {
`.github/workflows/update-app-upgrade-config.yml` 会在 GitHub Release(包含正常发布与 Pre Release)触发:

1. 同时 Checkout 仓库默认分支(用于脚本)和 `x-files/app-upgrade-config` 分支(真实托管配置的分支)。
2. 在默认分支目录执行 `yarn tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json`,直接重写 `x-files/app-upgrade-config` 分支里的配置文件。
2. 在默认分支目录执行 `pnpm tsx scripts/update-app-upgrade-config.ts --tag <tag> --config ../cs/app-upgrade-config.json`,直接重写 `x-files/app-upgrade-config` 分支里的配置文件。
3. 如果 `app-upgrade-config.json` 有变化,则通过 `peter-evans/create-pull-request` 自动创建一个指向 `x-files/app-upgrade-config` 的 PR,Diff 仅包含该文件。

如需本地调试,可执行 `yarn update:upgrade-config --tag v2.1.6 --config ../cs/app-upgrade-config.json`(加 `--dry-run` 仅打印结果)来复现 CI 行为。若需要暂时跳过 GitHub/GitCode Release 页面是否就绪的校验,可在 `--dry-run` 的同时附加 `--skip-release-checks`。不加 `--config` 时默认更新当前工作目录(通常是 main 分支)下的副本,方便文档/审查。
如需本地调试,可执行 `pnpm update:upgrade-config --tag v2.1.6 --config ../cs/app-upgrade-config.json`(加 `--dry-run` 仅打印结果)来复现 CI 行为。若需要暂时跳过 GitHub/GitCode Release 页面是否就绪的校验,可在 `--dry-run` 的同时附加 `--skip-release-checks`。不加 `--config` 时默认更新当前工作目录(通常是 main 分支)下的副本,方便文档/审查。

## 版本匹配逻辑
@@ -28,6 +28,12 @@ files:
- "!**/{tsconfig.json,tsconfig.tsbuildinfo,tsconfig.node.json,tsconfig.web.json}"
- "!**/{.editorconfig,.jekyll-metadata}"
- "!src"
- "!config"
- "!patches"
- "!app-upgrade-config.json"
- "!**/node_modules/**/*.cpp"
- "!**/node_modules/node-addon-api/**"
- "!**/node_modules/prebuild-install/**"
- "!scripts"
- "!local"
- "!docs"

@@ -91,6 +97,7 @@ nsis:
oneClick: false
include: build/nsis-installer.nsh
buildUniversalInstaller: false
differentialPackage: false
portable:
artifactName: ${productName}-${version}-${arch}-portable.${ext}
buildUniversalInstaller: false

@@ -106,6 +113,8 @@ mac:
target:
- target: dmg
- target: zip
dmg:
writeUpdateInfo: false
linux:
artifactName: ${productName}-${version}-${arch}.${ext}
target:

@@ -135,44 +144,34 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
releaseInfo:
releaseNotes: |
<!--LANG:en-->
Cherry Studio 1.7.9 - New Features & Bug Fixes
Cherry Studio 1.7.11 - New Features & Bug Fixes

✨ New Features
- [Agent] Add 302.AI provider support
- [Browser] Browser data now persists and supports multiple tabs
- [Language] Add Romanian language support
- [Search] Add fuzzy search for file list
- [Models] Add latest Zhipu models
- [Image] Improve text-to-image functionality
- [MCP] Add MCP Hub with Auto mode for intelligent multi-server tool orchestration

🐛 Bug Fixes
- [Mac] Fix mini window unexpected closing issue
- [Preview] Fix HTML preview controls not working in fullscreen
- [Translate] Fix translation duplicate execution issue
- [Zoom] Fix page zoom reset issue during navigation
- [Agent] Fix crash when switching between agent and assistant
- [Agent] Fix navigation in agent mode
- [Copy] Fix markdown copy button issue
- [Windows] Fix compatibility issues on non-Windows systems
- [Chat] Fix reasoning process not displaying correctly for some proxy models
- [Chat] Fix duplicate loading spinners on action buttons
- [Editor] Fix paragraph handle and plus button not clickable
- [Drawing] Fix TokenFlux models not showing in drawing panel
- [Translate] Fix translation stalling after initialization
- [Error] Fix app freeze when viewing error details with large images
- [Notes] Fix folder overlay blocking webview preview
- [Chat] Fix thinking time display when stopping generation

<!--LANG:zh-CN-->
Cherry Studio 1.7.9 - 新功能与问题修复
Cherry Studio 1.7.11 - 新功能与问题修复

✨ 新功能
- [Agent] 新增 302.AI 服务商支持
- [浏览器] 浏览器数据现在可以保存,支持多标签页
- [语言] 新增罗马尼亚语支持
- [搜索] 文件列表新增模糊搜索功能
- [模型] 新增最新智谱模型
- [图片] 优化文生图功能
- [MCP] 新增 MCP Hub 智能模式,可自动管理和调用多个 MCP 服务器工具

🐛 问题修复
- [Mac] 修复迷你窗口意外关闭的问题
- [预览] 修复全屏模式下 HTML 预览控件无法使用的问题
- [翻译] 修复翻译重复执行的问题
- [缩放] 修复页面导航时缩放被重置的问题
- [智能体] 修复在智能体和助手间切换时崩溃的问题
- [智能体] 修复智能体模式下的导航问题
- [复制] 修复 Markdown 复制按钮问题
- [兼容性] 修复非 Windows 系统的兼容性问题
- [对话] 修复部分代理模型的推理过程无法正确显示的问题
- [对话] 修复操作按钮重复显示加载状态的问题
- [编辑器] 修复段落手柄和加号按钮无法点击的问题
- [绘图] 修复 TokenFlux 模型在绘图面板不显示的问题
- [翻译] 修复翻译功能初始化后卡住的问题
- [错误] 修复查看包含大图片的错误详情时应用卡死的问题
- [笔记] 修复文件夹遮挡网页预览的问题
- [对话] 修复停止生成时思考时间显示问题
<!--LANG:END-->
@@ -1,3 +1,4 @@
import { tanstackRouter } from '@tanstack/router-plugin/vite'
import react from '@vitejs/plugin-react-swc'
import { CodeInspectorPlugin } from 'code-inspector-plugin'
import { defineConfig } from 'electron-vite'

@@ -80,20 +81,15 @@ export default defineConfig({
},
renderer: {
plugins: [
tanstackRouter({
target: 'react',
autoCodeSplitting: true,
routesDirectory: resolve('src/renderer/src/routes'),
generatedRouteTree: resolve('src/renderer/src/routeTree.gen.ts')
}),
(async () => (await import('@tailwindcss/vite')).default())(),
react({
tsDecorators: true,
plugins: [
[
'@swc/plugin-styled-components',
{
displayName: true, // 开发环境下启用组件名称
fileName: false, // 不在类名中包含文件名
pure: true, // 优化性能
ssr: false // 不需要服务端渲染
}
]
]
tsDecorators: true
}),
...(isDev ? [CodeInspectorPlugin({ bundler: 'vite' })] : []), // 只在开发环境下启用 CodeInspectorPlugin
...visualizerPlugin('renderer')
@@ -67,6 +67,7 @@ export default defineConfig([
'src/main/integration/cherryai/index.js',
'src/main/integration/nutstore/sso/lib/**',
'src/renderer/src/ui/**',
'src/renderer/src/routeTree.gen.ts',
'packages/**/dist'
]
},
package.json (308 lines changed)
@@ -9,28 +9,13 @@
"engines": {
"node": ">=22.0.0"
},
"workspaces": {
"packages": [
"local",
"packages/*"
],
"installConfig": {
"hoistingLimits": [
"packages/database",
"packages/mcp-trace/trace-core",
"packages/mcp-trace/trace-node",
"packages/mcp-trace/trace-web",
"packages/extension-table-plus"
]
}
},
"scripts": {
"start": "electron-vite preview",
"dev": "dotenv electron-vite dev",
"dev:watch": "dotenv electron-vite dev -- -w",
"debug": "electron-vite -- --inspect --sourcemap --remote-debugging-port=9222",
"build": "npm run typecheck && electron-vite build",
"build:check": "yarn lint && yarn test",
"build:check": "pnpm lint && pnpm test",
"build:unpack": "dotenv npm run build && electron-builder --dir",
"build:win": "dotenv npm run build && electron-builder --win --x64 --arm64",
"build:win:x64": "dotenv npm run build && electron-builder --win --x64",

@@ -42,70 +27,65 @@
"build:linux:arm64": "dotenv npm run build && electron-builder --linux --arm64",
"build:linux:x64": "dotenv npm run build && electron-builder --linux --x64",
"release": "node scripts/version.js",
"publish": "yarn build:check && yarn release patch push",
"publish": "pnpm build:check && pnpm release patch push",
"pulish:artifacts": "cd packages/artifacts && npm publish && cd -",
"agents:generate": "NODE_ENV='development' drizzle-kit generate --config src/main/services/agents/drizzle.config.ts",
"agents:push": "NODE_ENV='development' drizzle-kit push --config src/main/services/agents/drizzle.config.ts",
"agents:studio": "NODE_ENV='development' drizzle-kit studio --config src/main/services/agents/drizzle.config.ts",
"agents:drop": "NODE_ENV='development' drizzle-kit drop --config src/main/services/agents/drizzle.config.ts",
"generate:icons": "electron-icon-builder --input=./build/logo.png --output=build",
"analyze:renderer": "VISUALIZER_RENDERER=true yarn build",
"analyze:main": "VISUALIZER_MAIN=true yarn build",
"analyze:renderer": "VISUALIZER_RENDERER=true pnpm build",
"analyze:main": "VISUALIZER_MAIN=true pnpm build",
"typecheck": "concurrently -n \"node,web\" -c \"cyan,magenta\" \"npm run typecheck:node\" \"npm run typecheck:web\"",
"typecheck:node": "tsgo --noEmit -p tsconfig.node.json --composite false",
"typecheck:web": "tsgo --noEmit -p tsconfig.web.json --composite false",
"i18n:check": "dotenv -e .env -- tsx scripts/check-i18n.ts",
"i18n:sync": "dotenv -e .env -- tsx scripts/sync-i18n.ts",
"i18n:translate": "dotenv -e .env -- tsx scripts/auto-translate-i18n.ts",
"i18n:all": "yarn i18n:check && yarn i18n:sync && yarn i18n:translate",
"i18n:all": "pnpm i18n:check && pnpm i18n:sync && pnpm i18n:translate",
"update:languages": "tsx scripts/update-languages.ts",
"update:upgrade-config": "tsx scripts/update-app-upgrade-config.ts",
"test": "vitest run --silent",
"test:main": "vitest run --project main",
"test:renderer": "vitest run --project renderer",
"test:aicore": "vitest run --project aiCore",
"test:update": "yarn test:renderer --update",
"test:update": "pnpm test:renderer --update",
"test:coverage": "vitest run --coverage --silent",
"test:ui": "vitest --ui",
"test:watch": "vitest",
"test:e2e": "yarn playwright test",
"test:e2e": "pnpm playwright test",
"test:lint": "oxlint --deny-warnings && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --cache",
"test:scripts": "vitest scripts",
"lint": "oxlint --fix && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix --cache && biome lint --write && biome format --write && yarn typecheck && yarn i18n:check && yarn format:check",
"lint": "oxlint --fix && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix --cache && pnpm typecheck && pnpm i18n:check && pnpm format:check",
"format": "biome format --write && biome lint --write",
"format:check": "biome format && biome lint",
"prepare": "git config blame.ignoreRevsFile .git-blame-ignore-revs && husky",
"claude": "dotenv -e .env -- claude",
"db:migrations:generate": "drizzle-kit generate --config ./migrations/sqlite-drizzle.config.ts",
"release:aicore:alpha": "yarn workspace @cherrystudio/ai-core version prerelease --preid alpha --immediate && yarn workspace @cherrystudio/ai-core build && yarn workspace @cherrystudio/ai-core npm publish --tag alpha --access public",
"release:aicore:beta": "yarn workspace @cherrystudio/ai-core version prerelease --preid beta --immediate && yarn workspace @cherrystudio/ai-core build && yarn workspace @cherrystudio/ai-core npm publish --tag beta --access public",
"release:aicore": "yarn workspace @cherrystudio/ai-core version patch --immediate && yarn workspace @cherrystudio/ai-core build && yarn workspace @cherrystudio/ai-core npm publish --access public",
"release:ai-sdk-provider": "yarn workspace @cherrystudio/ai-sdk-provider version patch --immediate && yarn workspace @cherrystudio/ai-sdk-provider build && yarn workspace @cherrystudio/ai-sdk-provider npm publish --access public"
"release:aicore:alpha": "pnpm --filter @cherrystudio/ai-core version prerelease --preid alpha && pnpm --filter @cherrystudio/ai-core build && pnpm --filter @cherrystudio/ai-core publish --tag alpha --access public",
"release:aicore:beta": "pnpm --filter @cherrystudio/ai-core version prerelease --preid beta && pnpm --filter @cherrystudio/ai-core build && pnpm --filter @cherrystudio/ai-core publish --tag beta --access public",
"release:aicore": "pnpm --filter @cherrystudio/ai-core version patch && pnpm --filter @cherrystudio/ai-core build && pnpm --filter @cherrystudio/ai-core publish --access public",
"release:ai-sdk-provider": "pnpm --filter @cherrystudio/ai-sdk-provider version patch && pnpm --filter @cherrystudio/ai-sdk-provider build && pnpm --filter @cherrystudio/ai-sdk-provider publish --access public"
},
"dependencies": {
"@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.62#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.62-23ae56f8c8.patch",
"@anthropic-ai/claude-agent-sdk": "0.1.76",
"@libsql/client": "0.14.0",
"@libsql/win32-x64-msvc": "^0.4.7",
"@napi-rs/system-ocr": "patch:@napi-rs/system-ocr@npm%3A1.0.2#~/.yarn/patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch",
"@paymoapp/electron-shutdown-handler": "^1.1.2",
"@strongtz/win32-arm64-msvc": "^0.4.7",
"bonjour-service": "^1.3.0",
"emoji-picker-element-data": "^1",
"express": "^5.1.0",
"font-list": "^2.0.0",
"graceful-fs": "^4.2.11",
"gray-matter": "^4.0.3",
"js-yaml": "^4.1.0",
"@napi-rs/system-ocr": "1.0.2",
"@paymoapp/electron-shutdown-handler": "1.1.2",
"express": "5.1.0",
"font-list": "2.0.0",
"graceful-fs": "4.2.11",
"gray-matter": "4.0.3",
"jsdom": "26.1.0",
"node-stream-zip": "^1.15.0",
"officeparser": "^4.2.0",
"os-proxy-config": "^1.1.2",
"selection-hook": "^1.0.12",
"sharp": "^0.34.3",
"node-stream-zip": "1.15.0",
"officeparser": "4.2.0",
"os-proxy-config": "1.1.2",
"selection-hook": "1.0.12",
"sharp": "0.34.3",
"stream-json": "^1.9.1",
"swagger-jsdoc": "^6.2.8",
"swagger-ui-express": "^5.0.1",
"tesseract.js": "patch:tesseract.js@npm%3A6.0.1#~/.yarn/patches/tesseract.js-npm-6.0.1-2562a7e46d.patch",
"swagger-jsdoc": "6.2.8",
"swagger-ui-express": "5.0.1",
"tesseract.js": "6.0.1",
"turndown": "7.2.0"
},
"devDependencies": {

@@ -114,38 +94,49 @@
"@agentic/tavily": "^7.3.3",
"@ai-sdk/amazon-bedrock": "^3.0.61",
"@ai-sdk/anthropic": "^2.0.49",
"@ai-sdk/azure": "2.0.87",
"@ai-sdk/cerebras": "^1.0.31",
"@ai-sdk/gateway": "^2.0.15",
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.49#~/.yarn/patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch",
"@ai-sdk/google": "2.0.49",
"@ai-sdk/google-vertex": "^3.0.94",
"@ai-sdk/huggingface": "^0.0.10",
"@ai-sdk/mistral": "^2.0.24",
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
"@ai-sdk/openai": "2.0.85",
"@ai-sdk/perplexity": "^2.0.20",
"@ai-sdk/provider": "2.0.0",
"@ai-sdk/provider-utils": "3.0.17",
"@ai-sdk/test-server": "^0.0.1",
"@ai-sdk/xai": "2.0.36",
"@ant-design/cssinjs": "1.23.0",
"@ant-design/icons": "5.6.1",
"@ant-design/v5-patch-for-react-19": "^1.0.3",
"@anthropic-ai/sdk": "^0.41.0",
"@anthropic-ai/vertex-sdk": "patch:@anthropic-ai/vertex-sdk@npm%3A0.11.4#~/.yarn/patches/@anthropic-ai-vertex-sdk-npm-0.11.4-c19cb41edb.patch",
"@anthropic-ai/vertex-sdk": "0.11.4",
"@aws-sdk/client-bedrock": "^3.910.0",
"@aws-sdk/client-bedrock-runtime": "^3.910.0",
"@aws-sdk/client-s3": "^3.910.0",
"@biomejs/biome": "2.2.4",
"@cherrystudio/ai-core": "workspace:^1.0.9",
"@cherrystudio/embedjs": "^0.1.31",
"@cherrystudio/embedjs-libsql": "^0.1.31",
"@cherrystudio/embedjs-loader-csv": "^0.1.31",
"@cherrystudio/embedjs-loader-image": "^0.1.31",
"@cherrystudio/embedjs-loader-markdown": "^0.1.31",
"@cherrystudio/embedjs-loader-msoffice": "^0.1.31",
"@cherrystudio/embedjs-loader-pdf": "^0.1.31",
"@cherrystudio/embedjs-loader-sitemap": "^0.1.31",
"@cherrystudio/embedjs-loader-web": "^0.1.31",
"@cherrystudio/embedjs-loader-xml": "^0.1.31",
"@cherrystudio/embedjs-ollama": "^0.1.31",
"@cherrystudio/embedjs-openai": "^0.1.31",
"@cherrystudio/embedjs": "0.1.31",
"@cherrystudio/embedjs-interfaces": "0.1.31",
"@cherrystudio/embedjs-libsql": "0.1.31",
"@cherrystudio/embedjs-loader-csv": "0.1.31",
"@cherrystudio/embedjs-loader-image": "0.1.31",
"@cherrystudio/embedjs-loader-markdown": "0.1.31",
"@cherrystudio/embedjs-loader-msoffice": "0.1.31",
"@cherrystudio/embedjs-loader-pdf": "0.1.31",
"@cherrystudio/embedjs-loader-sitemap": "0.1.31",
"@cherrystudio/embedjs-loader-web": "0.1.31",
"@cherrystudio/embedjs-loader-xml": "0.1.31",
"@cherrystudio/embedjs-ollama": "0.1.31",
"@cherrystudio/embedjs-openai": "0.1.31",
"@cherrystudio/embedjs-utils": "0.1.31",
"@cherrystudio/extension-table-plus": "workspace:^",
"@cherrystudio/openai": "^6.12.0",
"@cherrystudio/openai": "6.15.0",
"@cherrystudio/ui": "workspace:*",
"@codemirror/lang-json": "6.0.1",
"@codemirror/lint": "6.8.5",
"@codemirror/view": "6.38.1",
"@dnd-kit/core": "^6.3.1",
"@dnd-kit/modifiers": "^9.0.0",
"@dnd-kit/sortable": "^10.0.0",

@@ -158,17 +149,20 @@
"@emotion/is-prop-valid": "^1.3.1",
"@eslint-react/eslint-plugin": "^1.36.1",
"@eslint/js": "^9.22.0",
"@google/genai": "patch:@google/genai@npm%3A1.0.1#~/.yarn/patches/@google-genai-npm-1.0.1-e26f0f9af7.patch",
"@floating-ui/dom": "1.7.3",
"@google/genai": "1.0.1",
"@hello-pangea/dnd": "^18.0.1",
"@langchain/community": "^1.0.0",
"@langchain/core": "patch:@langchain/core@npm%3A1.0.2#~/.yarn/patches/@langchain-core-npm-1.0.2-183ef83fe4.patch",
"@langchain/openai": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@langchain/core": "1.0.2",
"@langchain/openai": "1.0.0",
"@langchain/textsplitters": "0.1.0",
"@mistralai/mistralai": "^1.7.5",
"@modelcontextprotocol/sdk": "^1.23.0",
"@modelcontextprotocol/sdk": "1.23.0",
"@mozilla/readability": "^0.6.0",
"@notionhq/client": "^2.2.15",
"@openrouter/ai-sdk-provider": "^1.2.8",
"@opentelemetry/api": "^1.9.0",
"@opentelemetry/context-async-hooks": "2.0.1",
"@opentelemetry/core": "2.0.0",
"@opentelemetry/exporter-trace-otlp-http": "^0.200.0",
"@opentelemetry/sdk-trace-base": "^2.0.0",

@@ -180,29 +174,36 @@
"@radix-ui/react-switch": "^1.2.6",
"@reduxjs/toolkit": "^2.2.5",
"@shikijs/markdown-it": "^3.12.0",
"@swc/core": "^1.15.8",
"@swc/plugin-styled-components": "^8.0.4",
"@tailwindcss/vite": "^4.1.13",
"@tanstack/react-query": "^5.85.5",
"@tanstack/react-router": "^1.139.3",
"@tanstack/react-virtual": "^3.13.12",
"@tanstack/router-plugin": "^1.139.3",
"@testing-library/dom": "^10.4.0",
"@testing-library/jest-dom": "^6.6.3",
"@testing-library/react": "^16.3.0",
"@testing-library/user-event": "^14.6.1",
"@tiptap/extension-collaboration": "^3.2.0",
"@tiptap/extension-drag-handle": "patch:@tiptap/extension-drag-handle@npm%3A3.2.0#~/.yarn/patches/@tiptap-extension-drag-handle-npm-3.2.0-5a9ebff7c9.patch",
"@tiptap/extension-drag-handle-react": "^3.2.0",
"@tiptap/extension-image": "^3.2.0",
"@tiptap/extension-list": "^3.2.0",
"@tiptap/extension-mathematics": "^3.2.0",
"@tiptap/extension-mention": "^3.2.0",
"@tiptap/extension-node-range": "^3.2.0",
"@tiptap/extension-table-of-contents": "^3.2.0",
"@tiptap/extension-typography": "^3.2.0",
"@tiptap/extension-underline": "^3.2.0",
"@tiptap/pm": "^3.2.0",
"@tiptap/react": "^3.2.0",
"@tiptap/starter-kit": "^3.2.0",
"@tiptap/suggestion": "^3.2.0",
"@tiptap/core": "3.2.0",
"@tiptap/extension-code-block": "3.2.0",
"@tiptap/extension-collaboration": "3.2.0",
"@tiptap/extension-drag-handle": "3.2.0",
"@tiptap/extension-drag-handle-react": "3.2.0",
"@tiptap/extension-heading": "3.2.0",
"@tiptap/extension-image": "3.2.0",
"@tiptap/extension-link": "3.2.0",
"@tiptap/extension-list": "3.2.0",
"@tiptap/extension-mathematics": "3.2.0",
"@tiptap/extension-mention": "3.2.0",
"@tiptap/extension-node-range": "3.2.0",
"@tiptap/extension-table-of-contents": "3.2.0",
"@tiptap/extension-typography": "3.2.0",
"@tiptap/extension-underline": "3.2.0",
"@tiptap/pm": "3.2.0",
"@tiptap/react": "3.2.0",
"@tiptap/starter-kit": "3.2.0",
"@tiptap/suggestion": "3.2.0",
"@tiptap/y-tiptap": "^3.0.0",
"@truto/turndown-plugin-gfm": "^1.0.2",
"@tryfabric/martian": "^1.2.4",

@@ -213,14 +214,17 @@
"@types/dotenv": "^8.2.3",
"@types/express": "^5",
"@types/fs-extra": "^11",
"@types/hast": "^3.0.4",
"@types/he": "^1",
"@types/html-to-text": "^9",
"@types/js-yaml": "^4.0.9",
"@types/json-schema": "7.0.15",
"@types/lodash": "^4.17.5",
"@types/markdown-it": "^14",
"@types/md5": "^2.3.5",
"@types/mdast": "4.0.4",
"@types/mime-types": "^3",
"@types/node": "^22.17.1",
"@types/node": "22.17.2",
"@types/pako": "^1.0.2",
"@types/react": "^19.2.7",
"@types/react-dom": "^19.2.3",

@@ -232,9 +236,10 @@
"@types/swagger-ui-express": "^4.1.8",
"@types/tinycolor2": "^1",
"@types/turndown": "^5.0.5",
"@types/unist": "3.0.3",
"@types/uuid": "^10.0.0",
"@types/word-extractor": "^1",
"@typescript/native-preview": "latest",
"@typescript/native-preview": "7.0.0-dev.20250915.1",
"@uiw/codemirror-extensions-langs": "^4.25.1",
"@uiw/codemirror-themes-all": "^4.25.1",
"@uiw/react-codemirror": "^4.25.1",

@@ -246,12 +251,16 @@
"@viz-js/lang-dot": "^1.0.5",
"@viz-js/viz": "^3.14.0",
"@xyflow/react": "^12.4.4",
"adm-zip": "0.4.16",
"ai": "^5.0.98",
"antd": "patch:antd@npm%3A5.27.0#~/.yarn/patches/antd-npm-5.27.0-aa91c36546.patch",
"antd": "5.27.0",
"archiver": "^7.0.1",
"async-mutex": "^0.5.0",
"axios": "^1.7.3",
"bonjour-service": "1.3.0",
"browser-image-compression": "^2.0.2",
"builder-util-runtime": "9.5.0",
"chalk": "4.1.2",
"chardet": "^2.1.0",
"check-disk-space": "3.4.0",
"cheerio": "^1.1.2",

@@ -260,8 +269,10 @@
"cli-progress": "^3.12.0",
"clsx": "^2.1.1",
"code-inspector-plugin": "^0.20.14",
"codemirror-lang-mermaid": "0.5.0",
"color": "^5.0.0",
"concurrently": "^9.2.1",
"cors": "2.8.5",
"country-flag-emoji-polyfill": "0.1.8",
"dayjs": "^1.11.11",
"dexie": "^4.0.8",

@@ -269,6 +280,7 @@
"diff": "^8.0.2",
"docx": "^9.0.2",
"dompurify": "^3.2.6",
"dotenv": "16.6.1",
"dotenv-cli": "^7.4.2",
"drizzle-kit": "^0.31.4",
"drizzle-orm": "^0.44.5",

@@ -277,12 +289,13 @@
"electron-devtools-installer": "^3.2.0",
"electron-reload": "^2.0.0-alpha.1",
"electron-store": "^8.2.0",
"electron-updater": "patch:electron-updater@npm%3A6.7.0#~/.yarn/patches/electron-updater-npm-6.7.0-47b11bb0d4.patch",
"electron-updater": "6.7.0",
"electron-vite": "5.0.0",
"electron-window-state": "^5.0.3",
"emittery": "^1.0.3",
"emoji-picker-element": "^1.22.1",
"epub": "patch:epub@npm%3A1.3.0#~/.yarn/patches/epub-npm-1.3.0-8325494ffe.patch",
"emoji-picker-element-data": "1",
"epub": "1.3.0",
"eslint": "^9.22.0",
"eslint-plugin-import-zod": "^1.2.0",
"eslint-plugin-oxlint": "^1.15.0",

@@ -293,6 +306,7 @@
"fast-diff": "^1.3.0",
"fast-xml-parser": "^5.2.0",
"fetch-socks": "1.3.2",
"form-data": "4.0.4",
"framer-motion": "^12.23.12",
"franc-min": "^6.2.0",
"fs-extra": "^11.2.0",

@@ -309,6 +323,11 @@
"isbinaryfile": "5.0.4",
"jaison": "^2.0.2",
"jest-styled-components": "^7.2.0",
"js-base64": "3.7.7",
"js-yaml": "4.1.0",
"json-schema": "0.4.0",
"katex": "0.16.22",
"ky": "1.8.1",
"linguist-languages": "^8.1.0",
"lint-staged": "^15.5.0",
"lodash": "^4.17.21",

@@ -316,19 +335,27 @@
"lucide-react": "^0.525.0",
"macos-release": "^3.4.0",
"markdown-it": "^14.1.0",
"md5": "2.3.0",
"mermaid": "^11.10.1",
"mime": "^4.0.4",
"mime-types": "^3.0.1",
"motion": "^12.10.5",
"nanoid": "3.3.11",
"notion-helper": "^1.3.22",
"npx-scope-finder": "^1.2.0",
"ollama-ai-provider-v2": "patch:ollama-ai-provider-v2@npm%3A1.5.5#~/.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch",
|
||||
"ollama-ai-provider-v2": "1.5.5",
|
||||
"open": "^8.4.2",
|
||||
"oxlint": "^1.22.0",
|
||||
"oxlint-tsgolint": "^0.2.0",
|
||||
"p-queue": "^8.1.0",
|
||||
"pako": "1.0.11",
|
||||
"pdf-lib": "^1.17.1",
|
||||
"pdf-parse": "^1.1.1",
|
||||
"prosemirror-model": "1.25.2",
|
||||
"proxy-agent": "^6.5.0",
|
||||
"rc-input": "1.8.0",
|
||||
"rc-select": "14.16.6",
|
||||
"rc-virtual-list": "3.18.6",
|
||||
"react": "^19.2.0",
|
||||
"react-dom": "^19.2.0",
|
||||
"react-error-boundary": "^6.0.0",
|
||||
@ -339,8 +366,6 @@
|
||||
"react-markdown": "^10.1.0",
|
||||
"react-player": "^3.3.1",
|
||||
"react-redux": "^9.1.2",
|
||||
"react-router": "6",
|
||||
"react-router-dom": "6",
|
||||
"react-spinners": "^0.14.1",
|
||||
"react-transition-group": "^4.4.5",
|
||||
"redux": "^5.0.1",
|
||||
@ -355,8 +380,11 @@
|
||||
"remark-gfm": "^4.0.1",
|
||||
"remark-github-blockquote-alert": "^2.0.0",
|
||||
"remark-math": "^6.0.0",
|
||||
"remark-parse": "11.0.0",
|
||||
"remark-stringify": "11.0.0",
|
||||
"remove-markdown": "^0.6.2",
|
||||
"rollup-plugin-visualizer": "^5.12.0",
|
||||
"semver": "7.7.1",
|
||||
"shiki": "^3.12.0",
|
||||
"strict-url-sanitise": "^0.0.1",
|
||||
"string-width": "^7.2.0",
|
||||
@ -371,9 +399,10 @@
|
||||
"tsx": "^4.20.3",
|
||||
"turndown-plugin-gfm": "^1.0.2",
|
||||
"tw-animate-css": "^1.3.8",
|
||||
"typescript": "~5.8.2",
|
||||
"typescript": "~5.8.3",
|
||||
"undici": "6.21.2",
|
||||
"unified": "^11.0.5",
|
||||
"unist-util-visit": "5.0.0",
|
||||
"uuid": "^13.0.0",
|
||||
"vite": "npm:rolldown-vite@7.3.0",
|
||||
"vitest": "^3.2.4",
|
||||
@ -388,46 +417,65 @@
|
||||
"zipread": "^1.3.3",
|
||||
"zod": "^4.1.5"
|
||||
},
|
||||
"resolutions": {
|
||||
"react": "^19.2.0",
|
||||
"react-dom": "^19.2.0",
|
||||
"@smithy/types": "4.7.1",
|
||||
"@codemirror/language": "6.11.3",
|
||||
"@codemirror/lint": "6.8.5",
|
||||
"@codemirror/view": "6.38.1",
|
||||
"@langchain/core@npm:^0.3.26": "patch:@langchain/core@npm%3A1.0.2#~/.yarn/patches/@langchain-core-npm-1.0.2-183ef83fe4.patch",
|
||||
"atomically@npm:^1.7.0": "patch:atomically@npm%3A1.7.0#~/.yarn/patches/atomically-npm-1.7.0-e742e5293b.patch",
|
||||
"esbuild": "^0.25.0",
|
||||
"file-stream-rotator@npm:^0.6.1": "patch:file-stream-rotator@npm%3A0.6.1#~/.yarn/patches/file-stream-rotator-npm-0.6.1-eab45fb13d.patch",
|
||||
"libsql@npm:^0.4.4": "patch:libsql@npm%3A0.4.7#~/.yarn/patches/libsql-npm-0.4.7-444e260fb1.patch",
|
||||
"node-abi": "4.24.0",
|
||||
"openai@npm:^4.77.0": "npm:@cherrystudio/openai@6.5.0",
|
||||
"openai@npm:^4.87.3": "npm:@cherrystudio/openai@6.5.0",
|
||||
"pdf-parse@npm:1.1.1": "patch:pdf-parse@npm%3A1.1.1#~/.yarn/patches/pdf-parse-npm-1.1.1-04a6109b2a.patch",
|
||||
"pkce-challenge@npm:^4.1.0": "patch:pkce-challenge@npm%3A4.1.0#~/.yarn/patches/pkce-challenge-npm-4.1.0-fbc51695a3.patch",
|
||||
"tar-fs": "^2.1.4",
|
||||
"undici": "6.21.2",
|
||||
"vite": "npm:rolldown-vite@7.3.0",
|
||||
"tesseract.js@npm:*": "patch:tesseract.js@npm%3A6.0.1#~/.yarn/patches/tesseract.js-npm-6.0.1-2562a7e46d.patch",
|
||||
"@ai-sdk/openai@npm:^2.0.52": "patch:@ai-sdk/openai@npm%3A2.0.52#~/.yarn/patches/@ai-sdk-openai-npm-2.0.52-b36d949c76.patch",
|
||||
"@img/sharp-darwin-arm64": "0.34.3",
|
||||
"@img/sharp-darwin-x64": "0.34.3",
|
||||
"@img/sharp-linux-arm": "0.34.3",
|
||||
"@img/sharp-linux-arm64": "0.34.3",
|
||||
"@img/sharp-linux-x64": "0.34.3",
|
||||
"@img/sharp-win32-x64": "0.34.3",
|
||||
"openai@npm:5.12.2": "npm:@cherrystudio/openai@6.5.0",
|
||||
"@langchain/openai@npm:>=0.1.0 <0.6.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
|
||||
"@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
|
||||
"@langchain/openai@npm:>=0.2.0 <0.7.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
|
||||
"@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
|
||||
"@ai-sdk/google@npm:^2.0.40": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
|
||||
"@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
|
||||
"@ai-sdk/google@npm:2.0.49": "patch:@ai-sdk/google@npm%3A2.0.49#~/.yarn/patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch",
|
||||
"@ai-sdk/openai-compatible@npm:1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.28#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.28-5705188855.patch",
|
||||
"@ai-sdk/openai-compatible@npm:^1.0.19": "patch:@ai-sdk/openai-compatible@npm%3A1.0.28#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.28-5705188855.patch"
|
||||
"pnpm": {
|
||||
"overrides": {
|
||||
"@smithy/types": "4.7.1",
|
||||
"@codemirror/language": "6.11.3",
|
||||
"@codemirror/lint": "6.8.5",
|
||||
"@codemirror/view": "6.38.1",
|
||||
"esbuild": "^0.25.0",
|
||||
"node-abi": "4.24.0",
|
||||
"openai": "npm:@cherrystudio/openai@6.15.0",
|
||||
"tar-fs": "^2.1.4",
|
||||
"undici": "6.21.2",
|
||||
"vite": "npm:rolldown-vite@7.3.0",
|
||||
"@img/sharp-darwin-arm64": "0.34.3",
|
||||
"@img/sharp-darwin-x64": "0.34.3",
|
||||
"@img/sharp-linux-arm": "0.34.3",
|
||||
"@img/sharp-linux-arm64": "0.34.3",
|
||||
"@img/sharp-linux-x64": "0.34.3",
|
||||
"@img/sharp-win32-x64": "0.34.3",
|
||||
"@langchain/core": "1.0.2",
|
||||
"@ai-sdk/openai-compatible@1.0.27": "1.0.28"
|
||||
},
|
||||
"patchedDependencies": {
|
||||
"@napi-rs/system-ocr@1.0.2": "patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch",
|
||||
"tesseract.js@6.0.1": "patches/tesseract.js-npm-6.0.1-2562a7e46d.patch",
|
||||
"@ai-sdk/google@2.0.49": "patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch",
|
||||
"@ai-sdk/openai@2.0.85": "patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
|
||||
"@anthropic-ai/vertex-sdk@0.11.4": "patches/@anthropic-ai-vertex-sdk-npm-0.11.4-c19cb41edb.patch",
|
||||
"@google/genai@1.0.1": "patches/@google-genai-npm-1.0.1-e26f0f9af7.patch",
|
||||
"@langchain/core@1.0.2": "patches/@langchain-core-npm-1.0.2-183ef83fe4.patch",
|
||||
"@langchain/openai@1.0.0": "patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
|
||||
"@tiptap/extension-drag-handle@3.2.0": "patches/@tiptap-extension-drag-handle-npm-3.2.0-5a9ebff7c9.patch",
|
||||
"antd@5.27.0": "patches/antd-npm-5.27.0-aa91c36546.patch",
|
||||
"electron-updater@6.7.0": "patches/electron-updater-npm-6.7.0-47b11bb0d4.patch",
|
||||
"epub@1.3.0": "patches/epub-npm-1.3.0-8325494ffe.patch",
|
||||
"ollama-ai-provider-v2@1.5.5": "patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch",
|
||||
"atomically@1.7.0": "patches/atomically-npm-1.7.0-e742e5293b.patch",
|
||||
"file-stream-rotator@0.6.1": "patches/file-stream-rotator-npm-0.6.1-eab45fb13d.patch",
|
||||
"libsql@0.4.7": "patches/libsql-npm-0.4.7-444e260fb1.patch",
|
||||
"pdf-parse@1.1.1": "patches/pdf-parse-npm-1.1.1-04a6109b2a.patch",
|
||||
"@ai-sdk/openai-compatible@1.0.28": "patches/@ai-sdk-openai-compatible-npm-1.0.28-5705188855.patch"
|
||||
},
|
||||
"onlyBuiltDependencies": [
|
||||
"@kangfenmao/keyv-storage",
|
||||
"@paymoapp/electron-shutdown-handler",
|
||||
"@scarf/scarf",
|
||||
"@swc/core",
|
||||
"electron",
|
||||
"electron-winstaller",
|
||||
"esbuild",
|
||||
"msw",
|
||||
"protobufjs",
|
||||
"registry-js",
|
||||
"selection-hook",
|
||||
"sharp",
|
||||
"tesseract.js",
|
||||
"zipfile"
|
||||
]
|
||||
},
|
||||
"packageManager": "yarn@4.9.1",
|
||||
"packageManager": "pnpm@10.27.0",
|
||||
"lint-staged": {
|
||||
"*.{js,jsx,ts,tsx,cjs,mjs,cts,mts}": [
|
||||
"biome format --write --no-errors-on-unmatched",
|
||||
|
||||
@ -8,7 +8,7 @@ It exposes the CherryIN OpenAI-compatible entrypoints and dynamically routes Ant
```bash
npm install ai @cherrystudio/ai-sdk-provider @ai-sdk/anthropic @ai-sdk/google @ai-sdk/openai
# or
yarn add ai @cherrystudio/ai-sdk-provider @ai-sdk/anthropic @ai-sdk/google @ai-sdk/openai
pnpm add ai @cherrystudio/ai-sdk-provider @ai-sdk/anthropic @ai-sdk/google @ai-sdk/openai
```

> **Note**: This package requires peer dependencies `ai`, `@ai-sdk/anthropic`, `@ai-sdk/google`, and `@ai-sdk/openai` to be installed.

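A quick-start sketch may help here. The provider's exported factory name and option shape are not shown in this excerpt, so `createCherryIn` and its `apiKey` option below are assumptions for illustration; only `generateText` from `ai` is a known API.

```ts
// Minimal sketch, not the package's documented API: `createCherryIn` and its
// options are assumed names for illustration only.
import { generateText } from 'ai'
import { createCherryIn } from '@cherrystudio/ai-sdk-provider' // hypothetical export name

const cherryin = createCherryIn({ apiKey: process.env.CHERRYIN_API_KEY }) // assumed option shape

const { text } = await generateText({
  model: cherryin('claude-sonnet-4-5'), // model ids depend on the CherryIN routing rules
  prompt: 'Hello from Cherry Studio!'
})
console.log(text)
```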
@ -41,7 +41,7 @@
|
||||
"ai": "^5.0.26"
|
||||
},
|
||||
"dependencies": {
|
||||
"@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.28#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.28-5705188855.patch",
|
||||
"@ai-sdk/openai-compatible": "1.0.28",
|
||||
"@ai-sdk/provider": "^2.0.0",
|
||||
"@ai-sdk/provider-utils": "^3.0.17"
|
||||
},
|
||||
|
||||
@ -42,7 +42,7 @@
|
||||
"@ai-sdk/anthropic": "^2.0.49",
|
||||
"@ai-sdk/azure": "^2.0.87",
|
||||
"@ai-sdk/deepseek": "^1.0.31",
|
||||
"@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.28#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.28-5705188855.patch",
|
||||
"@ai-sdk/openai-compatible": "1.0.28",
|
||||
"@ai-sdk/provider": "^2.0.0",
|
||||
"@ai-sdk/provider-utils": "^3.0.17",
|
||||
"@ai-sdk/xai": "^2.0.36",
|
||||
|
||||
@ -21,9 +21,6 @@ const TOOL_USE_TAG_CONFIG: TagConfig = {
|
||||
separator: '\n'
|
||||
}
|
||||
|
||||
/**
|
||||
* Default system prompt template
|
||||
*/
|
||||
export const DEFAULT_SYSTEM_PROMPT = `In this environment you have access to a set of tools you can use to answer the user's question. \
|
||||
You can use one or more tools per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
|
||||
|
||||
@ -38,10 +35,16 @@ Tool use is formatted using XML-style tags. The tool name is enclosed in opening
|
||||
|
||||
The tool name should be the exact name of the tool you are using, and the arguments should be a JSON object containing the parameters required by that tool. For example:
|
||||
<tool_use>
|
||||
<name>python_interpreter</name>
|
||||
<arguments>{"code": "5 + 3 + 1294.678"}</arguments>
|
||||
<name>search</name>
|
||||
<arguments>{ "query": "browser,fetch" }</arguments>
|
||||
</tool_use>
|
||||
|
||||
<tool_use>
|
||||
<name>exec</name>
|
||||
<arguments>{ "code": "const page = await CherryBrowser_fetch({ url: "https://example.com" })\nreturn page" }</arguments>
|
||||
</tool_use>
|
||||
|
||||
|
||||
The user will respond with the result of the tool use, which should be formatted as follows:
|
||||
|
||||
<tool_use_result>
|
||||
@ -59,13 +62,6 @@ For example, if the result of the tool use is an image file, you can use it in t
|
||||
|
||||
Always adhere to this format for the tool use to ensure proper parsing and execution.
|
||||
|
||||
## Tool Use Examples
|
||||
{{ TOOL_USE_EXAMPLES }}
|
||||
|
||||
## Tool Use Available Tools
|
||||
Above example were using notional tools that might not exist for you. You only have access to these tools:
|
||||
{{ AVAILABLE_TOOLS }}
|
||||
|
||||
## Tool Use Rules
|
||||
Here are the rules you should always follow to solve your task:
|
||||
1. Always use the right arguments for the tools. Never use variable names as the action arguments, use the value instead.
|
||||
@ -74,6 +70,8 @@ Here are the rules you should always follow to solve your task:
|
||||
4. Never re-do a tool call that you previously did with the exact same parameters.
|
||||
5. For tool use, MAKE SURE use XML tag format as shown in the examples above. Do not use any other format.
|
||||
|
||||
{{ TOOLS_INFO }}
|
||||
|
||||
## Response rules
|
||||
|
||||
Respond in the language of the user's query, unless the user instructions specify additional requirements for the language to be used.
|
||||
@ -154,7 +152,8 @@ User: <tool_use_result>
|
||||
<name>search</name>
|
||||
<result>26 million (2019)</result>
|
||||
</tool_use_result>
|
||||
Assistant: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.`
|
||||
|
||||
A: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.`
|
||||
|
||||
/**
 * Build the available-tools section (extracted from Cherry Studio)
@ -184,13 +183,30 @@ ${result}
/**
 * Default system prompt builder (extracted from Cherry Studio)
 */
function defaultBuildSystemPrompt(userSystemPrompt: string, tools: ToolSet): string {
function defaultBuildSystemPrompt(userSystemPrompt: string, tools: ToolSet, mcpMode?: string): string {
  const availableTools = buildAvailableTools(tools)
  if (availableTools === null) return userSystemPrompt

  const fullPrompt = DEFAULT_SYSTEM_PROMPT.replace('{{ TOOL_USE_EXAMPLES }}', DEFAULT_TOOL_USE_EXAMPLES)
  if (mcpMode == 'auto') {
    return DEFAULT_SYSTEM_PROMPT.replace('{{ TOOLS_INFO }}', '').replace(
      '{{ USER_SYSTEM_PROMPT }}',
      userSystemPrompt || ''
    )
  }
  const toolsInfo = `
## Tool Use Examples
{{ TOOL_USE_EXAMPLES }}

## Tool Use Available Tools
Above example were using notional tools that might not exist for you. You only have access to these tools:
{{ AVAILABLE_TOOLS }}`
    .replace('{{ TOOL_USE_EXAMPLES }}', DEFAULT_TOOL_USE_EXAMPLES)
    .replace('{{ AVAILABLE_TOOLS }}', availableTools)
    .replace('{{ USER_SYSTEM_PROMPT }}', userSystemPrompt || '')

  const fullPrompt = DEFAULT_SYSTEM_PROMPT.replace('{{ TOOLS_INFO }}', toolsInfo).replace(
    '{{ USER_SYSTEM_PROMPT }}',
    userSystemPrompt || ''
  )

  return fullPrompt
}
@ -223,7 +239,17 @@ function defaultParseToolUse(content: string, tools: ToolSet): { results: ToolUs
  // Find all tool use blocks
  while ((match = toolUsePattern.exec(contentToProcess)) !== null) {
    const fullMatch = match[0]
    const toolName = match[2].trim()
    let toolName = match[2].trim()
    switch (toolName.toLowerCase()) {
      case 'search':
        toolName = 'mcp__CherryHub__search'
        break
      case 'exec':
        toolName = 'mcp__CherryHub__exec'
        break
      default:
        break
    }
    const toolArgs = match[4].trim()

    // Try to parse the arguments as JSON
@ -255,7 +281,12 @@ function defaultParseToolUse(content: string, tools: ToolUs
}

export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => {
  const { enabled = true, buildSystemPrompt = defaultBuildSystemPrompt, parseToolUse = defaultParseToolUse } = config
  const {
    enabled = true,
    buildSystemPrompt = defaultBuildSystemPrompt,
    parseToolUse = defaultParseToolUse,
    mcpMode
  } = config

  return definePlugin({
    name: 'built-in:prompt-tool-use',
@ -285,7 +316,7 @@ export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => {

      // Build the system prompt (only includes non provider-defined tools)
      const userSystemPrompt = typeof params.system === 'string' ? params.system : ''
      const systemPrompt = buildSystemPrompt(userSystemPrompt, promptTools)
      const systemPrompt = buildSystemPrompt(userSystemPrompt, promptTools, mcpMode)
      let systemMessage: string | null = systemPrompt
      if (config.createSystemMessage) {
        // 🎯 If the user provided a custom handler, use it

@ -23,6 +23,7 @@ export interface PromptToolUseConfig extends BaseToolUsePluginConfig {
  // Custom tool-parsing function (optional; a default implementation is provided)
  parseToolUse?: (content: string, tools: ToolSet) => { results: ToolUseResult[]; content: string }
  createSystemMessage?: (systemPrompt: string, originalParams: any, context: AiRequestContext) => string | null
  mcpMode?: string
}

/**

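// A minimal usage sketch for the new option; only createPromptToolUsePlugin,
// PromptToolUseConfig and mcpMode come from this file, the rest is assumed wiring.
// With mcpMode: 'auto' the built prompt drops the embedded tool catalogue and
// examples ({{ TOOLS_INFO }} is replaced with ''); any other value keeps them.
const promptToolUse = createPromptToolUsePlugin({
  mcpMode: 'auto',
  createSystemMessage: (systemPrompt) => systemPrompt // optional hook, pass-through here
})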
@ -68,8 +68,8 @@
|
||||
],
|
||||
"devDependencies": {
|
||||
"@biomejs/biome": "2.2.4",
|
||||
"@tiptap/core": "^3.2.0",
|
||||
"@tiptap/pm": "^3.2.0",
|
||||
"@tiptap/core": "3.2.0",
|
||||
"@tiptap/pm": "3.2.0",
|
||||
"eslint": "^9.22.0",
|
||||
"eslint-plugin-react-hooks": "^5.2.0",
|
||||
"eslint-plugin-simple-import-sort": "^12.1.1",
|
||||
@ -89,5 +89,5 @@
|
||||
"build": "tsdown",
|
||||
"lint": "biome format ./src/ --write && eslint --fix ./src/"
|
||||
},
|
||||
"packageManager": "yarn@4.9.1"
|
||||
"packageManager": "pnpm@10.27.0"
|
||||
}
|
||||
|
||||
138
packages/shared/__tests__/utils.test.ts
Normal file
@ -0,0 +1,138 @@
|
||||
import { describe, expect, it } from 'vitest'
|
||||
|
||||
import { isBase64ImageDataUrl, isDataUrl, parseDataUrl } from '../utils'
|
||||
|
||||
describe('parseDataUrl', () => {
|
||||
it('parses a standard base64 image data URL', () => {
|
||||
const result = parseDataUrl('data:image/png;base64,iVBORw0KGgo=')
|
||||
expect(result).toEqual({
|
||||
mediaType: 'image/png',
|
||||
isBase64: true,
|
||||
data: 'iVBORw0KGgo='
|
||||
})
|
||||
})
|
||||
|
||||
it('parses a base64 data URL with additional parameters', () => {
|
||||
const result = parseDataUrl('data:image/jpeg;name=foo;base64,/9j/4AAQ')
|
||||
expect(result).toEqual({
|
||||
mediaType: 'image/jpeg',
|
||||
isBase64: true,
|
||||
data: '/9j/4AAQ'
|
||||
})
|
||||
})
|
||||
|
||||
it('parses a plain text data URL (non-base64)', () => {
|
||||
const result = parseDataUrl('data:text/plain,Hello%20World')
|
||||
expect(result).toEqual({
|
||||
mediaType: 'text/plain',
|
||||
isBase64: false,
|
||||
data: 'Hello%20World'
|
||||
})
|
||||
})
|
||||
|
||||
it('parses a data URL with empty media type', () => {
|
||||
const result = parseDataUrl('data:;base64,SGVsbG8=')
|
||||
expect(result).toEqual({
|
||||
mediaType: undefined,
|
||||
isBase64: true,
|
||||
data: 'SGVsbG8='
|
||||
})
|
||||
})
|
||||
|
||||
it('returns null for non-data URLs', () => {
|
||||
const result = parseDataUrl('https://example.com/image.png')
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it('returns null for malformed data URL without comma', () => {
|
||||
const result = parseDataUrl('data:image/png;base64')
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it('handles empty string', () => {
|
||||
const result = parseDataUrl('')
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it('handles large base64 data without performance issues', () => {
|
||||
// Simulate a 4K image base64 string (about 1MB)
|
||||
const largeData = 'A'.repeat(1024 * 1024)
|
||||
const dataUrl = `data:image/png;base64,${largeData}`
|
||||
|
||||
const start = performance.now()
|
||||
const result = parseDataUrl(dataUrl)
|
||||
const duration = performance.now() - start
|
||||
|
||||
expect(result).not.toBeNull()
|
||||
expect(result?.mediaType).toBe('image/png')
|
||||
expect(result?.isBase64).toBe(true)
|
||||
expect(result?.data).toBe(largeData)
|
||||
// Should complete in under 10ms (string operations are fast)
|
||||
expect(duration).toBeLessThan(10)
|
||||
})
|
||||
|
||||
it('parses SVG data URL', () => {
|
||||
const result = parseDataUrl('data:image/svg+xml;base64,PHN2Zz4=')
|
||||
expect(result).toEqual({
|
||||
mediaType: 'image/svg+xml',
|
||||
isBase64: true,
|
||||
data: 'PHN2Zz4='
|
||||
})
|
||||
})
|
||||
|
||||
it('parses JSON data URL', () => {
|
||||
const result = parseDataUrl('data:application/json,{"key":"value"}')
|
||||
expect(result).toEqual({
|
||||
mediaType: 'application/json',
|
||||
isBase64: false,
|
||||
data: '{"key":"value"}'
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('isDataUrl', () => {
|
||||
it('returns true for valid data URLs', () => {
|
||||
expect(isDataUrl('data:image/png;base64,ABC')).toBe(true)
|
||||
expect(isDataUrl('data:text/plain,hello')).toBe(true)
|
||||
expect(isDataUrl('data:,simple')).toBe(true)
|
||||
})
|
||||
|
||||
it('returns false for non-data URLs', () => {
|
||||
expect(isDataUrl('https://example.com')).toBe(false)
|
||||
expect(isDataUrl('file:///path/to/file')).toBe(false)
|
||||
expect(isDataUrl('')).toBe(false)
|
||||
})
|
||||
|
||||
it('returns false for malformed data URLs', () => {
|
||||
expect(isDataUrl('data:')).toBe(false)
|
||||
expect(isDataUrl('data:image/png')).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('isBase64ImageDataUrl', () => {
|
||||
it('returns true for base64 image data URLs', () => {
|
||||
expect(isBase64ImageDataUrl('data:image/png;base64,ABC')).toBe(true)
|
||||
expect(isBase64ImageDataUrl('data:image/jpeg;base64,/9j/')).toBe(true)
|
||||
expect(isBase64ImageDataUrl('data:image/gif;base64,R0lG')).toBe(true)
|
||||
expect(isBase64ImageDataUrl('data:image/webp;base64,UklG')).toBe(true)
|
||||
})
|
||||
|
||||
it('returns false for non-base64 image data URLs', () => {
|
||||
expect(isBase64ImageDataUrl('data:image/svg+xml,<svg></svg>')).toBe(false)
|
||||
})
|
||||
|
||||
it('returns false for non-image data URLs', () => {
|
||||
expect(isBase64ImageDataUrl('data:text/plain;base64,SGVsbG8=')).toBe(false)
|
||||
expect(isBase64ImageDataUrl('data:application/json,{}')).toBe(false)
|
||||
})
|
||||
|
||||
it('returns false for regular URLs', () => {
|
||||
expect(isBase64ImageDataUrl('https://example.com/image.png')).toBe(false)
|
||||
expect(isBase64ImageDataUrl('file:///image.png')).toBe(false)
|
||||
})
|
||||
|
||||
it('returns false for malformed data URLs', () => {
|
||||
expect(isBase64ImageDataUrl('data:image/png')).toBe(false)
|
||||
expect(isBase64ImageDataUrl('')).toBe(false)
|
||||
})
|
||||
})
|
||||
@ -4,7 +4,7 @@
|
||||
*
|
||||
* ⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️
|
||||
* THIS FILE IS AUTOMATICALLY GENERATED BY A SCRIPT. DO NOT EDIT IT MANUALLY!
|
||||
* Run `yarn update:languages` to update this file.
|
||||
* Run `pnpm update:languages` to update this file.
|
||||
* ⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️
|
||||
*
|
||||
*/
|
||||
|
||||
4
packages/shared/data/cache/cacheSchemas.ts
vendored
@ -225,11 +225,11 @@ export const DefaultSharedCache: SharedCacheSchema = {
|
||||
* This ensures type safety and prevents key conflicts
|
||||
*/
|
||||
export type RendererPersistCacheSchema = {
|
||||
'example_scope.example_key': string
|
||||
'ui.tab.state': CacheValueTypes.TabsState
|
||||
}
|
||||
|
||||
export const DefaultRendererPersistCache: RendererPersistCacheSchema = {
|
||||
'example_scope.example_key': 'example default value'
|
||||
'ui.tab.state': { tabs: [], activeTabId: '' }
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
|
||||
35
packages/shared/data/cache/cacheValueTypes.ts
vendored
@ -17,3 +17,38 @@ export type CacheActiveSearches = Record<string, WebSearchStatus>
|
||||
// The actual type checking will be done at runtime by the cache system
|
||||
export type CacheMinAppType = MinAppType
|
||||
export type CacheTopic = Topic
|
||||
|
||||
/**
 * Tab type for browser-like tabs
 *
 * - 'route': Internal app routes rendered via MemoryRouter
 * - 'webview': External web content rendered via Electron webview
 */
export type TabType = 'route' | 'webview'

/**
 * Tab saved state for hibernation recovery
 */
export interface TabSavedState {
  scrollPosition?: number
  // Other draft fields can be added here as needed
}

export interface Tab {
  id: string
  type: TabType
  url: string
  title: string
  icon?: string
  metadata?: Record<string, unknown>
  // LRU fields
  lastAccessTime?: number // updated on open/switch
  isDormant?: boolean // whether the tab has been hibernated
  isPinned?: boolean // whether the tab is pinned (exempt from LRU)
  savedState?: TabSavedState // state saved before hibernation
}

export interface TabsState {
  tabs: Tab[]
  activeTabId: string
}

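// A minimal sketch, assuming an LRU hibernation policy over the fields above; the
// maxActiveTabs threshold and the pick-oldest rule are illustrative assumptions.
function pickTabToHibernate(state: TabsState, maxActiveTabs: number): Tab | undefined {
  const active = state.tabs.filter((t) => !t.isDormant)
  if (active.length <= maxActiveTabs) return undefined
  return active
    .filter((t) => !t.isPinned && t.id !== state.activeTabId) // pinned and active tabs are exempt
    .sort((a, b) => (a.lastAccessTime ?? 0) - (b.lastAccessTime ?? 0))[0] // least recently used first
}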
116
packages/shared/mcp.ts
Normal file
@ -0,0 +1,116 @@
|
||||
/**
|
||||
* Convert a string to camelCase, ensuring it's a valid JavaScript identifier.
|
||||
*
|
||||
* - Normalizes to lowercase first, then capitalizes word boundaries
|
||||
* - Non-alphanumeric characters are treated as word separators
|
||||
* - Non-ASCII characters are dropped (ASCII-only output)
|
||||
* - If result starts with a digit, prefixes with underscore
|
||||
*
|
||||
* @example
|
||||
* toCamelCase('my-server') // 'myServer'
|
||||
* toCamelCase('MY_SERVER') // 'myServer'
|
||||
* toCamelCase('123tool') // '_123tool'
|
||||
*/
|
||||
export function toCamelCase(str: string): string {
|
||||
let result = str
|
||||
.trim()
|
||||
.toLowerCase()
|
||||
.replace(/[^a-z0-9]+(.)/g, (_, char) => char.toUpperCase())
|
||||
.replace(/[^a-zA-Z0-9]/g, '')
|
||||
|
||||
if (result && !/^[a-zA-Z_]/.test(result)) {
|
||||
result = '_' + result
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
export type McpToolNameOptions = {
|
||||
/** Prefix added before the name (e.g., 'mcp__'). Must be JS-identifier-safe. */
|
||||
prefix?: string
|
||||
/** Delimiter between server and tool parts (e.g., '_' or '__'). Must be JS-identifier-safe. */
|
||||
delimiter?: string
|
||||
/** Maximum length of the final name. Suffix numbers for uniqueness are included in this limit. */
|
||||
maxLength?: number
|
||||
/** Mutable Set for collision detection. The final name will be added to this Set. */
|
||||
existingNames?: Set<string>
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a valid JavaScript function name from server and tool names.
|
||||
* Uses camelCase for both parts.
|
||||
*
|
||||
* @param serverName - The MCP server name (optional)
|
||||
* @param toolName - The tool name
|
||||
* @param options - Configuration options
|
||||
* @returns A valid JS identifier
|
||||
*/
|
||||
export function buildMcpToolName(
|
||||
serverName: string | undefined,
|
||||
toolName: string,
|
||||
options: McpToolNameOptions = {}
|
||||
): string {
|
||||
const { prefix = '', delimiter = '_', maxLength, existingNames } = options
|
||||
|
||||
const serverPart = serverName ? toCamelCase(serverName) : ''
|
||||
const toolPart = toCamelCase(toolName)
|
||||
const baseName = serverPart ? `${prefix}${serverPart}${delimiter}${toolPart}` : `${prefix}${toolPart}`
|
||||
|
||||
if (!existingNames) {
|
||||
return maxLength ? truncateToLength(baseName, maxLength) : baseName
|
||||
}
|
||||
|
||||
let name = maxLength ? truncateToLength(baseName, maxLength) : baseName
|
||||
let counter = 1
|
||||
|
||||
while (existingNames.has(name)) {
|
||||
const suffix = String(counter)
|
||||
const truncatedBase = maxLength ? truncateToLength(baseName, maxLength - suffix.length) : baseName
|
||||
name = `${truncatedBase}${suffix}`
|
||||
counter++
|
||||
}
|
||||
|
||||
existingNames.add(name)
|
||||
return name
|
||||
}
|
||||
|
||||
function truncateToLength(str: string, maxLength: number): string {
|
||||
if (str.length <= maxLength) {
|
||||
return str
|
||||
}
|
||||
return str.slice(0, maxLength).replace(/_+$/, '')
|
||||
}
|
||||
|
||||
/**
|
||||
* Generate a unique function name from server name and tool name.
|
||||
* Format: serverName_toolName (camelCase)
|
||||
*
|
||||
* @example
|
||||
* generateMcpToolFunctionName('github', 'search_issues') // 'github_searchIssues'
|
||||
*/
|
||||
export function generateMcpToolFunctionName(
|
||||
serverName: string | undefined,
|
||||
toolName: string,
|
||||
existingNames?: Set<string>
|
||||
): string {
|
||||
return buildMcpToolName(serverName, toolName, { existingNames })
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds a valid JavaScript function name for MCP tool calls.
|
||||
* Format: mcp__{serverName}__{toolName}
|
||||
*
|
||||
* @param serverName - The MCP server name
|
||||
* @param toolName - The tool name from the server
|
||||
* @returns A valid JS identifier in format mcp__{server}__{tool}, max 63 chars
|
||||
*
|
||||
* @example
|
||||
* buildFunctionCallToolName('github', 'search_issues') // 'mcp__github__searchIssues'
|
||||
*/
|
||||
export function buildFunctionCallToolName(serverName: string, toolName: string): string {
|
||||
return buildMcpToolName(serverName, toolName, {
|
||||
prefix: 'mcp__',
|
||||
delimiter: '__',
|
||||
maxLength: 63
|
||||
})
|
||||
}
|
||||
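// A minimal sketch of collision-safe registration, assuming a caller that maps
// generated names back to their MCP server/tool pair; buildMcpToolName is from
// the file above, the registry and the sample inputs are illustrative.
const existingNames = new Set<string>()
const registry = new Map<string, { server: string; tool: string }>()
for (const { server, tool } of [
  { server: 'github', tool: 'search_issues' },
  { server: 'github', tool: 'search-issues' } // camelCases to the same name, so it gets a numeric suffix
]) {
  const fnName = buildMcpToolName(server, tool, { prefix: 'mcp__', delimiter: '__', maxLength: 63, existingNames })
  registry.set(fnName, { server, tool }) // 'mcp__github__searchIssues', then 'mcp__github__searchIssues1'
}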
@ -88,3 +88,81 @@ const TRAILING_VERSION_REGEX = /\/v\d+(?:alpha|beta)?\/?$/i
|
||||
export function withoutTrailingApiVersion(url: string): string {
|
||||
return url.replace(TRAILING_VERSION_REGEX, '')
|
||||
}
|
||||
|
||||
export interface DataUrlParts {
|
||||
/** The media type (e.g., 'image/png', 'text/plain') */
|
||||
mediaType?: string
|
||||
/** Whether the data is base64 encoded */
|
||||
isBase64: boolean
|
||||
/** The data portion (everything after the comma). This is the raw string, not decoded. */
|
||||
data: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses a data URL into its component parts without using regex on the data portion.
|
||||
* This is memory-safe for large data URLs (e.g., 4K images) as it uses indexOf instead of regex.
|
||||
*
|
||||
* Data URL format: data:[<mediatype>][;base64],<data>
|
||||
*
|
||||
* @param url - The data URL string to parse
|
||||
* @returns DataUrlParts if valid, null if invalid
|
||||
*
|
||||
* @example
|
||||
* parseDataUrl('data:image/png;base64,iVBORw0KGgo...')
|
||||
* // { mediaType: 'image/png', isBase64: true, data: 'iVBORw0KGgo...' }
|
||||
*
|
||||
* parseDataUrl('data:text/plain,Hello')
|
||||
* // { mediaType: 'text/plain', isBase64: false, data: 'Hello' }
|
||||
*
|
||||
* parseDataUrl('invalid-url')
|
||||
* // null
|
||||
*/
|
||||
export function parseDataUrl(url: string): DataUrlParts | null {
|
||||
if (!url.startsWith('data:')) {
|
||||
return null
|
||||
}
|
||||
|
||||
const commaIndex = url.indexOf(',')
|
||||
if (commaIndex === -1) {
|
||||
return null
|
||||
}
|
||||
|
||||
const header = url.slice(5, commaIndex)
|
||||
|
||||
const isBase64 = header.includes(';base64')
|
||||
|
||||
const semicolonIndex = header.indexOf(';')
|
||||
const mediaType = (semicolonIndex === -1 ? header : header.slice(0, semicolonIndex)).trim() || undefined
|
||||
|
||||
const data = url.slice(commaIndex + 1)
|
||||
|
||||
return { mediaType, isBase64, data }
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if a string is a data URL.
|
||||
*
|
||||
* @param url - The string to check
|
||||
* @returns true if the string is a valid data URL
|
||||
*/
|
||||
export function isDataUrl(url: string): boolean {
|
||||
return url.startsWith('data:') && url.includes(',')
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if a data URL contains base64-encoded image data.
|
||||
*
|
||||
* @param url - The data URL to check
|
||||
* @returns true if the URL is a base64-encoded image data URL
|
||||
*/
|
||||
export function isBase64ImageDataUrl(url: string): boolean {
|
||||
if (!url.startsWith('data:image/')) {
|
||||
return false
|
||||
}
|
||||
const commaIndex = url.indexOf(',')
|
||||
if (commaIndex === -1) {
|
||||
return false
|
||||
}
|
||||
const header = url.slice(5, commaIndex)
|
||||
return header.includes(';base64')
|
||||
}
|
||||
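// A minimal sketch, assuming Node's Buffer is available: turning a parsed data URL
// into raw bytes. Only parseDataUrl comes from the code above.
function dataUrlToBytes(url: string): Uint8Array | null {
  const parsed = parseDataUrl(url)
  if (!parsed) return null
  return parsed.isBase64
    ? Uint8Array.from(Buffer.from(parsed.data, 'base64')) // base64 payloads decode directly
    : new TextEncoder().encode(decodeURIComponent(parsed.data)) // non-base64 payloads are percent-encoded text
}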
|
||||
@ -49,6 +49,7 @@
|
||||
"@dnd-kit/modifiers": "^9.0.0",
|
||||
"@dnd-kit/sortable": "^10.0.0",
|
||||
"@dnd-kit/utilities": "^3.2.2",
|
||||
"@radix-ui/primitive": "^1.1.3",
|
||||
"@radix-ui/react-checkbox": "^1.3.3",
|
||||
"@radix-ui/react-dialog": "^1.1.15",
|
||||
"@radix-ui/react-popover": "^1.1.15",
|
||||
@ -57,6 +58,7 @@
|
||||
"@radix-ui/react-slot": "^1.2.4",
|
||||
"@radix-ui/react-tabs": "^1.1.13",
|
||||
"@radix-ui/react-tooltip": "^1.2.8",
|
||||
"@radix-ui/react-use-callback-ref": "^1.1.1",
|
||||
"@radix-ui/react-use-controllable-state": "^1.2.2",
|
||||
"class-variance-authority": "^0.7.1",
|
||||
"clsx": "^2.1.1",
|
||||
@ -70,6 +72,7 @@
|
||||
"@heroui/react": "^2.8.4",
|
||||
"@storybook/addon-docs": "^10.0.5",
|
||||
"@storybook/addon-themes": "^10.0.5",
|
||||
"@storybook/react": "^10.1.11",
|
||||
"@storybook/react-vite": "^10.0.5",
|
||||
"@svgr/core": "^8.1.0",
|
||||
"@svgr/plugin-jsx": "^8.1.0",
|
||||
@ -94,11 +97,6 @@
|
||||
"typescript": "^5.6.2",
|
||||
"vitest": "^3.2.4"
|
||||
},
|
||||
"resolutions": {
|
||||
"@codemirror/language": "6.11.3",
|
||||
"@codemirror/lint": "6.8.5",
|
||||
"@codemirror/view": "6.38.1"
|
||||
},
|
||||
"sideEffects": false,
|
||||
"engines": {
|
||||
"node": ">=18.0.0"
|
||||
@ -147,6 +145,5 @@
|
||||
"./styles/tokens.css": "./src/styles/tokens.css",
|
||||
"./styles/theme.css": "./src/styles/theme.css",
|
||||
"./styles/index.css": "./src/styles/index.css"
|
||||
},
|
||||
"packageManager": "yarn@4.9.1"
|
||||
}
|
||||
}
|
||||
|
||||
@ -9,7 +9,7 @@ index 48e2f6263c6ee4c75d7e5c28733e64f6ebe92200..00d0729c4a3cbf9a48e8e1e962c7e2b2
|
||||
+ sendReasoning: z.ZodOptional<z.ZodBoolean>;
|
||||
}, z.core.$strip>;
|
||||
type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;
|
||||
|
||||
|
||||
diff --git a/dist/index.js b/dist/index.js
|
||||
index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e5bfe0f9a 100644
|
||||
--- a/dist/index.js
|
||||
@ -48,7 +48,7 @@ index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e
|
||||
messages.push({
|
||||
role: "assistant",
|
||||
content: text,
|
||||
+ reasoning_content: reasoning_text ?? undefined,
|
||||
+ reasoning_content: reasoning_text || undefined,
|
||||
tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
|
||||
...metadata
|
||||
});
|
||||
@ -60,7 +60,7 @@ index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e
|
||||
+ textVerbosity: import_v4.z.string().optional(),
|
||||
+ sendReasoning: import_v4.z.boolean().optional()
|
||||
});
|
||||
|
||||
|
||||
// src/openai-compatible-error.ts
|
||||
@@ -378,7 +387,7 @@ var OpenAICompatibleChatLanguageModel = class {
|
||||
reasoning_effort: compatibleOptions.reasoningEffort,
|
||||
@ -175,7 +175,7 @@ index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..565b605eeacd9830b2b0e817e58ad0c5
|
||||
messages.push({
|
||||
role: "assistant",
|
||||
content: text,
|
||||
+ reasoning_content: reasoning_text ?? undefined,
|
||||
+ reasoning_content: reasoning_text || undefined,
|
||||
tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
|
||||
...metadata
|
||||
});
|
||||
@ -187,7 +187,7 @@ index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..565b605eeacd9830b2b0e817e58ad0c5
|
||||
+ textVerbosity: z.string().optional(),
|
||||
+ sendReasoning: z.boolean().optional()
|
||||
});
|
||||
|
||||
|
||||
// src/openai-compatible-error.ts
|
||||
@@ -362,7 +371,7 @@ var OpenAICompatibleChatLanguageModel = class {
|
||||
reasoning_effort: compatibleOptions.reasoningEffort,
|
||||
30533
pnpm-lock.yaml
Normal file
File diff suppressed because it is too large
2
pnpm-workspace.yaml
Normal file
@ -0,0 +1,2 @@
|
||||
packages:
|
||||
- 'packages/*'
|
||||
@ -50,7 +50,7 @@ Usage Instructions:
|
||||
- pt-pt (Portuguese)
|
||||
|
||||
Run Command:
|
||||
yarn i18n:translate
|
||||
pnpm i18n:translate
|
||||
|
||||
Performance Optimization Recommendations:
|
||||
- For stable API services: MAX_CONCURRENT_TRANSLATIONS=8, TRANSLATION_DELAY_MS=50
|
||||
|
||||
@ -2,14 +2,14 @@ const { Arch } = require('electron-builder')
|
||||
const { downloadNpmPackage } = require('./utils')
|
||||
|
||||
// if you want to add new prebuild binaries packages with different architectures, you can add them here
|
||||
// please add to allX64 and allArm64 from yarn.lock
|
||||
// please add to allX64 and allArm64 from pnpm-lock.yaml
|
||||
const allArm64 = {
|
||||
'@img/sharp-darwin-arm64': '0.34.3',
|
||||
'@img/sharp-win32-arm64': '0.34.3',
|
||||
'@img/sharp-linux-arm64': '0.34.3',
|
||||
|
||||
'@img/sharp-libvips-darwin-arm64': '1.2.0',
|
||||
'@img/sharp-libvips-linux-arm64': '1.2.0',
|
||||
'@img/sharp-libvips-darwin-arm64': '1.2.4',
|
||||
'@img/sharp-libvips-linux-arm64': '1.2.4',
|
||||
|
||||
'@libsql/darwin-arm64': '0.4.7',
|
||||
'@libsql/linux-arm64-gnu': '0.4.7',
|
||||
@ -24,8 +24,8 @@ const allX64 = {
|
||||
'@img/sharp-linux-x64': '0.34.3',
|
||||
'@img/sharp-win32-x64': '0.34.3',
|
||||
|
||||
'@img/sharp-libvips-darwin-x64': '1.2.0',
|
||||
'@img/sharp-libvips-linux-x64': '1.2.0',
|
||||
'@img/sharp-libvips-darwin-x64': '1.2.4',
|
||||
'@img/sharp-libvips-linux-x64': '1.2.4',
|
||||
|
||||
'@libsql/darwin-x64': '0.4.7',
|
||||
'@libsql/linux-x64-gnu': '0.4.7',
|
||||
|
||||
@ -145,7 +145,7 @@ export function main() {
|
||||
console.log('i18n 检查已通过')
|
||||
} catch (e) {
|
||||
console.error(e)
|
||||
throw new Error(`检查未通过。尝试运行 yarn i18n:sync 以解决问题。`)
|
||||
throw new Error(`检查未通过。尝试运行 pnpm i18n:sync 以解决问题。`)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -57,7 +57,7 @@ function generateLanguagesFileContent(languages: Record<string, LanguageData>):
|
||||
*
|
||||
* ⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️
|
||||
* THIS FILE IS AUTOMATICALLY GENERATED BY A SCRIPT. DO NOT EDIT IT MANUALLY!
|
||||
* Run \`yarn update:languages\` to update this file.
|
||||
* Run \`pnpm update:languages\` to update this file.
|
||||
* ⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️
|
||||
*
|
||||
*/
|
||||
@ -81,7 +81,7 @@ export const languages: Record<string, LanguageData> = ${languagesObjectString};
|
||||
async function format(filePath: string): Promise<void> {
|
||||
console.log('🎨 Formatting file with Biome...')
|
||||
try {
|
||||
await execAsync(`yarn biome format --write ${filePath}`)
|
||||
await execAsync(`pnpm biome format --write ${filePath}`)
|
||||
console.log('✅ Biome formatting complete.')
|
||||
} catch (e: any) {
|
||||
console.error('❌ Biome formatting failed:', e.stdout || e.stderr)
|
||||
@ -96,7 +96,7 @@ async function format(filePath: string): Promise<void> {
|
||||
async function checkTypeScript(filePath: string): Promise<void> {
|
||||
console.log('🧐 Checking file with TypeScript compiler...')
|
||||
try {
|
||||
await execAsync(`yarn tsc --noEmit --skipLibCheck ${filePath}`)
|
||||
await execAsync(`pnpm tsc --noEmit --skipLibCheck ${filePath}`)
|
||||
console.log('✅ TypeScript check passed.')
|
||||
} catch (e: any) {
|
||||
console.error('❌ TypeScript check failed:', e.stdout || e.stderr)
|
||||
|
||||
@ -18,7 +18,7 @@ if (!['patch', 'minor', 'major'].includes(versionType)) {
|
||||
}
|
||||
|
||||
// Bump the version
|
||||
exec(`yarn version ${versionType} --immediate`)
|
||||
exec(`pnpm version ${versionType}`)
|
||||
|
||||
// Read the updated package.json to get the new version number
|
||||
const updatedPackageJson = JSON.parse(fs.readFileSync('package.json', 'utf8'))
|
||||
|
||||
240
src/main/__tests__/mcp.test.ts
Normal file
@ -0,0 +1,240 @@
|
||||
import { buildFunctionCallToolName, buildMcpToolName, generateMcpToolFunctionName, toCamelCase } from '@shared/mcp'
|
||||
import { describe, expect, it } from 'vitest'
|
||||
|
||||
describe('toCamelCase', () => {
|
||||
it('should convert hyphenated strings', () => {
|
||||
expect(toCamelCase('my-server')).toBe('myServer')
|
||||
expect(toCamelCase('my-tool-name')).toBe('myToolName')
|
||||
})
|
||||
|
||||
it('should convert underscored strings', () => {
|
||||
expect(toCamelCase('my_server')).toBe('myServer')
|
||||
expect(toCamelCase('search_issues')).toBe('searchIssues')
|
||||
})
|
||||
|
||||
it('should handle mixed delimiters', () => {
|
||||
expect(toCamelCase('my-server_name')).toBe('myServerName')
|
||||
})
|
||||
|
||||
it('should handle leading numbers by prefixing underscore', () => {
|
||||
expect(toCamelCase('123server')).toBe('_123server')
|
||||
})
|
||||
|
||||
it('should handle special characters', () => {
|
||||
expect(toCamelCase('test@server!')).toBe('testServer')
|
||||
expect(toCamelCase('tool#name$')).toBe('toolName')
|
||||
})
|
||||
|
||||
it('should trim whitespace', () => {
|
||||
expect(toCamelCase(' server ')).toBe('server')
|
||||
})
|
||||
|
||||
it('should handle empty string', () => {
|
||||
expect(toCamelCase('')).toBe('')
|
||||
})
|
||||
|
||||
it('should handle uppercase snake case', () => {
|
||||
expect(toCamelCase('MY_SERVER')).toBe('myServer')
|
||||
expect(toCamelCase('SEARCH_ISSUES')).toBe('searchIssues')
|
||||
})
|
||||
|
||||
it('should handle mixed case', () => {
|
||||
expect(toCamelCase('MyServer')).toBe('myserver')
|
||||
expect(toCamelCase('myTOOL')).toBe('mytool')
|
||||
})
|
||||
})
|
||||
|
||||
describe('buildMcpToolName', () => {
|
||||
it('should build basic name with defaults', () => {
|
||||
expect(buildMcpToolName('github', 'search_issues')).toBe('github_searchIssues')
|
||||
})
|
||||
|
||||
it('should handle undefined server name', () => {
|
||||
expect(buildMcpToolName(undefined, 'search_issues')).toBe('searchIssues')
|
||||
})
|
||||
|
||||
it('should apply custom prefix and delimiter', () => {
|
||||
expect(buildMcpToolName('github', 'search', { prefix: 'mcp__', delimiter: '__' })).toBe('mcp__github__search')
|
||||
})
|
||||
|
||||
it('should respect maxLength', () => {
|
||||
const result = buildMcpToolName('veryLongServerName', 'veryLongToolName', { maxLength: 20 })
|
||||
expect(result.length).toBeLessThanOrEqual(20)
|
||||
})
|
||||
|
||||
it('should handle collision with existingNames', () => {
|
||||
const existingNames = new Set(['github_search'])
|
||||
const result = buildMcpToolName('github', 'search', { existingNames })
|
||||
expect(result).toBe('github_search1')
|
||||
expect(existingNames.has('github_search1')).toBe(true)
|
||||
})
|
||||
|
||||
it('should respect maxLength when adding collision suffix', () => {
|
||||
const existingNames = new Set(['a'.repeat(20)])
|
||||
const result = buildMcpToolName('a'.repeat(20), '', { maxLength: 20, existingNames })
|
||||
expect(result.length).toBeLessThanOrEqual(20)
|
||||
expect(existingNames.has(result)).toBe(true)
|
||||
})
|
||||
|
||||
it('should handle multiple collisions with maxLength', () => {
|
||||
const existingNames = new Set(['abcd', 'abc1', 'abc2'])
|
||||
const result = buildMcpToolName('abcd', '', { maxLength: 4, existingNames })
|
||||
expect(result).toBe('abc3')
|
||||
expect(result.length).toBeLessThanOrEqual(4)
|
||||
})
|
||||
})
|
||||
|
||||
describe('generateMcpToolFunctionName', () => {
|
||||
it('should return format serverName_toolName in camelCase', () => {
|
||||
expect(generateMcpToolFunctionName('github', 'search_issues')).toBe('github_searchIssues')
|
||||
})
|
||||
|
||||
it('should handle hyphenated names', () => {
|
||||
expect(generateMcpToolFunctionName('my-server', 'my-tool')).toBe('myServer_myTool')
|
||||
})
|
||||
|
||||
it('should handle undefined server name', () => {
|
||||
expect(generateMcpToolFunctionName(undefined, 'search_issues')).toBe('searchIssues')
|
||||
})
|
||||
|
||||
it('should handle collision detection', () => {
|
||||
const existingNames = new Set<string>()
|
||||
const first = generateMcpToolFunctionName('github', 'search', existingNames)
|
||||
const second = generateMcpToolFunctionName('github', 'search', existingNames)
|
||||
expect(first).toBe('github_search')
|
||||
expect(second).toBe('github_search1')
|
||||
})
|
||||
})
|
||||
|
||||
describe('buildFunctionCallToolName', () => {
|
||||
describe('basic format', () => {
|
||||
it('should return format mcp__{server}__{tool} in camelCase', () => {
|
||||
const result = buildFunctionCallToolName('github', 'search_issues')
|
||||
expect(result).toBe('mcp__github__searchIssues')
|
||||
})
|
||||
|
||||
it('should handle simple server and tool names', () => {
|
||||
expect(buildFunctionCallToolName('fetch', 'get_page')).toBe('mcp__fetch__getPage')
|
||||
expect(buildFunctionCallToolName('database', 'query')).toBe('mcp__database__query')
|
||||
})
|
||||
})
|
||||
|
||||
describe('valid JavaScript identifier', () => {
|
||||
it('should always start with mcp__ prefix (valid JS identifier start)', () => {
|
||||
const result = buildFunctionCallToolName('123server', '456tool')
|
||||
expect(result).toMatch(/^mcp__/)
|
||||
})
|
||||
|
||||
it('should handle hyphenated names with camelCase', () => {
|
||||
const result = buildFunctionCallToolName('my-server', 'my-tool')
|
||||
expect(result).toBe('mcp__myServer__myTool')
|
||||
})
|
||||
|
||||
it('should be a valid JavaScript identifier', () => {
|
||||
const testCases = [
|
||||
['github', 'create_issue'],
|
||||
['my-server', 'fetch-data'],
|
||||
['test@server', 'tool#name'],
|
||||
['server.name', 'tool.action']
|
||||
]
|
||||
|
||||
for (const [server, tool] of testCases) {
|
||||
const result = buildFunctionCallToolName(server, tool)
|
||||
expect(result).toMatch(/^[a-zA-Z_][a-zA-Z0-9_]*$/)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
describe('character sanitization', () => {
|
||||
it('should convert special characters to camelCase boundaries', () => {
|
||||
expect(buildFunctionCallToolName('my-server', 'my-tool-name')).toBe('mcp__myServer__myToolName')
|
||||
expect(buildFunctionCallToolName('test@server!', 'tool#name$')).toBe('mcp__testServer__toolName')
|
||||
expect(buildFunctionCallToolName('server.name', 'tool.action')).toBe('mcp__serverName__toolAction')
|
||||
})
|
||||
|
||||
it('should handle spaces', () => {
|
||||
const result = buildFunctionCallToolName('my server', 'my tool')
|
||||
expect(result).toBe('mcp__myServer__myTool')
|
||||
})
|
||||
})
|
||||
|
||||
describe('length constraints', () => {
|
||||
it('should not exceed 63 characters', () => {
|
||||
const longServerName = 'a'.repeat(50)
|
||||
const longToolName = 'b'.repeat(50)
|
||||
const result = buildFunctionCallToolName(longServerName, longToolName)
|
||||
expect(result.length).toBeLessThanOrEqual(63)
|
||||
})
|
||||
|
||||
it('should not end with underscores after truncation', () => {
|
||||
const longServerName = 'a'.repeat(30)
|
||||
const longToolName = 'b'.repeat(30)
|
||||
const result = buildFunctionCallToolName(longServerName, longToolName)
|
||||
expect(result).not.toMatch(/_+$/)
|
||||
expect(result.length).toBeLessThanOrEqual(63)
|
||||
})
|
||||
})
|
||||
|
||||
describe('edge cases', () => {
|
||||
it('should handle empty server name', () => {
|
||||
const result = buildFunctionCallToolName('', 'tool')
|
||||
expect(result).toBe('mcp__tool')
|
||||
})
|
||||
|
||||
it('should handle empty tool name', () => {
|
||||
const result = buildFunctionCallToolName('server', '')
|
||||
expect(result).toBe('mcp__server__')
|
||||
})
|
||||
|
||||
it('should trim whitespace from names', () => {
|
||||
const result = buildFunctionCallToolName(' server ', ' tool ')
|
||||
expect(result).toBe('mcp__server__tool')
|
||||
})
|
||||
|
||||
it('should handle mixed case by normalizing to lowercase first', () => {
|
||||
const result = buildFunctionCallToolName('MyServer', 'MyTool')
|
||||
expect(result).toBe('mcp__myserver__mytool')
|
||||
})
|
||||
|
||||
it('should handle uppercase snake case', () => {
|
||||
const result = buildFunctionCallToolName('MY_SERVER', 'SEARCH_ISSUES')
|
||||
expect(result).toBe('mcp__myServer__searchIssues')
|
||||
})
|
||||
})
|
||||
|
||||
describe('deterministic output', () => {
|
||||
it('should produce consistent results for same input', () => {
|
||||
const result1 = buildFunctionCallToolName('github', 'search_repos')
|
||||
const result2 = buildFunctionCallToolName('github', 'search_repos')
|
||||
expect(result1).toBe(result2)
|
||||
})
|
||||
|
||||
it('should produce different results for different inputs', () => {
|
||||
const result1 = buildFunctionCallToolName('server1', 'tool')
|
||||
const result2 = buildFunctionCallToolName('server2', 'tool')
|
||||
expect(result1).not.toBe(result2)
|
||||
})
|
||||
})
|
||||
|
||||
describe('real-world scenarios', () => {
|
||||
it('should handle GitHub MCP server', () => {
|
||||
expect(buildFunctionCallToolName('github', 'create_issue')).toBe('mcp__github__createIssue')
|
||||
expect(buildFunctionCallToolName('github', 'search_repositories')).toBe('mcp__github__searchRepositories')
|
||||
})
|
||||
|
||||
it('should handle filesystem MCP server', () => {
|
||||
expect(buildFunctionCallToolName('filesystem', 'read_file')).toBe('mcp__filesystem__readFile')
|
||||
expect(buildFunctionCallToolName('filesystem', 'write_file')).toBe('mcp__filesystem__writeFile')
|
||||
})
|
||||
|
||||
it('should handle hyphenated server names (common in npm packages)', () => {
|
||||
expect(buildFunctionCallToolName('cherry-fetch', 'get_page')).toBe('mcp__cherryFetch__getPage')
|
||||
expect(buildFunctionCallToolName('mcp-server-github', 'search')).toBe('mcp__mcpServerGithub__search')
|
||||
})
|
||||
|
||||
it('should handle scoped npm package style names', () => {
|
||||
const result = buildFunctionCallToolName('@anthropic/mcp-server', 'chat')
|
||||
expect(result).toBe('mcp__AnthropicMcpServer__chat')
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -6,7 +6,7 @@ import { app } from 'electron'
|
||||
import path from 'path'
|
||||
import { pathToFileURL } from 'url'
|
||||
|
||||
import { CUSTOM_SQL_STATEMENTS } from './customSql'
|
||||
import { CUSTOM_SQL_STATEMENTS } from './customSqls'
|
||||
import Seeding from './seeding'
|
||||
import type { DbType } from './types'
|
||||
|
||||
|
||||
@ -14,7 +14,7 @@
|
||||
* 2. Import and spread them into CUSTOM_SQL_STATEMENTS below
|
||||
*/
|
||||
|
||||
import { MESSAGE_FTS_STATEMENTS } from './schemas/messageFts'
|
||||
import { MESSAGE_FTS_STATEMENTS } from './schemas/message'
|
||||
|
||||
/**
|
||||
* All custom SQL statements to run after migrations
|
||||
@ -1,6 +1,6 @@
|
||||
import { sqliteTable, text } from 'drizzle-orm/sqlite-core'
|
||||
|
||||
import { createUpdateTimestamps } from './columnHelpers'
|
||||
import { createUpdateTimestamps } from './_columnHelpers'
|
||||
|
||||
export const appStateTable = sqliteTable('app_state', {
|
||||
key: text().primaryKey(),
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
import { index, integer, sqliteTable, text } from 'drizzle-orm/sqlite-core'
|
||||
|
||||
import { createUpdateTimestamps, uuidPrimaryKey } from './columnHelpers'
|
||||
import { createUpdateTimestamps, uuidPrimaryKey } from './_columnHelpers'
|
||||
|
||||
/**
|
||||
* Group table - general-purpose grouping for entities
|
||||
|
||||
@ -3,7 +3,7 @@ import type { AssistantMeta, ModelMeta } from '@shared/data/types/meta'
|
||||
import { sql } from 'drizzle-orm'
|
||||
import { check, foreignKey, index, integer, sqliteTable, text } from 'drizzle-orm/sqlite-core'
|
||||
|
||||
import { createUpdateDeleteTimestamps, uuidPrimaryKeyOrdered } from './columnHelpers'
|
||||
import { createUpdateDeleteTimestamps, uuidPrimaryKeyOrdered } from './_columnHelpers'
|
||||
import { topicTable } from './topic'
|
||||
|
||||
/**
|
||||
@ -63,3 +63,92 @@ export const messageTable = sqliteTable(
|
||||
check('message_status_check', sql`${t.status} IN ('pending', 'success', 'error', 'paused')`)
|
||||
]
|
||||
)
|
||||
|
||||
/**
|
||||
* FTS5 SQL statements for message full-text search
|
||||
*
|
||||
* This file contains SQL statements that must be manually added to migration files.
|
||||
* Drizzle does not auto-generate virtual tables or triggers.
|
||||
*
|
||||
* Architecture:
|
||||
* 1. message.searchable_text - regular column populated by trigger
|
||||
* 2. message_fts - FTS5 virtual table with external content
|
||||
* 3. Triggers sync both searchable_text and FTS5 index
|
||||
*
|
||||
* Usage:
|
||||
* - Copy MESSAGE_FTS_MIGRATION_SQL to migration file when generating migrations
|
||||
*/
|
||||
|
||||
/**
|
||||
* Custom SQL statements that Drizzle cannot manage
|
||||
* These are executed after every migration via DbService.runCustomMigrations()
|
||||
*
|
||||
* All statements should use IF NOT EXISTS to be idempotent.
|
||||
*/
|
||||
export const MESSAGE_FTS_STATEMENTS: string[] = [
|
||||
// FTS5 virtual table, Links to message table's searchable_text column
|
||||
`CREATE VIRTUAL TABLE IF NOT EXISTS message_fts USING fts5(
|
||||
searchable_text,
|
||||
content='message',
|
||||
content_rowid='rowid',
|
||||
tokenize='trigram'
|
||||
)`,
|
||||
|
||||
// Trigger: populate searchable_text and sync FTS on INSERT
|
||||
`CREATE TRIGGER IF NOT EXISTS message_ai AFTER INSERT ON message BEGIN
|
||||
UPDATE message SET searchable_text = (
|
||||
SELECT group_concat(json_extract(value, '$.content'), ' ')
|
||||
FROM json_each(json_extract(NEW.data, '$.blocks'))
|
||||
WHERE json_extract(value, '$.type') = 'main_text'
|
||||
) WHERE id = NEW.id;
|
||||
INSERT INTO message_fts(rowid, searchable_text)
|
||||
SELECT rowid, searchable_text FROM message WHERE id = NEW.id;
|
||||
END`,
|
||||
|
||||
// Trigger: sync FTS on DELETE
|
||||
`CREATE TRIGGER IF NOT EXISTS message_ad AFTER DELETE ON message BEGIN
|
||||
INSERT INTO message_fts(message_fts, rowid, searchable_text)
|
||||
VALUES ('delete', OLD.rowid, OLD.searchable_text);
|
||||
END`,
|
||||
|
||||
// Trigger: update searchable_text and sync FTS on UPDATE OF data
|
||||
`CREATE TRIGGER IF NOT EXISTS message_au AFTER UPDATE OF data ON message BEGIN
|
||||
INSERT INTO message_fts(message_fts, rowid, searchable_text)
|
||||
VALUES ('delete', OLD.rowid, OLD.searchable_text);
|
||||
UPDATE message SET searchable_text = (
|
||||
SELECT group_concat(json_extract(value, '$.content'), ' ')
|
||||
FROM json_each(json_extract(NEW.data, '$.blocks'))
|
||||
WHERE json_extract(value, '$.type') = 'main_text'
|
||||
) WHERE id = NEW.id;
|
||||
INSERT INTO message_fts(rowid, searchable_text)
|
||||
SELECT rowid, searchable_text FROM message WHERE id = NEW.id;
|
||||
END`
|
||||
]
|
||||
|
||||
/** Examples */
|
||||
|
||||
/**
|
||||
* SQL expression to extract searchable text from data.blocks
|
||||
* Concatenates content from all main_text type blocks
|
||||
*/
|
||||
// export const SEARCHABLE_TEXT_EXPRESSION = `
|
||||
// (SELECT group_concat(json_extract(value, '$.content'), ' ')
|
||||
// FROM json_each(json_extract(NEW.data, '$.blocks'))
|
||||
// WHERE json_extract(value, '$.type') = 'main_text')
|
||||
// `
|
||||
|
||||
/**
|
||||
* Rebuild FTS index (run manually if needed)
|
||||
*/
|
||||
// export const REBUILD_FTS_SQL = `INSERT INTO message_fts(message_fts) VALUES ('rebuild')`
|
||||
|
||||
/**
|
||||
* Example search query
|
||||
*/
|
||||
// export const EXAMPLE_SEARCH_SQL = `
|
||||
// SELECT m.*
|
||||
// FROM message m
|
||||
// JOIN message_fts fts ON m.rowid = fts.rowid
|
||||
// WHERE message_fts MATCH ?
|
||||
// ORDER BY rank
|
||||
// `
|
||||
|
||||
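Since every statement above is guarded by `IF NOT EXISTS`, a post-migration hook can simply replay the whole array. A minimal sketch of what that `runCustomMigrations()` step could look like, assuming a better-sqlite3-style `exec` API and an import path of `./message-fts` (both are assumptions; the actual DbService wiring may differ):

```typescript
// Sketch only: replay the idempotent FTS statements after Drizzle migrations.
// Assumes a better-sqlite3-style Database and the './message-fts' module path.
import Database from 'better-sqlite3'

import { MESSAGE_FTS_STATEMENTS } from './message-fts'

export function runCustomMigrations(db: Database.Database): void {
  for (const statement of MESSAGE_FTS_STATEMENTS) {
    // Safe to re-run: every statement uses CREATE ... IF NOT EXISTS.
    db.exec(statement)
  }
}
```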
@ -1,86 +0,0 @@
|
||||
/**
|
||||
* FTS5 SQL statements for message full-text search
|
||||
*
|
||||
* This file contains SQL statements that must be manually added to migration files.
|
||||
* Drizzle does not auto-generate virtual tables or triggers.
|
||||
*
|
||||
* Architecture:
|
||||
* 1. message.searchable_text - regular column populated by trigger
|
||||
* 2. message_fts - FTS5 virtual table with external content
|
||||
* 3. Triggers sync both searchable_text and FTS5 index
|
||||
*
|
||||
* Usage:
|
||||
* - Copy MESSAGE_FTS_MIGRATION_SQL to migration file when generating migrations
|
||||
*/
|
||||
|
||||
/**
|
||||
* SQL expression to extract searchable text from data.blocks
|
||||
* Concatenates content from all main_text type blocks
|
||||
*/
|
||||
export const SEARCHABLE_TEXT_EXPRESSION = `
|
||||
(SELECT group_concat(json_extract(value, '$.content'), ' ')
|
||||
FROM json_each(json_extract(NEW.data, '$.blocks'))
|
||||
WHERE json_extract(value, '$.type') = 'main_text')
|
||||
`
|
||||
|
||||
/**
|
||||
* Custom SQL statements that Drizzle cannot manage
|
||||
* These are executed after every migration via DbService.runCustomMigrations()
|
||||
*
|
||||
* All statements should use IF NOT EXISTS to be idempotent.
|
||||
*/
|
||||
export const MESSAGE_FTS_STATEMENTS: string[] = [
|
||||
// FTS5 virtual table, Links to message table's searchable_text column
|
||||
`CREATE VIRTUAL TABLE IF NOT EXISTS message_fts USING fts5(
|
||||
searchable_text,
|
||||
content='message',
|
||||
content_rowid='rowid',
|
||||
tokenize='trigram'
|
||||
)`,
|
||||
|
||||
// Trigger: populate searchable_text and sync FTS on INSERT
|
||||
`CREATE TRIGGER IF NOT EXISTS message_ai AFTER INSERT ON message BEGIN
|
||||
UPDATE message SET searchable_text = (
|
||||
SELECT group_concat(json_extract(value, '$.content'), ' ')
|
||||
FROM json_each(json_extract(NEW.data, '$.blocks'))
|
||||
WHERE json_extract(value, '$.type') = 'main_text'
|
||||
) WHERE id = NEW.id;
|
||||
INSERT INTO message_fts(rowid, searchable_text)
|
||||
SELECT rowid, searchable_text FROM message WHERE id = NEW.id;
|
||||
END`,
|
||||
|
||||
// Trigger: sync FTS on DELETE
|
||||
`CREATE TRIGGER IF NOT EXISTS message_ad AFTER DELETE ON message BEGIN
|
||||
INSERT INTO message_fts(message_fts, rowid, searchable_text)
|
||||
VALUES ('delete', OLD.rowid, OLD.searchable_text);
|
||||
END`,
|
||||
|
||||
// Trigger: update searchable_text and sync FTS on UPDATE OF data
|
||||
`CREATE TRIGGER IF NOT EXISTS message_au AFTER UPDATE OF data ON message BEGIN
|
||||
INSERT INTO message_fts(message_fts, rowid, searchable_text)
|
||||
VALUES ('delete', OLD.rowid, OLD.searchable_text);
|
||||
UPDATE message SET searchable_text = (
|
||||
SELECT group_concat(json_extract(value, '$.content'), ' ')
|
||||
FROM json_each(json_extract(NEW.data, '$.blocks'))
|
||||
WHERE json_extract(value, '$.type') = 'main_text'
|
||||
) WHERE id = NEW.id;
|
||||
INSERT INTO message_fts(rowid, searchable_text)
|
||||
SELECT rowid, searchable_text FROM message WHERE id = NEW.id;
|
||||
END`
|
||||
]
|
||||
|
||||
/**
|
||||
* Rebuild FTS index (run manually if needed)
|
||||
*/
|
||||
export const REBUILD_FTS_SQL = `INSERT INTO message_fts(message_fts) VALUES ('rebuild')`
|
||||
|
||||
/**
|
||||
* Example search query
|
||||
*/
|
||||
export const EXAMPLE_SEARCH_SQL = `
|
||||
SELECT m.*
|
||||
FROM message m
|
||||
JOIN message_fts fts ON m.rowid = fts.rowid
|
||||
WHERE message_fts MATCH ?
|
||||
ORDER BY rank
|
||||
`
|
||||
@ -1,6 +1,6 @@
|
||||
import { primaryKey, sqliteTable, text } from 'drizzle-orm/sqlite-core'
|
||||
|
||||
import { createUpdateTimestamps } from './columnHelpers'
|
||||
import { createUpdateTimestamps } from './_columnHelpers'
|
||||
|
||||
export const preferenceTable = sqliteTable(
|
||||
'preference',
|
||||
|
||||
@ -1,18 +0,0 @@
|
||||
import { sqliteTable, text } from 'drizzle-orm/sqlite-core'
|
||||
|
||||
import { createUpdateTimestamps, uuidPrimaryKey } from './columnHelpers'
|
||||
|
||||
/**
|
||||
* Tag table - general-purpose tags for entities
|
||||
*
|
||||
* Tags can be applied to topics, sessions, and assistants
|
||||
* via the entity_tag join table.
|
||||
*/
|
||||
export const tagTable = sqliteTable('tag', {
|
||||
id: uuidPrimaryKey(),
|
||||
// Unique tag name
|
||||
name: text().notNull().unique(),
|
||||
// Display color (hex code)
|
||||
color: text(),
|
||||
...createUpdateTimestamps
|
||||
})
|
||||
@ -1,7 +1,21 @@
|
||||
import { index, primaryKey, sqliteTable, text } from 'drizzle-orm/sqlite-core'
|
||||
|
||||
import { createUpdateTimestamps } from './columnHelpers'
|
||||
import { tagTable } from './tag'
|
||||
import { createUpdateTimestamps, uuidPrimaryKey } from './_columnHelpers'
|
||||
|
||||
/**
|
||||
* Tag table - general-purpose tags for entities
|
||||
*
|
||||
* Tags can be applied to topics, sessions, and assistants
|
||||
* via the entity_tag join table.
|
||||
*/
|
||||
export const tagTable = sqliteTable('tag', {
|
||||
id: uuidPrimaryKey(),
|
||||
// Unique tag name
|
||||
name: text().notNull().unique(),
|
||||
// Display color (hex code)
|
||||
color: text(),
|
||||
...createUpdateTimestamps
|
||||
})
|
||||
|
||||
/**
|
||||
* Entity-Tag join table - associates tags with entities
|
||||
@ -1,7 +1,7 @@
|
||||
import type { AssistantMeta } from '@shared/data/types/meta'
|
||||
import { index, integer, sqliteTable, text } from 'drizzle-orm/sqlite-core'
|
||||
|
||||
import { createUpdateDeleteTimestamps, uuidPrimaryKey } from './columnHelpers'
|
||||
import { createUpdateDeleteTimestamps, uuidPrimaryKey } from './_columnHelpers'
|
||||
import { groupTable } from './group'
|
||||
|
||||
/**
|
||||
|
||||
@ -43,23 +43,35 @@ const DEFAULT_LIMIT = 20
|
||||
|
||||
/**
|
||||
* Convert database row to Message entity
|
||||
*
|
||||
* Note: When using raw SQL queries (db.all with sql``), Drizzle ORM does NOT
|
||||
* automatically parse JSON columns. This function handles both parsed objects
|
||||
* (from ORM queries) and JSON strings (from raw SQL queries).
|
||||
*/
|
||||
function rowToMessage(row: typeof messageTable.$inferSelect): Message {
|
||||
// Handle JSON strings from raw SQL queries (db.all with sql``)
|
||||
// ORM queries (.select().from()) return already-parsed objects
|
||||
const parseJson = <T>(value: T | string | null | undefined): T | null => {
|
||||
if (value == null) return null
|
||||
if (typeof value === 'string') return JSON.parse(value)
|
||||
return value as T
|
||||
}
|
||||
|
||||
return {
|
||||
id: row.id,
|
||||
topicId: row.topicId,
|
||||
parentId: row.parentId,
|
||||
role: row.role as Message['role'],
|
||||
data: row.data,
|
||||
data: parseJson(row.data)!,
|
||||
searchableText: row.searchableText,
|
||||
status: row.status as Message['status'],
|
||||
siblingsGroupId: row.siblingsGroupId ?? 0,
|
||||
assistantId: row.assistantId,
|
||||
assistantMeta: row.assistantMeta,
|
||||
assistantMeta: parseJson(row.assistantMeta),
|
||||
modelId: row.modelId,
|
||||
modelMeta: row.modelMeta,
|
||||
modelMeta: parseJson(row.modelMeta),
|
||||
traceId: row.traceId,
|
||||
stats: row.stats,
|
||||
stats: parseJson(row.stats),
|
||||
createdAt: row.createdAt ? new Date(row.createdAt).toISOString() : new Date().toISOString(),
|
||||
updatedAt: row.updatedAt ? new Date(row.updatedAt).toISOString() : new Date().toISOString()
|
||||
}
|
||||
|
||||
@ -9,6 +9,7 @@ import DiDiMcpServer from './didi-mcp'
|
||||
import DifyKnowledgeServer from './dify-knowledge'
|
||||
import FetchServer from './fetch'
|
||||
import FileSystemServer from './filesystem'
|
||||
import HubServer from './hub'
|
||||
import MemoryServer from './memory'
|
||||
import PythonServer from './python'
|
||||
import ThinkingServer from './sequentialthinking'
|
||||
@ -52,6 +53,9 @@ export function createInMemoryMCPServer(
|
||||
case BuiltinMCPServerNames.browser: {
|
||||
return new BrowserServer().server
|
||||
}
|
||||
case BuiltinMCPServerNames.hub: {
|
||||
return new HubServer().server
|
||||
}
|
||||
default:
|
||||
throw new Error(`Unknown in-memory MCP server: ${name}`)
|
||||
}
|
||||
|
||||
213
src/main/mcpServers/hub/README.md
Normal file
@ -0,0 +1,213 @@
|
||||
# Hub MCP Server
|
||||
|
||||
A built-in MCP server that aggregates all active MCP servers in Cherry Studio and exposes them through `search` and `exec` tools.
|
||||
|
||||
## Overview
|
||||
|
||||
The Hub server enables LLMs to discover and call tools from all active MCP servers without needing to know the specific server names or tool signatures upfront.
|
||||
|
||||
## Auto Mode Integration
|
||||
|
||||
The Hub server is the core component of Cherry Studio's **Auto MCP Mode**. When an assistant is set to Auto mode:
|
||||
|
||||
1. **Automatic Injection**: The Hub server is automatically injected as the only MCP server for the assistant
|
||||
2. **System Prompt**: A specialized system prompt (`HUB_MODE_SYSTEM_PROMPT`) is appended to guide the LLM on how to use the `search` and `exec` tools
|
||||
3. **Dynamic Discovery**: The LLM can discover and use any tools from all active MCP servers without manual configuration
|
||||
|
||||
### MCP Modes
|
||||
|
||||
Cherry Studio supports three MCP modes per assistant:
|
||||
|
||||
| Mode | Description | Tools Available |
|
||||
|------|-------------|-----------------|
|
||||
| **Disabled** | No MCP tools | None |
|
||||
| **Auto** | Hub server only | `search`, `exec` |
|
||||
| **Manual** | User selects servers | Selected server tools |
|
||||
|
||||
### How Auto Mode Works
|
||||
|
||||
```
|
||||
User Message
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Assistant (mcpMode: 'auto') │
|
||||
│ │
|
||||
│ System Prompt + HUB_MODE_SYSTEM_PROMPT │
|
||||
│ Tools: [hub.search, hub.exec] │
|
||||
└─────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────┐
|
||||
│ LLM decides to use MCP tools │
|
||||
│ │
|
||||
│ 1. search({ query: "github,repo" }) │
|
||||
│ 2. exec({ code: "await searchRepos()" })│
|
||||
└─────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Hub Server │
|
||||
│ │
|
||||
│ Aggregates all active MCP servers │
|
||||
│ Routes tool calls to appropriate server │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### Relevant Code
|
||||
|
||||
- **Type Definition**: `src/renderer/src/types/index.ts` - `McpMode` type and `getEffectiveMcpMode()`
|
||||
- **Hub Server Constant**: `src/renderer/src/store/mcp.ts` - `hubMCPServer`
|
||||
- **Server Selection**: `src/renderer/src/services/ApiService.ts` - `getMcpServersForAssistant()`
|
||||
- **System Prompt**: `src/renderer/src/config/prompts.ts` - `HUB_MODE_SYSTEM_PROMPT`
|
||||
- **Prompt Injection**: `src/renderer/src/aiCore/prepareParams/parameterBuilder.ts`
|
||||
|
||||
## Tools
|
||||
|
||||
### `search`
|
||||
|
||||
Search for available MCP tools by keywords.
|
||||
|
||||
**Parameters:**
|
||||
| Name | Type | Required | Description |
|
||||
|------|------|----------|-------------|
|
||||
| `query` | string | Yes | Search keywords, comma-separated for OR matching |
|
||||
| `limit` | number | No | Maximum results to return (default: 10, max: 50) |
|
||||
|
||||
**Example:**
|
||||
```json
|
||||
{
|
||||
"query": "browser,chrome",
|
||||
"limit": 5
|
||||
}
|
||||
```
|
||||
|
||||
**Returns:** JavaScript function declarations with JSDoc comments that can be used in the `exec` tool.
|
||||
|
||||
```javascript
|
||||
// Found 2 tool(s):
|
||||
|
||||
/**
|
||||
* Launch a browser instance
|
||||
*
|
||||
* @param {{ browser?: "chromium" | "firefox" | "webkit", headless?: boolean }} params
|
||||
* @returns {Promise<unknown>}
|
||||
*/
|
||||
async function launchBrowser(params) {
|
||||
return await __callTool("browser__launch_browser", params);
|
||||
}
|
||||
```
|
||||
|
||||
### `exec`
|
||||
|
||||
Execute JavaScript code that calls MCP tools.
|
||||
|
||||
**Parameters:**
|
||||
| Name | Type | Required | Description |
|
||||
|------|------|----------|-------------|
|
||||
| `code` | string | Yes | JavaScript code to execute |
|
||||
|
||||
**Built-in Helpers:**
|
||||
- `parallel(...promises)` - Run multiple tool calls concurrently (Promise.all)
|
||||
- `settle(...promises)` - Run multiple tool calls and get all results (Promise.allSettled)
|
||||
- `console.log/warn/error/info/debug` - Captured in output logs
|
||||
|
||||
**Example:**
|
||||
```javascript
// Call a single tool
const result = await searchRepos({ query: "react" });
return result;
```

```javascript
// Or call multiple tools in parallel
const [users, repos] = await parallel(
  getUsers({ limit: 10 }),
  searchRepos({ query: "typescript" })
);
return { users, repos };
```
|
||||
|
||||
**Returns:**
|
||||
```json
|
||||
{
|
||||
"result": { "users": [...], "repos": [...] },
|
||||
"logs": ["[log] Processing..."],
|
||||
"error": null
|
||||
}
|
||||
```
|
||||
|
||||
## Usage Flow
|
||||
|
||||
1. **Search** for tools using keywords:
|
||||
```
|
||||
search({ query: "github,repository" })
|
||||
```
|
||||
|
||||
2. **Review** the returned function signatures and JSDoc
|
||||
|
||||
3. **Execute** code using the discovered tools:
|
||||
```
|
||||
exec({ code: 'return await searchRepos({ query: "react" })' })
|
||||
```
|
||||
|
||||
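Putting the three steps together, a hypothetical `exec` payload that chains results from one discovered tool into another (the tool names and result shapes here are illustrative, not real servers):

```javascript
// Hypothetical: assumes search() returned searchRepos() and getUser() declarations.
const repos = await searchRepos({ query: "react" });
console.log(`found ${repos.length} repos`);
const owner = await getUser({ username: repos[0].owner });
return { firstRepo: repos[0], owner };
```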
## Configuration
|
||||
|
||||
The Hub server is a built-in server identified as `@cherry/hub`.
|
||||
|
||||
### Using Auto Mode (Recommended)
|
||||
|
||||
The easiest way to use the Hub server is through Auto mode:
|
||||
|
||||
1. Click the **MCP Tools** button (hammer icon) in the input bar
|
||||
2. Select **Auto** mode
|
||||
3. The Hub server is automatically enabled for the assistant
|
||||
|
||||
### Manual Configuration
|
||||
|
||||
Alternatively, you can enable the Hub server manually:
|
||||
|
||||
1. Go to **Settings** → **MCP Servers**
|
||||
2. Find **Hub** in the built-in servers list
|
||||
3. Toggle it on
|
||||
4. In the assistant's MCP settings, select the Hub server
|
||||
|
||||
## Caching
|
||||
|
||||
- Tool definitions are cached for **10 minutes**
|
||||
- Cache is automatically refreshed when expired
|
||||
- Cache is invalidated when MCP servers connect/disconnect
|
||||
|
||||
## Limitations
|
||||
|
||||
- Code execution has a **60-second timeout**
|
||||
- Console logs are limited to **1000 entries**
|
||||
- Search results are limited to **50 tools** maximum
|
||||
- The Hub server excludes itself from the aggregated server list
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
LLM
|
||||
│
|
||||
▼
|
||||
HubServer
|
||||
├── search → ToolRegistry → SearchIndex
|
||||
└── exec → Runtime → callMcpTool()
|
||||
│
|
||||
▼
|
||||
MCPService.callTool()
|
||||
│
|
||||
▼
|
||||
External MCP Servers
|
||||
```
|
||||
|
||||
## Files
|
||||
|
||||
| File | Description |
|
||||
|------|-------------|
|
||||
| `index.ts` | Main HubServer class |
|
||||
| `types.ts` | TypeScript type definitions |
|
||||
| `generator.ts` | Converts MCP tools to JS functions with JSDoc |
|
||||
| `tool-registry.ts` | In-memory tool cache with TTL |
|
||||
| `search.ts` | Keyword-based tool search |
|
||||
| `runtime.ts` | JavaScript code execution engine |
|
||||
| `mcp-bridge.ts` | Bridge to Cherry Studio's MCPService |
|
||||
119
src/main/mcpServers/hub/__tests__/generator.test.ts
Normal file
@ -0,0 +1,119 @@
|
||||
import { describe, expect, it } from 'vitest'
|
||||
|
||||
import { generateToolFunction, generateToolsCode } from '../generator'
|
||||
import type { GeneratedTool } from '../types'
|
||||
|
||||
describe('generator', () => {
|
||||
describe('generateToolFunction', () => {
|
||||
it('generates a simple tool function', () => {
|
||||
const tool = {
|
||||
id: 'test-id',
|
||||
name: 'search_repos',
|
||||
description: 'Search for GitHub repositories',
|
||||
serverId: 'github',
|
||||
serverName: 'github-server',
|
||||
inputSchema: {
|
||||
type: 'object' as const,
|
||||
properties: {
|
||||
query: { type: 'string', description: 'Search query' },
|
||||
limit: { type: 'number', description: 'Max results' }
|
||||
},
|
||||
required: ['query']
|
||||
},
|
||||
type: 'mcp' as const
|
||||
}
|
||||
|
||||
const existingNames = new Set<string>()
|
||||
const callTool = async () => ({ success: true })
|
||||
|
||||
const result = generateToolFunction(tool, existingNames, callTool)
|
||||
|
||||
expect(result.functionName).toBe('githubServer_searchRepos')
|
||||
expect(result.jsCode).toContain('async function githubServer_searchRepos')
|
||||
expect(result.jsCode).toContain('Search for GitHub repositories')
|
||||
expect(result.jsCode).toContain('__callTool')
|
||||
})
|
||||
|
||||
it('handles unique function names', () => {
|
||||
const tool = {
|
||||
id: 'test-id',
|
||||
name: 'search',
|
||||
serverId: 'server1',
|
||||
serverName: 'server1',
|
||||
inputSchema: { type: 'object' as const, properties: {} },
|
||||
type: 'mcp' as const
|
||||
}
|
||||
|
||||
const existingNames = new Set<string>(['server1_search'])
|
||||
const callTool = async () => ({})
|
||||
|
||||
const result = generateToolFunction(tool, existingNames, callTool)
|
||||
|
||||
expect(result.functionName).toBe('server1_search1')
|
||||
})
|
||||
|
||||
it('handles enum types in schema', () => {
|
||||
const tool = {
|
||||
id: 'test-id',
|
||||
name: 'launch_browser',
|
||||
serverId: 'browser',
|
||||
serverName: 'browser',
|
||||
inputSchema: {
|
||||
type: 'object' as const,
|
||||
properties: {
|
||||
browser: {
|
||||
type: 'string',
|
||||
enum: ['chromium', 'firefox', 'webkit']
|
||||
}
|
||||
}
|
||||
},
|
||||
type: 'mcp' as const
|
||||
}
|
||||
|
||||
const existingNames = new Set<string>()
|
||||
const callTool = async () => ({})
|
||||
|
||||
const result = generateToolFunction(tool, existingNames, callTool)
|
||||
|
||||
expect(result.jsCode).toContain('"chromium" | "firefox" | "webkit"')
|
||||
})
|
||||
})
|
||||
|
||||
describe('generateToolsCode', () => {
|
||||
it('generates code for multiple tools', () => {
|
||||
const tools: GeneratedTool[] = [
|
||||
{
|
||||
serverId: 's1',
|
||||
serverName: 'server1',
|
||||
toolName: 'tool1',
|
||||
functionName: 'server1_tool1',
|
||||
jsCode: 'async function server1_tool1() {}',
|
||||
fn: async () => ({}),
|
||||
signature: '{}',
|
||||
returns: 'unknown'
|
||||
},
|
||||
{
|
||||
serverId: 's2',
|
||||
serverName: 'server2',
|
||||
toolName: 'tool2',
|
||||
functionName: 'server2_tool2',
|
||||
jsCode: 'async function server2_tool2() {}',
|
||||
fn: async () => ({}),
|
||||
signature: '{}',
|
||||
returns: 'unknown'
|
||||
}
|
||||
]
|
||||
|
||||
const result = generateToolsCode(tools)
|
||||
|
||||
expect(result).toContain('2 tool(s)')
|
||||
expect(result).toContain('async function server1_tool1')
|
||||
expect(result).toContain('async function server2_tool2')
|
||||
})
|
||||
|
||||
it('returns message for empty tools', () => {
|
||||
const result = generateToolsCode([])
|
||||
expect(result).toBe('// No tools available')
|
||||
})
|
||||
})
|
||||
})
|
||||
229
src/main/mcpServers/hub/__tests__/hub.test.ts
Normal file
@ -0,0 +1,229 @@
|
||||
import type { MCPTool } from '@types'
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import { HubServer } from '../index'
|
||||
|
||||
const mockTools: MCPTool[] = [
|
||||
{
|
||||
id: 'github__search_repos',
|
||||
name: 'search_repos',
|
||||
description: 'Search for GitHub repositories',
|
||||
serverId: 'github',
|
||||
serverName: 'GitHub',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
query: { type: 'string', description: 'Search query' },
|
||||
limit: { type: 'number', description: 'Max results' }
|
||||
},
|
||||
required: ['query']
|
||||
},
|
||||
type: 'mcp'
|
||||
},
|
||||
{
|
||||
id: 'github__get_user',
|
||||
name: 'get_user',
|
||||
description: 'Get GitHub user profile',
|
||||
serverId: 'github',
|
||||
serverName: 'GitHub',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
username: { type: 'string', description: 'GitHub username' }
|
||||
},
|
||||
required: ['username']
|
||||
},
|
||||
type: 'mcp'
|
||||
},
|
||||
{
|
||||
id: 'database__query',
|
||||
name: 'query',
|
||||
description: 'Execute a database query',
|
||||
serverId: 'database',
|
||||
serverName: 'Database',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
sql: { type: 'string', description: 'SQL query to execute' }
|
||||
},
|
||||
required: ['sql']
|
||||
},
|
||||
type: 'mcp'
|
||||
}
|
||||
]
|
||||
|
||||
vi.mock('@main/services/MCPService', () => ({
|
||||
default: {
|
||||
listAllActiveServerTools: vi.fn(async () => mockTools),
|
||||
callToolById: vi.fn(async (toolId: string, args: unknown) => {
|
||||
if (toolId === 'github__search_repos') {
|
||||
return {
|
||||
content: [{ type: 'text', text: JSON.stringify({ repos: ['repo1', 'repo2'], query: args }) }]
|
||||
}
|
||||
}
|
||||
if (toolId === 'github__get_user') {
|
||||
return {
|
||||
content: [{ type: 'text', text: JSON.stringify({ username: (args as any).username, id: 123 }) }]
|
||||
}
|
||||
}
|
||||
if (toolId === 'database__query') {
|
||||
return {
|
||||
content: [{ type: 'text', text: JSON.stringify({ rows: [{ id: 1 }, { id: 2 }] }) }]
|
||||
}
|
||||
}
|
||||
return { content: [{ type: 'text', text: '{}' }] }
|
||||
}),
|
||||
abortTool: vi.fn(async () => true)
|
||||
}
|
||||
}))
|
||||
|
||||
import mcpService from '@main/services/MCPService'
|
||||
|
||||
describe('HubServer Integration', () => {
|
||||
let hubServer: HubServer
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks()
|
||||
hubServer = new HubServer()
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks()
|
||||
})
|
||||
|
||||
describe('full search → exec flow', () => {
|
||||
it('searches for tools and executes them', async () => {
|
||||
const searchResult = await (hubServer as any).handleSearch({ query: 'github,repos' })
|
||||
|
||||
expect(searchResult.content).toBeDefined()
|
||||
const searchText = JSON.parse(searchResult.content[0].text)
|
||||
expect(searchText.total).toBeGreaterThan(0)
|
||||
expect(searchText.tools).toContain('github_searchRepos')
|
||||
|
||||
const execResult = await (hubServer as any).handleExec({
|
||||
code: 'return await github_searchRepos({ query: "test" })'
|
||||
})
|
||||
|
||||
expect(execResult.content).toBeDefined()
|
||||
const execOutput = JSON.parse(execResult.content[0].text)
|
||||
expect(execOutput.result).toEqual({ repos: ['repo1', 'repo2'], query: { query: 'test' } })
|
||||
})
|
||||
|
||||
it('handles multiple tool calls in parallel', async () => {
|
||||
await (hubServer as any).handleSearch({ query: 'github' })
|
||||
|
||||
const execResult = await (hubServer as any).handleExec({
|
||||
code: `
|
||||
const results = await parallel(
|
||||
github_searchRepos({ query: "react" }),
|
||||
github_getUser({ username: "octocat" })
|
||||
);
|
||||
return results
|
||||
`
|
||||
})
|
||||
|
||||
const execOutput = JSON.parse(execResult.content[0].text)
|
||||
expect(execOutput.result).toHaveLength(2)
|
||||
expect(execOutput.result[0]).toEqual({ repos: ['repo1', 'repo2'], query: { query: 'react' } })
|
||||
expect(execOutput.result[1]).toEqual({ username: 'octocat', id: 123 })
|
||||
})
|
||||
|
||||
it('searches across multiple servers', async () => {
|
||||
const searchResult = await (hubServer as any).handleSearch({ query: 'query' })
|
||||
|
||||
const searchText = JSON.parse(searchResult.content[0].text)
|
||||
expect(searchText.tools).toContain('database_query')
|
||||
})
|
||||
})
|
||||
|
||||
describe('tools caching', () => {
|
||||
it('uses cached tools within TTL', async () => {
|
||||
await (hubServer as any).handleSearch({ query: 'github' })
|
||||
const firstCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
|
||||
|
||||
await (hubServer as any).handleSearch({ query: 'github' })
|
||||
const secondCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
|
||||
|
||||
expect(secondCallCount).toBe(firstCallCount) // Should use cache
|
||||
})
|
||||
|
||||
it('refreshes tools after cache invalidation', async () => {
|
||||
await (hubServer as any).handleSearch({ query: 'github' })
|
||||
const firstCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
|
||||
|
||||
hubServer.invalidateCache()
|
||||
|
||||
await (hubServer as any).handleSearch({ query: 'github' })
|
||||
const secondCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
|
||||
|
||||
expect(secondCallCount).toBe(firstCallCount + 1)
|
||||
})
|
||||
})
|
||||
|
||||
describe('error handling', () => {
|
||||
it('throws error for invalid search query', async () => {
|
||||
await expect((hubServer as any).handleSearch({})).rejects.toThrow('query parameter is required')
|
||||
})
|
||||
|
||||
it('throws error for invalid exec code', async () => {
|
||||
await expect((hubServer as any).handleExec({})).rejects.toThrow('code parameter is required')
|
||||
})
|
||||
|
||||
it('handles runtime errors in exec', async () => {
|
||||
const execResult = await (hubServer as any).handleExec({
|
||||
code: 'throw new Error("test error")'
|
||||
})
|
||||
|
||||
const execOutput = JSON.parse(execResult.content[0].text)
|
||||
expect(execOutput.error).toBe('test error')
|
||||
expect(execOutput.isError).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe('exec timeouts', () => {
|
||||
afterEach(() => {
|
||||
vi.useRealTimers()
|
||||
})
|
||||
|
||||
it('aborts in-flight tool calls and returns logs on timeout', async () => {
|
||||
vi.useFakeTimers()
|
||||
|
||||
let toolCallStarted: (() => void) | null = null
|
||||
const toolCallStartedPromise = new Promise<void>((resolve) => {
|
||||
toolCallStarted = resolve
|
||||
})
|
||||
|
||||
vi.mocked(mcpService.callToolById).mockImplementationOnce(async () => {
|
||||
toolCallStarted?.()
|
||||
return await new Promise(() => {})
|
||||
})
|
||||
|
||||
const execPromise = (hubServer as any).handleExec({
|
||||
code: `
|
||||
console.log("starting");
|
||||
return await github_searchRepos({ query: "hang" });
|
||||
`
|
||||
})
|
||||
|
||||
await toolCallStartedPromise
|
||||
await vi.advanceTimersByTimeAsync(60000)
|
||||
await vi.runAllTimersAsync()
|
||||
|
||||
const execResult = await execPromise
|
||||
const execOutput = JSON.parse(execResult.content[0].text)
|
||||
|
||||
expect(execOutput.error).toBe('Execution timed out after 60000ms')
|
||||
expect(execOutput.result).toBeUndefined()
|
||||
expect(execOutput.isError).toBe(true)
|
||||
expect(execOutput.logs).toContain('[log] starting')
|
||||
expect(vi.mocked(mcpService.abortTool)).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('server instance', () => {
|
||||
it('creates a valid MCP server instance', () => {
|
||||
expect(hubServer.server).toBeDefined()
|
||||
expect(hubServer.server.setRequestHandler).toBeDefined()
|
||||
})
|
||||
})
|
||||
})
|
||||
159
src/main/mcpServers/hub/__tests__/runtime.test.ts
Normal file
@ -0,0 +1,159 @@
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import { Runtime } from '../runtime'
|
||||
import type { GeneratedTool } from '../types'
|
||||
|
||||
vi.mock('../mcp-bridge', () => ({
|
||||
callMcpTool: vi.fn(async (toolId: string, params: unknown) => {
|
||||
if (toolId === 'server__failing_tool') {
|
||||
throw new Error('Tool failed')
|
||||
}
|
||||
return { toolId, params, success: true }
|
||||
})
|
||||
}))
|
||||
|
||||
const createMockTool = (partial: Partial<GeneratedTool>): GeneratedTool => ({
|
||||
serverId: 'server1',
|
||||
serverName: 'server1',
|
||||
toolName: 'tool',
|
||||
functionName: 'server1_mockTool',
|
||||
jsCode: 'async function server1_mockTool() {}',
|
||||
fn: async (params) => ({ result: params }),
|
||||
signature: '{}',
|
||||
returns: 'unknown',
|
||||
...partial
|
||||
})
|
||||
|
||||
describe('Runtime', () => {
|
||||
describe('execute', () => {
|
||||
it('executes simple code and returns result', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools: GeneratedTool[] = []
|
||||
|
||||
const result = await runtime.execute('return 1 + 1', tools)
|
||||
|
||||
expect(result.result).toBe(2)
|
||||
expect(result.error).toBeUndefined()
|
||||
})
|
||||
|
||||
it('executes async code', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools: GeneratedTool[] = []
|
||||
|
||||
const result = await runtime.execute('return await Promise.resolve(42)', tools)
|
||||
|
||||
expect(result.result).toBe(42)
|
||||
})
|
||||
|
||||
it('calls tool functions', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools = [
|
||||
createMockTool({
|
||||
functionName: 'searchRepos',
|
||||
fn: async (params) => ({ repos: ['repo1', 'repo2'], query: params })
|
||||
})
|
||||
]
|
||||
|
||||
const result = await runtime.execute('return await searchRepos({ query: "test" })', tools)
|
||||
|
||||
expect(result.result).toEqual({ toolId: 'searchRepos', params: { query: 'test' }, success: true })
|
||||
})
|
||||
|
||||
it('captures console logs', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools: GeneratedTool[] = []
|
||||
|
||||
const result = await runtime.execute(
|
||||
`
|
||||
console.log("hello");
|
||||
console.warn("warning");
|
||||
return "done"
|
||||
`,
|
||||
tools
|
||||
)
|
||||
|
||||
expect(result.result).toBe('done')
|
||||
expect(result.logs).toContain('[log] hello')
|
||||
expect(result.logs).toContain('[warn] warning')
|
||||
})
|
||||
|
||||
it('handles errors gracefully', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools: GeneratedTool[] = []
|
||||
|
||||
const result = await runtime.execute('throw new Error("test error")', tools)
|
||||
|
||||
expect(result.result).toBeUndefined()
|
||||
expect(result.error).toBe('test error')
|
||||
expect(result.isError).toBe(true)
|
||||
})
|
||||
|
||||
it('supports parallel helper', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools: GeneratedTool[] = []
|
||||
|
||||
const result = await runtime.execute(
|
||||
`
|
||||
const results = await parallel(
|
||||
Promise.resolve(1),
|
||||
Promise.resolve(2),
|
||||
Promise.resolve(3)
|
||||
);
|
||||
return results
|
||||
`,
|
||||
tools
|
||||
)
|
||||
|
||||
expect(result.result).toEqual([1, 2, 3])
|
||||
})
|
||||
|
||||
it('supports settle helper', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools: GeneratedTool[] = []
|
||||
|
||||
const result = await runtime.execute(
|
||||
`
|
||||
const results = await settle(
|
||||
Promise.resolve(1),
|
||||
Promise.reject(new Error("fail"))
|
||||
);
|
||||
return results.map(r => r.status)
|
||||
`,
|
||||
tools
|
||||
)
|
||||
|
||||
expect(result.result).toEqual(['fulfilled', 'rejected'])
|
||||
})
|
||||
|
||||
it('returns the computed value from multi-statement code', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools: GeneratedTool[] = []
|
||||
|
||||
const result = await runtime.execute(
|
||||
`
|
||||
const x = 10;
|
||||
const y = 20;
|
||||
return x + y
|
||||
`,
|
||||
tools
|
||||
)
|
||||
|
||||
expect(result.result).toBe(30)
|
||||
})
|
||||
|
||||
it('stops execution when a tool throws', async () => {
|
||||
const runtime = new Runtime()
|
||||
const tools = [
|
||||
createMockTool({
|
||||
functionName: 'server__failing_tool'
|
||||
})
|
||||
]
|
||||
|
||||
const result = await runtime.execute('return await server__failing_tool({})', tools)
|
||||
|
||||
expect(result.result).toBeUndefined()
|
||||
expect(result.error).toBe('Tool failed')
|
||||
expect(result.isError).toBe(true)
|
||||
})
|
||||
})
|
||||
})
|
||||
118
src/main/mcpServers/hub/__tests__/search.test.ts
Normal file
@ -0,0 +1,118 @@
|
||||
import { describe, expect, it } from 'vitest'
|
||||
|
||||
import { searchTools } from '../search'
|
||||
import type { GeneratedTool } from '../types'
|
||||
|
||||
const createMockTool = (partial: Partial<GeneratedTool>): GeneratedTool => {
|
||||
const functionName = partial.functionName || 'server1_tool'
|
||||
return {
|
||||
serverId: 'server1',
|
||||
serverName: 'server1',
|
||||
toolName: partial.toolName || 'tool',
|
||||
functionName,
|
||||
jsCode: `async function ${functionName}() {}`,
|
||||
fn: async () => ({}),
|
||||
signature: '{}',
|
||||
returns: 'unknown',
|
||||
...partial
|
||||
}
|
||||
}
|
||||
|
||||
describe('search', () => {
|
||||
describe('searchTools', () => {
|
||||
it('returns all tools when query is empty', () => {
|
||||
const tools = [
|
||||
createMockTool({ toolName: 'tool1', functionName: 'tool1' }),
|
||||
createMockTool({ toolName: 'tool2', functionName: 'tool2' })
|
||||
]
|
||||
|
||||
const result = searchTools(tools, { query: '' })
|
||||
|
||||
expect(result.total).toBe(2)
|
||||
expect(result.tools).toContain('tool1')
|
||||
expect(result.tools).toContain('tool2')
|
||||
})
|
||||
|
||||
it('filters tools by single keyword', () => {
|
||||
const tools = [
|
||||
createMockTool({ toolName: 'search_repos', functionName: 'searchRepos' }),
|
||||
createMockTool({ toolName: 'get_user', functionName: 'getUser' }),
|
||||
createMockTool({ toolName: 'search_users', functionName: 'searchUsers' })
|
||||
]
|
||||
|
||||
const result = searchTools(tools, { query: 'search' })
|
||||
|
||||
expect(result.total).toBe(2)
|
||||
expect(result.tools).toContain('searchRepos')
|
||||
expect(result.tools).toContain('searchUsers')
|
||||
expect(result.tools).not.toContain('getUser')
|
||||
})
|
||||
|
||||
it('supports OR matching with comma-separated keywords', () => {
|
||||
const tools = [
|
||||
createMockTool({ toolName: 'browser_open', functionName: 'browserOpen' }),
|
||||
createMockTool({ toolName: 'chrome_launch', functionName: 'chromeLaunch' }),
|
||||
createMockTool({ toolName: 'file_read', functionName: 'fileRead' })
|
||||
]
|
||||
|
||||
const result = searchTools(tools, { query: 'browser,chrome' })
|
||||
|
||||
expect(result.total).toBe(2)
|
||||
expect(result.tools).toContain('browserOpen')
|
||||
expect(result.tools).toContain('chromeLaunch')
|
||||
expect(result.tools).not.toContain('fileRead')
|
||||
})
|
||||
|
||||
it('matches against description', () => {
|
||||
const tools = [
|
||||
createMockTool({
|
||||
toolName: 'launch',
|
||||
functionName: 'launch',
|
||||
description: 'Launch a browser instance'
|
||||
}),
|
||||
createMockTool({
|
||||
toolName: 'close',
|
||||
functionName: 'close',
|
||||
description: 'Close a window'
|
||||
})
|
||||
]
|
||||
|
||||
const result = searchTools(tools, { query: 'browser' })
|
||||
|
||||
expect(result.total).toBe(1)
|
||||
expect(result.tools).toContain('launch')
|
||||
})
|
||||
|
||||
it('respects limit parameter', () => {
|
||||
const tools = Array.from({ length: 20 }, (_, i) =>
|
||||
createMockTool({ toolName: `tool${i}`, functionName: `server1_tool${i}` })
|
||||
)
|
||||
|
||||
const result = searchTools(tools, { query: 'tool', limit: 5 })
|
||||
|
||||
expect(result.total).toBe(20)
|
||||
const matches = (result.tools.match(/async function server1_tool\d+/g) || []).length
|
||||
expect(matches).toBe(5)
|
||||
})
|
||||
|
||||
it('is case insensitive', () => {
|
||||
const tools = [createMockTool({ toolName: 'SearchRepos', functionName: 'searchRepos' })]
|
||||
|
||||
const result = searchTools(tools, { query: 'SEARCH' })
|
||||
|
||||
expect(result.total).toBe(1)
|
||||
})
|
||||
|
||||
it('ranks exact matches higher', () => {
|
||||
const tools = [
|
||||
createMockTool({ toolName: 'searching', functionName: 'searching' }),
|
||||
createMockTool({ toolName: 'search', functionName: 'search' }),
|
||||
createMockTool({ toolName: 'search_more', functionName: 'searchMore' })
|
||||
]
|
||||
|
||||
const result = searchTools(tools, { query: 'search', limit: 1 })
|
||||
|
||||
expect(result.tools).toContain('function search(')
|
||||
})
|
||||
})
|
||||
})
|
||||
152
src/main/mcpServers/hub/generator.ts
Normal file
@ -0,0 +1,152 @@
|
||||
import { generateMcpToolFunctionName } from '@shared/mcp'
|
||||
import type { MCPTool } from '@types'
|
||||
|
||||
import type { GeneratedTool } from './types'
|
||||
|
||||
type PropertySchema = Record<string, unknown>
|
||||
type InputSchema = {
|
||||
type?: string
|
||||
properties?: Record<string, PropertySchema>
|
||||
required?: string[]
|
||||
}
|
||||
|
||||
function schemaTypeToTS(prop: Record<string, unknown>): string {
|
||||
const type = prop.type as string | string[] | undefined
|
||||
const enumValues = prop.enum as unknown[] | undefined
|
||||
|
||||
if (enumValues && Array.isArray(enumValues)) {
|
||||
return enumValues.map((v) => (typeof v === 'string' ? `"${v}"` : String(v))).join(' | ')
|
||||
}
|
||||
|
||||
if (Array.isArray(type)) {
|
||||
return type.map((t) => primitiveTypeToTS(t)).join(' | ')
|
||||
}
|
||||
|
||||
if (type === 'array') {
|
||||
const items = prop.items as Record<string, unknown> | undefined
|
||||
if (items) {
|
||||
return `${schemaTypeToTS(items)}[]`
|
||||
}
|
||||
return 'unknown[]'
|
||||
}
|
||||
|
||||
if (type === 'object') {
|
||||
return 'object'
|
||||
}
|
||||
|
||||
return primitiveTypeToTS(type)
|
||||
}
|
||||
|
||||
function primitiveTypeToTS(type: string | undefined): string {
|
||||
switch (type) {
|
||||
case 'string':
|
||||
return 'string'
|
||||
case 'number':
|
||||
case 'integer':
|
||||
return 'number'
|
||||
case 'boolean':
|
||||
return 'boolean'
|
||||
case 'null':
|
||||
return 'null'
|
||||
default:
|
||||
return 'unknown'
|
||||
}
|
||||
}
|
||||
|
||||
function jsonSchemaToSignature(schema: Record<string, unknown> | undefined): string {
|
||||
if (!schema || typeof schema !== 'object') {
|
||||
return '{}'
|
||||
}
|
||||
|
||||
const properties = schema.properties as Record<string, Record<string, unknown>> | undefined
|
||||
if (!properties) {
|
||||
return '{}'
|
||||
}
|
||||
|
||||
const required = (schema.required as string[]) || []
|
||||
const parts: string[] = []
|
||||
|
||||
for (const [key, prop] of Object.entries(properties)) {
|
||||
const isRequired = required.includes(key)
|
||||
const typeStr = schemaTypeToTS(prop)
|
||||
parts.push(`${key}${isRequired ? '' : '?'}: ${typeStr}`)
|
||||
}
|
||||
|
||||
return `{ ${parts.join(', ')} }`
|
||||
}
|
||||
|
||||
function generateJSDoc(tool: MCPTool, inputSchema: InputSchema | undefined, returns: string): string {
|
||||
const lines: string[] = ['/**']
|
||||
|
||||
if (tool.description) {
|
||||
const desc = tool.description.split('\n')[0]
|
||||
lines.push(` * ${desc}`)
|
||||
}
|
||||
|
||||
const properties = inputSchema?.properties || {}
|
||||
const required = inputSchema?.required || []
|
||||
|
||||
if (Object.keys(properties).length > 0) {
|
||||
lines.push(` * @param {Object} params`)
|
||||
for (const [name, prop] of Object.entries(properties)) {
|
||||
const isReq = required.includes(name)
|
||||
const type = schemaTypeToTS(prop)
|
||||
const paramName = isReq ? `params.${name}` : `[params.${name}]`
|
||||
const desc = (prop.description as string)?.split('\n')[0] || ''
|
||||
lines.push(` * @param {${type}} ${paramName} ${desc}`)
|
||||
}
|
||||
}
|
||||
|
||||
lines.push(` * @returns {Promise<${returns}>}`)
|
||||
lines.push(` */`)
|
||||
|
||||
return lines.join('\n')
|
||||
}
|
||||
|
||||
export function generateToolFunction(
|
||||
tool: MCPTool,
|
||||
existingNames: Set<string>,
|
||||
callToolFn: (functionName: string, params: unknown) => Promise<unknown>
|
||||
): GeneratedTool {
|
||||
const functionName = generateMcpToolFunctionName(tool.serverName, tool.name, existingNames)
|
||||
|
||||
const inputSchema = tool.inputSchema as InputSchema | undefined
|
||||
const outputSchema = tool.outputSchema as Record<string, unknown> | undefined
|
||||
|
||||
const signature = jsonSchemaToSignature(inputSchema)
|
||||
const returns = outputSchema ? jsonSchemaToSignature(outputSchema) : 'unknown'
|
||||
|
||||
const jsDoc = generateJSDoc(tool, inputSchema, returns)
|
||||
|
||||
const jsCode = `${jsDoc}
|
||||
async function ${functionName}(params) {
|
||||
return await __callTool("${functionName}", params);
|
||||
}`
|
||||
|
||||
const fn = async (params: unknown): Promise<unknown> => {
|
||||
return await callToolFn(functionName, params)
|
||||
}
|
||||
|
||||
return {
|
||||
serverId: tool.serverId,
|
||||
serverName: tool.serverName,
|
||||
toolName: tool.name,
|
||||
functionName,
|
||||
jsCode,
|
||||
fn,
|
||||
signature,
|
||||
returns,
|
||||
description: tool.description
|
||||
}
|
||||
}
|
||||
|
||||
export function generateToolsCode(tools: GeneratedTool[]): string {
|
||||
if (tools.length === 0) {
|
||||
return '// No tools available'
|
||||
}
|
||||
|
||||
const header = `// ${tools.length} tool(s). ALWAYS use: const r = await ToolName({...}); return r;`
|
||||
const code = tools.map((t) => t.jsCode).join('\n\n')
|
||||
|
||||
return header + '\n\n' + code
|
||||
}
|
||||
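`generateToolFunction` above delegates naming to `generateMcpToolFunctionName` from `@shared/mcp`, which is not part of this diff. The following is a sketch consistent with what the tests assert (camelCased segments joined by an underscore, numeric suffix on collision); the shared helper may differ in detail:

```typescript
// Hypothetical reconstruction of the naming helper; the real one lives in @shared/mcp.
function toCamel(segmented: string): string {
  const [first, ...rest] = segmented.split(/[^a-zA-Z0-9]+/).filter(Boolean)
  if (!first) return ''
  return first.toLowerCase() + rest.map((part) => part.charAt(0).toUpperCase() + part.slice(1)).join('')
}

export function generateMcpToolFunctionName(
  serverName: string,
  toolName: string,
  existingNames: Set<string>
): string {
  // 'github-server' + 'search_repos' -> 'githubServer_searchRepos'
  const base = `${toCamel(serverName)}_${toCamel(toolName)}`
  let candidate = base
  let suffix = 1
  while (existingNames.has(candidate)) {
    // Collisions get a numeric suffix: 'server1_search' -> 'server1_search1'
    candidate = `${base}${suffix++}`
  }
  existingNames.add(candidate)
  return candidate
}
```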
184
src/main/mcpServers/hub/index.ts
Normal file
@ -0,0 +1,184 @@
|
||||
import { cacheService } from '@data/CacheService'
|
||||
import { loggerService } from '@logger'
|
||||
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
|
||||
import { CallToolRequestSchema, ErrorCode, ListToolsRequestSchema, McpError } from '@modelcontextprotocol/sdk/types.js'
|
||||
|
||||
import { generateToolFunction } from './generator'
|
||||
import { callMcpTool, clearToolMap, listAllTools, syncToolMapFromGeneratedTools } from './mcp-bridge'
|
||||
import { Runtime } from './runtime'
|
||||
import { searchTools } from './search'
|
||||
import type { ExecInput, GeneratedTool, SearchQuery } from './types'
|
||||
|
||||
const logger = loggerService.withContext('MCPServer:Hub')
|
||||
const TOOLS_CACHE_KEY = 'hub:tools'
|
||||
const TOOLS_CACHE_TTL = 60 * 1000 // 1 minute
|
||||
|
||||
/**
|
||||
* Hub MCP Server - A meta-server that aggregates all active MCP servers.
|
||||
*
|
||||
* This server is NOT included in builtinMCPServers because:
|
||||
* 1. It aggregates tools from all other MCP servers, not a standalone tool provider
|
||||
* 2. It's designed for LLM "code mode" - enabling AI to discover and call tools programmatically
|
||||
* 3. It should be auto-enabled when code mode features are used, not manually installed by users
|
||||
*
|
||||
* The server exposes two tools:
|
||||
* - `search`: Find available tools by keywords, returns JS function signatures
|
||||
* - `exec`: Execute JavaScript code that calls discovered tools
|
||||
*/
|
||||
export class HubServer {
|
||||
public server: Server
|
||||
private runtime: Runtime
|
||||
|
||||
constructor() {
|
||||
this.runtime = new Runtime()
|
||||
|
||||
this.server = new Server(
|
||||
{
|
||||
name: 'hub-server',
|
||||
version: '1.0.0'
|
||||
},
|
||||
{
|
||||
capabilities: {
|
||||
tools: {}
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
this.setupRequestHandlers()
|
||||
}
|
||||
|
||||
private setupRequestHandlers(): void {
|
||||
this.server.setRequestHandler(ListToolsRequestSchema, async () => {
|
||||
return {
|
||||
tools: [
|
||||
{
|
||||
name: 'search',
|
||||
description:
|
||||
'Search for available MCP tools by keywords. Use this FIRST to discover tools. Returns JavaScript async function declarations with JSDoc showing exact function names, parameters, and return types for use in `exec`.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
query: {
|
||||
type: 'string',
|
||||
description:
|
||||
'Comma-separated search keywords. A tool matches if ANY keyword appears in its name, description, or server name. Example: "chrome,browser,tab" matches tools related to Chrome OR browser OR tabs.'
|
||||
},
|
||||
limit: {
|
||||
type: 'number',
|
||||
description: 'Maximum number of tools to return (default: 10, max: 50)'
|
||||
}
|
||||
},
|
||||
required: ['query']
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'exec',
|
||||
description:
|
||||
'Execute JavaScript that calls MCP tools discovered via `search`. IMPORTANT: You MUST explicitly `return` the final value, or the result will be `undefined`.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
code: {
|
||||
type: 'string',
|
||||
description:
|
||||
'JavaScript code to execute. The code runs inside an async context, so use `await` directly. Do NOT wrap your code in `(async () => { ... })()` - this causes double-wrapping and returns undefined. All discovered tools are async functions (call as `await ToolName(params)`). Helpers: `parallel(...promises)`, `settle(...promises)`, `console.*`. You MUST `return` the final value. Examples: `const r = await Tool({ id: "1" }); return r` or `return await Tool({ x: 1 })`'
|
||||
}
|
||||
},
|
||||
required: ['code']
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
})
|
||||
|
||||
this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
||||
const { name, arguments: args } = request.params
|
||||
|
||||
if (!args) {
|
||||
throw new McpError(ErrorCode.InvalidParams, 'No arguments provided')
|
||||
}
|
||||
|
||||
try {
|
||||
switch (name) {
|
||||
case 'search':
|
||||
return await this.handleSearch(args as unknown as SearchQuery)
|
||||
case 'exec':
|
||||
return await this.handleExec(args as unknown as ExecInput)
|
||||
default:
|
||||
throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${name}`)
|
||||
}
|
||||
} catch (error) {
|
||||
if (error instanceof McpError) {
|
||||
throw error
|
||||
}
|
||||
logger.error(`Error executing tool ${name}:`, error as Error)
|
||||
throw new McpError(
|
||||
ErrorCode.InternalError,
|
||||
`Error executing tool ${name}: ${error instanceof Error ? error.message : String(error)}`
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
private async fetchTools(): Promise<GeneratedTool[]> {
|
||||
const cached = cacheService.get<GeneratedTool[]>(TOOLS_CACHE_KEY)
|
||||
if (cached) {
|
||||
logger.debug('Returning cached tools')
|
||||
syncToolMapFromGeneratedTools(cached)
|
||||
return cached
|
||||
}
|
||||
|
||||
logger.debug('Fetching fresh tools')
|
||||
const allTools = await listAllTools()
|
||||
const existingNames = new Set<string>()
|
||||
const tools = allTools.map((tool) => generateToolFunction(tool, existingNames, callMcpTool))
|
||||
cacheService.set(TOOLS_CACHE_KEY, tools, TOOLS_CACHE_TTL)
|
||||
syncToolMapFromGeneratedTools(tools)
|
||||
return tools
|
||||
}
|
||||
|
||||
invalidateCache(): void {
|
||||
cacheService.delete(TOOLS_CACHE_KEY)
|
||||
clearToolMap()
|
||||
logger.debug('Tools cache invalidated')
|
||||
}
|
||||
|
||||
private async handleSearch(query: SearchQuery) {
|
||||
if (!query.query || typeof query.query !== 'string') {
|
||||
throw new McpError(ErrorCode.InvalidParams, 'query parameter is required and must be a string')
|
||||
}
|
||||
|
||||
const tools = await this.fetchTools()
|
||||
const result = searchTools(tools, query)
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(result, null, 2)
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
private async handleExec(input: ExecInput) {
|
||||
if (!input.code || typeof input.code !== 'string') {
|
||||
throw new McpError(ErrorCode.InvalidParams, 'code parameter is required and must be a string')
|
||||
}
|
||||
|
||||
const tools = await this.fetchTools()
|
||||
const result = await this.runtime.execute(input.code, tools)
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(result, null, 2)
|
||||
}
|
||||
],
|
||||
isError: result.isError
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export default HubServer
|
||||
96
src/main/mcpServers/hub/mcp-bridge.ts
Normal file
@ -0,0 +1,96 @@
|
||||
/**
|
||||
* Bridge module for Hub server to access MCPService.
|
||||
* Re-exports the methods needed by tool-registry and runtime.
|
||||
*/
|
||||
import mcpService from '@main/services/MCPService'
|
||||
import { generateMcpToolFunctionName } from '@shared/mcp'
|
||||
import type { MCPCallToolResponse, MCPTool, MCPToolResultContent } from '@types'
|
||||
|
||||
import type { GeneratedTool } from './types'
|
||||
|
||||
export const listAllTools = () => mcpService.listAllActiveServerTools()
|
||||
|
||||
const toolFunctionNameToIdMap = new Map<string, { serverId: string; toolName: string }>()
|
||||
|
||||
export async function refreshToolMap(): Promise<void> {
|
||||
const tools = await listAllTools()
|
||||
syncToolMapFromTools(tools)
|
||||
}
|
||||
|
||||
export function syncToolMapFromTools(tools: MCPTool[]): void {
|
||||
toolFunctionNameToIdMap.clear()
|
||||
const existingNames = new Set<string>()
|
||||
for (const tool of tools) {
|
||||
const functionName = generateMcpToolFunctionName(tool.serverName, tool.name, existingNames)
|
||||
toolFunctionNameToIdMap.set(functionName, { serverId: tool.serverId, toolName: tool.name })
|
||||
}
|
||||
}
|
||||
|
||||
export function syncToolMapFromGeneratedTools(tools: GeneratedTool[]): void {
|
||||
toolFunctionNameToIdMap.clear()
|
||||
for (const tool of tools) {
|
||||
toolFunctionNameToIdMap.set(tool.functionName, { serverId: tool.serverId, toolName: tool.toolName })
|
||||
}
|
||||
}
|
||||
|
||||
export function clearToolMap(): void {
|
||||
toolFunctionNameToIdMap.clear()
|
||||
}
|
||||
|
||||
export const callMcpTool = async (functionName: string, params: unknown, callId?: string): Promise<unknown> => {
  let toolInfo = toolFunctionNameToIdMap.get(functionName)
  if (!toolInfo) {
    // The map may be stale (e.g. servers reconnected); refresh once and retry the lookup.
    await refreshToolMap()
    toolInfo = toolFunctionNameToIdMap.get(functionName)
  }
  if (!toolInfo) {
    throw new Error(`Tool not found: ${functionName}`)
  }
  const toolId = `${toolInfo.serverId}__${toolInfo.toolName}`
  const result = await mcpService.callToolById(toolId, params, callId)
  throwIfToolError(result)
  return extractToolResult(result)
}
|
||||
|
||||
export const abortMcpTool = async (callId: string): Promise<boolean> => {
|
||||
return mcpService.abortTool(null as unknown as Electron.IpcMainInvokeEvent, callId)
|
||||
}
|
||||
|
||||
function extractToolResult(result: MCPCallToolResponse): unknown {
|
||||
if (!result.content || result.content.length === 0) {
|
||||
return null
|
||||
}
|
||||
|
||||
const textContent = result.content.find((c) => c.type === 'text')
|
||||
if (textContent?.text) {
|
||||
try {
|
||||
return JSON.parse(textContent.text)
|
||||
} catch {
|
||||
return textContent.text
|
||||
}
|
||||
}
|
||||
|
||||
return result.content
|
||||
}
|
||||
|
||||
function throwIfToolError(result: MCPCallToolResponse): void {
|
||||
if (!result.isError) {
|
||||
return
|
||||
}
|
||||
|
||||
const textContent = extractTextContent(result.content)
|
||||
throw new Error(textContent ?? 'Tool execution failed')
|
||||
}
|
||||
|
||||
function extractTextContent(content: MCPToolResultContent[] | undefined): string | undefined {
|
||||
if (!content || content.length === 0) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
const textBlock = content.find((item) => item.type === 'text' && item.text)
|
||||
return textBlock?.text
|
||||
}
|
||||
170
src/main/mcpServers/hub/runtime.ts
Normal file
@ -0,0 +1,170 @@
import crypto from 'node:crypto'
import { Worker } from 'node:worker_threads'

import { loggerService } from '@logger'

import { abortMcpTool, callMcpTool } from './mcp-bridge'
import type {
  ExecOutput,
  GeneratedTool,
  HubWorkerCallToolMessage,
  HubWorkerExecMessage,
  HubWorkerMessage,
  HubWorkerResultMessage
} from './types'
import { hubWorkerSource } from './worker'

const logger = loggerService.withContext('MCPServer:Hub:Runtime')

const MAX_LOGS = 1000
const EXECUTION_TIMEOUT = 60000

export class Runtime {
  async execute(code: string, tools: GeneratedTool[]): Promise<ExecOutput> {
    return await new Promise<ExecOutput>((resolve) => {
      const logs: string[] = []
      const activeCallIds = new Map<string, string>()
      let finished = false
      let timedOut = false
      let timeoutId: NodeJS.Timeout | null = null

      const worker = new Worker(hubWorkerSource, { eval: true })

      const addLog = (entry: string) => {
        if (logs.length >= MAX_LOGS) {
          return
        }
        logs.push(entry)
      }

      const finalize = async (output: ExecOutput, terminateWorker = true) => {
        if (finished) {
          return
        }
        finished = true
        if (timeoutId) {
          clearTimeout(timeoutId)
        }
        worker.removeAllListeners()
        if (terminateWorker) {
          try {
            await worker.terminate()
          } catch (error) {
            logger.warn('Failed to terminate exec worker', error as Error)
          }
        }
        resolve(output)
      }

      const abortActiveTools = async () => {
        const callIds = Array.from(activeCallIds.values())
        activeCallIds.clear()
        if (callIds.length === 0) {
          return
        }
        await Promise.allSettled(callIds.map((callId) => abortMcpTool(callId)))
      }

      const handleToolCall = async (message: HubWorkerCallToolMessage) => {
        if (finished || timedOut) {
          return
        }
        const callId = crypto.randomUUID()
        activeCallIds.set(message.requestId, callId)

        try {
          const result = await callMcpTool(message.functionName, message.params, callId)
          if (finished || timedOut) {
            return
          }
          worker.postMessage({ type: 'toolResult', requestId: message.requestId, result })
        } catch (error) {
          if (finished || timedOut) {
            return
          }
          const errorMessage = error instanceof Error ? error.message : String(error)
          worker.postMessage({ type: 'toolError', requestId: message.requestId, error: errorMessage })
        } finally {
          activeCallIds.delete(message.requestId)
        }
      }

      const handleResult = (message: HubWorkerResultMessage) => {
        const resolvedLogs = message.logs && message.logs.length > 0 ? message.logs : logs
        void finalize({
          result: message.result,
          logs: resolvedLogs.length > 0 ? resolvedLogs : undefined
        })
      }

      const handleError = (errorMessage: string, messageLogs?: string[], terminateWorker = true) => {
        const resolvedLogs = messageLogs && messageLogs.length > 0 ? messageLogs : logs
        void finalize(
          {
            result: undefined,
            logs: resolvedLogs.length > 0 ? resolvedLogs : undefined,
            error: errorMessage,
            isError: true
          },
          terminateWorker
        )
      }

      const handleMessage = (message: HubWorkerMessage) => {
        if (!message || typeof message !== 'object') {
          return
        }
        switch (message.type) {
          case 'log':
            addLog(message.entry)
            break
          case 'callTool':
            void handleToolCall(message)
            break
          case 'result':
            handleResult(message)
            break
          case 'error':
            handleError(message.error, message.logs)
            break
          default:
            break
        }
      }

      timeoutId = setTimeout(() => {
        timedOut = true
        void (async () => {
          await abortActiveTools()
          try {
            await worker.terminate()
          } catch (error) {
            logger.warn('Failed to terminate exec worker after timeout', error as Error)
          }
          handleError(`Execution timed out after ${EXECUTION_TIMEOUT}ms`, undefined, false)
        })()
      }, EXECUTION_TIMEOUT)

      worker.on('message', handleMessage)
      worker.on('error', (error) => {
        logger.error('Worker execution error', error)
        handleError(error instanceof Error ? error.message : String(error))
      })
      worker.on('exit', (code) => {
        if (finished || timedOut) {
          return
        }
        const message = code === 0 ? 'Exec worker exited unexpectedly' : `Exec worker exited with code ${code}`
        logger.error(message)
        handleError(message, undefined, false)
      })

      const execMessage: HubWorkerExecMessage = {
        type: 'exec',
        code,
        tools: tools.map((tool) => ({ functionName: tool.functionName }))
      }
      worker.postMessage(execMessage)
    })
  }
}
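For orientation, here is a hedged sketch of how a caller might drive `Runtime.execute`: the hub hands it a code string plus the generated tool stubs and gets back the result, the captured logs, and any error. The `makeTool` helper and the `mcp__fetch__get_page` tool are hypothetical, included only to satisfy the `GeneratedTool` shape; real entries would come from the hub's generator.

```ts
import { Runtime } from './runtime'
import type { GeneratedTool } from './types'

// Hypothetical helper that fills in the GeneratedTool fields the runtime expects.
const makeTool = (serverName: string, toolName: string): GeneratedTool => ({
  serverId: serverName,
  serverName,
  toolName,
  functionName: `mcp__${serverName}__${toolName}`,
  jsCode: '',
  fn: async () => undefined,
  signature: `${toolName}(params)`,
  returns: 'unknown'
})

async function demo() {
  const runtime = new Runtime()

  // The code string runs inside the worker; each tool's functionName is
  // injected into its scope as an async function (see worker.ts).
  const output = await runtime.execute(
    `
    const page = await mcp__fetch__get_page({ url: 'https://example.com' })
    console.log('fetched page')
    return page
    `,
    [makeTool('fetch', 'get_page')]
  )

  console.log(output.result, output.logs, output.error)
}
```

Execution is bounded by `EXECUTION_TIMEOUT`; on timeout the runtime first aborts any in-flight tool calls and then terminates the worker before reporting the error.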
109
src/main/mcpServers/hub/search.ts
Normal file
@ -0,0 +1,109 @@
import { generateToolsCode } from './generator'
import type { GeneratedTool, SearchQuery, SearchResult } from './types'

const DEFAULT_LIMIT = 10
const MAX_LIMIT = 50

export function searchTools(tools: GeneratedTool[], query: SearchQuery): SearchResult {
  const { query: queryStr, limit = DEFAULT_LIMIT } = query
  const effectiveLimit = Math.min(Math.max(1, limit), MAX_LIMIT)

  const keywords = queryStr
    .toLowerCase()
    .split(',')
    .map((k) => k.trim())
    .filter((k) => k.length > 0)

  if (keywords.length === 0) {
    const sliced = tools.slice(0, effectiveLimit)
    return {
      tools: generateToolsCode(sliced),
      total: tools.length
    }
  }

  const matchedTools = tools.filter((tool) => {
    const searchText = buildSearchText(tool).toLowerCase()
    return keywords.some((keyword) => searchText.includes(keyword))
  })

  const rankedTools = rankTools(matchedTools, keywords)
  const sliced = rankedTools.slice(0, effectiveLimit)

  return {
    tools: generateToolsCode(sliced),
    total: matchedTools.length
  }
}

function buildSearchText(tool: GeneratedTool): string {
  const combinedName = tool.serverName ? `${tool.serverName}_${tool.toolName}` : tool.toolName
  const parts = [
    tool.toolName,
    tool.functionName,
    tool.serverName,
    combinedName,
    tool.description || '',
    tool.signature
  ]
  return parts.join(' ')
}

function rankTools(tools: GeneratedTool[], keywords: string[]): GeneratedTool[] {
  const scored = tools.map((tool) => ({
    tool,
    score: calculateScore(tool, keywords)
  }))

  scored.sort((a, b) => b.score - a.score)

  return scored.map((s) => s.tool)
}

function calculateScore(tool: GeneratedTool, keywords: string[]): number {
  let score = 0
  const toolName = tool.toolName.toLowerCase()
  const serverName = (tool.serverName || '').toLowerCase()
  const functionName = tool.functionName.toLowerCase()
  const description = (tool.description || '').toLowerCase()

  for (const keyword of keywords) {
    // Match tool name
    if (toolName === keyword) {
      score += 10
    } else if (toolName.startsWith(keyword)) {
      score += 5
    } else if (toolName.includes(keyword)) {
      score += 3
    }

    // Match server name
    if (serverName === keyword) {
      score += 8
    } else if (serverName.startsWith(keyword)) {
      score += 4
    } else if (serverName.includes(keyword)) {
      score += 2
    }

    // Match function name (serverName_toolName format)
    if (functionName === keyword) {
      score += 10
    } else if (functionName.startsWith(keyword)) {
      score += 5
    } else if (functionName.includes(keyword)) {
      score += 3
    }

    if (description.includes(keyword)) {
      const count = (description.match(new RegExp(escapeRegex(keyword), 'g')) || []).length
      score += Math.min(count, 3)
    }
  }

  return score
}

function escapeRegex(str: string): string {
  return str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
}
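A small usage sketch of the keyword search, assuming the fixtures below are representative; `stub` is a hypothetical helper, and `generateToolsCode` from `./generator` (not shown in this diff) renders the returned `tools` string.

```ts
import { searchTools } from './search'
import type { GeneratedTool } from './types'

// Hypothetical fixtures; only the name and description fields affect scoring.
const stub = (serverName: string, toolName: string, description?: string): GeneratedTool => ({
  serverId: serverName,
  serverName,
  toolName,
  functionName: `mcp__${serverName}__${toolName}`,
  jsCode: '',
  fn: async () => undefined,
  signature: `${toolName}(params)`,
  returns: 'unknown',
  description
})

const registry = [
  stub('github', 'search_issues', 'Search GitHub issues'),
  stub('github', 'create_issue', 'Create a GitHub issue'),
  stub('filesystem', 'read_file', 'Read a file from disk')
]

// Comma-separated keywords are OR-ed; exact and prefix matches on tool,
// server, and function names outrank plain substring and description hits.
const { tools, total } = searchTools(registry, { query: 'issue, github', limit: 5 })
// The filesystem tool matches neither keyword, so total counts only the two GitHub tools.
console.log(total, tools)
```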
113
src/main/mcpServers/hub/types.ts
Normal file
@ -0,0 +1,113 @@
import type { MCPServer, MCPTool } from '@types'

export interface GeneratedTool {
  serverId: string
  serverName: string
  toolName: string
  functionName: string
  jsCode: string
  fn: (params: unknown) => Promise<unknown>
  signature: string
  returns: string
  description?: string
}

export interface SearchQuery {
  query: string
  limit?: number
}

export interface SearchResult {
  tools: string
  total: number
}

export interface ExecInput {
  code: string
}

export type ExecOutput = {
  result: unknown
  logs?: string[]
  error?: string
  isError?: boolean
}

export interface ToolRegistryOptions {
  ttl?: number
}

export interface MCPToolWithServer extends MCPTool {
  server: MCPServer
}

export interface ExecutionContext {
  __callTool: (functionName: string, params: unknown) => Promise<unknown>
  parallel: <T>(...promises: Promise<T>[]) => Promise<T[]>
  settle: <T>(...promises: Promise<T>[]) => Promise<PromiseSettledResult<T>[]>
  console: ConsoleMethods
  [functionName: string]: unknown
}

export interface ConsoleMethods {
  log: (...args: unknown[]) => void
  warn: (...args: unknown[]) => void
  error: (...args: unknown[]) => void
  info: (...args: unknown[]) => void
  debug: (...args: unknown[]) => void
}

export type HubWorkerTool = {
  functionName: string
}

export type HubWorkerExecMessage = {
  type: 'exec'
  code: string
  tools: HubWorkerTool[]
}

export type HubWorkerCallToolMessage = {
  type: 'callTool'
  requestId: string
  functionName: string
  params: unknown
}

export type HubWorkerToolResultMessage = {
  type: 'toolResult'
  requestId: string
  result: unknown
}

export type HubWorkerToolErrorMessage = {
  type: 'toolError'
  requestId: string
  error: string
}

export type HubWorkerResultMessage = {
  type: 'result'
  result: unknown
  logs?: string[]
}

export type HubWorkerErrorMessage = {
  type: 'error'
  error: string
  logs?: string[]
}

export type HubWorkerLogMessage = {
  type: 'log'
  entry: string
}

export type HubWorkerMessage =
  | HubWorkerExecMessage
  | HubWorkerCallToolMessage
  | HubWorkerToolResultMessage
  | HubWorkerToolErrorMessage
  | HubWorkerResultMessage
  | HubWorkerErrorMessage
  | HubWorkerLogMessage
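To make the worker protocol concrete, here is a hedged sketch of one round trip between the host (`runtime.ts`) and the sandbox (`worker.ts`) expressed with these message types; the request ID and payloads are invented for illustration.

```ts
import type {
  HubWorkerCallToolMessage,
  HubWorkerExecMessage,
  HubWorkerResultMessage,
  HubWorkerToolResultMessage
} from './types'

// 1. Host -> worker: run this snippet with one callable tool in scope.
const exec: HubWorkerExecMessage = {
  type: 'exec',
  code: `return await mcp__github__search_issues({ q: 'is:open label:bug' })`,
  tools: [{ functionName: 'mcp__github__search_issues' }]
}

// 2. Worker -> host: the injected stub asks the host to run the real MCP tool.
const callTool: HubWorkerCallToolMessage = {
  type: 'callTool',
  requestId: 'req-1', // hypothetical UUID
  functionName: 'mcp__github__search_issues',
  params: { q: 'is:open label:bug' }
}

// 3. Host -> worker: the bridge's answer, resolved into the pending promise.
const toolResult: HubWorkerToolResultMessage = {
  type: 'toolResult',
  requestId: 'req-1',
  result: { issues: [] }
}

// 4. Worker -> host: final value plus any captured console output.
const result: HubWorkerResultMessage = {
  type: 'result',
  result: { issues: [] },
  logs: ['[log] search finished']
}
```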
133
src/main/mcpServers/hub/worker.ts
Normal file
@ -0,0 +1,133 @@
export const hubWorkerSource = `
const crypto = require('node:crypto')
const { parentPort } = require('node:worker_threads')

const MAX_LOGS = 1000

const logs = []
const pendingCalls = new Map()
let isExecuting = false

const stringify = (value) => {
  if (value === undefined) return 'undefined'
  if (value === null) return 'null'
  if (typeof value === 'string') return value
  if (typeof value === 'number' || typeof value === 'boolean') return String(value)
  if (value instanceof Error) return value.message

  try {
    return JSON.stringify(value, null, 2)
  } catch {
    return String(value)
  }
}

const pushLog = (level, args) => {
  if (logs.length >= MAX_LOGS) {
    return
  }
  const message = args.map((arg) => stringify(arg)).join(' ')
  const entry = \`[\${level}] \${message}\`
  logs.push(entry)
  parentPort?.postMessage({ type: 'log', entry })
}

const capturedConsole = {
  log: (...args) => pushLog('log', args),
  warn: (...args) => pushLog('warn', args),
  error: (...args) => pushLog('error', args),
  info: (...args) => pushLog('info', args),
  debug: (...args) => pushLog('debug', args)
}

const callTool = (functionName, params) =>
  new Promise((resolve, reject) => {
    const requestId = crypto.randomUUID()
    pendingCalls.set(requestId, { resolve, reject })
    parentPort?.postMessage({ type: 'callTool', requestId, functionName, params })
  })

const buildContext = (tools) => {
  const context = {
    __callTool: callTool,
    parallel: (...promises) => Promise.all(promises),
    settle: (...promises) => Promise.allSettled(promises),
    console: capturedConsole
  }

  for (const tool of tools) {
    context[tool.functionName] = (params) => callTool(tool.functionName, params)
  }

  return context
}

const runCode = async (code, context) => {
  const contextKeys = Object.keys(context)
  const contextValues = contextKeys.map((key) => context[key])

  const wrappedCode = \`
    return (async () => {
      \${code}
    })()
  \`

  const fn = new Function(...contextKeys, wrappedCode)
  return await fn(...contextValues)
}

const handleExec = async (code, tools) => {
  if (isExecuting) {
    return
  }
  isExecuting = true

  try {
    const context = buildContext(tools)
    const result = await runCode(code, context)
    parentPort?.postMessage({ type: 'result', result, logs: logs.length > 0 ? logs : undefined })
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error)
    parentPort?.postMessage({ type: 'error', error: errorMessage, logs: logs.length > 0 ? logs : undefined })
  } finally {
    pendingCalls.clear()
  }
}

const handleToolResult = (message) => {
  const pending = pendingCalls.get(message.requestId)
  if (!pending) {
    return
  }
  pendingCalls.delete(message.requestId)
  pending.resolve(message.result)
}

const handleToolError = (message) => {
  const pending = pendingCalls.get(message.requestId)
  if (!pending) {
    return
  }
  pendingCalls.delete(message.requestId)
  pending.reject(new Error(message.error))
}

parentPort?.on('message', (message) => {
  if (!message || typeof message !== 'object') {
    return
  }
  switch (message.type) {
    case 'exec':
      handleExec(message.code, message.tools ?? [])
      break
    case 'toolResult':
      handleToolResult(message)
      break
    case 'toolError':
      handleToolError(message)
      break
    default:
      break
  }
})
`
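Inside the sandbox, the executed snippet only sees the injected context: `console` is the capturing shim above, `parallel`/`settle` wrap `Promise.all`/`Promise.allSettled`, and each generated tool is exposed as an async function under its `functionName`. A hedged example of the kind of snippet the hub could pass as `code` follows (the tool names are hypothetical); `runCode` wraps it in an async IIFE, so top-level `await` and `return` are available.

```ts
const exampleCode = `
  const [repo, issues] = await parallel(
    mcp__github__get_repository({ owner: 'CherryHQ', repo: 'cherry-studio' }),
    mcp__github__search_issues({ q: 'is:open label:bug' })
  )
  console.log('open bug count:', issues.length)
  return { repo, issues }
`
```

This string would travel to the worker in an `'exec'` message and come back as a `'result'` (or `'error'`) message carrying the captured logs.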
@ -10,6 +10,7 @@ import {
  scanDir
} from '@main/utils/file'
import { documentExts, imageExts, KB, MB } from '@shared/config/constant'
import { parseDataUrl } from '@shared/utils'
import type { FileMetadata, NotesTreeNode } from '@types'
import { FileTypes } from '@types'
import chardet from 'chardet'
@ -672,8 +673,8 @@ class FileStorage {
      throw new Error('Base64 data is required')
    }

    // Strip the base64 data-URL header if present
    const base64String = base64Data.replace(/^data:.*;base64,/, '')
    const parseResult = parseDataUrl(base64Data)
    const base64String = parseResult?.data ?? base64Data
    const buffer = Buffer.from(base64String, 'base64')
    const uuid = uuidv4()
    const ext = '.png'
@ -1464,8 +1465,8 @@ class FileStorage {
      })

      if (filePath) {
        const base64Data = data.replace(/^data:image\/png;base64,/, '')
        fs.writeFileSync(filePath, base64Data, 'base64')
        const parseResult = parseDataUrl(data)
        fs.writeFileSync(filePath, parseResult?.data ?? data, 'base64')
      }
    } catch (error) {
      logger.error('[IPC - Error] An error occurred saving the image:', error as Error)

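The two hunks above swap ad-hoc regex stripping for `parseDataUrl` from `@shared/utils`. Judging only from how it is used here, it appears to return an object whose `data` field is the raw base64 payload (or a nullish value when the input is not a data URL); a minimal sketch under that assumption:

```ts
import { parseDataUrl } from '@shared/utils'

// Hypothetical input; FileStorage only relies on the `.data` field,
// falling back to the original string when parsing fails.
const parsed = parseDataUrl('data:image/png;base64,iVBORw0KGgo=')
const base64Payload = parsed?.data ?? 'iVBORw0KGgo='
```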
@ -4,9 +4,9 @@ import path from 'node:path'

import { cacheService } from '@data/CacheService'
import { loggerService } from '@logger'
import { getMCPServersFromRedux } from '@main/apiServer/utils/mcp'
import { createInMemoryMCPServer } from '@main/mcpServers/factory'
import { makeSureDirExists, removeEnvProxy } from '@main/utils'
import { buildFunctionCallToolName } from '@main/utils/mcp'
import { findCommandInShellEnv, getBinaryName, getBinaryPath, isBinaryExists } from '@main/utils/process'
import getLoginShellEnvironment from '@main/utils/shell-env'
import { TraceMethod, withSpanFunc } from '@mcp-trace/trace-core'
@ -36,6 +36,7 @@ import { HOME_CHERRY_DIR } from '@shared/config/constant'
import type { MCPProgressEvent } from '@shared/config/types'
import type { MCPServerLogEntry } from '@shared/config/types'
import { IpcChannel } from '@shared/IpcChannel'
import { buildFunctionCallToolName } from '@shared/mcp'
import { defaultAppHeaders } from '@shared/utils'
import {
  BuiltinMCPServerNames,
@ -165,6 +166,67 @@ class McpService {
    this.getServerLogs = this.getServerLogs.bind(this)
  }

  /**
   * List all tools from all active MCP servers (excluding hub).
   * Used by Hub server's tool registry.
   */
  public async listAllActiveServerTools(): Promise<MCPTool[]> {
    const servers = await getMCPServersFromRedux()
    const activeServers = servers.filter((server) => server.isActive)

    const results = await Promise.allSettled(
      activeServers.map(async (server) => {
        const tools = await this.listToolsImpl(server)
        const disabledTools = new Set(server.disabledTools ?? [])
        return disabledTools.size > 0 ? tools.filter((tool) => !disabledTools.has(tool.name)) : tools
      })
    )

    const allTools: MCPTool[] = []
    results.forEach((result, index) => {
      if (result.status === 'fulfilled') {
        allTools.push(...result.value)
      } else {
        logger.error(
          `[listAllActiveServerTools] Failed to list tools from ${activeServers[index].name}:`,
          result.reason as Error
        )
      }
    })

    return allTools
  }

  /**
   * Call a tool by its full ID (serverId__toolName format).
   * Used by Hub server's runtime.
   */
  public async callToolById(toolId: string, params: unknown, callId?: string): Promise<MCPCallToolResponse> {
    const parts = toolId.split('__')
    if (parts.length < 2) {
      throw new Error(`Invalid tool ID format: ${toolId}`)
    }

    const serverId = parts[0]
    const toolName = parts.slice(1).join('__')

    const servers = await getMCPServersFromRedux()
    const server = servers.find((s) => s.id === serverId)

    if (!server) {
      throw new Error(`Server not found: ${serverId}`)
    }

    logger.debug(`[callToolById] Calling tool ${toolName} on server ${server.name}`)

    return this.callTool(null as unknown as Electron.IpcMainInvokeEvent, {
      server,
      name: toolName,
      args: params,
      callId
    })
  }

  private getServerKey(server: MCPServer): string {
    return JSON.stringify({
      baseUrl: server.baseUrl,

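The `serverId__toolName` convention used by `callToolById` splits only on the first `__`, so tool names that themselves contain double underscores survive the parse. A hedged sketch of how the hub's bridge might invoke it (the IDs and arguments are made up):

```ts
import mcpService from '@main/services/MCPService'

async function callFromHub() {
  // 'github' is the server id; everything after the first '__' is the tool name.
  return await mcpService.callToolById(
    'github__search_issues',
    { q: 'is:open label:bug' },
    'call-123' // optional callId, used for abort bookkeeping
  )
}
```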
75
src/main/services/__tests__/MCPService.test.ts
Normal file
@ -0,0 +1,75 @@
import type { MCPServer, MCPTool } from '@types'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'

vi.mock('@main/apiServer/utils/mcp', () => ({
  getMCPServersFromRedux: vi.fn()
}))

vi.mock('@main/services/WindowService', () => ({
  windowService: {
    getMainWindow: vi.fn(() => null)
  }
}))

import { getMCPServersFromRedux } from '@main/apiServer/utils/mcp'
import mcpService from '@main/services/MCPService'

const baseInputSchema: { type: 'object'; properties: Record<string, unknown>; required: string[] } = {
  type: 'object',
  properties: {},
  required: []
}

const createTool = (overrides: Partial<MCPTool>): MCPTool => ({
  id: `${overrides.serverId}__${overrides.name}`,
  name: overrides.name ?? 'tool',
  description: overrides.description,
  serverId: overrides.serverId ?? 'server',
  serverName: overrides.serverName ?? 'server',
  inputSchema: baseInputSchema,
  type: 'mcp',
  ...overrides
})

describe('MCPService.listAllActiveServerTools', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })

  afterEach(() => {
    vi.restoreAllMocks()
  })

  it('filters disabled tools per server', async () => {
    const servers: MCPServer[] = [
      {
        id: 'alpha',
        name: 'Alpha',
        isActive: true,
        disabledTools: ['disabled_tool']
      },
      {
        id: 'beta',
        name: 'Beta',
        isActive: true
      }
    ]

    vi.mocked(getMCPServersFromRedux).mockResolvedValue(servers)

    const listToolsSpy = vi.spyOn(mcpService as any, 'listToolsImpl').mockImplementation(async (server: any) => {
      if (server.id === 'alpha') {
        return [
          createTool({ name: 'enabled_tool', serverId: server.id, serverName: server.name }),
          createTool({ name: 'disabled_tool', serverId: server.id, serverName: server.name })
        ]
      }
      return [createTool({ name: 'beta_tool', serverId: server.id, serverName: server.name })]
    })

    const tools = await mcpService.listAllActiveServerTools()

    expect(listToolsSpy).toHaveBeenCalledTimes(2)
    expect(tools.map((tool) => tool.name)).toEqual(['enabled_tool', 'beta_tool'])
  })
})
@ -1,7 +1,7 @@
import { loggerService } from '@logger'
import { mcpApiService } from '@main/apiServer/services/mcp'
import { type ModelValidationError, validateModelId } from '@main/apiServer/utils'
import { buildFunctionCallToolName } from '@main/utils/mcp'
import { buildFunctionCallToolName } from '@shared/mcp'
import type { AgentType, MCPTool, SlashCommand, Tool } from '@types'
import { objectKeys } from '@types'
import fs from 'fs'

@ -39,22 +39,22 @@ const agent = await agentService.createAgent({

```bash
# Apply schema changes
yarn agents:generate
pnpm agents:generate

# Quick development sync
yarn agents:push
pnpm agents:push

# Database tools
yarn agents:studio # Open Drizzle Studio
yarn agents:health # Health check
yarn agents:drop # Reset database
pnpm agents:studio # Open Drizzle Studio
pnpm agents:health # Health check
pnpm agents:drop # Reset database
```

## Workflow

1. **Edit schema** in `/database/schema/`
2. **Generate migration** with `yarn agents:generate`
3. **Test changes** with `yarn agents:health`
2. **Generate migration** with `pnpm agents:generate`
3. **Test changes** with `pnpm agents:health`
4. **Deploy** - migrations apply automatically

## Services

@ -69,13 +69,13 @@ yarn agents:drop # Reset database

```bash
# Check status
yarn agents:health
pnpm agents:health

# Apply migrations
yarn agents:migrate
pnpm agents:migrate

# Reset completely
yarn agents:reset --yes
pnpm agents:reset --yes
```

The simplified migration system reduced complexity from 463 to ~30 lines while maintaining all functionality through Drizzle's native migration system.

@ -1,225 +0,0 @@
import { describe, expect, it } from 'vitest'

import { buildFunctionCallToolName } from '../mcp'

describe('buildFunctionCallToolName', () => {
  describe('basic format', () => {
    it('should return format mcp__{server}__{tool}', () => {
      const result = buildFunctionCallToolName('github', 'search_issues')
      expect(result).toBe('mcp__github__search_issues')
    })

    it('should handle simple server and tool names', () => {
      expect(buildFunctionCallToolName('fetch', 'get_page')).toBe('mcp__fetch__get_page')
      expect(buildFunctionCallToolName('database', 'query')).toBe('mcp__database__query')
      expect(buildFunctionCallToolName('cherry_studio', 'search')).toBe('mcp__cherry_studio__search')
    })
  })

  describe('valid JavaScript identifier', () => {
    it('should always start with mcp__ prefix (valid JS identifier start)', () => {
      const result = buildFunctionCallToolName('123server', '456tool')
      expect(result).toMatch(/^mcp__/)
      expect(result).toBe('mcp__123server__456tool')
    })

    it('should only contain alphanumeric chars and underscores', () => {
      const result = buildFunctionCallToolName('my-server', 'my-tool')
      expect(result).toBe('mcp__my_server__my_tool')
      expect(result).toMatch(/^[a-zA-Z][a-zA-Z0-9_]*$/)
    })

    it('should be a valid JavaScript identifier', () => {
      const testCases = [
        ['github', 'create_issue'],
        ['my-server', 'fetch-data'],
        ['test@server', 'tool#name'],
        ['server.name', 'tool.action'],
        ['123abc', 'def456']
      ]

      for (const [server, tool] of testCases) {
        const result = buildFunctionCallToolName(server, tool)
        // Valid JS identifiers match this pattern
        expect(result).toMatch(/^[a-zA-Z_][a-zA-Z0-9_]*$/)
      }
    })
  })

  describe('character sanitization', () => {
    it('should replace dashes with underscores', () => {
      const result = buildFunctionCallToolName('my-server', 'my-tool-name')
      expect(result).toBe('mcp__my_server__my_tool_name')
    })

    it('should replace special characters with underscores', () => {
      const result = buildFunctionCallToolName('test@server!', 'tool#name$')
      expect(result).toBe('mcp__test_server__tool_name')
    })

    it('should replace dots with underscores', () => {
      const result = buildFunctionCallToolName('server.name', 'tool.action')
      expect(result).toBe('mcp__server_name__tool_action')
    })

    it('should replace spaces with underscores', () => {
      const result = buildFunctionCallToolName('my server', 'my tool')
      expect(result).toBe('mcp__my_server__my_tool')
    })

    it('should collapse consecutive underscores', () => {
      const result = buildFunctionCallToolName('my--server', 'my___tool')
      expect(result).toBe('mcp__my_server__my_tool')
      expect(result).not.toMatch(/_{3,}/)
    })

    it('should trim leading and trailing underscores from parts', () => {
      const result = buildFunctionCallToolName('_server_', '_tool_')
      expect(result).toBe('mcp__server__tool')
    })

    it('should handle names with only special characters', () => {
      const result = buildFunctionCallToolName('---', '###')
      expect(result).toBe('mcp____')
    })
  })

  describe('length constraints', () => {
    it('should not exceed 63 characters', () => {
      const longServerName = 'a'.repeat(50)
      const longToolName = 'b'.repeat(50)
      const result = buildFunctionCallToolName(longServerName, longToolName)

      expect(result.length).toBeLessThanOrEqual(63)
    })

    it('should truncate server name to max 20 chars', () => {
      const longServerName = 'abcdefghijklmnopqrstuvwxyz' // 26 chars
      const result = buildFunctionCallToolName(longServerName, 'tool')

      expect(result).toBe('mcp__abcdefghijklmnopqrst__tool')
      expect(result).toContain('abcdefghijklmnopqrst') // First 20 chars
      expect(result).not.toContain('uvwxyz') // Truncated
    })

    it('should truncate tool name to max 35 chars', () => {
      const longToolName = 'a'.repeat(40)
      const result = buildFunctionCallToolName('server', longToolName)

      const expectedTool = 'a'.repeat(35)
      expect(result).toBe(`mcp__server__${expectedTool}`)
    })

    it('should not end with underscores after truncation', () => {
      // Create a name that would end with underscores after truncation
      const longServerName = 'a'.repeat(20)
      const longToolName = 'b'.repeat(35) + '___extra'
      const result = buildFunctionCallToolName(longServerName, longToolName)

      expect(result).not.toMatch(/_+$/)
      expect(result.length).toBeLessThanOrEqual(63)
    })

    it('should handle max length edge case exactly', () => {
      // mcp__ (5) + server (20) + __ (2) + tool (35) = 62 chars
      const server = 'a'.repeat(20)
      const tool = 'b'.repeat(35)
      const result = buildFunctionCallToolName(server, tool)

      expect(result.length).toBe(62)
      expect(result).toBe(`mcp__${'a'.repeat(20)}__${'b'.repeat(35)}`)
    })
  })

  describe('edge cases', () => {
    it('should handle empty server name', () => {
      const result = buildFunctionCallToolName('', 'tool')
      expect(result).toBe('mcp____tool')
    })

    it('should handle empty tool name', () => {
      const result = buildFunctionCallToolName('server', '')
      expect(result).toBe('mcp__server__')
    })

    it('should handle both empty names', () => {
      const result = buildFunctionCallToolName('', '')
      expect(result).toBe('mcp____')
    })

    it('should handle whitespace-only names', () => {
      const result = buildFunctionCallToolName(' ', ' ')
      expect(result).toBe('mcp____')
    })

    it('should trim whitespace from names', () => {
      const result = buildFunctionCallToolName(' server ', ' tool ')
      expect(result).toBe('mcp__server__tool')
    })

    it('should handle unicode characters', () => {
      const result = buildFunctionCallToolName('服务器', '工具')
      // Unicode chars are replaced with underscores, then collapsed
      expect(result).toMatch(/^mcp__/)
    })

    it('should handle mixed case', () => {
      const result = buildFunctionCallToolName('MyServer', 'MyTool')
      expect(result).toBe('mcp__MyServer__MyTool')
    })
  })

  describe('deterministic output', () => {
    it('should produce consistent results for same input', () => {
      const serverName = 'github'
      const toolName = 'search_repos'

      const result1 = buildFunctionCallToolName(serverName, toolName)
      const result2 = buildFunctionCallToolName(serverName, toolName)
      const result3 = buildFunctionCallToolName(serverName, toolName)

      expect(result1).toBe(result2)
      expect(result2).toBe(result3)
    })

    it('should produce different results for different inputs', () => {
      const result1 = buildFunctionCallToolName('server1', 'tool')
      const result2 = buildFunctionCallToolName('server2', 'tool')
      const result3 = buildFunctionCallToolName('server', 'tool1')
      const result4 = buildFunctionCallToolName('server', 'tool2')

      expect(result1).not.toBe(result2)
      expect(result3).not.toBe(result4)
    })
  })

  describe('real-world scenarios', () => {
    it('should handle GitHub MCP server', () => {
      expect(buildFunctionCallToolName('github', 'create_issue')).toBe('mcp__github__create_issue')
      expect(buildFunctionCallToolName('github', 'search_repositories')).toBe('mcp__github__search_repositories')
      expect(buildFunctionCallToolName('github', 'get_pull_request')).toBe('mcp__github__get_pull_request')
    })

    it('should handle filesystem MCP server', () => {
      expect(buildFunctionCallToolName('filesystem', 'read_file')).toBe('mcp__filesystem__read_file')
      expect(buildFunctionCallToolName('filesystem', 'write_file')).toBe('mcp__filesystem__write_file')
      expect(buildFunctionCallToolName('filesystem', 'list_directory')).toBe('mcp__filesystem__list_directory')
    })

    it('should handle hyphenated server names (common in npm packages)', () => {
      expect(buildFunctionCallToolName('cherry-fetch', 'get_page')).toBe('mcp__cherry_fetch__get_page')
      expect(buildFunctionCallToolName('mcp-server-github', 'search')).toBe('mcp__mcp_server_github__search')
    })

    it('should handle scoped npm package style names', () => {
      const result = buildFunctionCallToolName('@anthropic/mcp-server', 'chat')
      expect(result).toBe('mcp__anthropic_mcp_server__chat')
    })

    it('should handle tools with long descriptive names', () => {
      const result = buildFunctionCallToolName('github', 'search_repositories_by_language_and_stars')
      expect(result.length).toBeLessThanOrEqual(63)
      expect(result).toMatch(/^mcp__github__search_repositories_by_lan/)
    })
  })
})
@ -13,18 +13,13 @@ export async function getIpCountry(): Promise<string> {
    const controller = new AbortController()
    const timeoutId = setTimeout(() => controller.abort(), 5000)

    const ipinfo = await net.fetch('https://ipinfo.io/json', {
      signal: controller.signal,
      headers: {
        'User-Agent':
          'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
        'Accept-Language': 'en-US,en;q=0.9'
      }
    const ipinfo = await net.fetch(`https://api.ipinfo.io/lite/me?token=2a42580355dae4`, {
      signal: controller.signal
    })

    clearTimeout(timeoutId)
    const data = await ipinfo.json()
    const country = data.country || 'CN'
    const country = data.country_code || 'CN'
    logger.info(`Detected user IP address country: ${country}`)
    return country
  } catch (error) {

@ -1,29 +0,0 @@
/**
 * Builds a valid JavaScript function name for MCP tool calls.
 * Format: mcp__{server_name}__{tool_name}
 *
 * @param serverName - The MCP server name
 * @param toolName - The tool name from the server
 * @returns A valid JS identifier in format mcp__{server}__{tool}, max 63 chars
 */
export function buildFunctionCallToolName(serverName: string, toolName: string): string {
  // Sanitize to valid JS identifier chars (alphanumeric + underscore only)
  const sanitize = (str: string): string =>
    str
      .trim()
      .replace(/[^a-zA-Z0-9]/g, '_') // Replace all non-alphanumeric with underscore
      .replace(/_{2,}/g, '_') // Collapse multiple underscores
      .replace(/^_+|_+$/g, '') // Trim leading/trailing underscores

  const server = sanitize(serverName).slice(0, 20) // Keep server name short
  const tool = sanitize(toolName).slice(0, 35) // More room for tool name

  let name = `mcp__${server}__${tool}`

  // Ensure max 63 chars and clean trailing underscores
  if (name.length > 63) {
    name = name.slice(0, 63).replace(/_+$/, '')
  }

  return name
}
@ -8,13 +8,13 @@ import { QueryClient, QueryClientProvider } from '@tanstack/react-query'
import { Provider } from 'react-redux'
import { PersistGate } from 'redux-persist/integration/react'

import { AppShell } from './components/layout/AppShell'
import TopViewContainer from './components/TopView'
import AntdProvider from './context/AntdProvider'
import { CodeStyleProvider } from './context/CodeStyleProvider'
import { NotificationProvider } from './context/NotificationProvider'
import StyleSheetManager from './context/StyleSheetManager'
import { ThemeProvider } from './context/ThemeProvider'
import Router from './Router'

const logger = loggerService.withContext('App.tsx')

@ -43,8 +43,8 @@ function App(): React.ReactElement {
        <CodeStyleProvider>
          <PersistGate loading={null} persistor={persistor}>
            <TopViewContainer>
              <Router />
              <Toaster />
              <AppShell />
            </TopViewContainer>
          </PersistGate>
        </CodeStyleProvider>

@ -1,67 +0,0 @@
import '@renderer/databases'

import type { FC } from 'react'
import { useMemo } from 'react'
import { HashRouter, Route, Routes } from 'react-router-dom'

import Sidebar from './components/app/Sidebar'
import { ErrorBoundary } from './components/ErrorBoundary'
import TabsContainer from './components/Tab/TabContainer'
import NavigationHandler from './handler/NavigationHandler'
import { useNavbarPosition } from './hooks/useNavbar'
import CodeToolsPage from './pages/code/CodeToolsPage'
import FilesPage from './pages/files/FilesPage'
import HomePage from './pages/home/HomePage'
import KnowledgePage from './pages/knowledge/KnowledgePage'
import LaunchpadPage from './pages/launchpad/LaunchpadPage'
import MinAppPage from './pages/minapps/MinAppPage'
import MinAppsPage from './pages/minapps/MinAppsPage'
import NotesPage from './pages/notes/NotesPage'
import PaintingsRoutePage from './pages/paintings/PaintingsRoutePage'
import SettingsPage from './pages/settings/SettingsPage'
import AssistantPresetsPage from './pages/store/assistants/presets/AssistantPresetsPage'
import TranslatePage from './pages/translate/TranslatePage'

const Router: FC = () => {
  const { navbarPosition } = useNavbarPosition()

  const routes = useMemo(() => {
    return (
      <ErrorBoundary>
        <Routes>
          <Route path="/" element={<HomePage />} />
          <Route path="/store" element={<AssistantPresetsPage />} />
          <Route path="/paintings/*" element={<PaintingsRoutePage />} />
          <Route path="/translate" element={<TranslatePage />} />
          <Route path="/files" element={<FilesPage />} />
          <Route path="/notes" element={<NotesPage />} />
          <Route path="/knowledge" element={<KnowledgePage />} />
          <Route path="/apps/:appId" element={<MinAppPage />} />
          <Route path="/apps" element={<MinAppsPage />} />
          <Route path="/code" element={<CodeToolsPage />} />
          <Route path="/settings/*" element={<SettingsPage />} />
          <Route path="/launchpad" element={<LaunchpadPage />} />
        </Routes>
      </ErrorBoundary>
    )
  }, [])

  if (navbarPosition === 'left') {
    return (
      <HashRouter>
        <Sidebar />
        {routes}
        <NavigationHandler />
      </HashRouter>
    )
  }

  return (
    <HashRouter>
      <NavigationHandler />
      <TabsContainer>{routes}</TabsContainer>
    </HashRouter>
  )
}

export default Router
Some files were not shown because too many files have changed in this diff.