Mirror of https://github.com/CherryHQ/cherry-studio.git (synced 2025-12-19 14:41:24 +08:00)

Compare commits
119 Commits

3045f924ce, a6ba5d34e0, 8ab375161d, 42260710d8, 5e8646c6a5, 7e93e8b9b2, eb7a2cc85a, fd6986076a,
6309cc179d, c04529a23c, 0f1b3afa72, 0cf0072b51, 150bb3e3a0, 739096deca, 1d5dafa325, bdfda7afb1,
ef25eef0eb, c676a93595, e85009fcd6, 99d7223a0a, bdd272b7cd, 782f8496e0, bfeef7ef91, 784fdd4fed,
432b31c7b1, f2b4a2382b, b66787280a, d41229c69b, aeebd343d7, 71df9d61fd, 4d3d5ae4ce, a1f0addafb,
e78f25ff91, 68f70e3b16, fd921103dd, a1e44a6827, ee7eee24da, f0ec2354dc, 5bd550bfb4, dc0c47c64d,
66feee714b, 96aba33077, 97f6275104, b906849c17, f742ebed1f, d7b9a6e09a, be9a8b8699, 512d872ac3,
95f5853d7d, c1bf6cfbb7, 595a0f194a, a91c69982c, 6b25fbb901, c52a2dbc48, 367c4fe6b6, 5f3af646f4,
ed695a8620, 8cd4b1b747, 9ac7e2c78d, c4fd48376d, 600a045ff7, 880673c4eb, 03db02d5f7, fda2287475,
76524d68c6, 96085707ce, 711f805a5b, 6df60a69c3, 058a2c763b, 7507443d8b, 8ede7b197f, 086190228a,
adbadf5da6, 73fc74d875, bc00c11a00, f8c33db450, 61c171dafc, e1e6702425, e6003463ac, 0cc4c96bc0,
d35434b6d6, ef5b97813c, 4c4f832bc7, 9f7e47304d, 1a737f5137, 82ec18c0fb, 0cabdefb9a, 224ab6a69b,
bba7ecae6e, 516b8479d6, b58a2fce03, ebfc60b039, 8f39ecf762, 8d1d09b1ec, 3cedb95db3, 9d6d827f88,
968210faa7, ea36b918f1, 92bb05950d, a566cd65f4, cd699825ed, 86a16f5762, 6343628739, a2a6c62f48,
981bb9f451, 9637fb8a43, fb20173194, 387e8f77f5, 4f701d3e45, aeabc28451, f571dd7af0, 33457686ac,
6696bcacb8, a1e95b55f8, 600199dfcf, 77fd90ef7d, fb45d94efb, 3aedf6f138, 3e6dc56196
.github/workflows/auto-i18n.yml (4 lines changed)

@@ -23,7 +23,7 @@ jobs:
 
     steps:
       - name: 🐈⬛ Checkout
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
        with:
          fetch-depth: 0
 
@@ -54,7 +54,7 @@ jobs:
           yarn install
 
      - name: 🏃♀️ Translate
-        run: yarn sync:i18n && yarn auto:i18n
+        run: yarn i18n:sync && yarn i18n:translate
 
      - name: 🔍 Format
        run: yarn format
.github/workflows/claude-code-review.yml (2 lines changed)

@@ -27,7 +27,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
        with:
          fetch-depth: 1
 
.github/workflows/claude-translator.yml (2 lines changed)

@@ -32,7 +32,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
        with:
          fetch-depth: 1
 
.github/workflows/claude.yml (2 lines changed)

@@ -37,7 +37,7 @@ jobs:
       actions: read # Required for Claude to read CI results on PRs
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
        with:
          fetch-depth: 1
 
.github/workflows/dispatch-docs-update.yml (2 lines changed)

@@ -19,7 +19,7 @@ jobs:
           echo "tag=${{ github.event.release.tag_name }}" >> $GITHUB_OUTPUT
 
      - name: Dispatch update-download-version workflow to cherry-studio-docs
-        uses: peter-evans/repository-dispatch@v3
+        uses: peter-evans/repository-dispatch@v4
        with:
          token: ${{ secrets.REPO_DISPATCH_TOKEN }}
          repository: CherryHQ/cherry-studio-docs
.github/workflows/github-issue-tracker.yml (6 lines changed)

@@ -19,7 +19,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
 
      - name: Check Beijing Time
        id: check_time
@@ -42,7 +42,7 @@ jobs:
 
      - name: Add pending label if in quiet hours
        if: steps.check_time.outputs.should_delay == 'true'
-        uses: actions/github-script@v7
+        uses: actions/github-script@v8
        with:
          script: |
            github.rest.issues.addLabels({
@@ -118,7 +118,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
 
      - name: Setup Node.js
        uses: actions/setup-node@v6
.github/workflows/nightly-build.yml (2 lines changed)

@@ -51,7 +51,7 @@ jobs:
 
     steps:
       - name: Check out Git repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
        with:
          ref: main
 
.github/workflows/pr-ci.yml (4 lines changed)

@@ -21,7 +21,7 @@ jobs:
 
     steps:
       - name: Check out Git repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
 
      - name: Install Node.js
        uses: actions/setup-node@v6
@@ -58,7 +58,7 @@ jobs:
        run: yarn typecheck
 
      - name: i18n Check
-        run: yarn check:i18n
+        run: yarn i18n:check
 
      - name: Test
        run: yarn test
.github/workflows/release.yml (2 lines changed)

@@ -25,7 +25,7 @@ jobs:
 
     steps:
       - name: Check out Git repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
        with:
          fetch-depth: 0
 
.github/workflows/sync-to-gitcode.yml (new file, 305 lines)

name: Sync Release to GitCode

on:
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      tag:
        description: 'Release tag (e.g. v1.0.0)'
        required: true
      clean:
        description: 'Clean node_modules before build'
        type: boolean
        default: false

permissions:
  contents: read

jobs:
  build-and-sync-to-gitcode:
    runs-on: [self-hosted, windows-signing]
    steps:
      - name: Get tag name
        id: get-tag
        shell: bash
        run: |
          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
            echo "tag=${{ github.event.inputs.tag }}" >> $GITHUB_OUTPUT
          else
            echo "tag=${{ github.event.release.tag_name }}" >> $GITHUB_OUTPUT
          fi

      - name: Check out Git repository
        uses: actions/checkout@v6
        with:
          fetch-depth: 0
          ref: ${{ steps.get-tag.outputs.tag }}

      - name: Set package.json version
        shell: bash
        run: |
          TAG="${{ steps.get-tag.outputs.tag }}"
          VERSION="${TAG#v}"
          npm version "$VERSION" --no-git-tag-version --allow-same-version

      - name: Install Node.js
        uses: actions/setup-node@v6
        with:
          node-version: 22

      - name: Install corepack
        shell: bash
        run: corepack enable && corepack prepare yarn@4.9.1 --activate

      - name: Clean node_modules
        if: ${{ github.event.inputs.clean == 'true' }}
        shell: bash
        run: rm -rf node_modules

      - name: Install Dependencies
        shell: bash
        run: yarn install

      - name: Build Windows with code signing
        shell: bash
        run: yarn build:win
        env:
          WIN_SIGN: true
          CHERRY_CERT_PATH: ${{ secrets.CHERRY_CERT_PATH }}
          CHERRY_CERT_KEY: ${{ secrets.CHERRY_CERT_KEY }}
          CHERRY_CERT_CSP: ${{ secrets.CHERRY_CERT_CSP }}
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          NODE_OPTIONS: --max-old-space-size=8192
          MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }}
          MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }}
          RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }}
          RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }}

      - name: List built Windows artifacts
        shell: bash
        run: |
          echo "Built Windows artifacts:"
          ls -la dist/*.exe dist/*.blockmap dist/latest*.yml

      - name: Download GitHub release assets
        shell: bash
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          TAG_NAME: ${{ steps.get-tag.outputs.tag }}
        run: |
          echo "Downloading release assets for $TAG_NAME..."
          mkdir -p release-assets
          cd release-assets

          # Download all assets from the release
          gh release download "$TAG_NAME" \
            --repo "${{ github.repository }}" \
            --pattern "*" \
            --skip-existing

          echo "Downloaded GitHub release assets:"
          ls -la

      - name: Replace Windows files with signed versions
        shell: bash
        run: |
          echo "Replacing Windows files with signed versions..."

          # Verify signed files exist first
          if ! ls dist/*.exe 1>/dev/null 2>&1; then
            echo "ERROR: No signed .exe files found in dist/"
            exit 1
          fi

          # Remove unsigned Windows files from downloaded assets
          # *.exe, *.exe.blockmap, latest.yml (Windows only)
          rm -f release-assets/*.exe release-assets/*.exe.blockmap release-assets/latest.yml 2>/dev/null || true

          # Copy signed Windows files with error checking
          cp dist/*.exe release-assets/ || { echo "ERROR: Failed to copy .exe files"; exit 1; }
          cp dist/*.exe.blockmap release-assets/ || { echo "ERROR: Failed to copy .blockmap files"; exit 1; }
          cp dist/latest.yml release-assets/ || { echo "ERROR: Failed to copy latest.yml"; exit 1; }

          echo "Final release assets:"
          ls -la release-assets/

      - name: Get release info
        id: release-info
        shell: bash
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          TAG_NAME: ${{ steps.get-tag.outputs.tag }}
          LANG: C.UTF-8
          LC_ALL: C.UTF-8
        run: |
          # Always use gh cli to avoid special character issues
          RELEASE_NAME=$(gh release view "$TAG_NAME" --repo "${{ github.repository }}" --json name -q '.name')
          # Use delimiter to safely handle special characters in release name
          {
            echo 'name<<EOF'
            echo "$RELEASE_NAME"
            echo 'EOF'
          } >> $GITHUB_OUTPUT
          # Extract releaseNotes from electron-builder.yml (from releaseNotes: | to end of file, remove 4-space indent)
          sed -n '/releaseNotes: |/,$ { /releaseNotes: |/d; s/^    //; p }' electron-builder.yml > release_body.txt

      - name: Create GitCode release and upload files
        shell: bash
        env:
          GITCODE_TOKEN: ${{ secrets.GITCODE_TOKEN }}
          GITCODE_OWNER: ${{ vars.GITCODE_OWNER }}
          GITCODE_REPO: ${{ vars.GITCODE_REPO }}
          GITCODE_API_URL: ${{ vars.GITCODE_API_URL }}
          TAG_NAME: ${{ steps.get-tag.outputs.tag }}
          RELEASE_NAME: ${{ steps.release-info.outputs.name }}
          LANG: C.UTF-8
          LC_ALL: C.UTF-8
        run: |
          # Validate required environment variables
          if [ -z "$GITCODE_TOKEN" ]; then
            echo "ERROR: GITCODE_TOKEN is not set"
            exit 1
          fi
          if [ -z "$GITCODE_OWNER" ]; then
            echo "ERROR: GITCODE_OWNER is not set"
            exit 1
          fi
          if [ -z "$GITCODE_REPO" ]; then
            echo "ERROR: GITCODE_REPO is not set"
            exit 1
          fi

          API_URL="${GITCODE_API_URL:-https://api.gitcode.com/api/v5}"

          echo "Creating GitCode release..."
          echo "Tag: $TAG_NAME"
          echo "Repo: $GITCODE_OWNER/$GITCODE_REPO"

          # Step 1: Create release
          # Use --rawfile to read body directly from file, avoiding shell variable encoding issues
          jq -n \
            --arg tag "$TAG_NAME" \
            --arg name "$RELEASE_NAME" \
            --rawfile body release_body.txt \
            '{
              tag_name: $tag,
              name: $name,
              body: $body,
              target_commitish: "main"
            }' > /tmp/release_payload.json

          RELEASE_RESPONSE=$(curl -s -w "\n%{http_code}" -X POST \
            --connect-timeout 30 --max-time 60 \
            "${API_URL}/repos/${GITCODE_OWNER}/${GITCODE_REPO}/releases" \
            -H "Content-Type: application/json; charset=utf-8" \
            -H "Authorization: Bearer ${GITCODE_TOKEN}" \
            --data-binary "@/tmp/release_payload.json")

          HTTP_CODE=$(echo "$RELEASE_RESPONSE" | tail -n1)
          RESPONSE_BODY=$(echo "$RELEASE_RESPONSE" | sed '$d')

          if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ]; then
            echo "Release created successfully"
          else
            echo "Warning: Release creation returned HTTP $HTTP_CODE"
            echo "$RESPONSE_BODY"
            exit 1
          fi

          # Step 2: Upload files to release
          echo "Uploading files to GitCode release..."

          # Function to upload a single file with retry
          upload_file() {
            local file="$1"
            local filename=$(basename "$file")
            local max_retries=3
            local retry=0
            local curl_status=0

            echo "Uploading: $filename"

            # URL encode the filename
            encoded_filename=$(printf '%s' "$filename" | jq -sRr @uri)

            while [ $retry -lt $max_retries ]; do
              # Get upload URL
              curl_status=0
              UPLOAD_INFO=$(curl -s --connect-timeout 30 --max-time 60 \
                -H "Authorization: Bearer ${GITCODE_TOKEN}" \
                "${API_URL}/repos/${GITCODE_OWNER}/${GITCODE_REPO}/releases/${TAG_NAME}/upload_url?file_name=${encoded_filename}") || curl_status=$?

              if [ $curl_status -eq 0 ]; then
                UPLOAD_URL=$(echo "$UPLOAD_INFO" | jq -r '.url // empty')

                if [ -n "$UPLOAD_URL" ]; then
                  # Write headers to temp file to avoid shell escaping issues
                  echo "$UPLOAD_INFO" | jq -r '.headers | to_entries[] | "header = \"" + .key + ": " + .value + "\""' > /tmp/upload_headers.txt

                  # Upload file using PUT with headers from file
                  curl_status=0
                  UPLOAD_RESPONSE=$(curl -s -w "\n%{http_code}" -X PUT \
                    -K /tmp/upload_headers.txt \
                    --data-binary "@${file}" \
                    "$UPLOAD_URL") || curl_status=$?

                  if [ $curl_status -eq 0 ]; then
                    HTTP_CODE=$(echo "$UPLOAD_RESPONSE" | tail -n1)
                    RESPONSE_BODY=$(echo "$UPLOAD_RESPONSE" | sed '$d')

                    if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ]; then
                      echo "  Uploaded: $filename"
                      return 0
                    else
                      echo "  Failed (HTTP $HTTP_CODE), retry $((retry + 1))/$max_retries"
                      echo "  Response: $RESPONSE_BODY"
                    fi
                  else
                    echo "  Upload request failed (curl exit $curl_status), retry $((retry + 1))/$max_retries"
                  fi
                else
                  echo "  Failed to get upload URL, retry $((retry + 1))/$max_retries"
                  echo "  Response: $UPLOAD_INFO"
                fi
              else
                echo "  Failed to get upload URL (curl exit $curl_status), retry $((retry + 1))/$max_retries"
                echo "  Response: $UPLOAD_INFO"
              fi

              retry=$((retry + 1))
              [ $retry -lt $max_retries ] && sleep 3
            done

            echo "  Failed: $filename after $max_retries retries"
            exit 1
          }

          # Upload non-yml/json files first
          for file in release-assets/*; do
            if [ -f "$file" ]; then
              filename=$(basename "$file")
              if [[ ! "$filename" =~ \.(yml|yaml|json)$ ]]; then
                upload_file "$file"
              fi
            fi
          done

          # Upload yml/json files last
          for file in release-assets/*; do
            if [ -f "$file" ]; then
              filename=$(basename "$file")
              if [[ "$filename" =~ \.(yml|yaml|json)$ ]]; then
                upload_file "$file"
              fi
            fi
          done

          echo "GitCode release sync completed!"

      - name: Cleanup temp files
        if: always()
        shell: bash
        run: |
          rm -f /tmp/release_payload.json /tmp/upload_headers.txt release_body.txt
          rm -rf release-assets/
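The new workflow can also be exercised by hand. A minimal dispatch sketch, assuming an authenticated GitHub CLI (the tag value here is illustrative):

```bash
# Re-run the GitCode sync for an existing release tag,
# cleaning node_modules before the signed Windows build.
gh workflow run sync-to-gitcode.yml \
  --repo CherryHQ/cherry-studio \
  -f tag=v1.6.0 \
  -f clean=true
```

Note the two-pass upload loop at the end of the workflow: binaries go first and yml/json files last, presumably so that updater metadata such as latest.yml never appears on GitCode before the installers it points to.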
.github/workflows/update-app-upgrade-config.yml (36 lines changed)

@@ -19,10 +19,9 @@ on:
 
 permissions:
   contents: write
-  pull-requests: write
 
 jobs:
-  propose-update:
+  update-config:
     runs-on: ubuntu-latest
     if: github.event_name == 'workflow_dispatch' || (github.event_name == 'release' && github.event.release.draft == false)
 
@@ -135,7 +134,7 @@ jobs:
 
      - name: Checkout default branch
        if: steps.check.outputs.should_run == 'true'
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
        with:
          ref: ${{ github.event.repository.default_branch }}
          path: main
@@ -143,7 +142,7 @@ jobs:
 
      - name: Checkout x-files/app-upgrade-config branch
        if: steps.check.outputs.should_run == 'true'
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
        with:
          ref: x-files/app-upgrade-config
          path: cs
@@ -187,25 +186,20 @@ jobs:
            echo "changed=true" >> "$GITHUB_OUTPUT"
          fi
 
-      - name: Create pull request
+      - name: Commit and push changes
        if: steps.check.outputs.should_run == 'true' && steps.diff.outputs.changed == 'true'
-        uses: peter-evans/create-pull-request@v7
-        with:
-          path: cs
-          base: x-files/app-upgrade-config
-          branch: chore/update-app-upgrade-config/${{ steps.meta.outputs.safe_tag }}
-          commit-message: "🤖 chore: sync app-upgrade-config for ${{ steps.meta.outputs.tag }}"
-          title: "chore: update app-upgrade-config for ${{ steps.meta.outputs.tag }}"
-          body: |
-            Automated update triggered by `${{ steps.meta.outputs.trigger }}`.
-
-            - Source tag: `${{ steps.meta.outputs.tag }}`
-            - Pre-release: `${{ steps.meta.outputs.prerelease }}`
-            - Latest: `${{ steps.meta.outputs.latest }}`
-            - Workflow run: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
-          labels: |
-            automation
-            app-upgrade
+        working-directory: cs
+        run: |
+          git config user.name "github-actions[bot]"
+          git config user.email "github-actions[bot]@users.noreply.github.com"
+          git add app-upgrade-config.json
+          git commit -m "chore: sync app-upgrade-config for ${{ steps.meta.outputs.tag }}" -m "Automated update triggered by \`${{ steps.meta.outputs.trigger }}\`.
+
+          - Source tag: \`${{ steps.meta.outputs.tag }}\`
+          - Pre-release: \`${{ steps.meta.outputs.prerelease }}\`
+          - Latest: \`${{ steps.meta.outputs.latest }}\`
+          - Workflow run: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+          git push origin x-files/app-upgrade-config
 
      - name: No changes detected
        if: steps.check.outputs.should_run == 'true' && steps.diff.outputs.changed != 'true'
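Net effect of this change: instead of opening a pull request through peter-evans/create-pull-request, the job now commits app-upgrade-config.json straight to the x-files/app-upgrade-config branch, which is also why the pull-requests: write permission is dropped in the first hunk.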
@@ -1,5 +1,5 @@
 diff --git a/dist/index.js b/dist/index.js
-index 51ce7e423934fb717cb90245cdfcdb3dae6780e6..0f7f7009e2f41a79a8669d38c8a44867bbff5e1f 100644
+index d004b415c5841a1969705823614f395265ea5a8a..6b1e0dad4610b0424393ecc12e9114723bbe316b 100644
 --- a/dist/index.js
 +++ b/dist/index.js
 @@ -474,7 +474,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
@@ -12,7 +12,7 @@ index 51ce7e423934fb717cb90245cdfcdb3dae6780e6..0f7f7009e2f41a79a8669d38c8a44867
 
 // src/google-generative-ai-options.ts
 diff --git a/dist/index.mjs b/dist/index.mjs
-index f4b77e35c0cbfece85a3ef0d4f4e67aa6dde6271..8d2fecf8155a226006a0bde72b00b6036d4014b6 100644
+index 1780dd2391b7f42224a0b8048c723d2f81222c44..1f12ed14399d6902107ce9b435d7d8e6cc61e06b 100644
 --- a/dist/index.mjs
 +++ b/dist/index.mjs
 @@ -480,7 +480,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
@@ -24,3 +24,14 @@ index f4b77e35c0cbfece85a3ef0d4f4e67aa6dde6271..8d2fecf8155a226006a0bde72b00b603
 }
 
 // src/google-generative-ai-options.ts
+@@ -1909,8 +1909,7 @@ function createGoogleGenerativeAI(options = {}) {
+ }
+ var google = createGoogleGenerativeAI();
+ export {
+-  VERSION,
+   createGoogleGenerativeAI,
+-  google
++  google, VERSION
+ };
+ //# sourceMappingURL=index.mjs.map
+\ No newline at end of file
@@ -1,8 +1,8 @@
 diff --git a/dist/index.js b/dist/index.js
-index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a70ea2b5a2 100644
+index 130094d194ea1e8e7d3027d07d82465741192124..4d13dcee8c962ca9ee8f1c3d748f8ffe6a3cfb47 100644
 --- a/dist/index.js
 +++ b/dist/index.js
-@@ -274,6 +274,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
+@@ -290,6 +290,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
          message: import_v42.z.object({
            role: import_v42.z.literal("assistant").nullish(),
            content: import_v42.z.string().nullish(),
@@ -10,7 +10,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
          tool_calls: import_v42.z.array(
            import_v42.z.object({
              id: import_v42.z.string().nullish(),
-@@ -340,6 +341,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
+@@ -356,6 +357,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
          delta: import_v42.z.object({
            role: import_v42.z.enum(["assistant"]).nullish(),
            content: import_v42.z.string().nullish(),
@@ -18,7 +18,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
          tool_calls: import_v42.z.array(
            import_v42.z.object({
              index: import_v42.z.number(),
-@@ -795,6 +797,13 @@ var OpenAIChatLanguageModel = class {
+@@ -814,6 +816,13 @@ var OpenAIChatLanguageModel = class {
      if (text != null && text.length > 0) {
        content.push({ type: "text", text });
      }
@@ -32,7 +32,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
      for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
        content.push({
          type: "tool-call",
-@@ -876,6 +885,7 @@ var OpenAIChatLanguageModel = class {
+@@ -895,6 +904,7 @@ var OpenAIChatLanguageModel = class {
      };
      let metadataExtracted = false;
      let isActiveText = false;
@@ -40,7 +40,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
      const providerMetadata = { openai: {} };
      return {
        stream: response.pipeThrough(
-@@ -933,6 +943,21 @@ var OpenAIChatLanguageModel = class {
+@@ -952,6 +962,21 @@ var OpenAIChatLanguageModel = class {
            return;
          }
          const delta = choice.delta;
@@ -62,7 +62,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
          if (delta.content != null) {
            if (!isActiveText) {
              controller.enqueue({ type: "text-start", id: "0" });
-@@ -1045,6 +1070,9 @@ var OpenAIChatLanguageModel = class {
+@@ -1064,6 +1089,9 @@ var OpenAIChatLanguageModel = class {
          }
        },
        flush(controller) {
@@ -1,5 +1,5 @@
 diff --git a/sdk.mjs b/sdk.mjs
-index bf429a344b7d59f70aead16b639f949b07688a81..f77d50cc5d3fb04292cb3ac7fa7085d02dcc628f 100755
+index dea7766a3432a1e809f12d6daba4f2834a219689..e0b02ef73da177ba32b903887d7bbbeaa08cc6d3 100755
 --- a/sdk.mjs
 +++ b/sdk.mjs
 @@ -6250,7 +6250,7 @@ function createAbortController(maxListeners = DEFAULT_MAX_LISTENERS) {
@@ -11,7 +11,7 @@ index bf429a344b7d59f70aead16b639f949b07688a81..f77d50cc5d3fb04292cb3ac7fa7085d0
 import { createInterface } from "readline";
 
 // ../src/utils/fsOperations.ts
-@@ -6619,18 +6619,11 @@ class ProcessTransport {
+@@ -6644,18 +6644,11 @@ class ProcessTransport {
       const errorMessage = isNativeBinary(pathToClaudeCodeExecutable) ? `Claude Code native binary not found at ${pathToClaudeCodeExecutable}. Please ensure Claude Code is installed via native installer or specify a valid path with options.pathToClaudeCodeExecutable.` : `Claude Code executable not found at ${pathToClaudeCodeExecutable}. Is options.pathToClaudeCodeExecutable set?`;
       throw new ReferenceError(errorMessage);
     }
.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch (new file, 145 lines)

diff --git a/dist/index.d.ts b/dist/index.d.ts
index 8dd9b498050dbecd8dd6b901acf1aa8ca38a49af..ed644349c9d38fe2a66b2fb44214f7c18eb97f89 100644
--- a/dist/index.d.ts
+++ b/dist/index.d.ts
@@ -4,7 +4,7 @@ import { z } from 'zod/v4';
 
 type OllamaChatModelId = "athene-v2" | "athene-v2:72b" | "aya-expanse" | "aya-expanse:8b" | "aya-expanse:32b" | "codegemma" | "codegemma:2b" | "codegemma:7b" | "codellama" | "codellama:7b" | "codellama:13b" | "codellama:34b" | "codellama:70b" | "codellama:code" | "codellama:python" | "command-r" | "command-r:35b" | "command-r-plus" | "command-r-plus:104b" | "command-r7b" | "command-r7b:7b" | "deepseek-r1" | "deepseek-r1:1.5b" | "deepseek-r1:7b" | "deepseek-r1:8b" | "deepseek-r1:14b" | "deepseek-r1:32b" | "deepseek-r1:70b" | "deepseek-r1:671b" | "deepseek-coder-v2" | "deepseek-coder-v2:16b" | "deepseek-coder-v2:236b" | "deepseek-v3" | "deepseek-v3:671b" | "devstral" | "devstral:24b" | "dolphin3" | "dolphin3:8b" | "exaone3.5" | "exaone3.5:2.4b" | "exaone3.5:7.8b" | "exaone3.5:32b" | "falcon2" | "falcon2:11b" | "falcon3" | "falcon3:1b" | "falcon3:3b" | "falcon3:7b" | "falcon3:10b" | "firefunction-v2" | "firefunction-v2:70b" | "gemma" | "gemma:2b" | "gemma:7b" | "gemma2" | "gemma2:2b" | "gemma2:9b" | "gemma2:27b" | "gemma3" | "gemma3:1b" | "gemma3:4b" | "gemma3:12b" | "gemma3:27b" | "granite3-dense" | "granite3-dense:2b" | "granite3-dense:8b" | "granite3-guardian" | "granite3-guardian:2b" | "granite3-guardian:8b" | "granite3-moe" | "granite3-moe:1b" | "granite3-moe:3b" | "granite3.1-dense" | "granite3.1-dense:2b" | "granite3.1-dense:8b" | "granite3.1-moe" | "granite3.1-moe:1b" | "granite3.1-moe:3b" | "llama2" | "llama2:7b" | "llama2:13b" | "llama2:70b" | "llama3" | "llama3:8b" | "llama3:70b" | "llama3-chatqa" | "llama3-chatqa:8b" | "llama3-chatqa:70b" | "llama3-gradient" | "llama3-gradient:8b" | "llama3-gradient:70b" | "llama3.1" | "llama3.1:8b" | "llama3.1:70b" | "llama3.1:405b" | "llama3.2" | "llama3.2:1b" | "llama3.2:3b" | "llama3.2-vision" | "llama3.2-vision:11b" | "llama3.2-vision:90b" | "llama3.3" | "llama3.3:70b" | "llama4" | "llama4:16x17b" | "llama4:128x17b" | "llama-guard3" | "llama-guard3:1b" | "llama-guard3:8b" | "llava" | "llava:7b" | "llava:13b" | "llava:34b" | "llava-llama3" | "llava-llama3:8b" | "llava-phi3" | "llava-phi3:3.8b" | "marco-o1" | "marco-o1:7b" | "mistral" | "mistral:7b" | "mistral-large" | "mistral-large:123b" | "mistral-nemo" | "mistral-nemo:12b" | "mistral-small" | "mistral-small:22b" | "mixtral" | "mixtral:8x7b" | "mixtral:8x22b" | "moondream" | "moondream:1.8b" | "openhermes" | "openhermes:v2.5" | "nemotron" | "nemotron:70b" | "nemotron-mini" | "nemotron-mini:4b" | "olmo" | "olmo:7b" | "olmo:13b" | "opencoder" | "opencoder:1.5b" | "opencoder:8b" | "phi3" | "phi3:3.8b" | "phi3:14b" | "phi3.5" | "phi3.5:3.8b" | "phi4" | "phi4:14b" | "qwen" | "qwen:7b" | "qwen:14b" | "qwen:32b" | "qwen:72b" | "qwen:110b" | "qwen2" | "qwen2:0.5b" | "qwen2:1.5b" | "qwen2:7b" | "qwen2:72b" | "qwen2.5" | "qwen2.5:0.5b" | "qwen2.5:1.5b" | "qwen2.5:3b" | "qwen2.5:7b" | "qwen2.5:14b" | "qwen2.5:32b" | "qwen2.5:72b" | "qwen2.5-coder" | "qwen2.5-coder:0.5b" | "qwen2.5-coder:1.5b" | "qwen2.5-coder:3b" | "qwen2.5-coder:7b" | "qwen2.5-coder:14b" | "qwen2.5-coder:32b" | "qwen3" | "qwen3:0.6b" | "qwen3:1.7b" | "qwen3:4b" | "qwen3:8b" | "qwen3:14b" | "qwen3:30b" | "qwen3:32b" | "qwen3:235b" | "qwq" | "qwq:32b" | "sailor2" | "sailor2:1b" | "sailor2:8b" | "sailor2:20b" | "shieldgemma" | "shieldgemma:2b" | "shieldgemma:9b" | "shieldgemma:27b" | "smallthinker" | "smallthinker:3b" | "smollm" | "smollm:135m" | "smollm:360m" | "smollm:1.7b" | "tinyllama" | "tinyllama:1.1b" | "tulu3" | "tulu3:8b" | "tulu3:70b" | (string & {});
 declare const ollamaProviderOptions: z.ZodObject<{
-    think: z.ZodOptional<z.ZodBoolean>;
+    think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodEnum<['low', 'medium', 'high']>]>>;
     options: z.ZodOptional<z.ZodObject<{
         num_ctx: z.ZodOptional<z.ZodNumber>;
         repeat_last_n: z.ZodOptional<z.ZodNumber>;
@@ -27,9 +27,11 @@ interface OllamaCompletionSettings {
      * the model's thinking from the model's output. When disabled, the model will not think
      * and directly output the content.
      *
+     * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking.
+     *
      * Only supported by certain models like DeepSeek R1 and Qwen 3.
      */
-    think?: boolean;
+    think?: boolean | 'low' | 'medium' | 'high';
     /**
      * Echo back the prompt in addition to the completion.
      */
@@ -146,7 +148,7 @@ declare const ollamaEmbeddingProviderOptions: z.ZodObject<{
 type OllamaEmbeddingProviderOptions = z.infer<typeof ollamaEmbeddingProviderOptions>;
 
 declare const ollamaCompletionProviderOptions: z.ZodObject<{
-    think: z.ZodOptional<z.ZodBoolean>;
+    think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodEnum<['low', 'medium', 'high']>]>>;
     user: z.ZodOptional<z.ZodString>;
     suffix: z.ZodOptional<z.ZodString>;
     echo: z.ZodOptional<z.ZodBoolean>;
diff --git a/dist/index.js b/dist/index.js
index 35b5142ce8476ce2549ed7c2ec48e7d8c46c90d9..2ef64dc9a4c2be043e6af608241a6a8309a5a69f 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -158,7 +158,7 @@ function getResponseMetadata({
 
 // src/completion/ollama-completion-language-model.ts
 var ollamaCompletionProviderOptions = import_v42.z.object({
-  think: import_v42.z.boolean().optional(),
+  think: import_v42.z.union([import_v42.z.boolean(), import_v42.z.enum(['low', 'medium', 'high'])]).optional(),
   user: import_v42.z.string().optional(),
   suffix: import_v42.z.string().optional(),
   echo: import_v42.z.boolean().optional()
@@ -662,7 +662,7 @@ function convertToOllamaChatMessages({
       const images = content.filter((part) => part.type === "file" && part.mediaType.startsWith("image/")).map((part) => part.data);
       messages.push({
         role: "user",
-        content: userText.length > 0 ? userText : [],
+        content: userText.length > 0 ? userText : '',
         images: images.length > 0 ? images : void 0
       });
       break;
@@ -813,9 +813,11 @@ var ollamaProviderOptions = import_v44.z.object({
    * the model's thinking from the model's output. When disabled, the model will not think
    * and directly output the content.
    *
+   * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking.
+   *
    * Only supported by certain models like DeepSeek R1 and Qwen 3.
    */
-  think: import_v44.z.boolean().optional(),
+  think: import_v44.z.union([import_v44.z.boolean(), import_v44.z.enum(['low', 'medium', 'high'])]).optional(),
   options: import_v44.z.object({
     num_ctx: import_v44.z.number().optional(),
     repeat_last_n: import_v44.z.number().optional(),
@@ -929,14 +931,16 @@ var OllamaRequestBuilder = class {
         prompt,
         systemMessageMode: "system"
       }),
-      temperature,
-      top_p: topP,
       max_output_tokens: maxOutputTokens,
       ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
         format: responseFormat.schema != null ? responseFormat.schema : "json"
       },
       think: (_a = ollamaOptions == null ? void 0 : ollamaOptions.think) != null ? _a : false,
-      options: (_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : void 0
+      options: {
+        ...temperature !== void 0 && { temperature },
+        ...topP !== void 0 && { top_p: topP },
+        ...((_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : {})
+      }
     };
   }
 };
diff --git a/dist/index.mjs b/dist/index.mjs
index e2a634a78d80ac9542f2cc4f96cf2291094b10cf..67b23efce3c1cf4f026693d3ff9246988a3ef26e 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -144,7 +144,7 @@ function getResponseMetadata({
 
 // src/completion/ollama-completion-language-model.ts
 var ollamaCompletionProviderOptions = z2.object({
-  think: z2.boolean().optional(),
+  think: z2.union([z2.boolean(), z2.enum(['low', 'medium', 'high'])]).optional(),
   user: z2.string().optional(),
   suffix: z2.string().optional(),
   echo: z2.boolean().optional()
@@ -662,7 +662,7 @@ function convertToOllamaChatMessages({
       const images = content.filter((part) => part.type === "file" && part.mediaType.startsWith("image/")).map((part) => part.data);
       messages.push({
         role: "user",
-        content: userText.length > 0 ? userText : [],
+        content: userText.length > 0 ? userText : '',
         images: images.length > 0 ? images : void 0
       });
       break;
@@ -815,9 +815,11 @@ var ollamaProviderOptions = z4.object({
    * the model's thinking from the model's output. When disabled, the model will not think
    * and directly output the content.
    *
+   * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking.
+   *
    * Only supported by certain models like DeepSeek R1 and Qwen 3.
    */
-  think: z4.boolean().optional(),
+  think: z4.union([z4.boolean(), z4.enum(['low', 'medium', 'high'])]).optional(),
   options: z4.object({
     num_ctx: z4.number().optional(),
     repeat_last_n: z4.number().optional(),
@@ -931,14 +933,16 @@ var OllamaRequestBuilder = class {
       prompt,
       systemMessageMode: "system"
     }),
-    temperature,
-    top_p: topP,
     max_output_tokens: maxOutputTokens,
     ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
       format: responseFormat.schema != null ? responseFormat.schema : "json"
     },
     think: (_a = ollamaOptions == null ? void 0 : ollamaOptions.think) != null ? _a : false,
-    options: (_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : void 0
+    options: {
+      ...temperature !== void 0 && { temperature },
+      ...topP !== void 0 && { top_p: topP },
+      ...((_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : {})
+    }
   };
 }
};
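What the widened `think` option buys in practice, sketched against Ollama's HTTP chat endpoint (model names and prompt are illustrative; the patched request builder above forwards `think` into this request body):

```bash
# Boolean form: toggle thinking on models such as DeepSeek R1 or Qwen 3
curl -s http://localhost:11434/api/chat -d '{
  "model": "qwen3:8b",
  "messages": [{ "role": "user", "content": "Why is the sky blue?" }],
  "think": true,
  "stream": false
}'

# String form enabled by the patch: thinking depth for gpt-oss models
curl -s http://localhost:11434/api/chat -d '{
  "model": "gpt-oss:20b",
  "messages": [{ "role": "user", "content": "Why is the sky blue?" }],
  "think": "high",
  "stream": false
}'
```

The patch also stops dropping `temperature` and `top_p`: both now travel inside the `options` object, which is where Ollama reads sampling parameters from.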
CLAUDE.md (11 lines changed)

@@ -28,7 +28,7 @@ When creating a Pull Request, you MUST:
 - **Development**: `yarn dev` - Runs Electron app in development mode with hot reload
 - **Debug**: `yarn debug` - Starts with debugging enabled, use `chrome://inspect` to attach debugger
 - **Build Check**: `yarn build:check` - **REQUIRED** before commits (lint + test + typecheck)
-  - If having i18n sort issues, run `yarn sync:i18n` first to sync template
+  - If having i18n sort issues, run `yarn i18n:sync` first to sync template
   - If having formatting issues, run `yarn format` first
 - **Test**: `yarn test` - Run all tests (Vitest) across main and renderer processes
 - **Single Test**:
@@ -40,20 +40,23 @@ When creating a Pull Request, you MUST:
 ## Project Architecture
 
 ### Electron Structure
 
 - **Main Process** (`src/main/`): Node.js backend with services (MCP, Knowledge, Storage, etc.)
 - **Renderer Process** (`src/renderer/`): React UI with Redux state management
 - **Preload Scripts** (`src/preload/`): Secure IPC bridge
 
 ### Key Components
 
 - **AI Core** (`src/renderer/src/aiCore/`): Middleware pipeline for multiple AI providers.
 - **Services** (`src/main/services/`): MCPService, KnowledgeService, WindowService, etc.
 - **Build System**: Electron-Vite with experimental rolldown-vite, yarn workspaces.
 - **State Management**: Redux Toolkit (`src/renderer/src/store/`) for predictable state.
 
 ### Logging
 
 ```typescript
-import { loggerService } from '@logger'
-const logger = loggerService.withContext('moduleName')
+import { loggerService } from "@logger";
+const logger = loggerService.withContext("moduleName");
 // Renderer: loggerService.initWindowSource('windowName') first
-logger.info('message', CONTEXT)
+logger.info("message", CONTEXT);
 ```
@@ -23,7 +23,7 @@
   },
   "files": {
     "ignoreUnknown": false,
-    "includes": ["**", "!**/.claude/**", "!**/.vscode/**"],
+    "includes": ["**", "!**/.claude/**", "!**/.vscode/**", "!**/.conductor/**"],
     "maxSize": 2097152
   },
   "formatter": {
@@ -12,8 +12,13 @@
 
 ; https://github.com/electron-userland/electron-builder/issues/1122
 !ifndef BUILD_UNINSTALLER
+  ; Check VC++ Redistributable based on architecture stored in $1
   Function checkVCRedist
-    ReadRegDWORD $0 HKLM "SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\x64" "Installed"
+    ${If} $1 == "arm64"
+      ReadRegDWORD $0 HKLM "SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\ARM64" "Installed"
+    ${Else}
+      ReadRegDWORD $0 HKLM "SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\x64" "Installed"
+    ${EndIf}
   FunctionEnd
 
   Function checkArchitectureCompatibility
@@ -97,29 +102,47 @@
 
     Call checkVCRedist
     ${If} $0 != "1"
-      MessageBox MB_YESNO "\
-        NOTE: ${PRODUCT_NAME} requires $\r$\n\
-        'Microsoft Visual C++ Redistributable'$\r$\n\
-        to function properly.$\r$\n$\r$\n\
-        Download and install now?" /SD IDYES IDYES InstallVCRedist IDNO DontInstall
-      InstallVCRedist:
-        inetc::get /CAPTION " " /BANNER "Downloading Microsoft Visual C++ Redistributable..." "https://aka.ms/vs/17/release/vc_redist.x64.exe" "$TEMP\vc_redist.x64.exe"
-        ExecWait "$TEMP\vc_redist.x64.exe /install /norestart"
-        ;IfErrors InstallError ContinueInstall ; vc_redist exit code is unreliable :(
-        Call checkVCRedist
-        ${If} $0 == "1"
-          Goto ContinueInstall
-        ${EndIf}
-
-      ;InstallError:
-        MessageBox MB_ICONSTOP "\
-          There was an unexpected error installing$\r$\n\
-          Microsoft Visual C++ Redistributable.$\r$\n\
-          The installation of ${PRODUCT_NAME} cannot continue."
-      DontInstall:
-        Abort
+      ; VC++ is required - install automatically since declining would abort anyway
+      ; Select download URL based on system architecture (stored in $1)
+      ${If} $1 == "arm64"
+        StrCpy $2 "https://aka.ms/vs/17/release/vc_redist.arm64.exe"
+        StrCpy $3 "$TEMP\vc_redist.arm64.exe"
+      ${Else}
+        StrCpy $2 "https://aka.ms/vs/17/release/vc_redist.x64.exe"
+        StrCpy $3 "$TEMP\vc_redist.x64.exe"
+      ${EndIf}
+
+      inetc::get /CAPTION " " /BANNER "Downloading Microsoft Visual C++ Redistributable..." \
+        $2 $3 /END
+      Pop $0 ; Get download status from inetc::get
+      ${If} $0 != "OK"
+        MessageBox MB_ICONSTOP|MB_YESNO "\
+          Failed to download Microsoft Visual C++ Redistributable.$\r$\n$\r$\n\
+          Error: $0$\r$\n$\r$\n\
+          Would you like to open the download page in your browser?$\r$\n\
+          $2" IDYES openDownloadUrl IDNO skipDownloadUrl
+        openDownloadUrl:
+          ExecShell "open" $2
+        skipDownloadUrl:
+          Abort
+      ${EndIf}
+
+      ExecWait "$3 /install /quiet /norestart"
+      ; Note: vc_redist exit code is unreliable, verify via registry check instead
+
+      Call checkVCRedist
+      ${If} $0 != "1"
+        MessageBox MB_ICONSTOP|MB_YESNO "\
+          Microsoft Visual C++ Redistributable installation failed.$\r$\n$\r$\n\
+          Would you like to open the download page in your browser?$\r$\n\
+          $2$\r$\n$\r$\n\
+          The installation of ${PRODUCT_NAME} cannot continue." IDYES openInstallUrl IDNO skipInstallUrl
+        openInstallUrl:
+          ExecShell "open" $2
+        skipInstallUrl:
+          Abort
+      ${EndIf}
     ${EndIf}
-    ContinueInstall:
     Pop $4
     Pop $3
     Pop $2
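In short, the installer hunks above make checkVCRedist architecture-aware (ARM64 vs x64, keyed off $1), download the matching vc_redist silently, verify the install by re-reading the registry rather than trusting the redistributable's unreliable exit code, and offer to open the download URL in a browser before aborting if either the download or the install fails.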
@@ -71,7 +71,7 @@ Tools like i18n Ally cannot parse dynamic content within template strings, resul
 
 ```javascript
 // Not recommended - Plugin cannot resolve
-const message = t(`fruits.${fruit}`)
+const message = t(`fruits.${fruit}`);
 ```
 
 #### 2. **No Real-time Rendering in Editor**
@@ -91,14 +91,14 @@ For example:
 ```ts
 // src/renderer/src/i18n/label.ts
 const themeModeKeyMap = {
-  dark: 'settings.theme.dark',
-  light: 'settings.theme.light',
-  system: 'settings.theme.system'
-} as const
+  dark: "settings.theme.dark",
+  light: "settings.theme.light",
+  system: "settings.theme.system",
+} as const;
 
 export const getThemeModeLabel = (key: string): string => {
-  return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key
-}
+  return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key;
+};
 ```
 
 By avoiding template strings, you gain better developer experience, more reliable translation checks, and a more maintainable codebase.
@@ -107,7 +107,7 @@ By avoiding template strings, you gain more reliabl
 
 The project includes several scripts to automate i18n-related tasks:
 
-### `check:i18n` - Validate i18n Structure
+### `i18n:check` - Validate i18n Structure
 
 This script checks:
 
@@ -116,10 +116,10 @@ This script checks:
 - Whether keys are properly sorted
 
 ```bash
-yarn check:i18n
+yarn i18n:check
 ```
 
-### `sync:i18n` - Synchronize JSON Structure and Sort Order
+### `i18n:sync` - Synchronize JSON Structure and Sort Order
 
 This script uses `zh-cn.json` as the source of truth to sync structure across all language files, including:
 
@@ -128,14 +128,14 @@ This script uses `zh-cn.json` as the source of truth to sync structure across al
 3. Sorting keys automatically
 
 ```bash
-yarn sync:i18n
+yarn i18n:sync
 ```
 
-### `auto:i18n` - Automatically Translate Pending Texts
+### `i18n:translate` - Automatically Translate Pending Texts
 
 This script fills in texts marked as `[to be translated]` using machine translation.
 
-Typically, after adding new texts in `zh-cn.json`, run `sync:i18n`, then `auto:i18n` to complete translations.
+Typically, after adding new texts in `zh-cn.json`, run `i18n:sync`, then `i18n:translate` to complete translations.
 
 Before using this script, set the required environment variables:
 
@@ -148,30 +148,20 @@ MODEL="qwen-plus-latest"
 Alternatively, add these variables directly to your `.env` file.
 
 ```bash
-yarn auto:i18n
-```
-
-### `update:i18n` - Object-level Translation Update
-
-Updates translations in language files under `src/renderer/src/i18n/translate` at the object level, preserving existing translations and only updating new content.
-
-**Not recommended** — prefer `auto:i18n` for translation tasks.
-
-```bash
-yarn update:i18n
+yarn i18n:translate
 ```
 
 ### Workflow
 
 1. During development, first add the required text in `zh-cn.json`
 2. Confirm it displays correctly in the Chinese environment
-3. Run `yarn sync:i18n` to propagate the keys to other language files
-4. Run `yarn auto:i18n` to perform machine translation
+3. Run `yarn i18n:sync` to propagate the keys to other language files
+4. Run `yarn i18n:translate` to perform machine translation
 5. Grab a coffee and let the magic happen!
 
 ## Best Practices
 
 1. **Use Chinese as Source Language**: All development starts in Chinese, then translates to other languages.
-2. **Run Check Script Before Commit**: Use `yarn check:i18n` to catch i18n issues early.
+2. **Run Check Script Before Commit**: Use `yarn i18n:check` to catch i18n issues early.
 3. **Translate in Small Increments**: Avoid accumulating a large backlog of untranslated content.
 4. **Keep Keys Semantically Clear**: Keys should clearly express their purpose, e.g., `user.profile.avatar.upload.error`
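Collected from the workflow, CLAUDE.md, and docs diffs above, the i18n script renames in this changeset are:

```bash
yarn i18n:check      # previously: yarn check:i18n
yarn i18n:sync       # previously: yarn sync:i18n
yarn i18n:translate  # previously: yarn auto:i18n
# yarn update:i18n was removed outright
```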
@@ -1,17 +1,17 @@
 # 如何优雅地做好 i18n
 
-## 使用i18n ally插件提升开发体验
+## 使用 i18n ally 插件提升开发体验
 
-i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反馈,帮助开发者更早发现文案缺失和错译问题。
+i18n ally 是一个强大的 VSCode 插件,它能在开发阶段提供实时反馈,帮助开发者更早发现文案缺失和错译问题。
 
 项目中已经配置好了插件设置,直接安装即可。
 
 ### 开发时优势
 
 - **实时预览**:翻译文案会直接显示在编辑器中
-- **错误检测**:自动追踪标记出缺失的翻译或未使用的key
+- **错误检测**:自动追踪标记出缺失的翻译或未使用的 key
-- **快速跳转**:可通过key直接跳转到定义处(Ctrl/Cmd + click)
+- **快速跳转**:可通过 key 直接跳转到定义处(Ctrl/Cmd + click)
-- **自动补全**:输入i18n key时提供自动补全建议
+- **自动补全**:输入 i18n key 时提供自动补全建议
 
 ### 效果展示
 
@@ -23,9 +23,9 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反
 
 ## i18n 约定
 
-### **绝对避免使用flat格式**
+### **绝对避免使用 flat 格式**
 
-绝对避免使用flat格式,如`"add.button.tip": "添加"`。应采用清晰的嵌套结构:
+绝对避免使用 flat 格式,如`"add.button.tip": "添加"`。应采用清晰的嵌套结构:
 
 ```json
 // 错误示例 - flat结构
@@ -52,14 +52,14 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反
 #### 为什么要使用嵌套结构
 
 1. **自然分组**:通过对象结构天然能将相关上下文的文案分到一个组别中
-2. **插件要求**:i18n ally 插件需要嵌套或flat格式其一的文件才能正常分析
+2. **插件要求**:i18n ally 插件需要嵌套或 flat 格式其一的文件才能正常分析
 
 ### **避免在`t()`中使用模板字符串**
 
-**强烈建议避免使用模板字符串**进行动态插值。虽然模板字符串在JavaScript开发中非常方便,但在国际化场景下会带来一系列问题。
+**强烈建议避免使用模板字符串**进行动态插值。虽然模板字符串在 JavaScript 开发中非常方便,但在国际化场景下会带来一系列问题。
 
 1. **插件无法跟踪**
-   i18n ally等工具无法解析模板字符串中的动态内容,导致:
+   i18n ally 等工具无法解析模板字符串中的动态内容,导致:
 
    - 无法正确显示实时预览
    - 无法检测翻译缺失
@@ -67,11 +67,11 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反
 
 ```javascript
 // 不推荐 - 插件无法解析
-const message = t(`fruits.${fruit}`)
+const message = t(`fruits.${fruit}`);
 ```
 
 2. **编辑器无法实时渲染**
-   在IDE中,模板字符串会显示为原始代码而非最终翻译结果,降低了开发体验。
+   在 IDE 中,模板字符串会显示为原始代码而非最终翻译结果,降低了开发体验。
 
 3. **更难以维护**
    由于插件无法跟踪这样的文案,编辑器中也无法渲染,开发者必须人工确认语言文件中是否存在相应的文案。
@@ -85,36 +85,36 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反
 ```ts
 // src/renderer/src/i18n/label.ts
 const themeModeKeyMap = {
-  dark: 'settings.theme.dark',
+  dark: "settings.theme.dark",
-  light: 'settings.theme.light',
+  light: "settings.theme.light",
-  system: 'settings.theme.system'
+  system: "settings.theme.system",
-} as const
+} as const;
 
 export const getThemeModeLabel = (key: string): string => {
-  return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key
+  return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key;
-}
+};
 ```
 
 通过避免模板字符串,可以获得更好的开发体验、更可靠的翻译检查以及更易维护的代码库。
 
 ## 自动化脚本
 
-项目中有一系列脚本来自动化i18n相关任务:
+项目中有一系列脚本来自动化 i18n 相关任务:
 
-### `check:i18n` - 检查i18n结构
+### `i18n:check` - 检查 i18n 结构
 
 此脚本会检查:
 
 - 所有语言文件是否为嵌套结构
-- 是否存在缺失的key
+- 是否存在缺失的 key
-- 是否存在多余的key
+- 是否存在多余的 key
 - 是否已经有序
 
 ```bash
-yarn check:i18n
+yarn i18n:check
 ```
 
-### `sync:i18n` - 同步json结构与排序
+### `i18n:sync` - 同步 json 结构与排序
 
 此脚本以`zh-cn.json`文件为基准,将结构同步到其他语言文件,包括:
 
@@ -123,14 +123,14 @@ yarn check:i18n
 3. 自动排序
 
 ```bash
-yarn sync:i18n
+yarn i18n:sync
 ```
 
-### `auto:i18n` - 自动翻译待翻译文本
+### `i18n:translate` - 自动翻译待翻译文本
 
 此脚本自动将标记为待翻译的文本通过机器翻译填充。
 
-通常,在`zh-cn.json`中添加所需文案后,执行`sync:i18n`即可自动完成翻译。
+通常,在`zh-cn.json`中添加所需文案后,先执行`i18n:sync`,再执行`i18n:translate`即可完成翻译。
 
 使用该脚本前,需要配置环境变量,例如:
 
@@ -143,29 +143,19 @@ MODEL="qwen-plus-latest"
 你也可以通过直接编辑`.env`文件来添加环境变量。
 
 ```bash
-yarn auto:i18n
-```
-
-### `update:i18n` - 对象级别翻译更新
-
-对`src/renderer/src/i18n/translate`中的语言文件进行对象级别的翻译更新,保留已有翻译,只更新新增内容。
-
-**不建议**使用该脚本,更推荐使用`auto:i18n`进行翻译。
-
-```bash
-yarn update:i18n
+yarn i18n:translate
 ```
 
 ### 工作流
 
 1. 开发阶段,先在`zh-cn.json`中添加所需文案
-2. 确认在中文环境下显示无误后,使用`yarn sync:i18n`将文案同步到其他语言文件
+2. 确认在中文环境下显示无误后,使用`yarn i18n:sync`将文案同步到其他语言文件
-3. 使用`yarn auto:i18n`进行自动翻译
+3. 使用`yarn i18n:translate`进行自动翻译
 4. 喝杯咖啡,等翻译完成吧!
 
 ## 最佳实践
 
 1. **以中文为源语言**:所有开发首先使用中文,再翻译为其他语言
-2. **提交前运行检查脚本**:使用`yarn check:i18n`检查i18n是否有问题
+2. **提交前运行检查脚本**:使用`yarn i18n:check`检查 i18n 是否有问题
 3. **小步提交翻译**:避免积累大量未翻译文本
-4. **保持key语义明确**:key应能清晰表达其用途,如`user.profile.avatar.upload.error`
+4. **保持 key 语义明确**:key 应能清晰表达其用途,如`user.profile.avatar.upload.error`
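A minimal `.env` sketch for the translation script documented above. Only `MODEL` is confirmed by this diff (it appears in the hunk header); the endpoint and key variable names are placeholders, so check `scripts/auto-translate-i18n.ts` for the names it actually reads:

```bash
# .env - illustrative values only
MODEL="qwen-plus-latest"
# Hypothetical variable names for the translation backend
API_KEY="sk-..."
BASE_URL="https://example.com/v1"
```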
@@ -134,108 +134,38 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
 releaseInfo:
   releaseNotes: |
     <!--LANG:en-->
-    A New Era of Intelligence with Cherry Studio 1.7.1
+    Cherry Studio 1.7.6 - New Models & MCP Enhancements
 
-    Today we're releasing Cherry Studio 1.7.1 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.
+    This release adds support for new AI models and includes a new MCP server for memory management.
 
-    For years, AI assistants have been reactive — waiting for your commands, responding to your questions. With Agent, we're changing that. Now, AI can truly work alongside you: understanding complex goals, breaking them into steps, and executing them independently.
+    ✨ New Features
+    - [Models] Add support for Xiaomi MiMo model
+    - [Models] Add support for Gemini 3 Flash and Pro model detection
+    - [Models] Add support for Volcengine Doubao-Seed-1.8 model
+    - [MCP] Add Nowledge Mem builtin MCP server for memory management
+    - [Settings] Add default reasoning effort option to resolve confusion between undefined and none
 
-    This is what we've been building toward. And it's just the beginning.
+    🐛 Bug Fixes
+    - [Azure] Restore deployment-based URLs for non-v1 apiVersion
-    🤖 Meet Agent
+    - [Translation] Disable reasoning mode for translation to improve efficiency
-    Imagine having a brilliant colleague who never sleeps. Give Agent a goal — write a report, analyze data, refactor code — and watch it work. It reasons through problems, breaks them into steps, calls the right tools, and adapts when things change.
+    - [Image] Update API path for image generation requests in OpenAIBaseClient
+    - [Windows] Auto-discover and persist Git Bash path on Windows for scoop users
-    - **Think → Plan → Act**: From goal to execution, fully autonomous
-    - **Deep Reasoning**: Multi-turn thinking that solves real problems
-    - **Tool Mastery**: File operations, web search, code execution, and more
-    - **Skill Plugins**: Extend with custom commands and capabilities
-    - **You Stay in Control**: Real-time approval for sensitive actions
-    - **Full Visibility**: Every thought, every decision, fully transparent
-
-    🌐 Expanding Ecosystem
-    - **New Providers**: HuggingFace, Mistral, CherryIN, AI Gateway, Intel OVMS, Didi MCP
-    - **New Models**: Claude 4.5 Haiku, DeepSeek v3.2, GLM-4.6, Doubao, Ling series
-    - **MCP Integration**: Alibaba Cloud, ModelScope, Higress, MCP.so, TokenFlux and more
-
-    📚 Smarter Knowledge Base
-    - **OpenMinerU**: Self-hosted document processing
-    - **Full-Text Search**: Find anything instantly across your notes
-    - **Enhanced Tool Selection**: Smarter configuration for better AI assistance
-
-    📝 Notes, Reimagined
-    - Full-text search with highlighted results
-    - AI-powered smart rename
-    - Export as image
-    - Auto-wrap for tables
-
-    🖼️ Image & OCR
-    - Intel OVMS painting capabilities
-    - Intel OpenVINO NPU-accelerated OCR
-
-    🌍 Now in 10+ Languages
-    - Added German support
-    - Enhanced internationalization
-
-    ⚡ Faster & More Polished
-    - Electron 38 upgrade
-    - New MCP management interface
-    - Dozens of UI refinements
-
-    ❤️ Fully Open Source
-    Commercial restrictions removed. Cherry Studio now follows standard AGPL v3 — free for teams of any size.
-
-    The Agent Era is here. We can't wait to see what you'll create.
 
     <!--LANG:zh-CN-->
-    Cherry Studio 1.7.1:开启智能新纪元
+    Cherry Studio 1.7.6 - 新模型与 MCP 增强
 
-    今天,我们正式发布 Cherry Studio 1.7.1 —— 迄今最具雄心的版本,带来全新的 Agent:能够自主思考、规划和行动的 AI。
+    本次更新添加了多个新 AI 模型支持,并新增记忆管理 MCP 服务器。
 
-    多年来,AI 助手一直是被动的——等待你的指令,回应你的问题。Agent 改变了这一切。现在,AI 能够真正与你并肩工作:理解复杂目标,将其拆解为步骤,并独立执行。
+    ✨ 新功能
+    - [模型] 添加小米 MiMo 模型支持
+    - [模型] 添加 Gemini 3 Flash 和 Pro 模型检测支持
+    - [模型] 添加火山引擎 Doubao-Seed-1.8 模型支持
+    - [MCP] 新增 Nowledge Mem 内置 MCP 服务器,用于记忆管理
+    - [设置] 添加默认推理强度选项,解决 undefined 和 none 之间的混淆
 
-    这是我们一直在构建的未来。而这,仅仅是开始。
+    🐛 问题修复
+    - [Azure] 修复非 v1 apiVersion 的部署 URL 问题
-    🤖 认识 Agent
+    - [翻译] 禁用翻译时的推理模式以提高效率
-    想象一位永不疲倦的得力伙伴。给 Agent 一个目标——撰写报告、分析数据、重构代码——然后看它工作。它会推理问题、拆解步骤、调用工具,并在情况变化时灵活应对。
+    - [图像] 更新 OpenAIBaseClient 中图像生成请求的 API 路径
+    - [Windows] 自动发现并保存 Windows scoop 用户的 Git Bash 路径
-    - **思考 → 规划 → 行动**:从目标到执行,全程自主
-    - **深度推理**:多轮思考,解决真实问题
-    - **工具大师**:文件操作、网络搜索、代码执行,样样精通
-    - **技能插件**:自定义命令,无限扩展
-    - **你掌控全局**:敏感操作,实时审批
-    - **完全透明**:每一步思考,每一个决策,清晰可见
-
-    🌐 生态持续壮大
-    - **新增服务商**:Hugging Face、Mistral、Perplexity、SophNet、AI Gateway、Cerebras AI
-    - **新增模型**:Gemini 3、Gemini 3 Pro(支持图像预览)、GPT-5.1、Claude Opus 4.5
-    - **MCP 集成**:百炼、魔搭、Higress、MCP.so、TokenFlux 等平台
-
-    📚 更智能的知识库
-    - **OpenMinerU**:本地自部署文档处理
-    - **全文搜索**:笔记内容一搜即达
-    - **增强工具选择**:更智能的配置,更好的 AI 协助
-
-    📝 笔记,焕然一新
-    - 全文搜索,结果高亮
-    - AI 智能重命名
-    - 导出为图片
-    - 表格自动换行
-
-    🖼️ 图像与 OCR
-    - Intel OVMS 绘图能力
-    - Intel OpenVINO NPU 加速 OCR
-
-    🌍 支持 10+ 种语言
-    - 新增德语支持
-    - 全面增强国际化
-
-    ⚡ 更快、更精致
-    - 升级 Electron 38
-    - 新的 MCP 管理界面
-    - 数十处 UI 细节打磨
-
-    ❤️ 完全开源
-    商用限制已移除。Cherry Studio 现遵循标准 AGPL v3 协议——任意规模团队均可自由使用。
-
-    Agent 纪元已至。期待你的创造。
 
     <!--LANG:END-->
@@ -61,6 +61,7 @@ export default defineConfig([
     'tests/**',
     '.yarn/**',
     '.gitignore',
+    '.conductor/**',
     'scripts/cloudflare-worker.js',
     'src/main/integration/nutstore/sso/lib/**',
     'src/main/integration/cherryai/index.js',
package.json (31 lines changed)

@@ -1,6 +1,6 @@
 {
   "name": "CherryStudio",
-  "version": "1.7.1",
+  "version": "1.7.6",
   "private": true,
   "description": "A powerful AI assistant for producer.",
   "main": "./out/main/index.js",
@@ -53,10 +53,10 @@
   "typecheck": "concurrently -n \"node,web\" -c \"cyan,magenta\" \"npm run typecheck:node\" \"npm run typecheck:web\"",
   "typecheck:node": "tsgo --noEmit -p tsconfig.node.json --composite false",
   "typecheck:web": "tsgo --noEmit -p tsconfig.web.json --composite false",
-  "check:i18n": "dotenv -e .env -- tsx scripts/check-i18n.ts",
+  "i18n:check": "dotenv -e .env -- tsx scripts/check-i18n.ts",
-  "sync:i18n": "dotenv -e .env -- tsx scripts/sync-i18n.ts",
+  "i18n:sync": "dotenv -e .env -- tsx scripts/sync-i18n.ts",
-  "update:i18n": "dotenv -e .env -- tsx scripts/update-i18n.ts",
+  "i18n:translate": "dotenv -e .env -- tsx scripts/auto-translate-i18n.ts",
-  "auto:i18n": "dotenv -e .env -- tsx scripts/auto-translate-i18n.ts",
+  "i18n:all": "yarn i18n:check && yarn i18n:sync && yarn i18n:translate",
   "update:languages": "tsx scripts/update-languages.ts",
   "update:upgrade-config": "tsx scripts/update-app-upgrade-config.ts",
   "test": "vitest run --silent",
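For anyone with the old script names in muscle memory, the rename maps one-to-one, and the new `i18n:all` entry chains the three steps:

```bash
yarn i18n:check      # was: yarn check:i18n
yarn i18n:sync       # was: yarn sync:i18n
yarn i18n:translate  # was: yarn auto:i18n (update:i18n is gone)
yarn i18n:all        # new: check && sync && translate
```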
@@ -70,7 +70,7 @@
   "test:e2e": "yarn playwright test",
   "test:lint": "oxlint --deny-warnings && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --cache",
   "test:scripts": "vitest scripts",
-  "lint": "oxlint --fix && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix --cache && yarn typecheck && yarn check:i18n && yarn format:check",
+  "lint": "oxlint --fix && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix --cache && yarn typecheck && yarn i18n:check && yarn format:check",
   "format": "biome format --write && biome lint --write",
   "format:check": "biome format && biome lint",
   "prepare": "git config blame.ignoreRevsFile .git-blame-ignore-revs && husky",
@@ -81,7 +81,7 @@
   "release:ai-sdk-provider": "yarn workspace @cherrystudio/ai-sdk-provider version patch --immediate && yarn workspace @cherrystudio/ai-sdk-provider build && yarn workspace @cherrystudio/ai-sdk-provider npm publish --access public"
 },
 "dependencies": {
-  "@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.53#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.53-4b77f4cf29.patch",
+  "@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.62#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.62-23ae56f8c8.patch",
   "@libsql/client": "0.14.0",
   "@libsql/win32-x64-msvc": "^0.4.7",
   "@napi-rs/system-ocr": "patch:@napi-rs/system-ocr@npm%3A1.0.2#~/.yarn/patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch",
@@ -114,11 +114,11 @@
   "@ai-sdk/anthropic": "^2.0.49",
   "@ai-sdk/cerebras": "^1.0.31",
   "@ai-sdk/gateway": "^2.0.15",
-  "@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.43#~/.yarn/patches/@ai-sdk-google-npm-2.0.43-689ed559b3.patch",
+  "@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.49#~/.yarn/patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch",
-  "@ai-sdk/google-vertex": "^3.0.79",
+  "@ai-sdk/google-vertex": "^3.0.94",
   "@ai-sdk/huggingface": "^0.0.10",
   "@ai-sdk/mistral": "^2.0.24",
-  "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
+  "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
   "@ai-sdk/perplexity": "^2.0.20",
   "@ai-sdk/test-server": "^0.0.1",
   "@ant-design/v5-patch-for-react-19": "^1.0.3",
@@ -142,7 +142,7 @@
   "@cherrystudio/embedjs-ollama": "^0.1.31",
   "@cherrystudio/embedjs-openai": "^0.1.31",
   "@cherrystudio/extension-table-plus": "workspace:^",
-  "@cherrystudio/openai": "^6.9.0",
+  "@cherrystudio/openai": "^6.12.0",
   "@dnd-kit/core": "^6.3.1",
   "@dnd-kit/modifiers": "^9.0.0",
   "@dnd-kit/sortable": "^10.0.0",
@@ -162,7 +162,7 @@
   "@langchain/core": "patch:@langchain/core@npm%3A1.0.2#~/.yarn/patches/@langchain-core-npm-1.0.2-183ef83fe4.patch",
   "@langchain/openai": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
   "@mistralai/mistralai": "^1.7.5",
-  "@modelcontextprotocol/sdk": "^1.17.5",
+  "@modelcontextprotocol/sdk": "^1.23.0",
   "@mozilla/readability": "^0.6.0",
   "@notionhq/client": "^2.2.15",
   "@openrouter/ai-sdk-provider": "^1.2.8",
@@ -207,6 +207,7 @@
   "@types/content-type": "^1.1.9",
   "@types/cors": "^2.8.19",
   "@types/diff": "^7",
+  "@types/dotenv": "^8.2.3",
   "@types/express": "^5",
   "@types/fs-extra": "^11",
   "@types/he": "^1",
@@ -317,6 +318,7 @@
   "motion": "^12.10.5",
   "notion-helper": "^1.3.22",
   "npx-scope-finder": "^1.2.0",
+  "ollama-ai-provider-v2": "patch:ollama-ai-provider-v2@npm%3A1.5.5#~/.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch",
   "oxlint": "^1.22.0",
   "oxlint-tsgolint": "^0.2.0",
   "p-queue": "^8.1.0",
@@ -412,9 +414,10 @@
   "@langchain/openai@npm:>=0.1.0 <0.6.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
   "@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
   "@langchain/openai@npm:>=0.2.0 <0.7.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
-  "@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
+  "@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
   "@ai-sdk/google@npm:^2.0.40": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
-  "@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
+  "@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
+  "@ai-sdk/google@npm:2.0.49": "patch:@ai-sdk/google@npm%3A2.0.49#~/.yarn/patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch"
 },
 "packageManager": "yarn@4.9.1",
 "lint-staged": {
@@ -41,6 +41,7 @@
   "ai": "^5.0.26"
 },
 "dependencies": {
+  "@ai-sdk/openai-compatible": "^1.0.28",
   "@ai-sdk/provider": "^2.0.0",
   "@ai-sdk/provider-utils": "^3.0.17"
 },

@@ -2,7 +2,6 @@ import { AnthropicMessagesLanguageModel } from '@ai-sdk/anthropic/internal'
 import { GoogleGenerativeAILanguageModel } from '@ai-sdk/google/internal'
 import type { OpenAIProviderSettings } from '@ai-sdk/openai'
 import {
-  OpenAIChatLanguageModel,
   OpenAICompletionLanguageModel,
   OpenAIEmbeddingModel,
   OpenAIImageModel,
@@ -10,6 +9,7 @@ import {
   OpenAISpeechModel,
   OpenAITranscriptionModel
 } from '@ai-sdk/openai/internal'
+import { OpenAICompatibleChatLanguageModel } from '@ai-sdk/openai-compatible'
 import {
   type EmbeddingModelV2,
   type ImageModelV2,
@@ -118,7 +118,7 @@ const createCustomFetch = (originalFetch?: any) => {
     return originalFetch ? originalFetch(url, options) : fetch(url, options)
   }
 }
-class CherryInOpenAIChatLanguageModel extends OpenAIChatLanguageModel {
+class CherryInOpenAIChatLanguageModel extends OpenAICompatibleChatLanguageModel {
   constructor(modelId: string, settings: any) {
     super(modelId, {
       ...settings,

@@ -40,8 +40,8 @@
 },
 "dependencies": {
   "@ai-sdk/anthropic": "^2.0.49",
-  "@ai-sdk/azure": "^2.0.74",
+  "@ai-sdk/azure": "^2.0.87",
-  "@ai-sdk/deepseek": "^1.0.29",
+  "@ai-sdk/deepseek": "^1.0.31",
   "@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
   "@ai-sdk/provider": "^2.0.0",
   "@ai-sdk/provider-utils": "^3.0.17",
@@ -62,7 +62,7 @@ export class StreamEventManager {
   const recursiveResult = await context.recursiveCall(recursiveParams)
 
   if (recursiveResult && recursiveResult.fullStream) {
-    await this.pipeRecursiveStream(controller, recursiveResult.fullStream, context)
+    await this.pipeRecursiveStream(controller, recursiveResult.fullStream)
   } else {
     console.warn('[MCP Prompt] No fullstream found in recursive result:', recursiveResult)
   }
@@ -74,11 +74,7 @@ export class StreamEventManager {
   /**
    * 将递归流的数据传递到当前流
    */
-  private async pipeRecursiveStream(
-    controller: StreamController,
-    recursiveStream: ReadableStream,
-    context?: AiRequestContext
-  ): Promise<void> {
+  private async pipeRecursiveStream(controller: StreamController, recursiveStream: ReadableStream): Promise<void> {
     const reader = recursiveStream.getReader()
     try {
       while (true) {
@@ -86,18 +82,14 @@ export class StreamEventManager {
         if (done) {
           break
         }
+        if (value.type === 'start') {
+          continue
+        }
+
         if (value.type === 'finish') {
-          // 迭代的流不发finish,但需要累加其 usage
-          if (value.usage && context?.accumulatedUsage) {
-            this.accumulateUsage(context.accumulatedUsage, value.usage)
-          }
           break
         }
-        // 对于 finish-step 类型,累加其 usage
-        if (value.type === 'finish-step' && value.usage && context?.accumulatedUsage) {
-          this.accumulateUsage(context.accumulatedUsage, value.usage)
-        }
-        // 将递归流的数据传递到当前流
         controller.enqueue(value)
       }
     } finally {
@@ -135,10 +127,8 @@ export class StreamEventManager {
   // 构建新的对话消息
   const newMessages: ModelMessage[] = [
     ...(context.originalParams.messages || []),
-    {
-      role: 'assistant',
-      content: textBuffer
-    },
+    // 只有当 textBuffer 有内容时才添加 assistant 消息,避免空消息导致 API 错误
+    ...(textBuffer ? [{ role: 'assistant' as const, content: textBuffer }] : []),
     {
       role: 'user',
       content: toolResultsText
@@ -161,7 +151,7 @@ export class StreamEventManager {
   /**
    * 累加 usage 数据
    */
-  private accumulateUsage(target: any, source: any): void {
+  accumulateUsage(target: any, source: any): void {
     if (!target || !source) return
 
     // 累加各种 token 类型
@@ -411,7 +411,10 @@ export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => {
       }
     }
 
-    // 如果没有执行工具调用,直接传递原始finish-step事件
+    // 如果没有执行工具调用,累加 usage 后透传 finish-step 事件
+    if (chunk.usage && context.accumulatedUsage) {
+      streamEventManager.accumulateUsage(context.accumulatedUsage, chunk.usage)
+    }
     controller.enqueue(chunk)
 
     // 清理状态
@@ -6,6 +6,7 @@ import { type Tool } from 'ai'
 
 import { createOpenRouterOptions, createXaiOptions, mergeProviderOptions } from '../../../options'
 import type { ProviderOptionsMap } from '../../../options/types'
+import type { AiRequestContext } from '../../'
 import type { OpenRouterSearchConfig } from './openrouter'
 
 /**
@@ -35,7 +36,6 @@ export interface WebSearchPluginConfig {
   anthropic?: AnthropicSearchConfig
   xai?: ProviderOptionsMap['xai']['searchParameters']
   google?: GoogleSearchConfig
-  'google-vertex'?: GoogleSearchConfig
   openrouter?: OpenRouterSearchConfig
 }
 
@@ -44,7 +44,6 @@ export interface WebSearchPluginConfig {
  */
 export const DEFAULT_WEB_SEARCH_CONFIG: WebSearchPluginConfig = {
   google: {},
-  'google-vertex': {},
   openai: {},
   'openai-chat': {},
   xai: {
@@ -97,55 +96,84 @@ export type WebSearchToolInputSchema = {
   'openai-chat': InferToolInput<OpenAIChatWebSearchTool>
 }
 
-export const switchWebSearchTool = (providerId: string, config: WebSearchPluginConfig, params: any) => {
-  switch (providerId) {
-    case 'openai': {
-      if (config.openai) {
+/**
+ * Helper function to ensure params.tools object exists
+ */
+const ensureToolsObject = (params: any) => {
   if (!params.tools) params.tools = {}
-        params.tools.web_search = openai.tools.webSearch(config.openai)
-      }
-      break
-    }
-    case 'openai-chat': {
-      if (config['openai-chat']) {
-        if (!params.tools) params.tools = {}
-        params.tools.web_search_preview = openai.tools.webSearchPreview(config['openai-chat'])
-      }
-      break
-    }
+}
 
-    case 'anthropic': {
-      if (config.anthropic) {
-        if (!params.tools) params.tools = {}
-        params.tools.web_search = anthropic.tools.webSearch_20250305(config.anthropic)
-      }
-      break
+/**
+ * Helper function to apply tool-based web search configuration
+ */
+const applyToolBasedSearch = (params: any, toolName: string, toolInstance: any) => {
+  ensureToolsObject(params)
+  params.tools[toolName] = toolInstance
 }
 
-    case 'google': {
-      // case 'google-vertex':
-      if (!params.tools) params.tools = {}
-      params.tools.web_search = google.tools.googleSearch(config.google || {})
-      break
+/**
+ * Helper function to apply provider options-based web search configuration
+ */
+const applyProviderOptionsSearch = (params: any, searchOptions: any) => {
+  params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
 }
 
-    case 'xai': {
-      if (config.xai) {
-        const searchOptions = createXaiOptions({
-          searchParameters: { ...config.xai, mode: 'on' }
-        })
-        params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
-      }
-      break
-    }
+export const switchWebSearchTool = (config: WebSearchPluginConfig, params: any, context?: AiRequestContext) => {
+  const providerId = context?.providerId
 
-    case 'openrouter': {
-      if (config.openrouter) {
-        const searchOptions = createOpenRouterOptions(config.openrouter)
-        params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
-      }
+  // Provider-specific configuration map
+  const providerHandlers: Record<string, () => void> = {
+    openai: () => {
+      const cfg = config.openai ?? DEFAULT_WEB_SEARCH_CONFIG.openai
+      applyToolBasedSearch(params, 'web_search', openai.tools.webSearch(cfg))
+    },
+    'openai-chat': () => {
+      const cfg = (config['openai-chat'] ?? DEFAULT_WEB_SEARCH_CONFIG['openai-chat']) as OpenAISearchPreviewConfig
+      applyToolBasedSearch(params, 'web_search_preview', openai.tools.webSearchPreview(cfg))
+    },
+    anthropic: () => {
+      const cfg = config.anthropic ?? DEFAULT_WEB_SEARCH_CONFIG.anthropic
+      applyToolBasedSearch(params, 'web_search', anthropic.tools.webSearch_20250305(cfg))
+    },
+    google: () => {
+      const cfg = (config.google ?? DEFAULT_WEB_SEARCH_CONFIG.google) as GoogleSearchConfig
+      applyToolBasedSearch(params, 'web_search', google.tools.googleSearch(cfg))
+    },
+    xai: () => {
+      const cfg = config.xai ?? DEFAULT_WEB_SEARCH_CONFIG.xai
+      const searchOptions = createXaiOptions({ searchParameters: { ...cfg, mode: 'on' } })
+      applyProviderOptionsSearch(params, searchOptions)
+    },
+    openrouter: () => {
+      const cfg = (config.openrouter ?? DEFAULT_WEB_SEARCH_CONFIG.openrouter) as OpenRouterSearchConfig
+      const searchOptions = createOpenRouterOptions(cfg)
+      applyProviderOptionsSearch(params, searchOptions)
+    }
+  }
+
+  // Try provider-specific handler first
+  const handler = providerId && providerHandlers[providerId]
+  if (handler) {
+    handler()
+    return params
+  }
+
+  // Fallback: apply based on available config keys (prioritized order)
+  const fallbackOrder: Array<keyof WebSearchPluginConfig> = [
+    'openai',
+    'openai-chat',
+    'anthropic',
+    'google',
+    'xai',
+    'openrouter'
+  ]
+
+  for (const key of fallbackOrder) {
+    if (config[key]) {
+      providerHandlers[key]()
       break
     }
   }
 
   return params
 }
@@ -4,7 +4,6 @@
  */
 
 import { definePlugin } from '../../'
-import type { AiRequestContext } from '../../types'
 import type { WebSearchPluginConfig } from './helper'
 import { DEFAULT_WEB_SEARCH_CONFIG, switchWebSearchTool } from './helper'
 
@@ -18,15 +17,22 @@ export const webSearchPlugin = (config: WebSearchPluginConfig = DEFAULT_WEB_SEAR
   name: 'webSearch',
   enforce: 'pre',
 
-  transformParams: async (params: any, context: AiRequestContext) => {
-    const { providerId } = context
-    switchWebSearchTool(providerId, config, params)
+  transformParams: async (params: any, context) => {
+    let { providerId } = context
+
+    // For cherryin providers, extract the actual provider from the model's provider string
+    // Expected format: "cherryin.{actualProvider}" (e.g., "cherryin.gemini")
     if (providerId === 'cherryin' || providerId === 'cherryin-chat') {
-      // cherryin.gemini
-      const _providerId = params.model.provider.split('.')[1]
-      switchWebSearchTool(_providerId, config, params)
+      const provider = params.model?.provider
+      if (provider && typeof provider === 'string' && provider.includes('.')) {
+        const extractedProviderId = provider.split('.')[1]
+        if (extractedProviderId) {
+          providerId = extractedProviderId
+        }
+      }
     }
 
+    switchWebSearchTool(config, params, { ...context, providerId })
     return params
   }
 })
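The cherryin branch above depends on the model's provider string carrying the real provider after a dot. A quick sketch of the same extraction in shell form; the sample value comes from the code comment:

```bash
# "cherryin.gemini" -> "gemini". A bare "cherryin" has no dot and would be
# echoed unchanged by cut, which is why the plugin guards with includes('.')
echo "cherryin.gemini" | cut -d '.' -f 2
```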
@@ -55,6 +55,8 @@ export enum IpcChannel {
   Webview_SetOpenLinkExternal = 'webview:set-open-link-external',
   Webview_SetSpellCheckEnabled = 'webview:set-spell-check-enabled',
   Webview_SearchHotkey = 'webview:search-hotkey',
+  Webview_PrintToPDF = 'webview:print-to-pdf',
+  Webview_SaveAsHTML = 'webview:save-as-html',
 
   // Open
   Open_Path = 'open:path',
@@ -90,6 +92,8 @@ export enum IpcChannel {
   Mcp_AbortTool = 'mcp:abort-tool',
   Mcp_GetServerVersion = 'mcp:get-server-version',
   Mcp_Progress = 'mcp:progress',
+  Mcp_GetServerLogs = 'mcp:get-server-logs',
+  Mcp_ServerLog = 'mcp:server-log',
   // Python
   Python_Execute = 'python:execute',
 
@@ -196,6 +200,9 @@ export enum IpcChannel {
   File_ValidateNotesDirectory = 'file:validateNotesDirectory',
   File_StartWatcher = 'file:startWatcher',
   File_StopWatcher = 'file:stopWatcher',
+  File_PauseWatcher = 'file:pauseWatcher',
+  File_ResumeWatcher = 'file:resumeWatcher',
+  File_BatchUploadMarkdown = 'file:batchUploadMarkdown',
   File_ShowInFolder = 'file:showInFolder',
 
   // file service
@@ -236,6 +243,9 @@ export enum IpcChannel {
   System_GetHostname = 'system:getHostname',
   System_GetCpuName = 'system:getCpuName',
   System_CheckGitBash = 'system:checkGitBash',
+  System_GetGitBashPath = 'system:getGitBashPath',
+  System_GetGitBashPathInfo = 'system:getGitBashPathInfo',
+  System_SetGitBashPath = 'system:setGitBashPath',
 
   // DevTools
   System_ToggleDevTools = 'system:toggleDevTools',
@@ -290,6 +300,8 @@ export enum IpcChannel {
   Selection_ActionWindowClose = 'selection:action-window-close',
   Selection_ActionWindowMinimize = 'selection:action-window-minimize',
   Selection_ActionWindowPin = 'selection:action-window-pin',
+  // [Windows only] Electron bug workaround - can be removed once https://github.com/electron/electron/issues/48554 is fixed
+  Selection_ActionWindowResize = 'selection:action-window-resize',
   Selection_ProcessAction = 'selection:process-action',
   Selection_UpdateActionData = 'selection:update-action-data',
 
@@ -88,16 +88,11 @@ export function getSdkClient(
     }
   })
 }
-let baseURL =
+const baseURL =
   provider.type === 'anthropic'
     ? provider.apiHost
     : (provider.anthropicApiHost && provider.anthropicApiHost.trim()) || provider.apiHost
 
-// Anthropic SDK automatically appends /v1 to all endpoints (like /v1/messages, /v1/models)
-// We need to strip api version from baseURL to avoid duplication (e.g., /v3/v1/models)
-// formatProviderApiHost adds /v1 for AI SDK compatibility, but Anthropic SDK needs it removed
-baseURL = baseURL.replace(/\/v\d+(?:alpha|beta)?(?=\/|$)/i, '')
-
 logger.debug('Anthropic API baseURL', { baseURL, providerId: provider.id })
 
 if (provider.id === 'aihubmix') {
@@ -7,6 +7,11 @@ export const documentExts = ['.pdf', '.doc', '.docx', '.pptx', '.xlsx', '.odt',
 export const thirdPartyApplicationExts = ['.draftsExport']
 export const bookExts = ['.epub']
 
+export const API_SERVER_DEFAULTS = {
+  HOST: '127.0.0.1',
+  PORT: 23333
+}
+
 /**
  * A flat array of all file extensions known by the linguist database.
  * This is the primary source for identifying code files.
@@ -483,3 +488,11 @@ export const MACOS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [
 
 // resources/scripts should be maintained manually
 export const HOME_CHERRY_DIR = '.cherrystudio'
+
+// Git Bash path configuration types
+export type GitBashPathSource = 'manual' | 'auto'
+
+export interface GitBashPathInfo {
+  path: string | null
+  source: GitBashPathSource | null
+}
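With the defaults above, the local API server binds to 127.0.0.1:23333. A hedged smoke test; the root route and the response shape are assumptions, not confirmed by this diff (the Swagger setup further down serves the real route list):

```bash
# Assumes the API server is enabled in settings; swap "/" for a real endpoint
curl -i http://127.0.0.1:23333/
```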
@@ -10,7 +10,7 @@ export type LoaderReturn = {
   messageSource?: 'preprocess' | 'embedding' | 'validation'
 }
 
-export type FileChangeEventType = 'add' | 'change' | 'unlink' | 'addDir' | 'unlinkDir'
+export type FileChangeEventType = 'add' | 'change' | 'unlink' | 'addDir' | 'unlinkDir' | 'refresh'
 
 export type FileChangeEvent = {
   eventType: FileChangeEventType
@@ -23,6 +23,14 @@ export type MCPProgressEvent = {
   progress: number // 0-1 range
 }
 
+export type MCPServerLogEntry = {
+  timestamp: number
+  level: 'debug' | 'info' | 'warn' | 'error' | 'stderr' | 'stdout'
+  message: string
+  data?: any
+  source?: string
+}
+
 export type WebviewKeyEvent = {
   webviewId: number
   key: string
@@ -11,7 +11,7 @@ const OVMS_EX_URL = 'https://gitcode.com/gcw_ggDjjkY3/kjfile/releases/download/d
 
 /**
  * error code:
- * 101: Unsupported CPU (not Intel Ultra)
+ * 101: Unsupported CPU (not Intel)
  * 102: Unsupported platform (not Windows)
  * 103: Download failed
  * 104: Installation failed
@@ -213,8 +213,8 @@ async function installOvms() {
   console.log(`CPU Name: ${cpuName}`)
 
-  // Check if CPU name contains "Ultra"
-  if (!cpuName.toLowerCase().includes('intel') || !cpuName.toLowerCase().includes('ultra')) {
-    console.error('OVMS installation requires an Intel(R) Core(TM) Ultra CPU.')
+  // Check if CPU name contains "Intel"
+  if (!cpuName.toLowerCase().includes('intel')) {
+    console.error('OVMS installation requires an Intel CPU.')
     return 101
   }
 
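To see which side of the relaxed guard a machine falls on, the CPU name the installer logs can be checked by hand. A sketch assuming Windows with PowerShell available; the WMI property is standard, not taken from this diff:

```bash
# Prints the CPU name; the installer now only requires it to contain "Intel"
powershell -Command "(Get-CimInstance Win32_Processor).Name"
```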
@@ -50,7 +50,7 @@ Usage Instructions:
 - pt-pt (Portuguese)
 
 Run Command:
-yarn auto:i18n
+yarn i18n:translate
 
 Performance Optimization Recommendations:
 - For stable API services: MAX_CONCURRENT_TRANSLATIONS=8, TRANSLATION_DELAY_MS=50
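Those recommendations can go straight into `.env`. A sketch using the exact variable names from the script's usage notes above:

```bash
# .env - tuning for stable API services, per the script's own recommendation
MAX_CONCURRENT_TRANSLATIONS=8
TRANSLATION_DELAY_MS=50
```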
@@ -145,7 +145,7 @@ export function main() {
     console.log('i18n 检查已通过')
   } catch (e) {
     console.error(e)
-    throw new Error(`检查未通过。尝试运行 yarn sync:i18n 以解决问题。`)
+    throw new Error(`检查未通过。尝试运行 yarn i18n:sync 以解决问题。`)
   }
 }
 
@@ -91,23 +91,6 @@ function createIssueCard(issueData) {
 
   return {
     elements: [
-      {
-        tag: 'div',
-        text: {
-          tag: 'lark_md',
-          content: `**🐛 New GitHub Issue #${issueNumber}**`
-        }
-      },
-      {
-        tag: 'hr'
-      },
-      {
-        tag: 'div',
-        text: {
-          tag: 'lark_md',
-          content: `**📝 Title:** ${issueTitle}`
-        }
-      },
       {
         tag: 'div',
         text: {
@@ -158,7 +141,7 @@ function createIssueCard(issueData) {
       template: 'blue',
       title: {
         tag: 'plain_text',
-        content: '🆕 Cherry Studio - New Issue'
+        content: `#${issueNumber} - ${issueTitle}`
       }
     }
   }
@@ -5,9 +5,17 @@ exports.default = async function (configuration) {
   const { path } = configuration
   if (configuration.path) {
     try {
+      const certPath = process.env.CHERRY_CERT_PATH
+      const keyContainer = process.env.CHERRY_CERT_KEY
+      const csp = process.env.CHERRY_CERT_CSP
+
+      if (!certPath || !keyContainer || !csp) {
+        throw new Error('CHERRY_CERT_PATH, CHERRY_CERT_KEY or CHERRY_CERT_CSP is not set')
+      }
+
       console.log('Start code signing...')
       console.log('Signing file:', path)
-      const signCommand = `signtool sign /tr http://timestamp.comodoca.com /td sha256 /fd sha256 /a /v "${path}"`
+      const signCommand = `signtool sign /tr http://timestamp.comodoca.com /td sha256 /fd sha256 /v /f "${certPath}" /csp "${csp}" /k "${keyContainer}" "${path}"`
       execSync(signCommand, { stdio: 'inherit' })
       console.log('Code signing completed')
     } catch (error) {
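The signing hook now fails fast unless three variables are present. A sketch of the environment a CI job would export; the variable names come from the script, every value is a placeholder:

```bash
# Values are illustrative only; point these at your own certificate,
# key container, and cryptographic service provider
export CHERRY_CERT_PATH="C:\\certs\\codesign.cer"
export CHERRY_CERT_KEY="my-key-container"
export CHERRY_CERT_CSP="My Cryptographic Provider"
```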
@@ -1,3 +1,4 @@
+import { API_SERVER_DEFAULTS } from '@shared/config/constant'
 import type { ApiServerConfig } from '@types'
 import { v4 as uuidv4 } from 'uuid'
 
@@ -6,9 +7,6 @@ import { reduxService } from '../services/ReduxService'
 
 const logger = loggerService.withContext('ApiServerConfig')
 
-const defaultHost = 'localhost'
-const defaultPort = 23333
-
 class ConfigManager {
   private _config: ApiServerConfig | null = null
 
@@ -30,8 +28,8 @@ class ConfigManager {
     }
     this._config = {
       enabled: serverSettings?.enabled ?? false,
-      port: serverSettings?.port ?? defaultPort,
+      port: serverSettings?.port ?? API_SERVER_DEFAULTS.PORT,
-      host: defaultHost,
+      host: serverSettings?.host ?? API_SERVER_DEFAULTS.HOST,
       apiKey: apiKey
     }
     return this._config
@@ -39,8 +37,8 @@ class ConfigManager {
     logger.warn('Failed to load config from Redux, using defaults', { error })
     this._config = {
       enabled: false,
-      port: defaultPort,
+      port: API_SERVER_DEFAULTS.PORT,
-      host: defaultHost,
+      host: API_SERVER_DEFAULTS.HOST,
       apiKey: this.generateApiKey()
     }
     return this._config
@@ -20,8 +20,8 @@ const swaggerOptions: swaggerJSDoc.Options = {
   },
   servers: [
     {
-      url: 'http://localhost:23333',
+      url: '/',
-      description: 'Local development server'
+      description: 'Current server'
     }
   ],
   components: {
@@ -19,8 +19,8 @@ import { agentService } from './services/agents'
 import { apiServerService } from './services/ApiServerService'
 import { appMenuService } from './services/AppMenuService'
 import { configManager } from './services/ConfigManager'
-import mcpService from './services/MCPService'
 import { nodeTraceService } from './services/NodeTraceService'
+import mcpService from './services/MCPService'
 import powerMonitorService from './services/PowerMonitorService'
 import {
   CHERRY_STUDIO_PROTOCOL,
@@ -6,7 +6,14 @@ import { loggerService } from '@logger'
 import { isLinux, isMac, isPortable, isWin } from '@main/constant'
 import { generateSignature } from '@main/integration/cherryai'
 import anthropicService from '@main/services/AnthropicService'
-import { getBinaryPath, isBinaryExists, runInstallScript } from '@main/utils/process'
+import {
+  autoDiscoverGitBash,
+  getBinaryPath,
+  getGitBashPathInfo,
+  isBinaryExists,
+  runInstallScript,
+  validateGitBashPath
+} from '@main/utils/process'
 import { handleZoomFactor } from '@main/utils/zoom'
 import type { SpanEntity, TokenUsage } from '@mcp-trace/trace-core'
 import type { UpgradeChannel } from '@shared/config/constant'
@@ -35,7 +42,7 @@ import appService from './services/AppService'
 import AppUpdater from './services/AppUpdater'
 import BackupManager from './services/BackupManager'
 import { codeToolsService } from './services/CodeToolsService'
-import { configManager } from './services/ConfigManager'
+import { ConfigKeys, configManager } from './services/ConfigManager'
 import CopilotService from './services/CopilotService'
 import DxtService from './services/DxtService'
 import { ExportService } from './services/ExportService'
@@ -499,38 +506,60 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
   }
 
   try {
-    // Check common Git Bash installation paths
-    const commonPaths = [
-      path.join(process.env.ProgramFiles || 'C:\\Program Files', 'Git', 'bin', 'bash.exe'),
-      path.join(process.env['ProgramFiles(x86)'] || 'C:\\Program Files (x86)', 'Git', 'bin', 'bash.exe'),
-      path.join(process.env.LOCALAPPDATA || '', 'Programs', 'Git', 'bin', 'bash.exe')
-    ]
-
-    // Check if any of the common paths exist
-    for (const bashPath of commonPaths) {
-      if (fs.existsSync(bashPath)) {
-        logger.debug('Git Bash found', { path: bashPath })
-        return true
-      }
-    }
-
-    // Check if git is in PATH
-    const { execSync } = require('child_process')
-    try {
-      execSync('git --version', { stdio: 'ignore' })
-      logger.debug('Git found in PATH')
+    // Use autoDiscoverGitBash to handle auto-discovery and persistence
+    const bashPath = autoDiscoverGitBash()
+    if (bashPath) {
+      logger.info('Git Bash is available', { path: bashPath })
       return true
-    } catch {
-      // Git not in PATH
     }
-
-    logger.debug('Git Bash not found on Windows system')
+    logger.warn('Git Bash not found. Please install Git for Windows from https://git-scm.com/downloads/win')
     return false
   } catch (error) {
-    logger.error('Error checking Git Bash', error as Error)
+    logger.error('Unexpected error checking Git Bash', error as Error)
    return false
   }
 })
 
+ipcMain.handle(IpcChannel.System_GetGitBashPath, () => {
+  if (!isWin) {
+    return null
+  }
+
+  const customPath = configManager.get(ConfigKeys.GitBashPath) as string | undefined
+  return customPath ?? null
+})
+
+// Returns { path, source } where source is 'manual' | 'auto' | null
+ipcMain.handle(IpcChannel.System_GetGitBashPathInfo, () => {
+  return getGitBashPathInfo()
+})
+
+ipcMain.handle(IpcChannel.System_SetGitBashPath, (_, newPath: string | null) => {
+  if (!isWin) {
+    return false
+  }
+
+  if (!newPath) {
+    // Clear manual setting and re-run auto-discovery
+    configManager.set(ConfigKeys.GitBashPath, null)
+    configManager.set(ConfigKeys.GitBashPathSource, null)
+    // Re-run auto-discovery to restore auto-discovered path if available
+    autoDiscoverGitBash()
+    return true
+  }
+
+  const validated = validateGitBashPath(newPath)
+  if (!validated) {
+    return false
+  }
+
+  // Set path with 'manual' source
+  configManager.set(ConfigKeys.GitBashPath, validated)
+  configManager.set(ConfigKeys.GitBashPathSource, 'manual')
+  return true
+})
+
 ipcMain.handle(IpcChannel.System_ToggleDevTools, (e) => {
   const win = BrowserWindow.fromWebContents(e.sender)
   win && win.webContents.toggleDevTools()
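The removed inline logic only scanned the default installer locations, which is what broke scoop installs. A manual spot-check of those same paths from a Git Bash shell, with the paths taken from the removed code:

```bash
# The old check looked only here; scoop puts bash.exe elsewhere, which
# autoDiscoverGitBash now finds and persists
for p in "/c/Program Files/Git/bin/bash.exe" \
         "/c/Program Files (x86)/Git/bin/bash.exe" \
         "$LOCALAPPDATA/Programs/Git/bin/bash.exe"; do
  [ -x "$p" ] && echo "found: $p"
done
```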
@@ -595,6 +624,9 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
   ipcMain.handle(IpcChannel.File_ValidateNotesDirectory, fileManager.validateNotesDirectory.bind(fileManager))
   ipcMain.handle(IpcChannel.File_StartWatcher, fileManager.startFileWatcher.bind(fileManager))
   ipcMain.handle(IpcChannel.File_StopWatcher, fileManager.stopFileWatcher.bind(fileManager))
+  ipcMain.handle(IpcChannel.File_PauseWatcher, fileManager.pauseFileWatcher.bind(fileManager))
+  ipcMain.handle(IpcChannel.File_ResumeWatcher, fileManager.resumeFileWatcher.bind(fileManager))
+  ipcMain.handle(IpcChannel.File_BatchUploadMarkdown, fileManager.batchUploadMarkdownFiles.bind(fileManager))
   ipcMain.handle(IpcChannel.File_ShowInFolder, fileManager.showInFolder.bind(fileManager))

   // file service
@@ -780,6 +812,7 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
   ipcMain.handle(IpcChannel.Mcp_CheckConnectivity, mcpService.checkMcpConnectivity)
   ipcMain.handle(IpcChannel.Mcp_AbortTool, mcpService.abortTool)
   ipcMain.handle(IpcChannel.Mcp_GetServerVersion, mcpService.getServerVersion)
+  ipcMain.handle(IpcChannel.Mcp_GetServerLogs, mcpService.getServerLogs)

   // DXT upload handler
   ipcMain.handle(IpcChannel.Mcp_UploadDxt, async (event, fileBuffer: ArrayBuffer, fileName: string) => {
@@ -858,6 +891,17 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
     webview.session.setSpellCheckerEnabled(isEnable)
   })

+  // Webview print and save handlers
+  ipcMain.handle(IpcChannel.Webview_PrintToPDF, async (_, webviewId: number) => {
+    const { printWebviewToPDF } = await import('./services/WebviewService')
+    return await printWebviewToPDF(webviewId)
+  })
+
+  ipcMain.handle(IpcChannel.Webview_SaveAsHTML, async (_, webviewId: number) => {
+    const { saveWebviewAsHTML } = await import('./services/WebviewService')
+    return await saveWebviewAsHTML(webviewId)
+  })
+
   // store sync
   storeSyncService.registerIpcHandler()

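A renderer would reach the two new webview handlers with plain invoke calls. The channel names and the single webviewId argument are taken from the diff; the surrounding wrapper and the return shapes are assumptions:

// Hypothetical renderer-side calls into the new webview handlers (sketch).
import { ipcRenderer } from 'electron'
import { IpcChannel } from '@shared/IpcChannel' // assumed shared enum location

async function exportWebview(webviewElement: Electron.WebviewTag) {
  const webviewId = webviewElement.getWebContentsId() // standard <webview> tag API
  const pdfResult = await ipcRenderer.invoke(IpcChannel.Webview_PrintToPDF, webviewId) // return shape assumed
  const htmlResult = await ipcRenderer.invoke(IpcChannel.Webview_SaveAsHTML, webviewId) // return shape assumed
  return { pdfResult, htmlResult }
}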
@@ -19,19 +19,9 @@ export default class EmbeddingsFactory {
       })
     }
     if (provider === 'ollama') {
-      if (baseURL.includes('v1/')) {
-        return new OllamaEmbeddings({
-          model: model,
-          baseUrl: baseURL.replace('v1/', ''),
-          requestOptions: {
-            // @ts-ignore expected
-            'encoding-format': 'float'
-          }
-        })
-      }
       return new OllamaEmbeddings({
         model: model,
-        baseUrl: baseURL,
+        baseUrl: baseURL.replace(/\/api$/, ''),
         requestOptions: {
           // @ts-ignore expected
           'encoding-format': 'float'

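The new regex only strips a trailing /api segment, so an Ollama base URL like http://localhost:11434/api normalizes to the host root while other shapes pass through untouched. A quick sketch of the behavior (URLs are illustrative):

// Behavior of the new baseUrl normalization (illustrative URLs).
const normalize = (baseURL: string) => baseURL.replace(/\/api$/, '')

normalize('http://localhost:11434/api') // -> 'http://localhost:11434'
normalize('http://localhost:11434')     // -> unchanged
normalize('http://proxy/api/ollama')    // -> unchanged: '/api' is not at the end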
src/main/mcpServers/__tests__/browser.test.ts (new file, 134 lines)
@@ -0,0 +1,134 @@
import { describe, expect, it, vi } from 'vitest'

vi.mock('electron', () => {
  const sendCommand = vi.fn(async (command: string, params?: { expression?: string }) => {
    if (command === 'Runtime.evaluate') {
      if (params?.expression === 'document.documentElement.outerHTML') {
        return { result: { value: '<html><body><h1>Test</h1><p>Content</p></body></html>' } }
      }
      if (params?.expression === 'document.body.innerText') {
        return { result: { value: 'Test\nContent' } }
      }
      return { result: { value: 'ok' } }
    }
    return {}
  })

  const debuggerObj = {
    isAttached: vi.fn(() => true),
    attach: vi.fn(),
    detach: vi.fn(),
    sendCommand
  }

  const webContents = {
    debugger: debuggerObj,
    setUserAgent: vi.fn(),
    getURL: vi.fn(() => 'https://example.com/'),
    getTitle: vi.fn(async () => 'Example Title'),
    once: vi.fn(),
    removeListener: vi.fn(),
    on: vi.fn()
  }

  const loadURL = vi.fn(async () => {})

  const windows: any[] = []

  class MockBrowserWindow {
    private destroyed = false
    public webContents = webContents
    public loadURL = loadURL
    public isDestroyed = vi.fn(() => this.destroyed)
    public close = vi.fn(() => {
      this.destroyed = true
    })
    public destroy = vi.fn(() => {
      this.destroyed = true
    })
    public on = vi.fn()

    constructor() {
      windows.push(this)
    }
  }

  const app = {
    isReady: vi.fn(() => true),
    whenReady: vi.fn(async () => {}),
    on: vi.fn()
  }

  return {
    BrowserWindow: MockBrowserWindow as any,
    app,
    __mockDebugger: debuggerObj,
    __mockSendCommand: sendCommand,
    __mockLoadURL: loadURL,
    __mockWindows: windows
  }
})

import * as electron from 'electron'
const { __mockWindows } = electron as typeof electron & { __mockWindows: any[] }

import { CdpBrowserController } from '../browser'

describe('CdpBrowserController', () => {
  it('executes single-line code via Runtime.evaluate', async () => {
    const controller = new CdpBrowserController()
    const result = await controller.execute('1+1')
    expect(result).toBe('ok')
  })

  it('opens a URL (hidden) and returns current page info', async () => {
    const controller = new CdpBrowserController()
    const result = await controller.open('https://foo.bar/', 5000, false)
    expect(result.currentUrl).toBe('https://example.com/')
    expect(result.title).toBe('Example Title')
  })

  it('opens a URL (visible) when show=true', async () => {
    const controller = new CdpBrowserController()
    const result = await controller.open('https://foo.bar/', 5000, true, 'session-a')
    expect(result.currentUrl).toBe('https://example.com/')
    expect(result.title).toBe('Example Title')
  })

  it('reuses session for execute and supports multiline', async () => {
    const controller = new CdpBrowserController()
    await controller.open('https://foo.bar/', 5000, false, 'session-b')
    const result = await controller.execute('const a=1; const b=2; a+b;', 5000, 'session-b')
    expect(result).toBe('ok')
  })

  it('evicts least recently used session when exceeding maxSessions', async () => {
    const controller = new CdpBrowserController({ maxSessions: 2, idleTimeoutMs: 1000 * 60 })
    await controller.open('https://foo.bar/', 5000, false, 's1')
    await controller.open('https://foo.bar/', 5000, false, 's2')
    await controller.open('https://foo.bar/', 5000, false, 's3')
    const destroyedCount = __mockWindows.filter(
      (w: any) => w.destroy.mock.calls.length > 0 || w.close.mock.calls.length > 0
    ).length
    expect(destroyedCount).toBeGreaterThanOrEqual(1)
  })

  it('fetches URL and returns html format', async () => {
    const controller = new CdpBrowserController()
    const result = await controller.fetch('https://example.com/', 'html')
    expect(result).toBe('<html><body><h1>Test</h1><p>Content</p></body></html>')
  })

  it('fetches URL and returns txt format', async () => {
    const controller = new CdpBrowserController()
    const result = await controller.fetch('https://example.com/', 'txt')
    expect(result).toBe('Test\nContent')
  })

  it('fetches URL and returns markdown format (default)', async () => {
    const controller = new CdpBrowserController()
    const result = await controller.fetch('https://example.com/')
    expect(typeof result).toBe('string')
    expect(result).toContain('Test')
  })
})

src/main/mcpServers/browser/controller.ts (new file, 307 lines)
@@ -0,0 +1,307 @@
import { app, BrowserWindow } from 'electron'
import TurndownService from 'turndown'

import { logger, userAgent } from './types'

/**
 * Controller for managing browser windows via Chrome DevTools Protocol (CDP).
 * Supports multiple sessions with LRU eviction and idle timeout cleanup.
 */
export class CdpBrowserController {
  private windows: Map<string, { win: BrowserWindow; lastActive: number }> = new Map()
  private readonly maxSessions: number
  private readonly idleTimeoutMs: number

  constructor(options?: { maxSessions?: number; idleTimeoutMs?: number }) {
    this.maxSessions = options?.maxSessions ?? 5
    this.idleTimeoutMs = options?.idleTimeoutMs ?? 5 * 60 * 1000
  }

  private async ensureAppReady() {
    if (!app.isReady()) {
      await app.whenReady()
    }
  }

  private touch(sessionId: string) {
    const entry = this.windows.get(sessionId)
    if (entry) entry.lastActive = Date.now()
  }

  private closeWindow(win: BrowserWindow, sessionId: string) {
    try {
      if (!win.isDestroyed()) {
        if (win.webContents.debugger.isAttached()) {
          win.webContents.debugger.detach()
        }
        win.close()
      }
    } catch (error) {
      logger.warn('Error closing window', { error, sessionId })
    }
  }

  private async ensureDebuggerAttached(dbg: Electron.Debugger, sessionId: string) {
    if (!dbg.isAttached()) {
      try {
        logger.info('Attaching debugger', { sessionId })
        dbg.attach('1.3')
        await dbg.sendCommand('Page.enable')
        await dbg.sendCommand('Runtime.enable')
        logger.info('Debugger attached and domains enabled')
      } catch (error) {
        logger.error('Failed to attach debugger', { error })
        throw error
      }
    }
  }

  private sweepIdle() {
    const now = Date.now()
    for (const [id, entry] of this.windows.entries()) {
      if (now - entry.lastActive > this.idleTimeoutMs) {
        this.closeWindow(entry.win, id)
        this.windows.delete(id)
      }
    }
  }

  private evictIfNeeded(newSessionId: string) {
    if (this.windows.size < this.maxSessions) return
    let lruId: string | null = null
    let lruTime = Number.POSITIVE_INFINITY
    for (const [id, entry] of this.windows.entries()) {
      if (id === newSessionId) continue
      if (entry.lastActive < lruTime) {
        lruTime = entry.lastActive
        lruId = id
      }
    }
    if (lruId) {
      const entry = this.windows.get(lruId)
      if (entry) {
        this.closeWindow(entry.win, lruId)
      }
      this.windows.delete(lruId)
      logger.info('Evicted session to respect maxSessions', { evicted: lruId })
    }
  }

  private async getWindow(sessionId = 'default', forceNew = false, show = false): Promise<BrowserWindow> {
    await this.ensureAppReady()

    this.sweepIdle()

    const existing = this.windows.get(sessionId)
    if (existing && !existing.win.isDestroyed() && !forceNew) {
      this.touch(sessionId)
      return existing.win
    }

    if (existing && !existing.win.isDestroyed() && forceNew) {
      try {
        if (existing.win.webContents.debugger.isAttached()) {
          existing.win.webContents.debugger.detach()
        }
      } catch (error) {
        logger.warn('Error detaching debugger before recreate', { error, sessionId })
      }
      existing.win.destroy()
      this.windows.delete(sessionId)
    }

    this.evictIfNeeded(sessionId)

    const win = new BrowserWindow({
      show,
      webPreferences: {
        contextIsolation: true,
        sandbox: true,
        nodeIntegration: false,
        devTools: true
      }
    })

    // Use a standard Chrome UA to avoid some anti-bot blocks
    win.webContents.setUserAgent(userAgent)

    // Log navigation lifecycle to help diagnose slow loads
    win.webContents.on('did-start-loading', () => logger.info(`did-start-loading`, { sessionId }))
    win.webContents.on('dom-ready', () => logger.info(`dom-ready`, { sessionId }))
    win.webContents.on('did-finish-load', () => logger.info(`did-finish-load`, { sessionId }))
    win.webContents.on('did-fail-load', (_e, code, desc) => logger.warn('Navigation failed', { code, desc }))

    win.on('closed', () => {
      this.windows.delete(sessionId)
    })

    this.windows.set(sessionId, { win, lastActive: Date.now() })
    return win
  }

  /**
   * Opens a URL in a browser window and waits for navigation to complete.
   * @param url - The URL to navigate to
   * @param timeout - Navigation timeout in milliseconds (default: 10000)
   * @param show - Whether to show the browser window (default: false)
   * @param sessionId - Session identifier for window reuse (default: 'default')
   * @returns Object containing the current URL and page title after navigation
   */
  public async open(url: string, timeout = 10000, show = false, sessionId = 'default') {
    const win = await this.getWindow(sessionId, true, show)
    logger.info('Loading URL', { url, sessionId })
    const { webContents } = win
    this.touch(sessionId)

    // Track resolution state to prevent multiple handlers from firing
    let resolved = false
    let onFinish: () => void
    let onDomReady: () => void
    let onFail: (_event: Electron.Event, code: number, desc: string) => void

    // Define cleanup outside Promise to ensure it's callable in finally block,
    // preventing memory leaks when timeout occurs before navigation completes
    const cleanup = () => {
      webContents.removeListener('did-finish-load', onFinish)
      webContents.removeListener('did-fail-load', onFail)
      webContents.removeListener('dom-ready', onDomReady)
    }

    const loadPromise = new Promise<void>((resolve, reject) => {
      onFinish = () => {
        if (resolved) return
        resolved = true
        cleanup()
        resolve()
      }
      onDomReady = () => {
        if (resolved) return
        resolved = true
        cleanup()
        resolve()
      }
      onFail = (_event: Electron.Event, code: number, desc: string) => {
        if (resolved) return
        resolved = true
        cleanup()
        reject(new Error(`Navigation failed (${code}): ${desc}`))
      }
      webContents.once('did-finish-load', onFinish)
      webContents.once('dom-ready', onDomReady)
      webContents.once('did-fail-load', onFail)
    })

    const timeoutPromise = new Promise<void>((_, reject) => {
      setTimeout(() => reject(new Error('Navigation timed out')), timeout)
    })

    try {
      await Promise.race([win.loadURL(url), loadPromise, timeoutPromise])
    } finally {
      // Always cleanup listeners to prevent memory leaks on timeout
      cleanup()
    }

    const currentUrl = webContents.getURL()
    const title = await webContents.getTitle()
    return { currentUrl, title }
  }

  public async execute(code: string, timeout = 5000, sessionId = 'default') {
    const win = await this.getWindow(sessionId)
    this.touch(sessionId)
    const dbg = win.webContents.debugger

    await this.ensureDebuggerAttached(dbg, sessionId)

    const evalPromise = dbg.sendCommand('Runtime.evaluate', {
      expression: code,
      awaitPromise: true,
      returnByValue: true
    })

    const result = await Promise.race([
      evalPromise,
      new Promise((_, reject) => setTimeout(() => reject(new Error('Execution timed out')), timeout))
    ])

    const evalResult = result as any

    if (evalResult?.exceptionDetails) {
      const message = evalResult.exceptionDetails.exception?.description || 'Unknown script error'
      logger.warn('Runtime.evaluate raised exception', { message })
      throw new Error(message)
    }

    const value = evalResult?.result?.value ?? evalResult?.result?.description ?? null
    return value
  }

  public async reset(sessionId?: string) {
    if (sessionId) {
      const entry = this.windows.get(sessionId)
      if (entry) {
        this.closeWindow(entry.win, sessionId)
      }
      this.windows.delete(sessionId)
      logger.info('Browser CDP context reset', { sessionId })
      return
    }

    for (const [id, entry] of this.windows.entries()) {
      this.closeWindow(entry.win, id)
      this.windows.delete(id)
    }
    logger.info('Browser CDP context reset (all sessions)')
  }

  /**
   * Fetches a URL and returns content in the specified format.
   * @param url - The URL to fetch
   * @param format - Output format: 'html', 'txt', 'markdown', or 'json' (default: 'markdown')
   * @param timeout - Navigation timeout in milliseconds (default: 10000)
   * @param sessionId - Session identifier (default: 'default')
   * @returns Content in the requested format. For 'json', returns parsed object or { data: rawContent } if parsing fails
   */
  public async fetch(
    url: string,
    format: 'html' | 'txt' | 'markdown' | 'json' = 'markdown',
    timeout = 10000,
    sessionId = 'default'
  ) {
    await this.open(url, timeout, false, sessionId)

    const win = await this.getWindow(sessionId)
    const dbg = win.webContents.debugger

    await this.ensureDebuggerAttached(dbg, sessionId)

    let expression: string
    if (format === 'json' || format === 'txt') {
      expression = 'document.body.innerText'
    } else {
      expression = 'document.documentElement.outerHTML'
    }

    const result = (await dbg.sendCommand('Runtime.evaluate', {
      expression,
      returnByValue: true
    })) as { result?: { value?: string } }

    const content = result?.result?.value ?? ''

    if (format === 'markdown') {
      const turndownService = new TurndownService()
      return turndownService.turndown(content)
    }
    if (format === 'json') {
      // Attempt to parse as JSON; if content is not valid JSON, wrap it in a data object
      try {
        return JSON.parse(content)
      } catch {
        return { data: content }
      }
    }
    return content
  }
}

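Taken together, the controller's public surface is open/execute/fetch/reset. A minimal usage sketch from main-process code, grounded in the signatures above (the URL and session names are illustrative):

// Minimal usage sketch of CdpBrowserController from the main process.
import { CdpBrowserController } from './controller'

async function demo() {
  const browser = new CdpBrowserController({ maxSessions: 2 })

  // Navigate a hidden window, then evaluate JS in its page context
  const { currentUrl, title } = await browser.open('https://example.com/', 10000, false, 'docs')
  const linkCount = await browser.execute('document.querySelectorAll("a").length', 5000, 'docs')

  // One-shot extraction: open + Runtime.evaluate + Turndown conversion
  const markdown = await browser.fetch('https://example.com/', 'markdown')

  await browser.reset() // close all windows and detach debuggers
  return { currentUrl, title, linkCount, markdown }
}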
src/main/mcpServers/browser/index.ts (new file, 3 lines)
@@ -0,0 +1,3 @@
export { CdpBrowserController } from './controller'
export { BrowserServer } from './server'
export { BrowserServer as default } from './server'

src/main/mcpServers/browser/server.ts (new file, 50 lines)
@@ -0,0 +1,50 @@
import type { Server } from '@modelcontextprotocol/sdk/server/index.js'
import { Server as MCServer } from '@modelcontextprotocol/sdk/server/index.js'
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
import { app } from 'electron'

import { CdpBrowserController } from './controller'
import { toolDefinitions, toolHandlers } from './tools'

export class BrowserServer {
  public server: Server
  private controller = new CdpBrowserController()

  constructor() {
    const server = new MCServer(
      {
        name: '@cherry/browser',
        version: '0.1.0'
      },
      {
        capabilities: {
          resources: {},
          tools: {}
        }
      }
    )

    server.setRequestHandler(ListToolsRequestSchema, async () => {
      return {
        tools: toolDefinitions
      }
    })

    server.setRequestHandler(CallToolRequestSchema, async (request) => {
      const { name, arguments: args } = request.params
      const handler = toolHandlers[name]
      if (!handler) {
        throw new Error('Tool not found')
      }
      return handler(this.controller, args)
    })

    app.on('before-quit', () => {
      void this.controller.reset()
    })

    this.server = server
  }
}

export default BrowserServer

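The two request handlers implement the standard MCP tools/list and tools/call flow; a call routed through CallToolRequestSchema carries roughly this shape (values illustrative), and the handler dispatches it by name through toolHandlers:

// Illustrative shape of a tools/call request as this server receives it.
const exampleRequest = {
  method: 'tools/call',
  params: {
    name: 'fetch',
    arguments: { url: 'https://example.com/', format: 'markdown' }
  }
}
// -> toolHandlers['fetch'](controller, exampleRequest.params.arguments)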
src/main/mcpServers/browser/tools/execute.ts (new file, 48 lines)
@@ -0,0 +1,48 @@
import * as z from 'zod'

import type { CdpBrowserController } from '../controller'
import { errorResponse, successResponse } from './utils'

export const ExecuteSchema = z.object({
  code: z
    .string()
    .describe(
      'JavaScript evaluated via Chrome DevTools Runtime.evaluate. Keep it short; prefer one-line with semicolons for multiple statements.'
    ),
  timeout: z.number().default(5000).describe('Timeout in milliseconds for code execution (default: 5000ms)'),
  sessionId: z.string().optional().describe('Session identifier to target a specific page (default: default)')
})

export const executeToolDefinition = {
  name: 'execute',
  description:
    'Run JavaScript in the current page via Runtime.evaluate. Prefer short, single-line snippets; use semicolons for multiple statements.',
  inputSchema: {
    type: 'object',
    properties: {
      code: {
        type: 'string',
        description: 'One-line JS to evaluate in page context'
      },
      timeout: {
        type: 'number',
        description: 'Timeout in milliseconds (default 5000)'
      },
      sessionId: {
        type: 'string',
        description: 'Session identifier; targets a specific page (default: default)'
      }
    },
    required: ['code']
  }
}

export async function handleExecute(controller: CdpBrowserController, args: unknown) {
  const { code, timeout, sessionId } = ExecuteSchema.parse(args)
  try {
    const value = await controller.execute(code, timeout, sessionId ?? 'default')
    return successResponse(typeof value === 'string' ? value : JSON.stringify(value))
  } catch (error) {
    return errorResponse(error as Error)
  }
}

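Note the split between the two schemas: inputSchema is what MCP clients see from tools/list, while ExecuteSchema.parse is what actually validates arguments and applies defaults at call time. For instance:

// ExecuteSchema fills the default timeout when the caller omits it.
const parsed = ExecuteSchema.parse({ code: 'document.title' })
// parsed.code    === 'document.title'
// parsed.timeout === 5000   (Zod default applied)
// parsed.sessionId stays undefined, so handleExecute falls back to 'default'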
src/main/mcpServers/browser/tools/fetch.ts (new file, 49 lines)
@@ -0,0 +1,49 @@
import * as z from 'zod'

import type { CdpBrowserController } from '../controller'
import { errorResponse, successResponse } from './utils'

export const FetchSchema = z.object({
  url: z.url().describe('URL to fetch'),
  format: z.enum(['html', 'txt', 'markdown', 'json']).default('markdown').describe('Output format (default: markdown)'),
  timeout: z.number().optional().describe('Timeout in milliseconds for navigation (default: 10000)'),
  sessionId: z.string().optional().describe('Session identifier (default: default)')
})

export const fetchToolDefinition = {
  name: 'fetch',
  description: 'Fetch a URL using the browser and return content in specified format (html, txt, markdown, json)',
  inputSchema: {
    type: 'object',
    properties: {
      url: {
        type: 'string',
        description: 'URL to fetch'
      },
      format: {
        type: 'string',
        enum: ['html', 'txt', 'markdown', 'json'],
        description: 'Output format (default: markdown)'
      },
      timeout: {
        type: 'number',
        description: 'Navigation timeout in milliseconds (default: 10000)'
      },
      sessionId: {
        type: 'string',
        description: 'Session identifier (default: default)'
      }
    },
    required: ['url']
  }
}

export async function handleFetch(controller: CdpBrowserController, args: unknown) {
  const { url, format, timeout, sessionId } = FetchSchema.parse(args)
  try {
    const content = await controller.fetch(url, format, timeout ?? 10000, sessionId ?? 'default')
    return successResponse(typeof content === 'string' ? content : JSON.stringify(content))
  } catch (error) {
    return errorResponse(error as Error)
  }
}

src/main/mcpServers/browser/tools/index.ts (new file, 25 lines)
@@ -0,0 +1,25 @@
export { ExecuteSchema, executeToolDefinition, handleExecute } from './execute'
export { FetchSchema, fetchToolDefinition, handleFetch } from './fetch'
export { handleOpen, OpenSchema, openToolDefinition } from './open'
export { handleReset, resetToolDefinition } from './reset'

import type { CdpBrowserController } from '../controller'
import { executeToolDefinition, handleExecute } from './execute'
import { fetchToolDefinition, handleFetch } from './fetch'
import { handleOpen, openToolDefinition } from './open'
import { handleReset, resetToolDefinition } from './reset'

export const toolDefinitions = [openToolDefinition, executeToolDefinition, resetToolDefinition, fetchToolDefinition]

export const toolHandlers: Record<
  string,
  (
    controller: CdpBrowserController,
    args: unknown
  ) => Promise<{ content: { type: string; text: string }[]; isError: boolean }>
> = {
  open: handleOpen,
  execute: handleExecute,
  reset: handleReset,
  fetch: handleFetch
}

src/main/mcpServers/browser/tools/open.ts (new file, 47 lines)
@@ -0,0 +1,47 @@
import * as z from 'zod'

import type { CdpBrowserController } from '../controller'
import { successResponse } from './utils'

export const OpenSchema = z.object({
  url: z.url().describe('URL to open in the controlled Electron window'),
  timeout: z.number().optional().describe('Timeout in milliseconds for navigation (default: 10000)'),
  show: z.boolean().optional().describe('Whether to show the browser window (default: false)'),
  sessionId: z
    .string()
    .optional()
    .describe('Session identifier; separate sessions keep separate pages (default: default)')
})

export const openToolDefinition = {
  name: 'open',
  description: 'Open a URL in a hidden Electron window controlled via Chrome DevTools Protocol',
  inputSchema: {
    type: 'object',
    properties: {
      url: {
        type: 'string',
        description: 'URL to load'
      },
      timeout: {
        type: 'number',
        description: 'Navigation timeout in milliseconds (default 10000)'
      },
      show: {
        type: 'boolean',
        description: 'Whether to show the browser window (default false)'
      },
      sessionId: {
        type: 'string',
        description: 'Session identifier; separate sessions keep separate pages (default: default)'
      }
    },
    required: ['url']
  }
}

export async function handleOpen(controller: CdpBrowserController, args: unknown) {
  const { url, timeout, show, sessionId } = OpenSchema.parse(args)
  const res = await controller.open(url, timeout ?? 10000, show ?? false, sessionId ?? 'default')
  return successResponse(JSON.stringify(res))
}

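For reference, an arguments object that OpenSchema accepts as an MCP client would send it (values illustrative); omitted optionals fall back inside handleOpen:

// Illustrative arguments for the open tool.
const openArgs = {
  url: 'https://example.com/',
  show: true,            // pop a visible window instead of the default hidden one
  sessionId: 'research'  // separate sessions keep separate pages
}
// handleOpen(controller, openArgs) resolves to successResponse(JSON.stringify({ currentUrl, title }))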
src/main/mcpServers/browser/tools/reset.ts (new file, 34 lines)
@@ -0,0 +1,34 @@
import * as z from 'zod'

import type { CdpBrowserController } from '../controller'
import { successResponse } from './utils'

/** Zod schema for validating reset tool arguments */
export const ResetSchema = z.object({
  sessionId: z.string().optional().describe('Session identifier to reset; omit to reset all sessions')
})

/** MCP tool definition for the reset tool */
export const resetToolDefinition = {
  name: 'reset',
  description: 'Reset the controlled window and detach debugger',
  inputSchema: {
    type: 'object',
    properties: {
      sessionId: {
        type: 'string',
        description: 'Session identifier to reset; omit to reset all sessions'
      }
    }
  }
}

/**
 * Handler for the reset MCP tool.
 * Closes browser window(s) and detaches debugger for the specified session or all sessions.
 */
export async function handleReset(controller: CdpBrowserController, args: unknown) {
  const { sessionId } = ResetSchema.parse(args)
  await controller.reset(sessionId)
  return successResponse('reset')
}

src/main/mcpServers/browser/tools/utils.ts (new file, 13 lines)
@@ -0,0 +1,13 @@
export function successResponse(text: string) {
  return {
    content: [{ type: 'text', text }],
    isError: false
  }
}

export function errorResponse(error: Error) {
  return {
    content: [{ type: 'text', text: error.message }],
    isError: true
  }
}

src/main/mcpServers/browser/types.ts (new file, 4 lines)
@@ -0,0 +1,4 @@
import { loggerService } from '@logger'

export const logger = loggerService.withContext('MCPBrowserCDP')
export const userAgent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:145.0) Gecko/20100101 Firefox/145.0'

@@ -4,6 +4,7 @@ import type { BuiltinMCPServerName } from '@types'
 import { BuiltinMCPServerNames } from '@types'

 import BraveSearchServer from './brave-search'
+import BrowserServer from './browser'
 import DiDiMcpServer from './didi-mcp'
 import DifyKnowledgeServer from './dify-knowledge'
 import FetchServer from './fetch'
@@ -35,7 +36,7 @@ export function createInMemoryMCPServer(
       return new FetchServer().server
     }
     case BuiltinMCPServerNames.filesystem: {
-      return new FileSystemServer(args).server
+      return new FileSystemServer(envs.WORKSPACE_ROOT).server
     }
     case BuiltinMCPServerNames.difyKnowledge: {
       const difyKey = envs.DIFY_KEY
@@ -48,6 +49,9 @@ export function createInMemoryMCPServer(
       const apiKey = envs.DIDI_API_KEY
       return new DiDiMcpServer(apiKey).server
     }
+    case BuiltinMCPServerNames.browser: {
+      return new BrowserServer().server
+    }
     default:
       throw new Error(`Unknown in-memory MCP server: ${name}`)
   }

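A sketch of how the new branch is reached. The BuiltinMCPServerNames.browser constant and the envs.WORKSPACE_ROOT usage come from the diff; the factory's exact parameter list is an assumption inferred from the call sites visible here:

// Hypothetical call sites for the factory (parameter list assumed).
import { BuiltinMCPServerNames } from '@types'
import { createInMemoryMCPServer } from './factory'

const browserServer = createInMemoryMCPServer(BuiltinMCPServerNames.browser, [], {})
const fsServer = createInMemoryMCPServer(BuiltinMCPServerNames.filesystem, [], {
  WORKSPACE_ROOT: '/path/to/workspace' // illustrative value
})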
@@ -1,652 +0,0 @@ (deleted file)
// port https://github.com/modelcontextprotocol/servers/blob/main/src/filesystem/index.ts

import { loggerService } from '@logger'
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
import { createTwoFilesPatch } from 'diff'
import fs from 'fs/promises'
import { minimatch } from 'minimatch'
import os from 'os'
import path from 'path'
import * as z from 'zod'

const logger = loggerService.withContext('MCP:FileSystemServer')

// Normalize all paths consistently
function normalizePath(p: string): string {
  return path.normalize(p)
}

function expandHome(filepath: string): string {
  if (filepath.startsWith('~/') || filepath === '~') {
    return path.join(os.homedir(), filepath.slice(1))
  }
  return filepath
}

// Security utilities
async function validatePath(allowedDirectories: string[], requestedPath: string): Promise<string> {
  const expandedPath = expandHome(requestedPath)
  const absolute = path.isAbsolute(expandedPath)
    ? path.resolve(expandedPath)
    : path.resolve(process.cwd(), expandedPath)

  const normalizedRequested = normalizePath(absolute)

  // Check if path is within allowed directories
  const isAllowed = allowedDirectories.some((dir) => normalizedRequested.startsWith(dir))
  if (!isAllowed) {
    throw new Error(
      `Access denied - path outside allowed directories: ${absolute} not in ${allowedDirectories.join(', ')}`
    )
  }

  // Handle symlinks by checking their real path
  try {
    const realPath = await fs.realpath(absolute)
    const normalizedReal = normalizePath(realPath)
    const isRealPathAllowed = allowedDirectories.some((dir) => normalizedReal.startsWith(dir))
    if (!isRealPathAllowed) {
      throw new Error('Access denied - symlink target outside allowed directories')
    }
    return realPath
  } catch (error) {
    // For new files that don't exist yet, verify parent directory
    const parentDir = path.dirname(absolute)
    try {
      const realParentPath = await fs.realpath(parentDir)
      const normalizedParent = normalizePath(realParentPath)
      const isParentAllowed = allowedDirectories.some((dir) => normalizedParent.startsWith(dir))
      if (!isParentAllowed) {
        throw new Error('Access denied - parent directory outside allowed directories')
      }
      return absolute
    } catch {
      throw new Error(`Parent directory does not exist: ${parentDir}`)
    }
  }
}

// Schema definitions
const ReadFileArgsSchema = z.object({
  path: z.string()
})

const ReadMultipleFilesArgsSchema = z.object({
  paths: z.array(z.string())
})

const WriteFileArgsSchema = z.object({
  path: z.string(),
  content: z.string()
})

const EditOperation = z.object({
  oldText: z.string().describe('Text to search for - must match exactly'),
  newText: z.string().describe('Text to replace with')
})

const EditFileArgsSchema = z.object({
  path: z.string(),
  edits: z.array(EditOperation),
  dryRun: z.boolean().default(false).describe('Preview changes using git-style diff format')
})

const CreateDirectoryArgsSchema = z.object({
  path: z.string()
})

const ListDirectoryArgsSchema = z.object({
  path: z.string()
})

const DirectoryTreeArgsSchema = z.object({
  path: z.string()
})

const MoveFileArgsSchema = z.object({
  source: z.string(),
  destination: z.string()
})

const SearchFilesArgsSchema = z.object({
  path: z.string(),
  pattern: z.string(),
  excludePatterns: z.array(z.string()).optional().default([])
})

const GetFileInfoArgsSchema = z.object({
  path: z.string()
})

interface FileInfo {
  size: number
  created: Date
  modified: Date
  accessed: Date
  isDirectory: boolean
  isFile: boolean
  permissions: string
}

// Tool implementations
async function getFileStats(filePath: string): Promise<FileInfo> {
  const stats = await fs.stat(filePath)
  return {
    size: stats.size,
    created: stats.birthtime,
    modified: stats.mtime,
    accessed: stats.atime,
    isDirectory: stats.isDirectory(),
    isFile: stats.isFile(),
    permissions: stats.mode.toString(8).slice(-3)
  }
}

async function searchFiles(
  allowedDirectories: string[],
  rootPath: string,
  pattern: string,
  excludePatterns: string[] = []
): Promise<string[]> {
  const results: string[] = []

  async function search(currentPath: string) {
    const entries = await fs.readdir(currentPath, { withFileTypes: true })

    for (const entry of entries) {
      const fullPath = path.join(currentPath, entry.name)

      try {
        // Validate each path before processing
        await validatePath(allowedDirectories, fullPath)

        // Check if path matches any exclude pattern
        const relativePath = path.relative(rootPath, fullPath)
        const shouldExclude = excludePatterns.some((pattern) => {
          const globPattern = pattern.includes('*') ? pattern : `**/${pattern}/**`
          return minimatch(relativePath, globPattern, { dot: true })
        })

        if (shouldExclude) {
          continue
        }

        if (entry.name.toLowerCase().includes(pattern.toLowerCase())) {
          results.push(fullPath)
        }

        if (entry.isDirectory()) {
          await search(fullPath)
        }
      } catch (error) {
        // Skip invalid paths during search
      }
    }
  }

  await search(rootPath)
  return results
}

// file editing and diffing utilities
function normalizeLineEndings(text: string): string {
  return text.replace(/\r\n/g, '\n')
}

function createUnifiedDiff(originalContent: string, newContent: string, filepath: string = 'file'): string {
  // Ensure consistent line endings for diff
  const normalizedOriginal = normalizeLineEndings(originalContent)
  const normalizedNew = normalizeLineEndings(newContent)

  return createTwoFilesPatch(filepath, filepath, normalizedOriginal, normalizedNew, 'original', 'modified')
}

async function applyFileEdits(
  filePath: string,
  edits: Array<{ oldText: string; newText: string }>,
  dryRun = false
): Promise<string> {
  // Read file content and normalize line endings
  const content = normalizeLineEndings(await fs.readFile(filePath, 'utf-8'))

  // Apply edits sequentially
  let modifiedContent = content
  for (const edit of edits) {
    const normalizedOld = normalizeLineEndings(edit.oldText)
    const normalizedNew = normalizeLineEndings(edit.newText)

    // If exact match exists, use it
    if (modifiedContent.includes(normalizedOld)) {
      modifiedContent = modifiedContent.replace(normalizedOld, normalizedNew)
      continue
    }

    // Otherwise, try line-by-line matching with flexibility for whitespace
    const oldLines = normalizedOld.split('\n')
    const contentLines = modifiedContent.split('\n')
    let matchFound = false

    for (let i = 0; i <= contentLines.length - oldLines.length; i++) {
      const potentialMatch = contentLines.slice(i, i + oldLines.length)

      // Compare lines with normalized whitespace
      const isMatch = oldLines.every((oldLine, j) => {
        const contentLine = potentialMatch[j]
        return oldLine.trim() === contentLine.trim()
      })

      if (isMatch) {
        // Preserve original indentation of first line
        const originalIndent = contentLines[i].match(/^\s*/)?.[0] || ''
        const newLines = normalizedNew.split('\n').map((line, j) => {
          if (j === 0) return originalIndent + line.trimStart()
          // For subsequent lines, try to preserve relative indentation
          const oldIndent = oldLines[j]?.match(/^\s*/)?.[0] || ''
          const newIndent = line.match(/^\s*/)?.[0] || ''
          if (oldIndent && newIndent) {
            const relativeIndent = newIndent.length - oldIndent.length
            return originalIndent + ' '.repeat(Math.max(0, relativeIndent)) + line.trimStart()
          }
          return line
        })

        contentLines.splice(i, oldLines.length, ...newLines)
        modifiedContent = contentLines.join('\n')
        matchFound = true
        break
      }
    }

    if (!matchFound) {
      throw new Error(`Could not find exact match for edit:\n${edit.oldText}`)
    }
  }

  // Create unified diff
  const diff = createUnifiedDiff(content, modifiedContent, filePath)

  // Format diff with appropriate number of backticks
  let numBackticks = 3
  while (diff.includes('`'.repeat(numBackticks))) {
    numBackticks++
  }
  const formattedDiff = `${'`'.repeat(numBackticks)}diff\n${diff}${'`'.repeat(numBackticks)}\n\n`

  if (!dryRun) {
    await fs.writeFile(filePath, modifiedContent, 'utf-8')
  }

  return formattedDiff
}

class FileSystemServer {
  public server: Server
  private allowedDirectories: string[]
  constructor(allowedDirs: string[]) {
    if (!Array.isArray(allowedDirs) || allowedDirs.length === 0) {
      throw new Error('No allowed directories provided, please specify at least one directory in args')
    }

    this.allowedDirectories = allowedDirs.map((dir) => normalizePath(path.resolve(expandHome(dir))))

    // Validate that all directories exist and are accessible
    this.validateDirs().catch((error) => {
      logger.error('Error validating allowed directories:', error)
      throw new Error(`Error validating allowed directories: ${error}`)
    })

    this.server = new Server(
      {
        name: 'secure-filesystem-server',
        version: '0.2.0'
      },
      {
        capabilities: {
          tools: {}
        }
      }
    )
    this.initialize()
  }

  async validateDirs() {
    // Validate that all directories exist and are accessible
    await Promise.all(
      this.allowedDirectories.map(async (dir) => {
        try {
          const stats = await fs.stat(expandHome(dir))
          if (!stats.isDirectory()) {
            logger.error(`Error: ${dir} is not a directory`)
            throw new Error(`Error: ${dir} is not a directory`)
          }
        } catch (error: any) {
          logger.error(`Error accessing directory ${dir}:`, error)
          throw new Error(`Error accessing directory ${dir}:`, error)
        }
      })
    )
  }

  initialize() {
    // Tool handlers
    this.server.setRequestHandler(ListToolsRequestSchema, async () => {
      return {
        tools: [
          {
            name: 'read_file',
            description:
              'Read the complete contents of a file from the file system. ' +
              'Handles various text encodings and provides detailed error messages ' +
              'if the file cannot be read. Use this tool when you need to examine ' +
              'the contents of a single file. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(ReadFileArgsSchema)
          },
          {
            name: 'read_multiple_files',
            description:
              'Read the contents of multiple files simultaneously. This is more ' +
              'efficient than reading files one by one when you need to analyze ' +
              "or compare multiple files. Each file's content is returned with its " +
              "path as a reference. Failed reads for individual files won't stop " +
              'the entire operation. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(ReadMultipleFilesArgsSchema)
          },
          {
            name: 'write_file',
            description:
              'Create a new file or completely overwrite an existing file with new content. ' +
              'Use with caution as it will overwrite existing files without warning. ' +
              'Handles text content with proper encoding. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(WriteFileArgsSchema)
          },
          {
            name: 'edit_file',
            description:
              'Make line-based edits to a text file. Each edit replaces exact line sequences ' +
              'with new content. Returns a git-style diff showing the changes made. ' +
              'Only works within allowed directories.',
            inputSchema: z.toJSONSchema(EditFileArgsSchema)
          },
          {
            name: 'create_directory',
            description:
              'Create a new directory or ensure a directory exists. Can create multiple ' +
              'nested directories in one operation. If the directory already exists, ' +
              'this operation will succeed silently. Perfect for setting up directory ' +
              'structures for projects or ensuring required paths exist. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(CreateDirectoryArgsSchema)
          },
          {
            name: 'list_directory',
            description:
              'Get a detailed listing of all files and directories in a specified path. ' +
              'Results clearly distinguish between files and directories with [FILE] and [DIR] ' +
              'prefixes. This tool is essential for understanding directory structure and ' +
              'finding specific files within a directory. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(ListDirectoryArgsSchema)
          },
          {
            name: 'directory_tree',
            description:
              'Get a recursive tree view of files and directories as a JSON structure. ' +
              "Each entry includes 'name', 'type' (file/directory), and 'children' for directories. " +
              'Files have no children array, while directories always have a children array (which may be empty). ' +
              'The output is formatted with 2-space indentation for readability. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(DirectoryTreeArgsSchema)
          },
          {
            name: 'move_file',
            description:
              'Move or rename files and directories. Can move files between directories ' +
              'and rename them in a single operation. If the destination exists, the ' +
              'operation will fail. Works across different directories and can be used ' +
              'for simple renaming within the same directory. Both source and destination must be within allowed directories.',
            inputSchema: z.toJSONSchema(MoveFileArgsSchema)
          },
          {
            name: 'search_files',
            description:
              'Recursively search for files and directories matching a pattern. ' +
              'Searches through all subdirectories from the starting path. The search ' +
              'is case-insensitive and matches partial names. Returns full paths to all ' +
              "matching items. Great for finding files when you don't know their exact location. " +
              'Only searches within allowed directories.',
            inputSchema: z.toJSONSchema(SearchFilesArgsSchema)
          },
          {
            name: 'get_file_info',
            description:
              'Retrieve detailed metadata about a file or directory. Returns comprehensive ' +
              'information including size, creation time, last modified time, permissions, ' +
              'and type. This tool is perfect for understanding file characteristics ' +
              'without reading the actual content. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(GetFileInfoArgsSchema)
          },
          {
            name: 'list_allowed_directories',
            description:
              'Returns the list of directories that this server is allowed to access. ' +
              'Use this to understand which directories are available before trying to access files.',
            inputSchema: {
              type: 'object',
              properties: {},
              required: []
            }
          }
        ]
      }
    })

    this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
      try {
        const { name, arguments: args } = request.params

        switch (name) {
          case 'read_file': {
            const parsed = ReadFileArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for read_file: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            const content = await fs.readFile(validPath, 'utf-8')
            return {
              content: [{ type: 'text', text: content }]
            }
          }

          case 'read_multiple_files': {
            const parsed = ReadMultipleFilesArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for read_multiple_files: ${parsed.error}`)
            }
            const results = await Promise.all(
              parsed.data.paths.map(async (filePath: string) => {
                try {
                  const validPath = await validatePath(this.allowedDirectories, filePath)
                  const content = await fs.readFile(validPath, 'utf-8')
                  return `${filePath}:\n${content}\n`
                } catch (error) {
                  const errorMessage = error instanceof Error ? error.message : String(error)
                  return `${filePath}: Error - ${errorMessage}`
                }
              })
            )
            return {
              content: [{ type: 'text', text: results.join('\n---\n') }]
            }
          }

          case 'write_file': {
            const parsed = WriteFileArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for write_file: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            await fs.writeFile(validPath, parsed.data.content, 'utf-8')
            return {
              content: [{ type: 'text', text: `Successfully wrote to ${parsed.data.path}` }]
            }
          }

          case 'edit_file': {
            const parsed = EditFileArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for edit_file: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            const result = await applyFileEdits(validPath, parsed.data.edits, parsed.data.dryRun)
            return {
              content: [{ type: 'text', text: result }]
            }
          }

          case 'create_directory': {
            const parsed = CreateDirectoryArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for create_directory: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            await fs.mkdir(validPath, { recursive: true })
            return {
              content: [{ type: 'text', text: `Successfully created directory ${parsed.data.path}` }]
            }
          }

          case 'list_directory': {
            const parsed = ListDirectoryArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for list_directory: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            const entries = await fs.readdir(validPath, { withFileTypes: true })
            const formatted = entries
              .map((entry) => `${entry.isDirectory() ? '[DIR]' : '[FILE]'} ${entry.name}`)
              .join('\n')
            return {
              content: [{ type: 'text', text: formatted }]
            }
          }

          case 'directory_tree': {
            const parsed = DirectoryTreeArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for directory_tree: ${parsed.error}`)
            }

            interface TreeEntry {
              name: string
              type: 'file' | 'directory'
              children?: TreeEntry[]
            }

            async function buildTree(allowedDirectories: string[], currentPath: string): Promise<TreeEntry[]> {
              const validPath = await validatePath(allowedDirectories, currentPath)
              const entries = await fs.readdir(validPath, { withFileTypes: true })
              const result: TreeEntry[] = []

              for (const entry of entries) {
                const entryData: TreeEntry = {
                  name: entry.name,
                  type: entry.isDirectory() ? 'directory' : 'file'
                }

                if (entry.isDirectory()) {
                  const subPath = path.join(currentPath, entry.name)
                  entryData.children = await buildTree(allowedDirectories, subPath)
                }

                result.push(entryData)
              }

              return result
            }

            const treeData = await buildTree(this.allowedDirectories, parsed.data.path)
            return {
              content: [
                {
                  type: 'text',
                  text: JSON.stringify(treeData, null, 2)
                }
              ]
            }
          }

          case 'move_file': {
            const parsed = MoveFileArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for move_file: ${parsed.error}`)
            }
            const validSourcePath = await validatePath(this.allowedDirectories, parsed.data.source)
            const validDestPath = await validatePath(this.allowedDirectories, parsed.data.destination)
            await fs.rename(validSourcePath, validDestPath)
            return {
              content: [
                { type: 'text', text: `Successfully moved ${parsed.data.source} to ${parsed.data.destination}` }
              ]
            }
          }

          case 'search_files': {
            const parsed = SearchFilesArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for search_files: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            const results = await searchFiles(
              this.allowedDirectories,
              validPath,
              parsed.data.pattern,
              parsed.data.excludePatterns
            )
            return {
              content: [{ type: 'text', text: results.length > 0 ? results.join('\n') : 'No matches found' }]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case 'get_file_info': {
|
|
||||||
const parsed = GetFileInfoArgsSchema.safeParse(args)
|
|
||||||
if (!parsed.success) {
|
|
||||||
throw new Error(`Invalid arguments for get_file_info: ${parsed.error}`)
|
|
||||||
}
|
|
||||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
|
||||||
const info = await getFileStats(validPath)
|
|
||||||
return {
|
|
||||||
content: [
|
|
||||||
{
|
|
||||||
type: 'text',
|
|
||||||
text: Object.entries(info)
|
|
||||||
.map(([key, value]) => `${key}: ${value}`)
|
|
||||||
.join('\n')
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case 'list_allowed_directories': {
|
|
||||||
return {
|
|
||||||
content: [
|
|
||||||
{
|
|
||||||
type: 'text',
|
|
||||||
text: `Allowed directories:\n${this.allowedDirectories.join('\n')}`
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
default:
|
|
||||||
throw new Error(`Unknown tool: ${name}`)
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
const errorMessage = error instanceof Error ? error.message : String(error)
|
|
||||||
return {
|
|
||||||
content: [{ type: 'text', text: `Error: ${errorMessage}` }],
|
|
||||||
isError: true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
export default FileSystemServer
|
|
||||||
2
src/main/mcpServers/filesystem/index.ts
Normal file
@@ -0,0 +1,2 @@
// Re-export FileSystemServer to maintain existing import pattern
export { default, FileSystemServer } from './server'
118
src/main/mcpServers/filesystem/server.ts
Normal file
@@ -0,0 +1,118 @@
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
import { app } from 'electron'
import fs from 'fs/promises'
import path from 'path'

import {
  deleteToolDefinition,
  editToolDefinition,
  globToolDefinition,
  grepToolDefinition,
  handleDeleteTool,
  handleEditTool,
  handleGlobTool,
  handleGrepTool,
  handleLsTool,
  handleReadTool,
  handleWriteTool,
  lsToolDefinition,
  readToolDefinition,
  writeToolDefinition
} from './tools'
import { logger } from './types'

export class FileSystemServer {
  public server: Server
  private baseDir: string

  constructor(baseDir?: string) {
    if (baseDir && path.isAbsolute(baseDir)) {
      this.baseDir = baseDir
      logger.info(`Using provided baseDir for filesystem MCP: ${baseDir}`)
    } else {
      const userData = app.getPath('userData')
      this.baseDir = path.join(userData, 'Data', 'Workspace')
      logger.info(`Using default workspace for filesystem MCP baseDir: ${this.baseDir}`)
    }

    this.server = new Server(
      {
        name: 'filesystem-server',
        version: '2.0.0'
      },
      {
        capabilities: {
          tools: {}
        }
      }
    )

    this.initialize()
  }

  async initialize() {
    try {
      await fs.mkdir(this.baseDir, { recursive: true })
    } catch (error) {
      logger.error('Failed to create filesystem MCP baseDir', { error, baseDir: this.baseDir })
    }

    // Register tool list handler
    this.server.setRequestHandler(ListToolsRequestSchema, async () => {
      return {
        tools: [
          globToolDefinition,
          lsToolDefinition,
          grepToolDefinition,
          readToolDefinition,
          editToolDefinition,
          writeToolDefinition,
          deleteToolDefinition
        ]
      }
    })

    // Register tool call handler
    this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
      try {
        const { name, arguments: args } = request.params

        switch (name) {
          case 'glob':
            return await handleGlobTool(args, this.baseDir)

          case 'ls':
            return await handleLsTool(args, this.baseDir)

          case 'grep':
            return await handleGrepTool(args, this.baseDir)

          case 'read':
            return await handleReadTool(args, this.baseDir)

          case 'edit':
            return await handleEditTool(args, this.baseDir)

          case 'write':
            return await handleWriteTool(args, this.baseDir)

          case 'delete':
            return await handleDeleteTool(args, this.baseDir)

          default:
            throw new Error(`Unknown tool: ${name}`)
        }
      } catch (error) {
        const errorMessage = error instanceof Error ? error.message : String(error)
        logger.error(`Tool execution error for ${request.params.name}:`, { error })
        return {
          content: [{ type: 'text', text: `Error: ${errorMessage}` }],
          isError: true
        }
      }
    })
  }
}

export default FileSystemServer
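For orientation, here is a minimal sketch of how this class could be hooked up to a transport with the same MCP SDK. The stdio transport and the paths below are illustrative assumptions; the diff itself does not show how Cherry Studio hosts the server in-process.

// Hypothetical wiring sketch, not part of this diff
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'

import FileSystemServer from './server'

async function main() {
  // An absolute baseDir overrides the default userData workspace (see the constructor above)
  const fsServer = new FileSystemServer('/tmp/workspace')
  await fsServer.server.connect(new StdioServerTransport())
}

main()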
93
src/main/mcpServers/filesystem/tools/delete.ts
Normal file
@@ -0,0 +1,93 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { logger, validatePath } from '../types'

// Schema definition
export const DeleteToolSchema = z.object({
  path: z.string().describe('The path to the file or directory to delete'),
  recursive: z.boolean().optional().describe('For directories, whether to delete recursively (default: false)')
})

// Tool definition with detailed description
export const deleteToolDefinition = {
  name: 'delete',
  description: `Deletes a file or directory from the filesystem.

CAUTION: This operation cannot be undone!

- For files: simply provide the path
- For empty directories: provide the path
- For non-empty directories: set recursive=true
- The path must be an absolute path, not a relative path
- Always verify the path before deleting to avoid data loss`,
  inputSchema: z.toJSONSchema(DeleteToolSchema)
}

// Handler implementation
export async function handleDeleteTool(args: unknown, baseDir: string) {
  const parsed = DeleteToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for delete: ${parsed.error}`)
  }

  const targetPath = parsed.data.path
  const validPath = await validatePath(targetPath, baseDir)
  const recursive = parsed.data.recursive || false

  // Check if path exists and get stats
  let stats
  try {
    stats = await fs.stat(validPath)
  } catch (error: any) {
    if (error.code === 'ENOENT') {
      throw new Error(`Path not found: ${targetPath}`)
    }
    throw error
  }

  const isDirectory = stats.isDirectory()
  const relativePath = path.relative(baseDir, validPath)

  // Perform deletion
  try {
    if (isDirectory) {
      if (recursive) {
        // Delete directory recursively
        await fs.rm(validPath, { recursive: true, force: true })
      } else {
        // Try to delete empty directory
        await fs.rmdir(validPath)
      }
    } else {
      // Delete file
      await fs.unlink(validPath)
    }
  } catch (error: any) {
    if (error.code === 'ENOTEMPTY') {
      throw new Error(`Directory not empty: ${targetPath}. Use recursive=true to delete non-empty directories.`)
    }
    throw new Error(`Failed to delete: ${error.message}`)
  }

  // Log the operation
  logger.info('Path deleted', {
    path: validPath,
    type: isDirectory ? 'directory' : 'file',
    recursive: isDirectory ? recursive : undefined
  })

  // Format output
  const itemType = isDirectory ? 'Directory' : 'File'
  const recursiveNote = isDirectory && recursive ? ' (recursive)' : ''

  return {
    content: [
      {
        type: 'text',
        text: `${itemType} deleted${recursiveNote}: ${relativePath}`
      }
    ]
  }
}
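Because handleDeleteTool is an ordinary exported async function taking raw args plus the base directory, it can be exercised directly. A minimal sketch, inside an async context, with hypothetical paths:

import { handleDeleteTool } from './delete'

// '/tmp/workspace' and 'old-notes' are made-up example values
const result = await handleDeleteTool(
  { path: '/tmp/workspace/old-notes', recursive: true }, // recursive is required for non-empty directories
  '/tmp/workspace'
)
console.log(result.content[0].text) // e.g. 'Directory deleted (recursive): old-notes'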
130
src/main/mcpServers/filesystem/tools/edit.ts
Normal file
@@ -0,0 +1,130 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { logger, replaceWithFuzzyMatch, validatePath } from '../types'

// Schema definition
export const EditToolSchema = z.object({
  file_path: z.string().describe('The path to the file to modify'),
  old_string: z.string().describe('The text to replace'),
  new_string: z.string().describe('The text to replace it with'),
  replace_all: z.boolean().optional().default(false).describe('Replace all occurrences of old_string (default false)')
})

// Tool definition with detailed description
export const editToolDefinition = {
  name: 'edit',
  description: `Performs exact string replacements in files.

- You must use the 'read' tool at least once before editing
- The file_path must be an absolute path, not a relative path
- Preserve exact indentation from read output (after the line number prefix)
- Never include line number prefixes in old_string or new_string
- ALWAYS prefer editing existing files over creating new ones
- The edit will FAIL if old_string is not found in the file
- The edit will FAIL if old_string appears multiple times (provide more context or use replace_all)
- The edit will FAIL if old_string equals new_string
- Use replace_all to rename variables or replace all occurrences`,
  inputSchema: z.toJSONSchema(EditToolSchema)
}

// Handler implementation
export async function handleEditTool(args: unknown, baseDir: string) {
  const parsed = EditToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for edit: ${parsed.error}`)
  }

  const { file_path: filePath, old_string: oldString, new_string: newString, replace_all: replaceAll } = parsed.data

  // Validate path
  const validPath = await validatePath(filePath, baseDir)

  // Check if file exists
  try {
    const stats = await fs.stat(validPath)
    if (!stats.isFile()) {
      throw new Error(`Path is not a file: ${filePath}`)
    }
  } catch (error: any) {
    if (error.code === 'ENOENT') {
      // If old_string is empty, this is a create new file operation
      if (oldString === '') {
        // Create parent directory if needed
        const parentDir = path.dirname(validPath)
        await fs.mkdir(parentDir, { recursive: true })

        // Write the new content
        await fs.writeFile(validPath, newString, 'utf-8')

        logger.info('File created', { path: validPath })

        const relativePath = path.relative(baseDir, validPath)
        return {
          content: [
            {
              type: 'text',
              text: `Created new file: ${relativePath}\nLines: ${newString.split('\n').length}`
            }
          ]
        }
      }
      throw new Error(`File not found: ${filePath}`)
    }
    throw error
  }

  // Read current content
  const content = await fs.readFile(validPath, 'utf-8')

  // Handle special case: old_string is empty (create file with content)
  if (oldString === '') {
    await fs.writeFile(validPath, newString, 'utf-8')

    logger.info('File overwritten', { path: validPath })

    const relativePath = path.relative(baseDir, validPath)
    return {
      content: [
        {
          type: 'text',
          text: `Overwrote file: ${relativePath}\nLines: ${newString.split('\n').length}`
        }
      ]
    }
  }

  // Perform the replacement with fuzzy matching
  const newContent = replaceWithFuzzyMatch(content, oldString, newString, replaceAll)

  // Write the modified content
  await fs.writeFile(validPath, newContent, 'utf-8')

  logger.info('File edited', {
    path: validPath,
    replaceAll
  })

  // Generate a simple diff summary
  const oldLines = content.split('\n').length
  const newLines = newContent.split('\n').length
  const lineDiff = newLines - oldLines

  const relativePath = path.relative(baseDir, validPath)
  let diffSummary = `Edited: ${relativePath}`
  if (lineDiff > 0) {
    diffSummary += `\n+${lineDiff} lines`
  } else if (lineDiff < 0) {
    diffSummary += `\n${lineDiff} lines`
  }

  return {
    content: [
      {
        type: 'text',
        text: diffSummary
      }
    ]
  }
}
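Two behaviours of the handler above are worth seeing side by side: an empty old_string creates the file (or overwrites it if it already exists), while replace_all switches from unique-match replacement to global replacement. A sketch with hypothetical paths, inside an async context:

import { handleEditTool } from './edit'

// Create a new file: ENOENT plus an empty old_string takes the create branch
await handleEditTool(
  { file_path: '/tmp/workspace/hello.ts', old_string: '', new_string: 'export const foo = 1\n' },
  '/tmp/workspace'
)

// Replace every occurrence of a string in an existing file
await handleEditTool(
  { file_path: '/tmp/workspace/hello.ts', old_string: 'foo', new_string: 'bar', replace_all: true },
  '/tmp/workspace'
)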
149
src/main/mcpServers/filesystem/tools/glob.ts
Normal file
@@ -0,0 +1,149 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import type { FileInfo } from '../types'
import { logger, MAX_FILES_LIMIT, runRipgrep, validatePath } from '../types'

// Schema definition
export const GlobToolSchema = z.object({
  pattern: z.string().describe('The glob pattern to match files against'),
  path: z
    .string()
    .optional()
    .describe('The directory to search in (must be absolute path). Defaults to the base directory')
})

// Tool definition with detailed description
export const globToolDefinition = {
  name: 'glob',
  description: `Fast file pattern matching tool that works with any codebase size.

- Supports glob patterns like "**/*.js" or "src/**/*.ts"
- Returns matching absolute file paths sorted by modification time (newest first)
- Use this when you need to find files by name patterns
- Patterns without "/" (e.g., "*.txt") match files at ANY depth in the directory tree
- Patterns with "/" (e.g., "src/*.ts") match relative to the search path
- Pattern syntax: * (any chars), ** (any path), {a,b} (alternatives), ? (single char)
- Results are limited to 100 files
- The path parameter must be an absolute path if specified
- If path is not specified, defaults to the base directory
- IMPORTANT: Omit the path field for the default directory (don't use "undefined" or "null")`,
  inputSchema: z.toJSONSchema(GlobToolSchema)
}

// Handler implementation
export async function handleGlobTool(args: unknown, baseDir: string) {
  const parsed = GlobToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for glob: ${parsed.error}`)
  }

  const searchPath = parsed.data.path || baseDir
  const validPath = await validatePath(searchPath, baseDir)

  // Verify the search directory exists
  try {
    const stats = await fs.stat(validPath)
    if (!stats.isDirectory()) {
      throw new Error(`Path is not a directory: ${validPath}`)
    }
  } catch (error: unknown) {
    if (error && typeof error === 'object' && 'code' in error && error.code === 'ENOENT') {
      throw new Error(`Directory not found: ${validPath}`)
    }
    throw error
  }

  // Validate pattern
  const pattern = parsed.data.pattern.trim()
  if (!pattern) {
    throw new Error('Pattern cannot be empty')
  }

  const files: FileInfo[] = []
  let truncated = false

  // Build ripgrep arguments for file listing using --glob=pattern format
  const rgArgs: string[] = [
    '--files',
    '--follow',
    '--hidden',
    `--glob=${pattern}`,
    '--glob=!.git/*',
    '--glob=!node_modules/*',
    '--glob=!dist/*',
    '--glob=!build/*',
    '--glob=!__pycache__/*',
    validPath
  ]

  // Use ripgrep for file listing
  logger.debug('Running ripgrep with args', { rgArgs })
  const rgResult = await runRipgrep(rgArgs)
  logger.debug('Ripgrep result', {
    ok: rgResult.ok,
    exitCode: rgResult.exitCode,
    stdoutLength: rgResult.stdout.length,
    stdoutPreview: rgResult.stdout.slice(0, 500)
  })

  // Process results if we have stdout content
  // Exit code 2 can indicate partial errors (e.g., permission denied on some dirs) but still have valid results
  if (rgResult.ok && rgResult.stdout.length > 0) {
    const lines = rgResult.stdout.split('\n').filter(Boolean)
    logger.debug('Parsed lines from ripgrep', { lineCount: lines.length, lines })

    for (const line of lines) {
      if (files.length >= MAX_FILES_LIMIT) {
        truncated = true
        break
      }

      const filePath = line.trim()
      if (!filePath) continue

      const absolutePath = path.isAbsolute(filePath) ? filePath : path.resolve(validPath, filePath)

      try {
        const stats = await fs.stat(absolutePath)
        files.push({
          path: absolutePath,
          type: 'file', // ripgrep --files only returns files
          size: stats.size,
          modified: stats.mtime
        })
      } catch (error) {
        logger.debug('Failed to stat file from ripgrep output, skipping', { file: absolutePath, error })
      }
    }
  }

  // Sort by modification time (newest first)
  files.sort((a, b) => {
    const aTime = a.modified ? a.modified.getTime() : 0
    const bTime = b.modified ? b.modified.getTime() : 0
    return bTime - aTime
  })

  // Format output - always use absolute paths
  const output: string[] = []
  if (files.length === 0) {
    output.push(`No files found matching pattern "${parsed.data.pattern}" in ${validPath}`)
  } else {
    output.push(...files.map((f) => f.path))
    if (truncated) {
      output.push('')
      output.push(`(Results truncated to ${MAX_FILES_LIMIT} files. Consider using a more specific pattern.)`)
    }
  }

  return {
    content: [
      {
        type: 'text',
        text: output.join('\n')
      }
    ]
  }
}
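Since the handler shells out to ripgrep's --files mode rather than using a JavaScript glob library, pattern semantics follow ripgrep's --glob flag. A usage sketch with a hypothetical workspace, inside an async context:

import { handleGlobTool } from './glob'

// Find TypeScript sources at any depth, sorted newest first
const res = await handleGlobTool({ pattern: '**/*.ts' }, '/tmp/workspace')
console.log(res.content[0].text) // absolute paths, capped at MAX_FILES_LIMIT (100)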
266
src/main/mcpServers/filesystem/tools/grep.ts
Normal file
@@ -0,0 +1,266 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import type { GrepMatch } from '../types'
import { isBinaryFile, MAX_GREP_MATCHES, MAX_LINE_LENGTH, runRipgrep, validatePath } from '../types'

// Schema definition
export const GrepToolSchema = z.object({
  pattern: z.string().describe('The regex pattern to search for in file contents'),
  path: z
    .string()
    .optional()
    .describe('The directory to search in (must be absolute path). Defaults to the base directory'),
  include: z.string().optional().describe('File pattern to include in the search (e.g. "*.js", "*.{ts,tsx}")')
})

// Tool definition with detailed description
export const grepToolDefinition = {
  name: 'grep',
  description: `Fast content search tool that works with any codebase size.

- Searches file contents using regular expressions
- Supports full regex syntax (e.g., "log.*Error", "function\\s+\\w+")
- Filter files by pattern with include (e.g., "*.js", "*.{ts,tsx}")
- Returns absolute file paths and line numbers with matching content
- Results are limited to 100 matches
- Binary files are automatically skipped
- Common directories (node_modules, .git, dist) are excluded
- The path parameter must be an absolute path if specified
- If path is not specified, defaults to the base directory`,
  inputSchema: z.toJSONSchema(GrepToolSchema)
}

// Handler implementation
export async function handleGrepTool(args: unknown, baseDir: string) {
  const parsed = GrepToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for grep: ${parsed.error}`)
  }

  const data = parsed.data

  if (!data.pattern) {
    throw new Error('Pattern is required for grep')
  }

  const searchPath = data.path || baseDir
  const validPath = await validatePath(searchPath, baseDir)

  const matches: GrepMatch[] = []
  let truncated = false
  let regex: RegExp

  // Build ripgrep arguments
  const rgArgs: string[] = [
    '--no-heading',
    '--line-number',
    '--color',
    'never',
    '--ignore-case',
    '--glob',
    '!.git/**',
    '--glob',
    '!node_modules/**',
    '--glob',
    '!dist/**',
    '--glob',
    '!build/**',
    '--glob',
    '!__pycache__/**'
  ]

  if (data.include) {
    for (const pat of data.include
      .split(',')
      .map((p) => p.trim())
      .filter(Boolean)) {
      rgArgs.push('--glob', pat)
    }
  }

  rgArgs.push(data.pattern)
  rgArgs.push(validPath)

  try {
    regex = new RegExp(data.pattern, 'gi')
  } catch (error) {
    throw new Error(`Invalid regex pattern: ${data.pattern}`)
  }

  async function searchFile(filePath: string): Promise<void> {
    if (matches.length >= MAX_GREP_MATCHES) {
      truncated = true
      return
    }

    try {
      // Skip binary files
      if (await isBinaryFile(filePath)) {
        return
      }

      const content = await fs.readFile(filePath, 'utf-8')
      const lines = content.split('\n')

      lines.forEach((line, index) => {
        if (matches.length >= MAX_GREP_MATCHES) {
          truncated = true
          return
        }

        if (regex.test(line)) {
          // Truncate long lines
          const truncatedLine = line.length > MAX_LINE_LENGTH ? line.substring(0, MAX_LINE_LENGTH) + '...' : line

          matches.push({
            file: filePath,
            line: index + 1,
            content: truncatedLine.trim()
          })
        }
      })
    } catch (error) {
      // Skip files we can't read
    }
  }

  async function searchDirectory(dir: string): Promise<void> {
    if (matches.length >= MAX_GREP_MATCHES) {
      truncated = true
      return
    }

    try {
      const entries = await fs.readdir(dir, { withFileTypes: true })

      for (const entry of entries) {
        if (matches.length >= MAX_GREP_MATCHES) {
          truncated = true
          break
        }

        const fullPath = path.join(dir, entry.name)

        // Skip common ignore patterns
        if (entry.name.startsWith('.') && entry.name !== '.env.example') {
          continue
        }
        if (['node_modules', 'dist', 'build', '__pycache__', '.git'].includes(entry.name)) {
          continue
        }

        if (entry.isFile()) {
          // Check if file matches include pattern
          if (data.include) {
            const includePatterns = data.include.split(',').map((p) => p.trim())
            const fileName = path.basename(fullPath)
            const matchesInclude = includePatterns.some((pattern) => {
              // Simple glob pattern matching
              const regexPattern = pattern
                .replace(/\*/g, '.*')
                .replace(/\?/g, '.')
                .replace(/\{([^}]+)\}/g, (_, group) => `(${group.split(',').join('|')})`)
              return new RegExp(`^${regexPattern}$`).test(fileName)
            })
            if (!matchesInclude) {
              continue
            }
          }

          await searchFile(fullPath)
        } else if (entry.isDirectory()) {
          await searchDirectory(fullPath)
        }
      }
    } catch (error) {
      // Skip directories we can't read
    }
  }

  // Perform the search
  let usedRipgrep = false
  try {
    const rgResult = await runRipgrep(rgArgs)
    if (rgResult.ok && rgResult.exitCode !== null && rgResult.exitCode !== 2) {
      usedRipgrep = true
      const lines = rgResult.stdout.split('\n').filter(Boolean)
      for (const line of lines) {
        if (matches.length >= MAX_GREP_MATCHES) {
          truncated = true
          break
        }

        const firstColon = line.indexOf(':')
        const secondColon = line.indexOf(':', firstColon + 1)
        if (firstColon === -1 || secondColon === -1) continue

        const filePart = line.slice(0, firstColon)
        const linePart = line.slice(firstColon + 1, secondColon)
        const contentPart = line.slice(secondColon + 1)
        const lineNum = Number.parseInt(linePart, 10)
        if (!Number.isFinite(lineNum)) continue

        const absoluteFilePath = path.isAbsolute(filePart) ? filePart : path.resolve(baseDir, filePart)
        const truncatedLine =
          contentPart.length > MAX_LINE_LENGTH ? contentPart.substring(0, MAX_LINE_LENGTH) + '...' : contentPart

        matches.push({
          file: absoluteFilePath,
          line: lineNum,
          content: truncatedLine.trim()
        })
      }
    }
  } catch {
    usedRipgrep = false
  }

  if (!usedRipgrep) {
    const stats = await fs.stat(validPath)
    if (stats.isFile()) {
      await searchFile(validPath)
    } else {
      await searchDirectory(validPath)
    }
  }

  // Format output
  const output: string[] = []

  if (matches.length === 0) {
    output.push('No matches found')
  } else {
    // Group matches by file
    const fileGroups = new Map<string, GrepMatch[]>()
    matches.forEach((match) => {
      if (!fileGroups.has(match.file)) {
        fileGroups.set(match.file, [])
      }
      fileGroups.get(match.file)!.push(match)
    })

    // Format grouped matches - always use absolute paths
    fileGroups.forEach((fileMatches, filePath) => {
      output.push(`\n${filePath}:`)
      fileMatches.forEach((match) => {
        output.push(`  ${match.line}: ${match.content}`)
      })
    })

    if (truncated) {
      output.push('')
      output.push(`(Results truncated to ${MAX_GREP_MATCHES} matches. Consider using a more specific pattern or path.)`)
    }
  }

  return {
    content: [
      {
        type: 'text',
        text: output.join('\n')
      }
    ]
  }
}
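Note the two-tier strategy above: ripgrep output in file:line:content form is parsed first, and the hand-rolled directory walk only runs when ripgrep is unavailable or fails. A usage sketch with hypothetical values, inside an async context:

import { handleGrepTool } from './grep'

const res = await handleGrepTool(
  { pattern: 'TODO', include: '*.ts,*.tsx' }, // a comma-separated include expands into multiple --glob flags
  '/tmp/workspace'
)
console.log(res.content[0].text) // matches grouped per file, capped at MAX_GREP_MATCHES (100)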
8
src/main/mcpServers/filesystem/tools/index.ts
Normal file
@@ -0,0 +1,8 @@
// Export all tool definitions and handlers
export { deleteToolDefinition, handleDeleteTool } from './delete'
export { editToolDefinition, handleEditTool } from './edit'
export { globToolDefinition, handleGlobTool } from './glob'
export { grepToolDefinition, handleGrepTool } from './grep'
export { handleLsTool, lsToolDefinition } from './ls'
export { handleReadTool, readToolDefinition } from './read'
export { handleWriteTool, writeToolDefinition } from './write'
150
src/main/mcpServers/filesystem/tools/ls.ts
Normal file
@@ -0,0 +1,150 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { MAX_FILES_LIMIT, validatePath } from '../types'

// Schema definition
export const LsToolSchema = z.object({
  path: z.string().optional().describe('The directory to list (must be absolute path). Defaults to the base directory'),
  recursive: z.boolean().optional().describe('Whether to list directories recursively (default: false)')
})

// Tool definition with detailed description
export const lsToolDefinition = {
  name: 'ls',
  description: `Lists files and directories in a specified path.

- Returns a tree-like structure with icons (📁 directories, 📄 files)
- Shows the absolute directory path in the header
- Entries are sorted alphabetically with directories first
- Can list recursively with recursive=true (up to 5 levels deep)
- Common directories (node_modules, dist, .git) are excluded
- Hidden files (starting with .) are excluded except .env.example
- Results are limited to 100 entries
- The path parameter must be an absolute path if specified
- If path is not specified, defaults to the base directory`,
  inputSchema: z.toJSONSchema(LsToolSchema)
}

// Handler implementation
export async function handleLsTool(args: unknown, baseDir: string) {
  const parsed = LsToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for ls: ${parsed.error}`)
  }

  const targetPath = parsed.data.path || baseDir
  const validPath = await validatePath(targetPath, baseDir)
  const recursive = parsed.data.recursive || false

  interface TreeNode {
    name: string
    type: 'file' | 'directory'
    children?: TreeNode[]
  }

  let fileCount = 0
  let truncated = false

  async function buildTree(dirPath: string, depth: number = 0): Promise<TreeNode[]> {
    if (fileCount >= MAX_FILES_LIMIT) {
      truncated = true
      return []
    }

    try {
      const entries = await fs.readdir(dirPath, { withFileTypes: true })
      const nodes: TreeNode[] = []

      // Sort entries: directories first, then files, alphabetically
      entries.sort((a, b) => {
        if (a.isDirectory() && !b.isDirectory()) return -1
        if (!a.isDirectory() && b.isDirectory()) return 1
        return a.name.localeCompare(b.name)
      })

      for (const entry of entries) {
        if (fileCount >= MAX_FILES_LIMIT) {
          truncated = true
          break
        }

        // Skip hidden files and common ignore patterns
        if (entry.name.startsWith('.') && entry.name !== '.env.example') {
          continue
        }
        if (['node_modules', 'dist', 'build', '__pycache__'].includes(entry.name)) {
          continue
        }

        fileCount++
        const node: TreeNode = {
          name: entry.name,
          type: entry.isDirectory() ? 'directory' : 'file'
        }

        if (entry.isDirectory() && recursive && depth < 5) {
          // Limit depth to prevent infinite recursion
          const childPath = path.join(dirPath, entry.name)
          node.children = await buildTree(childPath, depth + 1)
        }

        nodes.push(node)
      }

      return nodes
    } catch (error) {
      return []
    }
  }

  // Build the tree
  const tree = await buildTree(validPath)

  // Format as text output
  function formatTree(nodes: TreeNode[], prefix: string = ''): string[] {
    const lines: string[] = []

    nodes.forEach((node, index) => {
      const isLastNode = index === nodes.length - 1
      const connector = isLastNode ? '└── ' : '├── '
      const icon = node.type === 'directory' ? '📁 ' : '📄 '

      lines.push(prefix + connector + icon + node.name)

      if (node.children && node.children.length > 0) {
        const childPrefix = prefix + (isLastNode ? '    ' : '│   ')
        lines.push(...formatTree(node.children, childPrefix))
      }
    })

    return lines
  }

  // Generate output
  const output: string[] = []
  output.push(`Directory: ${validPath}`)
  output.push('')

  if (tree.length === 0) {
    output.push('(empty directory)')
  } else {
    const treeLines = formatTree(tree, '')
    output.push(...treeLines)

    if (truncated) {
      output.push('')
      output.push(`(Results truncated to ${MAX_FILES_LIMIT} files. Consider listing a more specific directory.)`)
    }
  }

  return {
    content: [
      {
        type: 'text',
        text: output.join('\n')
      }
    ]
  }
}
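A usage sketch plus the shape of the rendered tree; the directory contents shown in the comments are hypothetical:

import { handleLsTool } from './ls'

const res = await handleLsTool({ recursive: true }, '/tmp/workspace')
console.log(res.content[0].text)
// Directory: /tmp/workspace
//
// ├── 📁 src
// │   └── 📄 index.ts
// └── 📄 README.md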
101
src/main/mcpServers/filesystem/tools/read.ts
Normal file
@@ -0,0 +1,101 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { DEFAULT_READ_LIMIT, isBinaryFile, MAX_LINE_LENGTH, validatePath } from '../types'

// Schema definition
export const ReadToolSchema = z.object({
  file_path: z.string().describe('The path to the file to read'),
  offset: z.number().optional().describe('The line number to start reading from (1-based)'),
  limit: z.number().optional().describe('The number of lines to read (defaults to 2000)')
})

// Tool definition with detailed description
export const readToolDefinition = {
  name: 'read',
  description: `Reads a file from the local filesystem.

- Assumes this tool can read all files on the machine
- The file_path parameter must be an absolute path, not a relative path
- By default, reads up to 2000 lines starting from the beginning
- You can optionally specify a line offset and limit for long files
- Any lines longer than 2000 characters will be truncated
- Results are returned with line numbers starting at 1
- Binary files are detected and rejected with an error
- Empty files return a warning`,
  inputSchema: z.toJSONSchema(ReadToolSchema)
}

// Handler implementation
export async function handleReadTool(args: unknown, baseDir: string) {
  const parsed = ReadToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for read: ${parsed.error}`)
  }

  const filePath = parsed.data.file_path
  const validPath = await validatePath(filePath, baseDir)

  // Check if file exists
  try {
    const stats = await fs.stat(validPath)
    if (!stats.isFile()) {
      throw new Error(`Path is not a file: ${filePath}`)
    }
  } catch (error: any) {
    if (error.code === 'ENOENT') {
      throw new Error(`File not found: ${filePath}`)
    }
    throw error
  }

  // Check if file is binary
  if (await isBinaryFile(validPath)) {
    throw new Error(`Cannot read binary file: ${filePath}`)
  }

  // Read file content
  const content = await fs.readFile(validPath, 'utf-8')
  const lines = content.split('\n')

  // Apply offset and limit
  const offset = (parsed.data.offset || 1) - 1 // Convert to 0-based
  const limit = parsed.data.limit || DEFAULT_READ_LIMIT

  if (offset < 0 || offset >= lines.length) {
    throw new Error(`Invalid offset: ${offset + 1}. File has ${lines.length} lines.`)
  }

  const selectedLines = lines.slice(offset, offset + limit)

  // Format output with line numbers and truncate long lines
  const output: string[] = []
  const relativePath = path.relative(baseDir, validPath)

  output.push(`File: ${relativePath}`)
  if (offset > 0 || limit < lines.length) {
    output.push(`Lines ${offset + 1} to ${Math.min(offset + limit, lines.length)} of ${lines.length}`)
  }
  output.push('')

  selectedLines.forEach((line, index) => {
    const lineNumber = offset + index + 1
    const truncatedLine = line.length > MAX_LINE_LENGTH ? line.substring(0, MAX_LINE_LENGTH) + '...' : line
    output.push(`${lineNumber.toString().padStart(6)}\t${truncatedLine}`)
  })

  if (offset + limit < lines.length) {
    output.push('')
    output.push(`(${lines.length - (offset + limit)} more lines not shown)`)
  }

  return {
    content: [
      {
        type: 'text',
        text: output.join('\n')
      }
    ]
  }
}
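The offset parameter is 1-based at the API surface and converted to 0-based internally, so offset: 100 with limit: 50 returns lines 100 through 149. A sketch with a hypothetical file, inside an async context:

import { handleReadTool } from './read'

const res = await handleReadTool(
  { file_path: '/tmp/workspace/big.log', offset: 100, limit: 50 },
  '/tmp/workspace'
)
console.log(res.content[0].text) // line-numbered output; lines longer than MAX_LINE_LENGTH are cut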
83
src/main/mcpServers/filesystem/tools/write.ts
Normal file
@@ -0,0 +1,83 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { logger, validatePath } from '../types'

// Schema definition
export const WriteToolSchema = z.object({
  file_path: z.string().describe('The path to the file to write'),
  content: z.string().describe('The content to write to the file')
})

// Tool definition with detailed description
export const writeToolDefinition = {
  name: 'write',
  description: `Writes a file to the local filesystem.

- This tool will overwrite the existing file if one exists at the path
- You MUST use the read tool first to understand what you're overwriting
- ALWAYS prefer using the 'edit' tool for existing files
- NEVER proactively create documentation files unless explicitly requested
- Parent directories will be created automatically if they don't exist
- The file_path must be an absolute path, not a relative path`,
  inputSchema: z.toJSONSchema(WriteToolSchema)
}

// Handler implementation
export async function handleWriteTool(args: unknown, baseDir: string) {
  const parsed = WriteToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for write: ${parsed.error}`)
  }

  const filePath = parsed.data.file_path
  const validPath = await validatePath(filePath, baseDir)

  // Create parent directory if it doesn't exist
  const parentDir = path.dirname(validPath)
  try {
    await fs.mkdir(parentDir, { recursive: true })
  } catch (error: any) {
    if (error.code !== 'EEXIST') {
      throw new Error(`Failed to create parent directory: ${error.message}`)
    }
  }

  // Check if file exists (for logging)
  let isOverwrite = false
  try {
    await fs.stat(validPath)
    isOverwrite = true
  } catch {
    // File doesn't exist, that's fine
  }

  // Write the file
  try {
    await fs.writeFile(validPath, parsed.data.content, 'utf-8')
  } catch (error: any) {
    throw new Error(`Failed to write file: ${error.message}`)
  }

  // Log the operation
  logger.info('File written', {
    path: validPath,
    overwrite: isOverwrite,
    size: parsed.data.content.length
  })

  // Format output
  const relativePath = path.relative(baseDir, validPath)
  const action = isOverwrite ? 'Updated' : 'Created'
  const lines = parsed.data.content.split('\n').length

  return {
    content: [
      {
        type: 'text',
        text: `${action} file: ${relativePath}\n` + `Size: ${parsed.data.content.length} bytes\n` + `Lines: ${lines}`
      }
    ]
  }
}
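A sketch of a write round trip with hypothetical paths, inside an async context; the handler reports 'Created' vs 'Updated' based on the pre-write stat shown above:

import { handleWriteTool } from './write'

const res = await handleWriteTool(
  { file_path: '/tmp/workspace/notes/today.md', content: '# Notes\n' }, // parent dirs are auto-created
  '/tmp/workspace'
)
console.log(res.content[0].text) // e.g. 'Created file: notes/today.md' plus size and line count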
627
src/main/mcpServers/filesystem/types.ts
Normal file
@@ -0,0 +1,627 @@
import { loggerService } from '@logger'
import { isMac, isWin } from '@main/constant'
import { spawn } from 'child_process'
import fs from 'fs/promises'
import os from 'os'
import path from 'path'

export const logger = loggerService.withContext('MCP:FileSystemServer')

// Constants
export const MAX_LINE_LENGTH = 2000
export const DEFAULT_READ_LIMIT = 2000
export const MAX_FILES_LIMIT = 100
export const MAX_GREP_MATCHES = 100

// Common types
export interface FileInfo {
  path: string
  type: 'file' | 'directory'
  size?: number
  modified?: Date
}

export interface GrepMatch {
  file: string
  line: number
  content: string
}

// Utility functions for path handling
export function normalizePath(p: string): string {
  return path.normalize(p)
}

export function expandHome(filepath: string): string {
  if (filepath.startsWith('~/') || filepath === '~') {
    return path.join(os.homedir(), filepath.slice(1))
  }
  return filepath
}

// Security validation
export async function validatePath(requestedPath: string, baseDir?: string): Promise<string> {
  const expandedPath = expandHome(requestedPath)
  const root = baseDir ?? process.cwd()
  const absolute = path.isAbsolute(expandedPath) ? path.resolve(expandedPath) : path.resolve(root, expandedPath)

  // Handle symlinks by checking their real path
  try {
    const realPath = await fs.realpath(absolute)
    return normalizePath(realPath)
  } catch (error) {
    // For new files that don't exist yet, verify parent directory
    const parentDir = path.dirname(absolute)
    try {
      const realParentPath = await fs.realpath(parentDir)
      normalizePath(realParentPath)
      return normalizePath(absolute)
    } catch {
      return normalizePath(absolute)
    }
  }
}

// ============================================================================
// Edit Tool Utilities - Fuzzy matching replacers from opencode
// ============================================================================

export type Replacer = (content: string, find: string) => Generator<string, void, unknown>

// Similarity thresholds for block anchor fallback matching
const SINGLE_CANDIDATE_SIMILARITY_THRESHOLD = 0.0
const MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD = 0.3

/**
 * Levenshtein distance algorithm implementation
 */
function levenshtein(a: string, b: string): number {
  if (a === '' || b === '') {
    return Math.max(a.length, b.length)
  }
  const matrix = Array.from({ length: a.length + 1 }, (_, i) =>
    Array.from({ length: b.length + 1 }, (_, j) => (i === 0 ? j : j === 0 ? i : 0))
  )

  for (let i = 1; i <= a.length; i++) {
    for (let j = 1; j <= b.length; j++) {
      const cost = a[i - 1] === b[j - 1] ? 0 : 1
      matrix[i][j] = Math.min(matrix[i - 1][j] + 1, matrix[i][j - 1] + 1, matrix[i - 1][j - 1] + cost)
    }
  }
  return matrix[a.length][b.length]
}

export const SimpleReplacer: Replacer = function* (_content, find) {
  yield find
}

export const LineTrimmedReplacer: Replacer = function* (content, find) {
  const originalLines = content.split('\n')
  const searchLines = find.split('\n')

  if (searchLines[searchLines.length - 1] === '') {
    searchLines.pop()
  }

  for (let i = 0; i <= originalLines.length - searchLines.length; i++) {
    let matches = true

    for (let j = 0; j < searchLines.length; j++) {
      const originalTrimmed = originalLines[i + j].trim()
      const searchTrimmed = searchLines[j].trim()

      if (originalTrimmed !== searchTrimmed) {
        matches = false
        break
      }
    }

    if (matches) {
      let matchStartIndex = 0
      for (let k = 0; k < i; k++) {
        matchStartIndex += originalLines[k].length + 1
      }

      let matchEndIndex = matchStartIndex
      for (let k = 0; k < searchLines.length; k++) {
        matchEndIndex += originalLines[i + k].length
        if (k < searchLines.length - 1) {
          matchEndIndex += 1
        }
      }

      yield content.substring(matchStartIndex, matchEndIndex)
    }
  }
}

export const BlockAnchorReplacer: Replacer = function* (content, find) {
  const originalLines = content.split('\n')
  const searchLines = find.split('\n')

  if (searchLines.length < 3) {
    return
  }

  if (searchLines[searchLines.length - 1] === '') {
    searchLines.pop()
  }

  const firstLineSearch = searchLines[0].trim()
  const lastLineSearch = searchLines[searchLines.length - 1].trim()
  const searchBlockSize = searchLines.length

  const candidates: Array<{ startLine: number; endLine: number }> = []
  for (let i = 0; i < originalLines.length; i++) {
    if (originalLines[i].trim() !== firstLineSearch) {
      continue
    }

    for (let j = i + 2; j < originalLines.length; j++) {
      if (originalLines[j].trim() === lastLineSearch) {
        candidates.push({ startLine: i, endLine: j })
        break
      }
    }
  }

  if (candidates.length === 0) {
    return
  }

  if (candidates.length === 1) {
    const { startLine, endLine } = candidates[0]
    const actualBlockSize = endLine - startLine + 1

    let similarity = 0
    const linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2)

    if (linesToCheck > 0) {
      for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
        const originalLine = originalLines[startLine + j].trim()
        const searchLine = searchLines[j].trim()
        const maxLen = Math.max(originalLine.length, searchLine.length)
        if (maxLen === 0) {
          continue
        }
        const distance = levenshtein(originalLine, searchLine)
        similarity += (1 - distance / maxLen) / linesToCheck

        if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
          break
        }
      }
    } else {
      similarity = 1.0
    }

    if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
      let matchStartIndex = 0
      for (let k = 0; k < startLine; k++) {
        matchStartIndex += originalLines[k].length + 1
      }
      let matchEndIndex = matchStartIndex
      for (let k = startLine; k <= endLine; k++) {
        matchEndIndex += originalLines[k].length
        if (k < endLine) {
          matchEndIndex += 1
        }
      }
      yield content.substring(matchStartIndex, matchEndIndex)
    }
    return
  }

  let bestMatch: { startLine: number; endLine: number } | null = null
  let maxSimilarity = -1

  for (const candidate of candidates) {
    const { startLine, endLine } = candidate
    const actualBlockSize = endLine - startLine + 1

    let similarity = 0
    const linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2)

    if (linesToCheck > 0) {
      for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
        const originalLine = originalLines[startLine + j].trim()
        const searchLine = searchLines[j].trim()
        const maxLen = Math.max(originalLine.length, searchLine.length)
        if (maxLen === 0) {
          continue
        }
        const distance = levenshtein(originalLine, searchLine)
        similarity += 1 - distance / maxLen
      }
      similarity /= linesToCheck
    } else {
      similarity = 1.0
    }

    if (similarity > maxSimilarity) {
      maxSimilarity = similarity
      bestMatch = candidate
    }
  }

  if (maxSimilarity >= MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD && bestMatch) {
    const { startLine, endLine } = bestMatch
    let matchStartIndex = 0
    for (let k = 0; k < startLine; k++) {
      matchStartIndex += originalLines[k].length + 1
    }
    let matchEndIndex = matchStartIndex
    for (let k = startLine; k <= endLine; k++) {
      matchEndIndex += originalLines[k].length
      if (k < endLine) {
        matchEndIndex += 1
      }
    }
    yield content.substring(matchStartIndex, matchEndIndex)
  }
}

export const WhitespaceNormalizedReplacer: Replacer = function* (content, find) {
  const normalizeWhitespace = (text: string) => text.replace(/\s+/g, ' ').trim()
  const normalizedFind = normalizeWhitespace(find)

  const lines = content.split('\n')
  for (let i = 0; i < lines.length; i++) {
    const line = lines[i]
    if (normalizeWhitespace(line) === normalizedFind) {
      yield line
    } else {
      const normalizedLine = normalizeWhitespace(line)
      if (normalizedLine.includes(normalizedFind)) {
        const words = find.trim().split(/\s+/)
        if (words.length > 0) {
          const pattern = words.map((word) => word.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')).join('\\s+')
          try {
            const regex = new RegExp(pattern)
            const match = line.match(regex)
            if (match) {
              yield match[0]
            }
          } catch {
            // Invalid regex pattern, skip
          }
        }
      }
    }
  }

  const findLines = find.split('\n')
  if (findLines.length > 1) {
    for (let i = 0; i <= lines.length - findLines.length; i++) {
      const block = lines.slice(i, i + findLines.length)
      if (normalizeWhitespace(block.join('\n')) === normalizedFind) {
        yield block.join('\n')
      }
    }
  }
}

export const IndentationFlexibleReplacer: Replacer = function* (content, find) {
  const removeIndentation = (text: string) => {
    const lines = text.split('\n')
    const nonEmptyLines = lines.filter((line) => line.trim().length > 0)
    if (nonEmptyLines.length === 0) return text

    const minIndent = Math.min(
      ...nonEmptyLines.map((line) => {
        const match = line.match(/^(\s*)/)
        return match ? match[1].length : 0
      })
    )

    return lines.map((line) => (line.trim().length === 0 ? line : line.slice(minIndent))).join('\n')
  }

  const normalizedFind = removeIndentation(find)
  const contentLines = content.split('\n')
  const findLines = find.split('\n')

  for (let i = 0; i <= contentLines.length - findLines.length; i++) {
    const block = contentLines.slice(i, i + findLines.length).join('\n')
    if (removeIndentation(block) === normalizedFind) {
      yield block
    }
  }
}

export const EscapeNormalizedReplacer: Replacer = function* (content, find) {
  const unescapeString = (str: string): string => {
    return str.replace(/\\(n|t|r|'|"|`|\\|\n|\$)/g, (match, capturedChar) => {
|
switch (capturedChar) {
|
||||||
|
case 'n':
|
||||||
|
return '\n'
|
||||||
|
case 't':
|
||||||
|
return '\t'
|
||||||
|
case 'r':
|
||||||
|
return '\r'
|
||||||
|
case "'":
|
||||||
|
return "'"
|
||||||
|
case '"':
|
||||||
|
return '"'
|
||||||
|
case '`':
|
||||||
|
return '`'
|
||||||
|
case '\\':
|
||||||
|
return '\\'
|
||||||
|
case '\n':
|
||||||
|
return '\n'
|
||||||
|
case '$':
|
||||||
|
return '$'
|
||||||
|
default:
|
||||||
|
return match
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
const unescapedFind = unescapeString(find)
|
||||||
|
|
||||||
|
if (content.includes(unescapedFind)) {
|
||||||
|
yield unescapedFind
|
||||||
|
}
|
||||||
|
|
||||||
|
const lines = content.split('\n')
|
||||||
|
const findLines = unescapedFind.split('\n')
|
||||||
|
|
||||||
|
for (let i = 0; i <= lines.length - findLines.length; i++) {
|
||||||
|
const block = lines.slice(i, i + findLines.length).join('\n')
|
||||||
|
const unescapedBlock = unescapeString(block)
|
||||||
|
|
||||||
|
if (unescapedBlock === unescapedFind) {
|
||||||
|
yield block
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export const TrimmedBoundaryReplacer: Replacer = function* (content, find) {
|
||||||
|
const trimmedFind = find.trim()
|
||||||
|
|
||||||
|
if (trimmedFind === find) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if (content.includes(trimmedFind)) {
|
||||||
|
yield trimmedFind
|
||||||
|
}
|
||||||
|
|
||||||
|
const lines = content.split('\n')
|
||||||
|
const findLines = find.split('\n')
|
||||||
|
|
||||||
|
for (let i = 0; i <= lines.length - findLines.length; i++) {
|
||||||
|
const block = lines.slice(i, i + findLines.length).join('\n')
|
||||||
|
|
||||||
|
if (block.trim() === trimmedFind) {
|
||||||
|
yield block
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export const ContextAwareReplacer: Replacer = function* (content, find) {
|
||||||
|
const findLines = find.split('\n')
|
||||||
|
if (findLines.length < 3) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if (findLines[findLines.length - 1] === '') {
|
||||||
|
findLines.pop()
|
||||||
|
}
|
||||||
|
|
||||||
|
const contentLines = content.split('\n')
|
||||||
|
|
||||||
|
const firstLine = findLines[0].trim()
|
||||||
|
const lastLine = findLines[findLines.length - 1].trim()
|
||||||
|
|
||||||
|
for (let i = 0; i < contentLines.length; i++) {
|
||||||
|
if (contentLines[i].trim() !== firstLine) continue
|
||||||
|
|
||||||
|
for (let j = i + 2; j < contentLines.length; j++) {
|
||||||
|
if (contentLines[j].trim() === lastLine) {
|
||||||
|
const blockLines = contentLines.slice(i, j + 1)
|
||||||
|
const block = blockLines.join('\n')
|
||||||
|
|
||||||
|
if (blockLines.length === findLines.length) {
|
||||||
|
let matchingLines = 0
|
||||||
|
let totalNonEmptyLines = 0
|
||||||
|
|
||||||
|
for (let k = 1; k < blockLines.length - 1; k++) {
|
||||||
|
const blockLine = blockLines[k].trim()
|
||||||
|
const findLine = findLines[k].trim()
|
||||||
|
|
||||||
|
if (blockLine.length > 0 || findLine.length > 0) {
|
||||||
|
totalNonEmptyLines++
|
||||||
|
if (blockLine === findLine) {
|
||||||
|
matchingLines++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (totalNonEmptyLines === 0 || matchingLines / totalNonEmptyLines >= 0.5) {
|
||||||
|
yield block
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export const MultiOccurrenceReplacer: Replacer = function* (content, find) {
|
||||||
|
let startIndex = 0
|
||||||
|
|
||||||
|
while (true) {
|
||||||
|
const index = content.indexOf(find, startIndex)
|
||||||
|
if (index === -1) break
|
||||||
|
|
||||||
|
yield find
|
||||||
|
startIndex = index + find.length
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* All replacers in order of specificity
|
||||||
|
*/
|
||||||
|
export const ALL_REPLACERS: Replacer[] = [
|
||||||
|
SimpleReplacer,
|
||||||
|
LineTrimmedReplacer,
|
||||||
|
BlockAnchorReplacer,
|
||||||
|
WhitespaceNormalizedReplacer,
|
||||||
|
IndentationFlexibleReplacer,
|
||||||
|
EscapeNormalizedReplacer,
|
||||||
|
TrimmedBoundaryReplacer,
|
||||||
|
ContextAwareReplacer,
|
||||||
|
MultiOccurrenceReplacer
|
||||||
|
]
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Replace oldString with newString in content using fuzzy matching
|
||||||
|
*/
|
||||||
|
export function replaceWithFuzzyMatch(
|
||||||
|
content: string,
|
||||||
|
oldString: string,
|
||||||
|
newString: string,
|
||||||
|
replaceAll = false
|
||||||
|
): string {
|
||||||
|
if (oldString === newString) {
|
||||||
|
throw new Error('old_string and new_string must be different')
|
||||||
|
}
|
||||||
|
|
||||||
|
let notFound = true
|
||||||
|
|
||||||
|
for (const replacer of ALL_REPLACERS) {
|
||||||
|
for (const search of replacer(content, oldString)) {
|
||||||
|
const index = content.indexOf(search)
|
||||||
|
if (index === -1) continue
|
||||||
|
notFound = false
|
||||||
|
if (replaceAll) {
|
||||||
|
return content.replaceAll(search, newString)
|
||||||
|
}
|
||||||
|
const lastIndex = content.lastIndexOf(search)
|
||||||
|
if (index !== lastIndex) continue
|
||||||
|
return content.substring(0, index) + newString + content.substring(index + search.length)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (notFound) {
|
||||||
|
throw new Error('old_string not found in content')
|
||||||
|
}
|
||||||
|
throw new Error(
|
||||||
|
'Found multiple matches for old_string. Provide more surrounding lines in old_string to identify the correct match.'
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
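For reference, the entry point above can be exercised directly. A minimal sketch, assuming SimpleReplacer and LineTrimmedReplacer (defined earlier in this file) find no exact or trimmed-line match here, so WhitespaceNormalizedReplacer supplies the candidate:

const content = 'const a = 1\nconst b = 2\n'
// The exact match fails (extra spaces in old_string), but whitespace normalization recovers it
const next = replaceWithFuzzyMatch(content, 'const  a  =  1', 'const a = 42')
// next === 'const a = 42\nconst b = 2\n'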
// ============================================================================
// Binary File Detection
// ============================================================================

// Check if a file is likely binary
export async function isBinaryFile(filePath: string): Promise<boolean> {
  try {
    const buffer = Buffer.alloc(4096)
    const fd = await fs.open(filePath, 'r')
    const { bytesRead } = await fd.read(buffer, 0, buffer.length, 0)
    await fd.close()

    if (bytesRead === 0) return false

    const view = buffer.subarray(0, bytesRead)

    let zeroBytes = 0
    let evenZeros = 0
    let oddZeros = 0
    let nonPrintable = 0

    for (let i = 0; i < view.length; i++) {
      const b = view[i]

      if (b === 0) {
        zeroBytes++
        if (i % 2 === 0) evenZeros++
        else oddZeros++
        continue
      }

      // treat common whitespace as printable
      if (b === 9 || b === 10 || b === 13) continue

      // basic ASCII printable range
      if (b >= 32 && b <= 126) continue

      // bytes >= 128 are likely part of UTF-8 sequences; count as printable
      if (b >= 128) continue

      nonPrintable++
    }

    // If there are lots of null bytes, it's probably binary unless it looks like UTF-16 text.
    if (zeroBytes > 0) {
      const evenSlots = Math.ceil(view.length / 2)
      const oddSlots = Math.floor(view.length / 2)
      const evenZeroRatio = evenSlots > 0 ? evenZeros / evenSlots : 0
      const oddZeroRatio = oddSlots > 0 ? oddZeros / oddSlots : 0

      // UTF-16LE/BE tends to have zeros on every other byte.
      if (evenZeroRatio > 0.7 || oddZeroRatio > 0.7) return false

      if (zeroBytes / view.length > 0.05) return true
    }

    // Heuristic: too many non-printable bytes => binary.
    return nonPrintable / view.length > 0.3
  } catch {
    return false
  }
}
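A quick sanity check of the heuristic above; the paths are illustrative and the snippet is a sketch rather than part of this module:

for (const p of ['/tmp/notes.md', '/tmp/photo.png']) {
  isBinaryFile(p).then((binary) => console.log(p, binary ? 'binary' : 'text'))
}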
// ============================================================================
// Ripgrep Utilities
// ============================================================================

export interface RipgrepResult {
  ok: boolean
  stdout: string
  exitCode: number | null
}

export function getRipgrepAddonPath(): string {
  const pkgJsonPath = require.resolve('@anthropic-ai/claude-agent-sdk/package.json')
  const pkgRoot = path.dirname(pkgJsonPath)
  const platform = isMac ? 'darwin' : isWin ? 'win32' : 'linux'
  const arch = process.arch === 'arm64' ? 'arm64' : 'x64'
  return path.join(pkgRoot, 'vendor', 'ripgrep', `${arch}-${platform}`, 'ripgrep.node')
}

export async function runRipgrep(args: string[]): Promise<RipgrepResult> {
  const addonPath = getRipgrepAddonPath()
  const childScript = `const { ripgrepMain } = require(process.env.RIPGREP_ADDON_PATH); process.exit(ripgrepMain(process.argv.slice(1)));`

  return new Promise((resolve) => {
    const child = spawn(process.execPath, ['--eval', childScript, 'rg', ...args], {
      cwd: process.cwd(),
      env: {
        ...process.env,
        ELECTRON_RUN_AS_NODE: '1',
        RIPGREP_ADDON_PATH: addonPath
      },
      stdio: ['ignore', 'pipe', 'pipe']
    })

    let stdout = ''

    child.stdout?.on('data', (chunk) => {
      stdout += chunk.toString('utf-8')
    })

    child.on('error', () => {
      resolve({ ok: false, stdout: '', exitCode: null })
    })

    child.on('close', (code) => {
      resolve({ ok: true, stdout, exitCode: code })
    })
  })
}
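A sketch of calling runRipgrep: the arguments pass straight through to ripgrep, so its standard --json flag and exit codes apply (those semantics are ripgrep's, not this module's):

// Inside an async function
const result = await runRipgrep(['--json', 'TODO', 'src/'])
if (result.ok && result.exitCode === 0) {
  // ripgrep emits one JSON event per stdout line
  const events = result.stdout.split('\n').filter(Boolean).map((line) => JSON.parse(line))
  console.log(`${events.length} match events`)
}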
@@ -31,7 +31,9 @@ export enum ConfigKeys
   DisableHardwareAcceleration = 'disableHardwareAcceleration',
   Proxy = 'proxy',
   EnableDeveloperMode = 'enableDeveloperMode',
-  ClientId = 'clientId'
+  ClientId = 'clientId',
+  GitBashPath = 'gitBashPath',
+  GitBashPathSource = 'gitBashPathSource' // 'manual' | 'auto' | null
 }

 export class ConfigManager {
@@ -151,6 +151,7 @@ class FileStorage
   private currentWatchPath?: string
   private debounceTimer?: NodeJS.Timeout
   private watcherConfig: Required<FileWatcherConfig> = DEFAULT_WATCHER_CONFIG
+  private isPaused = false

   constructor() {
     this.initStorageDir()

@@ -162,7 +163,7 @@ class FileStorage
       fs.mkdirSync(this.storageDir, { recursive: true })
     }
     if (!fs.existsSync(this.notesDir)) {
-      fs.mkdirSync(this.storageDir, { recursive: true })
+      fs.mkdirSync(this.notesDir, { recursive: true })
     }
     if (!fs.existsSync(this.tempDir)) {
       fs.mkdirSync(this.tempDir, { recursive: true })

@@ -478,13 +479,16 @@ class FileStorage
     }
   }

-  public readFile = async (
-    _: Electron.IpcMainInvokeEvent,
-    id: string,
-    detectEncoding: boolean = false
-  ): Promise<string> => {
-    const filePath = path.join(this.storageDir, id)
+  /**
+   * Core file reading logic that handles both documents and text files.
+   *
+   * @private
+   * @param filePath - Full path to the file
+   * @param detectEncoding - Whether to auto-detect text file encoding
+   * @returns Promise resolving to the extracted text content
+   * @throws Error if file reading fails
+   */
+  private async readFileCore(filePath: string, detectEncoding: boolean = false): Promise<string> {
     const fileExtension = path.extname(filePath)

     if (documentExts.includes(fileExtension)) {

@@ -504,7 +508,7 @@ class FileStorage
         return data
       } catch (error) {
         chdir(originalCwd)
-        logger.error('Failed to read file:', error as Error)
+        logger.error('Failed to read document file:', error as Error)
         throw error
       }
     }

@@ -516,11 +520,72 @@ class FileStorage
         return fs.readFileSync(filePath, 'utf-8')
       }
     } catch (error) {
-      logger.error('Failed to read file:', error as Error)
+      logger.error('Failed to read text file:', error as Error)
       throw new Error(`Failed to read file: ${filePath}.`)
     }
   }

+  /**
+   * Reads and extracts content from a stored file.
+   *
+   * Supports multiple file formats including:
+   * - Complex documents: .pdf, .doc, .docx, .pptx, .xlsx, .odt, .odp, .ods
+   * - Text files: .txt, .md, .json, .csv, etc.
+   * - Code files: .js, .ts, .py, .java, etc.
+   *
+   * For document formats, extracts text content using specialized parsers:
+   * - .doc files: Uses word-extractor library
+   * - Other Office formats: Uses officeparser library
+   *
+   * For text files, can optionally detect encoding automatically.
+   *
+   * @param _ - Electron IPC invoke event (unused)
+   * @param id - File identifier with extension (e.g., "uuid.docx")
+   * @param detectEncoding - Whether to auto-detect text file encoding (default: false)
+   * @returns Promise resolving to the extracted text content of the file
+   * @throws Error if file reading fails or file is not found
+   *
+   * @example
+   * // Read a DOCX file
+   * const content = await readFile(event, "document.docx");
+   *
+   * @example
+   * // Read a text file with encoding detection
+   * const content = await readFile(event, "text.txt", true);
+   *
+   * @example
+   * // Read a PDF file
+   * const content = await readFile(event, "manual.pdf");
+   */
+  public readFile = async (
+    _: Electron.IpcMainInvokeEvent,
+    id: string,
+    detectEncoding: boolean = false
+  ): Promise<string> => {
+    const filePath = path.join(this.storageDir, id)
+    return this.readFileCore(filePath, detectEncoding)
+  }
+
+  /**
+   * Reads and extracts content from an external file path.
+   *
+   * Similar to readFile, but operates on external file paths instead of stored files.
+   * Supports the same file formats including complex documents and text files.
+   *
+   * @param _ - Electron IPC invoke event (unused)
+   * @param filePath - Absolute path to the external file
+   * @param detectEncoding - Whether to auto-detect text file encoding (default: false)
+   * @returns Promise resolving to the extracted text content of the file
+   * @throws Error if file does not exist or reading fails
+   *
+   * @example
+   * // Read an external DOCX file
+   * const content = await readExternalFile(event, "/path/to/document.docx");
+   *
+   * @example
+   * // Read an external text file with encoding detection
+   * const content = await readExternalFile(event, "/path/to/text.txt", true);
+   */
   public readExternalFile = async (
     _: Electron.IpcMainInvokeEvent,
     filePath: string,

@@ -530,40 +595,7 @@ class FileStorage
       throw new Error(`File does not exist: ${filePath}`)
     }

-    const fileExtension = path.extname(filePath)
-
-    if (documentExts.includes(fileExtension)) {
-      const originalCwd = process.cwd()
-      try {
-        chdir(this.tempDir)
-
-        if (fileExtension === '.doc') {
-          const extractor = new WordExtractor()
-          const extracted = await extractor.extract(filePath)
-          chdir(originalCwd)
-          return extracted.getBody()
-        }
-
-        const data = await officeParser.parseOfficeAsync(filePath)
-        chdir(originalCwd)
-        return data
-      } catch (error) {
-        chdir(originalCwd)
-        logger.error('Failed to read file:', error as Error)
-        throw error
-      }
-    }
-
-    try {
-      if (detectEncoding) {
-        return readTextFileWithAutoEncoding(filePath)
-      } else {
-        return fs.readFileSync(filePath, 'utf-8')
-      }
-    } catch (error) {
-      logger.error('Failed to read file:', error as Error)
-      throw new Error(`Failed to read file: ${filePath}.`)
-    }
+    return this.readFileCore(filePath, detectEncoding)
   }

   public createTempFile = async (_: Electron.IpcMainInvokeEvent, fileName: string): Promise<string> => {

@@ -1448,6 +1480,12 @@ class FileStorage
   private createChangeHandler() {
     return (eventType: string, filePath: string) => {
+      // Skip processing if watcher is paused
+      if (this.isPaused) {
+        logger.debug('File change ignored (watcher paused)', { eventType, filePath })
+        return
+      }
+
       if (!this.shouldWatchFile(filePath, eventType)) {
         return
       }

@@ -1605,6 +1643,165 @@ class FileStorage
       logger.error('Failed to show item in folder:', error as Error)
     }
   }

+  /**
+   * Batch upload markdown files from native File objects
+   * This handles all I/O operations in the Main process to avoid blocking Renderer
+   */
+  public batchUploadMarkdownFiles = async (
+    _: Electron.IpcMainInvokeEvent,
+    filePaths: string[],
+    targetPath: string
+  ): Promise<{
+    fileCount: number
+    folderCount: number
+    skippedFiles: number
+  }> => {
+    try {
+      logger.info('Starting batch upload', { fileCount: filePaths.length, targetPath })
+
+      const basePath = path.resolve(targetPath)
+      const MARKDOWN_EXTS = ['.md', '.markdown']
+
+      // Filter markdown files
+      const markdownFiles = filePaths.filter((filePath) => {
+        const ext = path.extname(filePath).toLowerCase()
+        return MARKDOWN_EXTS.includes(ext)
+      })
+
+      const skippedFiles = filePaths.length - markdownFiles.length
+
+      if (markdownFiles.length === 0) {
+        return { fileCount: 0, folderCount: 0, skippedFiles }
+      }
+
+      // Collect unique folders needed
+      const foldersSet = new Set<string>()
+      const fileOperations: Array<{ sourcePath: string; targetPath: string }> = []
+
+      for (const filePath of markdownFiles) {
+        try {
+          // Get relative path if file is from a directory upload
+          const fileName = path.basename(filePath)
+          const relativePath = path.dirname(filePath)
+
+          // Determine target directory structure
+          let targetDir = basePath
+          const folderParts: string[] = []
+
+          // Extract folder structure from file path for nested uploads
+          // This is a simplified version - in real scenario we'd need the original directory structure
+          if (relativePath && relativePath !== '.') {
+            const parts = relativePath.split(path.sep)
+            // Get the last few parts that represent the folder structure within upload
+            const relevantParts = parts.slice(Math.max(0, parts.length - 3))
+            folderParts.push(...relevantParts)
+          }
+
+          // Build target directory path
+          for (const part of folderParts) {
+            targetDir = path.join(targetDir, part)
+            foldersSet.add(targetDir)
+          }
+
+          // Determine final file name
+          const nameWithoutExt = fileName.endsWith('.md')
+            ? fileName.slice(0, -3)
+            : fileName.endsWith('.markdown')
+              ? fileName.slice(0, -9)
+              : fileName
+
+          const { safeName } = await this.fileNameGuard(_, targetDir, nameWithoutExt, true)
+          const finalPath = path.join(targetDir, safeName + '.md')
+
+          fileOperations.push({ sourcePath: filePath, targetPath: finalPath })
+        } catch (error) {
+          logger.error('Failed to prepare file operation:', error as Error, { filePath })
+        }
+      }
+
+      // Create folders in order (shallow to deep)
+      const sortedFolders = Array.from(foldersSet).sort((a, b) => a.length - b.length)
+      for (const folder of sortedFolders) {
+        try {
+          if (!fs.existsSync(folder)) {
+            await fs.promises.mkdir(folder, { recursive: true })
+          }
+        } catch (error) {
+          logger.debug('Folder already exists or creation failed', { folder, error: (error as Error).message })
+        }
+      }
+
+      // Process files in batches
+      const BATCH_SIZE = 10 // Higher batch size since we're in Main process
+      let successCount = 0
+
+      for (let i = 0; i < fileOperations.length; i += BATCH_SIZE) {
+        const batch = fileOperations.slice(i, i + BATCH_SIZE)
+
+        const results = await Promise.allSettled(
+          batch.map(async (op) => {
+            // Read from source and write to target in Main process
+            const content = await fs.promises.readFile(op.sourcePath, 'utf-8')
+            await fs.promises.writeFile(op.targetPath, content, 'utf-8')
+            return true
+          })
+        )

+        results.forEach((result, index) => {
+          if (result.status === 'fulfilled') {
+            successCount++
+          } else {
+            logger.error('Failed to upload file:', result.reason, {
+              file: batch[index].sourcePath
+            })
+          }
+        })
+      }
+
+      logger.info('Batch upload completed', {
+        successCount,
+        folderCount: foldersSet.size,
+        skippedFiles
+      })
+
+      return {
+        fileCount: successCount,
+        folderCount: foldersSet.size,
+        skippedFiles
+      }
+    } catch (error) {
+      logger.error('Batch upload failed:', error as Error)
+      throw error
+    }
+  }
+
+  /**
+   * Pause file watcher to prevent events during batch operations
+   */
+  public pauseFileWatcher = async (): Promise<void> => {
+    if (this.watcher) {
+      logger.debug('Pausing file watcher')
+      this.isPaused = true
+      // Clear any pending debounced notifications
+      if (this.debounceTimer) {
+        clearTimeout(this.debounceTimer)
+        this.debounceTimer = undefined
+      }
+    }
+  }
+
+  /**
+   * Resume file watcher and trigger a refresh
+   */
+  public resumeFileWatcher = async (): Promise<void> => {
+    if (this.watcher && this.currentWatchPath) {
+      logger.debug('Resuming file watcher')
+      this.isPaused = false
+      // Send a synthetic refresh event to trigger tree reload
+      this.notifyChange('refresh', this.currentWatchPath)
+    }
+  }
 }

 export const fileStorage = new FileStorage()
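The pause/resume pair exists so the batch copy does not flood the renderer with change events. A sketch of the intended sequence, calling the exported singleton directly and stubbing the unused IPC event argument (real callers go through ipcMain.handle):

const evt = undefined as unknown as Electron.IpcMainInvokeEvent
await fileStorage.pauseFileWatcher()
try {
  const stats = await fileStorage.batchUploadMarkdownFiles(evt, ['/tmp/a.md', '/tmp/sub/b.markdown'], '/notes')
  console.log(`uploaded ${stats.fileCount}, folders ${stats.folderCount}, skipped ${stats.skippedFiles}`)
} finally {
  await fileStorage.resumeFileWatcher()
}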
@@ -12,6 +12,7 @@ import { TraceMethod, withSpanFunc } from '@mcp-trace/trace-core'
 import { Client } from '@modelcontextprotocol/sdk/client/index.js'
 import type { SSEClientTransportOptions } from '@modelcontextprotocol/sdk/client/sse.js'
 import { SSEClientTransport } from '@modelcontextprotocol/sdk/client/sse.js'
+import type { StdioServerParameters } from '@modelcontextprotocol/sdk/client/stdio.js'
 import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js'
 import {
   StreamableHTTPClientTransport,

@@ -32,6 +33,7 @@ import {
 import { nanoid } from '@reduxjs/toolkit'
 import { HOME_CHERRY_DIR } from '@shared/config/constant'
 import type { MCPProgressEvent } from '@shared/config/types'
+import type { MCPServerLogEntry } from '@shared/config/types'
 import { IpcChannel } from '@shared/IpcChannel'
 import { defaultAppHeaders } from '@shared/utils'
 import {

@@ -42,16 +44,20 @@ import {
   type MCPPrompt,
   type MCPResource,
   type MCPServer,
-  type MCPTool
+  type MCPTool,
+  MCPToolInputSchema,
+  MCPToolOutputSchema
 } from '@types'
 import { app, net } from 'electron'
 import { EventEmitter } from 'events'
 import { v4 as uuidv4 } from 'uuid'
+import * as z from 'zod'

 import { CacheService } from './CacheService'
 import DxtService from './DxtService'
 import { CallBackServer } from './mcp/oauth/callback'
 import { McpOAuthClientProvider } from './mcp/oauth/provider'
+import { ServerLogBuffer } from './mcp/ServerLogBuffer'
 import { windowService } from './WindowService'

 // Generic type for caching wrapped functions

@@ -138,6 +144,7 @@ class McpService
   private pendingClients: Map<string, Promise<Client>> = new Map()
   private dxtService = new DxtService()
   private activeToolCalls: Map<string, AbortController> = new Map()
+  private serverLogs = new ServerLogBuffer(200)

   constructor() {
     this.initClient = this.initClient.bind(this)

@@ -155,6 +162,7 @@ class McpService
     this.cleanup = this.cleanup.bind(this)
     this.checkMcpConnectivity = this.checkMcpConnectivity.bind(this)
     this.getServerVersion = this.getServerVersion.bind(this)
+    this.getServerLogs = this.getServerLogs.bind(this)
   }

   private getServerKey(server: MCPServer): string {

@@ -168,6 +176,19 @@ class McpService
     })
   }

+  private emitServerLog(server: MCPServer, entry: MCPServerLogEntry) {
+    const serverKey = this.getServerKey(server)
+    this.serverLogs.append(serverKey, entry)
+    const mainWindow = windowService.getMainWindow()
+    if (mainWindow) {
+      mainWindow.webContents.send(IpcChannel.Mcp_ServerLog, { ...entry, serverId: server.id })
+    }
+  }
+
+  public getServerLogs(_: Electron.IpcMainInvokeEvent, server: MCPServer): MCPServerLogEntry[] {
+    return this.serverLogs.get(this.getServerKey(server))
+  }
+
   async initClient(server: MCPServer): Promise<Client> {
     const serverKey = this.getServerKey(server)

@@ -228,6 +249,26 @@ class McpService
     StdioClientTransport | SSEClientTransport | InMemoryTransport | StreamableHTTPClientTransport
   > => {
     // Create appropriate transport based on configuration
+
+    // Special case for nowledgeMem - uses HTTP transport instead of in-memory
+    if (isBuiltinMCPServer(server) && server.name === BuiltinMCPServerNames.nowledgeMem) {
+      const nowledgeMemUrl = 'http://127.0.0.1:14242/mcp'
+      const options: StreamableHTTPClientTransportOptions = {
+        fetch: async (url, init) => {
+          return net.fetch(typeof url === 'string' ? url : url.toString(), init)
+        },
+        requestInit: {
+          headers: {
+            ...defaultAppHeaders(),
+            APP: 'Cherry Studio'
+          }
+        },
+        authProvider
+      }
+      getServerLogger(server).debug(`Using StreamableHTTPClientTransport for ${server.name}`)
+      return new StreamableHTTPClientTransport(new URL(nowledgeMemUrl), options)
+    }
+
     if (isBuiltinMCPServer(server) && server.name !== BuiltinMCPServerNames.mcpAutoInstall) {
       getServerLogger(server).debug(`Using in-memory transport`)
       const [clientTransport, serverTransport] = InMemoryTransport.createLinkedPair()

@@ -343,7 +384,7 @@ class McpService
       removeEnvProxy(loginShellEnv)
     }

-    const transportOptions: any = {
+    const transportOptions: StdioServerParameters = {
       command: cmd,
       args,
       env: {

@@ -362,9 +403,18 @@ class McpService
       }

       const stdioTransport = new StdioClientTransport(transportOptions)
-      stdioTransport.stderr?.on('data', (data) =>
-        getServerLogger(server).debug(`Stdio stderr`, { data: data.toString() })
-      )
+      stdioTransport.stderr?.on('data', (data) => {
+        const msg = data.toString()
+        getServerLogger(server).debug(`Stdio stderr`, { data: msg })
+        this.emitServerLog(server, {
+          timestamp: Date.now(),
+          level: 'stderr',
+          message: msg.trim(),
+          source: 'stdio'
+        })
+      })
+      // StdioClientTransport does not expose stdout as a readable stream for raw logging
+      // (stdout is reserved for JSON-RPC). Avoid attaching a listener that would never fire.
       return stdioTransport
     } else {
       throw new Error('Either baseUrl or command must be provided')

@@ -432,6 +482,13 @@ class McpService
         }
       }

+      this.emitServerLog(server, {
+        timestamp: Date.now(),
+        level: 'info',
+        message: 'Server connected',
+        source: 'client'
+      })
+
       // Store the new client in the cache
       this.clients.set(serverKey, client)

@@ -442,9 +499,22 @@ class McpService
         this.clearServerCache(serverKey)

         logger.debug(`Activated server: ${server.name}`)
+        this.emitServerLog(server, {
+          timestamp: Date.now(),
+          level: 'info',
+          message: 'Server activated',
+          source: 'client'
+        })
         return client
       } catch (error) {
         getServerLogger(server).error(`Error activating server ${server.name}`, error as Error)
+        this.emitServerLog(server, {
+          timestamp: Date.now(),
+          level: 'error',
+          message: `Error activating server: ${(error as Error)?.message}`,
+          data: redactSensitive(error),
+          source: 'client'
+        })
         throw error
       }
     } finally {

@@ -502,6 +572,16 @@ class McpService
     // Set up logging message notification handler
     client.setNotificationHandler(LoggingMessageNotificationSchema, async (notification) => {
       logger.debug(`Message from server ${server.name}:`, notification.params)
+      const msg = notification.params?.message
+      if (msg) {
+        this.emitServerLog(server, {
+          timestamp: Date.now(),
+          level: (notification.params?.level as MCPServerLogEntry['level']) || 'info',
+          message: typeof msg === 'string' ? msg : JSON.stringify(msg),
+          data: redactSensitive(notification.params?.data),
+          source: notification.params?.logger || 'server'
+        })
+      }
     })

     getServerLogger(server).debug(`Set up notification handlers`)

@@ -536,6 +616,7 @@ class McpService
       this.clients.delete(serverKey)
       // Clear all caches for this server
       this.clearServerCache(serverKey)
+      this.serverLogs.remove(serverKey)
     } else {
       logger.warn(`No client found for server`, { serverKey })
     }

@@ -544,6 +625,12 @@ class McpService
   async stopServer(_: Electron.IpcMainInvokeEvent, server: MCPServer) {
     const serverKey = this.getServerKey(server)
     getServerLogger(server).debug(`Stopping server`)
+    this.emitServerLog(server, {
+      timestamp: Date.now(),
+      level: 'info',
+      message: 'Stopping server',
+      source: 'client'
+    })
     await this.closeClient(serverKey)
   }

@@ -570,6 +657,12 @@ class McpService
   async restartServer(_: Electron.IpcMainInvokeEvent, server: MCPServer) {
     getServerLogger(server).debug(`Restarting server`)
     const serverKey = this.getServerKey(server)
+    this.emitServerLog(server, {
+      timestamp: Date.now(),
+      level: 'info',
+      message: 'Restarting server',
+      source: 'client'
+    })
     await this.closeClient(serverKey)
     // Clear cache before restarting to ensure fresh data
     this.clearServerCache(serverKey)

@@ -602,9 +695,22 @@ class McpService
       // Attempt to list tools as a way to check connectivity
       await client.listTools()
       getServerLogger(server).debug(`Connectivity check successful`)
+      this.emitServerLog(server, {
+        timestamp: Date.now(),
+        level: 'info',
+        message: 'Connectivity check successful',
+        source: 'connectivity'
+      })
       return true
     } catch (error) {
       getServerLogger(server).error(`Connectivity check failed`, error as Error)
+      this.emitServerLog(server, {
+        timestamp: Date.now(),
+        level: 'error',
+        message: `Connectivity check failed: ${(error as Error).message}`,
+        data: redactSensitive(error),
+        source: 'connectivity'
+      })
       // Close the client if connectivity check fails to ensure a clean state for the next attempt
       const serverKey = this.getServerKey(server)
       await this.closeClient(serverKey)

@@ -620,6 +726,8 @@ class McpService
       tools.map((tool: SDKTool) => {
         const serverTool: MCPTool = {
           ...tool,
+          inputSchema: z.parse(MCPToolInputSchema, tool.inputSchema),
+          outputSchema: tool.outputSchema ? z.parse(MCPToolOutputSchema, tool.outputSchema) : undefined,
           id: buildFunctionCallToolName(server.name, tool.name, server.id),
           serverId: server.id,
           serverName: server.name,
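On the renderer side, the new IpcChannel.Mcp_ServerLog pushes can be consumed roughly as below; the window.electron.ipcRenderer bridge name is an assumption about the preload setup, not something this diff defines:

import { IpcChannel } from '@shared/IpcChannel'
import type { MCPServerLogEntry } from '@shared/config/types'

// Assumed preload bridge; adjust to the app's actual contextBridge API
window.electron.ipcRenderer.on(IpcChannel.Mcp_ServerLog, (_event, entry: MCPServerLogEntry & { serverId: string }) => {
  console.log(`[${entry.serverId}] ${entry.level}: ${entry.message}`)
})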
@@ -1393,6 +1393,50 @@ export class SelectionService
     actionWindow.setAlwaysOnTop(isPinned)
   }

+  /**
+   * [Windows only] Manual window resize handler
+   *
+   * ELECTRON BUG WORKAROUND:
+   * In Electron, when using `frame: false` + `transparent: true`, the native window
+   * resize functionality is broken on Windows. This is a known Electron bug.
+   * See: https://github.com/electron/electron/issues/48554
+   *
+   * This method can be removed once the Electron bug is fixed.
+   */
+  public resizeActionWindow(actionWindow: BrowserWindow, deltaX: number, deltaY: number, direction: string): void {
+    const bounds = actionWindow.getBounds()
+    const minWidth = 300
+    const minHeight = 200
+
+    let { x, y, width, height } = bounds
+
+    // Handle horizontal resize
+    if (direction.includes('e')) {
+      width = Math.max(minWidth, width + deltaX)
+    }
+    if (direction.includes('w')) {
+      const newWidth = Math.max(minWidth, width - deltaX)
+      if (newWidth !== width) {
+        x = x + (width - newWidth)
+        width = newWidth
+      }
+    }
+
+    // Handle vertical resize
+    if (direction.includes('s')) {
+      height = Math.max(minHeight, height + deltaY)
+    }
+    if (direction.includes('n')) {
+      const newHeight = Math.max(minHeight, height - deltaY)
+      if (newHeight !== height) {
+        y = y + (height - newHeight)
+        height = newHeight
+      }
+    }
+
+    actionWindow.setBounds({ x, y, width, height })
+  }
+
   /**
    * Update trigger mode behavior
    * Switches between selection-based and alt-key based triggering

@@ -1510,6 +1554,18 @@ export class SelectionService
       }
     })

+    // [Windows only] Electron bug workaround - can be removed once fixed
+    // See: https://github.com/electron/electron/issues/48554
+    ipcMain.handle(
+      IpcChannel.Selection_ActionWindowResize,
+      (event, deltaX: number, deltaY: number, direction: string) => {
+        const actionWindow = BrowserWindow.fromWebContents(event.sender)
+        if (actionWindow) {
+          selectionService?.resizeActionWindow(actionWindow, deltaX, deltaY, direction)
+        }
+      }
+    )
+
     this.isIpcHandlerRegistered = true
   }
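The matching renderer call for the new handler might look like the sketch below; the channel and argument order come from the handler above, while the preload bridge is assumed:

import { IpcChannel } from '@shared/IpcChannel'

// Called from pointermove while dragging a custom grip on the bottom-right corner
function onResizeDrag(deltaX: number, deltaY: number) {
  window.electron.ipcRenderer.invoke(IpcChannel.Selection_ActionWindowResize, deltaX, deltaY, 'se')
}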
@@ -35,6 +35,15 @@ function getShortcutHandler(shortcut: Shortcut)
     }
     case 'mini_window':
       return () => {
+        // Check the QuickAssistant state inside the handler instead of at registration time
+        const quickAssistantEnabled = configManager.getEnableQuickAssistant()
+        logger.info(`mini_window shortcut triggered, QuickAssistant enabled: ${quickAssistantEnabled}`)
+
+        if (!quickAssistantEnabled) {
+          logger.warn('QuickAssistant is disabled, ignoring mini_window shortcut trigger')
+          return
+        }
+
         windowService.toggleMiniWindow()
       }
     case 'selection_assistant_toggle':

@@ -190,11 +199,10 @@ export function registerShortcuts(window: BrowserWindow)
         break

       case 'mini_window':
-        //available only when QuickAssistant enabled
-        if (!configManager.getEnableQuickAssistant()) {
-          return
-        }
+        // The conditional check was moved from registration time into the handler
+        logger.info(`Processing mini_window shortcut, enabled: ${shortcut.enabled}`)
         showMiniWindowAccelerator = formatShortcutKey(shortcut.shortcut)
+        logger.debug(`Mini window accelerator set to: ${showMiniWindowAccelerator}`)
         break

       case 'selection_assistant_toggle':
@@ -1,5 +1,6 @@
 import { IpcChannel } from '@shared/IpcChannel'
-import { app, session, shell, webContents } from 'electron'
+import { app, dialog, session, shell, webContents } from 'electron'
+import { promises as fs } from 'fs'

 /**
  * init the useragent of the webview session

@@ -53,11 +54,17 @@ const attachKeyboardHandler = (contents: Electron.WebContents)
       return
     }

-    const isFindShortcut = (input.control || input.meta) && key === 'f'
-    const isEscape = key === 'escape'
-    const isEnter = key === 'enter'
+    // Helper to check if this is a shortcut we handle
+    const isHandledShortcut = (k: string) => {
+      const isFindShortcut = (input.control || input.meta) && k === 'f'
+      const isPrintShortcut = (input.control || input.meta) && k === 'p'
+      const isSaveShortcut = (input.control || input.meta) && k === 's'
+      const isEscape = k === 'escape'
+      const isEnter = k === 'enter'
+      return isFindShortcut || isPrintShortcut || isSaveShortcut || isEscape || isEnter
+    }

-    if (!isFindShortcut && !isEscape && !isEnter) {
+    if (!isHandledShortcut(key)) {
       return
     }

@@ -66,11 +73,20 @@ const attachKeyboardHandler = (contents: Electron.WebContents)
       return
     }

+    const isFindShortcut = (input.control || input.meta) && key === 'f'
+    const isPrintShortcut = (input.control || input.meta) && key === 'p'
+    const isSaveShortcut = (input.control || input.meta) && key === 's'
+
     // Always prevent Cmd/Ctrl+F to override the guest page's native find dialog
     if (isFindShortcut) {
       event.preventDefault()
     }

+    // Prevent default print/save dialogs and handle them with custom logic
+    if (isPrintShortcut || isSaveShortcut) {
+      event.preventDefault()
+    }
+
     // Send the hotkey event to the renderer
     // The renderer will decide whether to preventDefault for Escape and Enter
     // based on whether the search bar is visible

@@ -100,3 +116,130 @@ export function initWebviewHotkeys()
     attachKeyboardHandler(contents)
   })
 }

+/**
+ * Print webview content to PDF
+ * @param webviewId The webview webContents id
+ * @returns Path to saved PDF file or null if user cancelled
+ */
+export async function printWebviewToPDF(webviewId: number): Promise<string | null> {
+  const webview = webContents.fromId(webviewId)
+  if (!webview) {
+    throw new Error('Webview not found')
+  }
+
+  try {
+    // Get the page title for default filename
+    const pageTitle = await webview.executeJavaScript('document.title || "webpage"').catch(() => 'webpage')
+    // Sanitize filename by removing invalid characters
+    const sanitizedTitle = pageTitle.replace(/[<>:"/\\|?*]/g, '-').substring(0, 100)
+    const defaultFilename = sanitizedTitle ? `${sanitizedTitle}.pdf` : `webpage-${Date.now()}.pdf`
+
+    // Show save dialog
+    const { canceled, filePath } = await dialog.showSaveDialog({
+      title: 'Save as PDF',
+      defaultPath: defaultFilename,
+      filters: [{ name: 'PDF Files', extensions: ['pdf'] }]
+    })
+
+    if (canceled || !filePath) {
+      return null
+    }
+
+    // Generate PDF with settings to capture full page
+    const pdfData = await webview.printToPDF({
+      margins: {
+        marginType: 'default'
+      },
+      printBackground: true,
+      landscape: false,
+      pageSize: 'A4',
+      preferCSSPageSize: true
+    })
+
+    // Save PDF to file
+    await fs.writeFile(filePath, pdfData)
+
+    return filePath
+  } catch (error) {
+    throw new Error(`Failed to print to PDF: ${(error as Error).message}`)
+  }
+}
+
+/**
+ * Save webview content as HTML
+ * @param webviewId The webview webContents id
+ * @returns Path to saved HTML file or null if user cancelled
+ */
+export async function saveWebviewAsHTML(webviewId: number): Promise<string | null> {
+  const webview = webContents.fromId(webviewId)
+  if (!webview) {
+    throw new Error('Webview not found')
+  }
+
+  try {
+    // Get the page title for default filename
+    const pageTitle = await webview.executeJavaScript('document.title || "webpage"').catch(() => 'webpage')
+    // Sanitize filename by removing invalid characters
+    const sanitizedTitle = pageTitle.replace(/[<>:"/\\|?*]/g, '-').substring(0, 100)
+    const defaultFilename = sanitizedTitle ? `${sanitizedTitle}.html` : `webpage-${Date.now()}.html`
+
+    // Show save dialog
+    const { canceled, filePath } = await dialog.showSaveDialog({
+      title: 'Save as HTML',
+      defaultPath: defaultFilename,
+      filters: [
+        { name: 'HTML Files', extensions: ['html', 'htm'] },
+        { name: 'All Files', extensions: ['*'] }
+      ]
+    })
+
+    if (canceled || !filePath) {
+      return null
+    }
+
+    // Get the HTML content with safe error handling
+    const html = await webview.executeJavaScript(`
+      (() => {
+        try {
+          // Build complete DOCTYPE string if present
+          let doctype = '';
+          if (document.doctype) {
+            const dt = document.doctype;
+            doctype = '<!DOCTYPE ' + (dt.name || 'html');
+
+            // Add PUBLIC identifier if publicId is present
+            if (dt.publicId) {
+              // Escape single quotes in publicId
+              const escapedPublicId = String(dt.publicId).replace(/'/g, "\\'");
+              doctype += " PUBLIC '" + escapedPublicId + "'";
+
+              // Add systemId if present (required when publicId is present)
+              if (dt.systemId) {
+                const escapedSystemId = String(dt.systemId).replace(/'/g, "\\'");
+                doctype += " '" + escapedSystemId + "'";
+              }
+            } else if (dt.systemId) {
+              // SYSTEM identifier (without PUBLIC)
+              const escapedSystemId = String(dt.systemId).replace(/'/g, "\\'");
+              doctype += " SYSTEM '" + escapedSystemId + "'";
+            }
+
+            doctype += '>';
+          }
+          return doctype + (document.documentElement?.outerHTML || '');
+        } catch (error) {
+          // Fallback: just return the HTML without DOCTYPE if there's an error
+          return document.documentElement?.outerHTML || '';
+        }
+      })()
+    `)
+
+    // Save HTML to file
+    await fs.writeFile(filePath, html, 'utf-8')
+
+    return filePath
+  } catch (error) {
+    throw new Error(`Failed to save as HTML: ${(error as Error).message}`)
+  }
+}
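These two helpers still need IPC plumbing before the intercepted Ctrl/Cmd+P and Ctrl/Cmd+S can reach them; a sketch with hypothetical channel names:

import { ipcMain } from 'electron'

// Channel names are illustrative; register alongside the other webview handlers
ipcMain.handle('webview:print-to-pdf', (_event, webviewId: number) => printWebviewToPDF(webviewId))
ipcMain.handle('webview:save-as-html', (_event, webviewId: number) => saveWebviewAsHTML(webviewId))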
@@ -271,9 +271,9 @@ export class WindowService
       'https://account.siliconflow.cn/oauth',
       'https://cloud.siliconflow.cn/bills',
       'https://cloud.siliconflow.cn/expensebill',
-      'https://aihubmix.com/token',
-      'https://aihubmix.com/topup',
-      'https://aihubmix.com/statistics',
+      'https://console.aihubmix.com/token',
+      'https://console.aihubmix.com/topup',
+      'https://console.aihubmix.com/statistics',
       'https://dash.302.ai/sso/login',
       'https://dash.302.ai/charge',
       'https://www.aiionly.com/login'
src/main/services/__tests__/ServerLogBuffer.test.ts (new file)
@@ -0,0 +1,29 @@
import { describe, expect, it } from 'vitest'

import { ServerLogBuffer } from '../mcp/ServerLogBuffer'

describe('ServerLogBuffer', () => {
  it('keeps a bounded number of entries per server', () => {
    const buffer = new ServerLogBuffer(3)
    const key = 'srv'

    buffer.append(key, { timestamp: 1, level: 'info', message: 'a' })
    buffer.append(key, { timestamp: 2, level: 'info', message: 'b' })
    buffer.append(key, { timestamp: 3, level: 'info', message: 'c' })
    buffer.append(key, { timestamp: 4, level: 'info', message: 'd' })

    const logs = buffer.get(key)
    expect(logs).toHaveLength(3)
    expect(logs[0].message).toBe('b')
    expect(logs[2].message).toBe('d')
  })

  it('isolates entries by server key', () => {
    const buffer = new ServerLogBuffer(5)
    buffer.append('one', { timestamp: 1, level: 'info', message: 'a' })
    buffer.append('two', { timestamp: 2, level: 'info', message: 'b' })

    expect(buffer.get('one')).toHaveLength(1)
    expect(buffer.get('two')).toHaveLength(1)
  })
})
@@ -78,7 +78,7 @@ export abstract class BaseService {
   * Get database instance
   * Automatically waits for initialization to complete
   */
-  protected async getDatabase() {
+  public async getDatabase() {
    const dbManager = await DatabaseManager.getInstance()
    return dbManager.getDatabase()
  }
@@ -15,6 +15,8 @@ import { query } from '@anthropic-ai/claude-agent-sdk'
 import { loggerService } from '@logger'
 import { config as apiConfigService } from '@main/apiServer/config'
 import { validateModelId } from '@main/apiServer/utils'
+import { isWin } from '@main/constant'
+import { autoDiscoverGitBash } from '@main/utils/process'
 import getLoginShellEnvironment from '@main/utils/shell-env'
 import { app } from 'electron'

@@ -107,6 +109,9 @@ class ClaudeCodeService implements AgentServiceInterface {
       Object.entries(loginShellEnv).filter(([key]) => !key.toLowerCase().endsWith('_proxy'))
     ) as Record<string, string>

+    // Auto-discover Git Bash path on Windows (already logs internally)
+    const customGitBashPath = isWin ? autoDiscoverGitBash() : null
+
     const env = {
       ...loginShellEnvWithoutProxies,
       // TODO: fix the proxy api server
@@ -126,7 +131,8 @@ class ClaudeCodeService implements AgentServiceInterface {
       // Set CLAUDE_CONFIG_DIR to app's userData directory to avoid path encoding issues
       // on Windows when the username contains non-ASCII characters (e.g., Chinese characters)
       // This prevents the SDK from using the user's home directory which may have encoding problems
-      CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude')
+      CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude'),
+      ...(customGitBashPath ? { CLAUDE_CODE_GIT_BASH_PATH: customGitBashPath } : {})
     }

     const errorChunks: string[] = []
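Note on the env construction above: `...(cond ? { KEY: value } : {})` adds a key only when the value is present, so the SDK never sees an empty CLAUDE_CODE_GIT_BASH_PATH. A tiny illustration (values hypothetical):

// Conditional spread: include the key only when a value exists.
const gitBashPath: string | null = 'C:\\Program Files\\Git\\bin\\bash.exe' // hypothetical
const env = {
  FOO: 'bar',
  ...(gitBashPath ? { CLAUDE_CODE_GIT_BASH_PATH: gitBashPath } : {})
}
// 'CLAUDE_CODE_GIT_BASH_PATH' in env === true only when gitBashPath is truthy.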
src/main/services/mcp/ServerLogBuffer.ts (new file)
@@ -0,0 +1,36 @@
export type MCPServerLogEntry = {
  timestamp: number
  level: 'debug' | 'info' | 'warn' | 'error' | 'stderr' | 'stdout'
  message: string
  data?: any
  source?: string
}

/**
 * Lightweight ring buffer for per-server MCP logs.
 */
export class ServerLogBuffer {
  private maxEntries: number
  private logs: Map<string, MCPServerLogEntry[]> = new Map()

  constructor(maxEntries = 200) {
    this.maxEntries = maxEntries
  }

  append(serverKey: string, entry: MCPServerLogEntry) {
    const list = this.logs.get(serverKey) ?? []
    list.push(entry)
    if (list.length > this.maxEntries) {
      list.splice(0, list.length - this.maxEntries)
    }
    this.logs.set(serverKey, list)
  }

  get(serverKey: string): MCPServerLogEntry[] {
    return [...(this.logs.get(serverKey) ?? [])]
  }

  remove(serverKey: string) {
    this.logs.delete(serverKey)
  }
}
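For reference, a short usage sketch of the ring buffer above (import path assumed from the relative import in the accompanying test file):

import { ServerLogBuffer, type MCPServerLogEntry } from './ServerLogBuffer'

const buffer = new ServerLogBuffer(200) // keep at most 200 entries per server
const entry: MCPServerLogEntry = { timestamp: Date.now(), level: 'stderr', message: 'boom' }
buffer.append('my-server', entry) // oldest entries are dropped past the cap
const recent = buffer.get('my-server') // returns a defensive copy
buffer.remove('my-server') // drop all logs for a server
console.log(recent.length)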
@@ -128,8 +128,8 @@ export class CallBackServer {
     })

     return new Promise<http.Server>((resolve, reject) => {
-      server.listen(port, () => {
-        logger.info(`OAuth callback server listening on port ${port}`)
+      server.listen(port, '127.0.0.1', () => {
+        logger.info(`OAuth callback server listening on 127.0.0.1:${port}`)
         resolve(server)
       })

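Binding to 127.0.0.1 above keeps the OAuth callback port off external network interfaces. The same pattern in a minimal standalone Node sketch (port hypothetical):

import http from 'http'

const server = http.createServer((_req, res) => {
  res.end('ok')
})

// Loopback-only: other hosts on the network cannot reach this port.
server.listen(12345, '127.0.0.1', () => {
  console.log('listening on 127.0.0.1:12345')
})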
src/main/utils/__tests__/process.test.ts (new file)
@@ -0,0 +1,990 @@
import { configManager } from '@main/services/ConfigManager'
import { execFileSync } from 'child_process'
import fs from 'fs'
import path from 'path'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'

import { autoDiscoverGitBash, findExecutable, findGitBash, validateGitBashPath } from '../process'

// Mock configManager
vi.mock('@main/services/ConfigManager', () => ({
  ConfigKeys: {
    GitBashPath: 'gitBashPath'
  },
  configManager: {
    get: vi.fn(),
    set: vi.fn()
  }
}))

// Mock dependencies
vi.mock('child_process')
vi.mock('fs')
vi.mock('path')

// These tests only run on Windows since the functions have platform guards
describe.skipIf(process.platform !== 'win32')('process utilities', () => {
  beforeEach(() => {
    vi.clearAllMocks()

    // Mock path.join to concatenate paths with backslashes (Windows-style)
    vi.mocked(path.join).mockImplementation((...args) => args.join('\\'))

    // Mock path.resolve to handle path resolution with .. support
    vi.mocked(path.resolve).mockImplementation((...args) => {
      let result = args.join('\\')

      // Handle .. navigation
      while (result.includes('\\..')) {
        result = result.replace(/\\[^\\]+\\\.\./g, '')
      }

      // Ensure absolute path
      if (!result.match(/^[A-Z]:/)) {
        result = `C:\\cwd\\${result}`
      }

      return result
    })

    // Mock path.dirname
    vi.mocked(path.dirname).mockImplementation((p) => {
      const parts = p.split('\\')
      parts.pop()
      return parts.join('\\')
    })

    // Mock path.sep
    Object.defineProperty(path, 'sep', { value: '\\', writable: true })

    // Mock process.cwd()
    vi.spyOn(process, 'cwd').mockReturnValue('C:\\cwd')
  })

  describe('findExecutable', () => {
    describe('git common paths', () => {
      it('should find git at Program Files path', () => {
        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
        process.env.ProgramFiles = 'C:\\Program Files'

        vi.mocked(fs.existsSync).mockImplementation((p) => p === gitPath)

        const result = findExecutable('git')

        expect(result).toBe(gitPath)
        expect(fs.existsSync).toHaveBeenCalledWith(gitPath)
      })

      it('should find git at Program Files (x86) path', () => {
        const gitPath = 'C:\\Program Files (x86)\\Git\\cmd\\git.exe'
        process.env['ProgramFiles(x86)'] = 'C:\\Program Files (x86)'

        vi.mocked(fs.existsSync).mockImplementation((p) => p === gitPath)

        const result = findExecutable('git')

        expect(result).toBe(gitPath)
        expect(fs.existsSync).toHaveBeenCalledWith(gitPath)
      })

      it('should use fallback paths when environment variables are not set', () => {
        delete process.env.ProgramFiles
        delete process.env['ProgramFiles(x86)']

        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
        vi.mocked(fs.existsSync).mockImplementation((p) => p === gitPath)

        const result = findExecutable('git')

        expect(result).toBe(gitPath)
      })
    })

    describe('where.exe PATH lookup', () => {
      beforeEach(() => {
        Object.defineProperty(process, 'platform', { value: 'win32', writable: true })
        // Common paths don't exist
        vi.mocked(fs.existsSync).mockReturnValue(false)
      })

      it('should find executable via where.exe', () => {
        const gitPath = 'C:\\Git\\bin\\git.exe'

        vi.mocked(execFileSync).mockReturnValue(gitPath)

        const result = findExecutable('git')

        expect(result).toBe(gitPath)
        expect(execFileSync).toHaveBeenCalledWith('where.exe', ['git.exe'], {
          encoding: 'utf8',
          stdio: ['pipe', 'pipe', 'pipe']
        })
      })

      it('should add .exe extension when calling where.exe', () => {
        vi.mocked(execFileSync).mockImplementation(() => {
          throw new Error('Not found')
        })

        findExecutable('node')

        expect(execFileSync).toHaveBeenCalledWith('where.exe', ['node.exe'], expect.any(Object))
      })

      it('should handle Windows line endings (CRLF)', () => {
        const gitPath1 = 'C:\\Git\\bin\\git.exe'
        const gitPath2 = 'C:\\Program Files\\Git\\cmd\\git.exe'

        vi.mocked(execFileSync).mockReturnValue(`${gitPath1}\r\n${gitPath2}\r\n`)

        const result = findExecutable('git')

        // Should return the first valid path
        expect(result).toBe(gitPath1)
      })

      it('should handle Unix line endings (LF)', () => {
        const gitPath1 = 'C:\\Git\\bin\\git.exe'
        const gitPath2 = 'C:\\Program Files\\Git\\cmd\\git.exe'

        vi.mocked(execFileSync).mockReturnValue(`${gitPath1}\n${gitPath2}\n`)

        const result = findExecutable('git')

        expect(result).toBe(gitPath1)
      })

      it('should handle mixed line endings', () => {
        const gitPath1 = 'C:\\Git\\bin\\git.exe'
        const gitPath2 = 'C:\\Program Files\\Git\\cmd\\git.exe'

        vi.mocked(execFileSync).mockReturnValue(`${gitPath1}\r\n${gitPath2}\n`)

        const result = findExecutable('git')

        expect(result).toBe(gitPath1)
      })

      it('should trim whitespace from paths', () => {
        const gitPath = 'C:\\Git\\bin\\git.exe'

        vi.mocked(execFileSync).mockReturnValue(` ${gitPath} \n`)

        const result = findExecutable('git')

        expect(result).toBe(gitPath)
      })

      it('should filter empty lines', () => {
        const gitPath = 'C:\\Git\\bin\\git.exe'

        vi.mocked(execFileSync).mockReturnValue(`\n\n${gitPath}\n\n`)

        const result = findExecutable('git')

        expect(result).toBe(gitPath)
      })
    })

    describe('security checks', () => {
      beforeEach(() => {
        Object.defineProperty(process, 'platform', { value: 'win32', writable: true })
        vi.mocked(fs.existsSync).mockReturnValue(false)
      })

      it('should skip executables in current directory', () => {
        const maliciousPath = 'C:\\cwd\\git.exe'
        const safePath = 'C:\\Git\\bin\\git.exe'

        vi.mocked(execFileSync).mockReturnValue(`${maliciousPath}\n${safePath}`)

        vi.mocked(path.resolve).mockImplementation((p) => {
          if (p.includes('cwd\\git.exe')) return 'c:\\cwd\\git.exe'
          return 'c:\\git\\bin\\git.exe'
        })

        vi.mocked(path.dirname).mockImplementation((p) => {
          if (p.includes('cwd\\git.exe')) return 'c:\\cwd'
          return 'c:\\git\\bin'
        })

        const result = findExecutable('git')

        // Should skip malicious path and return safe path
        expect(result).toBe(safePath)
      })

      it('should skip executables in current directory subdirectories', () => {
        const maliciousPath = 'C:\\cwd\\subdir\\git.exe'
        const safePath = 'C:\\Git\\bin\\git.exe'

        vi.mocked(execFileSync).mockReturnValue(`${maliciousPath}\n${safePath}`)

        vi.mocked(path.resolve).mockImplementation((p) => {
          if (p.includes('cwd\\subdir')) return 'c:\\cwd\\subdir\\git.exe'
          return 'c:\\git\\bin\\git.exe'
        })

        vi.mocked(path.dirname).mockImplementation((p) => {
          if (p.includes('cwd\\subdir')) return 'c:\\cwd\\subdir'
          return 'c:\\git\\bin'
        })

        const result = findExecutable('git')

        expect(result).toBe(safePath)
      })

      it('should return null when only malicious executables are found', () => {
        const maliciousPath = 'C:\\cwd\\git.exe'

        vi.mocked(execFileSync).mockReturnValue(maliciousPath)

        vi.mocked(path.resolve).mockReturnValue('c:\\cwd\\git.exe')
        vi.mocked(path.dirname).mockReturnValue('c:\\cwd')

        const result = findExecutable('git')

        expect(result).toBeNull()
      })
    })

    describe('error handling', () => {
      beforeEach(() => {
        Object.defineProperty(process, 'platform', { value: 'win32', writable: true })
        vi.mocked(fs.existsSync).mockReturnValue(false)
      })

      it('should return null when where.exe fails', () => {
        vi.mocked(execFileSync).mockImplementation(() => {
          throw new Error('Command failed')
        })

        const result = findExecutable('nonexistent')

        expect(result).toBeNull()
      })

      it('should return null when where.exe returns empty output', () => {
        vi.mocked(execFileSync).mockReturnValue('')

        const result = findExecutable('git')

        expect(result).toBeNull()
      })

      it('should return null when where.exe returns only whitespace', () => {
        vi.mocked(execFileSync).mockReturnValue(' \n\n ')

        const result = findExecutable('git')

        expect(result).toBeNull()
      })
    })

    describe('non-git executables', () => {
      beforeEach(() => {
        Object.defineProperty(process, 'platform', { value: 'win32', writable: true })
      })

      it('should skip common paths check for non-git executables', () => {
        const nodePath = 'C:\\Program Files\\nodejs\\node.exe'

        vi.mocked(execFileSync).mockReturnValue(nodePath)

        const result = findExecutable('node')

        expect(result).toBe(nodePath)
        // Should not check common Git paths
        expect(fs.existsSync).not.toHaveBeenCalledWith(expect.stringContaining('Git\\cmd\\node.exe'))
      })
    })
  })

  describe('validateGitBashPath', () => {
    it('returns null when path is null', () => {
      const result = validateGitBashPath(null)

      expect(result).toBeNull()
    })

    it('returns null when path is undefined', () => {
      const result = validateGitBashPath(undefined)

      expect(result).toBeNull()
    })

    it('returns normalized path when valid bash.exe exists', () => {
      const customPath = 'C:\\PortableGit\\bin\\bash.exe'
      vi.mocked(fs.existsSync).mockImplementation((p) => p === 'C:\\PortableGit\\bin\\bash.exe')

      const result = validateGitBashPath(customPath)

      expect(result).toBe('C:\\PortableGit\\bin\\bash.exe')
    })

    it('returns null when file does not exist', () => {
      vi.mocked(fs.existsSync).mockReturnValue(false)

      const result = validateGitBashPath('C:\\missing\\bash.exe')

      expect(result).toBeNull()
    })

    it('returns null when path is not bash.exe', () => {
      const customPath = 'C:\\PortableGit\\bin\\git.exe'
      vi.mocked(fs.existsSync).mockReturnValue(true)

      const result = validateGitBashPath(customPath)

      expect(result).toBeNull()
    })
  })

  describe('findGitBash', () => {
    describe('customPath parameter', () => {
      beforeEach(() => {
        delete process.env.CLAUDE_CODE_GIT_BASH_PATH
      })

      it('uses customPath when valid', () => {
        const customPath = 'C:\\CustomGit\\bin\\bash.exe'
        vi.mocked(fs.existsSync).mockImplementation((p) => p === customPath)

        const result = findGitBash(customPath)

        expect(result).toBe(customPath)
        expect(execFileSync).not.toHaveBeenCalled()
      })

      it('falls back when customPath is invalid', () => {
        const customPath = 'C:\\Invalid\\bash.exe'
        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'

        vi.mocked(fs.existsSync).mockImplementation((p) => {
          if (p === customPath) return false
          if (p === gitPath) return true
          if (p === bashPath) return true
          return false
        })

        vi.mocked(execFileSync).mockReturnValue(gitPath)

        const result = findGitBash(customPath)

        expect(result).toBe(bashPath)
      })

      it('prioritizes customPath over env override', () => {
        const customPath = 'C:\\CustomGit\\bin\\bash.exe'
        const envPath = 'C:\\EnvGit\\bin\\bash.exe'
        process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath

        vi.mocked(fs.existsSync).mockImplementation((p) => p === customPath || p === envPath)

        const result = findGitBash(customPath)

        expect(result).toBe(customPath)
      })
    })

    describe('env override', () => {
      beforeEach(() => {
        delete process.env.CLAUDE_CODE_GIT_BASH_PATH
      })

      it('uses CLAUDE_CODE_GIT_BASH_PATH when valid', () => {
        const envPath = 'C:\\OverrideGit\\bin\\bash.exe'
        process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath

        vi.mocked(fs.existsSync).mockImplementation((p) => p === envPath)

        const result = findGitBash()

        expect(result).toBe(envPath)
        expect(execFileSync).not.toHaveBeenCalled()
      })

      it('falls back when CLAUDE_CODE_GIT_BASH_PATH is invalid', () => {
        const envPath = 'C:\\Invalid\\bash.exe'
        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'

        process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath

        vi.mocked(fs.existsSync).mockImplementation((p) => {
          if (p === envPath) return false
          if (p === gitPath) return true
          if (p === bashPath) return true
          return false
        })

        vi.mocked(execFileSync).mockReturnValue(gitPath)

        const result = findGitBash()

        expect(result).toBe(bashPath)
      })
    })

    describe('git.exe path derivation', () => {
      it('should derive bash.exe from standard Git installation (Git/cmd/git.exe)', () => {
        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'

        // findExecutable will find git at common path
        process.env.ProgramFiles = 'C:\\Program Files'
        vi.mocked(fs.existsSync).mockImplementation((p) => {
          return p === gitPath || p === bashPath
        })

        const result = findGitBash()

        expect(result).toBe(bashPath)
      })

      it('should derive bash.exe from portable Git installation (Git/bin/git.exe)', () => {
        const gitPath = 'C:\\PortableGit\\bin\\git.exe'
        const bashPath = 'C:\\PortableGit\\bin\\bash.exe'

        // Mock: common git paths don't exist, but where.exe finds portable git
        vi.mocked(fs.existsSync).mockImplementation((p) => {
          const pathStr = p?.toString() || ''
          // Common git paths don't exist
          if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
          if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
          // Portable bash.exe exists at Git/bin/bash.exe (second path in possibleBashPaths)
          if (pathStr === bashPath) return true
          return false
        })

        // where.exe returns portable git path
        vi.mocked(execFileSync).mockReturnValue(gitPath)

        const result = findGitBash()

        expect(result).toBe(bashPath)
      })

      it('should derive bash.exe from MSYS2 Git installation (Git/usr/bin/bash.exe)', () => {
        const gitPath = 'C:\\msys64\\usr\\bin\\git.exe'
        const bashPath = 'C:\\msys64\\usr\\bin\\bash.exe'

        vi.mocked(fs.existsSync).mockImplementation((p) => {
          const pathStr = p?.toString() || ''
          // Common git paths don't exist
          if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
          if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
          // MSYS2 bash.exe exists at usr/bin/bash.exe (third path in possibleBashPaths)
          if (pathStr === bashPath) return true
          return false
        })

        vi.mocked(execFileSync).mockReturnValue(gitPath)

        const result = findGitBash()

        expect(result).toBe(bashPath)
      })

      it('should try multiple bash.exe locations in order', () => {
        const gitPath = 'C:\\Git\\cmd\\git.exe'
        const bashPath = 'C:\\Git\\bin\\bash.exe'

        vi.mocked(fs.existsSync).mockImplementation((p) => {
          const pathStr = p?.toString() || ''
          // Common git paths don't exist
          if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
          if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
          // Standard path exists (first in possibleBashPaths)
          if (pathStr === bashPath) return true
          return false
        })

        vi.mocked(execFileSync).mockReturnValue(gitPath)

        const result = findGitBash()

        expect(result).toBe(bashPath)
      })

      it('should handle when git.exe is found but bash.exe is not at any derived location', () => {
        const gitPath = 'C:\\Git\\cmd\\git.exe'

        // git.exe exists via where.exe, but bash.exe doesn't exist at any derived location
        vi.mocked(fs.existsSync).mockImplementation(() => {
          // Only return false for all bash.exe checks
          return false
        })

        vi.mocked(execFileSync).mockReturnValue(gitPath)

        const result = findGitBash()

        // Should fall back to common paths check
        expect(result).toBeNull()
      })
    })

    describe('common paths fallback', () => {
      beforeEach(() => {
        // git.exe not found
        vi.mocked(execFileSync).mockImplementation(() => {
          throw new Error('Not found')
        })
      })

      it('should check Program Files path', () => {
        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
        process.env.ProgramFiles = 'C:\\Program Files'

        vi.mocked(fs.existsSync).mockImplementation((p) => p === bashPath)

        const result = findGitBash()

        expect(result).toBe(bashPath)
      })

      it('should check Program Files (x86) path', () => {
        const bashPath = 'C:\\Program Files (x86)\\Git\\bin\\bash.exe'
        process.env['ProgramFiles(x86)'] = 'C:\\Program Files (x86)'

        vi.mocked(fs.existsSync).mockImplementation((p) => p === bashPath)

        const result = findGitBash()

        expect(result).toBe(bashPath)
      })

      it('should check LOCALAPPDATA path', () => {
        const bashPath = 'C:\\Users\\User\\AppData\\Local\\Programs\\Git\\bin\\bash.exe'
        process.env.LOCALAPPDATA = 'C:\\Users\\User\\AppData\\Local'

        vi.mocked(fs.existsSync).mockImplementation((p) => p === bashPath)

        const result = findGitBash()

        expect(result).toBe(bashPath)
      })

      it('should skip LOCALAPPDATA check when environment variable is not set', () => {
        delete process.env.LOCALAPPDATA

        vi.mocked(fs.existsSync).mockReturnValue(false)

        const result = findGitBash()

        expect(result).toBeNull()
        // Should not check invalid path with empty LOCALAPPDATA
        expect(fs.existsSync).not.toHaveBeenCalledWith(expect.stringContaining('undefined'))
      })

      it('should use fallback values when environment variables are not set', () => {
        delete process.env.ProgramFiles
        delete process.env['ProgramFiles(x86)']

        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
        vi.mocked(fs.existsSync).mockImplementation((p) => p === bashPath)

        const result = findGitBash()

        expect(result).toBe(bashPath)
      })
    })

    describe('priority order', () => {
      it('should prioritize git.exe derivation over common paths', () => {
        const gitPath = 'C:\\CustomPath\\Git\\cmd\\git.exe'
        const derivedBashPath = 'C:\\CustomPath\\Git\\bin\\bash.exe'
        const commonBashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'

        // Both exist
        vi.mocked(fs.existsSync).mockImplementation((p) => {
          const pathStr = p?.toString() || ''
          // Common git paths don't exist (so findExecutable uses where.exe)
          if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
          if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
          // Both bash paths exist, but derived should be checked first
          if (pathStr === derivedBashPath) return true
          if (pathStr === commonBashPath) return true
          return false
        })

        vi.mocked(execFileSync).mockReturnValue(gitPath)

        const result = findGitBash()

        // Should return derived path, not common path
        expect(result).toBe(derivedBashPath)
      })
    })

    describe('error scenarios', () => {
      it('should return null when Git is not installed anywhere', () => {
        vi.mocked(fs.existsSync).mockReturnValue(false)
        vi.mocked(execFileSync).mockImplementation(() => {
          throw new Error('Not found')
        })

        const result = findGitBash()

        expect(result).toBeNull()
      })

      it('should return null when git.exe exists but bash.exe does not', () => {
        const gitPath = 'C:\\Git\\cmd\\git.exe'

        vi.mocked(fs.existsSync).mockImplementation((p) => {
          // git.exe exists, but no bash.exe anywhere
          return p === gitPath
        })

        vi.mocked(execFileSync).mockReturnValue(gitPath)

        const result = findGitBash()

        expect(result).toBeNull()
      })
    })

    describe('real-world scenarios', () => {
      it('should handle official Git for Windows installer', () => {
        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'

        process.env.ProgramFiles = 'C:\\Program Files'
        vi.mocked(fs.existsSync).mockImplementation((p) => {
          return p === gitPath || p === bashPath
        })

        const result = findGitBash()

        expect(result).toBe(bashPath)
      })

      it('should handle portable Git installation in custom directory', () => {
        const gitPath = 'D:\\DevTools\\PortableGit\\bin\\git.exe'
        const bashPath = 'D:\\DevTools\\PortableGit\\bin\\bash.exe'

        vi.mocked(fs.existsSync).mockImplementation((p) => {
          const pathStr = p?.toString() || ''
          // Common paths don't exist
          if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
          if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
          // Portable Git paths exist (portable uses second path: Git/bin/bash.exe)
          if (pathStr === bashPath) return true
          return false
        })

        vi.mocked(execFileSync).mockReturnValue(gitPath)

        const result = findGitBash()

        expect(result).toBe(bashPath)
      })

      it('should handle Git installed via Scoop', () => {
        // Scoop typically installs to %USERPROFILE%\scoop\apps\git\current
        const gitPath = 'C:\\Users\\User\\scoop\\apps\\git\\current\\cmd\\git.exe'
        const bashPath = 'C:\\Users\\User\\scoop\\apps\\git\\current\\bin\\bash.exe'

        vi.mocked(fs.existsSync).mockImplementation((p) => {
          const pathStr = p?.toString() || ''
          // Common paths don't exist
          if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
          if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
          // Scoop bash path exists (standard structure: cmd -> bin)
          if (pathStr === bashPath) return true
          return false
        })

        vi.mocked(execFileSync).mockReturnValue(gitPath)

        const result = findGitBash()

        expect(result).toBe(bashPath)
      })
    })
  })

  describe('autoDiscoverGitBash', () => {
    const originalEnvVar = process.env.CLAUDE_CODE_GIT_BASH_PATH

    beforeEach(() => {
      vi.mocked(configManager.get).mockReset()
      vi.mocked(configManager.set).mockReset()
      delete process.env.CLAUDE_CODE_GIT_BASH_PATH
    })

    afterEach(() => {
      // Restore original environment variable
      if (originalEnvVar !== undefined) {
        process.env.CLAUDE_CODE_GIT_BASH_PATH = originalEnvVar
      } else {
        delete process.env.CLAUDE_CODE_GIT_BASH_PATH
      }
    })

    /**
     * Helper to mock fs.existsSync with a set of valid paths
     */
    const mockExistingPaths = (...validPaths: string[]) => {
      vi.mocked(fs.existsSync).mockImplementation((p) => validPaths.includes(p as string))
    }

    describe('with no existing config path', () => {
      it('should discover and persist Git Bash path when not configured', () => {
        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'

        vi.mocked(configManager.get).mockReturnValue(undefined)
        process.env.ProgramFiles = 'C:\\Program Files'
        mockExistingPaths(gitPath, bashPath)

        const result = autoDiscoverGitBash()

        expect(result).toBe(bashPath)
        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
      })

      it('should return null and not persist when Git Bash is not found', () => {
        vi.mocked(configManager.get).mockReturnValue(undefined)
        vi.mocked(fs.existsSync).mockReturnValue(false)
        vi.mocked(execFileSync).mockImplementation(() => {
          throw new Error('Not found')
        })

        const result = autoDiscoverGitBash()

        expect(result).toBeNull()
        expect(configManager.set).not.toHaveBeenCalled()
      })
    })

    describe('environment variable precedence', () => {
      it('should use env var over valid config path', () => {
        const envPath = 'C:\\EnvGit\\bin\\bash.exe'
        const configPath = 'C:\\ConfigGit\\bin\\bash.exe'

        process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
        vi.mocked(configManager.get).mockReturnValue(configPath)
        mockExistingPaths(envPath, configPath)

        const result = autoDiscoverGitBash()

        // Env var should take precedence
        expect(result).toBe(envPath)
        // Should not persist env var path (it's a runtime override)
        expect(configManager.set).not.toHaveBeenCalled()
      })

      it('should fall back to config path when env var is invalid', () => {
        const envPath = 'C:\\Invalid\\bash.exe'
        const configPath = 'C:\\ConfigGit\\bin\\bash.exe'

        process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
        vi.mocked(configManager.get).mockReturnValue(configPath)
        // Env path is invalid (doesn't exist), only config path exists
        mockExistingPaths(configPath)

        const result = autoDiscoverGitBash()

        // Should fall back to config path
        expect(result).toBe(configPath)
        expect(configManager.set).not.toHaveBeenCalled()
      })

      it('should fall back to auto-discovery when both env var and config are invalid', () => {
        const envPath = 'C:\\InvalidEnv\\bash.exe'
        const configPath = 'C:\\InvalidConfig\\bash.exe'
        const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'

        process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
        process.env.ProgramFiles = 'C:\\Program Files'
        vi.mocked(configManager.get).mockReturnValue(configPath)
        // Both env and config paths are invalid, only standard Git exists
        mockExistingPaths(gitPath, discoveredPath)

        const result = autoDiscoverGitBash()

        expect(result).toBe(discoveredPath)
        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', discoveredPath)
      })
    })

    describe('with valid existing config path', () => {
      it('should validate and return existing path without re-discovering', () => {
        const existingPath = 'C:\\CustomGit\\bin\\bash.exe'

        vi.mocked(configManager.get).mockReturnValue(existingPath)
        mockExistingPaths(existingPath)

        const result = autoDiscoverGitBash()

        expect(result).toBe(existingPath)
        // Should not call findGitBash or persist again
        expect(configManager.set).not.toHaveBeenCalled()
        // Should not call execFileSync (which findGitBash would use for discovery)
        expect(execFileSync).not.toHaveBeenCalled()
      })

      it('should not override existing valid config with auto-discovery', () => {
        const existingPath = 'C:\\CustomGit\\bin\\bash.exe'
        const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'

        vi.mocked(configManager.get).mockReturnValue(existingPath)
        mockExistingPaths(existingPath, discoveredPath)

        const result = autoDiscoverGitBash()

        expect(result).toBe(existingPath)
        expect(configManager.set).not.toHaveBeenCalled()
      })
    })

    describe('with invalid existing config path', () => {
      it('should attempt auto-discovery when existing path does not exist', () => {
        const existingPath = 'C:\\NonExistent\\bin\\bash.exe'
        const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'

        vi.mocked(configManager.get).mockReturnValue(existingPath)
        process.env.ProgramFiles = 'C:\\Program Files'
        // Invalid path doesn't exist, but Git is installed at standard location
        mockExistingPaths(gitPath, discoveredPath)

        const result = autoDiscoverGitBash()

        // Should discover and return the new path
        expect(result).toBe(discoveredPath)
        // Should persist the discovered path (overwrites invalid)
        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', discoveredPath)
      })

      it('should attempt auto-discovery when existing path is not bash.exe', () => {
        const existingPath = 'C:\\CustomGit\\bin\\git.exe'
        const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'

        vi.mocked(configManager.get).mockReturnValue(existingPath)
        process.env.ProgramFiles = 'C:\\Program Files'
        // Invalid path exists but is not bash.exe (validation will fail)
        // Git is installed at standard location
        mockExistingPaths(existingPath, gitPath, discoveredPath)

        const result = autoDiscoverGitBash()

        // Should discover and return the new path
        expect(result).toBe(discoveredPath)
        // Should persist the discovered path (overwrites invalid)
        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', discoveredPath)
      })

      it('should return null when existing path is invalid and discovery fails', () => {
        const existingPath = 'C:\\NonExistent\\bin\\bash.exe'

        vi.mocked(configManager.get).mockReturnValue(existingPath)
        vi.mocked(fs.existsSync).mockReturnValue(false)
        vi.mocked(execFileSync).mockImplementation(() => {
          throw new Error('Not found')
        })

        const result = autoDiscoverGitBash()

        // Both validation and discovery failed
        expect(result).toBeNull()
        // Should not persist when discovery fails
        expect(configManager.set).not.toHaveBeenCalled()
      })
    })

    describe('config persistence verification', () => {
      it('should persist discovered path with correct config key', () => {
        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'

        vi.mocked(configManager.get).mockReturnValue(undefined)
        process.env.ProgramFiles = 'C:\\Program Files'
        mockExistingPaths(gitPath, bashPath)

        autoDiscoverGitBash()

        // Verify the exact call to configManager.set
        expect(configManager.set).toHaveBeenCalledTimes(1)
        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
      })

      it('should persist on each discovery when config remains undefined', () => {
        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'

        vi.mocked(configManager.get).mockReturnValue(undefined)
        process.env.ProgramFiles = 'C:\\Program Files'
        mockExistingPaths(gitPath, bashPath)

        autoDiscoverGitBash()
        autoDiscoverGitBash()

        // Each call discovers and persists since config remains undefined (mocked)
        expect(configManager.set).toHaveBeenCalledTimes(2)
      })
    })

    describe('real-world scenarios', () => {
      it('should discover and persist standard Git for Windows installation', () => {
        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'

        vi.mocked(configManager.get).mockReturnValue(undefined)
        process.env.ProgramFiles = 'C:\\Program Files'
        mockExistingPaths(gitPath, bashPath)

        const result = autoDiscoverGitBash()

        expect(result).toBe(bashPath)
        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
      })

      it('should discover portable Git via where.exe and persist', () => {
        const gitPath = 'D:\\PortableApps\\Git\\bin\\git.exe'
        const bashPath = 'D:\\PortableApps\\Git\\bin\\bash.exe'

        vi.mocked(configManager.get).mockReturnValue(undefined)

        vi.mocked(fs.existsSync).mockImplementation((p) => {
          const pathStr = p?.toString() || ''
          // Common git paths don't exist
          if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
          if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
          // Portable bash path exists
          if (pathStr === bashPath) return true
          return false
        })

        vi.mocked(execFileSync).mockReturnValue(gitPath)

        const result = autoDiscoverGitBash()

        expect(result).toBe(bashPath)
        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
      })

      it('should respect user-configured path over auto-discovery', () => {
        const userConfiguredPath = 'D:\\MyGit\\bin\\bash.exe'
        const systemPath = 'C:\\Program Files\\Git\\bin\\bash.exe'

        vi.mocked(configManager.get).mockReturnValue(userConfiguredPath)
        mockExistingPaths(userConfiguredPath, systemPath)

        const result = autoDiscoverGitBash()

        expect(result).toBe(userConfiguredPath)
        expect(configManager.set).not.toHaveBeenCalled()
        // Verify findGitBash was not called for discovery
        expect(execFileSync).not.toHaveBeenCalled()
      })
    })
  })
})
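The platform gating in the test file above uses Vitest's conditional suites; a minimal sketch of the same mechanism:

import { describe, expect, it } from 'vitest'

// Skips the whole suite on non-Windows runners, mirroring the guard above.
describe.skipIf(process.platform !== 'win32')('windows-only behaviour', () => {
  it('splits on backslash separators', () => {
    expect('a\\b'.split('\\')).toEqual(['a', 'b'])
  })
})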
@ -1,10 +1,13 @@
|
|||||||
import { loggerService } from '@logger'
|
import { loggerService } from '@logger'
|
||||||
|
import type { GitBashPathInfo, GitBashPathSource } from '@shared/config/constant'
|
||||||
import { HOME_CHERRY_DIR } from '@shared/config/constant'
|
import { HOME_CHERRY_DIR } from '@shared/config/constant'
|
||||||
import { spawn } from 'child_process'
|
import { execFileSync, spawn } from 'child_process'
|
||||||
import fs from 'fs'
|
import fs from 'fs'
|
||||||
import os from 'os'
|
import os from 'os'
|
||||||
import path from 'path'
|
import path from 'path'
|
||||||
|
|
||||||
|
import { isWin } from '../constant'
|
||||||
|
import { ConfigKeys, configManager } from '../services/ConfigManager'
|
||||||
import { getResourcePath } from '.'
|
import { getResourcePath } from '.'
|
||||||
|
|
||||||
const logger = loggerService.withContext('Utils:Process')
|
const logger = loggerService.withContext('Utils:Process')
|
||||||
@ -39,7 +42,7 @@ export function runInstallScript(scriptPath: string): Promise<void> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
export async function getBinaryName(name: string): Promise<string> {
|
export async function getBinaryName(name: string): Promise<string> {
|
||||||
if (process.platform === 'win32') {
|
if (isWin) {
|
||||||
return `${name}.exe`
|
return `${name}.exe`
|
||||||
}
|
}
|
||||||
return name
|
return name
|
||||||
@ -58,5 +61,243 @@ export async function getBinaryPath(name?: string): Promise<string> {
|
|||||||
|
|
||||||
export async function isBinaryExists(name: string): Promise<boolean> {
|
export async function isBinaryExists(name: string): Promise<boolean> {
|
||||||
const cmd = await getBinaryPath(name)
|
const cmd = await getBinaryPath(name)
|
||||||
return await fs.existsSync(cmd)
|
return fs.existsSync(cmd)
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Find executable in common paths or PATH environment variable
|
||||||
|
* Based on Claude Code's implementation with security checks
|
||||||
|
* @param name - Name of the executable to find (without .exe extension)
|
||||||
|
* @returns Full path to the executable or null if not found
|
||||||
|
*/
|
||||||
|
export function findExecutable(name: string): string | null {
|
||||||
|
// This implementation uses where.exe which is Windows-only
|
||||||
|
if (!isWin) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
|
// Special handling for git - check common installation paths first
|
||||||
|
if (name === 'git') {
|
||||||
|
const commonGitPaths = [
|
||||||
|
path.join(process.env.ProgramFiles || 'C:\\Program Files', 'Git', 'cmd', 'git.exe'),
|
||||||
|
path.join(process.env['ProgramFiles(x86)'] || 'C:\\Program Files (x86)', 'Git', 'cmd', 'git.exe')
|
||||||
|
]
|
||||||
|
|
||||||
|
for (const gitPath of commonGitPaths) {
|
||||||
|
if (fs.existsSync(gitPath)) {
|
||||||
|
logger.debug(`Found ${name} at common path`, { path: gitPath })
|
||||||
|
return gitPath
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use where.exe to find executable in PATH
|
||||||
|
// Use execFileSync to prevent command injection
|
||||||
|
try {
|
||||||
|
// Add .exe extension for more precise matching on Windows
|
||||||
|
const executableName = `${name}.exe`
|
||||||
|
const result = execFileSync('where.exe', [executableName], {
|
||||||
|
encoding: 'utf8',
|
||||||
|
stdio: ['pipe', 'pipe', 'pipe']
|
||||||
|
})
|
||||||
|
|
||||||
|
// Handle both Windows (\r\n) and Unix (\n) line endings
|
||||||
|
const paths = result.trim().split(/\r?\n/).filter(Boolean)
|
||||||
|
const currentDir = process.cwd().toLowerCase()
|
||||||
|
|
||||||
|
// Security check: skip executables in current directory
|
||||||
|
for (const exePath of paths) {
|
||||||
|
// Trim whitespace from where.exe output
|
||||||
|
const cleanPath = exePath.trim()
|
||||||
|
const resolvedPath = path.resolve(cleanPath).toLowerCase()
|
||||||
|
const execDir = path.dirname(resolvedPath).toLowerCase()
|
||||||
|
|
||||||
|
// Skip if in current directory or subdirectory (potential malware)
|
||||||
|
if (execDir === currentDir || execDir.startsWith(currentDir + path.sep)) {
|
||||||
|
logger.warn('Skipping potentially malicious executable in current directory', {
|
||||||
|
path: cleanPath
|
||||||
|
})
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.debug(`Found ${name} via where.exe`, { path: cleanPath })
|
||||||
|
return cleanPath
|
||||||
|
}
|
||||||
|
|
||||||
|
return null
|
||||||
|
} catch (error) {
|
||||||
|
logger.debug(`where.exe ${name} failed`, { error })
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Find Git Bash executable on Windows
|
||||||
|
* @param customPath - Optional custom path from config
|
||||||
|
* @returns Full path to bash.exe or null if not found
|
||||||
|
*/
|
||||||
|
export function findGitBash(customPath?: string | null): string | null {
|
||||||
|
// Git Bash is Windows-only
|
||||||
|
if (!isWin) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
|
// 1. Check custom path from config first
|
||||||
|
if (customPath) {
|
||||||
|
const validated = validateGitBashPath(customPath)
|
||||||
|
if (validated) {
|
||||||
|
logger.debug('Using custom Git Bash path from config', { path: validated })
|
||||||
|
return validated
|
||||||
|
}
|
||||||
|
logger.warn('Custom Git Bash path provided but invalid', { path: customPath })
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Check environment variable override
|
||||||
|
const envOverride = process.env.CLAUDE_CODE_GIT_BASH_PATH
|
||||||
|
if (envOverride) {
|
||||||
|
const validated = validateGitBashPath(envOverride)
|
||||||
|
if (validated) {
|
||||||
|
logger.debug('Using CLAUDE_CODE_GIT_BASH_PATH override for bash.exe', { path: validated })
|
||||||
|
return validated
|
||||||
|
}
|
||||||
|
logger.warn('CLAUDE_CODE_GIT_BASH_PATH provided but path is invalid', { path: envOverride })
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. Find git.exe and derive bash.exe path
|
||||||
|
const gitPath = findExecutable('git')
|
||||||
|
if (gitPath) {
|
||||||
|
// Try multiple possible locations for bash.exe relative to git.exe
|
||||||
|
// Different Git installations have different directory structures
|
||||||
|
const possibleBashPaths = [
|
||||||
|
path.join(gitPath, '..', '..', 'bin', 'bash.exe'), // Standard Git: git.exe at Git/cmd/ -> navigate up 2 levels -> then bin/bash.exe
|
||||||
|
path.join(gitPath, '..', 'bash.exe'), // Portable Git: git.exe at Git/bin/ -> bash.exe in same directory
|
||||||
|
path.join(gitPath, '..', '..', 'usr', 'bin', 'bash.exe') // MSYS2 Git: git.exe at msys64/usr/bin/ -> navigate up 2 levels -> then usr/bin/bash.exe
|
||||||
|
]
|
||||||
|
|
||||||
|
for (const bashPath of possibleBashPaths) {
|
||||||
|
const resolvedBashPath = path.resolve(bashPath)
|
||||||
|
if (fs.existsSync(resolvedBashPath)) {
|
||||||
|
logger.debug('Found bash.exe via git.exe path derivation', { path: resolvedBashPath })
|
||||||
|
return resolvedBashPath
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.debug('bash.exe not found at expected locations relative to git.exe', {
|
||||||
|
gitPath,
|
||||||
|
      checkedPaths: possibleBashPaths.map((p) => path.resolve(p))
    })
  }

  // 4. Fallback: check common Git Bash paths directly
  const commonBashPaths = [
    path.join(process.env.ProgramFiles || 'C:\\Program Files', 'Git', 'bin', 'bash.exe'),
    path.join(process.env['ProgramFiles(x86)'] || 'C:\\Program Files (x86)', 'Git', 'bin', 'bash.exe'),
    ...(process.env.LOCALAPPDATA ? [path.join(process.env.LOCALAPPDATA, 'Programs', 'Git', 'bin', 'bash.exe')] : [])
  ]

  for (const bashPath of commonBashPaths) {
    if (fs.existsSync(bashPath)) {
      logger.debug('Found bash.exe at common path', { path: bashPath })
      return bashPath
    }
  }

  logger.debug('Git Bash not found - checked git derivation and common paths')
  return null
}

export function validateGitBashPath(customPath?: string | null): string | null {
  if (!customPath) {
    return null
  }

  const resolved = path.resolve(customPath)

  if (!fs.existsSync(resolved)) {
    logger.warn('Custom Git Bash path does not exist', { path: resolved })
    return null
  }

  const isExe = resolved.toLowerCase().endsWith('bash.exe')
  if (!isExe) {
    logger.warn('Custom Git Bash path is not bash.exe', { path: resolved })
    return null
  }

  logger.debug('Validated custom Git Bash path', { path: resolved })
  return resolved
}

/**
 * Auto-discover and persist Git Bash path if not already configured
 * Only called when Git Bash is actually needed
 *
 * Precedence order:
 * 1. CLAUDE_CODE_GIT_BASH_PATH environment variable (highest - runtime override)
 * 2. Configured path from settings (manual or auto)
 * 3. Auto-discovery via findGitBash (only if no valid config exists)
 */
export function autoDiscoverGitBash(): string | null {
  if (!isWin) {
    return null
  }

  // 1. Check environment variable override first (highest priority)
  const envOverride = process.env.CLAUDE_CODE_GIT_BASH_PATH
  if (envOverride) {
    const validated = validateGitBashPath(envOverride)
    if (validated) {
      logger.debug('Using CLAUDE_CODE_GIT_BASH_PATH override', { path: validated })
      return validated
    }
    logger.warn('CLAUDE_CODE_GIT_BASH_PATH provided but path is invalid', { path: envOverride })
  }

  // 2. Check if a path is already configured
  const existingPath = configManager.get<string | undefined>(ConfigKeys.GitBashPath)
  const existingSource = configManager.get<GitBashPathSource | undefined>(ConfigKeys.GitBashPathSource)

  if (existingPath) {
    const validated = validateGitBashPath(existingPath)
    if (validated) {
      return validated
    }
    // Existing path is invalid, try to auto-discover
    logger.warn('Existing Git Bash path is invalid, attempting auto-discovery', {
      path: existingPath,
      source: existingSource
    })
  }

  // 3. Try to find Git Bash via auto-discovery
  const discoveredPath = findGitBash()
  if (discoveredPath) {
    // Persist the discovered path with 'auto' source
    configManager.set(ConfigKeys.GitBashPath, discoveredPath)
    configManager.set(ConfigKeys.GitBashPathSource, 'auto')
    logger.info('Auto-discovered Git Bash path', { path: discoveredPath })
  }

  return discoveredPath
}

/**
 * Get Git Bash path info including source
 * If no path is configured, triggers auto-discovery first
 */
export function getGitBashPathInfo(): GitBashPathInfo {
  if (!isWin) {
    return { path: null, source: null }
  }

  let path = configManager.get<string | null>(ConfigKeys.GitBashPath) ?? null
  let source = configManager.get<GitBashPathSource | null>(ConfigKeys.GitBashPathSource) ?? null

  // If no path configured, trigger auto-discovery (handles upgrade from old versions)
  if (!path) {
    path = autoDiscoverGitBash()
    source = path ? 'auto' : null
  }

  return { path, source }
}
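To show how these helpers compose, here is a minimal sketch of a main-process IPC handler for the `System_SetGitBashPath` channel introduced in the preload diff below. The handler body is our illustration, not code from this changeset; only `validateGitBashPath`, `configManager`, the `ConfigKeys` entries, and the channel name come from the diffs.

```typescript
import { ipcMain } from 'electron'

// Hypothetical wiring (assumption): persist a user-chosen path as 'manual',
// or clear it so autoDiscoverGitBash() runs again on next use.
ipcMain.handle(IpcChannel.System_SetGitBashPath, (_event, newPath: string | null): boolean => {
  if (newPath === null) {
    configManager.set(ConfigKeys.GitBashPath, undefined)
    configManager.set(ConfigKeys.GitBashPathSource, undefined)
    return true
  }
  const validated = validateGitBashPath(newPath)
  if (!validated) {
    return false // reject paths that don't exist or don't end in bash.exe
  }
  configManager.set(ConfigKeys.GitBashPath, validated)
  configManager.set(ConfigKeys.GitBashPathSource, 'manual')
  return true
})
```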
@@ -2,9 +2,10 @@ import type { PermissionUpdate } from '@anthropic-ai/claude-agent-sdk'
 import { electronAPI } from '@electron-toolkit/preload'
 import type { SpanEntity, TokenUsage } from '@mcp-trace/trace-core'
 import type { SpanContext } from '@opentelemetry/api'
-import type { TerminalConfig, UpgradeChannel } from '@shared/config/constant'
+import type { GitBashPathInfo, TerminalConfig, UpgradeChannel } from '@shared/config/constant'
 import type { LogLevel, LogSourceWithContext } from '@shared/config/logger'
 import type { FileChangeEvent, WebviewKeyEvent } from '@shared/config/types'
+import type { MCPServerLogEntry } from '@shared/config/types'
 import { IpcChannel } from '@shared/IpcChannel'
 import type { Notification } from '@types'
 import type {
@@ -123,7 +124,11 @@ const api = {
     getDeviceType: () => ipcRenderer.invoke(IpcChannel.System_GetDeviceType),
     getHostname: () => ipcRenderer.invoke(IpcChannel.System_GetHostname),
     getCpuName: () => ipcRenderer.invoke(IpcChannel.System_GetCpuName),
-    checkGitBash: (): Promise<boolean> => ipcRenderer.invoke(IpcChannel.System_CheckGitBash)
+    checkGitBash: (): Promise<boolean> => ipcRenderer.invoke(IpcChannel.System_CheckGitBash),
+    getGitBashPath: (): Promise<string | null> => ipcRenderer.invoke(IpcChannel.System_GetGitBashPath),
+    getGitBashPathInfo: (): Promise<GitBashPathInfo> => ipcRenderer.invoke(IpcChannel.System_GetGitBashPathInfo),
+    setGitBashPath: (newPath: string | null): Promise<boolean> =>
+      ipcRenderer.invoke(IpcChannel.System_SetGitBashPath, newPath)
   },
   devTools: {
     toggle: () => ipcRenderer.invoke(IpcChannel.System_ToggleDevTools)
@@ -221,6 +226,10 @@ const api = {
     startFileWatcher: (dirPath: string, config?: any) =>
       ipcRenderer.invoke(IpcChannel.File_StartWatcher, dirPath, config),
     stopFileWatcher: () => ipcRenderer.invoke(IpcChannel.File_StopWatcher),
+    pauseFileWatcher: () => ipcRenderer.invoke(IpcChannel.File_PauseWatcher),
+    resumeFileWatcher: () => ipcRenderer.invoke(IpcChannel.File_ResumeWatcher),
+    batchUploadMarkdown: (filePaths: string[], targetPath: string) =>
+      ipcRenderer.invoke(IpcChannel.File_BatchUploadMarkdown, filePaths, targetPath),
     onFileChange: (callback: (data: FileChangeEvent) => void) => {
       const listener = (_event: Electron.IpcRendererEvent, data: any) => {
         if (data && typeof data === 'object') {
@@ -368,7 +377,16 @@ const api = {
     },
     abortTool: (callId: string) => ipcRenderer.invoke(IpcChannel.Mcp_AbortTool, callId),
     getServerVersion: (server: MCPServer): Promise<string | null> =>
-      ipcRenderer.invoke(IpcChannel.Mcp_GetServerVersion, server)
+      ipcRenderer.invoke(IpcChannel.Mcp_GetServerVersion, server),
+    getServerLogs: (server: MCPServer): Promise<MCPServerLogEntry[]> =>
+      ipcRenderer.invoke(IpcChannel.Mcp_GetServerLogs, server),
+    onServerLog: (callback: (log: MCPServerLogEntry & { serverId?: string }) => void) => {
+      const listener = (_event: Electron.IpcRendererEvent, log: MCPServerLogEntry & { serverId?: string }) => {
+        callback(log)
+      }
+      ipcRenderer.on(IpcChannel.Mcp_ServerLog, listener)
+      return () => ipcRenderer.off(IpcChannel.Mcp_ServerLog, listener)
+    }
   },
   python: {
     execute: (script: string, context?: Record<string, any>, timeout?: number) =>
@@ -420,6 +438,8 @@ const api = {
       ipcRenderer.invoke(IpcChannel.Webview_SetOpenLinkExternal, webviewId, isExternal),
     setSpellCheckEnabled: (webviewId: number, isEnable: boolean) =>
       ipcRenderer.invoke(IpcChannel.Webview_SetSpellCheckEnabled, webviewId, isEnable),
+    printToPDF: (webviewId: number) => ipcRenderer.invoke(IpcChannel.Webview_PrintToPDF, webviewId),
+    saveAsHTML: (webviewId: number) => ipcRenderer.invoke(IpcChannel.Webview_SaveAsHTML, webviewId),
     onFindShortcut: (callback: (payload: WebviewKeyEvent) => void) => {
       const listener = (_event: Electron.IpcRendererEvent, payload: WebviewKeyEvent) => {
         callback(payload)
@@ -452,7 +472,10 @@ const api = {
       ipcRenderer.invoke(IpcChannel.Selection_ProcessAction, actionItem, isFullScreen),
     closeActionWindow: () => ipcRenderer.invoke(IpcChannel.Selection_ActionWindowClose),
     minimizeActionWindow: () => ipcRenderer.invoke(IpcChannel.Selection_ActionWindowMinimize),
-    pinActionWindow: (isPinned: boolean) => ipcRenderer.invoke(IpcChannel.Selection_ActionWindowPin, isPinned)
+    pinActionWindow: (isPinned: boolean) => ipcRenderer.invoke(IpcChannel.Selection_ActionWindowPin, isPinned),
+    // [Windows only] Electron bug workaround - can be removed once https://github.com/electron/electron/issues/48554 is fixed
+    resizeActionWindow: (deltaX: number, deltaY: number, direction: string) =>
+      ipcRenderer.invoke(IpcChannel.Selection_ActionWindowResize, deltaX, deltaY, direction)
   },
   agentTools: {
     respondToPermission: (payload: {
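On the renderer side, the new bridge methods can be consumed roughly like this. This is a sketch that assumes, as elsewhere in Cherry Studio, that the `api` object above is exposed to the renderer as `window.api` with these methods under a `system` namespace; the settings flow itself is illustrative.

```typescript
// Illustrative only: surface the configured Git Bash path and how it was chosen.
async function showGitBashStatus(): Promise<void> {
  const info = await window.api.system.getGitBashPathInfo()
  if (info.path) {
    console.log(`Git Bash (${info.source}): ${info.path}`) // source is 'auto' or 'manual'
    return
  }
  // Example path only; setGitBashPath resolves to false if validation fails.
  const accepted = await window.api.system.setGitBashPath('C:\\Program Files\\Git\\bin\\bash.exe')
  if (!accepted) {
    console.warn('Git Bash path was rejected by validation')
  }
}
```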
@@ -7,10 +7,10 @@
  * 2. Keep the interface compatible for now
  */

-import type { GatewayLanguageModelEntry } from '@ai-sdk/gateway'
 import { createExecutor } from '@cherrystudio/ai-core'
 import { loggerService } from '@logger'
 import { getEnableDeveloperMode } from '@renderer/hooks/useSettings'
+import { normalizeGatewayModels, normalizeSdkModels } from '@renderer/services/models/ModelAdapter'
 import { addSpan, endSpan } from '@renderer/services/SpanManagerService'
 import type { StartSpanParams } from '@renderer/trace/types/ModelSpanEntity'
 import { type Assistant, type GenerateImageParams, type Model, type Provider, SystemProviderIds } from '@renderer/types'
@@ -27,6 +27,7 @@ import { buildAiSdkMiddlewares } from './middleware/AiSdkMiddlewareBuilder'
 import { buildPlugins } from './plugins/PluginBuilder'
 import { createAiSdkProvider } from './provider/factory'
 import {
+  adaptProvider,
   getActualProvider,
   isModernSdkSupported,
   prepareSpecialProviderConfig,
@@ -64,12 +65,11 @@ export default class ModernAiProvider {
  * - URL will be automatically formatted via `formatProviderApiHost`, adding version suffixes like `/v1`
  *
  * 2. When called with `(model, provider)`:
- * - **Directly uses the provided provider WITHOUT going through `getActualProvider`**
- * - **URL will NOT be automatically formatted, `/v1` suffix will NOT be added**
- * - This is legacy behavior kept for backward compatibility
+ * - The provided provider will be adapted via `adaptProvider`
+ * - URL formatting behavior depends on the adapted result
  *
  * 3. When called with `(provider)`:
- * - Directly uses the provider without requiring a model
+ * - The provider will be adapted via `adaptProvider`
  * - Used for operations that don't need a model (e.g., fetchModels)
  *
  * @example
@@ -77,7 +77,7 @@ export default class ModernAiProvider {
  * // Recommended: Auto-format URL
  * const ai = new ModernAiProvider(model)
  *
- * // Not recommended: Skip URL formatting (only for special cases)
+ * // Provider will be adapted
  * const ai = new ModernAiProvider(model, customProvider)
  *
  * // For operations that don't need a model
@@ -91,12 +91,14 @@ export default class ModernAiProvider {
     if (this.isModel(modelOrProvider)) {
       // A Model was passed in
       this.model = modelOrProvider
-      this.actualProvider = provider || getActualProvider(modelOrProvider)
+      this.actualProvider = provider
+        ? adaptProvider({ provider, model: modelOrProvider })
+        : getActualProvider(modelOrProvider)
       // Only save the config here; don't create the executor ahead of time
       this.config = providerToAiSdkConfig(this.actualProvider, modelOrProvider)
     } else {
       // A Provider was passed in
-      this.actualProvider = modelOrProvider
+      this.actualProvider = adaptProvider({ provider: modelOrProvider })
       // model is optional; some operations (e.g. fetchModels) don't need one
     }

@@ -120,9 +122,12 @@ export default class ModernAiProvider {
       throw new Error('Model is required for completions. Please use constructor with model parameter.')
     }

-    // Regenerate the config on every request so that API key rotation takes effect
-    this.config = providerToAiSdkConfig(this.actualProvider, this.model)
-    logger.debug('Generated provider config for completions', this.config)
+    // Config is now set in constructor, ApiService handles key rotation before passing provider
+    if (!this.config) {
+      // If config wasn't set in constructor (when provider only), generate it now
+      this.config = providerToAiSdkConfig(this.actualProvider, this.model!)
+    }
+    logger.debug('Using provider config for completions', this.config)

     // Check that config exists
     if (!this.config) {
@@ -189,7 +194,7 @@ export default class ModernAiProvider {
     config: ModernAiProviderConfig
   ): Promise<CompletionsResult> {
     // ai-gateway is not an image/generation endpoint, so don't take the legacy path for it
-    if (config.isImageGenerationEndpoint && this.getActualProvider().id !== SystemProviderIds['ai-gateway']) {
+    if (config.isImageGenerationEndpoint && this.getActualProvider().id !== SystemProviderIds.gateway) {
       // Use the legacy implementation for image generation (supports image editing and other advanced features)
       if (!config.uiMessages) {
         throw new Error('uiMessages is required for image generation endpoint')
@@ -480,19 +485,12 @@ export default class ModernAiProvider {

   // Proxy the remaining methods to the legacy implementation
   public async models() {
-    if (this.actualProvider.id === SystemProviderIds['ai-gateway']) {
-      const formatModel = function (models: GatewayLanguageModelEntry[]): Model[] {
-        return models.map((m) => ({
-          id: m.id,
-          name: m.name,
-          provider: 'gateway',
-          group: m.id.split('/')[0],
-          description: m.description ?? undefined
-        }))
-      }
-      return formatModel((await gateway.getAvailableModels()).models)
+    if (this.actualProvider.id === SystemProviderIds.gateway) {
+      const gatewayModels = (await gateway.getAvailableModels()).models
+      return normalizeGatewayModels(this.actualProvider, gatewayModels)
     }
-    return this.legacyProvider.models()
+    const sdkModels = await this.legacyProvider.models()
+    return normalizeSdkModels(this.actualProvider, sdkModels)
   }

   public async getEmbeddingDimensions(model: Model): Promise<number> {
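For clarity, the three constructor shapes documented above, in use. This is a sketch of ours: the variable names are illustrative and the calls assume an async context.

```typescript
// 1. Model only: provider is resolved (and adapted) via getActualProvider
const byModel = new ModernAiProvider(model)

// 2. Model + provider: the supplied provider is now normalized via adaptProvider
const byBoth = new ModernAiProvider(model, customProvider)

// 3. Provider only: for model-less operations such as listing models
const byProvider = new ModernAiProvider(customProvider)
const models = await byProvider.models() // gateway providers go through normalizeGatewayModels
```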
@@ -2,13 +2,15 @@ import { loggerService } from '@logger'
 import {
   getModelSupportedVerbosity,
   isFunctionCallingModel,
-  isNotSupportTemperatureAndTopP,
   isOpenAIModel,
-  isSupportFlexServiceTierModel
+  isSupportFlexServiceTierModel,
+  isSupportTemperatureModel,
+  isSupportTopPModel
 } from '@renderer/config/models'
 import { REFERENCE_PROMPT } from '@renderer/config/prompts'
 import { getLMStudioKeepAliveTime } from '@renderer/hooks/useLMStudio'
 import { getAssistantSettings } from '@renderer/services/AssistantService'
+import type { RootState } from '@renderer/store'
 import type {
   Assistant,
   GenerateImageParams,
@@ -199,7 +201,7 @@ export abstract class BaseApiClient<
   }

   public getTemperature(assistant: Assistant, model: Model): number | undefined {
-    if (isNotSupportTemperatureAndTopP(model)) {
+    if (!isSupportTemperatureModel(model)) {
       return undefined
     }
     const assistantSettings = getAssistantSettings(assistant)
@@ -207,7 +209,7 @@
   }

   public getTopP(assistant: Assistant, model: Model): number | undefined {
-    if (isNotSupportTemperatureAndTopP(model)) {
+    if (!isSupportTopPModel(model)) {
       return undefined
     }
     const assistantSettings = getAssistantSettings(assistant)
@@ -245,23 +247,20 @@ export abstract class BaseApiClient<

   protected getVerbosity(model?: Model): OpenAIVerbosity {
     try {
-      const state = window.store?.getState()
+      const state = window.store?.getState() as RootState
       const verbosity = state?.settings?.openAI?.verbosity

-      if (verbosity && ['low', 'medium', 'high'].includes(verbosity)) {
-        // If model is provided, check if the verbosity is supported by the model
-        if (model) {
-          const supportedVerbosity = getModelSupportedVerbosity(model)
-          // Use user's verbosity if supported, otherwise use the first supported option
-          return supportedVerbosity.includes(verbosity) ? verbosity : supportedVerbosity[0]
-        }
-        return verbosity
+      // If model is provided, check if the verbosity is supported by the model
+      if (model) {
+        const supportedVerbosity = getModelSupportedVerbosity(model)
+        // Use user's verbosity if supported, otherwise use the first supported option
+        return supportedVerbosity.includes(verbosity) ? verbosity : supportedVerbosity[0]
       }
+      return verbosity
     } catch (error) {
-      logger.warn('Failed to get verbosity from state:', error as Error)
+      logger.warn('Failed to get verbosity from state. Fallback to undefined.', error as Error)
+      return undefined
     }
-
-    return 'medium'
   }

   protected getTimeout(model: Model) {
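The practical effect of splitting the old combined check is that temperature and top_p support can now differ per model. A minimal usage sketch of ours, where `client` stands for any concrete BaseApiClient subclass:

```typescript
// undefined means "omit the parameter from the request body" downstream.
const temperature = client.getTemperature(assistant, model) // undefined if the model rejects temperature
const top_p = client.getTopP(assistant, model) // undefined if the model rejects top_p
```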
@@ -124,7 +124,8 @@ export class AnthropicAPIClient extends BaseApiClient<

   override async listModels(): Promise<Anthropic.ModelInfo[]> {
     const sdk = (await this.getSdkInstance()) as Anthropic
-    const response = await sdk.models.list()
+    // prevent auto appended /v1. It's included in baseUrl.
+    const response = await sdk.models.list({ path: '/models' })
     return response.data
   }

|||||||
@ -173,13 +173,15 @@ export class GeminiAPIClient extends BaseApiClient<
|
|||||||
return this.sdkInstance
|
return this.sdkInstance
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const apiVersion = this.getApiVersion()
|
||||||
|
|
||||||
this.sdkInstance = new GoogleGenAI({
|
this.sdkInstance = new GoogleGenAI({
|
||||||
vertexai: false,
|
vertexai: false,
|
||||||
apiKey: this.apiKey,
|
apiKey: this.apiKey,
|
||||||
apiVersion: this.getApiVersion(),
|
apiVersion,
|
||||||
httpOptions: {
|
httpOptions: {
|
||||||
baseUrl: this.getBaseURL(),
|
baseUrl: this.getBaseURL(),
|
||||||
apiVersion: this.getApiVersion(),
|
apiVersion,
|
||||||
headers: {
|
headers: {
|
||||||
...this.provider.extra_headers
|
...this.provider.extra_headers
|
||||||
}
|
}
|
||||||
@ -200,7 +202,7 @@ export class GeminiAPIClient extends BaseApiClient<
|
|||||||
return trailingVersion
|
return trailingVersion
|
||||||
}
|
}
|
||||||
|
|
||||||
return 'v1beta'
|
return ''
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
 import {
   findTokenLimit,
   GEMINI_FLASH_MODEL_REGEX,
-  getThinkModelType,
+  getModelSupportedReasoningEffortOptions,
   isDeepSeekHybridInferenceModel,
   isDoubaoThinkingAutoModel,
   isGPT5SeriesModel,
@@ -32,9 +32,7 @@ import {
   isSupportedThinkingTokenModel,
   isSupportedThinkingTokenQwenModel,
   isSupportedThinkingTokenZhipuModel,
-  isSupportVerbosityModel,
   isVisionModel,
-  MODEL_SUPPORTED_REASONING_EFFORT,
   ZHIPU_RESULT_TOKENS
 } from '@renderer/config/models'
 import { mapLanguageToQwenMTModel } from '@renderer/config/translate'
@@ -144,6 +142,10 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
       return { thinking: { type: reasoningEffort ? 'enabled' : 'disabled' } }
     }

+    if (reasoningEffort === 'default') {
+      return {}
+    }
+
     if (!reasoningEffort) {
       // DeepSeek hybrid inference models, v3.1 and maybe more in the future
       // Different providers control thinking in different ways; unify the handling here
@@ -305,16 +307,15 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
     // Grok models/Perplexity models/OpenAI models
     if (isSupportedReasoningEffortModel(model)) {
       // Check whether the model supports the selected option
-      const modelType = getThinkModelType(model)
-      const supportedOptions = MODEL_SUPPORTED_REASONING_EFFORT[modelType]
-      if (supportedOptions.includes(reasoningEffort)) {
+      const supportedOptions = getModelSupportedReasoningEffortOptions(model)?.filter((option) => option !== 'default')
+      if (supportedOptions?.includes(reasoningEffort)) {
         return {
           reasoning_effort: reasoningEffort
         }
       } else {
         // If unsupported, fall back to the first supported value
         return {
-          reasoning_effort: supportedOptions[0]
+          reasoning_effort: supportedOptions?.[0]
         }
       }
     }
@@ -714,13 +715,8 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
       ...modalities,
       // groq has a different service tier configuration that doesn't match the openai interface types
       service_tier: this.getServiceTier(model) as OpenAIServiceTier,
-      ...(isSupportVerbosityModel(model)
-        ? {
-            text: {
-              verbosity: this.getVerbosity(model)
-            }
-          }
-        : {}),
+      // verbosity. getVerbosity ensures the returned value is valid.
+      verbosity: this.getVerbosity(model),
       ...this.getProviderSpecificParameters(assistant, model),
       ...reasoningEffort,
       // ...getOpenAIWebSearchParams(model, enableWebSearch),
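Pulling the reasoning-effort changes together, the control flow now behaves roughly as follows. This is a summary sketch of ours that reuses names from the diff; the wrapper function itself is not part of the change.

```typescript
// 'default' is a sentinel meaning "send nothing and let the provider decide".
function reasoningEffortParams(model: Model, effort: string): Record<string, unknown> {
  if (effort === 'default') return {}
  const supported = getModelSupportedReasoningEffortOptions(model)?.filter((o) => o !== 'default')
  // Use the requested effort if the model supports it, otherwise fall back to the first supported option.
  return { reasoning_effort: supported?.includes(effort) ? effort : supported?.[0] }
}
```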
@@ -11,7 +11,7 @@ import { getStoreSetting } from '@renderer/hooks/useSettings'
 import { getAssistantSettings } from '@renderer/services/AssistantService'
 import store from '@renderer/store'
 import type { SettingsState } from '@renderer/store/settings'
-import type { Assistant, GenerateImageParams, Model, Provider } from '@renderer/types'
+import { type Assistant, type GenerateImageParams, type Model, type Provider } from '@renderer/types'
 import type {
   OpenAIResponseSdkMessageParam,
   OpenAIResponseSdkParams,
@@ -25,7 +25,8 @@ import type {
   OpenAISdkRawOutput,
   ReasoningEffortOptionalParams
 } from '@renderer/types/sdk'
-import { formatApiHost } from '@renderer/utils/api'
+import { withoutTrailingSlash } from '@renderer/utils/api'
+import { isOllamaProvider } from '@renderer/utils/provider'

 import { BaseApiClient } from '../BaseApiClient'

@@ -48,8 +49,9 @@ export abstract class OpenAIBaseClient<
   }

   // Only applies to openai
-  override getBaseURL(isSupportedAPIVerion: boolean = true): string {
-    return formatApiHost(this.provider.apiHost, isSupportedAPIVerion)
+  override getBaseURL(): string {
+    // apiHost is formatted when called by AiProvider
+    return this.provider.apiHost
   }

   override async generateImage({
@@ -67,7 +69,7 @@ export abstract class OpenAIBaseClient<
     const sdk = await this.getSdkInstance()
     const response = (await sdk.request({
       method: 'post',
-      path: '/images/generations',
+      path: '/v1/images/generations',
       signal,
       body: {
         model,
@@ -86,7 +88,11 @@ export abstract class OpenAIBaseClient<
   }

   override async getEmbeddingDimensions(model: Model): Promise<number> {
-    const sdk = await this.getSdkInstance()
+    let sdk: OpenAI = await this.getSdkInstance()
+    if (isOllamaProvider(this.provider)) {
+      const embedBaseUrl = `${this.provider.apiHost.replace(/(\/(api|v1))\/?$/, '')}/v1`
+      sdk = sdk.withOptions({ baseURL: embedBaseUrl })
+    }

     const data = await sdk.embeddings.create({
       model: model.id,
@@ -99,6 +105,17 @@ export abstract class OpenAIBaseClient<
   override async listModels(): Promise<OpenAI.Models.Model[]> {
     try {
       const sdk = await this.getSdkInstance()
+      if (this.provider.id === 'openrouter') {
+        // https://openrouter.ai/docs/api/api-reference/embeddings/list-embeddings-models
+        const embedBaseUrl = 'https://openrouter.ai/api/v1/embeddings'
+        const embedSdk = sdk.withOptions({ baseURL: embedBaseUrl })
+        const modelPromise = sdk.models.list()
+        const embedModelPromise = embedSdk.models.list()
+        const [modelResponse, embedModelResponse] = await Promise.all([modelPromise, embedModelPromise])
+        const models = [...modelResponse.data, ...embedModelResponse.data]
+        const uniqueModels = Array.from(new Map(models.map((model) => [model.id, model])).values())
+        return uniqueModels.filter(isSupportedModel)
+      }
       if (this.provider.id === 'github') {
         // GitHub Models uses different baseUrls for its models and chat completions endpoints
         const baseUrl = 'https://models.github.ai/catalog/'
@@ -115,6 +132,34 @@ export abstract class OpenAIBaseClient<
           }))
           .filter(isSupportedModel)
       }
+
+      if (isOllamaProvider(this.provider)) {
+        const baseUrl = withoutTrailingSlash(this.getBaseURL())
+          .replace(/\/v1$/, '')
+          .replace(/\/api$/, '')
+        const response = await fetch(`${baseUrl}/api/tags`, {
+          headers: {
+            Authorization: `Bearer ${this.apiKey}`,
+            ...this.defaultHeaders(),
+            ...this.provider.extra_headers
+          }
+        })
+
+        if (!response.ok) {
+          throw new Error(`Ollama server returned ${response.status} ${response.statusText}`)
+        }
+
+        const data = await response.json()
+        if (!data?.models || !Array.isArray(data.models)) {
+          throw new Error('Invalid response from Ollama API: missing models array')
+        }
+
+        return data.models.map((model) => ({
+          id: model.name,
+          object: 'model',
+          owned_by: 'ollama'
+        }))
+      }
       const response = await sdk.models.list()
       if (this.provider.id === 'together') {
         // @ts-ignore key is not typed
@@ -144,6 +189,7 @@ export abstract class OpenAIBaseClient<

     let apiKeyForSdkInstance = this.apiKey
     let baseURLForSdkInstance = this.getBaseURL()
+    logger.debug('baseURLForSdkInstance', { baseURLForSdkInstance })
     let headersForSdkInstance = {
       ...this.defaultHeaders(),
       ...this.provider.extra_headers
@@ -155,7 +201,7 @@ export abstract class OpenAIBaseClient<
       // this.provider.apiKey must not be modified
       // this.provider.apiKey = token
       apiKeyForSdkInstance = token
-      baseURLForSdkInstance = this.getBaseURL(false)
+      baseURLForSdkInstance = this.getBaseURL()
       headersForSdkInstance = {
         ...headersForSdkInstance,
         ...COPILOT_DEFAULT_HEADERS
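For reference, the `/api/tags` payload the new Ollama branch consumes looks roughly like the following. Field names follow the public Ollama REST API; the type alias is ours, and only `name` is used by the mapping above.

```typescript
type OllamaTagsResponse = {
  models: Array<{
    name: string // e.g. 'llama3.1:8b'; mapped to the OpenAI-style model id above
    modified_at?: string
    size?: number
    digest?: string
  }>
}
```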
@@ -122,6 +122,7 @@ export class OpenAIResponseAPIClient extends OpenAIBaseClient<
     if (this.sdkInstance) {
       return this.sdkInstance
     }
+    const baseUrl = this.getBaseURL()

     if (this.provider.id === 'azure-openai' || this.provider.type === 'azure-openai') {
       return new AzureOpenAI({
@@ -134,7 +135,7 @@ export class OpenAIResponseAPIClient extends OpenAIBaseClient<
     return new OpenAI({
       dangerouslyAllowBrowser: true,
       apiKey: this.apiKey,
-      baseURL: this.getBaseURL(),
+      baseURL: baseUrl,
       defaultHeaders: {
         ...this.defaultHeaders(),
         ...this.provider.extra_headers
@@ -3,6 +3,7 @@ import { loggerService } from '@logger'
 import { isSupportedModel } from '@renderer/config/models'
 import type { Provider } from '@renderer/types'
 import { objectKeys } from '@renderer/types'
+import { formatApiHost, withoutTrailingApiVersion } from '@renderer/utils'

 import { OpenAIAPIClient } from '../openai/OpenAIApiClient'

@@ -16,11 +17,8 @@ export class OVMSClient extends OpenAIAPIClient {
   override async listModels(): Promise<OpenAI.Models.Model[]> {
     try {
       const sdk = await this.getSdkInstance()
-      const chatModelsResponse = await sdk.request({
-        method: 'get',
-        path: '../v1/config'
-      })
+      const url = formatApiHost(withoutTrailingApiVersion(this.getBaseURL()), true, 'v1')
+      const chatModelsResponse = await sdk.withOptions({ baseURL: url }).get('/config')
       logger.debug(`Chat models response: ${JSON.stringify(chatModelsResponse)}`)

       // Parse the config response to extract model information
@@ -2,7 +2,6 @@ import { loggerService } from '@logger'
 import { ApiClientFactory } from '@renderer/aiCore/legacy/clients/ApiClientFactory'
 import type { BaseApiClient } from '@renderer/aiCore/legacy/clients/BaseApiClient'
 import { isDedicatedImageGenerationModel, isFunctionCallingModel } from '@renderer/config/models'
-import { getProviderByModel } from '@renderer/services/AssistantService'
 import { withSpanResult } from '@renderer/services/SpanManagerService'
 import type { StartSpanParams } from '@renderer/trace/types/ModelSpanEntity'
 import type { GenerateImageParams, Model, Provider } from '@renderer/types'
@@ -160,9 +159,6 @@ export default class AiProvider {
   public async getEmbeddingDimensions(model: Model): Promise<number> {
     try {
       // Use the SDK instance to test embedding capabilities
-      if (this.apiClient instanceof OpenAIResponseAPIClient && getProviderByModel(model).type === 'azure-openai') {
-        this.apiClient = this.apiClient.getClient(model) as BaseApiClient
-      }
       const dimensions = await this.apiClient.getEmbeddingDimensions(model)
       return dimensions
     } catch (error) {
@@ -4,10 +4,9 @@ import { isGemini3Model, isSupportedThinkingTokenQwenModel } from '@renderer/config/models'
 import type { MCPTool } from '@renderer/types'
 import { type Assistant, type Message, type Model, type Provider, SystemProviderIds } from '@renderer/types'
 import type { Chunk } from '@renderer/types/chunk'
-import { isSupportEnableThinkingProvider } from '@renderer/utils/provider'
+import { isOllamaProvider, isSupportEnableThinkingProvider } from '@renderer/utils/provider'
 import type { LanguageModelMiddleware } from 'ai'
 import { extractReasoningMiddleware, simulateStreamingMiddleware } from 'ai'
-import { isEmpty } from 'lodash'

 import { getAiSdkProviderId } from '../provider/factory'
 import { isOpenRouterGeminiGenerateImageModel } from '../utils/image'
@@ -16,7 +15,6 @@ import { openrouterGenerateImageMiddleware } from './openrouterGenerateImageMiddleware'
 import { openrouterReasoningMiddleware } from './openrouterReasoningMiddleware'
 import { qwenThinkingMiddleware } from './qwenThinkingMiddleware'
 import { skipGeminiThoughtSignatureMiddleware } from './skipGeminiThoughtSignatureMiddleware'
-import { toolChoiceMiddleware } from './toolChoiceMiddleware'

 const logger = loggerService.withContext('AiSdkMiddlewareBuilder')

@@ -136,15 +134,6 @@ export class AiSdkMiddlewareBuilder {
 export function buildAiSdkMiddlewares(config: AiSdkMiddlewareConfig): LanguageModelMiddleware[] {
   const builder = new AiSdkMiddlewareBuilder()

-  // 0. Knowledge-base forced-call middleware (must come first so the knowledge base is force-called on the first round)
-  if (!isEmpty(config.assistant?.knowledge_bases?.map((base) => base.id)) && config.knowledgeRecognition !== 'on') {
-    builder.add({
-      name: 'force-knowledge-first',
-      middleware: toolChoiceMiddleware('builtin_knowledge_search')
-    })
-    logger.debug('Added toolChoice middleware to force knowledge base search on first round')
-  }
-
   // 1. Add provider-specific middlewares
   if (config.provider) {
     addProviderSpecificMiddlewares(builder, config)
@@ -240,6 +229,7 @@ function addModelSpecificMiddlewares(builder: AiSdkMiddlewareBuilder, config: AiSdkMiddlewareConfig) {
   // Use /think or /no_think suffix to control thinking mode
   if (
     config.provider &&
+    !isOllamaProvider(config.provider) &&
     isSupportedThinkingTokenQwenModel(config.model) &&
     !isSupportEnableThinkingProvider(config.provider)
   ) {
@@ -31,7 +31,7 @@ import { webSearchToolWithPreExtractedKeywords } from '../tools/WebSearchTool'

 const logger = loggerService.withContext('SearchOrchestrationPlugin')

-const getMessageContent = (message: ModelMessage) => {
+export const getMessageContent = (message: ModelMessage) => {
   if (typeof message.content === 'string') return message.content
   return message.content.reduce((acc, part) => {
     if (part.type === 'text') {
@@ -266,14 +266,14 @@ export const searchOrchestrationPlugin = (assistant: Assistant, topicId: string)
       // Decide which kinds of search are needed
       const knowledgeBaseIds = assistant.knowledge_bases?.map((base) => base.id)
       const hasKnowledgeBase = !isEmpty(knowledgeBaseIds)
-      const knowledgeRecognition = assistant.knowledgeRecognition || 'on'
+      const knowledgeRecognition = assistant.knowledgeRecognition || 'off'
       const globalMemoryEnabled = selectGlobalMemoryEnabled(store.getState())
       const shouldWebSearch = !!assistant.webSearchProviderId
       const shouldKnowledgeSearch = hasKnowledgeBase && knowledgeRecognition === 'on'
       const shouldMemorySearch = globalMemoryEnabled && assistant.enableMemory

       // Run intent analysis
-      if (shouldWebSearch || hasKnowledgeBase) {
+      if (shouldWebSearch || shouldKnowledgeSearch) {
         const analysisResult = await analyzeSearchIntent(lastUserMessage, assistant, {
           shouldWebSearch,
           shouldKnowledgeSearch,
@@ -330,41 +330,25 @@ export const searchOrchestrationPlugin = (assistant: Assistant, topicId: string)
       // 📚 Knowledge-base search tool configuration
       const knowledgeBaseIds = assistant.knowledge_bases?.map((base) => base.id)
       const hasKnowledgeBase = !isEmpty(knowledgeBaseIds)
-      const knowledgeRecognition = assistant.knowledgeRecognition || 'on'
-
-      if (hasKnowledgeBase) {
-        if (knowledgeRecognition === 'off') {
-          // off mode: add the knowledge search tool directly, using the user message as the search keywords
-          const userMessage = userMessages[context.requestId]
-          const fallbackKeywords = {
-            question: [getMessageContent(userMessage) || 'search'],
-            rewrite: getMessageContent(userMessage) || 'search'
-          }
-          // logger.info('📚 Adding knowledge search tool (force mode)')
-          params.tools['builtin_knowledge_search'] = knowledgeSearchTool(
-            assistant,
-            fallbackKeywords,
-            getMessageContent(userMessage),
-            topicId
-          )
-          // params.toolChoice = { type: 'tool', toolName: 'builtin_knowledge_search' }
-        } else {
-          // on mode: decide whether to add the tool based on the intent-analysis result
-          const needsKnowledgeSearch =
-            analysisResult?.knowledge &&
-            analysisResult.knowledge.question &&
-            analysisResult.knowledge.question[0] !== 'not_needed'
-
-          if (needsKnowledgeSearch && analysisResult.knowledge) {
-            // logger.info('📚 Adding knowledge search tool (intent-based)')
-            const userMessage = userMessages[context.requestId]
-            params.tools['builtin_knowledge_search'] = knowledgeSearchTool(
-              assistant,
-              analysisResult.knowledge,
-              getMessageContent(userMessage),
-              topicId
-            )
-          }
-        }
-      }
+      const knowledgeRecognition = assistant.knowledgeRecognition || 'off'
+      const shouldKnowledgeSearch = hasKnowledgeBase && knowledgeRecognition === 'on'
+
+      if (shouldKnowledgeSearch) {
+        // on mode: decide whether to add the tool based on the intent-analysis result
+        const needsKnowledgeSearch =
+          analysisResult?.knowledge &&
+          analysisResult.knowledge.question &&
+          analysisResult.knowledge.question[0] !== 'not_needed'
+
+        if (needsKnowledgeSearch && analysisResult.knowledge) {
+          // logger.info('📚 Adding knowledge search tool (intent-based)')
+          const userMessage = userMessages[context.requestId]
+          params.tools['builtin_knowledge_search'] = knowledgeSearchTool(
+            assistant,
+            analysisResult.knowledge,
+            getMessageContent(userMessage),
+            topicId
+          )
+        }
+      }

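Net effect of the knowledge-search changes above, summarized as a sketch of ours: the default recognition mode flips from 'on' to 'off', and the former force-call path (fallback keywords plus the toolChoice middleware removed earlier) is gone entirely.

```typescript
// 'off' (new default): builtin_knowledge_search is never registered, no forced first call.
// 'on': the tool is registered only when intent analysis finds a usable knowledge question.
const shouldKnowledgeSearch = hasKnowledgeBase && (assistant.knowledgeRecognition || 'off') === 'on'
```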
@ -109,6 +109,20 @@ const createImageBlock = (
|
|||||||
...overrides
|
...overrides
|
||||||
})
|
})
|
||||||
|
|
||||||
|
const createThinkingBlock = (
|
||||||
|
messageId: string,
|
||||||
|
overrides: Partial<Omit<ThinkingMessageBlock, 'type' | 'messageId'>> = {}
|
||||||
|
): ThinkingMessageBlock => ({
|
||||||
|
id: overrides.id ?? `thinking-block-${++blockCounter}`,
|
||||||
|
messageId,
|
||||||
|
type: MessageBlockType.THINKING,
|
||||||
|
createdAt: overrides.createdAt ?? new Date(2024, 0, 1, 0, 0, blockCounter).toISOString(),
|
||||||
|
status: overrides.status ?? MessageBlockStatus.SUCCESS,
|
||||||
|
content: overrides.content ?? 'Let me think...',
|
||||||
|
thinking_millsec: overrides.thinking_millsec ?? 1000,
|
||||||
|
...overrides
|
||||||
|
})
|
||||||
|
|
||||||
describe('messageConverter', () => {
|
describe('messageConverter', () => {
|
||||||
beforeEach(() => {
|
beforeEach(() => {
|
||||||
convertFileBlockToFilePartMock.mockReset()
|
convertFileBlockToFilePartMock.mockReset()
|
||||||
@ -137,6 +151,73 @@ describe('messageConverter', () => {
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
|
it('extracts base64 data from data URLs and preserves mediaType', async () => {
|
||||||
|
const model = createModel()
|
||||||
|
const message = createMessage('user')
|
||||||
|
message.__mockContent = 'Check this image'
|
||||||
|
message.__mockImageBlocks = [createImageBlock(message.id, { url: 'data:image/png;base64,iVBORw0KGgoAAAANS' })]
|
||||||
|
|
||||||
|
const result = await convertMessageToSdkParam(message, true, model)
|
||||||
|
|
||||||
|
expect(result).toEqual({
|
||||||
|
role: 'user',
|
||||||
|
content: [
|
||||||
|
{ type: 'text', text: 'Check this image' },
|
||||||
|
{ type: 'image', image: 'iVBORw0KGgoAAAANS', mediaType: 'image/png' }
|
||||||
|
]
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
it('handles data URLs without mediaType gracefully', async () => {
|
||||||
|
const model = createModel()
|
||||||
|
const message = createMessage('user')
|
||||||
|
message.__mockContent = 'Check this'
|
||||||
|
message.__mockImageBlocks = [createImageBlock(message.id, { url: 'data:;base64,AAABBBCCC' })]
|
||||||
|
|
||||||
|
const result = await convertMessageToSdkParam(message, true, model)
|
||||||
|
|
||||||
|
expect(result).toEqual({
|
||||||
|
role: 'user',
|
||||||
|
content: [
|
||||||
|
{ type: 'text', text: 'Check this' },
|
||||||
|
{ type: 'image', image: 'AAABBBCCC' }
|
||||||
|
]
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
it('skips malformed data URLs without comma separator', async () => {
|
||||||
|
const model = createModel()
|
||||||
|
const message = createMessage('user')
|
||||||
|
message.__mockContent = 'Malformed data url'
|
||||||
|
message.__mockImageBlocks = [createImageBlock(message.id, { url: 'data:image/pngAAABBB' })]
|
||||||
|
|
||||||
|
const result = await convertMessageToSdkParam(message, true, model)
|
||||||
|
|
||||||
|
expect(result).toEqual({
|
||||||
|
role: 'user',
|
||||||
|
content: [
|
||||||
|
{ type: 'text', text: 'Malformed data url' }
|
||||||
|
// Malformed data URL is excluded from the content
|
||||||
|
]
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
it('handles multiple large base64 images without stack overflow', async () => {
|
||||||
|
const model = createModel()
|
||||||
|
const message = createMessage('user')
|
||||||
|
// Create large base64 strings (~500KB each) to simulate real-world large images
|
||||||
|
const largeBase64 = 'A'.repeat(500_000)
|
||||||
|
message.__mockContent = 'Check these images'
|
||||||
|
message.__mockImageBlocks = [
|
||||||
|
createImageBlock(message.id, { url: `data:image/png;base64,${largeBase64}` }),
|
||||||
|
createImageBlock(message.id, { url: `data:image/png;base64,${largeBase64}` }),
|
||||||
|
createImageBlock(message.id, { url: `data:image/png;base64,${largeBase64}` })
|
||||||
|
]
|
||||||
|
|
||||||
|
// Should not throw RangeError: Maximum call stack size exceeded
|
||||||
|
await expect(convertMessageToSdkParam(message, true, model)).resolves.toBeDefined()
|
||||||
|
})
|
||||||
|
|
||||||
it('returns file instructions as a system message when native uploads succeed', async () => {
|
it('returns file instructions as a system message when native uploads succeed', async () => {
|
||||||
const model = createModel()
|
const model = createModel()
|
||||||
const message = createMessage('user')
|
const message = createMessage('user')
|
||||||
@ -162,10 +243,27 @@ describe('messageConverter', () => {
|
|||||||
}
|
}
|
||||||
])
|
])
|
||||||
})
|
})
|
||||||
|
|
||||||
|
it('includes reasoning parts for assistant messages with thinking blocks', async () => {
|
||||||
|
const model = createModel()
|
||||||
|
const message = createMessage('assistant')
|
||||||
|
message.__mockContent = 'Here is my answer'
|
||||||
|
message.__mockThinkingBlocks = [createThinkingBlock(message.id, { content: 'Let me think...' })]
|
||||||
|
|
||||||
|
const result = await convertMessageToSdkParam(message, false, model)
|
||||||
|
|
||||||
|
expect(result).toEqual({
|
||||||
|
role: 'assistant',
|
||||||
|
content: [
|
||||||
|
{ type: 'text', text: 'Here is my answer' },
|
||||||
|
{ type: 'reasoning', text: 'Let me think...' }
|
||||||
|
]
|
||||||
|
})
|
||||||
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
describe('convertMessagesToSdkMessages', () => {
|
describe('convertMessagesToSdkMessages', () => {
|
||||||
it('appends assistant images to the final user message for image enhancement models', async () => {
|
it('collapses to [system?, user(image)] for image enhancement models', async () => {
|
||||||
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
||||||
const initialUser = createMessage('user')
|
const initialUser = createMessage('user')
|
||||||
initialUser.__mockContent = 'Start editing'
|
initialUser.__mockContent = 'Start editing'
|
||||||
@ -180,14 +278,6 @@ describe('messageConverter', () => {
|
|||||||
const result = await convertMessagesToSdkMessages([initialUser, assistant, finalUser], model)
|
const result = await convertMessagesToSdkMessages([initialUser, assistant, finalUser], model)
|
||||||
|
|
||||||
expect(result).toEqual([
|
expect(result).toEqual([
|
||||||
{
|
|
||||||
role: 'user',
|
|
||||||
content: [{ type: 'text', text: 'Start editing' }]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
role: 'assistant',
|
|
||||||
content: [{ type: 'text', text: 'Here is the current preview' }]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
role: 'user',
|
role: 'user',
|
||||||
content: [
|
content: [
|
||||||
@ -198,7 +288,7 @@ describe('messageConverter', () => {
|
|||||||
])
|
])
|
||||||
})
|
})
|
||||||
|
|
||||||
it('preserves preceding system instructions when building enhancement payloads', async () => {
|
it('preserves system messages and collapses others for enhancement payloads', async () => {
|
||||||
       const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
       const fileUser = createMessage('user')
       fileUser.__mockContent = 'Use this document as inspiration'
@@ -221,11 +311,6 @@ describe('messageConverter', () => {

       expect(result).toEqual([
         { role: 'system', content: 'fileid://reference' },
-        { role: 'user', content: [{ type: 'text', text: 'Use this document as inspiration' }] },
-        {
-          role: 'assistant',
-          content: [{ type: 'text', text: 'Generated previews ready' }]
-        },
         {
           role: 'user',
           content: [
@@ -235,5 +320,120 @@ describe('messageConverter', () => {
         }
       ])
     })
+
+    it('handles no previous assistant message with images', async () => {
+      const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
+      const user1 = createMessage('user')
+      user1.__mockContent = 'Start'
+
+      const user2 = createMessage('user')
+      user2.__mockContent = 'Continue without images'
+
+      const result = await convertMessagesToSdkMessages([user1, user2], model)
+
+      expect(result).toEqual([
+        {
+          role: 'user',
+          content: [{ type: 'text', text: 'Continue without images' }]
+        }
+      ])
+    })
+
+    it('handles assistant message without images', async () => {
+      const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
+      const user1 = createMessage('user')
+      user1.__mockContent = 'Start'
+
+      const assistant = createMessage('assistant')
+      assistant.__mockContent = 'Text only response'
+      assistant.__mockImageBlocks = []
+
+      const user2 = createMessage('user')
+      user2.__mockContent = 'Follow up'
+
+      const result = await convertMessagesToSdkMessages([user1, assistant, user2], model)
+
+      expect(result).toEqual([
+        {
+          role: 'user',
+          content: [{ type: 'text', text: 'Follow up' }]
+        }
+      ])
+    })
+
+    it('handles multiple assistant messages by using the most recent one', async () => {
+      const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
+      const user1 = createMessage('user')
+      user1.__mockContent = 'Start'
+
+      const assistant1 = createMessage('assistant')
+      assistant1.__mockContent = 'First response'
+      assistant1.__mockImageBlocks = [createImageBlock(assistant1.id, { url: 'https://example.com/old.png' })]
+
+      const user2 = createMessage('user')
+      user2.__mockContent = 'Continue'
+
+      const assistant2 = createMessage('assistant')
+      assistant2.__mockContent = 'Second response'
+      assistant2.__mockImageBlocks = [createImageBlock(assistant2.id, { url: 'https://example.com/new.png' })]
+
+      const user3 = createMessage('user')
+      user3.__mockContent = 'Final request'
+
+      const result = await convertMessagesToSdkMessages([user1, assistant1, user2, assistant2, user3], model)
+
+      expect(result).toEqual([
+        {
+          role: 'user',
+          content: [
+            { type: 'text', text: 'Final request' },
+            { type: 'image', image: 'https://example.com/new.png' }
+          ]
+        }
+      ])
+    })
+
+    it('handles conversation ending with assistant message', async () => {
+      const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
+      const user = createMessage('user')
+      user.__mockContent = 'Start'
+
+      const assistant = createMessage('assistant')
+      assistant.__mockContent = 'Response with image'
+      assistant.__mockImageBlocks = [createImageBlock(assistant.id, { url: 'https://example.com/image.png' })]
+
+      const result = await convertMessagesToSdkMessages([user, assistant], model)
+
+      // The user message is the last user message, but since the assistant comes after,
+      // there's no "previous" assistant message (search starts from messages.length-2 backwards)
+      expect(result).toEqual([
+        {
+          role: 'user',
+          content: [{ type: 'text', text: 'Start' }]
+        }
+      ])
+    })
+
+    it('handles empty content in last user message', async () => {
+      const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
+      const user1 = createMessage('user')
+      user1.__mockContent = 'Start'
+
+      const assistant = createMessage('assistant')
+      assistant.__mockContent = 'Here is the preview'
+      assistant.__mockImageBlocks = [createImageBlock(assistant.id, { url: 'https://example.com/preview.png' })]
+
+      const user2 = createMessage('user')
+      user2.__mockContent = ''
+
+      const result = await convertMessagesToSdkMessages([user1, assistant, user2], model)
+
+      expect(result).toEqual([
+        {
+          role: 'user',
+          content: [{ type: 'image', image: 'https://example.com/preview.png' }]
+        }
+      ])
+    })
   })
 })
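Note: the tests above pin down the collapse rule for image-enhancement models: keep system messages, take the text of the last user turn, and borrow images from the most recent assistant turn that precedes it. Below is a minimal standalone TypeScript sketch of that rule; Msg and collapseForImageEnhancement are illustrative names, not the repo's API.

type Part = { type: 'text'; text: string } | { type: 'image'; image: string }
type Msg = { role: 'system' | 'user' | 'assistant'; text?: string; images?: string[] }

function collapseForImageEnhancement(messages: Msg[]): Array<{ role: string; content: string | Part[] }> {
  // Preserve every system message (file uploads may have produced some).
  const systems = messages.filter((m) => m.role === 'system').map((m) => ({ role: 'system', content: m.text ?? '' }))
  const lastUser = [...messages].reverse().find((m) => m.role === 'user')
  if (!lastUser) return systems
  // Walk backwards from the second-to-last message to find the previous assistant turn,
  // which is why a conversation ending in an assistant message contributes no images.
  let prevAssistant: Msg | undefined
  for (let i = messages.length - 2; i >= 0; i--) {
    if (messages[i].role === 'assistant') {
      prevAssistant = messages[i]
      break
    }
  }
  const parts: Part[] = []
  if (lastUser.text) parts.push({ type: 'text', text: lastUser.text })
  for (const url of prevAssistant?.images ?? []) parts.push({ type: 'image', image: url })
  return [...systems, { role: 'user', content: parts }]
}

// [user, assistant(image), user] collapses to [user(text + image)], matching the tests:
collapseForImageEnhancement([
  { role: 'user', text: 'Start' },
  { role: 'assistant', text: 'Second response', images: ['https://example.com/new.png'] },
  { role: 'user', text: 'Final request' }
])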
@@ -18,7 +18,7 @@ vi.mock('@renderer/services/AssistantService', () => ({
     toolUseMode: assistant.settings?.toolUseMode ?? 'prompt',
     defaultModel: assistant.defaultModel,
     customParameters: assistant.settings?.customParameters ?? [],
-    reasoning_effort: assistant.settings?.reasoning_effort,
+    reasoning_effort: assistant.settings?.reasoning_effort ?? 'default',
     reasoning_effort_cache: assistant.settings?.reasoning_effort_cache,
    qwenThinkMode: assistant.settings?.qwenThinkMode
   })
@@ -3,10 +3,12 @@
  * 将 Cherry Studio 消息格式转换为 AI SDK 消息格式
  */

+import type { ReasoningPart } from '@ai-sdk/provider-utils'
 import { loggerService } from '@logger'
 import { isImageEnhancementModel, isVisionModel } from '@renderer/config/models'
 import type { Message, Model } from '@renderer/types'
 import type { FileMessageBlock, ImageMessageBlock, ThinkingMessageBlock } from '@renderer/types/newMessage'
+import { parseDataUrlMediaType } from '@renderer/utils/image'
 import {
   findFileBlocks,
   findImageBlocks,
@@ -59,23 +61,29 @@ async function convertImageBlockToImagePart(imageBlocks: ImageMessageBlock[]): P
           mediaType: image.mime
         })
       } catch (error) {
-        logger.warn('Failed to load image:', error as Error)
+        logger.error('Failed to load image file, image will be excluded from message:', {
+          fileId: imageBlock.file.id,
+          fileName: imageBlock.file.origin_name,
+          error: error as Error
+        })
       }
     } else if (imageBlock.url) {
-      const isBase64 = imageBlock.url.startsWith('data:')
-      if (isBase64) {
-        const base64 = imageBlock.url.match(/^data:[^;]*;base64,(.+)$/)![1]
-        const mimeMatch = imageBlock.url.match(/^data:([^;]+)/)
-        parts.push({
-          type: 'image',
-          image: base64,
-          mediaType: mimeMatch ? mimeMatch[1] : 'image/png'
-        })
+      const url = imageBlock.url
+      const isDataUrl = url.startsWith('data:')
+      if (isDataUrl) {
+        const { mediaType } = parseDataUrlMediaType(url)
+        const commaIndex = url.indexOf(',')
+        if (commaIndex === -1) {
+          logger.error('Malformed data URL detected (missing comma separator), image will be excluded:', {
+            urlPrefix: url.slice(0, 50) + '...'
+          })
+          continue
+        }
+        const base64Data = url.slice(commaIndex + 1)
+        parts.push({ type: 'image', image: base64Data, ...(mediaType ? { mediaType } : {}) })
       } else {
-        parts.push({
-          type: 'image',
-          image: imageBlock.url
-        })
+        // For remote URLs we keep payload minimal to match existing expectations.
+        parts.push({ type: 'image', image: url })
       }
     }
   }
@@ -156,13 +164,13 @@ async function convertMessageToAssistantModelMessage(
   thinkingBlocks: ThinkingMessageBlock[],
   model?: Model
 ): Promise<AssistantModelMessage> {
-  const parts: Array<TextPart | FilePart> = []
+  const parts: Array<TextPart | ReasoningPart | FilePart> = []
   if (content) {
     parts.push({ type: 'text', text: content })
   }

   for (const thinkingBlock of thinkingBlocks) {
-    parts.push({ type: 'text', text: thinkingBlock.content })
+    parts.push({ type: 'reasoning', text: thinkingBlock.content })
   }

   for (const fileBlock of fileBlocks) {
@@ -194,17 +202,20 @@ async function convertMessageToAssistantModelMessage(
  * This function processes messages and transforms them into the format required by the SDK.
  * It handles special cases for vision models and image enhancement models.
  *
- * @param messages - Array of messages to convert. Must contain at least 3 messages when using image enhancement models for special handling.
+ * @param messages - Array of messages to convert.
  * @param model - The model configuration that determines conversion behavior
  *
  * @returns A promise that resolves to an array of SDK-compatible model messages
  *
  * @remarks
- * For image enhancement models with 3+ messages:
- * - Examines the last 2 messages to find an assistant message containing image blocks
- * - If found, extracts images from the assistant message and appends them to the last user message content
- * - Returns all converted messages (not just the last two) with the images merged into the user message
- * - Typical pattern: [system?, assistant(image), user] -> [system?, assistant, user(image)]
+ * For image enhancement models:
+ * - Collapses the conversation into [system?, user(image)] format
+ * - Searches backwards through all messages to find the most recent assistant message with images
+ * - Preserves all system messages (including ones generated from file uploads like 'fileid://...')
+ * - Extracts the last user message content and merges images from the previous assistant message
+ * - Returns only the collapsed messages: system messages (if any) followed by a single user message
+ * - If no user message is found, returns only system messages
+ * - Typical pattern: [system?, user, assistant(image), user] -> [system?, user(image)]
  *
  * For other models:
  * - Returns all converted messages in order without special image handling
@@ -220,25 +231,66 @@ export async function convertMessagesToSdkMessages(messages: Message[], model: M
     sdkMessages.push(...(Array.isArray(sdkMessage) ? sdkMessage : [sdkMessage]))
   }
   // Special handling for image enhancement models
-  // Only merge images into the user message
-  // [system?, assistant(image), user] -> [system?, assistant, user(image)]
-  if (isImageEnhancementModel(model) && messages.length >= 3) {
-    const needUpdatedMessages = messages.slice(-2)
-    const assistantMessage = needUpdatedMessages.find((m) => m.role === 'assistant')
-    const userSdkMessage = sdkMessages[sdkMessages.length - 1]
-
-    if (assistantMessage && userSdkMessage?.role === 'user') {
-      const imageBlocks = findImageBlocks(assistantMessage)
-      const imageParts = await convertImageBlockToImagePart(imageBlocks)
-
-      if (imageParts.length > 0) {
-        if (typeof userSdkMessage.content === 'string') {
-          userSdkMessage.content = [{ type: 'text', text: userSdkMessage.content }, ...imageParts]
-        } else if (Array.isArray(userSdkMessage.content)) {
-          userSdkMessage.content.push(...imageParts)
-        }
-      }
-    }
-  }
+  // Target behavior: Collapse the conversation into [system?, user(image)].
+  // Explanation of why we don't simply use slice:
+  // 1) We need to preserve all system messages: During the convertMessageToSdkParam process, native file uploads may insert `system(fileid://...)`.
+  //    Directly slicing the original messages or already converted sdkMessages could easily result in missing these system instructions.
+  //    Therefore, we first perform a full conversion and then aggregate the system messages afterward.
+  // 2) The conversion process may split messages: A single user message might be broken into two SDK messages—[system, user].
+  //    Slicing either side could lead to obtaining semantically incorrect fragments (e.g., only the split-out system message).
+  // 3) The "previous assistant message" is not necessarily the second-to-last one: There might be system messages or other message blocks inserted in between,
+  //    making a simple slice(-2) assumption too rigid. Here, we trace back from the end of the original messages to locate the most recent assistant message, which better aligns with business semantics.
+  // 4) This is a "collapse" rather than a simple "slice": Ultimately, we need to synthesize a new user message
+  //    (with text from the last user message and images from the previous assistant message). Using slice can only extract subarrays,
+  //    which still require reassembly; constructing directly according to the target structure is clearer and more reliable.
+  if (isImageEnhancementModel(model)) {
+    // Collect all system messages (including ones generated from file uploads)
+    const systemMessages = sdkMessages.filter((m): m is SystemModelMessage => m.role === 'system')
+
+    // Find the last user message (SDK converted)
+    const lastUserSdkIndex = (() => {
+      for (let i = sdkMessages.length - 1; i >= 0; i--) {
+        if (sdkMessages[i].role === 'user') return i
+      }
+      return -1
+    })()
+
+    const lastUserSdk = lastUserSdkIndex >= 0 ? (sdkMessages[lastUserSdkIndex] as UserModelMessage) : null
+
+    // Find the nearest preceding assistant message in original messages
+    let prevAssistant: Message | null = null
+    for (let i = messages.length - 2; i >= 0; i--) {
+      if (messages[i].role === 'assistant') {
+        prevAssistant = messages[i]
+        break
+      }
+    }
+
+    // Build the final user content parts
+    let finalUserParts: Array<TextPart | FilePart | ImagePart> = []
+    if (lastUserSdk) {
+      if (typeof lastUserSdk.content === 'string') {
+        finalUserParts.push({ type: 'text', text: lastUserSdk.content })
+      } else if (Array.isArray(lastUserSdk.content)) {
+        finalUserParts = [...lastUserSdk.content]
+      }
+    }
+
+    // Append images from the previous assistant message if any
+    if (prevAssistant) {
+      const imageBlocks = findImageBlocks(prevAssistant)
+      const imageParts = await convertImageBlockToImagePart(imageBlocks)
+      if (imageParts.length > 0) {
+        finalUserParts.push(...imageParts)
+      }
+    }
+
+    // If we couldn't find a last user message, fall back to returning collected system messages only
+    if (!lastUserSdk) {
+      return systemMessages
+    }
+
+    return [...systemMessages, { role: 'user', content: finalUserParts }]
+  }

   return sdkMessages
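Note: the data-URL change above replaces a strict base64 regex with comma-index slicing plus an explicit malformed-URL guard, so non-base64 encodings no longer crash on a failed match. A self-contained sketch of the same idea; parseMediaType here is a simplified stand-in for the repo's parseDataUrlMediaType helper, not its actual implementation.

function parseMediaType(url: string): string | undefined {
  // 'data:image/png;base64,....' -> 'image/png'
  const match = url.match(/^data:([^;,]+)/)
  return match ? match[1] : undefined
}

type SketchImagePart = { type: 'image'; image: string; mediaType?: string }

function toImagePart(url: string): SketchImagePart | undefined {
  if (!url.startsWith('data:')) {
    return { type: 'image', image: url } // remote URL: pass through untouched
  }
  const commaIndex = url.indexOf(',')
  if (commaIndex === -1) {
    return undefined // malformed data URL: no payload separator, skip the image
  }
  const payload = url.slice(commaIndex + 1)
  const mediaType = parseMediaType(url)
  return { type: 'image', image: payload, ...(mediaType ? { mediaType } : {}) }
}

toImagePart('data:image/png;base64,iVBORw0KGgo=') // { type: 'image', image: 'iVBORw0KGgo=', mediaType: 'image/png' }
toImagePart('https://example.com/a.png') // { type: 'image', image: 'https://example.com/a.png' }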
@@ -4,60 +4,90 @@
  */

 import {
-  isClaude45ReasoningModel,
   isClaudeReasoningModel,
   isMaxTemperatureOneModel,
-  isNotSupportTemperatureAndTopP,
   isSupportedFlexServiceTier,
-  isSupportedThinkingTokenClaudeModel
+  isSupportedThinkingTokenClaudeModel,
+  isSupportTemperatureModel,
+  isSupportTopPModel,
+  isTemperatureTopPMutuallyExclusiveModel
 } from '@renderer/config/models'
-import { getAssistantSettings, getProviderByModel } from '@renderer/services/AssistantService'
+import {
+  DEFAULT_ASSISTANT_SETTINGS,
+  getAssistantSettings,
+  getProviderByModel
+} from '@renderer/services/AssistantService'
 import type { Assistant, Model } from '@renderer/types'
 import { defaultTimeout } from '@shared/config/constant'

 import { getAnthropicThinkingBudget } from '../utils/reasoning'

 /**
- * Claude 4.5 推理模型:
- * - 只启用 temperature → 使用 temperature
- * - 只启用 top_p → 使用 top_p
- * - 同时启用 → temperature 生效,top_p 被忽略
- * - 都不启用 → 都不使用
- * 获取温度参数
+ * Retrieves the temperature parameter, adapting it based on assistant.settings and model capabilities.
+ * - Disabled for Claude reasoning models when reasoning effort is set.
+ * - Disabled for models that do not support temperature.
+ * - Disabled for Claude 4.5 reasoning models when TopP is enabled and temperature is disabled.
+ * Otherwise, returns the temperature value if the assistant has temperature enabled.
  */
 export function getTemperature(assistant: Assistant, model: Model): number | undefined {
   if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
     return undefined
   }
+
+  if (!isSupportTemperatureModel(model, assistant)) {
+    return undefined
+  }
+
   if (
-    isNotSupportTemperatureAndTopP(model) ||
-    (isClaude45ReasoningModel(model) && assistant.settings?.enableTopP && !assistant.settings?.enableTemperature)
+    isTemperatureTopPMutuallyExclusiveModel(model) &&
+    assistant.settings?.enableTopP &&
+    !assistant.settings?.enableTemperature
   ) {
     return undefined
   }
+
+  return getTemperatureValue(assistant, model)
+}
+
+function getTemperatureValue(assistant: Assistant, model: Model): number | undefined {
   const assistantSettings = getAssistantSettings(assistant)
   let temperature = assistantSettings?.temperature
   if (temperature && isMaxTemperatureOneModel(model)) {
     temperature = Math.min(1, temperature)
   }
-  return assistantSettings?.enableTemperature ? temperature : undefined
+  // FIXME: assistant.settings.enableTemperature should be always a boolean value.
+  const enableTemperature = assistantSettings?.enableTemperature ?? DEFAULT_ASSISTANT_SETTINGS.enableTemperature
+  return enableTemperature ? temperature : undefined
 }

 /**
- * 获取 TopP 参数
+ * Retrieves the TopP parameter, adapting it based on assistant.settings and model capabilities.
+ * - Disabled for Claude reasoning models when reasoning effort is set.
+ * - Disabled for models that do not support TopP.
+ * - Disabled for Claude 4.5 reasoning models when temperature is explicitly enabled.
+ * Otherwise, returns the TopP value if the assistant has TopP enabled.
  */
 export function getTopP(assistant: Assistant, model: Model): number | undefined {
   if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
     return undefined
   }
-  if (
-    isNotSupportTemperatureAndTopP(model) ||
-    (isClaude45ReasoningModel(model) && assistant.settings?.enableTemperature)
-  ) {
+  if (!isSupportTopPModel(model, assistant)) {
     return undefined
   }
+  if (isTemperatureTopPMutuallyExclusiveModel(model) && assistant.settings?.enableTemperature) {
+    return undefined
+  }
+
+  return getTopPValue(assistant)
+}
+
+function getTopPValue(assistant: Assistant): number | undefined {
   const assistantSettings = getAssistantSettings(assistant)
-  return assistantSettings?.enableTopP ? assistantSettings?.topP : undefined
+  // FIXME: assistant.settings.enableTopP should be always a boolean value.
+  const enableTopP = assistantSettings.enableTopP ?? DEFAULT_ASSISTANT_SETTINGS.enableTopP
+  return enableTopP ? assistantSettings?.topP : undefined
 }

 /**
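Note: after the refactor, getTemperature and getTopP share the same gating order: reasoning-effort guard, capability guard, mutual-exclusion guard, then the enable flag with a default fallback. A condensed sketch of that order; the predicate names and the default value are assumptions for illustration, not the repo's definitions.

interface SamplingSettings {
  temperature?: number
  enableTemperature?: boolean
  enableTopP?: boolean
  reasoningEffort?: string
}

interface ModelCaps {
  supportsTemperature: boolean
  temperatureTopPMutuallyExclusive: boolean
  isClaudeReasoning: boolean
}

const DEFAULT_ENABLE_TEMPERATURE = true // assumed default, standing in for DEFAULT_ASSISTANT_SETTINGS

function pickTemperature(s: SamplingSettings, caps: ModelCaps): number | undefined {
  if (s.reasoningEffort && caps.isClaudeReasoning) return undefined // reasoning takes precedence
  if (!caps.supportsTemperature) return undefined // capability guard
  if (caps.temperatureTopPMutuallyExclusive && s.enableTopP && !s.enableTemperature) {
    return undefined // user opted into top_p on a model where the two are exclusive
  }
  const enabled = s.enableTemperature ?? DEFAULT_ENABLE_TEMPERATURE // fallback instead of treating undefined as off
  return enabled ? s.temperature : undefined
}

pickTemperature(
  { temperature: 0.7, enableTemperature: true },
  { supportsTemperature: true, temperatureTopPMutuallyExclusive: false, isClaudeReasoning: false }
) // 0.7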
@@ -11,12 +11,16 @@ import { vertex } from '@ai-sdk/google-vertex/edge'
 import { combineHeaders } from '@ai-sdk/provider-utils'
 import type { AnthropicSearchConfig, WebSearchPluginConfig } from '@cherrystudio/ai-core/built-in/plugins'
 import { isBaseProvider } from '@cherrystudio/ai-core/core/providers/schemas'
+import type { BaseProviderId } from '@cherrystudio/ai-core/provider'
 import { loggerService } from '@logger'
 import {
   isAnthropicModel,
+  isFixedReasoningModel,
+  isGeminiModel,
   isGenerateImageModel,
+  isGrokModel,
+  isOpenAIModel,
   isOpenRouterBuiltInWebSearchModel,
-  isReasoningModel,
   isSupportedReasoningEffortModel,
   isSupportedThinkingTokenModel,
   isWebSearchModel
@@ -24,11 +28,12 @@ import {
 import { getDefaultModel } from '@renderer/services/AssistantService'
 import store from '@renderer/store'
 import type { CherryWebSearchConfig } from '@renderer/store/websearch'
-import { type Assistant, type MCPTool, type Provider } from '@renderer/types'
+import type { Model } from '@renderer/types'
+import { type Assistant, type MCPTool, type Provider, SystemProviderIds } from '@renderer/types'
 import type { StreamTextParams } from '@renderer/types/aiCoreTypes'
 import { mapRegexToPatterns } from '@renderer/utils/blacklistMatchPattern'
 import { replacePromptVariables } from '@renderer/utils/prompt'
-import { isAwsBedrockProvider } from '@renderer/utils/provider'
+import { isAIGatewayProvider, isAwsBedrockProvider } from '@renderer/utils/provider'
 import type { ModelMessage, Tool } from 'ai'
 import { stepCountIs } from 'ai'

@@ -43,6 +48,25 @@ const logger = loggerService.withContext('parameterBuilder')

 type ProviderDefinedTool = Extract<Tool<any, any>, { type: 'provider-defined' }>

+function mapVertexAIGatewayModelToProviderId(model: Model): BaseProviderId | undefined {
+  if (isAnthropicModel(model)) {
+    return 'anthropic'
+  }
+  if (isGeminiModel(model)) {
+    return 'google'
+  }
+  if (isGrokModel(model)) {
+    return 'xai'
+  }
+  if (isOpenAIModel(model)) {
+    return 'openai'
+  }
+  logger.warn(
+    `[mapVertexAIGatewayModelToProviderId] Unknown model type for AI Gateway: ${model.id}. Web search will not be enabled.`
+  )
+  return undefined
+}
+
 /**
  * 构建 AI SDK 流式参数
  * 这是主要的参数构建函数,整合所有转换逻辑
@@ -83,7 +107,7 @@ export async function buildStreamTextParams(
   const enableReasoning =
     ((isSupportedThinkingTokenModel(model) || isSupportedReasoningEffortModel(model)) &&
       assistant.settings?.reasoning_effort !== undefined) ||
-    (isReasoningModel(model) && (!isSupportedThinkingTokenModel(model) || !isSupportedReasoningEffortModel(model)))
+    isFixedReasoningModel(model)

   // 判断是否使用内置搜索
   // 条件:没有外部搜索提供商 && (用户开启了内置搜索 || 模型强制使用内置搜索)
@@ -117,6 +141,11 @@ export async function buildStreamTextParams(
   if (enableWebSearch) {
     if (isBaseProvider(aiSdkProviderId)) {
       webSearchPluginConfig = buildProviderBuiltinWebSearchConfig(aiSdkProviderId, webSearchConfig, model)
+    } else if (isAIGatewayProvider(provider) || SystemProviderIds.gateway === provider.id) {
+      const aiSdkProviderId = mapVertexAIGatewayModelToProviderId(model)
+      if (aiSdkProviderId) {
+        webSearchPluginConfig = buildProviderBuiltinWebSearchConfig(aiSdkProviderId, webSearchConfig, model)
+      }
     }
     if (!tools) {
       tools = {}
@@ -177,8 +206,12 @@ export async function buildStreamTextParams(
   let headers: Record<string, string | undefined> = options.requestOptions?.headers ?? {}

   if (isAnthropicModel(model) && !isAwsBedrockProvider(provider)) {
-    const newBetaHeaders = { 'anthropic-beta': addAnthropicHeaders(assistant, model).join(',') }
-    headers = combineHeaders(headers, newBetaHeaders)
+    const betaHeaders = addAnthropicHeaders(assistant, model)
+    // Only add the anthropic-beta header if there are actual beta headers to include
+    if (betaHeaders.length > 0) {
+      const newBetaHeaders = { 'anthropic-beta': betaHeaders.join(',') }
+      headers = combineHeaders(headers, newBetaHeaders)
+    }
   }

   // 构建基础参数
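Note: the header change above stops emitting an empty anthropic-beta header when no beta features apply. A minimal sketch of the guard; mergeHeaders is a simplified stand-in for combineHeaders, and the beta flag string is a placeholder.

function mergeHeaders(
  base: Record<string, string | undefined>,
  extra: Record<string, string>
): Record<string, string | undefined> {
  return { ...base, ...extra } // simplified stand-in for combineHeaders
}

function withBetaHeaders(
  headers: Record<string, string | undefined>,
  betaHeaders: string[]
): Record<string, string | undefined> {
  if (betaHeaders.length === 0) {
    return headers // nothing to advertise, leave the request headers untouched
  }
  return mergeHeaders(headers, { 'anthropic-beta': betaHeaders.join(',') })
}

withBetaHeaders({ 'x-app': 'cherry' }, []) // { 'x-app': 'cherry' }
withBetaHeaders({ 'x-app': 'cherry' }, ['some-beta-flag']) // adds 'anthropic-beta': 'some-beta-flag'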
@@ -22,11 +22,15 @@ vi.mock('@renderer/services/AssistantService', () => ({
   })
 }))

-vi.mock('@renderer/store', () => ({
-  default: {
-    getState: () => ({ copilot: { defaultHeaders: {} } })
+vi.mock('@renderer/store', () => {
+  const mockGetState = vi.fn()
+  return {
+    default: {
+      getState: mockGetState
+    },
+    __mockGetState: mockGetState
   }
-}))
+})

 vi.mock('@renderer/utils/api', () => ({
   formatApiHost: vi.fn((host, isSupportedAPIVersion = true) => {
@@ -38,7 +42,8 @@ vi.mock('@renderer/utils/api', () => ({
   routeToEndpoint: vi.fn((host) => ({
     baseURL: host,
     endpoint: '/chat/completions'
-  }))
+  })),
+  isWithTrailingSharp: vi.fn((host) => host?.endsWith('#') || false)
 }))

 vi.mock('@renderer/utils/provider', async (importOriginal) => {
@@ -74,11 +79,13 @@ vi.mock('@renderer/services/AssistantService', () => ({
 import { getProviderByModel } from '@renderer/services/AssistantService'
 import type { Model, Provider } from '@renderer/types'
 import { formatApiHost } from '@renderer/utils/api'
-import { isCherryAIProvider, isPerplexityProvider } from '@renderer/utils/provider'
+import { isAzureOpenAIProvider, isCherryAIProvider, isPerplexityProvider } from '@renderer/utils/provider'

 import { COPILOT_DEFAULT_HEADERS, COPILOT_EDITOR_VERSION, isCopilotResponsesModel } from '../constants'
 import { getActualProvider, providerToAiSdkConfig } from '../providerConfig'

+const { __mockGetState: mockGetState } = vi.mocked(await import('@renderer/store')) as any
+
 const createWindowKeyv = () => {
   const store = new Map<string, string>()
   return {
@@ -126,12 +133,33 @@ const createPerplexityProvider = (): Provider => ({
   isSystem: false
 })

+const createAzureProvider = (apiVersion: string): Provider => ({
+  id: 'azure-openai',
+  type: 'azure-openai',
+  name: 'Azure OpenAI',
+  apiKey: 'test-key',
+  apiHost: 'https://example.openai.azure.com/openai',
+  apiVersion,
+  models: [],
+  isSystem: true
+})
+
 describe('Copilot responses routing', () => {
   beforeEach(() => {
     ;(globalThis as any).window = {
       ...(globalThis as any).window,
       keyv: createWindowKeyv()
     }
+    mockGetState.mockReturnValue({
+      copilot: { defaultHeaders: {} },
+      settings: {
+        openAI: {
+          streamOptions: {
+            includeUsage: undefined
+          }
+        }
+      }
+    })
   })

   it('detects official GPT-5 Codex identifiers case-insensitively', () => {
@@ -167,6 +195,16 @@ describe('CherryAI provider configuration', () => {
       ...(globalThis as any).window,
       keyv: createWindowKeyv()
     }
+    mockGetState.mockReturnValue({
+      copilot: { defaultHeaders: {} },
+      settings: {
+        openAI: {
+          streamOptions: {
+            includeUsage: undefined
+          }
+        }
+      }
+    })
     vi.clearAllMocks()
   })

@@ -201,12 +239,19 @@ describe('CherryAI provider configuration', () => {
     // Mock the functions to simulate non-CherryAI provider
     vi.mocked(isCherryAIProvider).mockReturnValue(false)
     vi.mocked(getProviderByModel).mockReturnValue(provider)
+    // Mock isWithTrailingSharp to return false for this test
+    vi.mocked(formatApiHost as any).mockImplementation((host, isSupportedAPIVersion = true) => {
+      if (isSupportedAPIVersion === false) {
+        return host
+      }
+      return `${host}/v1`
+    })

     // Call getActualProvider
     const actualProvider = getActualProvider(model)

-    // Verify that formatApiHost was called with default parameters (true)
-    expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com')
+    // Verify that formatApiHost was called with appendApiVersion parameter
+    expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com', true)
     expect(actualProvider.apiHost).toBe('https://api.openai.com/v1')
   })

@@ -231,6 +276,16 @@ describe('Perplexity provider configuration', () => {
       ...(globalThis as any).window,
       keyv: createWindowKeyv()
     }
+    mockGetState.mockReturnValue({
+      copilot: { defaultHeaders: {} },
+      settings: {
+        openAI: {
+          streamOptions: {
+            includeUsage: undefined
+          }
+        }
+      }
+    })
     vi.clearAllMocks()
   })

@@ -267,12 +322,19 @@ describe('Perplexity provider configuration', () => {
     vi.mocked(isCherryAIProvider).mockReturnValue(false)
     vi.mocked(isPerplexityProvider).mockReturnValue(false)
     vi.mocked(getProviderByModel).mockReturnValue(provider)
+    // Mock isWithTrailingSharp to return false for this test
+    vi.mocked(formatApiHost as any).mockImplementation((host, isSupportedAPIVersion = true) => {
+      if (isSupportedAPIVersion === false) {
+        return host
+      }
+      return `${host}/v1`
+    })

     // Call getActualProvider
     const actualProvider = getActualProvider(model)

-    // Verify that formatApiHost was called with default parameters (true)
-    expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com')
+    // Verify that formatApiHost was called with appendApiVersion parameter
+    expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com', true)
     expect(actualProvider.apiHost).toBe('https://api.openai.com/v1')
   })

@@ -291,3 +353,208 @@ describe('Perplexity provider configuration', () => {
     expect(actualProvider.apiHost).toBe('')
   })
 })
+
+describe('Stream options includeUsage configuration', () => {
+  beforeEach(() => {
+    ;(globalThis as any).window = {
+      ...(globalThis as any).window,
+      keyv: createWindowKeyv()
+    }
+    vi.clearAllMocks()
+  })
+
+  const createOpenAIProvider = (): Provider => ({
+    id: 'openai-compatible',
+    type: 'openai',
+    name: 'OpenAI',
+    apiKey: 'test-key',
+    apiHost: 'https://api.openai.com',
+    models: [],
+    isSystem: true
+  })
+
+  it('uses includeUsage from settings when undefined', () => {
+    mockGetState.mockReturnValue({
+      copilot: { defaultHeaders: {} },
+      settings: {
+        openAI: {
+          streamOptions: {
+            includeUsage: undefined
+          }
+        }
+      }
+    })
+
+    const provider = createOpenAIProvider()
+    const config = providerToAiSdkConfig(provider, createModel('gpt-4', 'GPT-4', 'openai'))
+
+    expect(config.options.includeUsage).toBeUndefined()
+  })
+
+  it('uses includeUsage from settings when set to true', () => {
+    mockGetState.mockReturnValue({
+      copilot: { defaultHeaders: {} },
+      settings: {
+        openAI: {
+          streamOptions: {
+            includeUsage: true
+          }
+        }
+      }
+    })
+
+    const provider = createOpenAIProvider()
+    const config = providerToAiSdkConfig(provider, createModel('gpt-4', 'GPT-4', 'openai'))
+
+    expect(config.options.includeUsage).toBe(true)
+  })
+
+  it('uses includeUsage from settings when set to false', () => {
+    mockGetState.mockReturnValue({
+      copilot: { defaultHeaders: {} },
+      settings: {
+        openAI: {
+          streamOptions: {
+            includeUsage: false
+          }
+        }
+      }
+    })
+
+    const provider = createOpenAIProvider()
+    const config = providerToAiSdkConfig(provider, createModel('gpt-4', 'GPT-4', 'openai'))
+
+    expect(config.options.includeUsage).toBe(false)
+  })
+
+  it('respects includeUsage setting for non-supporting providers', () => {
+    mockGetState.mockReturnValue({
+      copilot: { defaultHeaders: {} },
+      settings: {
+        openAI: {
+          streamOptions: {
+            includeUsage: true
+          }
+        }
+      }
+    })
+
+    const testProvider: Provider = {
+      id: 'test',
+      type: 'openai',
+      name: 'test',
+      apiKey: 'test-key',
+      apiHost: 'https://api.test.com',
+      models: [],
+      isSystem: false,
+      apiOptions: {
+        isNotSupportStreamOptions: true
+      }
+    }
+
+    const config = providerToAiSdkConfig(testProvider, createModel('gpt-4', 'GPT-4', 'test'))
+
+    // Even though setting is true, provider doesn't support it, so includeUsage should be undefined
+    expect(config.options.includeUsage).toBeUndefined()
+  })
+
+  it('uses includeUsage from settings for Copilot provider when set to false', () => {
+    mockGetState.mockReturnValue({
+      copilot: { defaultHeaders: {} },
+      settings: {
+        openAI: {
+          streamOptions: {
+            includeUsage: false
+          }
+        }
+      }
+    })
+
+    const provider = createCopilotProvider()
+    const config = providerToAiSdkConfig(provider, createModel('gpt-4', 'GPT-4', 'copilot'))
+
+    expect(config.options.includeUsage).toBe(false)
+    expect(config.providerId).toBe('github-copilot-openai-compatible')
+  })
+
+  it('uses includeUsage from settings for Copilot provider when set to true', () => {
+    mockGetState.mockReturnValue({
+      copilot: { defaultHeaders: {} },
+      settings: {
+        openAI: {
+          streamOptions: {
+            includeUsage: true
+          }
+        }
+      }
+    })
+
+    const provider = createCopilotProvider()
+    const config = providerToAiSdkConfig(provider, createModel('gpt-4', 'GPT-4', 'copilot'))
+
+    expect(config.options.includeUsage).toBe(true)
+    expect(config.providerId).toBe('github-copilot-openai-compatible')
+  })
+
+  it('uses includeUsage from settings for Copilot provider when undefined', () => {
+    mockGetState.mockReturnValue({
+      copilot: { defaultHeaders: {} },
+      settings: {
+        openAI: {
+          streamOptions: {
+            includeUsage: undefined
+          }
+        }
+      }
+    })
+
+    const provider = createCopilotProvider()
+    const config = providerToAiSdkConfig(provider, createModel('gpt-4', 'GPT-4', 'copilot'))
+
+    expect(config.options.includeUsage).toBeUndefined()
+    expect(config.providerId).toBe('github-copilot-openai-compatible')
+  })
+})
+
+describe('Azure OpenAI traditional API routing', () => {
+  beforeEach(() => {
+    ;(globalThis as any).window = {
+      ...(globalThis as any).window,
+      keyv: createWindowKeyv()
+    }
+    mockGetState.mockReturnValue({
+      settings: {
+        openAI: {
+          streamOptions: {
+            includeUsage: undefined
+          }
+        }
+      }
+    })
+
+    vi.mocked(isAzureOpenAIProvider).mockImplementation((provider) => provider.type === 'azure-openai')
+  })
+
+  it('uses deployment-based URLs when apiVersion is a date version', () => {
+    const provider = createAzureProvider('2024-02-15-preview')
+    const config = providerToAiSdkConfig(provider, createModel('gpt-4o', 'GPT-4o', provider.id))
+
+    expect(config.providerId).toBe('azure')
+    expect(config.options.apiVersion).toBe('2024-02-15-preview')
+    expect(config.options.useDeploymentBasedUrls).toBe(true)
+  })
+
+  it('does not force deployment-based URLs for apiVersion v1/preview', () => {
+    const v1Provider = createAzureProvider('v1')
+    const v1Config = providerToAiSdkConfig(v1Provider, createModel('gpt-4o', 'GPT-4o', v1Provider.id))
+    expect(v1Config.providerId).toBe('azure-responses')
+    expect(v1Config.options.apiVersion).toBe('v1')
+    expect(v1Config.options.useDeploymentBasedUrls).toBeUndefined()
+
+    const previewProvider = createAzureProvider('preview')
+    const previewConfig = providerToAiSdkConfig(previewProvider, createModel('gpt-4o', 'GPT-4o', previewProvider.id))
+    expect(previewConfig.providerId).toBe('azure-responses')
+    expect(previewConfig.options.apiVersion).toBe('preview')
+    expect(previewConfig.options.useDeploymentBasedUrls).toBeUndefined()
+  })
+})
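Note: the Azure tests above encode a simple routing rule on apiVersion: 'v1' and 'preview' select the responses-style provider, while date-style versions select the traditional deployment-based API. A sketch of that decision in isolation, with illustrative names rather than the repo's providerToAiSdkConfig.

interface AzureRouting {
  providerId: 'azure' | 'azure-responses'
  apiVersion: string
  useDeploymentBasedUrls?: boolean
}

function routeAzure(apiVersion: string): AzureRouting {
  if (apiVersion === 'v1' || apiVersion === 'preview') {
    // Next-generation API surface: no deployment-based URL rewriting.
    return { providerId: 'azure-responses', apiVersion }
  }
  // Date versions such as '2024-02-15-preview' use deployment-based URLs.
  return { providerId: 'azure', apiVersion, useDeploymentBasedUrls: true }
}

routeAzure('2024-02-15-preview') // { providerId: 'azure', apiVersion: '2024-02-15-preview', useDeploymentBasedUrls: true }
routeAzure('v1') // { providerId: 'azure-responses', apiVersion: 'v1' }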