Mirror of https://github.com/CherryHQ/cherry-studio.git (synced 2025-12-19 14:41:24 +08:00)

Commit efa3ed8d29: Merge branch 'main' into main
.github/workflows/auto-i18n.yml (vendored, 4 changes)

@@ -23,7 +23,7 @@ jobs:

    steps:
      - name: 🐈⬛ Checkout
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
        with:
          fetch-depth: 0

@@ -54,7 +54,7 @@ jobs:
          yarn install

      - name: 🏃♀️ Translate
-       run: yarn sync:i18n && yarn auto:i18n
+       run: yarn i18n:sync && yarn i18n:translate

      - name: 🔍 Format
        run: yarn format
.github/workflows/claude-code-review.yml (vendored, 2 changes)

@@ -27,7 +27,7 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
        with:
          fetch-depth: 1
.github/workflows/claude-translator.yml (vendored, 2 changes)

@@ -32,7 +32,7 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
        with:
          fetch-depth: 1
.github/workflows/claude.yml (vendored, 2 changes)

@@ -37,7 +37,7 @@ jobs:
      actions: read # Required for Claude to read CI results on PRs
    steps:
      - name: Checkout repository
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
        with:
          fetch-depth: 1
.github/workflows/github-issue-tracker.yml (vendored, 6 changes)

@@ -19,7 +19,7 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6

      - name: Check Beijing Time
        id: check_time

@@ -42,7 +42,7 @@ jobs:

      - name: Add pending label if in quiet hours
        if: steps.check_time.outputs.should_delay == 'true'
-       uses: actions/github-script@v7
+       uses: actions/github-script@v8
        with:
          script: |
            github.rest.issues.addLabels({

@@ -118,7 +118,7 @@ jobs:

    steps:
      - name: Checkout repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6

      - name: Setup Node.js
        uses: actions/setup-node@v6
.github/workflows/nightly-build.yml (vendored, 2 changes)

@@ -51,7 +51,7 @@ jobs:

    steps:
      - name: Check out Git repository
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
        with:
          ref: main
.github/workflows/pr-ci.yml (vendored, 4 changes)

@@ -21,7 +21,7 @@ jobs:

    steps:
      - name: Check out Git repository
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6

      - name: Install Node.js
        uses: actions/setup-node@v6

@@ -58,7 +58,7 @@ jobs:
        run: yarn typecheck

      - name: i18n Check
-       run: yarn check:i18n
+       run: yarn i18n:check

      - name: Test
        run: yarn test
.github/workflows/release.yml (vendored, 2 changes)

@@ -25,7 +25,7 @@ jobs:

    steps:
      - name: Check out Git repository
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
        with:
          fetch-depth: 0
.github/workflows/sync-to-gitcode.yml (vendored, new file, 305 lines)

@@ -0,0 +1,305 @@
name: Sync Release to GitCode

on:
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      tag:
        description: 'Release tag (e.g. v1.0.0)'
        required: true
      clean:
        description: 'Clean node_modules before build'
        type: boolean
        default: false

permissions:
  contents: read

jobs:
  build-and-sync-to-gitcode:
    runs-on: [self-hosted, windows-signing]
    steps:
      - name: Get tag name
        id: get-tag
        shell: bash
        run: |
          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
            echo "tag=${{ github.event.inputs.tag }}" >> $GITHUB_OUTPUT
          else
            echo "tag=${{ github.event.release.tag_name }}" >> $GITHUB_OUTPUT
          fi

      - name: Check out Git repository
        uses: actions/checkout@v6
        with:
          fetch-depth: 0
          ref: ${{ steps.get-tag.outputs.tag }}

      - name: Set package.json version
        shell: bash
        run: |
          TAG="${{ steps.get-tag.outputs.tag }}"
          VERSION="${TAG#v}"
          npm version "$VERSION" --no-git-tag-version --allow-same-version

      - name: Install Node.js
        uses: actions/setup-node@v6
        with:
          node-version: 22

      - name: Install corepack
        shell: bash
        run: corepack enable && corepack prepare yarn@4.9.1 --activate

      - name: Clean node_modules
        if: ${{ github.event.inputs.clean == 'true' }}
        shell: bash
        run: rm -rf node_modules

      - name: Install Dependencies
        shell: bash
        run: yarn install

      - name: Build Windows with code signing
        shell: bash
        run: yarn build:win
        env:
          WIN_SIGN: true
          CHERRY_CERT_PATH: ${{ secrets.CHERRY_CERT_PATH }}
          CHERRY_CERT_KEY: ${{ secrets.CHERRY_CERT_KEY }}
          CHERRY_CERT_CSP: ${{ secrets.CHERRY_CERT_CSP }}
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          NODE_OPTIONS: --max-old-space-size=8192
          MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }}
          MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }}
          RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }}
          RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }}

      - name: List built Windows artifacts
        shell: bash
        run: |
          echo "Built Windows artifacts:"
          ls -la dist/*.exe dist/*.blockmap dist/latest*.yml

      - name: Download GitHub release assets
        shell: bash
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          TAG_NAME: ${{ steps.get-tag.outputs.tag }}
        run: |
          echo "Downloading release assets for $TAG_NAME..."
          mkdir -p release-assets
          cd release-assets

          # Download all assets from the release
          gh release download "$TAG_NAME" \
            --repo "${{ github.repository }}" \
            --pattern "*" \
            --skip-existing

          echo "Downloaded GitHub release assets:"
          ls -la

      - name: Replace Windows files with signed versions
        shell: bash
        run: |
          echo "Replacing Windows files with signed versions..."

          # Verify signed files exist first
          if ! ls dist/*.exe 1>/dev/null 2>&1; then
            echo "ERROR: No signed .exe files found in dist/"
            exit 1
          fi

          # Remove unsigned Windows files from downloaded assets
          # *.exe, *.exe.blockmap, latest.yml (Windows only)
          rm -f release-assets/*.exe release-assets/*.exe.blockmap release-assets/latest.yml 2>/dev/null || true

          # Copy signed Windows files with error checking
          cp dist/*.exe release-assets/ || { echo "ERROR: Failed to copy .exe files"; exit 1; }
          cp dist/*.exe.blockmap release-assets/ || { echo "ERROR: Failed to copy .blockmap files"; exit 1; }
          cp dist/latest.yml release-assets/ || { echo "ERROR: Failed to copy latest.yml"; exit 1; }

          echo "Final release assets:"
          ls -la release-assets/

      - name: Get release info
        id: release-info
        shell: bash
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          TAG_NAME: ${{ steps.get-tag.outputs.tag }}
          LANG: C.UTF-8
          LC_ALL: C.UTF-8
        run: |
          # Always use gh cli to avoid special character issues
          RELEASE_NAME=$(gh release view "$TAG_NAME" --repo "${{ github.repository }}" --json name -q '.name')
          # Use delimiter to safely handle special characters in release name
          {
            echo 'name<<EOF'
            echo "$RELEASE_NAME"
            echo 'EOF'
          } >> $GITHUB_OUTPUT
          # Extract releaseNotes from electron-builder.yml (from releaseNotes: | to end of file, remove 4-space indent)
          sed -n '/releaseNotes: |/,$ { /releaseNotes: |/d; s/^    //; p }' electron-builder.yml > release_body.txt

      - name: Create GitCode release and upload files
        shell: bash
        env:
          GITCODE_TOKEN: ${{ secrets.GITCODE_TOKEN }}
          GITCODE_OWNER: ${{ vars.GITCODE_OWNER }}
          GITCODE_REPO: ${{ vars.GITCODE_REPO }}
          GITCODE_API_URL: ${{ vars.GITCODE_API_URL }}
          TAG_NAME: ${{ steps.get-tag.outputs.tag }}
          RELEASE_NAME: ${{ steps.release-info.outputs.name }}
          LANG: C.UTF-8
          LC_ALL: C.UTF-8
        run: |
          # Validate required environment variables
          if [ -z "$GITCODE_TOKEN" ]; then
            echo "ERROR: GITCODE_TOKEN is not set"
            exit 1
          fi
          if [ -z "$GITCODE_OWNER" ]; then
            echo "ERROR: GITCODE_OWNER is not set"
            exit 1
          fi
          if [ -z "$GITCODE_REPO" ]; then
            echo "ERROR: GITCODE_REPO is not set"
            exit 1
          fi

          API_URL="${GITCODE_API_URL:-https://api.gitcode.com/api/v5}"

          echo "Creating GitCode release..."
          echo "Tag: $TAG_NAME"
          echo "Repo: $GITCODE_OWNER/$GITCODE_REPO"

          # Step 1: Create release
          # Use --rawfile to read body directly from file, avoiding shell variable encoding issues
          jq -n \
            --arg tag "$TAG_NAME" \
            --arg name "$RELEASE_NAME" \
            --rawfile body release_body.txt \
            '{
              tag_name: $tag,
              name: $name,
              body: $body,
              target_commitish: "main"
            }' > /tmp/release_payload.json

          RELEASE_RESPONSE=$(curl -s -w "\n%{http_code}" -X POST \
            --connect-timeout 30 --max-time 60 \
            "${API_URL}/repos/${GITCODE_OWNER}/${GITCODE_REPO}/releases" \
            -H "Content-Type: application/json; charset=utf-8" \
            -H "Authorization: Bearer ${GITCODE_TOKEN}" \
            --data-binary "@/tmp/release_payload.json")

          HTTP_CODE=$(echo "$RELEASE_RESPONSE" | tail -n1)
          RESPONSE_BODY=$(echo "$RELEASE_RESPONSE" | sed '$d')

          if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ]; then
            echo "Release created successfully"
          else
            echo "Warning: Release creation returned HTTP $HTTP_CODE"
            echo "$RESPONSE_BODY"
            exit 1
          fi

          # Step 2: Upload files to release
          echo "Uploading files to GitCode release..."

          # Function to upload a single file with retry
          upload_file() {
            local file="$1"
            local filename=$(basename "$file")
            local max_retries=3
            local retry=0
            local curl_status=0

            echo "Uploading: $filename"

            # URL encode the filename
            encoded_filename=$(printf '%s' "$filename" | jq -sRr @uri)

            while [ $retry -lt $max_retries ]; do
              # Get upload URL
              curl_status=0
              UPLOAD_INFO=$(curl -s --connect-timeout 30 --max-time 60 \
                -H "Authorization: Bearer ${GITCODE_TOKEN}" \
                "${API_URL}/repos/${GITCODE_OWNER}/${GITCODE_REPO}/releases/${TAG_NAME}/upload_url?file_name=${encoded_filename}") || curl_status=$?

              if [ $curl_status -eq 0 ]; then
                UPLOAD_URL=$(echo "$UPLOAD_INFO" | jq -r '.url // empty')

                if [ -n "$UPLOAD_URL" ]; then
                  # Write headers to temp file to avoid shell escaping issues
                  echo "$UPLOAD_INFO" | jq -r '.headers | to_entries[] | "header = \"" + .key + ": " + .value + "\""' > /tmp/upload_headers.txt

                  # Upload file using PUT with headers from file
                  curl_status=0
                  UPLOAD_RESPONSE=$(curl -s -w "\n%{http_code}" -X PUT \
                    -K /tmp/upload_headers.txt \
                    --data-binary "@${file}" \
                    "$UPLOAD_URL") || curl_status=$?

                  if [ $curl_status -eq 0 ]; then
                    HTTP_CODE=$(echo "$UPLOAD_RESPONSE" | tail -n1)
                    RESPONSE_BODY=$(echo "$UPLOAD_RESPONSE" | sed '$d')

                    if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ]; then
                      echo "  Uploaded: $filename"
                      return 0
                    else
                      echo "  Failed (HTTP $HTTP_CODE), retry $((retry + 1))/$max_retries"
                      echo "  Response: $RESPONSE_BODY"
                    fi
                  else
                    echo "  Upload request failed (curl exit $curl_status), retry $((retry + 1))/$max_retries"
                  fi
                else
                  echo "  Failed to get upload URL, retry $((retry + 1))/$max_retries"
                  echo "  Response: $UPLOAD_INFO"
                fi
              else
                echo "  Failed to get upload URL (curl exit $curl_status), retry $((retry + 1))/$max_retries"
                echo "  Response: $UPLOAD_INFO"
              fi

              retry=$((retry + 1))
              [ $retry -lt $max_retries ] && sleep 3
            done

            echo "  Failed: $filename after $max_retries retries"
            exit 1
          }

          # Upload non-yml/json files first
          for file in release-assets/*; do
            if [ -f "$file" ]; then
              filename=$(basename "$file")
              if [[ ! "$filename" =~ \.(yml|yaml|json)$ ]]; then
                upload_file "$file"
              fi
            fi
          done

          # Upload yml/json files last
          for file in release-assets/*; do
            if [ -f "$file" ]; then
              filename=$(basename "$file")
              if [[ "$filename" =~ \.(yml|yaml|json)$ ]]; then
                upload_file "$file"
              fi
            fi
          done

          echo "GitCode release sync completed!"

      - name: Cleanup temp files
        if: always()
        shell: bash
        run: |
          rm -f /tmp/release_payload.json /tmp/upload_headers.txt release_body.txt
          rm -rf release-assets/
.github/workflows/update-app-upgrade-config.yml (vendored, 36 changes)

@@ -19,10 +19,9 @@ on:

permissions:
  contents: write
- pull-requests: write

jobs:
- propose-update:
+ update-config:
    runs-on: ubuntu-latest
    if: github.event_name == 'workflow_dispatch' || (github.event_name == 'release' && github.event.release.draft == false)

@@ -135,7 +134,7 @@ jobs:

      - name: Checkout default branch
        if: steps.check.outputs.should_run == 'true'
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
        with:
          ref: ${{ github.event.repository.default_branch }}
          path: main

@@ -143,7 +142,7 @@ jobs:

      - name: Checkout x-files/app-upgrade-config branch
        if: steps.check.outputs.should_run == 'true'
-       uses: actions/checkout@v5
+       uses: actions/checkout@v6
        with:
          ref: x-files/app-upgrade-config
          path: cs

@@ -187,25 +186,20 @@ jobs:
            echo "changed=true" >> "$GITHUB_OUTPUT"
          fi

-     - name: Create pull request
+     - name: Commit and push changes
        if: steps.check.outputs.should_run == 'true' && steps.diff.outputs.changed == 'true'
-       uses: peter-evans/create-pull-request@v7
-       with:
-         path: cs
-         base: x-files/app-upgrade-config
-         branch: chore/update-app-upgrade-config/${{ steps.meta.outputs.safe_tag }}
-         commit-message: "🤖 chore: sync app-upgrade-config for ${{ steps.meta.outputs.tag }}"
-         title: "chore: update app-upgrade-config for ${{ steps.meta.outputs.tag }}"
-         body: |
-           Automated update triggered by `${{ steps.meta.outputs.trigger }}`.
+       working-directory: cs
+       run: |
+         git config user.name "github-actions[bot]"
+         git config user.email "github-actions[bot]@users.noreply.github.com"
+         git add app-upgrade-config.json
+         git commit -m "chore: sync app-upgrade-config for ${{ steps.meta.outputs.tag }}" -m "Automated update triggered by \`${{ steps.meta.outputs.trigger }}\`.

-           - Source tag: `${{ steps.meta.outputs.tag }}`
-           - Pre-release: `${{ steps.meta.outputs.prerelease }}`
-           - Latest: `${{ steps.meta.outputs.latest }}`
-           - Workflow run: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
-         labels: |
-           automation
-           app-upgrade
+         - Source tag: \`${{ steps.meta.outputs.tag }}\`
+         - Pre-release: \`${{ steps.meta.outputs.prerelease }}\`
+         - Latest: \`${{ steps.meta.outputs.latest }}\`
+         - Workflow run: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+         git push origin x-files/app-upgrade-config

      - name: No changes detected
        if: steps.check.outputs.should_run == 'true' && steps.diff.outputs.changed != 'true'
@@ -1,5 +1,5 @@
 diff --git a/dist/index.js b/dist/index.js
-index 51ce7e423934fb717cb90245cdfcdb3dae6780e6..0f7f7009e2f41a79a8669d38c8a44867bbff5e1f 100644
+index d004b415c5841a1969705823614f395265ea5a8a..6b1e0dad4610b0424393ecc12e9114723bbe316b 100644
 --- a/dist/index.js
 +++ b/dist/index.js
 @@ -474,7 +474,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {

@@ -12,7 +12,7 @@ index 51ce7e423934fb717cb90245cdfcdb3dae6780e6..0f7f7009e2f41a79a8669d38c8a44867

 // src/google-generative-ai-options.ts
 diff --git a/dist/index.mjs b/dist/index.mjs
-index f4b77e35c0cbfece85a3ef0d4f4e67aa6dde6271..8d2fecf8155a226006a0bde72b00b6036d4014b6 100644
+index 1780dd2391b7f42224a0b8048c723d2f81222c44..1f12ed14399d6902107ce9b435d7d8e6cc61e06b 100644
 --- a/dist/index.mjs
 +++ b/dist/index.mjs
 @@ -480,7 +480,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {

@@ -24,3 +24,14 @@ index f4b77e35c0cbfece85a3ef0d4f4e67aa6dde6271..8d2fecf8155a226006a0bde72b00b603
 }

 // src/google-generative-ai-options.ts
 @@ -1909,8 +1909,7 @@ function createGoogleGenerativeAI(options = {}) {
 }
 var google = createGoogleGenerativeAI();
 export {
 -  VERSION,
   createGoogleGenerativeAI,
 -  google
 +  google, VERSION
 };
 //# sourceMappingURL=index.mjs.map
\ No newline at end of file
@@ -1,8 +1,8 @@
 diff --git a/dist/index.js b/dist/index.js
-index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a70ea2b5a2 100644
+index 130094d194ea1e8e7d3027d07d82465741192124..4d13dcee8c962ca9ee8f1c3d748f8ffe6a3cfb47 100644
 --- a/dist/index.js
 +++ b/dist/index.js
-@@ -274,6 +274,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
+@@ -290,6 +290,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
 message: import_v42.z.object({
 role: import_v42.z.literal("assistant").nullish(),
 content: import_v42.z.string().nullish(),

@@ -10,7 +10,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
 tool_calls: import_v42.z.array(
 import_v42.z.object({
 id: import_v42.z.string().nullish(),
-@@ -340,6 +341,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
+@@ -356,6 +357,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
 delta: import_v42.z.object({
 role: import_v42.z.enum(["assistant"]).nullish(),
 content: import_v42.z.string().nullish(),

@@ -18,7 +18,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
 tool_calls: import_v42.z.array(
 import_v42.z.object({
 index: import_v42.z.number(),
-@@ -795,6 +797,13 @@ var OpenAIChatLanguageModel = class {
+@@ -814,6 +816,13 @@ var OpenAIChatLanguageModel = class {
 if (text != null && text.length > 0) {
 content.push({ type: "text", text });
 }

@@ -32,7 +32,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
 for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
 content.push({
 type: "tool-call",
-@@ -876,6 +885,7 @@ var OpenAIChatLanguageModel = class {
+@@ -895,6 +904,7 @@ var OpenAIChatLanguageModel = class {
 };
 let metadataExtracted = false;
 let isActiveText = false;

@@ -40,7 +40,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
 const providerMetadata = { openai: {} };
 return {
 stream: response.pipeThrough(
-@@ -933,6 +943,21 @@ var OpenAIChatLanguageModel = class {
+@@ -952,6 +962,21 @@ var OpenAIChatLanguageModel = class {
 return;
 }
 const delta = choice.delta;

@@ -62,7 +62,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
 if (delta.content != null) {
 if (!isActiveText) {
 controller.enqueue({ type: "text-start", id: "0" });
-@@ -1045,6 +1070,9 @@ var OpenAIChatLanguageModel = class {
+@@ -1064,6 +1089,9 @@ var OpenAIChatLanguageModel = class {
 }
 },
 flush(controller) {
@@ -1,5 +1,5 @@
 diff --git a/sdk.mjs b/sdk.mjs
-index bf429a344b7d59f70aead16b639f949b07688a81..f77d50cc5d3fb04292cb3ac7fa7085d02dcc628f 100755
+index dea7766a3432a1e809f12d6daba4f2834a219689..e0b02ef73da177ba32b903887d7bbbeaa08cc6d3 100755
 --- a/sdk.mjs
 +++ b/sdk.mjs
 @@ -6250,7 +6250,7 @@ function createAbortController(maxListeners = DEFAULT_MAX_LISTENERS) {

@@ -11,7 +11,7 @@ index bf429a344b7d59f70aead16b639f949b07688a81..f77d50cc5d3fb04292cb3ac7fa7085d0
 import { createInterface } from "readline";

 // ../src/utils/fsOperations.ts
-@@ -6619,18 +6619,11 @@ class ProcessTransport {
+@@ -6644,18 +6644,11 @@ class ProcessTransport {
 const errorMessage = isNativeBinary(pathToClaudeCodeExecutable) ? `Claude Code native binary not found at ${pathToClaudeCodeExecutable}. Please ensure Claude Code is installed via native installer or specify a valid path with options.pathToClaudeCodeExecutable.` : `Claude Code executable not found at ${pathToClaudeCodeExecutable}. Is options.pathToClaudeCodeExecutable set?`;
 throw new ReferenceError(errorMessage);
 }
.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch (vendored, new file, 145 lines)

@@ -0,0 +1,145 @@
diff --git a/dist/index.d.ts b/dist/index.d.ts
index 8dd9b498050dbecd8dd6b901acf1aa8ca38a49af..ed644349c9d38fe2a66b2fb44214f7c18eb97f89 100644
--- a/dist/index.d.ts
+++ b/dist/index.d.ts
@@ -4,7 +4,7 @@ import { z } from 'zod/v4';

type OllamaChatModelId = "athene-v2" | "athene-v2:72b" | "aya-expanse" | "aya-expanse:8b" | "aya-expanse:32b" | "codegemma" | "codegemma:2b" | "codegemma:7b" | "codellama" | "codellama:7b" | "codellama:13b" | "codellama:34b" | "codellama:70b" | "codellama:code" | "codellama:python" | "command-r" | "command-r:35b" | "command-r-plus" | "command-r-plus:104b" | "command-r7b" | "command-r7b:7b" | "deepseek-r1" | "deepseek-r1:1.5b" | "deepseek-r1:7b" | "deepseek-r1:8b" | "deepseek-r1:14b" | "deepseek-r1:32b" | "deepseek-r1:70b" | "deepseek-r1:671b" | "deepseek-coder-v2" | "deepseek-coder-v2:16b" | "deepseek-coder-v2:236b" | "deepseek-v3" | "deepseek-v3:671b" | "devstral" | "devstral:24b" | "dolphin3" | "dolphin3:8b" | "exaone3.5" | "exaone3.5:2.4b" | "exaone3.5:7.8b" | "exaone3.5:32b" | "falcon2" | "falcon2:11b" | "falcon3" | "falcon3:1b" | "falcon3:3b" | "falcon3:7b" | "falcon3:10b" | "firefunction-v2" | "firefunction-v2:70b" | "gemma" | "gemma:2b" | "gemma:7b" | "gemma2" | "gemma2:2b" | "gemma2:9b" | "gemma2:27b" | "gemma3" | "gemma3:1b" | "gemma3:4b" | "gemma3:12b" | "gemma3:27b" | "granite3-dense" | "granite3-dense:2b" | "granite3-dense:8b" | "granite3-guardian" | "granite3-guardian:2b" | "granite3-guardian:8b" | "granite3-moe" | "granite3-moe:1b" | "granite3-moe:3b" | "granite3.1-dense" | "granite3.1-dense:2b" | "granite3.1-dense:8b" | "granite3.1-moe" | "granite3.1-moe:1b" | "granite3.1-moe:3b" | "llama2" | "llama2:7b" | "llama2:13b" | "llama2:70b" | "llama3" | "llama3:8b" | "llama3:70b" | "llama3-chatqa" | "llama3-chatqa:8b" | "llama3-chatqa:70b" | "llama3-gradient" | "llama3-gradient:8b" | "llama3-gradient:70b" | "llama3.1" | "llama3.1:8b" | "llama3.1:70b" | "llama3.1:405b" | "llama3.2" | "llama3.2:1b" | "llama3.2:3b" | "llama3.2-vision" | "llama3.2-vision:11b" | "llama3.2-vision:90b" | "llama3.3" | "llama3.3:70b" | "llama4" | "llama4:16x17b" | "llama4:128x17b" | "llama-guard3" | "llama-guard3:1b" | "llama-guard3:8b" | "llava" | "llava:7b" | "llava:13b" | "llava:34b" | "llava-llama3" | "llava-llama3:8b" | "llava-phi3" | "llava-phi3:3.8b" | "marco-o1" | "marco-o1:7b" | "mistral" | "mistral:7b" | "mistral-large" | "mistral-large:123b" | "mistral-nemo" | "mistral-nemo:12b" | "mistral-small" | "mistral-small:22b" | "mixtral" | "mixtral:8x7b" | "mixtral:8x22b" | "moondream" | "moondream:1.8b" | "openhermes" | "openhermes:v2.5" | "nemotron" | "nemotron:70b" | "nemotron-mini" | "nemotron-mini:4b" | "olmo" | "olmo:7b" | "olmo:13b" | "opencoder" | "opencoder:1.5b" | "opencoder:8b" | "phi3" | "phi3:3.8b" | "phi3:14b" | "phi3.5" | "phi3.5:3.8b" | "phi4" | "phi4:14b" | "qwen" | "qwen:7b" | "qwen:14b" | "qwen:32b" | "qwen:72b" | "qwen:110b" | "qwen2" | "qwen2:0.5b" | "qwen2:1.5b" | "qwen2:7b" | "qwen2:72b" | "qwen2.5" | "qwen2.5:0.5b" | "qwen2.5:1.5b" | "qwen2.5:3b" | "qwen2.5:7b" | "qwen2.5:14b" | "qwen2.5:32b" | "qwen2.5:72b" | "qwen2.5-coder" | "qwen2.5-coder:0.5b" | "qwen2.5-coder:1.5b" | "qwen2.5-coder:3b" | "qwen2.5-coder:7b" | "qwen2.5-coder:14b" | "qwen2.5-coder:32b" | "qwen3" | "qwen3:0.6b" | "qwen3:1.7b" | "qwen3:4b" | "qwen3:8b" | "qwen3:14b" | "qwen3:30b" | "qwen3:32b" | "qwen3:235b" | "qwq" | "qwq:32b" | "sailor2" | "sailor2:1b" | "sailor2:8b" | "sailor2:20b" | "shieldgemma" | "shieldgemma:2b" | "shieldgemma:9b" | "shieldgemma:27b" | "smallthinker" | "smallthinker:3b" | "smollm" | "smollm:135m" | "smollm:360m" | "smollm:1.7b" | "tinyllama" | "tinyllama:1.1b" | "tulu3" | "tulu3:8b" | "tulu3:70b" | (string & {});
 declare const ollamaProviderOptions: z.ZodObject<{
-    think: z.ZodOptional<z.ZodBoolean>;
+    think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodEnum<['low', 'medium', 'high']>]>>;
     options: z.ZodOptional<z.ZodObject<{
         num_ctx: z.ZodOptional<z.ZodNumber>;
         repeat_last_n: z.ZodOptional<z.ZodNumber>;
@@ -27,9 +27,11 @@ interface OllamaCompletionSettings {
      * the model's thinking from the model's output. When disabled, the model will not think
      * and directly output the content.
      *
+     * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking.
+     *
      * Only supported by certain models like DeepSeek R1 and Qwen 3.
      */
-    think?: boolean;
+    think?: boolean | 'low' | 'medium' | 'high';
     /**
      * Echo back the prompt in addition to the completion.
      */
@@ -146,7 +148,7 @@ declare const ollamaEmbeddingProviderOptions: z.ZodObject<{
 type OllamaEmbeddingProviderOptions = z.infer<typeof ollamaEmbeddingProviderOptions>;

 declare const ollamaCompletionProviderOptions: z.ZodObject<{
-    think: z.ZodOptional<z.ZodBoolean>;
+    think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodEnum<['low', 'medium', 'high']>]>>;
     user: z.ZodOptional<z.ZodString>;
     suffix: z.ZodOptional<z.ZodString>;
     echo: z.ZodOptional<z.ZodBoolean>;
diff --git a/dist/index.js b/dist/index.js
index 35b5142ce8476ce2549ed7c2ec48e7d8c46c90d9..2ef64dc9a4c2be043e6af608241a6a8309a5a69f 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -158,7 +158,7 @@ function getResponseMetadata({

 // src/completion/ollama-completion-language-model.ts
 var ollamaCompletionProviderOptions = import_v42.z.object({
-  think: import_v42.z.boolean().optional(),
+  think: import_v42.z.union([import_v42.z.boolean(), import_v42.z.enum(['low', 'medium', 'high'])]).optional(),
   user: import_v42.z.string().optional(),
   suffix: import_v42.z.string().optional(),
   echo: import_v42.z.boolean().optional()
@@ -662,7 +662,7 @@ function convertToOllamaChatMessages({
       const images = content.filter((part) => part.type === "file" && part.mediaType.startsWith("image/")).map((part) => part.data);
       messages.push({
         role: "user",
-        content: userText.length > 0 ? userText : [],
+        content: userText.length > 0 ? userText : '',
         images: images.length > 0 ? images : void 0
       });
       break;
@@ -813,9 +813,11 @@ var ollamaProviderOptions = import_v44.z.object({
    * the model's thinking from the model's output. When disabled, the model will not think
    * and directly output the content.
    *
+   * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking.
+   *
    * Only supported by certain models like DeepSeek R1 and Qwen 3.
    */
-  think: import_v44.z.boolean().optional(),
+  think: import_v44.z.union([import_v44.z.boolean(), import_v44.z.enum(['low', 'medium', 'high'])]).optional(),
   options: import_v44.z.object({
     num_ctx: import_v44.z.number().optional(),
     repeat_last_n: import_v44.z.number().optional(),
@@ -929,14 +931,16 @@ var OllamaRequestBuilder = class {
         prompt,
         systemMessageMode: "system"
       }),
-      temperature,
-      top_p: topP,
       max_output_tokens: maxOutputTokens,
       ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
         format: responseFormat.schema != null ? responseFormat.schema : "json"
       },
       think: (_a = ollamaOptions == null ? void 0 : ollamaOptions.think) != null ? _a : false,
-      options: (_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : void 0
+      options: {
+        ...temperature !== void 0 && { temperature },
+        ...topP !== void 0 && { top_p: topP },
+        ...((_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : {})
+      }
     };
   }
 };
diff --git a/dist/index.mjs b/dist/index.mjs
index e2a634a78d80ac9542f2cc4f96cf2291094b10cf..67b23efce3c1cf4f026693d3ff9246988a3ef26e 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -144,7 +144,7 @@ function getResponseMetadata({

 // src/completion/ollama-completion-language-model.ts
 var ollamaCompletionProviderOptions = z2.object({
-  think: z2.boolean().optional(),
+  think: z2.union([z2.boolean(), z2.enum(['low', 'medium', 'high'])]).optional(),
   user: z2.string().optional(),
   suffix: z2.string().optional(),
   echo: z2.boolean().optional()
@@ -662,7 +662,7 @@ function convertToOllamaChatMessages({
       const images = content.filter((part) => part.type === "file" && part.mediaType.startsWith("image/")).map((part) => part.data);
       messages.push({
         role: "user",
-        content: userText.length > 0 ? userText : [],
+        content: userText.length > 0 ? userText : '',
         images: images.length > 0 ? images : void 0
       });
       break;
@@ -815,9 +815,11 @@ var ollamaProviderOptions = z4.object({
    * the model's thinking from the model's output. When disabled, the model will not think
    * and directly output the content.
    *
+   * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking.
+   *
    * Only supported by certain models like DeepSeek R1 and Qwen 3.
    */
-  think: z4.boolean().optional(),
+  think: z4.union([z4.boolean(), z4.enum(['low', 'medium', 'high'])]).optional(),
   options: z4.object({
     num_ctx: z4.number().optional(),
     repeat_last_n: z4.number().optional(),
@@ -931,14 +933,16 @@ var OllamaRequestBuilder = class {
         prompt,
         systemMessageMode: "system"
       }),
-      temperature,
-      top_p: topP,
       max_output_tokens: maxOutputTokens,
       ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
         format: responseFormat.schema != null ? responseFormat.schema : "json"
       },
       think: (_a = ollamaOptions == null ? void 0 : ollamaOptions.think) != null ? _a : false,
-      options: (_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : void 0
+      options: {
+        ...temperature !== void 0 && { temperature },
+        ...topP !== void 0 && { top_p: topP },
+        ...((_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : {})
+      }
     };
   }
 };
CLAUDE.md (11 changes)

@@ -28,7 +28,7 @@ When creating a Pull Request, you MUST:
- **Development**: `yarn dev` - Runs Electron app in development mode with hot reload
- **Debug**: `yarn debug` - Starts with debugging enabled, use `chrome://inspect` to attach debugger
- **Build Check**: `yarn build:check` - **REQUIRED** before commits (lint + test + typecheck)
-  - If having i18n sort issues, run `yarn sync:i18n` first to sync template
+  - If having i18n sort issues, run `yarn i18n:sync` first to sync template
  - If having formatting issues, run `yarn format` first
- **Test**: `yarn test` - Run all tests (Vitest) across main and renderer processes
- **Single Test**:

@@ -40,20 +40,23 @@ When creating a Pull Request, you MUST:
## Project Architecture

### Electron Structure

- **Main Process** (`src/main/`): Node.js backend with services (MCP, Knowledge, Storage, etc.)
- **Renderer Process** (`src/renderer/`): React UI with Redux state management
- **Preload Scripts** (`src/preload/`): Secure IPC bridge

### Key Components

- **AI Core** (`src/renderer/src/aiCore/`): Middleware pipeline for multiple AI providers.
- **Services** (`src/main/services/`): MCPService, KnowledgeService, WindowService, etc.
- **Build System**: Electron-Vite with experimental rolldown-vite, yarn workspaces.
- **State Management**: Redux Toolkit (`src/renderer/src/store/`) for predictable state.

### Logging

```typescript
-import { loggerService } from '@logger'
-const logger = loggerService.withContext('moduleName')
+import { loggerService } from "@logger";
+const logger = loggerService.withContext("moduleName");
// Renderer: loggerService.initWindowSource('windowName') first
-logger.info('message', CONTEXT)
+logger.info("message", CONTEXT);
```
@@ -23,7 +23,7 @@
   },
   "files": {
     "ignoreUnknown": false,
-    "includes": ["**", "!**/.claude/**", "!**/.vscode/**"],
+    "includes": ["**", "!**/.claude/**", "!**/.vscode/**", "!**/.conductor/**"],
     "maxSize": 2097152
   },
   "formatter": {
@@ -12,8 +12,13 @@

; https://github.com/electron-userland/electron-builder/issues/1122
!ifndef BUILD_UNINSTALLER
  ; Check VC++ Redistributable based on architecture stored in $1
  Function checkVCRedist
    ReadRegDWORD $0 HKLM "SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\x64" "Installed"
    ${If} $1 == "arm64"
      ReadRegDWORD $0 HKLM "SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\ARM64" "Installed"
    ${Else}
      ReadRegDWORD $0 HKLM "SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\x64" "Installed"
    ${EndIf}
  FunctionEnd

  Function checkArchitectureCompatibility

@@ -97,29 +102,47 @@

    Call checkVCRedist
    ${If} $0 != "1"
      MessageBox MB_YESNO "\
        NOTE: ${PRODUCT_NAME} requires $\r$\n\
        'Microsoft Visual C++ Redistributable'$\r$\n\
        to function properly.$\r$\n$\r$\n\
        Download and install now?" /SD IDYES IDYES InstallVCRedist IDNO DontInstall
      InstallVCRedist:
        inetc::get /CAPTION " " /BANNER "Downloading Microsoft Visual C++ Redistributable..." "https://aka.ms/vs/17/release/vc_redist.x64.exe" "$TEMP\vc_redist.x64.exe"
        ExecWait "$TEMP\vc_redist.x64.exe /install /norestart"
        ;IfErrors InstallError ContinueInstall ; vc_redist exit code is unreliable :(
        Call checkVCRedist
        ${If} $0 == "1"
          Goto ContinueInstall
        ${EndIf}
      ; VC++ is required - install automatically since declining would abort anyway
      ; Select download URL based on system architecture (stored in $1)
      ${If} $1 == "arm64"
        StrCpy $2 "https://aka.ms/vs/17/release/vc_redist.arm64.exe"
        StrCpy $3 "$TEMP\vc_redist.arm64.exe"
      ${Else}
        StrCpy $2 "https://aka.ms/vs/17/release/vc_redist.x64.exe"
        StrCpy $3 "$TEMP\vc_redist.x64.exe"
      ${EndIf}

      ;InstallError:
        MessageBox MB_ICONSTOP "\
          There was an unexpected error installing$\r$\n\
          Microsoft Visual C++ Redistributable.$\r$\n\
          The installation of ${PRODUCT_NAME} cannot continue."
      DontInstall:
      inetc::get /CAPTION " " /BANNER "Downloading Microsoft Visual C++ Redistributable..." \
        $2 $3 /END
      Pop $0 ; Get download status from inetc::get
      ${If} $0 != "OK"
        MessageBox MB_ICONSTOP|MB_YESNO "\
          Failed to download Microsoft Visual C++ Redistributable.$\r$\n$\r$\n\
          Error: $0$\r$\n$\r$\n\
          Would you like to open the download page in your browser?$\r$\n\
          $2" IDYES openDownloadUrl IDNO skipDownloadUrl
        openDownloadUrl:
          ExecShell "open" $2
        skipDownloadUrl:
        Abort
      ${EndIf}

      ExecWait "$3 /install /quiet /norestart"
      ; Note: vc_redist exit code is unreliable, verify via registry check instead

      Call checkVCRedist
      ${If} $0 != "1"
        MessageBox MB_ICONSTOP|MB_YESNO "\
          Microsoft Visual C++ Redistributable installation failed.$\r$\n$\r$\n\
          Would you like to open the download page in your browser?$\r$\n\
          $2$\r$\n$\r$\n\
          The installation of ${PRODUCT_NAME} cannot continue." IDYES openInstallUrl IDNO skipInstallUrl
        openInstallUrl:
          ExecShell "open" $2
        skipInstallUrl:
          Abort
      ${EndIf}
    ${EndIf}
    ContinueInstall:
    Pop $4
    Pop $3
    Pop $2
@@ -71,7 +71,7 @@ Tools like i18n Ally cannot parse dynamic content within template strings, resulting in:

```javascript
// Not recommended - Plugin cannot resolve
-const message = t(`fruits.${fruit}`)
+const message = t(`fruits.${fruit}`);
```

#### 2. **No Real-time Rendering in Editor**

@@ -91,14 +91,14 @@ For example:
```ts
// src/renderer/src/i18n/label.ts
const themeModeKeyMap = {
-  dark: 'settings.theme.dark',
-  light: 'settings.theme.light',
-  system: 'settings.theme.system'
-} as const
+  dark: "settings.theme.dark",
+  light: "settings.theme.light",
+  system: "settings.theme.system",
+} as const;

export const getThemeModeLabel = (key: string): string => {
-  return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key
-}
+  return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key;
+};
```

By avoiding template strings, you gain better developer experience, more reliable translation checks, and a more maintainable codebase.

@@ -107,7 +107,7 @@ By avoiding template strings, you gain better developer experience, more reliabl

The project includes several scripts to automate i18n-related tasks:

-### `check:i18n` - Validate i18n Structure
+### `i18n:check` - Validate i18n Structure

This script checks:

@@ -116,10 +116,10 @@ This script checks:
- Whether keys are properly sorted

```bash
-yarn check:i18n
+yarn i18n:check
```

-### `sync:i18n` - Synchronize JSON Structure and Sort Order
+### `i18n:sync` - Synchronize JSON Structure and Sort Order

This script uses `zh-cn.json` as the source of truth to sync structure across all language files, including:

@@ -128,14 +128,14 @@ This script uses `zh-cn.json` as the source of truth to sync structure across al
3. Sorting keys automatically

```bash
-yarn sync:i18n
+yarn i18n:sync
```

-### `auto:i18n` - Automatically Translate Pending Texts
+### `i18n:translate` - Automatically Translate Pending Texts

This script fills in texts marked as `[to be translated]` using machine translation.

-Typically, after adding new texts in `zh-cn.json`, run `sync:i18n`, then `auto:i18n` to complete translations.
+Typically, after adding new texts in `zh-cn.json`, run `i18n:sync`, then `i18n:translate` to complete translations.

Before using this script, set the required environment variables:

@@ -148,30 +148,20 @@ MODEL="qwen-plus-latest"
Alternatively, add these variables directly to your `.env` file.

```bash
-yarn auto:i18n
-```
-
-### `update:i18n` - Object-level Translation Update
-
-Updates translations in language files under `src/renderer/src/i18n/translate` at the object level, preserving existing translations and only updating new content.
-
-**Not recommended** — prefer `auto:i18n` for translation tasks.
-
-```bash
-yarn update:i18n
+yarn i18n:translate
```

### Workflow

1. During development, first add the required text in `zh-cn.json`
2. Confirm it displays correctly in the Chinese environment
-3. Run `yarn sync:i18n` to propagate the keys to other language files
-4. Run `yarn auto:i18n` to perform machine translation
+3. Run `yarn i18n:sync` to propagate the keys to other language files
+4. Run `yarn i18n:translate` to perform machine translation
5. Grab a coffee and let the magic happen!

## Best Practices

1. **Use Chinese as Source Language**: All development starts in Chinese, then translates to other languages.
-2. **Run Check Script Before Commit**: Use `yarn check:i18n` to catch i18n issues early.
+2. **Run Check Script Before Commit**: Use `yarn i18n:check` to catch i18n issues early.
3. **Translate in Small Increments**: Avoid accumulating a large backlog of untranslated content.
4. **Keep Keys Semantically Clear**: Keys should clearly express their purpose, e.g., `user.profile.avatar.upload.error`
@@ -1,17 +1,17 @@
# 如何优雅地做好 i18n

-## 使用i18n ally插件提升开发体验
+## 使用 i18n ally 插件提升开发体验

-i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反馈,帮助开发者更早发现文案缺失和错译问题。
+i18n ally 是一个强大的 VSCode 插件,它能在开发阶段提供实时反馈,帮助开发者更早发现文案缺失和错译问题。

项目中已经配置好了插件设置,直接安装即可。

### 开发时优势

- **实时预览**:翻译文案会直接显示在编辑器中
-- **错误检测**:自动追踪标记出缺失的翻译或未使用的key
-- **快速跳转**:可通过key直接跳转到定义处(Ctrl/Cmd + click)
-- **自动补全**:输入i18n key时提供自动补全建议
+- **错误检测**:自动追踪标记出缺失的翻译或未使用的 key
+- **快速跳转**:可通过 key 直接跳转到定义处(Ctrl/Cmd + click)
+- **自动补全**:输入 i18n key 时提供自动补全建议

### 效果展示

@@ -23,9 +23,9 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反

## i18n 约定

-### **绝对避免使用flat格式**
+### **绝对避免使用 flat 格式**

-绝对避免使用flat格式,如`"add.button.tip": "添加"`。应采用清晰的嵌套结构:
+绝对避免使用 flat 格式,如`"add.button.tip": "添加"`。应采用清晰的嵌套结构:

```json
// 错误示例 - flat结构

@@ -52,14 +52,14 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反
#### 为什么要使用嵌套结构

1. **自然分组**:通过对象结构天然能将相关上下文的文案分到一个组别中
-2. **插件要求**:i18n ally 插件需要嵌套或flat格式其一的文件才能正常分析
+2. **插件要求**:i18n ally 插件需要嵌套或 flat 格式其一的文件才能正常分析

### **避免在`t()`中使用模板字符串**

-**强烈建议避免使用模板字符串**进行动态插值。虽然模板字符串在JavaScript开发中非常方便,但在国际化场景下会带来一系列问题。
+**强烈建议避免使用模板字符串**进行动态插值。虽然模板字符串在 JavaScript 开发中非常方便,但在国际化场景下会带来一系列问题。

1. **插件无法跟踪**
-   i18n ally等工具无法解析模板字符串中的动态内容,导致:
+   i18n ally 等工具无法解析模板字符串中的动态内容,导致:

- 无法正确显示实时预览
- 无法检测翻译缺失

@@ -67,11 +67,11 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反

```javascript
// 不推荐 - 插件无法解析
-const message = t(`fruits.${fruit}`)
+const message = t(`fruits.${fruit}`);
```

2. **编辑器无法实时渲染**
-   在IDE中,模板字符串会显示为原始代码而非最终翻译结果,降低了开发体验。
+   在 IDE 中,模板字符串会显示为原始代码而非最终翻译结果,降低了开发体验。

3. **更难以维护**
   由于插件无法跟踪这样的文案,编辑器中也无法渲染,开发者必须人工确认语言文件中是否存在相应的文案。

@@ -85,36 +85,36 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反
```ts
// src/renderer/src/i18n/label.ts
const themeModeKeyMap = {
-  dark: 'settings.theme.dark',
-  light: 'settings.theme.light',
-  system: 'settings.theme.system'
-} as const
+  dark: "settings.theme.dark",
+  light: "settings.theme.light",
+  system: "settings.theme.system",
+} as const;

export const getThemeModeLabel = (key: string): string => {
-  return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key
-}
+  return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key;
+};
```

通过避免模板字符串,可以获得更好的开发体验、更可靠的翻译检查以及更易维护的代码库。

## 自动化脚本

-项目中有一系列脚本来自动化i18n相关任务:
+项目中有一系列脚本来自动化 i18n 相关任务:

-### `check:i18n` - 检查i18n结构
+### `i18n:check` - 检查 i18n 结构

此脚本会检查:

- 所有语言文件是否为嵌套结构
-- 是否存在缺失的key
-- 是否存在多余的key
+- 是否存在缺失的 key
+- 是否存在多余的 key
- 是否已经有序

```bash
-yarn check:i18n
+yarn i18n:check
```

-### `sync:i18n` - 同步json结构与排序
+### `i18n:sync` - 同步 json 结构与排序

此脚本以`zh-cn.json`文件为基准,将结构同步到其他语言文件,包括:

@@ -123,14 +123,14 @@ yarn check:i18n
3. 自动排序

```bash
-yarn sync:i18n
+yarn i18n:sync
```

-### `auto:i18n` - 自动翻译待翻译文本
+### `i18n:translate` - 自动翻译待翻译文本

次脚本自动将标记为待翻译的文本通过机器翻译填充。

-通常,在`zh-cn.json`中添加所需文案后,执行`sync:i18n`即可自动完成翻译。
+通常,在`zh-cn.json`中添加所需文案后,执行`i18n:sync`即可自动完成翻译。

使用该脚本前,需要配置环境变量,例如:

@@ -143,29 +143,19 @@ MODEL="qwen-plus-latest"
你也可以通过直接编辑`.env`文件来添加环境变量。

```bash
-yarn auto:i18n
-```
-
-### `update:i18n` - 对象级别翻译更新
-
-对`src/renderer/src/i18n/translate`中的语言文件进行对象级别的翻译更新,保留已有翻译,只更新新增内容。
-
-**不建议**使用该脚本,更推荐使用`auto:i18n`进行翻译。
-
-```bash
-yarn update:i18n
+yarn i18n:translate
```

### 工作流

1. 开发阶段,先在`zh-cn.json`中添加所需文案
-2. 确认在中文环境下显示无误后,使用`yarn sync:i18n`将文案同步到其他语言文件
-3. 使用`yarn auto:i18n`进行自动翻译
+2. 确认在中文环境下显示无误后,使用`yarn i18n:sync`将文案同步到其他语言文件
+3. 使用`yarn i18n:translate`进行自动翻译
4. 喝杯咖啡,等翻译完成吧!

## 最佳实践

1. **以中文为源语言**:所有开发首先使用中文,再翻译为其他语言
-2. **提交前运行检查脚本**:使用`yarn check:i18n`检查i18n是否有问题
+2. **提交前运行检查脚本**:使用`yarn i18n:check`检查 i18n 是否有问题
3. **小步提交翻译**:避免积累大量未翻译文本
-4. **保持key语义明确**:key应能清晰表达其用途,如`user.profile.avatar.upload.error`
+4. **保持 key 语义明确**:key 应能清晰表达其用途,如`user.profile.avatar.upload.error`
@@ -134,108 +134,38 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
releaseInfo:
  releaseNotes: |
    <!--LANG:en-->
-   A New Era of Intelligence with Cherry Studio 1.7.1
+   Cherry Studio 1.7.5 - Filesystem MCP Overhaul & Topic Management

-   Today we're releasing Cherry Studio 1.7.1 — our most ambitious update yet, introducing Agent: autonomous AI that thinks, plans, and acts.
+   This release features a completely rewritten filesystem MCP server, new batch topic management, and improved assistant management.

-   For years, AI assistants have been reactive — waiting for your commands, responding to your questions. With Agent, we're changing that. Now, AI can truly work alongside you: understanding complex goals, breaking them into steps, and executing them independently.
+   ✨ New Features
+   - [MCP] Rewrite filesystem MCP server with improved tool set (glob, ls, grep, read, write, edit, delete)
+   - [Topics] Add topic manage mode for batch delete and move operations with search functionality
+   - [Assistants] Merge import/subscribe popups and add export to assistant management
+   - [Knowledge] Use prompt injection for forced knowledge base search (faster response times)
+   - [Settings] Add tool use mode setting (prompt/function) to default assistant settings

-   This is what we've been building toward. And it's just the beginning.
-
-   🤖 Meet Agent
-   Imagine having a brilliant colleague who never sleeps. Give Agent a goal — write a report, analyze data, refactor code — and watch it work. It reasons through problems, breaks them into steps, calls the right tools, and adapts when things change.
-
-   - **Think → Plan → Act**: From goal to execution, fully autonomous
-   - **Deep Reasoning**: Multi-turn thinking that solves real problems
-   - **Tool Mastery**: File operations, web search, code execution, and more
-   - **Skill Plugins**: Extend with custom commands and capabilities
-   - **You Stay in Control**: Real-time approval for sensitive actions
-   - **Full Visibility**: Every thought, every decision, fully transparent
-
-   🌐 Expanding Ecosystem
-   - **New Providers**: HuggingFace, Mistral, CherryIN, AI Gateway, Intel OVMS, Didi MCP
-   - **New Models**: Claude 4.5 Haiku, DeepSeek v3.2, GLM-4.6, Doubao, Ling series
-   - **MCP Integration**: Alibaba Cloud, ModelScope, Higress, MCP.so, TokenFlux and more
-
-   📚 Smarter Knowledge Base
-   - **OpenMinerU**: Self-hosted document processing
-   - **Full-Text Search**: Find anything instantly across your notes
-   - **Enhanced Tool Selection**: Smarter configuration for better AI assistance
-
-   📝 Notes, Reimagined
-   - Full-text search with highlighted results
-   - AI-powered smart rename
-   - Export as image
-   - Auto-wrap for tables
-
-   🖼️ Image & OCR
-   - Intel OVMS painting capabilities
-   - Intel OpenVINO NPU-accelerated OCR
-
-   🌍 Now in 10+ Languages
-   - Added German support
-   - Enhanced internationalization
-
-   ⚡ Faster & More Polished
-   - Electron 38 upgrade
-   - New MCP management interface
-   - Dozens of UI refinements
-
-   ❤️ Fully Open Source
-   Commercial restrictions removed. Cherry Studio now follows standard AGPL v3 — free for teams of any size.
-
-   The Agent Era is here. We can't wait to see what you'll create.
+   🐛 Bug Fixes
+   - [Model] Correct typo in Gemini 3 Pro Image Preview model name
+   - [Installer] Auto-install VC++ Redistributable without user prompt
+   - [Notes] Fix notes directory validation and default path reset for cross-platform restore
+   - [OAuth] Bind OAuth callback server to localhost (127.0.0.1) for security

    <!--LANG:zh-CN-->
-   Cherry Studio 1.7.1:开启智能新纪元
+   Cherry Studio 1.7.5 - 文件系统 MCP 重构与话题管理

-   今天,我们正式发布 Cherry Studio 1.7.1 —— 迄今最具雄心的版本,带来全新的 Agent:能够自主思考、规划和行动的 AI。
+   本次更新完全重写了文件系统 MCP 服务器,新增批量话题管理功能,并改进了助手管理。

-   多年来,AI 助手一直是被动的——等待你的指令,回应你的问题。Agent 改变了这一切。现在,AI 能够真正与你并肩工作:理解复杂目标,将其拆解为步骤,并独立执行。
+   ✨ 新功能
+   - [MCP] 重写文件系统 MCP 服务器,提供改进的工具集(glob、ls、grep、read、write、edit、delete)
+   - [话题] 新增话题管理模式,支持批量删除和移动操作,带搜索功能
+   - [助手] 合并导入/订阅弹窗,并在助手管理中添加导出功能
+   - [知识库] 使用提示词注入进行强制知识库搜索(响应更快)
+   - [设置] 在默认助手设置中添加工具使用模式设置(prompt/function)

-   这是我们一直在构建的未来。而这,仅仅是开始。
-
-   🤖 认识 Agent
-   想象一位永不疲倦的得力伙伴。给 Agent 一个目标——撰写报告、分析数据、重构代码——然后看它工作。它会推理问题、拆解步骤、调用工具,并在情况变化时灵活应对。
-
-   - **思考 → 规划 → 行动**:从目标到执行,全程自主
-   - **深度推理**:多轮思考,解决真实问题
-   - **工具大师**:文件操作、网络搜索、代码执行,样样精通
-   - **技能插件**:自定义命令,无限扩展
-   - **你掌控全局**:敏感操作,实时审批
-   - **完全透明**:每一步思考,每一个决策,清晰可见
-
-   🌐 生态持续壮大
-   - **新增服务商**:Hugging Face、Mistral、Perplexity、SophNet、AI Gateway、Cerebras AI
-   - **新增模型**:Gemini 3、Gemini 3 Pro(支持图像预览)、GPT-5.1、Claude Opus 4.5
-   - **MCP 集成**:百炼、魔搭、Higress、MCP.so、TokenFlux 等平台
-
-   📚 更智能的知识库
-   - **OpenMinerU**:本地自部署文档处理
-   - **全文搜索**:笔记内容一搜即达
-   - **增强工具选择**:更智能的配置,更好的 AI 协助
-
-   📝 笔记,焕然一新
-   - 全文搜索,结果高亮
-   - AI 智能重命名
-   - 导出为图片
-   - 表格自动换行
-
-   🖼️ 图像与 OCR
-   - Intel OVMS 绘图能力
-   - Intel OpenVINO NPU 加速 OCR
-
-   🌍 支持 10+ 种语言
-   - 新增德语支持
-   - 全面增强国际化
-
-   ⚡ 更快、更精致
-   - 升级 Electron 38
-   - 新的 MCP 管理界面
-   - 数十处 UI 细节打磨
-
-   ❤️ 完全开源
-   商用限制已移除。Cherry Studio 现遵循标准 AGPL v3 协议——任意规模团队均可自由使用。
-
-   Agent 纪元已至。期待你的创造。
+   🐛 问题修复
+   - [模型] 修正 Gemini 3 Pro Image Preview 模型名称的拼写错误
+   - [安装程序] 自动安装 VC++ 运行库,无需用户确认
+   - [笔记] 修复跨平台恢复场景下的笔记目录验证和默认路径重置逻辑
+   - [OAuth] 将 OAuth 回调服务器绑定到 localhost (127.0.0.1) 以提高安全性
    <!--LANG:END-->
@@ -61,6 +61,7 @@ export default defineConfig([
      'tests/**',
      '.yarn/**',
      '.gitignore',
+     '.conductor/**',
      'scripts/cloudflare-worker.js',
      'src/main/integration/nutstore/sso/lib/**',
      'src/main/integration/cherryai/index.js',
package.json (29 changes)

@@ -1,6 +1,6 @@
{
  "name": "CherryStudio",
- "version": "1.7.1",
+ "version": "1.7.5",
  "private": true,
  "description": "A powerful AI assistant for producer.",
  "main": "./out/main/index.js",

@@ -53,10 +53,10 @@
    "typecheck": "concurrently -n \"node,web\" -c \"cyan,magenta\" \"npm run typecheck:node\" \"npm run typecheck:web\"",
    "typecheck:node": "tsgo --noEmit -p tsconfig.node.json --composite false",
    "typecheck:web": "tsgo --noEmit -p tsconfig.web.json --composite false",
-   "check:i18n": "dotenv -e .env -- tsx scripts/check-i18n.ts",
-   "sync:i18n": "dotenv -e .env -- tsx scripts/sync-i18n.ts",
-   "update:i18n": "dotenv -e .env -- tsx scripts/update-i18n.ts",
-   "auto:i18n": "dotenv -e .env -- tsx scripts/auto-translate-i18n.ts",
+   "i18n:check": "dotenv -e .env -- tsx scripts/check-i18n.ts",
+   "i18n:sync": "dotenv -e .env -- tsx scripts/sync-i18n.ts",
+   "i18n:translate": "dotenv -e .env -- tsx scripts/auto-translate-i18n.ts",
+   "i18n:all": "yarn i18n:check && yarn i18n:sync && yarn i18n:translate",
    "update:languages": "tsx scripts/update-languages.ts",
    "update:upgrade-config": "tsx scripts/update-app-upgrade-config.ts",
    "test": "vitest run --silent",

@@ -70,7 +70,7 @@
    "test:e2e": "yarn playwright test",
    "test:lint": "oxlint --deny-warnings && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --cache",
    "test:scripts": "vitest scripts",
-   "lint": "oxlint --fix && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix --cache && yarn typecheck && yarn check:i18n && yarn format:check",
+   "lint": "oxlint --fix && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix --cache && yarn typecheck && yarn i18n:check && yarn format:check",
    "format": "biome format --write && biome lint --write",
    "format:check": "biome format && biome lint",
    "prepare": "git config blame.ignoreRevsFile .git-blame-ignore-revs && husky",

@@ -81,7 +81,7 @@
    "release:ai-sdk-provider": "yarn workspace @cherrystudio/ai-sdk-provider version patch --immediate && yarn workspace @cherrystudio/ai-sdk-provider build && yarn workspace @cherrystudio/ai-sdk-provider npm publish --access public"
  },
  "dependencies": {
-   "@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.53#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.53-4b77f4cf29.patch",
+   "@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.62#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.62-23ae56f8c8.patch",
    "@libsql/client": "0.14.0",
    "@libsql/win32-x64-msvc": "^0.4.7",
    "@napi-rs/system-ocr": "patch:@napi-rs/system-ocr@npm%3A1.0.2#~/.yarn/patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch",

@@ -114,11 +114,11 @@
    "@ai-sdk/anthropic": "^2.0.49",
    "@ai-sdk/cerebras": "^1.0.31",
    "@ai-sdk/gateway": "^2.0.15",
-   "@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.43#~/.yarn/patches/@ai-sdk-google-npm-2.0.43-689ed559b3.patch",
-   "@ai-sdk/google-vertex": "^3.0.79",
+   "@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.49#~/.yarn/patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch",
+   "@ai-sdk/google-vertex": "^3.0.94",
    "@ai-sdk/huggingface": "^0.0.10",
    "@ai-sdk/mistral": "^2.0.24",
-   "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
+   "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
    "@ai-sdk/perplexity": "^2.0.20",
    "@ai-sdk/test-server": "^0.0.1",
    "@ant-design/v5-patch-for-react-19": "^1.0.3",

@@ -142,7 +142,7 @@
    "@cherrystudio/embedjs-ollama": "^0.1.31",
    "@cherrystudio/embedjs-openai": "^0.1.31",
    "@cherrystudio/extension-table-plus": "workspace:^",
-   "@cherrystudio/openai": "^6.9.0",
+   "@cherrystudio/openai": "^6.12.0",
    "@dnd-kit/core": "^6.3.1",
    "@dnd-kit/modifiers": "^9.0.0",
    "@dnd-kit/sortable": "^10.0.0",

@@ -318,7 +318,7 @@
    "motion": "^12.10.5",
    "notion-helper": "^1.3.22",
    "npx-scope-finder": "^1.2.0",
-   "ollama-ai-provider-v2": "^1.5.5",
+   "ollama-ai-provider-v2": "patch:ollama-ai-provider-v2@npm%3A1.5.5#~/.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch",
    "oxlint": "^1.22.0",
    "oxlint-tsgolint": "^0.2.0",
    "p-queue": "^8.1.0",

@@ -414,9 +414,10 @@
    "@langchain/openai@npm:>=0.1.0 <0.6.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
    "@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
    "@langchain/openai@npm:>=0.2.0 <0.7.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
-   "@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
+   "@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
    "@ai-sdk/google@npm:^2.0.40": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
-   "@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
+   "@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
+   "@ai-sdk/google@npm:2.0.49": "patch:@ai-sdk/google@npm%3A2.0.49#~/.yarn/patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch"
  },
  "packageManager": "yarn@4.9.1",
  "lint-staged": {
@ -40,7 +40,7 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@ai-sdk/anthropic": "^2.0.49",
|
||||
"@ai-sdk/azure": "^2.0.74",
|
||||
"@ai-sdk/azure": "^2.0.87",
|
||||
"@ai-sdk/deepseek": "^1.0.31",
|
||||
"@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
|
||||
"@ai-sdk/provider": "^2.0.0",
|
||||
|
||||
@ -62,7 +62,7 @@ export class StreamEventManager {
const recursiveResult = await context.recursiveCall(recursiveParams)

if (recursiveResult && recursiveResult.fullStream) {
await this.pipeRecursiveStream(controller, recursiveResult.fullStream, context)
await this.pipeRecursiveStream(controller, recursiveResult.fullStream)
} else {
console.warn('[MCP Prompt] No fullstream found in recursive result:', recursiveResult)
}
@ -74,11 +74,7 @@ export class StreamEventManager {
/**
* Pipes data from the recursive stream into the current stream
*/
private async pipeRecursiveStream(
controller: StreamController,
recursiveStream: ReadableStream,
context?: AiRequestContext
): Promise<void> {
private async pipeRecursiveStream(controller: StreamController, recursiveStream: ReadableStream): Promise<void> {
const reader = recursiveStream.getReader()
try {
while (true) {
@ -86,18 +82,14 @@ export class StreamEventManager {
if (done) {
break
}
if (value.type === 'start') {
continue
}

if (value.type === 'finish') {
// Don't forward finish from the iterated stream, but do accumulate its usage
if (value.usage && context?.accumulatedUsage) {
this.accumulateUsage(context.accumulatedUsage, value.usage)
}
break
}
// For finish-step chunks, accumulate their usage
if (value.type === 'finish-step' && value.usage && context?.accumulatedUsage) {
this.accumulateUsage(context.accumulatedUsage, value.usage)
}
// Pipe the recursive stream's data into the current stream

controller.enqueue(value)
}
} finally {
@ -135,10 +127,8 @@ export class StreamEventManager {
// Build the new conversation messages
const newMessages: ModelMessage[] = [
...(context.originalParams.messages || []),
{
role: 'assistant',
content: textBuffer
},
// Only add the assistant message when textBuffer has content, to avoid API errors from empty messages
...(textBuffer ? [{ role: 'assistant' as const, content: textBuffer }] : []),
{
role: 'user',
content: toolResultsText
@ -161,7 +151,7 @@ export class StreamEventManager {
/**
* Accumulates usage data
*/
private accumulateUsage(target: any, source: any): void {
accumulateUsage(target: any, source: any): void {
if (!target || !source) return

// Accumulate each token type
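The body of accumulateUsage is not part of this diff; the following is a minimal sketch of the field-wise summation the surrounding comments describe. The token field names (inputTokens, outputTokens, totalTokens) are assumptions for illustration, not taken from the source.

type Usage = { inputTokens?: number; outputTokens?: number; totalTokens?: number }

// Hypothetical sketch only; the real implementation lives outside this diff.
function accumulateUsageSketch(target: Usage, source: Usage): void {
  if (!target || !source) return
  for (const key of ['inputTokens', 'outputTokens', 'totalTokens'] as const) {
    const value = source[key]
    if (typeof value === 'number') {
      // Add the incoming count onto whatever has been accumulated so far
      target[key] = (target[key] ?? 0) + value
    }
  }
}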
@ -411,7 +411,10 @@ export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => {
}
}

// If no tool calls were executed, pass the original finish-step event through directly
// If no tool calls were executed, accumulate usage and then pass the finish-step event through
if (chunk.usage && context.accumulatedUsage) {
streamEventManager.accumulateUsage(context.accumulatedUsage, chunk.usage)
}
controller.enqueue(chunk)

// Clean up state
@ -6,6 +6,7 @@ import { type Tool } from 'ai'
|
||||
|
||||
import { createOpenRouterOptions, createXaiOptions, mergeProviderOptions } from '../../../options'
|
||||
import type { ProviderOptionsMap } from '../../../options/types'
|
||||
import type { AiRequestContext } from '../../'
|
||||
import type { OpenRouterSearchConfig } from './openrouter'
|
||||
|
||||
/**
|
||||
@ -95,28 +96,84 @@ export type WebSearchToolInputSchema = {
|
||||
'openai-chat': InferToolInput<OpenAIChatWebSearchTool>
|
||||
}
|
||||
|
||||
export const switchWebSearchTool = (config: WebSearchPluginConfig, params: any) => {
|
||||
if (config.openai) {
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search = openai.tools.webSearch(config.openai)
|
||||
} else if (config['openai-chat']) {
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search_preview = openai.tools.webSearchPreview(config['openai-chat'])
|
||||
} else if (config.anthropic) {
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search = anthropic.tools.webSearch_20250305(config.anthropic)
|
||||
} else if (config.google) {
|
||||
// case 'google-vertex':
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search = google.tools.googleSearch(config.google || {})
|
||||
} else if (config.xai) {
|
||||
const searchOptions = createXaiOptions({
|
||||
searchParameters: { ...config.xai, mode: 'on' }
|
||||
})
|
||||
params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
|
||||
} else if (config.openrouter) {
|
||||
const searchOptions = createOpenRouterOptions(config.openrouter)
|
||||
params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
|
||||
/**
|
||||
* Helper function to ensure params.tools object exists
|
||||
*/
|
||||
const ensureToolsObject = (params: any) => {
|
||||
if (!params.tools) params.tools = {}
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper function to apply tool-based web search configuration
|
||||
*/
|
||||
const applyToolBasedSearch = (params: any, toolName: string, toolInstance: any) => {
|
||||
ensureToolsObject(params)
|
||||
params.tools[toolName] = toolInstance
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper function to apply provider options-based web search configuration
|
||||
*/
|
||||
const applyProviderOptionsSearch = (params: any, searchOptions: any) => {
|
||||
params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
|
||||
}
|
||||
|
||||
export const switchWebSearchTool = (config: WebSearchPluginConfig, params: any, context?: AiRequestContext) => {
|
||||
const providerId = context?.providerId
|
||||
|
||||
// Provider-specific configuration map
|
||||
const providerHandlers: Record<string, () => void> = {
|
||||
openai: () => {
|
||||
const cfg = config.openai ?? DEFAULT_WEB_SEARCH_CONFIG.openai
|
||||
applyToolBasedSearch(params, 'web_search', openai.tools.webSearch(cfg))
|
||||
},
|
||||
'openai-chat': () => {
|
||||
const cfg = (config['openai-chat'] ?? DEFAULT_WEB_SEARCH_CONFIG['openai-chat']) as OpenAISearchPreviewConfig
|
||||
applyToolBasedSearch(params, 'web_search_preview', openai.tools.webSearchPreview(cfg))
|
||||
},
|
||||
anthropic: () => {
|
||||
const cfg = config.anthropic ?? DEFAULT_WEB_SEARCH_CONFIG.anthropic
|
||||
applyToolBasedSearch(params, 'web_search', anthropic.tools.webSearch_20250305(cfg))
|
||||
},
|
||||
google: () => {
|
||||
const cfg = (config.google ?? DEFAULT_WEB_SEARCH_CONFIG.google) as GoogleSearchConfig
|
||||
applyToolBasedSearch(params, 'web_search', google.tools.googleSearch(cfg))
|
||||
},
|
||||
xai: () => {
|
||||
const cfg = config.xai ?? DEFAULT_WEB_SEARCH_CONFIG.xai
|
||||
const searchOptions = createXaiOptions({ searchParameters: { ...cfg, mode: 'on' } })
|
||||
applyProviderOptionsSearch(params, searchOptions)
|
||||
},
|
||||
openrouter: () => {
|
||||
const cfg = (config.openrouter ?? DEFAULT_WEB_SEARCH_CONFIG.openrouter) as OpenRouterSearchConfig
|
||||
const searchOptions = createOpenRouterOptions(cfg)
|
||||
applyProviderOptionsSearch(params, searchOptions)
|
||||
}
|
||||
}
|
||||
|
||||
// Try provider-specific handler first
|
||||
const handler = providerId && providerHandlers[providerId]
|
||||
if (handler) {
|
||||
handler()
|
||||
return params
|
||||
}
|
||||
|
||||
// Fallback: apply based on available config keys (prioritized order)
|
||||
const fallbackOrder: Array<keyof WebSearchPluginConfig> = [
|
||||
'openai',
|
||||
'openai-chat',
|
||||
'anthropic',
|
||||
'google',
|
||||
'xai',
|
||||
'openrouter'
|
||||
]
|
||||
|
||||
for (const key of fallbackOrder) {
|
||||
if (config[key]) {
|
||||
providerHandlers[key]()
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return params
|
||||
}
|
||||
|
||||
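To illustrate how the refactored switchWebSearchTool resolves a handler, here is a hedged usage sketch; the empty config objects and the partial context are placeholders, not the real defaults.

// Illustrative only: config values and the context object are stand-ins.
const params: any = {}

// With an explicit providerId in the context, that provider's handler wins
// even when several config keys are present.
switchWebSearchTool({ openai: {}, google: {} }, params, { providerId: 'google' } as any)
// -> params.tools.web_search is set via google.tools.googleSearch(...)

// Without a providerId, the fallback order
// (openai, openai-chat, anthropic, google, xai, openrouter) picks the first configured key.
switchWebSearchTool({ anthropic: {} }, params)
// -> params.tools.web_search is set via anthropic.tools.webSearch_20250305(...)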
@ -17,8 +17,22 @@ export const webSearchPlugin = (config: WebSearchPluginConfig = DEFAULT_WEB_SEAR
|
||||
name: 'webSearch',
|
||||
enforce: 'pre',
|
||||
|
||||
transformParams: async (params: any) => {
|
||||
switchWebSearchTool(config, params)
|
||||
transformParams: async (params: any, context) => {
|
||||
let { providerId } = context
|
||||
|
||||
// For cherryin providers, extract the actual provider from the model's provider string
|
||||
// Expected format: "cherryin.{actualProvider}" (e.g., "cherryin.gemini")
|
||||
if (providerId === 'cherryin' || providerId === 'cherryin-chat') {
|
||||
const provider = params.model?.provider
|
||||
if (provider && typeof provider === 'string' && provider.includes('.')) {
|
||||
const extractedProviderId = provider.split('.')[1]
|
||||
if (extractedProviderId) {
|
||||
providerId = extractedProviderId
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switchWebSearchTool(config, params, { ...context, providerId })
|
||||
return params
|
||||
}
|
||||
})
|
||||
|
||||
@ -55,6 +55,8 @@ export enum IpcChannel {
|
||||
Webview_SetOpenLinkExternal = 'webview:set-open-link-external',
|
||||
Webview_SetSpellCheckEnabled = 'webview:set-spell-check-enabled',
|
||||
Webview_SearchHotkey = 'webview:search-hotkey',
|
||||
Webview_PrintToPDF = 'webview:print-to-pdf',
|
||||
Webview_SaveAsHTML = 'webview:save-as-html',
|
||||
|
||||
// Open
|
||||
Open_Path = 'open:path',
|
||||
@ -90,6 +92,8 @@ export enum IpcChannel {
|
||||
Mcp_AbortTool = 'mcp:abort-tool',
|
||||
Mcp_GetServerVersion = 'mcp:get-server-version',
|
||||
Mcp_Progress = 'mcp:progress',
|
||||
Mcp_GetServerLogs = 'mcp:get-server-logs',
|
||||
Mcp_ServerLog = 'mcp:server-log',
|
||||
// Python
|
||||
Python_Execute = 'python:execute',
|
||||
|
||||
@ -196,6 +200,9 @@ export enum IpcChannel {
|
||||
File_ValidateNotesDirectory = 'file:validateNotesDirectory',
|
||||
File_StartWatcher = 'file:startWatcher',
|
||||
File_StopWatcher = 'file:stopWatcher',
|
||||
File_PauseWatcher = 'file:pauseWatcher',
|
||||
File_ResumeWatcher = 'file:resumeWatcher',
|
||||
File_BatchUploadMarkdown = 'file:batchUploadMarkdown',
|
||||
File_ShowInFolder = 'file:showInFolder',
|
||||
|
||||
// file service
|
||||
@ -236,6 +243,9 @@ export enum IpcChannel {
|
||||
System_GetHostname = 'system:getHostname',
|
||||
System_GetCpuName = 'system:getCpuName',
|
||||
System_CheckGitBash = 'system:checkGitBash',
|
||||
System_GetGitBashPath = 'system:getGitBashPath',
|
||||
System_GetGitBashPathInfo = 'system:getGitBashPathInfo',
|
||||
System_SetGitBashPath = 'system:setGitBashPath',
|
||||
|
||||
// DevTools
|
||||
System_ToggleDevTools = 'system:toggleDevTools',
|
||||
@ -290,6 +300,8 @@ export enum IpcChannel {
|
||||
Selection_ActionWindowClose = 'selection:action-window-close',
|
||||
Selection_ActionWindowMinimize = 'selection:action-window-minimize',
|
||||
Selection_ActionWindowPin = 'selection:action-window-pin',
|
||||
// [Windows only] Electron bug workaround - can be removed once https://github.com/electron/electron/issues/48554 is fixed
|
||||
Selection_ActionWindowResize = 'selection:action-window-resize',
|
||||
Selection_ProcessAction = 'selection:process-action',
|
||||
Selection_UpdateActionData = 'selection:update-action-data',
|
||||
|
||||
|
||||
@ -88,16 +88,11 @@ export function getSdkClient(
|
||||
}
|
||||
})
|
||||
}
|
||||
let baseURL =
|
||||
const baseURL =
|
||||
provider.type === 'anthropic'
|
||||
? provider.apiHost
|
||||
: (provider.anthropicApiHost && provider.anthropicApiHost.trim()) || provider.apiHost
|
||||
|
||||
// Anthropic SDK automatically appends /v1 to all endpoints (like /v1/messages, /v1/models)
|
||||
// We need to strip api version from baseURL to avoid duplication (e.g., /v3/v1/models)
|
||||
// formatProviderApiHost adds /v1 for AI SDK compatibility, but Anthropic SDK needs it removed
|
||||
baseURL = baseURL.replace(/\/v\d+(?:alpha|beta)?(?=\/|$)/i, '')
|
||||
|
||||
logger.debug('Anthropic API baseURL', { baseURL, providerId: provider.id })
|
||||
|
||||
if (provider.id === 'aihubmix') {
|
||||
|
||||
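The comment above explains why the API version segment is stripped before handing the base URL to the Anthropic SDK. A small sketch of how that regex behaves on made-up hosts:

// Example hosts are hypothetical; the regex is the one used above.
const stripApiVersion = (url: string) => url.replace(/\/v\d+(?:alpha|beta)?(?=\/|$)/i, '')

stripApiVersion('https://api.example.com/v1')               // 'https://api.example.com'
stripApiVersion('https://gateway.example.com/v3/anthropic') // 'https://gateway.example.com/anthropic'
stripApiVersion('https://api.example.com/v1beta/')          // 'https://api.example.com/'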
@ -488,3 +488,11 @@ export const MACOS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [
|
||||
|
||||
// resources/scripts should be maintained manually
|
||||
export const HOME_CHERRY_DIR = '.cherrystudio'
|
||||
|
||||
// Git Bash path configuration types
|
||||
export type GitBashPathSource = 'manual' | 'auto'
|
||||
|
||||
export interface GitBashPathInfo {
|
||||
path: string | null
|
||||
source: GitBashPathSource | null
|
||||
}
|
||||
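For reference, values conforming to GitBashPathInfo might look like this; the Windows path below is an illustrative assumption:

const manualSelection: GitBashPathInfo = {
  path: 'C:\\Program Files\\Git\\bin\\bash.exe', // assumed install location
  source: 'manual'
}
const notConfigured: GitBashPathInfo = { path: null, source: null }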
|
||||
@ -10,7 +10,7 @@ export type LoaderReturn = {
|
||||
messageSource?: 'preprocess' | 'embedding' | 'validation'
|
||||
}
|
||||
|
||||
export type FileChangeEventType = 'add' | 'change' | 'unlink' | 'addDir' | 'unlinkDir'
|
||||
export type FileChangeEventType = 'add' | 'change' | 'unlink' | 'addDir' | 'unlinkDir' | 'refresh'
|
||||
|
||||
export type FileChangeEvent = {
|
||||
eventType: FileChangeEventType
|
||||
@ -23,6 +23,14 @@ export type MCPProgressEvent = {
|
||||
progress: number // 0-1 range
|
||||
}
|
||||
|
||||
export type MCPServerLogEntry = {
|
||||
timestamp: number
|
||||
level: 'debug' | 'info' | 'warn' | 'error' | 'stderr' | 'stdout'
|
||||
message: string
|
||||
data?: any
|
||||
source?: string
|
||||
}
|
||||
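An example value conforming to MCPServerLogEntry; the message and source are illustrative, not taken from the source:

const entry: MCPServerLogEntry = {
  timestamp: Date.now(),
  level: 'stderr',
  message: 'server exited with code 1', // hypothetical log line
  source: 'example-mcp-server'          // hypothetical server name
}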
|
||||
export type WebviewKeyEvent = {
|
||||
webviewId: number
|
||||
key: string
|
||||
|
||||
@ -11,7 +11,7 @@ const OVMS_EX_URL = 'https://gitcode.com/gcw_ggDjjkY3/kjfile/releases/download/d
|
||||
|
||||
/**
|
||||
* error code:
|
||||
* 101: Unsupported CPU (not Intel Ultra)
|
||||
* 101: Unsupported CPU (not Intel)
|
||||
* 102: Unsupported platform (not Windows)
|
||||
* 103: Download failed
|
||||
* 104: Installation failed
|
||||
@ -213,8 +213,8 @@ async function installOvms() {
|
||||
console.log(`CPU Name: ${cpuName}`)
|
||||
|
||||
// Check if CPU name contains "Ultra"
|
||||
if (!cpuName.toLowerCase().includes('intel') || !cpuName.toLowerCase().includes('ultra')) {
|
||||
console.error('OVMS installation requires an Intel(R) Core(TM) Ultra CPU.')
|
||||
if (!cpuName.toLowerCase().includes('intel')) {
|
||||
console.error('OVMS installation requires an Intel CPU.')
|
||||
return 101
|
||||
}
|
||||
|
||||
|
||||
@ -50,7 +50,7 @@ Usage Instructions:
|
||||
- pt-pt (Portuguese)
|
||||
|
||||
Run Command:
|
||||
yarn auto:i18n
|
||||
yarn i18n:translate
|
||||
|
||||
Performance Optimization Recommendations:
|
||||
- For stable API services: MAX_CONCURRENT_TRANSLATIONS=8, TRANSLATION_DELAY_MS=50
|
||||
|
||||
@ -145,7 +145,7 @@ export function main() {
console.log('i18n check passed')
} catch (e) {
console.error(e)
throw new Error(`Check failed. Try running yarn sync:i18n to fix the issues.`)
throw new Error(`Check failed. Try running yarn i18n:sync to fix the issues.`)
}
}
|
||||
|
||||
@ -5,9 +5,17 @@ exports.default = async function (configuration) {
|
||||
const { path } = configuration
|
||||
if (configuration.path) {
|
||||
try {
|
||||
const certPath = process.env.CHERRY_CERT_PATH
|
||||
const keyContainer = process.env.CHERRY_CERT_KEY
|
||||
const csp = process.env.CHERRY_CERT_CSP
|
||||
|
||||
if (!certPath || !keyContainer || !csp) {
|
||||
throw new Error('CHERRY_CERT_PATH, CHERRY_CERT_KEY or CHERRY_CERT_CSP is not set')
|
||||
}
|
||||
|
||||
console.log('Start code signing...')
|
||||
console.log('Signing file:', path)
|
||||
const signCommand = `signtool sign /tr http://timestamp.comodoca.com /td sha256 /fd sha256 /a /v "${path}"`
|
||||
const signCommand = `signtool sign /tr http://timestamp.comodoca.com /td sha256 /fd sha256 /v /f "${certPath}" /csp "${csp}" /k "${keyContainer}" "${path}"`
|
||||
execSync(signCommand, { stdio: 'inherit' })
|
||||
console.log('Code signing completed')
|
||||
} catch (error) {
|
||||
|
||||
@ -19,8 +19,8 @@ import { agentService } from './services/agents'
|
||||
import { apiServerService } from './services/ApiServerService'
|
||||
import { appMenuService } from './services/AppMenuService'
|
||||
import { configManager } from './services/ConfigManager'
|
||||
import mcpService from './services/MCPService'
|
||||
import { nodeTraceService } from './services/NodeTraceService'
|
||||
import mcpService from './services/MCPService'
|
||||
import powerMonitorService from './services/PowerMonitorService'
|
||||
import {
|
||||
CHERRY_STUDIO_PROTOCOL,
|
||||
|
||||
@ -8,6 +8,14 @@ import { generateSignature } from '@main/integration/cherryai'
|
||||
import anthropicService from '@main/services/AnthropicService'
|
||||
import openAIService from '@main/services/OpenAIService'
|
||||
import { getBinaryPath, isBinaryExists, runInstallScript } from '@main/utils/process'
|
||||
import {
|
||||
autoDiscoverGitBash,
|
||||
getBinaryPath,
|
||||
getGitBashPathInfo,
|
||||
isBinaryExists,
|
||||
runInstallScript,
|
||||
validateGitBashPath
|
||||
} from '@main/utils/process'
|
||||
import { handleZoomFactor } from '@main/utils/zoom'
|
||||
import type { SpanEntity, TokenUsage } from '@mcp-trace/trace-core'
|
||||
import type { UpgradeChannel } from '@shared/config/constant'
|
||||
@ -36,7 +44,7 @@ import appService from './services/AppService'
|
||||
import AppUpdater from './services/AppUpdater'
|
||||
import BackupManager from './services/BackupManager'
|
||||
import { codeToolsService } from './services/CodeToolsService'
|
||||
import { configManager } from './services/ConfigManager'
|
||||
import { ConfigKeys, configManager } from './services/ConfigManager'
|
||||
import CopilotService from './services/CopilotService'
|
||||
import DxtService from './services/DxtService'
|
||||
import { ExportService } from './services/ExportService'
|
||||
@ -500,38 +508,60 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
}
|
||||
|
||||
try {
|
||||
// Check common Git Bash installation paths
|
||||
const commonPaths = [
|
||||
path.join(process.env.ProgramFiles || 'C:\\Program Files', 'Git', 'bin', 'bash.exe'),
|
||||
path.join(process.env['ProgramFiles(x86)'] || 'C:\\Program Files (x86)', 'Git', 'bin', 'bash.exe'),
|
||||
path.join(process.env.LOCALAPPDATA || '', 'Programs', 'Git', 'bin', 'bash.exe')
|
||||
]
|
||||
|
||||
// Check if any of the common paths exist
|
||||
for (const bashPath of commonPaths) {
|
||||
if (fs.existsSync(bashPath)) {
|
||||
logger.debug('Git Bash found', { path: bashPath })
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Check if git is in PATH
|
||||
const { execSync } = require('child_process')
|
||||
try {
|
||||
execSync('git --version', { stdio: 'ignore' })
|
||||
logger.debug('Git found in PATH')
|
||||
// Use autoDiscoverGitBash to handle auto-discovery and persistence
|
||||
const bashPath = autoDiscoverGitBash()
|
||||
if (bashPath) {
|
||||
logger.info('Git Bash is available', { path: bashPath })
|
||||
return true
|
||||
} catch {
|
||||
// Git not in PATH
|
||||
}
|
||||
|
||||
logger.debug('Git Bash not found on Windows system')
|
||||
logger.warn('Git Bash not found. Please install Git for Windows from https://git-scm.com/downloads/win')
|
||||
return false
|
||||
} catch (error) {
|
||||
logger.error('Error checking Git Bash', error as Error)
|
||||
logger.error('Unexpected error checking Git Bash', error as Error)
|
||||
return false
|
||||
}
|
||||
})
|
||||
|
||||
ipcMain.handle(IpcChannel.System_GetGitBashPath, () => {
|
||||
if (!isWin) {
|
||||
return null
|
||||
}
|
||||
|
||||
const customPath = configManager.get(ConfigKeys.GitBashPath) as string | undefined
|
||||
return customPath ?? null
|
||||
})
|
||||
|
||||
// Returns { path, source } where source is 'manual' | 'auto' | null
|
||||
ipcMain.handle(IpcChannel.System_GetGitBashPathInfo, () => {
|
||||
return getGitBashPathInfo()
|
||||
})
|
||||
|
||||
ipcMain.handle(IpcChannel.System_SetGitBashPath, (_, newPath: string | null) => {
|
||||
if (!isWin) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (!newPath) {
|
||||
// Clear manual setting and re-run auto-discovery
|
||||
configManager.set(ConfigKeys.GitBashPath, null)
|
||||
configManager.set(ConfigKeys.GitBashPathSource, null)
|
||||
// Re-run auto-discovery to restore auto-discovered path if available
|
||||
autoDiscoverGitBash()
|
||||
return true
|
||||
}
|
||||
|
||||
const validated = validateGitBashPath(newPath)
|
||||
if (!validated) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Set path with 'manual' source
|
||||
configManager.set(ConfigKeys.GitBashPath, validated)
|
||||
configManager.set(ConfigKeys.GitBashPathSource, 'manual')
|
||||
return true
|
||||
})
|
||||
|
||||
ipcMain.handle(IpcChannel.System_ToggleDevTools, (e) => {
|
||||
const win = BrowserWindow.fromWebContents(e.sender)
|
||||
win && win.webContents.toggleDevTools()
|
||||
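A hedged sketch of how a renderer process might call the Git Bash handlers registered in the hunk above; the import path and the returned values are assumptions for illustration.

import { ipcRenderer } from 'electron'
import { IpcChannel } from '@shared/IpcChannel' // path is an assumption

async function showGitBashStatus() {
  // Returns { path, source } where source is 'manual' | 'auto' | null
  const info = await ipcRenderer.invoke(IpcChannel.System_GetGitBashPathInfo)
  console.log('Git Bash:', info)

  // Setting a path runs it through validateGitBashPath; invalid paths return false
  const ok = await ipcRenderer.invoke(IpcChannel.System_SetGitBashPath, 'D:\\Git\\bin\\bash.exe')
  console.log('Manual path accepted:', ok)

  // Passing null clears the manual setting and re-runs auto-discovery
  await ipcRenderer.invoke(IpcChannel.System_SetGitBashPath, null)
}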
@ -596,6 +626,9 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
ipcMain.handle(IpcChannel.File_ValidateNotesDirectory, fileManager.validateNotesDirectory.bind(fileManager))
|
||||
ipcMain.handle(IpcChannel.File_StartWatcher, fileManager.startFileWatcher.bind(fileManager))
|
||||
ipcMain.handle(IpcChannel.File_StopWatcher, fileManager.stopFileWatcher.bind(fileManager))
|
||||
ipcMain.handle(IpcChannel.File_PauseWatcher, fileManager.pauseFileWatcher.bind(fileManager))
|
||||
ipcMain.handle(IpcChannel.File_ResumeWatcher, fileManager.resumeFileWatcher.bind(fileManager))
|
||||
ipcMain.handle(IpcChannel.File_BatchUploadMarkdown, fileManager.batchUploadMarkdownFiles.bind(fileManager))
|
||||
ipcMain.handle(IpcChannel.File_ShowInFolder, fileManager.showInFolder.bind(fileManager))
|
||||
|
||||
// file service
|
||||
@ -781,6 +814,7 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
ipcMain.handle(IpcChannel.Mcp_CheckConnectivity, mcpService.checkMcpConnectivity)
|
||||
ipcMain.handle(IpcChannel.Mcp_AbortTool, mcpService.abortTool)
|
||||
ipcMain.handle(IpcChannel.Mcp_GetServerVersion, mcpService.getServerVersion)
|
||||
ipcMain.handle(IpcChannel.Mcp_GetServerLogs, mcpService.getServerLogs)
|
||||
|
||||
// DXT upload handler
|
||||
ipcMain.handle(IpcChannel.Mcp_UploadDxt, async (event, fileBuffer: ArrayBuffer, fileName: string) => {
|
||||
@ -859,6 +893,17 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
webview.session.setSpellCheckerEnabled(isEnable)
|
||||
})
|
||||
|
||||
// Webview print and save handlers
|
||||
ipcMain.handle(IpcChannel.Webview_PrintToPDF, async (_, webviewId: number) => {
|
||||
const { printWebviewToPDF } = await import('./services/WebviewService')
|
||||
return await printWebviewToPDF(webviewId)
|
||||
})
|
||||
|
||||
ipcMain.handle(IpcChannel.Webview_SaveAsHTML, async (_, webviewId: number) => {
|
||||
const { saveWebviewAsHTML } = await import('./services/WebviewService')
|
||||
return await saveWebviewAsHTML(webviewId)
|
||||
})
|
||||
|
||||
// store sync
|
||||
storeSyncService.registerIpcHandler()
|
||||
|
||||
|
||||
134
src/main/mcpServers/__tests__/browser.test.ts
Normal file
134
src/main/mcpServers/__tests__/browser.test.ts
Normal file
@ -0,0 +1,134 @@
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
|
||||
vi.mock('electron', () => {
|
||||
const sendCommand = vi.fn(async (command: string, params?: { expression?: string }) => {
|
||||
if (command === 'Runtime.evaluate') {
|
||||
if (params?.expression === 'document.documentElement.outerHTML') {
|
||||
return { result: { value: '<html><body><h1>Test</h1><p>Content</p></body></html>' } }
|
||||
}
|
||||
if (params?.expression === 'document.body.innerText') {
|
||||
return { result: { value: 'Test\nContent' } }
|
||||
}
|
||||
return { result: { value: 'ok' } }
|
||||
}
|
||||
return {}
|
||||
})
|
||||
|
||||
const debuggerObj = {
|
||||
isAttached: vi.fn(() => true),
|
||||
attach: vi.fn(),
|
||||
detach: vi.fn(),
|
||||
sendCommand
|
||||
}
|
||||
|
||||
const webContents = {
|
||||
debugger: debuggerObj,
|
||||
setUserAgent: vi.fn(),
|
||||
getURL: vi.fn(() => 'https://example.com/'),
|
||||
getTitle: vi.fn(async () => 'Example Title'),
|
||||
once: vi.fn(),
|
||||
removeListener: vi.fn(),
|
||||
on: vi.fn()
|
||||
}
|
||||
|
||||
const loadURL = vi.fn(async () => {})
|
||||
|
||||
const windows: any[] = []
|
||||
|
||||
class MockBrowserWindow {
|
||||
private destroyed = false
|
||||
public webContents = webContents
|
||||
public loadURL = loadURL
|
||||
public isDestroyed = vi.fn(() => this.destroyed)
|
||||
public close = vi.fn(() => {
|
||||
this.destroyed = true
|
||||
})
|
||||
public destroy = vi.fn(() => {
|
||||
this.destroyed = true
|
||||
})
|
||||
public on = vi.fn()
|
||||
|
||||
constructor() {
|
||||
windows.push(this)
|
||||
}
|
||||
}
|
||||
|
||||
const app = {
|
||||
isReady: vi.fn(() => true),
|
||||
whenReady: vi.fn(async () => {}),
|
||||
on: vi.fn()
|
||||
}
|
||||
|
||||
return {
|
||||
BrowserWindow: MockBrowserWindow as any,
|
||||
app,
|
||||
__mockDebugger: debuggerObj,
|
||||
__mockSendCommand: sendCommand,
|
||||
__mockLoadURL: loadURL,
|
||||
__mockWindows: windows
|
||||
}
|
||||
})
|
||||
|
||||
import * as electron from 'electron'
|
||||
const { __mockWindows } = electron as typeof electron & { __mockWindows: any[] }
|
||||
|
||||
import { CdpBrowserController } from '../browser'
|
||||
|
||||
describe('CdpBrowserController', () => {
|
||||
it('executes single-line code via Runtime.evaluate', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.execute('1+1')
|
||||
expect(result).toBe('ok')
|
||||
})
|
||||
|
||||
it('opens a URL (hidden) and returns current page info', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.open('https://foo.bar/', 5000, false)
|
||||
expect(result.currentUrl).toBe('https://example.com/')
|
||||
expect(result.title).toBe('Example Title')
|
||||
})
|
||||
|
||||
it('opens a URL (visible) when show=true', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.open('https://foo.bar/', 5000, true, 'session-a')
|
||||
expect(result.currentUrl).toBe('https://example.com/')
|
||||
expect(result.title).toBe('Example Title')
|
||||
})
|
||||
|
||||
it('reuses session for execute and supports multiline', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
await controller.open('https://foo.bar/', 5000, false, 'session-b')
|
||||
const result = await controller.execute('const a=1; const b=2; a+b;', 5000, 'session-b')
|
||||
expect(result).toBe('ok')
|
||||
})
|
||||
|
||||
it('evicts least recently used session when exceeding maxSessions', async () => {
|
||||
const controller = new CdpBrowserController({ maxSessions: 2, idleTimeoutMs: 1000 * 60 })
|
||||
await controller.open('https://foo.bar/', 5000, false, 's1')
|
||||
await controller.open('https://foo.bar/', 5000, false, 's2')
|
||||
await controller.open('https://foo.bar/', 5000, false, 's3')
|
||||
const destroyedCount = __mockWindows.filter(
|
||||
(w: any) => w.destroy.mock.calls.length > 0 || w.close.mock.calls.length > 0
|
||||
).length
|
||||
expect(destroyedCount).toBeGreaterThanOrEqual(1)
|
||||
})
|
||||
|
||||
it('fetches URL and returns html format', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.fetch('https://example.com/', 'html')
|
||||
expect(result).toBe('<html><body><h1>Test</h1><p>Content</p></body></html>')
|
||||
})
|
||||
|
||||
it('fetches URL and returns txt format', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.fetch('https://example.com/', 'txt')
|
||||
expect(result).toBe('Test\nContent')
|
||||
})
|
||||
|
||||
it('fetches URL and returns markdown format (default)', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.fetch('https://example.com/')
|
||||
expect(typeof result).toBe('string')
|
||||
expect(result).toContain('Test')
|
||||
})
|
||||
})
|
||||
307
src/main/mcpServers/browser/controller.ts
Normal file
307
src/main/mcpServers/browser/controller.ts
Normal file
@ -0,0 +1,307 @@
|
||||
import { app, BrowserWindow } from 'electron'
|
||||
import TurndownService from 'turndown'
|
||||
|
||||
import { logger, userAgent } from './types'
|
||||
|
||||
/**
|
||||
* Controller for managing browser windows via Chrome DevTools Protocol (CDP).
|
||||
* Supports multiple sessions with LRU eviction and idle timeout cleanup.
|
||||
*/
|
||||
export class CdpBrowserController {
|
||||
private windows: Map<string, { win: BrowserWindow; lastActive: number }> = new Map()
|
||||
private readonly maxSessions: number
|
||||
private readonly idleTimeoutMs: number
|
||||
|
||||
constructor(options?: { maxSessions?: number; idleTimeoutMs?: number }) {
|
||||
this.maxSessions = options?.maxSessions ?? 5
|
||||
this.idleTimeoutMs = options?.idleTimeoutMs ?? 5 * 60 * 1000
|
||||
}
|
||||
|
||||
private async ensureAppReady() {
|
||||
if (!app.isReady()) {
|
||||
await app.whenReady()
|
||||
}
|
||||
}
|
||||
|
||||
private touch(sessionId: string) {
|
||||
const entry = this.windows.get(sessionId)
|
||||
if (entry) entry.lastActive = Date.now()
|
||||
}
|
||||
|
||||
private closeWindow(win: BrowserWindow, sessionId: string) {
|
||||
try {
|
||||
if (!win.isDestroyed()) {
|
||||
if (win.webContents.debugger.isAttached()) {
|
||||
win.webContents.debugger.detach()
|
||||
}
|
||||
win.close()
|
||||
}
|
||||
} catch (error) {
|
||||
logger.warn('Error closing window', { error, sessionId })
|
||||
}
|
||||
}
|
||||
|
||||
private async ensureDebuggerAttached(dbg: Electron.Debugger, sessionId: string) {
|
||||
if (!dbg.isAttached()) {
|
||||
try {
|
||||
logger.info('Attaching debugger', { sessionId })
|
||||
dbg.attach('1.3')
|
||||
await dbg.sendCommand('Page.enable')
|
||||
await dbg.sendCommand('Runtime.enable')
|
||||
logger.info('Debugger attached and domains enabled')
|
||||
} catch (error) {
|
||||
logger.error('Failed to attach debugger', { error })
|
||||
throw error
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private sweepIdle() {
|
||||
const now = Date.now()
|
||||
for (const [id, entry] of this.windows.entries()) {
|
||||
if (now - entry.lastActive > this.idleTimeoutMs) {
|
||||
this.closeWindow(entry.win, id)
|
||||
this.windows.delete(id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private evictIfNeeded(newSessionId: string) {
|
||||
if (this.windows.size < this.maxSessions) return
|
||||
let lruId: string | null = null
|
||||
let lruTime = Number.POSITIVE_INFINITY
|
||||
for (const [id, entry] of this.windows.entries()) {
|
||||
if (id === newSessionId) continue
|
||||
if (entry.lastActive < lruTime) {
|
||||
lruTime = entry.lastActive
|
||||
lruId = id
|
||||
}
|
||||
}
|
||||
if (lruId) {
|
||||
const entry = this.windows.get(lruId)
|
||||
if (entry) {
|
||||
this.closeWindow(entry.win, lruId)
|
||||
}
|
||||
this.windows.delete(lruId)
|
||||
logger.info('Evicted session to respect maxSessions', { evicted: lruId })
|
||||
}
|
||||
}
|
||||
|
||||
private async getWindow(sessionId = 'default', forceNew = false, show = false): Promise<BrowserWindow> {
|
||||
await this.ensureAppReady()
|
||||
|
||||
this.sweepIdle()
|
||||
|
||||
const existing = this.windows.get(sessionId)
|
||||
if (existing && !existing.win.isDestroyed() && !forceNew) {
|
||||
this.touch(sessionId)
|
||||
return existing.win
|
||||
}
|
||||
|
||||
if (existing && !existing.win.isDestroyed() && forceNew) {
|
||||
try {
|
||||
if (existing.win.webContents.debugger.isAttached()) {
|
||||
existing.win.webContents.debugger.detach()
|
||||
}
|
||||
} catch (error) {
|
||||
logger.warn('Error detaching debugger before recreate', { error, sessionId })
|
||||
}
|
||||
existing.win.destroy()
|
||||
this.windows.delete(sessionId)
|
||||
}
|
||||
|
||||
this.evictIfNeeded(sessionId)
|
||||
|
||||
const win = new BrowserWindow({
|
||||
show,
|
||||
webPreferences: {
|
||||
contextIsolation: true,
|
||||
sandbox: true,
|
||||
nodeIntegration: false,
|
||||
devTools: true
|
||||
}
|
||||
})
|
||||
|
||||
// Use a standard desktop browser UA to avoid some anti-bot blocks
|
||||
win.webContents.setUserAgent(userAgent)
|
||||
|
||||
// Log navigation lifecycle to help diagnose slow loads
|
||||
win.webContents.on('did-start-loading', () => logger.info(`did-start-loading`, { sessionId }))
|
||||
win.webContents.on('dom-ready', () => logger.info(`dom-ready`, { sessionId }))
|
||||
win.webContents.on('did-finish-load', () => logger.info(`did-finish-load`, { sessionId }))
|
||||
win.webContents.on('did-fail-load', (_e, code, desc) => logger.warn('Navigation failed', { code, desc }))
|
||||
|
||||
win.on('closed', () => {
|
||||
this.windows.delete(sessionId)
|
||||
})
|
||||
|
||||
this.windows.set(sessionId, { win, lastActive: Date.now() })
|
||||
return win
|
||||
}
|
||||
|
||||
/**
|
||||
* Opens a URL in a browser window and waits for navigation to complete.
|
||||
* @param url - The URL to navigate to
|
||||
* @param timeout - Navigation timeout in milliseconds (default: 10000)
|
||||
* @param show - Whether to show the browser window (default: false)
|
||||
* @param sessionId - Session identifier for window reuse (default: 'default')
|
||||
* @returns Object containing the current URL and page title after navigation
|
||||
*/
|
||||
public async open(url: string, timeout = 10000, show = false, sessionId = 'default') {
|
||||
const win = await this.getWindow(sessionId, true, show)
|
||||
logger.info('Loading URL', { url, sessionId })
|
||||
const { webContents } = win
|
||||
this.touch(sessionId)
|
||||
|
||||
// Track resolution state to prevent multiple handlers from firing
|
||||
let resolved = false
|
||||
let onFinish: () => void
|
||||
let onDomReady: () => void
|
||||
let onFail: (_event: Electron.Event, code: number, desc: string) => void
|
||||
|
||||
// Define cleanup outside Promise to ensure it's callable in finally block,
|
||||
// preventing memory leaks when timeout occurs before navigation completes
|
||||
const cleanup = () => {
|
||||
webContents.removeListener('did-finish-load', onFinish)
|
||||
webContents.removeListener('did-fail-load', onFail)
|
||||
webContents.removeListener('dom-ready', onDomReady)
|
||||
}
|
||||
|
||||
const loadPromise = new Promise<void>((resolve, reject) => {
|
||||
onFinish = () => {
|
||||
if (resolved) return
|
||||
resolved = true
|
||||
cleanup()
|
||||
resolve()
|
||||
}
|
||||
onDomReady = () => {
|
||||
if (resolved) return
|
||||
resolved = true
|
||||
cleanup()
|
||||
resolve()
|
||||
}
|
||||
onFail = (_event: Electron.Event, code: number, desc: string) => {
|
||||
if (resolved) return
|
||||
resolved = true
|
||||
cleanup()
|
||||
reject(new Error(`Navigation failed (${code}): ${desc}`))
|
||||
}
|
||||
webContents.once('did-finish-load', onFinish)
|
||||
webContents.once('dom-ready', onDomReady)
|
||||
webContents.once('did-fail-load', onFail)
|
||||
})
|
||||
|
||||
const timeoutPromise = new Promise<void>((_, reject) => {
|
||||
setTimeout(() => reject(new Error('Navigation timed out')), timeout)
|
||||
})
|
||||
|
||||
try {
|
||||
await Promise.race([win.loadURL(url), loadPromise, timeoutPromise])
|
||||
} finally {
|
||||
// Always cleanup listeners to prevent memory leaks on timeout
|
||||
cleanup()
|
||||
}
|
||||
|
||||
const currentUrl = webContents.getURL()
|
||||
const title = await webContents.getTitle()
|
||||
return { currentUrl, title }
|
||||
}
|
||||
|
||||
public async execute(code: string, timeout = 5000, sessionId = 'default') {
|
||||
const win = await this.getWindow(sessionId)
|
||||
this.touch(sessionId)
|
||||
const dbg = win.webContents.debugger
|
||||
|
||||
await this.ensureDebuggerAttached(dbg, sessionId)
|
||||
|
||||
const evalPromise = dbg.sendCommand('Runtime.evaluate', {
|
||||
expression: code,
|
||||
awaitPromise: true,
|
||||
returnByValue: true
|
||||
})
|
||||
|
||||
const result = await Promise.race([
|
||||
evalPromise,
|
||||
new Promise((_, reject) => setTimeout(() => reject(new Error('Execution timed out')), timeout))
|
||||
])
|
||||
|
||||
const evalResult = result as any
|
||||
|
||||
if (evalResult?.exceptionDetails) {
|
||||
const message = evalResult.exceptionDetails.exception?.description || 'Unknown script error'
|
||||
logger.warn('Runtime.evaluate raised exception', { message })
|
||||
throw new Error(message)
|
||||
}
|
||||
|
||||
const value = evalResult?.result?.value ?? evalResult?.result?.description ?? null
|
||||
return value
|
||||
}
|
||||
|
||||
public async reset(sessionId?: string) {
|
||||
if (sessionId) {
|
||||
const entry = this.windows.get(sessionId)
|
||||
if (entry) {
|
||||
this.closeWindow(entry.win, sessionId)
|
||||
}
|
||||
this.windows.delete(sessionId)
|
||||
logger.info('Browser CDP context reset', { sessionId })
|
||||
return
|
||||
}
|
||||
|
||||
for (const [id, entry] of this.windows.entries()) {
|
||||
this.closeWindow(entry.win, id)
|
||||
this.windows.delete(id)
|
||||
}
|
||||
logger.info('Browser CDP context reset (all sessions)')
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetches a URL and returns content in the specified format.
|
||||
* @param url - The URL to fetch
|
||||
* @param format - Output format: 'html', 'txt', 'markdown', or 'json' (default: 'markdown')
|
||||
* @param timeout - Navigation timeout in milliseconds (default: 10000)
|
||||
* @param sessionId - Session identifier (default: 'default')
|
||||
* @returns Content in the requested format. For 'json', returns parsed object or { data: rawContent } if parsing fails
|
||||
*/
|
||||
public async fetch(
|
||||
url: string,
|
||||
format: 'html' | 'txt' | 'markdown' | 'json' = 'markdown',
|
||||
timeout = 10000,
|
||||
sessionId = 'default'
|
||||
) {
|
||||
await this.open(url, timeout, false, sessionId)
|
||||
|
||||
const win = await this.getWindow(sessionId)
|
||||
const dbg = win.webContents.debugger
|
||||
|
||||
await this.ensureDebuggerAttached(dbg, sessionId)
|
||||
|
||||
let expression: string
|
||||
if (format === 'json' || format === 'txt') {
|
||||
expression = 'document.body.innerText'
|
||||
} else {
|
||||
expression = 'document.documentElement.outerHTML'
|
||||
}
|
||||
|
||||
const result = (await dbg.sendCommand('Runtime.evaluate', {
|
||||
expression,
|
||||
returnByValue: true
|
||||
})) as { result?: { value?: string } }
|
||||
|
||||
const content = result?.result?.value ?? ''
|
||||
|
||||
if (format === 'markdown') {
|
||||
const turndownService = new TurndownService()
|
||||
return turndownService.turndown(content)
|
||||
}
|
||||
if (format === 'json') {
|
||||
// Attempt to parse as JSON; if content is not valid JSON, wrap it in a data object
|
||||
try {
|
||||
return JSON.parse(content)
|
||||
} catch {
|
||||
return { data: content }
|
||||
}
|
||||
}
|
||||
return content
|
||||
}
|
||||
}
|
||||
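A hedged usage sketch of CdpBrowserController outside the MCP server wiring shown below; the arguments follow the signatures above, and the URL and session name are placeholders.

import { CdpBrowserController } from './controller'

async function demo() {
  const browser = new CdpBrowserController({ maxSessions: 2, idleTimeoutMs: 60_000 })

  // open(url, timeout, show, sessionId) resolves once navigation settles
  const { currentUrl, title } = await browser.open('https://example.com/', 10_000, false, 'docs')

  // execute() evaluates JS in the page via Runtime.evaluate and returns the value
  const heading = await browser.execute('document.querySelector("h1")?.textContent', 5_000, 'docs')

  // fetch() navigates and converts the page, here to markdown via Turndown
  const markdown = await browser.fetch('https://example.com/', 'markdown', 10_000, 'docs')

  console.log({ currentUrl, title, heading, markdownLength: String(markdown).length })
  await browser.reset('docs')
}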
3
src/main/mcpServers/browser/index.ts
Normal file
3
src/main/mcpServers/browser/index.ts
Normal file
@ -0,0 +1,3 @@
|
||||
export { CdpBrowserController } from './controller'
|
||||
export { BrowserServer } from './server'
|
||||
export { BrowserServer as default } from './server'
|
||||
50
src/main/mcpServers/browser/server.ts
Normal file
50
src/main/mcpServers/browser/server.ts
Normal file
@ -0,0 +1,50 @@
|
||||
import type { Server } from '@modelcontextprotocol/sdk/server/index.js'
|
||||
import { Server as MCServer } from '@modelcontextprotocol/sdk/server/index.js'
|
||||
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
|
||||
import { app } from 'electron'
|
||||
|
||||
import { CdpBrowserController } from './controller'
|
||||
import { toolDefinitions, toolHandlers } from './tools'
|
||||
|
||||
export class BrowserServer {
|
||||
public server: Server
|
||||
private controller = new CdpBrowserController()
|
||||
|
||||
constructor() {
|
||||
const server = new MCServer(
|
||||
{
|
||||
name: '@cherry/browser',
|
||||
version: '0.1.0'
|
||||
},
|
||||
{
|
||||
capabilities: {
|
||||
resources: {},
|
||||
tools: {}
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
server.setRequestHandler(ListToolsRequestSchema, async () => {
|
||||
return {
|
||||
tools: toolDefinitions
|
||||
}
|
||||
})
|
||||
|
||||
server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
||||
const { name, arguments: args } = request.params
|
||||
const handler = toolHandlers[name]
|
||||
if (!handler) {
|
||||
throw new Error('Tool not found')
|
||||
}
|
||||
return handler(this.controller, args)
|
||||
})
|
||||
|
||||
app.on('before-quit', () => {
|
||||
void this.controller.reset()
|
||||
})
|
||||
|
||||
this.server = server
|
||||
}
|
||||
}
|
||||
|
||||
export default BrowserServer
|
||||
48
src/main/mcpServers/browser/tools/execute.ts
Normal file
48
src/main/mcpServers/browser/tools/execute.ts
Normal file
@ -0,0 +1,48 @@
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { CdpBrowserController } from '../controller'
|
||||
import { errorResponse, successResponse } from './utils'
|
||||
|
||||
export const ExecuteSchema = z.object({
|
||||
code: z
|
||||
.string()
|
||||
.describe(
|
||||
'JavaScript evaluated via Chrome DevTools Runtime.evaluate. Keep it short; prefer one-line with semicolons for multiple statements.'
|
||||
),
|
||||
timeout: z.number().default(5000).describe('Timeout in milliseconds for code execution (default: 5000ms)'),
|
||||
sessionId: z.string().optional().describe('Session identifier to target a specific page (default: default)')
|
||||
})
|
||||
|
||||
export const executeToolDefinition = {
|
||||
name: 'execute',
|
||||
description:
|
||||
'Run JavaScript in the current page via Runtime.evaluate. Prefer short, single-line snippets; use semicolons for multiple statements.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
code: {
|
||||
type: 'string',
|
||||
description: 'One-line JS to evaluate in page context'
|
||||
},
|
||||
timeout: {
|
||||
type: 'number',
|
||||
description: 'Timeout in milliseconds (default 5000)'
|
||||
},
|
||||
sessionId: {
|
||||
type: 'string',
|
||||
description: 'Session identifier; targets a specific page (default: default)'
|
||||
}
|
||||
},
|
||||
required: ['code']
|
||||
}
|
||||
}
|
||||
|
||||
export async function handleExecute(controller: CdpBrowserController, args: unknown) {
|
||||
const { code, timeout, sessionId } = ExecuteSchema.parse(args)
|
||||
try {
|
||||
const value = await controller.execute(code, timeout, sessionId ?? 'default')
|
||||
return successResponse(typeof value === 'string' ? value : JSON.stringify(value))
|
||||
} catch (error) {
|
||||
return errorResponse(error as Error)
|
||||
}
|
||||
}
|
||||
49
src/main/mcpServers/browser/tools/fetch.ts
Normal file
49
src/main/mcpServers/browser/tools/fetch.ts
Normal file
@ -0,0 +1,49 @@
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { CdpBrowserController } from '../controller'
|
||||
import { errorResponse, successResponse } from './utils'
|
||||
|
||||
export const FetchSchema = z.object({
|
||||
url: z.url().describe('URL to fetch'),
|
||||
format: z.enum(['html', 'txt', 'markdown', 'json']).default('markdown').describe('Output format (default: markdown)'),
|
||||
timeout: z.number().optional().describe('Timeout in milliseconds for navigation (default: 10000)'),
|
||||
sessionId: z.string().optional().describe('Session identifier (default: default)')
|
||||
})
|
||||
|
||||
export const fetchToolDefinition = {
|
||||
name: 'fetch',
|
||||
description: 'Fetch a URL using the browser and return content in specified format (html, txt, markdown, json)',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
url: {
|
||||
type: 'string',
|
||||
description: 'URL to fetch'
|
||||
},
|
||||
format: {
|
||||
type: 'string',
|
||||
enum: ['html', 'txt', 'markdown', 'json'],
|
||||
description: 'Output format (default: markdown)'
|
||||
},
|
||||
timeout: {
|
||||
type: 'number',
|
||||
description: 'Navigation timeout in milliseconds (default: 10000)'
|
||||
},
|
||||
sessionId: {
|
||||
type: 'string',
|
||||
description: 'Session identifier (default: default)'
|
||||
}
|
||||
},
|
||||
required: ['url']
|
||||
}
|
||||
}
|
||||
|
||||
export async function handleFetch(controller: CdpBrowserController, args: unknown) {
|
||||
const { url, format, timeout, sessionId } = FetchSchema.parse(args)
|
||||
try {
|
||||
const content = await controller.fetch(url, format, timeout ?? 10000, sessionId ?? 'default')
|
||||
return successResponse(typeof content === 'string' ? content : JSON.stringify(content))
|
||||
} catch (error) {
|
||||
return errorResponse(error as Error)
|
||||
}
|
||||
}
|
||||
25
src/main/mcpServers/browser/tools/index.ts
Normal file
25
src/main/mcpServers/browser/tools/index.ts
Normal file
@ -0,0 +1,25 @@
|
||||
export { ExecuteSchema, executeToolDefinition, handleExecute } from './execute'
|
||||
export { FetchSchema, fetchToolDefinition, handleFetch } from './fetch'
|
||||
export { handleOpen, OpenSchema, openToolDefinition } from './open'
|
||||
export { handleReset, resetToolDefinition } from './reset'
|
||||
|
||||
import type { CdpBrowserController } from '../controller'
|
||||
import { executeToolDefinition, handleExecute } from './execute'
|
||||
import { fetchToolDefinition, handleFetch } from './fetch'
|
||||
import { handleOpen, openToolDefinition } from './open'
|
||||
import { handleReset, resetToolDefinition } from './reset'
|
||||
|
||||
export const toolDefinitions = [openToolDefinition, executeToolDefinition, resetToolDefinition, fetchToolDefinition]
|
||||
|
||||
export const toolHandlers: Record<
|
||||
string,
|
||||
(
|
||||
controller: CdpBrowserController,
|
||||
args: unknown
|
||||
) => Promise<{ content: { type: string; text: string }[]; isError: boolean }>
|
||||
> = {
|
||||
open: handleOpen,
|
||||
execute: handleExecute,
|
||||
reset: handleReset,
|
||||
fetch: handleFetch
|
||||
}
|
||||
47
src/main/mcpServers/browser/tools/open.ts
Normal file
47
src/main/mcpServers/browser/tools/open.ts
Normal file
@ -0,0 +1,47 @@
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { CdpBrowserController } from '../controller'
|
||||
import { successResponse } from './utils'
|
||||
|
||||
export const OpenSchema = z.object({
|
||||
url: z.url().describe('URL to open in the controlled Electron window'),
|
||||
timeout: z.number().optional().describe('Timeout in milliseconds for navigation (default: 10000)'),
|
||||
show: z.boolean().optional().describe('Whether to show the browser window (default: false)'),
|
||||
sessionId: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe('Session identifier; separate sessions keep separate pages (default: default)')
|
||||
})
|
||||
|
||||
export const openToolDefinition = {
|
||||
name: 'open',
|
||||
description: 'Open a URL in a hidden Electron window controlled via Chrome DevTools Protocol',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
url: {
|
||||
type: 'string',
|
||||
description: 'URL to load'
|
||||
},
|
||||
timeout: {
|
||||
type: 'number',
|
||||
description: 'Navigation timeout in milliseconds (default 10000)'
|
||||
},
|
||||
show: {
|
||||
type: 'boolean',
|
||||
description: 'Whether to show the browser window (default false)'
|
||||
},
|
||||
sessionId: {
|
||||
type: 'string',
|
||||
description: 'Session identifier; separate sessions keep separate pages (default: default)'
|
||||
}
|
||||
},
|
||||
required: ['url']
|
||||
}
|
||||
}
|
||||
|
||||
export async function handleOpen(controller: CdpBrowserController, args: unknown) {
|
||||
const { url, timeout, show, sessionId } = OpenSchema.parse(args)
|
||||
const res = await controller.open(url, timeout ?? 10000, show ?? false, sessionId ?? 'default')
|
||||
return successResponse(JSON.stringify(res))
|
||||
}
|
||||
34
src/main/mcpServers/browser/tools/reset.ts
Normal file
34
src/main/mcpServers/browser/tools/reset.ts
Normal file
@ -0,0 +1,34 @@
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { CdpBrowserController } from '../controller'
|
||||
import { successResponse } from './utils'
|
||||
|
||||
/** Zod schema for validating reset tool arguments */
|
||||
export const ResetSchema = z.object({
|
||||
sessionId: z.string().optional().describe('Session identifier to reset; omit to reset all sessions')
|
||||
})
|
||||
|
||||
/** MCP tool definition for the reset tool */
|
||||
export const resetToolDefinition = {
|
||||
name: 'reset',
|
||||
description: 'Reset the controlled window and detach debugger',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
sessionId: {
|
||||
type: 'string',
|
||||
description: 'Session identifier to reset; omit to reset all sessions'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handler for the reset MCP tool.
|
||||
* Closes browser window(s) and detaches debugger for the specified session or all sessions.
|
||||
*/
|
||||
export async function handleReset(controller: CdpBrowserController, args: unknown) {
|
||||
const { sessionId } = ResetSchema.parse(args)
|
||||
await controller.reset(sessionId)
|
||||
return successResponse('reset')
|
||||
}
|
||||
13
src/main/mcpServers/browser/tools/utils.ts
Normal file
13
src/main/mcpServers/browser/tools/utils.ts
Normal file
@ -0,0 +1,13 @@
|
||||
export function successResponse(text: string) {
|
||||
return {
|
||||
content: [{ type: 'text', text }],
|
||||
isError: false
|
||||
}
|
||||
}
|
||||
|
||||
export function errorResponse(error: Error) {
|
||||
return {
|
||||
content: [{ type: 'text', text: error.message }],
|
||||
isError: true
|
||||
}
|
||||
}
|
||||
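The helpers above produce the MCP tool result shape; for example:

successResponse('{"currentUrl":"https://example.com/","title":"Example"}')
// -> { content: [{ type: 'text', text: '{"currentUrl":...}' }], isError: false }

errorResponse(new Error('Navigation timed out'))
// -> { content: [{ type: 'text', text: 'Navigation timed out' }], isError: true }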
4
src/main/mcpServers/browser/types.ts
Normal file
4
src/main/mcpServers/browser/types.ts
Normal file
@ -0,0 +1,4 @@
|
||||
import { loggerService } from '@logger'
|
||||
|
||||
export const logger = loggerService.withContext('MCPBrowserCDP')
|
||||
export const userAgent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:145.0) Gecko/20100101 Firefox/145.0'
|
||||
@ -4,6 +4,7 @@ import type { BuiltinMCPServerName } from '@types'
|
||||
import { BuiltinMCPServerNames } from '@types'
|
||||
|
||||
import BraveSearchServer from './brave-search'
|
||||
import BrowserServer from './browser'
|
||||
import DiDiMcpServer from './didi-mcp'
|
||||
import DifyKnowledgeServer from './dify-knowledge'
|
||||
import FetchServer from './fetch'
|
||||
@ -35,7 +36,7 @@ export function createInMemoryMCPServer(
|
||||
return new FetchServer().server
|
||||
}
|
||||
case BuiltinMCPServerNames.filesystem: {
|
||||
return new FileSystemServer(args).server
|
||||
return new FileSystemServer(envs.WORKSPACE_ROOT).server
|
||||
}
|
||||
case BuiltinMCPServerNames.difyKnowledge: {
|
||||
const difyKey = envs.DIFY_KEY
|
||||
@ -48,6 +49,9 @@ export function createInMemoryMCPServer(
|
||||
const apiKey = envs.DIDI_API_KEY
|
||||
return new DiDiMcpServer(apiKey).server
|
||||
}
|
||||
case BuiltinMCPServerNames.browser: {
|
||||
return new BrowserServer().server
|
||||
}
|
||||
default:
|
||||
throw new Error(`Unknown in-memory MCP server: ${name}`)
|
||||
}
|
||||
|
||||
@ -1,652 +0,0 @@
|
||||
// port https://github.com/modelcontextprotocol/servers/blob/main/src/filesystem/index.ts
|
||||
|
||||
import { loggerService } from '@logger'
|
||||
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
|
||||
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
|
||||
import { createTwoFilesPatch } from 'diff'
|
||||
import fs from 'fs/promises'
|
||||
import { minimatch } from 'minimatch'
|
||||
import os from 'os'
|
||||
import path from 'path'
|
||||
import * as z from 'zod'
|
||||
|
||||
const logger = loggerService.withContext('MCP:FileSystemServer')
|
||||
|
||||
// Normalize all paths consistently
|
||||
function normalizePath(p: string): string {
|
||||
return path.normalize(p)
|
||||
}
|
||||
|
||||
function expandHome(filepath: string): string {
|
||||
if (filepath.startsWith('~/') || filepath === '~') {
|
||||
return path.join(os.homedir(), filepath.slice(1))
|
||||
}
|
||||
return filepath
|
||||
}
|
||||
|
||||
// Security utilities
|
||||
async function validatePath(allowedDirectories: string[], requestedPath: string): Promise<string> {
|
||||
const expandedPath = expandHome(requestedPath)
|
||||
const absolute = path.isAbsolute(expandedPath)
|
||||
? path.resolve(expandedPath)
|
||||
: path.resolve(process.cwd(), expandedPath)
|
||||
|
||||
const normalizedRequested = normalizePath(absolute)
|
||||
|
||||
// Check if path is within allowed directories
|
||||
const isAllowed = allowedDirectories.some((dir) => normalizedRequested.startsWith(dir))
|
||||
if (!isAllowed) {
|
||||
throw new Error(
|
||||
`Access denied - path outside allowed directories: ${absolute} not in ${allowedDirectories.join(', ')}`
|
||||
)
|
||||
}
|
||||
|
||||
// Handle symlinks by checking their real path
|
||||
try {
|
||||
const realPath = await fs.realpath(absolute)
|
||||
const normalizedReal = normalizePath(realPath)
|
||||
const isRealPathAllowed = allowedDirectories.some((dir) => normalizedReal.startsWith(dir))
|
||||
if (!isRealPathAllowed) {
|
||||
throw new Error('Access denied - symlink target outside allowed directories')
|
||||
}
|
||||
return realPath
|
||||
} catch (error) {
|
||||
// For new files that don't exist yet, verify parent directory
|
||||
const parentDir = path.dirname(absolute)
|
||||
try {
|
||||
const realParentPath = await fs.realpath(parentDir)
|
||||
const normalizedParent = normalizePath(realParentPath)
|
||||
const isParentAllowed = allowedDirectories.some((dir) => normalizedParent.startsWith(dir))
|
||||
if (!isParentAllowed) {
|
||||
throw new Error('Access denied - parent directory outside allowed directories')
|
||||
}
|
||||
return absolute
|
||||
} catch {
|
||||
throw new Error(`Parent directory does not exist: ${parentDir}`)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Schema definitions
|
||||
const ReadFileArgsSchema = z.object({
|
||||
path: z.string()
|
||||
})
|
||||
|
||||
const ReadMultipleFilesArgsSchema = z.object({
|
||||
paths: z.array(z.string())
|
||||
})
|
||||
|
||||
const WriteFileArgsSchema = z.object({
|
||||
path: z.string(),
|
||||
content: z.string()
|
||||
})
|
||||
|
||||
const EditOperation = z.object({
|
||||
oldText: z.string().describe('Text to search for - must match exactly'),
|
||||
newText: z.string().describe('Text to replace with')
|
||||
})
|
||||
|
||||
const EditFileArgsSchema = z.object({
|
||||
path: z.string(),
|
||||
edits: z.array(EditOperation),
|
||||
dryRun: z.boolean().default(false).describe('Preview changes using git-style diff format')
|
||||
})
|
||||
|
||||
const CreateDirectoryArgsSchema = z.object({
|
||||
path: z.string()
|
||||
})
|
||||
|
||||
const ListDirectoryArgsSchema = z.object({
|
||||
path: z.string()
|
||||
})
|
||||
|
||||
const DirectoryTreeArgsSchema = z.object({
|
||||
path: z.string()
|
||||
})
|
||||
|
||||
const MoveFileArgsSchema = z.object({
|
||||
source: z.string(),
|
||||
destination: z.string()
|
||||
})
|
||||
|
||||
const SearchFilesArgsSchema = z.object({
|
||||
path: z.string(),
|
||||
pattern: z.string(),
|
||||
excludePatterns: z.array(z.string()).optional().default([])
|
||||
})
|
||||
|
||||
const GetFileInfoArgsSchema = z.object({
|
||||
path: z.string()
|
||||
})
|
||||
|
||||
interface FileInfo {
|
||||
size: number
|
||||
created: Date
|
||||
modified: Date
|
||||
accessed: Date
|
||||
isDirectory: boolean
|
||||
isFile: boolean
|
||||
permissions: string
|
||||
}
|
||||
|
||||
// Tool implementations
|
||||
async function getFileStats(filePath: string): Promise<FileInfo> {
|
||||
const stats = await fs.stat(filePath)
|
||||
return {
|
||||
size: stats.size,
|
||||
created: stats.birthtime,
|
||||
modified: stats.mtime,
|
||||
accessed: stats.atime,
|
||||
isDirectory: stats.isDirectory(),
|
||||
isFile: stats.isFile(),
|
||||
permissions: stats.mode.toString(8).slice(-3)
|
||||
}
|
||||
}
|
||||
|
||||
async function searchFiles(
|
||||
allowedDirectories: string[],
|
||||
rootPath: string,
|
||||
pattern: string,
|
||||
excludePatterns: string[] = []
|
||||
): Promise<string[]> {
|
||||
const results: string[] = []
|
||||
|
||||
async function search(currentPath: string) {
|
||||
const entries = await fs.readdir(currentPath, { withFileTypes: true })
|
||||
|
||||
for (const entry of entries) {
|
||||
const fullPath = path.join(currentPath, entry.name)
|
||||
|
||||
try {
|
||||
// Validate each path before processing
|
||||
await validatePath(allowedDirectories, fullPath)
|
||||
|
||||
// Check if path matches any exclude pattern
|
||||
const relativePath = path.relative(rootPath, fullPath)
|
||||
const shouldExclude = excludePatterns.some((pattern) => {
|
||||
const globPattern = pattern.includes('*') ? pattern : `**/${pattern}/**`
|
||||
return minimatch(relativePath, globPattern, { dot: true })
|
||||
})
|
||||
|
||||
if (shouldExclude) {
|
||||
continue
|
||||
}
|
||||
|
||||
if (entry.name.toLowerCase().includes(pattern.toLowerCase())) {
|
||||
results.push(fullPath)
|
||||
}
|
||||
|
||||
if (entry.isDirectory()) {
|
||||
await search(fullPath)
|
||||
}
|
||||
} catch (error) {
|
||||
// Skip invalid paths during search
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
await search(rootPath)
|
||||
return results
|
||||
}
|
||||
|
||||
// file editing and diffing utilities
|
||||
function normalizeLineEndings(text: string): string {
|
||||
return text.replace(/\r\n/g, '\n')
|
||||
}
|
||||
|
||||
function createUnifiedDiff(originalContent: string, newContent: string, filepath: string = 'file'): string {
|
||||
// Ensure consistent line endings for diff
|
||||
const normalizedOriginal = normalizeLineEndings(originalContent)
|
||||
const normalizedNew = normalizeLineEndings(newContent)
|
||||
|
||||
return createTwoFilesPatch(filepath, filepath, normalizedOriginal, normalizedNew, 'original', 'modified')
|
||||
}
|
||||
|
||||
async function applyFileEdits(
|
||||
filePath: string,
|
||||
edits: Array<{ oldText: string; newText: string }>,
|
||||
dryRun = false
|
||||
): Promise<string> {
|
||||
// Read file content and normalize line endings
|
||||
const content = normalizeLineEndings(await fs.readFile(filePath, 'utf-8'))
|
||||
|
||||
// Apply edits sequentially
|
||||
let modifiedContent = content
|
||||
for (const edit of edits) {
|
||||
const normalizedOld = normalizeLineEndings(edit.oldText)
|
||||
const normalizedNew = normalizeLineEndings(edit.newText)
|
||||
|
||||
// If exact match exists, use it
|
||||
if (modifiedContent.includes(normalizedOld)) {
|
||||
modifiedContent = modifiedContent.replace(normalizedOld, normalizedNew)
|
||||
continue
|
||||
}
|
||||
|
||||
// Otherwise, try line-by-line matching with flexibility for whitespace
|
||||
const oldLines = normalizedOld.split('\n')
|
||||
const contentLines = modifiedContent.split('\n')
|
||||
let matchFound = false
|
||||
|
||||
for (let i = 0; i <= contentLines.length - oldLines.length; i++) {
|
||||
const potentialMatch = contentLines.slice(i, i + oldLines.length)
|
||||
|
||||
// Compare lines with normalized whitespace
|
||||
const isMatch = oldLines.every((oldLine, j) => {
|
||||
const contentLine = potentialMatch[j]
|
||||
return oldLine.trim() === contentLine.trim()
|
||||
})
|
||||
|
||||
if (isMatch) {
|
||||
// Preserve original indentation of first line
|
||||
const originalIndent = contentLines[i].match(/^\s*/)?.[0] || ''
|
||||
const newLines = normalizedNew.split('\n').map((line, j) => {
|
||||
if (j === 0) return originalIndent + line.trimStart()
|
||||
// For subsequent lines, try to preserve relative indentation
|
||||
const oldIndent = oldLines[j]?.match(/^\s*/)?.[0] || ''
|
||||
const newIndent = line.match(/^\s*/)?.[0] || ''
|
||||
if (oldIndent && newIndent) {
|
||||
const relativeIndent = newIndent.length - oldIndent.length
|
||||
return originalIndent + ' '.repeat(Math.max(0, relativeIndent)) + line.trimStart()
|
||||
}
|
||||
return line
|
||||
})
|
||||
|
||||
contentLines.splice(i, oldLines.length, ...newLines)
|
||||
modifiedContent = contentLines.join('\n')
|
||||
matchFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if (!matchFound) {
|
||||
throw new Error(`Could not find exact match for edit:\n${edit.oldText}`)
|
||||
}
|
||||
}
|
||||
|
||||
// Create unified diff
|
||||
const diff = createUnifiedDiff(content, modifiedContent, filePath)
|
||||
|
||||
// Format diff with appropriate number of backticks
|
||||
let numBackticks = 3
|
||||
while (diff.includes('`'.repeat(numBackticks))) {
|
||||
numBackticks++
|
||||
}
|
||||
const formattedDiff = `${'`'.repeat(numBackticks)}diff\n${diff}${'`'.repeat(numBackticks)}\n\n`
|
||||
|
||||
if (!dryRun) {
|
||||
await fs.writeFile(filePath, modifiedContent, 'utf-8')
|
||||
}
|
||||
|
||||
return formattedDiff
|
||||
}
|
||||
|
||||
class FileSystemServer {
|
||||
public server: Server
|
||||
private allowedDirectories: string[]
|
||||
constructor(allowedDirs: string[]) {
|
||||
if (!Array.isArray(allowedDirs) || allowedDirs.length === 0) {
|
||||
throw new Error('No allowed directories provided, please specify at least one directory in args')
|
||||
}
|
||||
|
||||
this.allowedDirectories = allowedDirs.map((dir) => normalizePath(path.resolve(expandHome(dir))))
|
||||
|
||||
// Validate that all directories exist and are accessible
|
||||
this.validateDirs().catch((error) => {
|
||||
logger.error('Error validating allowed directories:', error)
|
||||
throw new Error(`Error validating allowed directories: ${error}`)
|
||||
})
|
||||
|
||||
this.server = new Server(
|
||||
{
|
||||
name: 'secure-filesystem-server',
|
||||
version: '0.2.0'
|
||||
},
|
||||
{
|
||||
capabilities: {
|
||||
tools: {}
|
||||
}
|
||||
}
|
||||
)
|
||||
this.initialize()
|
||||
}
|
||||
|
||||
async validateDirs() {
|
||||
// Validate that all directories exist and are accessible
|
||||
await Promise.all(
|
||||
this.allowedDirectories.map(async (dir) => {
|
||||
try {
|
||||
const stats = await fs.stat(expandHome(dir))
|
||||
if (!stats.isDirectory()) {
|
||||
logger.error(`Error: ${dir} is not a directory`)
|
||||
throw new Error(`Error: ${dir} is not a directory`)
|
||||
}
|
||||
} catch (error: any) {
|
||||
logger.error(`Error accessing directory ${dir}:`, error)
|
||||
throw new Error(`Error accessing directory ${dir}:`, error)
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
initialize() {
|
||||
// Tool handlers
|
||||
this.server.setRequestHandler(ListToolsRequestSchema, async () => {
|
||||
return {
|
||||
tools: [
|
||||
{
|
||||
name: 'read_file',
|
||||
description:
|
||||
'Read the complete contents of a file from the file system. ' +
|
||||
'Handles various text encodings and provides detailed error messages ' +
|
||||
'if the file cannot be read. Use this tool when you need to examine ' +
|
||||
'the contents of a single file. Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(ReadFileArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'read_multiple_files',
|
||||
description:
|
||||
'Read the contents of multiple files simultaneously. This is more ' +
|
||||
'efficient than reading files one by one when you need to analyze ' +
|
||||
"or compare multiple files. Each file's content is returned with its " +
|
||||
"path as a reference. Failed reads for individual files won't stop " +
|
||||
'the entire operation. Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(ReadMultipleFilesArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'write_file',
|
||||
description:
|
||||
'Create a new file or completely overwrite an existing file with new content. ' +
|
||||
'Use with caution as it will overwrite existing files without warning. ' +
|
||||
'Handles text content with proper encoding. Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(WriteFileArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'edit_file',
|
||||
description:
|
||||
'Make line-based edits to a text file. Each edit replaces exact line sequences ' +
|
||||
'with new content. Returns a git-style diff showing the changes made. ' +
|
||||
'Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(EditFileArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'create_directory',
|
||||
description:
|
||||
'Create a new directory or ensure a directory exists. Can create multiple ' +
|
||||
'nested directories in one operation. If the directory already exists, ' +
|
||||
'this operation will succeed silently. Perfect for setting up directory ' +
|
||||
'structures for projects or ensuring required paths exist. Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(CreateDirectoryArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'list_directory',
|
||||
description:
|
||||
'Get a detailed listing of all files and directories in a specified path. ' +
|
||||
'Results clearly distinguish between files and directories with [FILE] and [DIR] ' +
|
||||
'prefixes. This tool is essential for understanding directory structure and ' +
|
||||
'finding specific files within a directory. Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(ListDirectoryArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'directory_tree',
|
||||
description:
|
||||
'Get a recursive tree view of files and directories as a JSON structure. ' +
|
||||
"Each entry includes 'name', 'type' (file/directory), and 'children' for directories. " +
|
||||
'Files have no children array, while directories always have a children array (which may be empty). ' +
|
||||
'The output is formatted with 2-space indentation for readability. Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(DirectoryTreeArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'move_file',
|
||||
description:
|
||||
'Move or rename files and directories. Can move files between directories ' +
|
||||
'and rename them in a single operation. If the destination exists, the ' +
|
||||
'operation will fail. Works across different directories and can be used ' +
|
||||
'for simple renaming within the same directory. Both source and destination must be within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(MoveFileArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'search_files',
|
||||
description:
|
||||
'Recursively search for files and directories matching a pattern. ' +
|
||||
'Searches through all subdirectories from the starting path. The search ' +
|
||||
'is case-insensitive and matches partial names. Returns full paths to all ' +
|
||||
"matching items. Great for finding files when you don't know their exact location. " +
|
||||
'Only searches within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(SearchFilesArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'get_file_info',
|
||||
description:
|
||||
'Retrieve detailed metadata about a file or directory. Returns comprehensive ' +
|
||||
'information including size, creation time, last modified time, permissions, ' +
|
||||
'and type. This tool is perfect for understanding file characteristics ' +
|
||||
'without reading the actual content. Only works within allowed directories.',
|
||||
inputSchema: z.toJSONSchema(GetFileInfoArgsSchema)
|
||||
},
|
||||
{
|
||||
name: 'list_allowed_directories',
|
||||
description:
|
||||
'Returns the list of directories that this server is allowed to access. ' +
|
||||
'Use this to understand which directories are available before trying to access files.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {},
|
||||
required: []
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
})
|
||||
|
||||
this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
||||
try {
|
||||
const { name, arguments: args } = request.params
|
||||
|
||||
switch (name) {
|
||||
case 'read_file': {
|
||||
const parsed = ReadFileArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for read_file: ${parsed.error}`)
|
||||
}
|
||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
||||
const content = await fs.readFile(validPath, 'utf-8')
|
||||
return {
|
||||
content: [{ type: 'text', text: content }]
|
||||
}
|
||||
}
|
||||
|
||||
case 'read_multiple_files': {
|
||||
const parsed = ReadMultipleFilesArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for read_multiple_files: ${parsed.error}`)
|
||||
}
|
||||
const results = await Promise.all(
|
||||
parsed.data.paths.map(async (filePath: string) => {
|
||||
try {
|
||||
const validPath = await validatePath(this.allowedDirectories, filePath)
|
||||
const content = await fs.readFile(validPath, 'utf-8')
|
||||
return `${filePath}:\n${content}\n`
|
||||
} catch (error) {
|
||||
const errorMessage = error instanceof Error ? error.message : String(error)
|
||||
return `${filePath}: Error - ${errorMessage}`
|
||||
}
|
||||
})
|
||||
)
|
||||
return {
|
||||
content: [{ type: 'text', text: results.join('\n---\n') }]
|
||||
}
|
||||
}
|
||||
|
||||
case 'write_file': {
|
||||
const parsed = WriteFileArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for write_file: ${parsed.error}`)
|
||||
}
|
||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
||||
await fs.writeFile(validPath, parsed.data.content, 'utf-8')
|
||||
return {
|
||||
content: [{ type: 'text', text: `Successfully wrote to ${parsed.data.path}` }]
|
||||
}
|
||||
}
|
||||
|
||||
case 'edit_file': {
|
||||
const parsed = EditFileArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for edit_file: ${parsed.error}`)
|
||||
}
|
||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
||||
const result = await applyFileEdits(validPath, parsed.data.edits, parsed.data.dryRun)
|
||||
return {
|
||||
content: [{ type: 'text', text: result }]
|
||||
}
|
||||
}
|
||||
|
||||
case 'create_directory': {
|
||||
const parsed = CreateDirectoryArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for create_directory: ${parsed.error}`)
|
||||
}
|
||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
||||
await fs.mkdir(validPath, { recursive: true })
|
||||
return {
|
||||
content: [{ type: 'text', text: `Successfully created directory ${parsed.data.path}` }]
|
||||
}
|
||||
}
|
||||
|
||||
case 'list_directory': {
|
||||
const parsed = ListDirectoryArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for list_directory: ${parsed.error}`)
|
||||
}
|
||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
||||
const entries = await fs.readdir(validPath, { withFileTypes: true })
|
||||
const formatted = entries
|
||||
.map((entry) => `${entry.isDirectory() ? '[DIR]' : '[FILE]'} ${entry.name}`)
|
||||
.join('\n')
|
||||
return {
|
||||
content: [{ type: 'text', text: formatted }]
|
||||
}
|
||||
}
|
||||
|
||||
case 'directory_tree': {
|
||||
const parsed = DirectoryTreeArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for directory_tree: ${parsed.error}`)
|
||||
}
|
||||
|
||||
interface TreeEntry {
|
||||
name: string
|
||||
type: 'file' | 'directory'
|
||||
children?: TreeEntry[]
|
||||
}
|
||||
|
||||
async function buildTree(allowedDirectories: string[], currentPath: string): Promise<TreeEntry[]> {
|
||||
const validPath = await validatePath(allowedDirectories, currentPath)
|
||||
const entries = await fs.readdir(validPath, { withFileTypes: true })
|
||||
const result: TreeEntry[] = []
|
||||
|
||||
for (const entry of entries) {
|
||||
const entryData: TreeEntry = {
|
||||
name: entry.name,
|
||||
type: entry.isDirectory() ? 'directory' : 'file'
|
||||
}
|
||||
|
||||
if (entry.isDirectory()) {
|
||||
const subPath = path.join(currentPath, entry.name)
|
||||
entryData.children = await buildTree(allowedDirectories, subPath)
|
||||
}
|
||||
|
||||
result.push(entryData)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
const treeData = await buildTree(this.allowedDirectories, parsed.data.path)
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: JSON.stringify(treeData, null, 2)
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
case 'move_file': {
|
||||
const parsed = MoveFileArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for move_file: ${parsed.error}`)
|
||||
}
|
||||
const validSourcePath = await validatePath(this.allowedDirectories, parsed.data.source)
|
||||
const validDestPath = await validatePath(this.allowedDirectories, parsed.data.destination)
|
||||
await fs.rename(validSourcePath, validDestPath)
|
||||
return {
|
||||
content: [
|
||||
{ type: 'text', text: `Successfully moved ${parsed.data.source} to ${parsed.data.destination}` }
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
case 'search_files': {
|
||||
const parsed = SearchFilesArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for search_files: ${parsed.error}`)
|
||||
}
|
||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
||||
const results = await searchFiles(
|
||||
this.allowedDirectories,
|
||||
validPath,
|
||||
parsed.data.pattern,
|
||||
parsed.data.excludePatterns
|
||||
)
|
||||
return {
|
||||
content: [{ type: 'text', text: results.length > 0 ? results.join('\n') : 'No matches found' }]
|
||||
}
|
||||
}
|
||||
|
||||
case 'get_file_info': {
|
||||
const parsed = GetFileInfoArgsSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for get_file_info: ${parsed.error}`)
|
||||
}
|
||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
||||
const info = await getFileStats(validPath)
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: Object.entries(info)
|
||||
.map(([key, value]) => `${key}: ${value}`)
|
||||
.join('\n')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
case 'list_allowed_directories': {
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: `Allowed directories:\n${this.allowedDirectories.join('\n')}`
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
throw new Error(`Unknown tool: ${name}`)
|
||||
}
|
||||
} catch (error) {
|
||||
const errorMessage = error instanceof Error ? error.message : String(error)
|
||||
return {
|
||||
content: [{ type: 'text', text: `Error: ${errorMessage}` }],
|
||||
isError: true
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
export default FileSystemServer
|
||||
2
src/main/mcpServers/filesystem/index.ts
Normal file
2
src/main/mcpServers/filesystem/index.ts
Normal file
@ -0,0 +1,2 @@
// Re-export FileSystemServer to maintain existing import pattern
export { default, FileSystemServer } from './server'

118
src/main/mcpServers/filesystem/server.ts
Normal file
118
src/main/mcpServers/filesystem/server.ts
Normal file
@ -0,0 +1,118 @@
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
import { app } from 'electron'
import fs from 'fs/promises'
import path from 'path'

import {
  deleteToolDefinition,
  editToolDefinition,
  globToolDefinition,
  grepToolDefinition,
  handleDeleteTool,
  handleEditTool,
  handleGlobTool,
  handleGrepTool,
  handleLsTool,
  handleReadTool,
  handleWriteTool,
  lsToolDefinition,
  readToolDefinition,
  writeToolDefinition
} from './tools'
import { logger } from './types'

export class FileSystemServer {
  public server: Server
  private baseDir: string

  constructor(baseDir?: string) {
    if (baseDir && path.isAbsolute(baseDir)) {
      this.baseDir = baseDir
      logger.info(`Using provided baseDir for filesystem MCP: ${baseDir}`)
    } else {
      const userData = app.getPath('userData')
      this.baseDir = path.join(userData, 'Data', 'Workspace')
      logger.info(`Using default workspace for filesystem MCP baseDir: ${this.baseDir}`)
    }

    this.server = new Server(
      {
        name: 'filesystem-server',
        version: '2.0.0'
      },
      {
        capabilities: {
          tools: {}
        }
      }
    )

    this.initialize()
  }

  async initialize() {
    try {
      await fs.mkdir(this.baseDir, { recursive: true })
    } catch (error) {
      logger.error('Failed to create filesystem MCP baseDir', { error, baseDir: this.baseDir })
    }

    // Register tool list handler
    this.server.setRequestHandler(ListToolsRequestSchema, async () => {
      return {
        tools: [
          globToolDefinition,
          lsToolDefinition,
          grepToolDefinition,
          readToolDefinition,
          editToolDefinition,
          writeToolDefinition,
          deleteToolDefinition
        ]
      }
    })

    // Register tool call handler
    this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
      try {
        const { name, arguments: args } = request.params

        switch (name) {
          case 'glob':
            return await handleGlobTool(args, this.baseDir)

          case 'ls':
            return await handleLsTool(args, this.baseDir)

          case 'grep':
            return await handleGrepTool(args, this.baseDir)

          case 'read':
            return await handleReadTool(args, this.baseDir)

          case 'edit':
            return await handleEditTool(args, this.baseDir)

          case 'write':
            return await handleWriteTool(args, this.baseDir)

          case 'delete':
            return await handleDeleteTool(args, this.baseDir)

          default:
            throw new Error(`Unknown tool: ${name}`)
        }
      } catch (error) {
        const errorMessage = error instanceof Error ? error.message : String(error)
        logger.error(`Tool execution error for ${request.params.name}:`, { error })
        return {
          content: [{ type: 'text', text: `Error: ${errorMessage}` }],
          isError: true
        }
      }
    })
  }
}

export default FileSystemServer

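As a usage sketch (not part of the commit), the class above can be exercised end-to-end with the MCP SDK's in-memory transport. Exact import paths and client constructor options may differ between SDK versions, so treat this as an assumption-laden illustration rather than project code.

// Sketch only: wiring FileSystemServer to an MCP client over an in-memory transport.
import { Client } from '@modelcontextprotocol/sdk/client/index.js'
import { InMemoryTransport } from '@modelcontextprotocol/sdk/inMemory.js'

const fsServer = new FileSystemServer('/absolute/path/to/workspace')
const [clientTransport, serverTransport] = InMemoryTransport.createLinkedPair()
await fsServer.server.connect(serverTransport)

const client = new Client({ name: 'demo-client', version: '1.0.0' })
await client.connect(clientTransport)
const listing = await client.callTool({ name: 'ls', arguments: {} }) // returns the tree-formatted directory listing
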
93
src/main/mcpServers/filesystem/tools/delete.ts
Normal file
93
src/main/mcpServers/filesystem/tools/delete.ts
Normal file
@ -0,0 +1,93 @@
|
||||
import fs from 'fs/promises'
|
||||
import path from 'path'
|
||||
import * as z from 'zod'
|
||||
|
||||
import { logger, validatePath } from '../types'
|
||||
|
||||
// Schema definition
|
||||
export const DeleteToolSchema = z.object({
|
||||
path: z.string().describe('The path to the file or directory to delete'),
|
||||
recursive: z.boolean().optional().describe('For directories, whether to delete recursively (default: false)')
|
||||
})
|
||||
|
||||
// Tool definition with detailed description
|
||||
export const deleteToolDefinition = {
|
||||
name: 'delete',
|
||||
description: `Deletes a file or directory from the filesystem.
|
||||
|
||||
CAUTION: This operation cannot be undone!
|
||||
|
||||
- For files: simply provide the path
|
||||
- For empty directories: provide the path
|
||||
- For non-empty directories: set recursive=true
|
||||
- The path must be an absolute path, not a relative path
|
||||
- Always verify the path before deleting to avoid data loss`,
|
||||
inputSchema: z.toJSONSchema(DeleteToolSchema)
|
||||
}
|
||||
|
||||
// Handler implementation
|
||||
export async function handleDeleteTool(args: unknown, baseDir: string) {
|
||||
const parsed = DeleteToolSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for delete: ${parsed.error}`)
|
||||
}
|
||||
|
||||
const targetPath = parsed.data.path
|
||||
const validPath = await validatePath(targetPath, baseDir)
|
||||
const recursive = parsed.data.recursive || false
|
||||
|
||||
// Check if path exists and get stats
|
||||
let stats
|
||||
try {
|
||||
stats = await fs.stat(validPath)
|
||||
} catch (error: any) {
|
||||
if (error.code === 'ENOENT') {
|
||||
throw new Error(`Path not found: ${targetPath}`)
|
||||
}
|
||||
throw error
|
||||
}
|
||||
|
||||
const isDirectory = stats.isDirectory()
|
||||
const relativePath = path.relative(baseDir, validPath)
|
||||
|
||||
// Perform deletion
|
||||
try {
|
||||
if (isDirectory) {
|
||||
if (recursive) {
|
||||
// Delete directory recursively
|
||||
await fs.rm(validPath, { recursive: true, force: true })
|
||||
} else {
|
||||
// Try to delete empty directory
|
||||
await fs.rmdir(validPath)
|
||||
}
|
||||
} else {
|
||||
// Delete file
|
||||
await fs.unlink(validPath)
|
||||
}
|
||||
} catch (error: any) {
|
||||
if (error.code === 'ENOTEMPTY') {
|
||||
throw new Error(`Directory not empty: ${targetPath}. Use recursive=true to delete non-empty directories.`)
|
||||
}
|
||||
throw new Error(`Failed to delete: ${error.message}`)
|
||||
}
|
||||
|
||||
// Log the operation
|
||||
logger.info('Path deleted', {
|
||||
path: validPath,
|
||||
type: isDirectory ? 'directory' : 'file',
|
||||
recursive: isDirectory ? recursive : undefined
|
||||
})
|
||||
|
||||
// Format output
|
||||
const itemType = isDirectory ? 'Directory' : 'File'
|
||||
const recursiveNote = isDirectory && recursive ? ' (recursive)' : ''
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: `${itemType} deleted${recursiveNote}: ${relativePath}`
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
130
src/main/mcpServers/filesystem/tools/edit.ts
Normal file
130
src/main/mcpServers/filesystem/tools/edit.ts
Normal file
@ -0,0 +1,130 @@
|
||||
import fs from 'fs/promises'
|
||||
import path from 'path'
|
||||
import * as z from 'zod'
|
||||
|
||||
import { logger, replaceWithFuzzyMatch, validatePath } from '../types'
|
||||
|
||||
// Schema definition
|
||||
export const EditToolSchema = z.object({
|
||||
file_path: z.string().describe('The path to the file to modify'),
|
||||
old_string: z.string().describe('The text to replace'),
|
||||
new_string: z.string().describe('The text to replace it with'),
|
||||
replace_all: z.boolean().optional().default(false).describe('Replace all occurrences of old_string (default false)')
|
||||
})
|
||||
|
||||
// Tool definition with detailed description
|
||||
export const editToolDefinition = {
|
||||
name: 'edit',
|
||||
description: `Performs exact string replacements in files.
|
||||
|
||||
- You must use the 'read' tool at least once before editing
|
||||
- The file_path must be an absolute path, not a relative path
|
||||
- Preserve exact indentation from read output (after the line number prefix)
|
||||
- Never include line number prefixes in old_string or new_string
|
||||
- ALWAYS prefer editing existing files over creating new ones
|
||||
- The edit will FAIL if old_string is not found in the file
|
||||
- The edit will FAIL if old_string appears multiple times (provide more context or use replace_all)
|
||||
- The edit will FAIL if old_string equals new_string
|
||||
- Use replace_all to rename variables or replace all occurrences`,
|
||||
inputSchema: z.toJSONSchema(EditToolSchema)
|
||||
}
|
||||
|
||||
// Handler implementation
|
||||
export async function handleEditTool(args: unknown, baseDir: string) {
|
||||
const parsed = EditToolSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for edit: ${parsed.error}`)
|
||||
}
|
||||
|
||||
const { file_path: filePath, old_string: oldString, new_string: newString, replace_all: replaceAll } = parsed.data
|
||||
|
||||
// Validate path
|
||||
const validPath = await validatePath(filePath, baseDir)
|
||||
|
||||
// Check if file exists
|
||||
try {
|
||||
const stats = await fs.stat(validPath)
|
||||
if (!stats.isFile()) {
|
||||
throw new Error(`Path is not a file: ${filePath}`)
|
||||
}
|
||||
} catch (error: any) {
|
||||
if (error.code === 'ENOENT') {
|
||||
// If old_string is empty, this is a create new file operation
|
||||
if (oldString === '') {
|
||||
// Create parent directory if needed
|
||||
const parentDir = path.dirname(validPath)
|
||||
await fs.mkdir(parentDir, { recursive: true })
|
||||
|
||||
// Write the new content
|
||||
await fs.writeFile(validPath, newString, 'utf-8')
|
||||
|
||||
logger.info('File created', { path: validPath })
|
||||
|
||||
const relativePath = path.relative(baseDir, validPath)
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: `Created new file: ${relativePath}\nLines: ${newString.split('\n').length}`
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
throw new Error(`File not found: ${filePath}`)
|
||||
}
|
||||
throw error
|
||||
}
|
||||
|
||||
// Read current content
|
||||
const content = await fs.readFile(validPath, 'utf-8')
|
||||
|
||||
// Handle special case: old_string is empty (create file with content)
|
||||
if (oldString === '') {
|
||||
await fs.writeFile(validPath, newString, 'utf-8')
|
||||
|
||||
logger.info('File overwritten', { path: validPath })
|
||||
|
||||
const relativePath = path.relative(baseDir, validPath)
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: `Overwrote file: ${relativePath}\nLines: ${newString.split('\n').length}`
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
// Perform the replacement with fuzzy matching
|
||||
const newContent = replaceWithFuzzyMatch(content, oldString, newString, replaceAll)
|
||||
|
||||
// Write the modified content
|
||||
await fs.writeFile(validPath, newContent, 'utf-8')
|
||||
|
||||
logger.info('File edited', {
|
||||
path: validPath,
|
||||
replaceAll
|
||||
})
|
||||
|
||||
// Generate a simple diff summary
|
||||
const oldLines = content.split('\n').length
|
||||
const newLines = newContent.split('\n').length
|
||||
const lineDiff = newLines - oldLines
|
||||
|
||||
const relativePath = path.relative(baseDir, validPath)
|
||||
let diffSummary = `Edited: ${relativePath}`
|
||||
if (lineDiff > 0) {
|
||||
diffSummary += `\n+${lineDiff} lines`
|
||||
} else if (lineDiff < 0) {
|
||||
diffSummary += `\n${lineDiff} lines`
|
||||
}
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: diffSummary
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
149
src/main/mcpServers/filesystem/tools/glob.ts
Normal file
149
src/main/mcpServers/filesystem/tools/glob.ts
Normal file
@ -0,0 +1,149 @@
|
||||
import fs from 'fs/promises'
|
||||
import path from 'path'
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { FileInfo } from '../types'
|
||||
import { logger, MAX_FILES_LIMIT, runRipgrep, validatePath } from '../types'
|
||||
|
||||
// Schema definition
|
||||
export const GlobToolSchema = z.object({
|
||||
pattern: z.string().describe('The glob pattern to match files against'),
|
||||
path: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe('The directory to search in (must be absolute path). Defaults to the base directory')
|
||||
})
|
||||
|
||||
// Tool definition with detailed description
|
||||
export const globToolDefinition = {
|
||||
name: 'glob',
|
||||
description: `Fast file pattern matching tool that works with any codebase size.
|
||||
|
||||
- Supports glob patterns like "**/*.js" or "src/**/*.ts"
|
||||
- Returns matching absolute file paths sorted by modification time (newest first)
|
||||
- Use this when you need to find files by name patterns
|
||||
- Patterns without "/" (e.g., "*.txt") match files at ANY depth in the directory tree
|
||||
- Patterns with "/" (e.g., "src/*.ts") match relative to the search path
|
||||
- Pattern syntax: * (any chars), ** (any path), {a,b} (alternatives), ? (single char)
|
||||
- Results are limited to 100 files
|
||||
- The path parameter must be an absolute path if specified
|
||||
- If path is not specified, defaults to the base directory
|
||||
- IMPORTANT: Omit the path field for the default directory (don't use "undefined" or "null")`,
|
||||
inputSchema: z.toJSONSchema(GlobToolSchema)
|
||||
}
|
||||
|
||||
// Handler implementation
|
||||
export async function handleGlobTool(args: unknown, baseDir: string) {
|
||||
const parsed = GlobToolSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for glob: ${parsed.error}`)
|
||||
}
|
||||
|
||||
const searchPath = parsed.data.path || baseDir
|
||||
const validPath = await validatePath(searchPath, baseDir)
|
||||
|
||||
// Verify the search directory exists
|
||||
try {
|
||||
const stats = await fs.stat(validPath)
|
||||
if (!stats.isDirectory()) {
|
||||
throw new Error(`Path is not a directory: ${validPath}`)
|
||||
}
|
||||
} catch (error: unknown) {
|
||||
if (error && typeof error === 'object' && 'code' in error && error.code === 'ENOENT') {
|
||||
throw new Error(`Directory not found: ${validPath}`)
|
||||
}
|
||||
throw error
|
||||
}
|
||||
|
||||
// Validate pattern
|
||||
const pattern = parsed.data.pattern.trim()
|
||||
if (!pattern) {
|
||||
throw new Error('Pattern cannot be empty')
|
||||
}
|
||||
|
||||
const files: FileInfo[] = []
|
||||
let truncated = false
|
||||
|
||||
// Build ripgrep arguments for file listing using --glob=pattern format
|
||||
const rgArgs: string[] = [
|
||||
'--files',
|
||||
'--follow',
|
||||
'--hidden',
|
||||
`--glob=${pattern}`,
|
||||
'--glob=!.git/*',
|
||||
'--glob=!node_modules/*',
|
||||
'--glob=!dist/*',
|
||||
'--glob=!build/*',
|
||||
'--glob=!__pycache__/*',
|
||||
validPath
|
||||
]
|
||||
|
||||
// Use ripgrep for file listing
|
||||
logger.debug('Running ripgrep with args', { rgArgs })
|
||||
const rgResult = await runRipgrep(rgArgs)
|
||||
logger.debug('Ripgrep result', {
|
||||
ok: rgResult.ok,
|
||||
exitCode: rgResult.exitCode,
|
||||
stdoutLength: rgResult.stdout.length,
|
||||
stdoutPreview: rgResult.stdout.slice(0, 500)
|
||||
})
|
||||
|
||||
// Process results if we have stdout content
|
||||
// Exit code 2 can indicate partial errors (e.g., permission denied on some dirs) but still have valid results
|
||||
if (rgResult.ok && rgResult.stdout.length > 0) {
|
||||
const lines = rgResult.stdout.split('\n').filter(Boolean)
|
||||
logger.debug('Parsed lines from ripgrep', { lineCount: lines.length, lines })
|
||||
|
||||
for (const line of lines) {
|
||||
if (files.length >= MAX_FILES_LIMIT) {
|
||||
truncated = true
|
||||
break
|
||||
}
|
||||
|
||||
const filePath = line.trim()
|
||||
if (!filePath) continue
|
||||
|
||||
const absolutePath = path.isAbsolute(filePath) ? filePath : path.resolve(validPath, filePath)
|
||||
|
||||
try {
|
||||
const stats = await fs.stat(absolutePath)
|
||||
files.push({
|
||||
path: absolutePath,
|
||||
type: 'file', // ripgrep --files only returns files
|
||||
size: stats.size,
|
||||
modified: stats.mtime
|
||||
})
|
||||
} catch (error) {
|
||||
logger.debug('Failed to stat file from ripgrep output, skipping', { file: absolutePath, error })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sort by modification time (newest first)
|
||||
files.sort((a, b) => {
|
||||
const aTime = a.modified ? a.modified.getTime() : 0
|
||||
const bTime = b.modified ? b.modified.getTime() : 0
|
||||
return bTime - aTime
|
||||
})
|
||||
|
||||
// Format output - always use absolute paths
|
||||
const output: string[] = []
|
||||
if (files.length === 0) {
|
||||
output.push(`No files found matching pattern "${parsed.data.pattern}" in ${validPath}`)
|
||||
} else {
|
||||
output.push(...files.map((f) => f.path))
|
||||
if (truncated) {
|
||||
output.push('')
|
||||
output.push(`(Results truncated to ${MAX_FILES_LIMIT} files. Consider using a more specific pattern.)`)
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: output.join('\n')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
266
src/main/mcpServers/filesystem/tools/grep.ts
Normal file
266
src/main/mcpServers/filesystem/tools/grep.ts
Normal file
@ -0,0 +1,266 @@
|
||||
import fs from 'fs/promises'
|
||||
import path from 'path'
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { GrepMatch } from '../types'
|
||||
import { isBinaryFile, MAX_GREP_MATCHES, MAX_LINE_LENGTH, runRipgrep, validatePath } from '../types'
|
||||
|
||||
// Schema definition
|
||||
export const GrepToolSchema = z.object({
|
||||
pattern: z.string().describe('The regex pattern to search for in file contents'),
|
||||
path: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe('The directory to search in (must be absolute path). Defaults to the base directory'),
|
||||
include: z.string().optional().describe('File pattern to include in the search (e.g. "*.js", "*.{ts,tsx}")')
|
||||
})
|
||||
|
||||
// Tool definition with detailed description
|
||||
export const grepToolDefinition = {
|
||||
name: 'grep',
|
||||
description: `Fast content search tool that works with any codebase size.
|
||||
|
||||
- Searches file contents using regular expressions
|
||||
- Supports full regex syntax (e.g., "log.*Error", "function\\s+\\w+")
|
||||
- Filter files by pattern with include (e.g., "*.js", "*.{ts,tsx}")
|
||||
- Returns absolute file paths and line numbers with matching content
|
||||
- Results are limited to 100 matches
|
||||
- Binary files are automatically skipped
|
||||
- Common directories (node_modules, .git, dist) are excluded
|
||||
- The path parameter must be an absolute path if specified
|
||||
- If path is not specified, defaults to the base directory`,
|
||||
inputSchema: z.toJSONSchema(GrepToolSchema)
|
||||
}
|
||||
|
||||
// Handler implementation
|
||||
export async function handleGrepTool(args: unknown, baseDir: string) {
|
||||
const parsed = GrepToolSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for grep: ${parsed.error}`)
|
||||
}
|
||||
|
||||
const data = parsed.data
|
||||
|
||||
if (!data.pattern) {
|
||||
throw new Error('Pattern is required for grep')
|
||||
}
|
||||
|
||||
const searchPath = data.path || baseDir
|
||||
const validPath = await validatePath(searchPath, baseDir)
|
||||
|
||||
const matches: GrepMatch[] = []
|
||||
let truncated = false
|
||||
let regex: RegExp
|
||||
|
||||
// Build ripgrep arguments
|
||||
const rgArgs: string[] = [
|
||||
'--no-heading',
|
||||
'--line-number',
|
||||
'--color',
|
||||
'never',
|
||||
'--ignore-case',
|
||||
'--glob',
|
||||
'!.git/**',
|
||||
'--glob',
|
||||
'!node_modules/**',
|
||||
'--glob',
|
||||
'!dist/**',
|
||||
'--glob',
|
||||
'!build/**',
|
||||
'--glob',
|
||||
'!__pycache__/**'
|
||||
]
|
||||
|
||||
if (data.include) {
|
||||
for (const pat of data.include
|
||||
.split(',')
|
||||
.map((p) => p.trim())
|
||||
.filter(Boolean)) {
|
||||
rgArgs.push('--glob', pat)
|
||||
}
|
||||
}
|
||||
|
||||
rgArgs.push(data.pattern)
|
||||
rgArgs.push(validPath)
|
||||
|
||||
try {
|
||||
regex = new RegExp(data.pattern, 'gi')
|
||||
} catch (error) {
|
||||
throw new Error(`Invalid regex pattern: ${data.pattern}`)
|
||||
}
|
||||
|
||||
async function searchFile(filePath: string): Promise<void> {
|
||||
if (matches.length >= MAX_GREP_MATCHES) {
|
||||
truncated = true
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
// Skip binary files
|
||||
if (await isBinaryFile(filePath)) {
|
||||
return
|
||||
}
|
||||
|
||||
const content = await fs.readFile(filePath, 'utf-8')
|
||||
const lines = content.split('\n')
|
||||
|
||||
lines.forEach((line, index) => {
|
||||
if (matches.length >= MAX_GREP_MATCHES) {
|
||||
truncated = true
|
||||
return
|
||||
}
|
||||
|
||||
if (regex.test(line)) {
|
||||
// Truncate long lines
|
||||
const truncatedLine = line.length > MAX_LINE_LENGTH ? line.substring(0, MAX_LINE_LENGTH) + '...' : line
|
||||
|
||||
matches.push({
|
||||
file: filePath,
|
||||
line: index + 1,
|
||||
content: truncatedLine.trim()
|
||||
})
|
||||
}
|
||||
})
|
||||
} catch (error) {
|
||||
// Skip files we can't read
|
||||
}
|
||||
}
|
||||
|
||||
async function searchDirectory(dir: string): Promise<void> {
|
||||
if (matches.length >= MAX_GREP_MATCHES) {
|
||||
truncated = true
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
const entries = await fs.readdir(dir, { withFileTypes: true })
|
||||
|
||||
for (const entry of entries) {
|
||||
if (matches.length >= MAX_GREP_MATCHES) {
|
||||
truncated = true
|
||||
break
|
||||
}
|
||||
|
||||
const fullPath = path.join(dir, entry.name)
|
||||
|
||||
// Skip common ignore patterns
|
||||
if (entry.name.startsWith('.') && entry.name !== '.env.example') {
|
||||
continue
|
||||
}
|
||||
if (['node_modules', 'dist', 'build', '__pycache__', '.git'].includes(entry.name)) {
|
||||
continue
|
||||
}
|
||||
|
||||
if (entry.isFile()) {
|
||||
// Check if file matches include pattern
|
||||
if (data.include) {
|
||||
const includePatterns = data.include.split(',').map((p) => p.trim())
|
||||
const fileName = path.basename(fullPath)
|
||||
const matchesInclude = includePatterns.some((pattern) => {
|
||||
// Simple glob pattern matching
|
||||
const regexPattern = pattern
|
||||
.replace(/\*/g, '.*')
|
||||
.replace(/\?/g, '.')
|
||||
.replace(/\{([^}]+)\}/g, (_, group) => `(${group.split(',').join('|')})`)
|
||||
return new RegExp(`^${regexPattern}$`).test(fileName)
|
||||
})
|
||||
if (!matchesInclude) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
await searchFile(fullPath)
|
||||
} else if (entry.isDirectory()) {
|
||||
await searchDirectory(fullPath)
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
// Skip directories we can't read
|
||||
}
|
||||
}
|
||||
|
||||
// Perform the search
|
||||
let usedRipgrep = false
|
||||
try {
|
||||
const rgResult = await runRipgrep(rgArgs)
|
||||
if (rgResult.ok && rgResult.exitCode !== null && rgResult.exitCode !== 2) {
|
||||
usedRipgrep = true
|
||||
const lines = rgResult.stdout.split('\n').filter(Boolean)
|
||||
for (const line of lines) {
|
||||
if (matches.length >= MAX_GREP_MATCHES) {
|
||||
truncated = true
|
||||
break
|
||||
}
|
||||
|
||||
const firstColon = line.indexOf(':')
|
||||
const secondColon = line.indexOf(':', firstColon + 1)
|
||||
if (firstColon === -1 || secondColon === -1) continue
|
||||
|
||||
const filePart = line.slice(0, firstColon)
|
||||
const linePart = line.slice(firstColon + 1, secondColon)
|
||||
const contentPart = line.slice(secondColon + 1)
|
||||
const lineNum = Number.parseInt(linePart, 10)
|
||||
if (!Number.isFinite(lineNum)) continue
|
||||
|
||||
const absoluteFilePath = path.isAbsolute(filePart) ? filePart : path.resolve(baseDir, filePart)
|
||||
const truncatedLine =
|
||||
contentPart.length > MAX_LINE_LENGTH ? contentPart.substring(0, MAX_LINE_LENGTH) + '...' : contentPart
|
||||
|
||||
matches.push({
|
||||
file: absoluteFilePath,
|
||||
line: lineNum,
|
||||
content: truncatedLine.trim()
|
||||
})
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
usedRipgrep = false
|
||||
}
|
||||
|
||||
if (!usedRipgrep) {
|
||||
const stats = await fs.stat(validPath)
|
||||
if (stats.isFile()) {
|
||||
await searchFile(validPath)
|
||||
} else {
|
||||
await searchDirectory(validPath)
|
||||
}
|
||||
}
|
||||
|
||||
// Format output
|
||||
const output: string[] = []
|
||||
|
||||
if (matches.length === 0) {
|
||||
output.push('No matches found')
|
||||
} else {
|
||||
// Group matches by file
|
||||
const fileGroups = new Map<string, GrepMatch[]>()
|
||||
matches.forEach((match) => {
|
||||
if (!fileGroups.has(match.file)) {
|
||||
fileGroups.set(match.file, [])
|
||||
}
|
||||
fileGroups.get(match.file)!.push(match)
|
||||
})
|
||||
|
||||
// Format grouped matches - always use absolute paths
|
||||
fileGroups.forEach((fileMatches, filePath) => {
|
||||
output.push(`\n${filePath}:`)
|
||||
fileMatches.forEach((match) => {
|
||||
output.push(` ${match.line}: ${match.content}`)
|
||||
})
|
||||
})
|
||||
|
||||
if (truncated) {
|
||||
output.push('')
|
||||
output.push(`(Results truncated to ${MAX_GREP_MATCHES} matches. Consider using a more specific pattern or path.)`)
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: output.join('\n')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
8
src/main/mcpServers/filesystem/tools/index.ts
Normal file
8
src/main/mcpServers/filesystem/tools/index.ts
Normal file
@ -0,0 +1,8 @@
// Export all tool definitions and handlers
export { deleteToolDefinition, handleDeleteTool } from './delete'
export { editToolDefinition, handleEditTool } from './edit'
export { globToolDefinition, handleGlobTool } from './glob'
export { grepToolDefinition, handleGrepTool } from './grep'
export { handleLsTool, lsToolDefinition } from './ls'
export { handleReadTool, readToolDefinition } from './read'
export { handleWriteTool, writeToolDefinition } from './write'

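Each module re-exported here follows the same contract: a zod schema, a *ToolDefinition object whose inputSchema comes from z.toJSONSchema, and an async handle*Tool(args, baseDir) that returns MCP text content. The hypothetical "stat" tool below (not part of the commit; all names are illustrative) shows the shape a new module would take.

// Hypothetical example, not part of the diff: a new tool module following the same pattern.
import fs from 'fs/promises'
import * as z from 'zod'

import { validatePath } from '../types'

export const StatToolSchema = z.object({
  path: z.string().describe('Absolute path to inspect')
})

export const statToolDefinition = {
  name: 'stat',
  description: 'Returns size and modification time for a path (illustrative only).',
  inputSchema: z.toJSONSchema(StatToolSchema)
}

export async function handleStatTool(args: unknown, baseDir: string) {
  const parsed = StatToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for stat: ${parsed.error}`)
  }
  // Reuse the shared path validation so the tool stays scoped to baseDir
  const validPath = await validatePath(parsed.data.path, baseDir)
  const stats = await fs.stat(validPath)
  return {
    content: [{ type: 'text', text: `size: ${stats.size}\nmodified: ${stats.mtime.toISOString()}` }]
  }
}
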
150
src/main/mcpServers/filesystem/tools/ls.ts
Normal file
150
src/main/mcpServers/filesystem/tools/ls.ts
Normal file
@ -0,0 +1,150 @@
|
||||
import fs from 'fs/promises'
|
||||
import path from 'path'
|
||||
import * as z from 'zod'
|
||||
|
||||
import { MAX_FILES_LIMIT, validatePath } from '../types'
|
||||
|
||||
// Schema definition
|
||||
export const LsToolSchema = z.object({
|
||||
path: z.string().optional().describe('The directory to list (must be absolute path). Defaults to the base directory'),
|
||||
recursive: z.boolean().optional().describe('Whether to list directories recursively (default: false)')
|
||||
})
|
||||
|
||||
// Tool definition with detailed description
|
||||
export const lsToolDefinition = {
|
||||
name: 'ls',
|
||||
description: `Lists files and directories in a specified path.
|
||||
|
||||
- Returns a tree-like structure with icons (📁 directories, 📄 files)
|
||||
- Shows the absolute directory path in the header
|
||||
- Entries are sorted alphabetically with directories first
|
||||
- Can list recursively with recursive=true (up to 5 levels deep)
|
||||
- Common directories (node_modules, dist, .git) are excluded
|
||||
- Hidden files (starting with .) are excluded except .env.example
|
||||
- Results are limited to 100 entries
|
||||
- The path parameter must be an absolute path if specified
|
||||
- If path is not specified, defaults to the base directory`,
|
||||
inputSchema: z.toJSONSchema(LsToolSchema)
|
||||
}
|
||||
|
||||
// Handler implementation
|
||||
export async function handleLsTool(args: unknown, baseDir: string) {
|
||||
const parsed = LsToolSchema.safeParse(args)
|
||||
if (!parsed.success) {
|
||||
throw new Error(`Invalid arguments for ls: ${parsed.error}`)
|
||||
}
|
||||
|
||||
const targetPath = parsed.data.path || baseDir
|
||||
const validPath = await validatePath(targetPath, baseDir)
|
||||
const recursive = parsed.data.recursive || false
|
||||
|
||||
interface TreeNode {
|
||||
name: string
|
||||
type: 'file' | 'directory'
|
||||
children?: TreeNode[]
|
||||
}
|
||||
|
||||
let fileCount = 0
|
||||
let truncated = false
|
||||
|
||||
async function buildTree(dirPath: string, depth: number = 0): Promise<TreeNode[]> {
|
||||
if (fileCount >= MAX_FILES_LIMIT) {
|
||||
truncated = true
|
||||
return []
|
||||
}
|
||||
|
||||
try {
|
||||
const entries = await fs.readdir(dirPath, { withFileTypes: true })
|
||||
const nodes: TreeNode[] = []
|
||||
|
||||
// Sort entries: directories first, then files, alphabetically
|
||||
entries.sort((a, b) => {
|
||||
if (a.isDirectory() && !b.isDirectory()) return -1
|
||||
if (!a.isDirectory() && b.isDirectory()) return 1
|
||||
return a.name.localeCompare(b.name)
|
||||
})
|
||||
|
||||
for (const entry of entries) {
|
||||
if (fileCount >= MAX_FILES_LIMIT) {
|
||||
truncated = true
|
||||
break
|
||||
}
|
||||
|
||||
// Skip hidden files and common ignore patterns
|
||||
if (entry.name.startsWith('.') && entry.name !== '.env.example') {
|
||||
continue
|
||||
}
|
||||
if (['node_modules', 'dist', 'build', '__pycache__'].includes(entry.name)) {
|
||||
continue
|
||||
}
|
||||
|
||||
fileCount++
|
||||
const node: TreeNode = {
|
||||
name: entry.name,
|
||||
type: entry.isDirectory() ? 'directory' : 'file'
|
||||
}
|
||||
|
||||
if (entry.isDirectory() && recursive && depth < 5) {
|
||||
// Limit depth to prevent infinite recursion
|
||||
const childPath = path.join(dirPath, entry.name)
|
||||
node.children = await buildTree(childPath, depth + 1)
|
||||
}
|
||||
|
||||
nodes.push(node)
|
||||
}
|
||||
|
||||
return nodes
|
||||
} catch (error) {
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
// Build the tree
|
||||
const tree = await buildTree(validPath)
|
||||
|
||||
// Format as text output
|
||||
function formatTree(nodes: TreeNode[], prefix: string = ''): string[] {
|
||||
const lines: string[] = []
|
||||
|
||||
nodes.forEach((node, index) => {
|
||||
const isLastNode = index === nodes.length - 1
|
||||
const connector = isLastNode ? '└── ' : '├── '
|
||||
const icon = node.type === 'directory' ? '📁 ' : '📄 '
|
||||
|
||||
lines.push(prefix + connector + icon + node.name)
|
||||
|
||||
if (node.children && node.children.length > 0) {
|
||||
const childPrefix = prefix + (isLastNode ? ' ' : '│ ')
|
||||
lines.push(...formatTree(node.children, childPrefix))
|
||||
}
|
||||
})
|
||||
|
||||
return lines
|
||||
}
|
||||
|
||||
// Generate output
|
||||
const output: string[] = []
|
||||
output.push(`Directory: ${validPath}`)
|
||||
output.push('')
|
||||
|
||||
if (tree.length === 0) {
|
||||
output.push('(empty directory)')
|
||||
} else {
|
||||
const treeLines = formatTree(tree, '')
|
||||
output.push(...treeLines)
|
||||
|
||||
if (truncated) {
|
||||
output.push('')
|
||||
output.push(`(Results truncated to ${MAX_FILES_LIMIT} files. Consider listing a more specific directory.)`)
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: output.join('\n')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
101
src/main/mcpServers/filesystem/tools/read.ts
Normal file
101
src/main/mcpServers/filesystem/tools/read.ts
Normal file
@ -0,0 +1,101 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { DEFAULT_READ_LIMIT, isBinaryFile, MAX_LINE_LENGTH, validatePath } from '../types'

// Schema definition
export const ReadToolSchema = z.object({
  file_path: z.string().describe('The path to the file to read'),
  offset: z.number().optional().describe('The line number to start reading from (1-based)'),
  limit: z.number().optional().describe('The number of lines to read (defaults to 2000)')
})

// Tool definition with detailed description
export const readToolDefinition = {
  name: 'read',
  description: `Reads a file from the local filesystem.

- Assumes this tool can read all files on the machine
- The file_path parameter must be an absolute path, not a relative path
- By default, reads up to 2000 lines starting from the beginning
- You can optionally specify a line offset and limit for long files
- Any lines longer than 2000 characters will be truncated
- Results are returned with line numbers starting at 1
- Binary files are detected and rejected with an error
- Empty files return a warning`,
  inputSchema: z.toJSONSchema(ReadToolSchema)
}

// Handler implementation
export async function handleReadTool(args: unknown, baseDir: string) {
  const parsed = ReadToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for read: ${parsed.error}`)
  }

  const filePath = parsed.data.file_path
  const validPath = await validatePath(filePath, baseDir)

  // Check if file exists
  try {
    const stats = await fs.stat(validPath)
    if (!stats.isFile()) {
      throw new Error(`Path is not a file: ${filePath}`)
    }
  } catch (error: any) {
    if (error.code === 'ENOENT') {
      throw new Error(`File not found: ${filePath}`)
    }
    throw error
  }

  // Check if file is binary
  if (await isBinaryFile(validPath)) {
    throw new Error(`Cannot read binary file: ${filePath}`)
  }

  // Read file content
  const content = await fs.readFile(validPath, 'utf-8')
  const lines = content.split('\n')

  // Apply offset and limit
  const offset = (parsed.data.offset || 1) - 1 // Convert to 0-based
  const limit = parsed.data.limit || DEFAULT_READ_LIMIT

  if (offset < 0 || offset >= lines.length) {
    throw new Error(`Invalid offset: ${offset + 1}. File has ${lines.length} lines.`)
  }

  const selectedLines = lines.slice(offset, offset + limit)

  // Format output with line numbers and truncate long lines
  const output: string[] = []
  const relativePath = path.relative(baseDir, validPath)

  output.push(`File: ${relativePath}`)
  if (offset > 0 || limit < lines.length) {
    output.push(`Lines ${offset + 1} to ${Math.min(offset + limit, lines.length)} of ${lines.length}`)
  }
  output.push('')

  selectedLines.forEach((line, index) => {
    const lineNumber = offset + index + 1
    const truncatedLine = line.length > MAX_LINE_LENGTH ? line.substring(0, MAX_LINE_LENGTH) + '...' : line
    output.push(`${lineNumber.toString().padStart(6)}\t${truncatedLine}`)
  })

  if (offset + limit < lines.length) {
    output.push('')
    output.push(`(${lines.length - (offset + limit)} more lines not shown)`)
  }

  return {
    content: [
      {
        type: 'text',
        text: output.join('\n')
      }
    ]
  }
}

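A quick illustration of the offset/limit semantics above (sketch only, the file path is made up): the handler treats offset as 1-based, converts it internally, and prefixes each returned line with its 1-based number.

// Sketch only, not part of the diff: read 50 lines starting at line 100 of a hypothetical file.
const result = await handleReadTool(
  { file_path: '/absolute/path/to/workspace/notes/todo.md', offset: 100, limit: 50 },
  '/absolute/path/to/workspace'
)
console.log(result.content[0].text) // "File: notes/todo.md", "Lines 100 to 149 of ...", then numbered lines
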
83
src/main/mcpServers/filesystem/tools/write.ts
Normal file
83
src/main/mcpServers/filesystem/tools/write.ts
Normal file
@ -0,0 +1,83 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { logger, validatePath } from '../types'

// Schema definition
export const WriteToolSchema = z.object({
file_path: z.string().describe('The path to the file to write'),
content: z.string().describe('The content to write to the file')
})

// Tool definition with detailed description
export const writeToolDefinition = {
name: 'write',
description: `Writes a file to the local filesystem.

- This tool will overwrite the existing file if one exists at the path
- You MUST use the read tool first to understand what you're overwriting
- ALWAYS prefer using the 'edit' tool for existing files
- NEVER proactively create documentation files unless explicitly requested
- Parent directories will be created automatically if they don't exist
- The file_path must be an absolute path, not a relative path`,
inputSchema: z.toJSONSchema(WriteToolSchema)
}

// Handler implementation
export async function handleWriteTool(args: unknown, baseDir: string) {
const parsed = WriteToolSchema.safeParse(args)
if (!parsed.success) {
throw new Error(`Invalid arguments for write: ${parsed.error}`)
}

const filePath = parsed.data.file_path
const validPath = await validatePath(filePath, baseDir)

// Create parent directory if it doesn't exist
const parentDir = path.dirname(validPath)
try {
await fs.mkdir(parentDir, { recursive: true })
} catch (error: any) {
if (error.code !== 'EEXIST') {
throw new Error(`Failed to create parent directory: ${error.message}`)
}
}

// Check if file exists (for logging)
let isOverwrite = false
try {
await fs.stat(validPath)
isOverwrite = true
} catch {
// File doesn't exist, that's fine
}

// Write the file
try {
await fs.writeFile(validPath, parsed.data.content, 'utf-8')
} catch (error: any) {
throw new Error(`Failed to write file: ${error.message}`)
}

// Log the operation
logger.info('File written', {
path: validPath,
overwrite: isOverwrite,
size: parsed.data.content.length
})

// Format output
const relativePath = path.relative(baseDir, validPath)
const action = isOverwrite ? 'Updated' : 'Created'
const lines = parsed.data.content.split('\n').length

return {
content: [
{
type: 'text',
text: `${action} file: ${relativePath}\n` + `Size: ${parsed.data.content.length} bytes\n` + `Lines: ${lines}`
}
]
}
}
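A matching editorial sketch for the write handler (again, not part of the diff); the path and content below are invented.

// Hypothetical invocation; reports "Updated" when overwriting, "Created" otherwise.
const writeResult = await handleWriteTool(
  { file_path: '/workspace/project/notes/todo.md', content: '# TODO\n- ship it\n' },
  '/workspace/project'
)
// -> "Created file: notes/todo.md\nSize: 17 bytes\nLines: 3"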
627
src/main/mcpServers/filesystem/types.ts
Normal file
627
src/main/mcpServers/filesystem/types.ts
Normal file
@ -0,0 +1,627 @@
import { loggerService } from '@logger'
import { isMac, isWin } from '@main/constant'
import { spawn } from 'child_process'
import fs from 'fs/promises'
import os from 'os'
import path from 'path'

export const logger = loggerService.withContext('MCP:FileSystemServer')

// Constants
export const MAX_LINE_LENGTH = 2000
export const DEFAULT_READ_LIMIT = 2000
export const MAX_FILES_LIMIT = 100
export const MAX_GREP_MATCHES = 100

// Common types
export interface FileInfo {
path: string
type: 'file' | 'directory'
size?: number
modified?: Date
}

export interface GrepMatch {
file: string
line: number
content: string
}

// Utility functions for path handling
export function normalizePath(p: string): string {
return path.normalize(p)
}

export function expandHome(filepath: string): string {
if (filepath.startsWith('~/') || filepath === '~') {
return path.join(os.homedir(), filepath.slice(1))
}
return filepath
}

// Security validation
export async function validatePath(requestedPath: string, baseDir?: string): Promise<string> {
const expandedPath = expandHome(requestedPath)
const root = baseDir ?? process.cwd()
const absolute = path.isAbsolute(expandedPath) ? path.resolve(expandedPath) : path.resolve(root, expandedPath)

// Handle symlinks by checking their real path
try {
const realPath = await fs.realpath(absolute)
return normalizePath(realPath)
} catch (error) {
// For new files that don't exist yet, verify parent directory
const parentDir = path.dirname(absolute)
try {
const realParentPath = await fs.realpath(parentDir)
normalizePath(realParentPath)
return normalizePath(absolute)
} catch {
return normalizePath(absolute)
}
}
}

// ============================================================================
// Edit Tool Utilities - Fuzzy matching replacers from opencode
// ============================================================================

export type Replacer = (content: string, find: string) => Generator<string, void, unknown>

// Similarity thresholds for block anchor fallback matching
const SINGLE_CANDIDATE_SIMILARITY_THRESHOLD = 0.0
const MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD = 0.3

/**
* Levenshtein distance algorithm implementation
*/
function levenshtein(a: string, b: string): number {
if (a === '' || b === '') {
return Math.max(a.length, b.length)
}
const matrix = Array.from({ length: a.length + 1 }, (_, i) =>
Array.from({ length: b.length + 1 }, (_, j) => (i === 0 ? j : j === 0 ? i : 0))
)

for (let i = 1; i <= a.length; i++) {
for (let j = 1; j <= b.length; j++) {
const cost = a[i - 1] === b[j - 1] ? 0 : 1
matrix[i][j] = Math.min(matrix[i - 1][j] + 1, matrix[i][j - 1] + 1, matrix[i - 1][j - 1] + cost)
}
}
return matrix[a.length][b.length]
}

export const SimpleReplacer: Replacer = function* (_content, find) {
yield find
}
|
||||
export const LineTrimmedReplacer: Replacer = function* (content, find) {
|
||||
const originalLines = content.split('\n')
|
||||
const searchLines = find.split('\n')
|
||||
|
||||
if (searchLines[searchLines.length - 1] === '') {
|
||||
searchLines.pop()
|
||||
}
|
||||
|
||||
for (let i = 0; i <= originalLines.length - searchLines.length; i++) {
|
||||
let matches = true
|
||||
|
||||
for (let j = 0; j < searchLines.length; j++) {
|
||||
const originalTrimmed = originalLines[i + j].trim()
|
||||
const searchTrimmed = searchLines[j].trim()
|
||||
|
||||
if (originalTrimmed !== searchTrimmed) {
|
||||
matches = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if (matches) {
|
||||
let matchStartIndex = 0
|
||||
for (let k = 0; k < i; k++) {
|
||||
matchStartIndex += originalLines[k].length + 1
|
||||
}
|
||||
|
||||
let matchEndIndex = matchStartIndex
|
||||
for (let k = 0; k < searchLines.length; k++) {
|
||||
matchEndIndex += originalLines[i + k].length
|
||||
if (k < searchLines.length - 1) {
|
||||
matchEndIndex += 1
|
||||
}
|
||||
}
|
||||
|
||||
yield content.substring(matchStartIndex, matchEndIndex)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const BlockAnchorReplacer: Replacer = function* (content, find) {
|
||||
const originalLines = content.split('\n')
|
||||
const searchLines = find.split('\n')
|
||||
|
||||
if (searchLines.length < 3) {
|
||||
return
|
||||
}
|
||||
|
||||
if (searchLines[searchLines.length - 1] === '') {
|
||||
searchLines.pop()
|
||||
}
|
||||
|
||||
const firstLineSearch = searchLines[0].trim()
|
||||
const lastLineSearch = searchLines[searchLines.length - 1].trim()
|
||||
const searchBlockSize = searchLines.length
|
||||
|
||||
const candidates: Array<{ startLine: number; endLine: number }> = []
|
||||
for (let i = 0; i < originalLines.length; i++) {
|
||||
if (originalLines[i].trim() !== firstLineSearch) {
|
||||
continue
|
||||
}
|
||||
|
||||
for (let j = i + 2; j < originalLines.length; j++) {
|
||||
if (originalLines[j].trim() === lastLineSearch) {
|
||||
candidates.push({ startLine: i, endLine: j })
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (candidates.length === 0) {
|
||||
return
|
||||
}
|
||||
|
||||
if (candidates.length === 1) {
|
||||
const { startLine, endLine } = candidates[0]
|
||||
const actualBlockSize = endLine - startLine + 1
|
||||
|
||||
let similarity = 0
|
||||
const linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2)
|
||||
|
||||
if (linesToCheck > 0) {
|
||||
for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
|
||||
const originalLine = originalLines[startLine + j].trim()
|
||||
const searchLine = searchLines[j].trim()
|
||||
const maxLen = Math.max(originalLine.length, searchLine.length)
|
||||
if (maxLen === 0) {
|
||||
continue
|
||||
}
|
||||
const distance = levenshtein(originalLine, searchLine)
|
||||
similarity += (1 - distance / maxLen) / linesToCheck
|
||||
|
||||
if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
similarity = 1.0
|
||||
}
|
||||
|
||||
if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
|
||||
let matchStartIndex = 0
|
||||
for (let k = 0; k < startLine; k++) {
|
||||
matchStartIndex += originalLines[k].length + 1
|
||||
}
|
||||
let matchEndIndex = matchStartIndex
|
||||
for (let k = startLine; k <= endLine; k++) {
|
||||
matchEndIndex += originalLines[k].length
|
||||
if (k < endLine) {
|
||||
matchEndIndex += 1
|
||||
}
|
||||
}
|
||||
yield content.substring(matchStartIndex, matchEndIndex)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
let bestMatch: { startLine: number; endLine: number } | null = null
|
||||
let maxSimilarity = -1
|
||||
|
||||
for (const candidate of candidates) {
|
||||
const { startLine, endLine } = candidate
|
||||
const actualBlockSize = endLine - startLine + 1
|
||||
|
||||
let similarity = 0
|
||||
const linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2)
|
||||
|
||||
if (linesToCheck > 0) {
|
||||
for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
|
||||
const originalLine = originalLines[startLine + j].trim()
|
||||
const searchLine = searchLines[j].trim()
|
||||
const maxLen = Math.max(originalLine.length, searchLine.length)
|
||||
if (maxLen === 0) {
|
||||
continue
|
||||
}
|
||||
const distance = levenshtein(originalLine, searchLine)
|
||||
similarity += 1 - distance / maxLen
|
||||
}
|
||||
similarity /= linesToCheck
|
||||
} else {
|
||||
similarity = 1.0
|
||||
}
|
||||
|
||||
if (similarity > maxSimilarity) {
|
||||
maxSimilarity = similarity
|
||||
bestMatch = candidate
|
||||
}
|
||||
}
|
||||
|
||||
if (maxSimilarity >= MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD && bestMatch) {
|
||||
const { startLine, endLine } = bestMatch
|
||||
let matchStartIndex = 0
|
||||
for (let k = 0; k < startLine; k++) {
|
||||
matchStartIndex += originalLines[k].length + 1
|
||||
}
|
||||
let matchEndIndex = matchStartIndex
|
||||
for (let k = startLine; k <= endLine; k++) {
|
||||
matchEndIndex += originalLines[k].length
|
||||
if (k < endLine) {
|
||||
matchEndIndex += 1
|
||||
}
|
||||
}
|
||||
yield content.substring(matchStartIndex, matchEndIndex)
|
||||
}
|
||||
}
|
||||
|
||||
export const WhitespaceNormalizedReplacer: Replacer = function* (content, find) {
|
||||
const normalizeWhitespace = (text: string) => text.replace(/\s+/g, ' ').trim()
|
||||
const normalizedFind = normalizeWhitespace(find)
|
||||
|
||||
const lines = content.split('\n')
|
||||
for (let i = 0; i < lines.length; i++) {
|
||||
const line = lines[i]
|
||||
if (normalizeWhitespace(line) === normalizedFind) {
|
||||
yield line
|
||||
} else {
|
||||
const normalizedLine = normalizeWhitespace(line)
|
||||
if (normalizedLine.includes(normalizedFind)) {
|
||||
const words = find.trim().split(/\s+/)
|
||||
if (words.length > 0) {
|
||||
const pattern = words.map((word) => word.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')).join('\\s+')
|
||||
try {
|
||||
const regex = new RegExp(pattern)
|
||||
const match = line.match(regex)
|
||||
if (match) {
|
||||
yield match[0]
|
||||
}
|
||||
} catch {
|
||||
// Invalid regex pattern, skip
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const findLines = find.split('\n')
|
||||
if (findLines.length > 1) {
|
||||
for (let i = 0; i <= lines.length - findLines.length; i++) {
|
||||
const block = lines.slice(i, i + findLines.length)
|
||||
if (normalizeWhitespace(block.join('\n')) === normalizedFind) {
|
||||
yield block.join('\n')
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const IndentationFlexibleReplacer: Replacer = function* (content, find) {
|
||||
const removeIndentation = (text: string) => {
|
||||
const lines = text.split('\n')
|
||||
const nonEmptyLines = lines.filter((line) => line.trim().length > 0)
|
||||
if (nonEmptyLines.length === 0) return text
|
||||
|
||||
const minIndent = Math.min(
|
||||
...nonEmptyLines.map((line) => {
|
||||
const match = line.match(/^(\s*)/)
|
||||
return match ? match[1].length : 0
|
||||
})
|
||||
)
|
||||
|
||||
return lines.map((line) => (line.trim().length === 0 ? line : line.slice(minIndent))).join('\n')
|
||||
}
|
||||
|
||||
const normalizedFind = removeIndentation(find)
|
||||
const contentLines = content.split('\n')
|
||||
const findLines = find.split('\n')
|
||||
|
||||
for (let i = 0; i <= contentLines.length - findLines.length; i++) {
|
||||
const block = contentLines.slice(i, i + findLines.length).join('\n')
|
||||
if (removeIndentation(block) === normalizedFind) {
|
||||
yield block
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const EscapeNormalizedReplacer: Replacer = function* (content, find) {
|
||||
const unescapeString = (str: string): string => {
|
||||
return str.replace(/\\(n|t|r|'|"|`|\\|\n|\$)/g, (match, capturedChar) => {
|
||||
switch (capturedChar) {
|
||||
case 'n':
|
||||
return '\n'
|
||||
case 't':
|
||||
return '\t'
|
||||
case 'r':
|
||||
return '\r'
|
||||
case "'":
|
||||
return "'"
|
||||
case '"':
|
||||
return '"'
|
||||
case '`':
|
||||
return '`'
|
||||
case '\\':
|
||||
return '\\'
|
||||
case '\n':
|
||||
return '\n'
|
||||
case '$':
|
||||
return '$'
|
||||
default:
|
||||
return match
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
const unescapedFind = unescapeString(find)
|
||||
|
||||
if (content.includes(unescapedFind)) {
|
||||
yield unescapedFind
|
||||
}
|
||||
|
||||
const lines = content.split('\n')
|
||||
const findLines = unescapedFind.split('\n')
|
||||
|
||||
for (let i = 0; i <= lines.length - findLines.length; i++) {
|
||||
const block = lines.slice(i, i + findLines.length).join('\n')
|
||||
const unescapedBlock = unescapeString(block)
|
||||
|
||||
if (unescapedBlock === unescapedFind) {
|
||||
yield block
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const TrimmedBoundaryReplacer: Replacer = function* (content, find) {
|
||||
const trimmedFind = find.trim()
|
||||
|
||||
if (trimmedFind === find) {
|
||||
return
|
||||
}
|
||||
|
||||
if (content.includes(trimmedFind)) {
|
||||
yield trimmedFind
|
||||
}
|
||||
|
||||
const lines = content.split('\n')
|
||||
const findLines = find.split('\n')
|
||||
|
||||
for (let i = 0; i <= lines.length - findLines.length; i++) {
|
||||
const block = lines.slice(i, i + findLines.length).join('\n')
|
||||
|
||||
if (block.trim() === trimmedFind) {
|
||||
yield block
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const ContextAwareReplacer: Replacer = function* (content, find) {
|
||||
const findLines = find.split('\n')
|
||||
if (findLines.length < 3) {
|
||||
return
|
||||
}
|
||||
|
||||
if (findLines[findLines.length - 1] === '') {
|
||||
findLines.pop()
|
||||
}
|
||||
|
||||
const contentLines = content.split('\n')
|
||||
|
||||
const firstLine = findLines[0].trim()
|
||||
const lastLine = findLines[findLines.length - 1].trim()
|
||||
|
||||
for (let i = 0; i < contentLines.length; i++) {
|
||||
if (contentLines[i].trim() !== firstLine) continue
|
||||
|
||||
for (let j = i + 2; j < contentLines.length; j++) {
|
||||
if (contentLines[j].trim() === lastLine) {
|
||||
const blockLines = contentLines.slice(i, j + 1)
|
||||
const block = blockLines.join('\n')
|
||||
|
||||
if (blockLines.length === findLines.length) {
|
||||
let matchingLines = 0
|
||||
let totalNonEmptyLines = 0
|
||||
|
||||
for (let k = 1; k < blockLines.length - 1; k++) {
|
||||
const blockLine = blockLines[k].trim()
|
||||
const findLine = findLines[k].trim()
|
||||
|
||||
if (blockLine.length > 0 || findLine.length > 0) {
|
||||
totalNonEmptyLines++
|
||||
if (blockLine === findLine) {
|
||||
matchingLines++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (totalNonEmptyLines === 0 || matchingLines / totalNonEmptyLines >= 0.5) {
|
||||
yield block
|
||||
break
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const MultiOccurrenceReplacer: Replacer = function* (content, find) {
|
||||
let startIndex = 0
|
||||
|
||||
while (true) {
|
||||
const index = content.indexOf(find, startIndex)
|
||||
if (index === -1) break
|
||||
|
||||
yield find
|
||||
startIndex = index + find.length
|
||||
}
|
||||
}
|
||||

/**
* All replacers in order of specificity
*/
export const ALL_REPLACERS: Replacer[] = [
SimpleReplacer,
LineTrimmedReplacer,
BlockAnchorReplacer,
WhitespaceNormalizedReplacer,
IndentationFlexibleReplacer,
EscapeNormalizedReplacer,
TrimmedBoundaryReplacer,
ContextAwareReplacer,
MultiOccurrenceReplacer
]

/**
* Replace oldString with newString in content using fuzzy matching
*/
export function replaceWithFuzzyMatch(
content: string,
oldString: string,
newString: string,
replaceAll = false
): string {
if (oldString === newString) {
throw new Error('old_string and new_string must be different')
}

let notFound = true

for (const replacer of ALL_REPLACERS) {
for (const search of replacer(content, oldString)) {
const index = content.indexOf(search)
if (index === -1) continue
notFound = false
if (replaceAll) {
return content.replaceAll(search, newString)
}
const lastIndex = content.lastIndexOf(search)
if (index !== lastIndex) continue
return content.substring(0, index) + newString + content.substring(index + search.length)
}
}

if (notFound) {
throw new Error('old_string not found in content')
}
throw new Error(
'Found multiple matches for old_string. Provide more surrounding lines in old_string to identify the correct match.'
)
}
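An editorial sketch (not part of this commit) of the boundary behaviour of replaceWithFuzzyMatch; the inputs are invented.

// Exact, unique occurrence: replaced in place.
replaceWithFuzzyMatch('a\nb\nc\n', 'b', 'B') // -> 'a\nB\nc\n'

// The same old_string appears twice: throws "Found multiple matches for old_string..."
// unless replaceAll is set, in which case every occurrence is rewritten.
replaceWithFuzzyMatch('a\na\n', 'a', 'b', true) // -> 'b\nb\n'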

// ============================================================================
// Binary File Detection
// ============================================================================

// Check if a file is likely binary
export async function isBinaryFile(filePath: string): Promise<boolean> {
try {
const buffer = Buffer.alloc(4096)
const fd = await fs.open(filePath, 'r')
const { bytesRead } = await fd.read(buffer, 0, buffer.length, 0)
await fd.close()

if (bytesRead === 0) return false

const view = buffer.subarray(0, bytesRead)

let zeroBytes = 0
let evenZeros = 0
let oddZeros = 0
let nonPrintable = 0

for (let i = 0; i < view.length; i++) {
const b = view[i]

if (b === 0) {
zeroBytes++
if (i % 2 === 0) evenZeros++
else oddZeros++
continue
}

// treat common whitespace as printable
if (b === 9 || b === 10 || b === 13) continue

// basic ASCII printable range
if (b >= 32 && b <= 126) continue

// bytes >= 128 are likely part of UTF-8 sequences; count as printable
if (b >= 128) continue

nonPrintable++
}

// If there are lots of null bytes, it's probably binary unless it looks like UTF-16 text.
if (zeroBytes > 0) {
const evenSlots = Math.ceil(view.length / 2)
const oddSlots = Math.floor(view.length / 2)
const evenZeroRatio = evenSlots > 0 ? evenZeros / evenSlots : 0
const oddZeroRatio = oddSlots > 0 ? oddZeros / oddSlots : 0

// UTF-16LE/BE tends to have zeros on every other byte.
if (evenZeroRatio > 0.7 || oddZeroRatio > 0.7) return false

if (zeroBytes / view.length > 0.05) return true
}

// Heuristic: too many non-printable bytes => binary.
return nonPrintable / view.length > 0.3
} catch {
return false
}
}
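A short editorial note on the expected outcomes of this heuristic (illustrative paths, not tests from this commit):

// Plain UTF-8 source code: few NULs, mostly printable bytes         -> false
// UTF-16 text: NUL on every other byte, the zero-ratio check bails  -> false
// PNG/ZIP and similar: many NULs and non-printable bytes            -> likely true
await isBinaryFile('/tmp/example.ts')  // false
await isBinaryFile('/tmp/example.png') // likely true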

// ============================================================================
// Ripgrep Utilities
// ============================================================================

export interface RipgrepResult {
ok: boolean
stdout: string
exitCode: number | null
}

export function getRipgrepAddonPath(): string {
const pkgJsonPath = require.resolve('@anthropic-ai/claude-agent-sdk/package.json')
const pkgRoot = path.dirname(pkgJsonPath)
const platform = isMac ? 'darwin' : isWin ? 'win32' : 'linux'
const arch = process.arch === 'arm64' ? 'arm64' : 'x64'
return path.join(pkgRoot, 'vendor', 'ripgrep', `${arch}-${platform}`, 'ripgrep.node')
}

export async function runRipgrep(args: string[]): Promise<RipgrepResult> {
const addonPath = getRipgrepAddonPath()
const childScript = `const { ripgrepMain } = require(process.env.RIPGREP_ADDON_PATH); process.exit(ripgrepMain(process.argv.slice(1)));`

return new Promise((resolve) => {
const child = spawn(process.execPath, ['--eval', childScript, 'rg', ...args], {
cwd: process.cwd(),
env: {
...process.env,
ELECTRON_RUN_AS_NODE: '1',
RIPGREP_ADDON_PATH: addonPath
},
stdio: ['ignore', 'pipe', 'pipe']
})

let stdout = ''

child.stdout?.on('data', (chunk) => {
stdout += chunk.toString('utf-8')
})

child.on('error', () => {
resolve({ ok: false, stdout: '', exitCode: null })
})

child.on('close', (code) => {
resolve({ ok: true, stdout, exitCode: code })
})
})
}
@ -31,7 +31,9 @@ export enum ConfigKeys {
DisableHardwareAcceleration = 'disableHardwareAcceleration',
Proxy = 'proxy',
EnableDeveloperMode = 'enableDeveloperMode',
ClientId = 'clientId'
ClientId = 'clientId',
GitBashPath = 'gitBashPath',
GitBashPathSource = 'gitBashPathSource' // 'manual' | 'auto' | null
}

export class ConfigManager {

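For reference only (not part of the diff): one plausible way these new keys could be read and cached through configManager. The get/set signatures are assumed from how the tests later in this commit mock them, and the Windows path is invented.

// Hypothetical usage of the keys added above.
const cached = configManager.get(ConfigKeys.GitBashPath)
if (!cached) {
  configManager.set(ConfigKeys.GitBashPath, 'C:\\Program Files\\Git\\bin\\bash.exe')
  configManager.set(ConfigKeys.GitBashPathSource, 'auto') // 'manual' | 'auto' | null
}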
@ -151,6 +151,7 @@ class FileStorage {
|
||||
private currentWatchPath?: string
|
||||
private debounceTimer?: NodeJS.Timeout
|
||||
private watcherConfig: Required<FileWatcherConfig> = DEFAULT_WATCHER_CONFIG
|
||||
private isPaused = false
|
||||
|
||||
constructor() {
|
||||
this.initStorageDir()
|
||||
@ -162,7 +163,7 @@ class FileStorage {
|
||||
fs.mkdirSync(this.storageDir, { recursive: true })
|
||||
}
|
||||
if (!fs.existsSync(this.notesDir)) {
|
||||
fs.mkdirSync(this.storageDir, { recursive: true })
|
||||
fs.mkdirSync(this.notesDir, { recursive: true })
|
||||
}
|
||||
if (!fs.existsSync(this.tempDir)) {
|
||||
fs.mkdirSync(this.tempDir, { recursive: true })
|
||||
@ -1479,6 +1480,12 @@ class FileStorage {
|
||||
|
||||
private createChangeHandler() {
|
||||
return (eventType: string, filePath: string) => {
|
||||
// Skip processing if watcher is paused
|
||||
if (this.isPaused) {
|
||||
logger.debug('File change ignored (watcher paused)', { eventType, filePath })
|
||||
return
|
||||
}
|
||||
|
||||
if (!this.shouldWatchFile(filePath, eventType)) {
|
||||
return
|
||||
}
|
||||
@ -1636,6 +1643,165 @@ class FileStorage {
|
||||
logger.error('Failed to show item in folder:', error as Error)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Batch upload markdown files from native File objects
|
||||
* This handles all I/O operations in the Main process to avoid blocking Renderer
|
||||
*/
|
||||
public batchUploadMarkdownFiles = async (
|
||||
_: Electron.IpcMainInvokeEvent,
|
||||
filePaths: string[],
|
||||
targetPath: string
|
||||
): Promise<{
|
||||
fileCount: number
|
||||
folderCount: number
|
||||
skippedFiles: number
|
||||
}> => {
|
||||
try {
|
||||
logger.info('Starting batch upload', { fileCount: filePaths.length, targetPath })
|
||||
|
||||
const basePath = path.resolve(targetPath)
|
||||
const MARKDOWN_EXTS = ['.md', '.markdown']
|
||||
|
||||
// Filter markdown files
|
||||
const markdownFiles = filePaths.filter((filePath) => {
|
||||
const ext = path.extname(filePath).toLowerCase()
|
||||
return MARKDOWN_EXTS.includes(ext)
|
||||
})
|
||||
|
||||
const skippedFiles = filePaths.length - markdownFiles.length
|
||||
|
||||
if (markdownFiles.length === 0) {
|
||||
return { fileCount: 0, folderCount: 0, skippedFiles }
|
||||
}
|
||||
|
||||
// Collect unique folders needed
|
||||
const foldersSet = new Set<string>()
|
||||
const fileOperations: Array<{ sourcePath: string; targetPath: string }> = []
|
||||
|
||||
for (const filePath of markdownFiles) {
|
||||
try {
|
||||
// Get relative path if file is from a directory upload
|
||||
const fileName = path.basename(filePath)
|
||||
const relativePath = path.dirname(filePath)
|
||||
|
||||
// Determine target directory structure
|
||||
let targetDir = basePath
|
||||
const folderParts: string[] = []
|
||||
|
||||
// Extract folder structure from file path for nested uploads
|
||||
// This is a simplified version - in real scenario we'd need the original directory structure
|
||||
if (relativePath && relativePath !== '.') {
|
||||
const parts = relativePath.split(path.sep)
|
||||
// Get the last few parts that represent the folder structure within upload
|
||||
const relevantParts = parts.slice(Math.max(0, parts.length - 3))
|
||||
folderParts.push(...relevantParts)
|
||||
}
|
||||
|
||||
// Build target directory path
|
||||
for (const part of folderParts) {
|
||||
targetDir = path.join(targetDir, part)
|
||||
foldersSet.add(targetDir)
|
||||
}
|
||||
|
||||
// Determine final file name
|
||||
const nameWithoutExt = fileName.endsWith('.md')
|
||||
? fileName.slice(0, -3)
|
||||
: fileName.endsWith('.markdown')
|
||||
? fileName.slice(0, -9)
|
||||
: fileName
|
||||
|
||||
const { safeName } = await this.fileNameGuard(_, targetDir, nameWithoutExt, true)
|
||||
const finalPath = path.join(targetDir, safeName + '.md')
|
||||
|
||||
fileOperations.push({ sourcePath: filePath, targetPath: finalPath })
|
||||
} catch (error) {
|
||||
logger.error('Failed to prepare file operation:', error as Error, { filePath })
|
||||
}
|
||||
}
|
||||
|
||||
// Create folders in order (shallow to deep)
|
||||
const sortedFolders = Array.from(foldersSet).sort((a, b) => a.length - b.length)
|
||||
for (const folder of sortedFolders) {
|
||||
try {
|
||||
if (!fs.existsSync(folder)) {
|
||||
await fs.promises.mkdir(folder, { recursive: true })
|
||||
}
|
||||
} catch (error) {
|
||||
logger.debug('Folder already exists or creation failed', { folder, error: (error as Error).message })
|
||||
}
|
||||
}
|
||||
|
||||
// Process files in batches
|
||||
const BATCH_SIZE = 10 // Higher batch size since we're in Main process
|
||||
let successCount = 0
|
||||
|
||||
for (let i = 0; i < fileOperations.length; i += BATCH_SIZE) {
|
||||
const batch = fileOperations.slice(i, i + BATCH_SIZE)
|
||||
|
||||
const results = await Promise.allSettled(
|
||||
batch.map(async (op) => {
|
||||
// Read from source and write to target in Main process
|
||||
const content = await fs.promises.readFile(op.sourcePath, 'utf-8')
|
||||
await fs.promises.writeFile(op.targetPath, content, 'utf-8')
|
||||
return true
|
||||
})
|
||||
)
|
||||
|
||||
results.forEach((result, index) => {
|
||||
if (result.status === 'fulfilled') {
|
||||
successCount++
|
||||
} else {
|
||||
logger.error('Failed to upload file:', result.reason, {
|
||||
file: batch[index].sourcePath
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
logger.info('Batch upload completed', {
|
||||
successCount,
|
||||
folderCount: foldersSet.size,
|
||||
skippedFiles
|
||||
})
|
||||
|
||||
return {
|
||||
fileCount: successCount,
|
||||
folderCount: foldersSet.size,
|
||||
skippedFiles
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Batch upload failed:', error as Error)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Pause file watcher to prevent events during batch operations
|
||||
*/
|
||||
public pauseFileWatcher = async (): Promise<void> => {
|
||||
if (this.watcher) {
|
||||
logger.debug('Pausing file watcher')
|
||||
this.isPaused = true
|
||||
// Clear any pending debounced notifications
|
||||
if (this.debounceTimer) {
|
||||
clearTimeout(this.debounceTimer)
|
||||
this.debounceTimer = undefined
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Resume file watcher and trigger a refresh
|
||||
*/
|
||||
public resumeFileWatcher = async (): Promise<void> => {
|
||||
if (this.watcher && this.currentWatchPath) {
|
||||
logger.debug('Resuming file watcher')
|
||||
this.isPaused = false
|
||||
// Send a synthetic refresh event to trigger tree reload
|
||||
this.notifyChange('refresh', this.currentWatchPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const fileStorage = new FileStorage()
|
||||
|
||||
@ -33,6 +33,7 @@ import {
|
||||
import { nanoid } from '@reduxjs/toolkit'
|
||||
import { HOME_CHERRY_DIR } from '@shared/config/constant'
|
||||
import type { MCPProgressEvent } from '@shared/config/types'
|
||||
import type { MCPServerLogEntry } from '@shared/config/types'
|
||||
import { IpcChannel } from '@shared/IpcChannel'
|
||||
import { defaultAppHeaders } from '@shared/utils'
|
||||
import {
|
||||
@ -56,6 +57,7 @@ import { CacheService } from './CacheService'
|
||||
import DxtService from './DxtService'
|
||||
import { CallBackServer } from './mcp/oauth/callback'
|
||||
import { McpOAuthClientProvider } from './mcp/oauth/provider'
|
||||
import { ServerLogBuffer } from './mcp/ServerLogBuffer'
|
||||
import { windowService } from './WindowService'
|
||||
|
||||
// Generic type for caching wrapped functions
|
||||
@ -142,6 +144,7 @@ class McpService {
|
||||
private pendingClients: Map<string, Promise<Client>> = new Map()
|
||||
private dxtService = new DxtService()
|
||||
private activeToolCalls: Map<string, AbortController> = new Map()
|
||||
private serverLogs = new ServerLogBuffer(200)
|
||||
|
||||
constructor() {
|
||||
this.initClient = this.initClient.bind(this)
|
||||
@ -159,6 +162,7 @@ class McpService {
|
||||
this.cleanup = this.cleanup.bind(this)
|
||||
this.checkMcpConnectivity = this.checkMcpConnectivity.bind(this)
|
||||
this.getServerVersion = this.getServerVersion.bind(this)
|
||||
this.getServerLogs = this.getServerLogs.bind(this)
|
||||
}
|
||||
|
||||
private getServerKey(server: MCPServer): string {
|
||||
@ -172,6 +176,19 @@ class McpService {
|
||||
})
|
||||
}
|
||||
|
||||
private emitServerLog(server: MCPServer, entry: MCPServerLogEntry) {
|
||||
const serverKey = this.getServerKey(server)
|
||||
this.serverLogs.append(serverKey, entry)
|
||||
const mainWindow = windowService.getMainWindow()
|
||||
if (mainWindow) {
|
||||
mainWindow.webContents.send(IpcChannel.Mcp_ServerLog, { ...entry, serverId: server.id })
|
||||
}
|
||||
}
|
||||
|
||||
public getServerLogs(_: Electron.IpcMainInvokeEvent, server: MCPServer): MCPServerLogEntry[] {
|
||||
return this.serverLogs.get(this.getServerKey(server))
|
||||
}
|
||||
|
||||
async initClient(server: MCPServer): Promise<Client> {
|
||||
const serverKey = this.getServerKey(server)
|
||||
|
||||
@ -232,6 +249,26 @@ class McpService {
|
||||
StdioClientTransport | SSEClientTransport | InMemoryTransport | StreamableHTTPClientTransport
|
||||
> => {
|
||||
// Create appropriate transport based on configuration
|
||||
|
||||
// Special case for nowledgeMem - uses HTTP transport instead of in-memory
|
||||
if (isBuiltinMCPServer(server) && server.name === BuiltinMCPServerNames.nowledgeMem) {
|
||||
const nowledgeMemUrl = 'http://127.0.0.1:14242/mcp'
|
||||
const options: StreamableHTTPClientTransportOptions = {
|
||||
fetch: async (url, init) => {
|
||||
return net.fetch(typeof url === 'string' ? url : url.toString(), init)
|
||||
},
|
||||
requestInit: {
|
||||
headers: {
|
||||
...defaultAppHeaders(),
|
||||
APP: 'Cherry Studio'
|
||||
}
|
||||
},
|
||||
authProvider
|
||||
}
|
||||
getServerLogger(server).debug(`Using StreamableHTTPClientTransport for ${server.name}`)
|
||||
return new StreamableHTTPClientTransport(new URL(nowledgeMemUrl), options)
|
||||
}
|
||||
|
||||
if (isBuiltinMCPServer(server) && server.name !== BuiltinMCPServerNames.mcpAutoInstall) {
|
||||
getServerLogger(server).debug(`Using in-memory transport`)
|
||||
const [clientTransport, serverTransport] = InMemoryTransport.createLinkedPair()
|
||||
@ -366,9 +403,18 @@ class McpService {
|
||||
}
|
||||
|
||||
const stdioTransport = new StdioClientTransport(transportOptions)
|
||||
stdioTransport.stderr?.on('data', (data) =>
|
||||
getServerLogger(server).debug(`Stdio stderr`, { data: data.toString() })
|
||||
)
|
||||
stdioTransport.stderr?.on('data', (data) => {
|
||||
const msg = data.toString()
|
||||
getServerLogger(server).debug(`Stdio stderr`, { data: msg })
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'stderr',
|
||||
message: msg.trim(),
|
||||
source: 'stdio'
|
||||
})
|
||||
})
|
||||
// StdioClientTransport does not expose stdout as a readable stream for raw logging
|
||||
// (stdout is reserved for JSON-RPC). Avoid attaching a listener that would never fire.
|
||||
return stdioTransport
|
||||
} else {
|
||||
throw new Error('Either baseUrl or command must be provided')
|
||||
@ -436,6 +482,13 @@ class McpService {
|
||||
}
|
||||
}
|
||||
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'info',
|
||||
message: 'Server connected',
|
||||
source: 'client'
|
||||
})
|
||||
|
||||
// Store the new client in the cache
|
||||
this.clients.set(serverKey, client)
|
||||
|
||||
@ -446,9 +499,22 @@ class McpService {
|
||||
this.clearServerCache(serverKey)
|
||||
|
||||
logger.debug(`Activated server: ${server.name}`)
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'info',
|
||||
message: 'Server activated',
|
||||
source: 'client'
|
||||
})
|
||||
return client
|
||||
} catch (error) {
|
||||
getServerLogger(server).error(`Error activating server ${server.name}`, error as Error)
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'error',
|
||||
message: `Error activating server: ${(error as Error)?.message}`,
|
||||
data: redactSensitive(error),
|
||||
source: 'client'
|
||||
})
|
||||
throw error
|
||||
}
|
||||
} finally {
|
||||
@ -506,6 +572,16 @@ class McpService {
|
||||
// Set up logging message notification handler
|
||||
client.setNotificationHandler(LoggingMessageNotificationSchema, async (notification) => {
|
||||
logger.debug(`Message from server ${server.name}:`, notification.params)
|
||||
const msg = notification.params?.message
|
||||
if (msg) {
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: (notification.params?.level as MCPServerLogEntry['level']) || 'info',
|
||||
message: typeof msg === 'string' ? msg : JSON.stringify(msg),
|
||||
data: redactSensitive(notification.params?.data),
|
||||
source: notification.params?.logger || 'server'
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
getServerLogger(server).debug(`Set up notification handlers`)
|
||||
@ -540,6 +616,7 @@ class McpService {
|
||||
this.clients.delete(serverKey)
|
||||
// Clear all caches for this server
|
||||
this.clearServerCache(serverKey)
|
||||
this.serverLogs.remove(serverKey)
|
||||
} else {
|
||||
logger.warn(`No client found for server`, { serverKey })
|
||||
}
|
||||
@ -548,6 +625,12 @@ class McpService {
|
||||
async stopServer(_: Electron.IpcMainInvokeEvent, server: MCPServer) {
|
||||
const serverKey = this.getServerKey(server)
|
||||
getServerLogger(server).debug(`Stopping server`)
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'info',
|
||||
message: 'Stopping server',
|
||||
source: 'client'
|
||||
})
|
||||
await this.closeClient(serverKey)
|
||||
}
|
||||
|
||||
@ -574,6 +657,12 @@ class McpService {
|
||||
async restartServer(_: Electron.IpcMainInvokeEvent, server: MCPServer) {
|
||||
getServerLogger(server).debug(`Restarting server`)
|
||||
const serverKey = this.getServerKey(server)
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'info',
|
||||
message: 'Restarting server',
|
||||
source: 'client'
|
||||
})
|
||||
await this.closeClient(serverKey)
|
||||
// Clear cache before restarting to ensure fresh data
|
||||
this.clearServerCache(serverKey)
|
||||
@ -606,9 +695,22 @@ class McpService {
|
||||
// Attempt to list tools as a way to check connectivity
|
||||
await client.listTools()
|
||||
getServerLogger(server).debug(`Connectivity check successful`)
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'info',
|
||||
message: 'Connectivity check successful',
|
||||
source: 'connectivity'
|
||||
})
|
||||
return true
|
||||
} catch (error) {
|
||||
getServerLogger(server).error(`Connectivity check failed`, error as Error)
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'error',
|
||||
message: `Connectivity check failed: ${(error as Error).message}`,
|
||||
data: redactSensitive(error),
|
||||
source: 'connectivity'
|
||||
})
|
||||
// Close the client if connectivity check fails to ensure a clean state for the next attempt
|
||||
const serverKey = this.getServerKey(server)
|
||||
await this.closeClient(serverKey)
|
||||
|
||||
@ -1393,6 +1393,50 @@ export class SelectionService {
|
||||
actionWindow.setAlwaysOnTop(isPinned)
|
||||
}
|
||||
|
||||
/**
|
||||
* [Windows only] Manual window resize handler
|
||||
*
|
||||
* ELECTRON BUG WORKAROUND:
|
||||
* In Electron, when using `frame: false` + `transparent: true`, the native window
|
||||
* resize functionality is broken on Windows. This is a known Electron bug.
|
||||
* See: https://github.com/electron/electron/issues/48554
|
||||
*
|
||||
* This method can be removed once the Electron bug is fixed.
|
||||
*/
|
||||
public resizeActionWindow(actionWindow: BrowserWindow, deltaX: number, deltaY: number, direction: string): void {
|
||||
const bounds = actionWindow.getBounds()
|
||||
const minWidth = 300
|
||||
const minHeight = 200
|
||||
|
||||
let { x, y, width, height } = bounds
|
||||
|
||||
// Handle horizontal resize
|
||||
if (direction.includes('e')) {
|
||||
width = Math.max(minWidth, width + deltaX)
|
||||
}
|
||||
if (direction.includes('w')) {
|
||||
const newWidth = Math.max(minWidth, width - deltaX)
|
||||
if (newWidth !== width) {
|
||||
x = x + (width - newWidth)
|
||||
width = newWidth
|
||||
}
|
||||
}
|
||||
|
||||
// Handle vertical resize
|
||||
if (direction.includes('s')) {
|
||||
height = Math.max(minHeight, height + deltaY)
|
||||
}
|
||||
if (direction.includes('n')) {
|
||||
const newHeight = Math.max(minHeight, height - deltaY)
|
||||
if (newHeight !== height) {
|
||||
y = y + (height - newHeight)
|
||||
height = newHeight
|
||||
}
|
||||
}
|
||||
|
||||
actionWindow.setBounds({ x, y, width, height })
|
||||
}
|
||||
|
||||
/**
|
||||
* Update trigger mode behavior
|
||||
* Switches between selection-based and alt-key based triggering
|
||||
@ -1510,6 +1554,18 @@ export class SelectionService {
|
||||
}
|
||||
})
|
||||
|
||||
// [Windows only] Electron bug workaround - can be removed once fixed
|
||||
// See: https://github.com/electron/electron/issues/48554
|
||||
ipcMain.handle(
|
||||
IpcChannel.Selection_ActionWindowResize,
|
||||
(event, deltaX: number, deltaY: number, direction: string) => {
|
||||
const actionWindow = BrowserWindow.fromWebContents(event.sender)
|
||||
if (actionWindow) {
|
||||
selectionService?.resizeActionWindow(actionWindow, deltaX, deltaY, direction)
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
this.isIpcHandlerRegistered = true
|
||||
}
|
||||
|
||||
|
||||
@ -35,6 +35,15 @@ function getShortcutHandler(shortcut: Shortcut) {
|
||||
}
|
||||
case 'mini_window':
|
||||
return () => {
|
||||
// Check the QuickAssistant state inside the handler instead of at registration time
|
||||
const quickAssistantEnabled = configManager.getEnableQuickAssistant()
|
||||
logger.info(`mini_window shortcut triggered, QuickAssistant enabled: ${quickAssistantEnabled}`)
|
||||
|
||||
if (!quickAssistantEnabled) {
|
||||
logger.warn('QuickAssistant is disabled, ignoring mini_window shortcut trigger')
|
||||
return
|
||||
}
|
||||
|
||||
windowService.toggleMiniWindow()
|
||||
}
|
||||
case 'selection_assistant_toggle':
|
||||
@ -190,11 +199,10 @@ export function registerShortcuts(window: BrowserWindow) {
|
||||
break
|
||||
|
||||
case 'mini_window':
|
||||
//available only when QuickAssistant enabled
|
||||
if (!configManager.getEnableQuickAssistant()) {
|
||||
return
|
||||
}
|
||||
// Removed the conditional check at registration time; the check now happens inside the handler
|
||||
logger.info(`Processing mini_window shortcut, enabled: ${shortcut.enabled}`)
|
||||
showMiniWindowAccelerator = formatShortcutKey(shortcut.shortcut)
|
||||
logger.debug(`Mini window accelerator set to: ${showMiniWindowAccelerator}`)
|
||||
break
|
||||
|
||||
case 'selection_assistant_toggle':
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
import { IpcChannel } from '@shared/IpcChannel'
|
||||
import { app, session, shell, webContents } from 'electron'
|
||||
import { app, dialog, session, shell, webContents } from 'electron'
|
||||
import { promises as fs } from 'fs'
|
||||
|
||||
/**
|
||||
* init the useragent of the webview session
|
||||
@ -53,11 +54,17 @@ const attachKeyboardHandler = (contents: Electron.WebContents) => {
|
||||
return
|
||||
}
|
||||
|
||||
const isFindShortcut = (input.control || input.meta) && key === 'f'
|
||||
const isEscape = key === 'escape'
|
||||
const isEnter = key === 'enter'
|
||||
// Helper to check if this is a shortcut we handle
|
||||
const isHandledShortcut = (k: string) => {
|
||||
const isFindShortcut = (input.control || input.meta) && k === 'f'
|
||||
const isPrintShortcut = (input.control || input.meta) && k === 'p'
|
||||
const isSaveShortcut = (input.control || input.meta) && k === 's'
|
||||
const isEscape = k === 'escape'
|
||||
const isEnter = k === 'enter'
|
||||
return isFindShortcut || isPrintShortcut || isSaveShortcut || isEscape || isEnter
|
||||
}
|
||||
|
||||
if (!isFindShortcut && !isEscape && !isEnter) {
|
||||
if (!isHandledShortcut(key)) {
|
||||
return
|
||||
}
|
||||
|
||||
@ -66,11 +73,20 @@ const attachKeyboardHandler = (contents: Electron.WebContents) => {
|
||||
return
|
||||
}
|
||||
|
||||
const isFindShortcut = (input.control || input.meta) && key === 'f'
|
||||
const isPrintShortcut = (input.control || input.meta) && key === 'p'
|
||||
const isSaveShortcut = (input.control || input.meta) && key === 's'
|
||||
|
||||
// Always prevent Cmd/Ctrl+F to override the guest page's native find dialog
|
||||
if (isFindShortcut) {
|
||||
event.preventDefault()
|
||||
}
|
||||
|
||||
// Prevent default print/save dialogs and handle them with custom logic
|
||||
if (isPrintShortcut || isSaveShortcut) {
|
||||
event.preventDefault()
|
||||
}
|
||||
|
||||
// Send the hotkey event to the renderer
|
||||
// The renderer will decide whether to preventDefault for Escape and Enter
|
||||
// based on whether the search bar is visible
|
||||
@ -100,3 +116,130 @@ export function initWebviewHotkeys() {
|
||||
attachKeyboardHandler(contents)
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Print webview content to PDF
|
||||
* @param webviewId The webview webContents id
|
||||
* @returns Path to saved PDF file or null if user cancelled
|
||||
*/
|
||||
export async function printWebviewToPDF(webviewId: number): Promise<string | null> {
|
||||
const webview = webContents.fromId(webviewId)
|
||||
if (!webview) {
|
||||
throw new Error('Webview not found')
|
||||
}
|
||||
|
||||
try {
|
||||
// Get the page title for default filename
|
||||
const pageTitle = await webview.executeJavaScript('document.title || "webpage"').catch(() => 'webpage')
|
||||
// Sanitize filename by removing invalid characters
|
||||
const sanitizedTitle = pageTitle.replace(/[<>:"/\\|?*]/g, '-').substring(0, 100)
|
||||
const defaultFilename = sanitizedTitle ? `${sanitizedTitle}.pdf` : `webpage-${Date.now()}.pdf`
|
||||
|
||||
// Show save dialog
|
||||
const { canceled, filePath } = await dialog.showSaveDialog({
|
||||
title: 'Save as PDF',
|
||||
defaultPath: defaultFilename,
|
||||
filters: [{ name: 'PDF Files', extensions: ['pdf'] }]
|
||||
})
|
||||
|
||||
if (canceled || !filePath) {
|
||||
return null
|
||||
}
|
||||
|
||||
// Generate PDF with settings to capture full page
|
||||
const pdfData = await webview.printToPDF({
|
||||
margins: {
|
||||
marginType: 'default'
|
||||
},
|
||||
printBackground: true,
|
||||
landscape: false,
|
||||
pageSize: 'A4',
|
||||
preferCSSPageSize: true
|
||||
})
|
||||
|
||||
// Save PDF to file
|
||||
await fs.writeFile(filePath, pdfData)
|
||||
|
||||
return filePath
|
||||
} catch (error) {
|
||||
throw new Error(`Failed to print to PDF: ${(error as Error).message}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Save webview content as HTML
|
||||
* @param webviewId The webview webContents id
|
||||
* @returns Path to saved HTML file or null if user cancelled
|
||||
*/
|
||||
export async function saveWebviewAsHTML(webviewId: number): Promise<string | null> {
|
||||
const webview = webContents.fromId(webviewId)
|
||||
if (!webview) {
|
||||
throw new Error('Webview not found')
|
||||
}
|
||||
|
||||
try {
|
||||
// Get the page title for default filename
|
||||
const pageTitle = await webview.executeJavaScript('document.title || "webpage"').catch(() => 'webpage')
|
||||
// Sanitize filename by removing invalid characters
|
||||
const sanitizedTitle = pageTitle.replace(/[<>:"/\\|?*]/g, '-').substring(0, 100)
|
||||
const defaultFilename = sanitizedTitle ? `${sanitizedTitle}.html` : `webpage-${Date.now()}.html`
|
||||
|
||||
// Show save dialog
|
||||
const { canceled, filePath } = await dialog.showSaveDialog({
|
||||
title: 'Save as HTML',
|
||||
defaultPath: defaultFilename,
|
||||
filters: [
|
||||
{ name: 'HTML Files', extensions: ['html', 'htm'] },
|
||||
{ name: 'All Files', extensions: ['*'] }
|
||||
]
|
||||
})
|
||||
|
||||
if (canceled || !filePath) {
|
||||
return null
|
||||
}
|
||||
|
||||
// Get the HTML content with safe error handling
|
||||
const html = await webview.executeJavaScript(`
|
||||
(() => {
|
||||
try {
|
||||
// Build complete DOCTYPE string if present
|
||||
let doctype = '';
|
||||
if (document.doctype) {
|
||||
const dt = document.doctype;
|
||||
doctype = '<!DOCTYPE ' + (dt.name || 'html');
|
||||
|
||||
// Add PUBLIC identifier if publicId is present
|
||||
if (dt.publicId) {
|
||||
// Escape single quotes in publicId
|
||||
const escapedPublicId = String(dt.publicId).replace(/'/g, "\\'");
|
||||
doctype += " PUBLIC '" + escapedPublicId + "'";
|
||||
|
||||
// Add systemId if present (required when publicId is present)
|
||||
if (dt.systemId) {
|
||||
const escapedSystemId = String(dt.systemId).replace(/'/g, "\\'");
|
||||
doctype += " '" + escapedSystemId + "'";
|
||||
}
|
||||
} else if (dt.systemId) {
|
||||
// SYSTEM identifier (without PUBLIC)
|
||||
const escapedSystemId = String(dt.systemId).replace(/'/g, "\\'");
|
||||
doctype += " SYSTEM '" + escapedSystemId + "'";
|
||||
}
|
||||
|
||||
doctype += '>';
|
||||
}
|
||||
return doctype + (document.documentElement?.outerHTML || '');
|
||||
} catch (error) {
|
||||
// Fallback: just return the HTML without DOCTYPE if there's an error
|
||||
return document.documentElement?.outerHTML || '';
|
||||
}
|
||||
})()
|
||||
`)
|
||||
|
||||
// Save HTML to file
|
||||
await fs.writeFile(filePath, html, 'utf-8')
|
||||
|
||||
return filePath
|
||||
} catch (error) {
|
||||
throw new Error(`Failed to save as HTML: ${(error as Error).message}`)
|
||||
}
|
||||
}
|
||||
|
||||
@ -271,9 +271,9 @@ export class WindowService {
|
||||
'https://account.siliconflow.cn/oauth',
|
||||
'https://cloud.siliconflow.cn/bills',
|
||||
'https://cloud.siliconflow.cn/expensebill',
|
||||
'https://aihubmix.com/token',
|
||||
'https://aihubmix.com/topup',
|
||||
'https://aihubmix.com/statistics',
|
||||
'https://console.aihubmix.com/token',
|
||||
'https://console.aihubmix.com/topup',
|
||||
'https://console.aihubmix.com/statistics',
|
||||
'https://dash.302.ai/sso/login',
|
||||
'https://dash.302.ai/charge',
|
||||
'https://www.aiionly.com/login'
|
||||
|
||||
29
src/main/services/__tests__/ServerLogBuffer.test.ts
Normal file
29
src/main/services/__tests__/ServerLogBuffer.test.ts
Normal file
@ -0,0 +1,29 @@
import { describe, expect, it } from 'vitest'

import { ServerLogBuffer } from '../mcp/ServerLogBuffer'

describe('ServerLogBuffer', () => {
it('keeps a bounded number of entries per server', () => {
const buffer = new ServerLogBuffer(3)
const key = 'srv'

buffer.append(key, { timestamp: 1, level: 'info', message: 'a' })
buffer.append(key, { timestamp: 2, level: 'info', message: 'b' })
buffer.append(key, { timestamp: 3, level: 'info', message: 'c' })
buffer.append(key, { timestamp: 4, level: 'info', message: 'd' })

const logs = buffer.get(key)
expect(logs).toHaveLength(3)
expect(logs[0].message).toBe('b')
expect(logs[2].message).toBe('d')
})

it('isolates entries by server key', () => {
const buffer = new ServerLogBuffer(5)
buffer.append('one', { timestamp: 1, level: 'info', message: 'a' })
buffer.append('two', { timestamp: 2, level: 'info', message: 'b' })

expect(buffer.get('one')).toHaveLength(1)
expect(buffer.get('two')).toHaveLength(1)
})
})
@ -78,7 +78,7 @@ export abstract class BaseService {
|
||||
* Get database instance
|
||||
* Automatically waits for initialization to complete
|
||||
*/
|
||||
protected async getDatabase() {
|
||||
public async getDatabase() {
|
||||
const dbManager = await DatabaseManager.getInstance()
|
||||
return dbManager.getDatabase()
|
||||
}
|
||||
|
||||
@ -15,6 +15,8 @@ import { query } from '@anthropic-ai/claude-agent-sdk'
|
||||
import { loggerService } from '@logger'
|
||||
import { config as apiConfigService } from '@main/apiServer/config'
|
||||
import { validateModelId } from '@main/apiServer/utils'
|
||||
import { isWin } from '@main/constant'
|
||||
import { autoDiscoverGitBash } from '@main/utils/process'
|
||||
import getLoginShellEnvironment from '@main/utils/shell-env'
|
||||
import { app } from 'electron'
|
||||
|
||||
@ -107,6 +109,9 @@ class ClaudeCodeService implements AgentServiceInterface {
|
||||
Object.entries(loginShellEnv).filter(([key]) => !key.toLowerCase().endsWith('_proxy'))
|
||||
) as Record<string, string>
|
||||
|
||||
// Auto-discover Git Bash path on Windows (already logs internally)
|
||||
const customGitBashPath = isWin ? autoDiscoverGitBash() : null
|
||||
|
||||
const env = {
|
||||
...loginShellEnvWithoutProxies,
|
||||
// TODO: fix the proxy api server
|
||||
@ -126,7 +131,8 @@ class ClaudeCodeService implements AgentServiceInterface {
|
||||
// Set CLAUDE_CONFIG_DIR to app's userData directory to avoid path encoding issues
|
||||
// on Windows when the username contains non-ASCII characters (e.g., Chinese characters)
|
||||
// This prevents the SDK from using the user's home directory which may have encoding problems
|
||||
CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude')
|
||||
CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude'),
|
||||
...(customGitBashPath ? { CLAUDE_CODE_GIT_BASH_PATH: customGitBashPath } : {})
|
||||
}
|
||||
|
||||
const errorChunks: string[] = []
|
||||
|
||||
36
src/main/services/mcp/ServerLogBuffer.ts
Normal file
36
src/main/services/mcp/ServerLogBuffer.ts
Normal file
@ -0,0 +1,36 @@
export type MCPServerLogEntry = {
timestamp: number
level: 'debug' | 'info' | 'warn' | 'error' | 'stderr' | 'stdout'
message: string
data?: any
source?: string
}

/**
* Lightweight ring buffer for per-server MCP logs.
*/
export class ServerLogBuffer {
private maxEntries: number
private logs: Map<string, MCPServerLogEntry[]> = new Map()

constructor(maxEntries = 200) {
this.maxEntries = maxEntries
}

append(serverKey: string, entry: MCPServerLogEntry) {
const list = this.logs.get(serverKey) ?? []
list.push(entry)
if (list.length > this.maxEntries) {
list.splice(0, list.length - this.maxEntries)
}
this.logs.set(serverKey, list)
}

get(serverKey: string): MCPServerLogEntry[] {
return [...(this.logs.get(serverKey) ?? [])]
}

remove(serverKey: string) {
this.logs.delete(serverKey)
}
}
@ -128,8 +128,8 @@ export class CallBackServer {
})

return new Promise<http.Server>((resolve, reject) => {
server.listen(port, () => {
logger.info(`OAuth callback server listening on port ${port}`)
server.listen(port, '127.0.0.1', () => {
logger.info(`OAuth callback server listening on 127.0.0.1:${port}`)
resolve(server)
})

990
src/main/utils/__tests__/process.test.ts
Normal file
990
src/main/utils/__tests__/process.test.ts
Normal file
@ -0,0 +1,990 @@
|
||||
import { configManager } from '@main/services/ConfigManager'
|
||||
import { execFileSync } from 'child_process'
|
||||
import fs from 'fs'
|
||||
import path from 'path'
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import { autoDiscoverGitBash, findExecutable, findGitBash, validateGitBashPath } from '../process'
|
||||
|
||||
// Mock configManager
|
||||
vi.mock('@main/services/ConfigManager', () => ({
|
||||
ConfigKeys: {
|
||||
GitBashPath: 'gitBashPath'
|
||||
},
|
||||
configManager: {
|
||||
get: vi.fn(),
|
||||
set: vi.fn()
|
||||
}
|
||||
}))
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('child_process')
|
||||
vi.mock('fs')
|
||||
vi.mock('path')
|
||||
|
||||
// These tests only run on Windows since the functions have platform guards
|
||||
describe.skipIf(process.platform !== 'win32')('process utilities', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks()
|
||||
|
||||
// Mock path.join to concatenate paths with backslashes (Windows-style)
|
||||
vi.mocked(path.join).mockImplementation((...args) => args.join('\\'))
|
||||
|
||||
// Mock path.resolve to handle path resolution with .. support
|
||||
vi.mocked(path.resolve).mockImplementation((...args) => {
|
||||
let result = args.join('\\')
|
||||
|
||||
// Handle .. navigation
|
||||
while (result.includes('\\..')) {
|
||||
result = result.replace(/\\[^\\]+\\\.\./g, '')
|
||||
}
|
||||
|
||||
// Ensure absolute path
|
||||
if (!result.match(/^[A-Z]:/)) {
|
||||
result = `C:\\cwd\\${result}`
|
||||
}
|
||||
|
||||
return result
|
||||
})
|
||||
|
||||
// Mock path.dirname
|
||||
vi.mocked(path.dirname).mockImplementation((p) => {
|
||||
const parts = p.split('\\')
|
||||
parts.pop()
|
||||
return parts.join('\\')
|
||||
})
|
||||
|
||||
// Mock path.sep
|
||||
Object.defineProperty(path, 'sep', { value: '\\', writable: true })
|
||||
|
||||
// Mock process.cwd()
|
||||
vi.spyOn(process, 'cwd').mockReturnValue('C:\\cwd')
|
||||
})
|
||||
|
||||
describe('findExecutable', () => {
|
||||
describe('git common paths', () => {
|
||||
it('should find git at Program Files path', () => {
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => p === gitPath)
|
||||
|
||||
const result = findExecutable('git')
|
||||
|
||||
expect(result).toBe(gitPath)
|
||||
expect(fs.existsSync).toHaveBeenCalledWith(gitPath)
|
||||
})
|
||||
|
||||
it('should find git at Program Files (x86) path', () => {
|
||||
const gitPath = 'C:\\Program Files (x86)\\Git\\cmd\\git.exe'
|
||||
process.env['ProgramFiles(x86)'] = 'C:\\Program Files (x86)'
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => p === gitPath)
|
||||
|
||||
const result = findExecutable('git')
|
||||
|
||||
expect(result).toBe(gitPath)
|
||||
expect(fs.existsSync).toHaveBeenCalledWith(gitPath)
|
||||
})
|
||||
|
||||
it('should use fallback paths when environment variables are not set', () => {
|
||||
delete process.env.ProgramFiles
|
||||
delete process.env['ProgramFiles(x86)']
|
||||
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => p === gitPath)
|
||||
|
||||
const result = findExecutable('git')
|
||||
|
||||
expect(result).toBe(gitPath)
|
||||
})
|
||||
})
|
||||
|
||||
describe('where.exe PATH lookup', () => {
|
||||
beforeEach(() => {
|
||||
Object.defineProperty(process, 'platform', { value: 'win32', writable: true })
|
||||
// Common paths don't exist
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false)
|
||||
})
|
||||
|
||||
it('should find executable via where.exe', () => {
|
||||
const gitPath = 'C:\\Git\\bin\\git.exe'
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(gitPath)
|
||||
|
||||
const result = findExecutable('git')
|
||||
|
||||
expect(result).toBe(gitPath)
|
||||
expect(execFileSync).toHaveBeenCalledWith('where.exe', ['git.exe'], {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe']
|
||||
})
|
||||
})
|
||||
|
||||
it('should add .exe extension when calling where.exe', () => {
|
||||
vi.mocked(execFileSync).mockImplementation(() => {
|
||||
throw new Error('Not found')
|
||||
})
|
||||
|
||||
findExecutable('node')
|
||||
|
||||
expect(execFileSync).toHaveBeenCalledWith('where.exe', ['node.exe'], expect.any(Object))
|
||||
})
|
||||
|
||||
it('should handle Windows line endings (CRLF)', () => {
|
||||
const gitPath1 = 'C:\\Git\\bin\\git.exe'
|
||||
const gitPath2 = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(`${gitPath1}\r\n${gitPath2}\r\n`)
|
||||
|
||||
const result = findExecutable('git')
|
||||
|
||||
// Should return the first valid path
|
||||
expect(result).toBe(gitPath1)
|
||||
})
|
||||
|
||||
it('should handle Unix line endings (LF)', () => {
|
||||
const gitPath1 = 'C:\\Git\\bin\\git.exe'
|
||||
const gitPath2 = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(`${gitPath1}\n${gitPath2}\n`)
|
||||
|
||||
const result = findExecutable('git')
|
||||
|
||||
expect(result).toBe(gitPath1)
|
||||
})
|
||||
|
||||
it('should handle mixed line endings', () => {
|
||||
const gitPath1 = 'C:\\Git\\bin\\git.exe'
|
||||
const gitPath2 = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(`${gitPath1}\r\n${gitPath2}\n`)
|
||||
|
||||
const result = findExecutable('git')
|
||||
|
||||
expect(result).toBe(gitPath1)
|
||||
})
|
||||
|
||||
it('should trim whitespace from paths', () => {
|
||||
const gitPath = 'C:\\Git\\bin\\git.exe'
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(` ${gitPath} \n`)
|
||||
|
||||
const result = findExecutable('git')
|
||||
|
||||
expect(result).toBe(gitPath)
|
||||
})
|
||||
|
||||
it('should filter empty lines', () => {
|
||||
const gitPath = 'C:\\Git\\bin\\git.exe'
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(`\n\n${gitPath}\n\n`)
|
||||
|
||||
const result = findExecutable('git')
|
||||
|
||||
expect(result).toBe(gitPath)
|
||||
})
|
||||
})
|
||||
|
||||
describe('security checks', () => {
|
||||
beforeEach(() => {
|
||||
Object.defineProperty(process, 'platform', { value: 'win32', writable: true })
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false)
|
||||
})
|
||||
|
||||
it('should skip executables in current directory', () => {
|
||||
const maliciousPath = 'C:\\cwd\\git.exe'
|
||||
const safePath = 'C:\\Git\\bin\\git.exe'
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(`${maliciousPath}\n${safePath}`)
|
||||
|
||||
vi.mocked(path.resolve).mockImplementation((p) => {
|
||||
if (p.includes('cwd\\git.exe')) return 'c:\\cwd\\git.exe'
|
||||
return 'c:\\git\\bin\\git.exe'
|
||||
})
|
||||
|
||||
vi.mocked(path.dirname).mockImplementation((p) => {
|
||||
if (p.includes('cwd\\git.exe')) return 'c:\\cwd'
|
||||
return 'c:\\git\\bin'
|
||||
})
|
||||
|
||||
const result = findExecutable('git')
|
||||
|
||||
// Should skip malicious path and return safe path
|
||||
expect(result).toBe(safePath)
|
||||
})
|
||||
|
||||
it('should skip executables in current directory subdirectories', () => {
|
||||
const maliciousPath = 'C:\\cwd\\subdir\\git.exe'
|
||||
const safePath = 'C:\\Git\\bin\\git.exe'
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(`${maliciousPath}\n${safePath}`)
|
||||
|
||||
vi.mocked(path.resolve).mockImplementation((p) => {
|
||||
if (p.includes('cwd\\subdir')) return 'c:\\cwd\\subdir\\git.exe'
|
||||
return 'c:\\git\\bin\\git.exe'
|
||||
})
|
||||
|
||||
vi.mocked(path.dirname).mockImplementation((p) => {
|
||||
if (p.includes('cwd\\subdir')) return 'c:\\cwd\\subdir'
|
||||
return 'c:\\git\\bin'
|
||||
})
|
||||
|
||||
const result = findExecutable('git')
|
||||
|
||||
expect(result).toBe(safePath)
|
||||
})
|
||||
|
||||
it('should return null when only malicious executables are found', () => {
|
||||
const maliciousPath = 'C:\\cwd\\git.exe'
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(maliciousPath)
|
||||
|
||||
vi.mocked(path.resolve).mockReturnValue('c:\\cwd\\git.exe')
|
||||
vi.mocked(path.dirname).mockReturnValue('c:\\cwd')
|
||||
|
||||
const result = findExecutable('git')
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
})
|
||||
|
||||
describe('error handling', () => {
|
||||
beforeEach(() => {
|
||||
Object.defineProperty(process, 'platform', { value: 'win32', writable: true })
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false)
|
||||
})
|
||||
|
||||
it('should return null when where.exe fails', () => {
|
||||
vi.mocked(execFileSync).mockImplementation(() => {
|
||||
throw new Error('Command failed')
|
||||
})
|
||||
|
||||
const result = findExecutable('nonexistent')
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it('should return null when where.exe returns empty output', () => {
|
||||
vi.mocked(execFileSync).mockReturnValue('')
|
||||
|
||||
const result = findExecutable('git')
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it('should return null when where.exe returns only whitespace', () => {
|
||||
vi.mocked(execFileSync).mockReturnValue(' \n\n ')
|
||||
|
||||
const result = findExecutable('git')
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
})
|
||||
|
||||
describe('non-git executables', () => {
|
||||
beforeEach(() => {
|
||||
Object.defineProperty(process, 'platform', { value: 'win32', writable: true })
|
||||
})
|
||||
|
||||
it('should skip common paths check for non-git executables', () => {
|
||||
const nodePath = 'C:\\Program Files\\nodejs\\node.exe'
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(nodePath)
|
||||
|
||||
const result = findExecutable('node')
|
||||
|
||||
expect(result).toBe(nodePath)
|
||||
// Should not check common Git paths
|
||||
expect(fs.existsSync).not.toHaveBeenCalledWith(expect.stringContaining('Git\\cmd\\node.exe'))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('validateGitBashPath', () => {
|
||||
it('returns null when path is null', () => {
|
||||
const result = validateGitBashPath(null)
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it('returns null when path is undefined', () => {
|
||||
const result = validateGitBashPath(undefined)
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it('returns normalized path when valid bash.exe exists', () => {
|
||||
const customPath = 'C:\\PortableGit\\bin\\bash.exe'
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => p === 'C:\\PortableGit\\bin\\bash.exe')
|
||||
|
||||
const result = validateGitBashPath(customPath)
|
||||
|
||||
expect(result).toBe('C:\\PortableGit\\bin\\bash.exe')
|
||||
})
|
||||
|
||||
it('returns null when file does not exist', () => {
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false)
|
||||
|
||||
const result = validateGitBashPath('C:\\missing\\bash.exe')
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it('returns null when path is not bash.exe', () => {
|
||||
const customPath = 'C:\\PortableGit\\bin\\git.exe'
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true)
|
||||
|
||||
const result = validateGitBashPath(customPath)
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
})
|
||||
|
||||
describe('findGitBash', () => {
|
||||
describe('customPath parameter', () => {
|
||||
beforeEach(() => {
|
||||
delete process.env.CLAUDE_CODE_GIT_BASH_PATH
|
||||
})
|
||||
|
||||
it('uses customPath when valid', () => {
|
||||
const customPath = 'C:\\CustomGit\\bin\\bash.exe'
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => p === customPath)
|
||||
|
||||
const result = findGitBash(customPath)
|
||||
|
||||
expect(result).toBe(customPath)
|
||||
expect(execFileSync).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('falls back when customPath is invalid', () => {
|
||||
const customPath = 'C:\\Invalid\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => {
|
||||
if (p === customPath) return false
|
||||
if (p === gitPath) return true
|
||||
if (p === bashPath) return true
|
||||
return false
|
||||
})
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(gitPath)
|
||||
|
||||
const result = findGitBash(customPath)
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
})
|
||||
|
||||
it('prioritizes customPath over env override', () => {
|
||||
const customPath = 'C:\\CustomGit\\bin\\bash.exe'
|
||||
const envPath = 'C:\\EnvGit\\bin\\bash.exe'
|
||||
process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => p === customPath || p === envPath)
|
||||
|
||||
const result = findGitBash(customPath)
|
||||
|
||||
expect(result).toBe(customPath)
|
||||
})
|
||||
})
|
||||
|
||||
describe('env override', () => {
|
||||
beforeEach(() => {
|
||||
delete process.env.CLAUDE_CODE_GIT_BASH_PATH
|
||||
})
|
||||
|
||||
it('uses CLAUDE_CODE_GIT_BASH_PATH when valid', () => {
|
||||
const envPath = 'C:\\OverrideGit\\bin\\bash.exe'
|
||||
process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => p === envPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBe(envPath)
|
||||
expect(execFileSync).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('falls back when CLAUDE_CODE_GIT_BASH_PATH is invalid', () => {
|
||||
const envPath = 'C:\\Invalid\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
|
||||
process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => {
|
||||
if (p === envPath) return false
|
||||
if (p === gitPath) return true
|
||||
if (p === bashPath) return true
|
||||
return false
|
||||
})
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(gitPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
})
|
||||
})
|
||||
|
||||
describe('git.exe path derivation', () => {
|
||||
it('should derive bash.exe from standard Git installation (Git/cmd/git.exe)', () => {
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
|
||||
// findExecutable will find git at common path
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => {
|
||||
return p === gitPath || p === bashPath
|
||||
})
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
})
|
||||
|
||||
it('should derive bash.exe from portable Git installation (Git/bin/git.exe)', () => {
|
||||
const gitPath = 'C:\\PortableGit\\bin\\git.exe'
|
||||
const bashPath = 'C:\\PortableGit\\bin\\bash.exe'
|
||||
|
||||
// Mock: common git paths don't exist, but where.exe finds portable git
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => {
|
||||
const pathStr = p?.toString() || ''
|
||||
// Common git paths don't exist
|
||||
if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
|
||||
if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
|
||||
// Portable bash.exe exists at Git/bin/bash.exe (second path in possibleBashPaths)
|
||||
if (pathStr === bashPath) return true
|
||||
return false
|
||||
})
|
||||
|
||||
// where.exe returns portable git path
|
||||
vi.mocked(execFileSync).mockReturnValue(gitPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
})
|
||||
|
||||
it('should derive bash.exe from MSYS2 Git installation (Git/usr/bin/bash.exe)', () => {
|
||||
const gitPath = 'C:\\msys64\\usr\\bin\\git.exe'
|
||||
const bashPath = 'C:\\msys64\\usr\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => {
|
||||
const pathStr = p?.toString() || ''
|
||||
// Common git paths don't exist
|
||||
if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
|
||||
if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
|
||||
// MSYS2 bash.exe exists at usr/bin/bash.exe (third path in possibleBashPaths)
|
||||
if (pathStr === bashPath) return true
|
||||
return false
|
||||
})
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(gitPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
})
|
||||
|
||||
it('should try multiple bash.exe locations in order', () => {
|
||||
const gitPath = 'C:\\Git\\cmd\\git.exe'
|
||||
const bashPath = 'C:\\Git\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => {
|
||||
const pathStr = p?.toString() || ''
|
||||
// Common git paths don't exist
|
||||
if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
|
||||
if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
|
||||
// Standard path exists (first in possibleBashPaths)
|
||||
if (pathStr === bashPath) return true
|
||||
return false
|
||||
})
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(gitPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
})
|
||||
|
||||
it('should handle when git.exe is found but bash.exe is not at any derived location', () => {
|
||||
const gitPath = 'C:\\Git\\cmd\\git.exe'
|
||||
|
||||
// git.exe exists via where.exe, but bash.exe doesn't exist at any derived location
|
||||
vi.mocked(fs.existsSync).mockImplementation(() => {
|
||||
// Only return false for all bash.exe checks
|
||||
return false
|
||||
})
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(gitPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
// Should fall back to common paths check
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
})
|
||||
|
||||
describe('common paths fallback', () => {
|
||||
beforeEach(() => {
|
||||
// git.exe not found
|
||||
vi.mocked(execFileSync).mockImplementation(() => {
|
||||
throw new Error('Not found')
|
||||
})
|
||||
})
|
||||
|
||||
it('should check Program Files path', () => {
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => p === bashPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
})
|
||||
|
||||
it('should check Program Files (x86) path', () => {
|
||||
const bashPath = 'C:\\Program Files (x86)\\Git\\bin\\bash.exe'
|
||||
process.env['ProgramFiles(x86)'] = 'C:\\Program Files (x86)'
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => p === bashPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
})
|
||||
|
||||
it('should check LOCALAPPDATA path', () => {
|
||||
const bashPath = 'C:\\Users\\User\\AppData\\Local\\Programs\\Git\\bin\\bash.exe'
|
||||
process.env.LOCALAPPDATA = 'C:\\Users\\User\\AppData\\Local'
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => p === bashPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
})
|
||||
|
||||
it('should skip LOCALAPPDATA check when environment variable is not set', () => {
|
||||
delete process.env.LOCALAPPDATA
|
||||
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBeNull()
|
||||
// Should not check invalid path with empty LOCALAPPDATA
|
||||
expect(fs.existsSync).not.toHaveBeenCalledWith(expect.stringContaining('undefined'))
|
||||
})
|
||||
|
||||
it('should use fallback values when environment variables are not set', () => {
|
||||
delete process.env.ProgramFiles
|
||||
delete process.env['ProgramFiles(x86)']
|
||||
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => p === bashPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
})
|
||||
})
|
||||
|
||||
describe('priority order', () => {
|
||||
it('should prioritize git.exe derivation over common paths', () => {
|
||||
const gitPath = 'C:\\CustomPath\\Git\\cmd\\git.exe'
|
||||
const derivedBashPath = 'C:\\CustomPath\\Git\\bin\\bash.exe'
|
||||
const commonBashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
|
||||
// Both exist
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => {
|
||||
const pathStr = p?.toString() || ''
|
||||
// Common git paths don't exist (so findExecutable uses where.exe)
|
||||
if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
|
||||
if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
|
||||
// Both bash paths exist, but derived should be checked first
|
||||
if (pathStr === derivedBashPath) return true
|
||||
if (pathStr === commonBashPath) return true
|
||||
return false
|
||||
})
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(gitPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
// Should return derived path, not common path
|
||||
expect(result).toBe(derivedBashPath)
|
||||
})
|
||||
})
|
||||
|
||||
describe('error scenarios', () => {
|
||||
it('should return null when Git is not installed anywhere', () => {
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false)
|
||||
vi.mocked(execFileSync).mockImplementation(() => {
|
||||
throw new Error('Not found')
|
||||
})
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it('should return null when git.exe exists but bash.exe does not', () => {
|
||||
const gitPath = 'C:\\Git\\cmd\\git.exe'
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => {
|
||||
// git.exe exists, but no bash.exe anywhere
|
||||
return p === gitPath
|
||||
})
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(gitPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
})
|
||||
|
||||
describe('real-world scenarios', () => {
|
||||
it('should handle official Git for Windows installer', () => {
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => {
|
||||
return p === gitPath || p === bashPath
|
||||
})
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
})
|
||||
|
||||
it('should handle portable Git installation in custom directory', () => {
|
||||
const gitPath = 'D:\\DevTools\\PortableGit\\bin\\git.exe'
|
||||
const bashPath = 'D:\\DevTools\\PortableGit\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => {
|
||||
const pathStr = p?.toString() || ''
|
||||
// Common paths don't exist
|
||||
if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
|
||||
if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
|
||||
// Portable Git paths exist (portable uses second path: Git/bin/bash.exe)
|
||||
if (pathStr === bashPath) return true
|
||||
return false
|
||||
})
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(gitPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
})
|
||||
|
||||
it('should handle Git installed via Scoop', () => {
|
||||
// Scoop typically installs to %USERPROFILE%\scoop\apps\git\current
|
||||
const gitPath = 'C:\\Users\\User\\scoop\\apps\\git\\current\\cmd\\git.exe'
|
||||
const bashPath = 'C:\\Users\\User\\scoop\\apps\\git\\current\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => {
|
||||
const pathStr = p?.toString() || ''
|
||||
// Common paths don't exist
|
||||
if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
|
||||
if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
|
||||
// Scoop bash path exists (standard structure: cmd -> bin)
|
||||
if (pathStr === bashPath) return true
|
||||
return false
|
||||
})
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(gitPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('autoDiscoverGitBash', () => {
|
||||
const originalEnvVar = process.env.CLAUDE_CODE_GIT_BASH_PATH
|
||||
|
||||
beforeEach(() => {
|
||||
vi.mocked(configManager.get).mockReset()
|
||||
vi.mocked(configManager.set).mockReset()
|
||||
delete process.env.CLAUDE_CODE_GIT_BASH_PATH
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
// Restore original environment variable
|
||||
if (originalEnvVar !== undefined) {
|
||||
process.env.CLAUDE_CODE_GIT_BASH_PATH = originalEnvVar
|
||||
} else {
|
||||
delete process.env.CLAUDE_CODE_GIT_BASH_PATH
|
||||
}
|
||||
})
|
||||
|
||||
/**
|
||||
* Helper to mock fs.existsSync with a set of valid paths
|
||||
*/
|
||||
const mockExistingPaths = (...validPaths: string[]) => {
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => validPaths.includes(p as string))
|
||||
}
|
||||
|
||||
describe('with no existing config path', () => {
|
||||
it('should discover and persist Git Bash path when not configured', () => {
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(undefined)
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
mockExistingPaths(gitPath, bashPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
|
||||
})
|
||||
|
||||
it('should return null and not persist when Git Bash is not found', () => {
|
||||
vi.mocked(configManager.get).mockReturnValue(undefined)
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false)
|
||||
vi.mocked(execFileSync).mockImplementation(() => {
|
||||
throw new Error('Not found')
|
||||
})
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBeNull()
|
||||
expect(configManager.set).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('environment variable precedence', () => {
|
||||
it('should use env var over valid config path', () => {
|
||||
const envPath = 'C:\\EnvGit\\bin\\bash.exe'
|
||||
const configPath = 'C:\\ConfigGit\\bin\\bash.exe'
|
||||
|
||||
process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
|
||||
vi.mocked(configManager.get).mockReturnValue(configPath)
|
||||
mockExistingPaths(envPath, configPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
// Env var should take precedence
|
||||
expect(result).toBe(envPath)
|
||||
// Should not persist env var path (it's a runtime override)
|
||||
expect(configManager.set).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should fall back to config path when env var is invalid', () => {
|
||||
const envPath = 'C:\\Invalid\\bash.exe'
|
||||
const configPath = 'C:\\ConfigGit\\bin\\bash.exe'
|
||||
|
||||
process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
|
||||
vi.mocked(configManager.get).mockReturnValue(configPath)
|
||||
// Env path is invalid (doesn't exist), only config path exists
|
||||
mockExistingPaths(configPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
// Should fall back to config path
|
||||
expect(result).toBe(configPath)
|
||||
expect(configManager.set).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should fall back to auto-discovery when both env var and config are invalid', () => {
|
||||
const envPath = 'C:\\InvalidEnv\\bash.exe'
|
||||
const configPath = 'C:\\InvalidConfig\\bash.exe'
|
||||
const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
vi.mocked(configManager.get).mockReturnValue(configPath)
|
||||
// Both env and config paths are invalid, only standard Git exists
|
||||
mockExistingPaths(gitPath, discoveredPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBe(discoveredPath)
|
||||
expect(configManager.set).toHaveBeenCalledWith('gitBashPath', discoveredPath)
|
||||
})
|
||||
})
|
||||
|
||||
describe('with valid existing config path', () => {
|
||||
it('should validate and return existing path without re-discovering', () => {
|
||||
const existingPath = 'C:\\CustomGit\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(existingPath)
|
||||
mockExistingPaths(existingPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBe(existingPath)
|
||||
// Should not call findGitBash or persist again
|
||||
expect(configManager.set).not.toHaveBeenCalled()
|
||||
// Should not call execFileSync (which findGitBash would use for discovery)
|
||||
expect(execFileSync).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should not override existing valid config with auto-discovery', () => {
|
||||
const existingPath = 'C:\\CustomGit\\bin\\bash.exe'
|
||||
const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(existingPath)
|
||||
mockExistingPaths(existingPath, discoveredPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBe(existingPath)
|
||||
expect(configManager.set).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('with invalid existing config path', () => {
|
||||
it('should attempt auto-discovery when existing path does not exist', () => {
|
||||
const existingPath = 'C:\\NonExistent\\bin\\bash.exe'
|
||||
const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(existingPath)
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
// Invalid path doesn't exist, but Git is installed at standard location
|
||||
mockExistingPaths(gitPath, discoveredPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
// Should discover and return the new path
|
||||
expect(result).toBe(discoveredPath)
|
||||
// Should persist the discovered path (overwrites invalid)
|
||||
expect(configManager.set).toHaveBeenCalledWith('gitBashPath', discoveredPath)
|
||||
})
|
||||
|
||||
it('should attempt auto-discovery when existing path is not bash.exe', () => {
|
||||
const existingPath = 'C:\\CustomGit\\bin\\git.exe'
|
||||
const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(existingPath)
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
// Invalid path exists but is not bash.exe (validation will fail)
|
||||
// Git is installed at standard location
|
||||
mockExistingPaths(existingPath, gitPath, discoveredPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
// Should discover and return the new path
|
||||
expect(result).toBe(discoveredPath)
|
||||
// Should persist the discovered path (overwrites invalid)
|
||||
expect(configManager.set).toHaveBeenCalledWith('gitBashPath', discoveredPath)
|
||||
})
|
||||
|
||||
it('should return null when existing path is invalid and discovery fails', () => {
|
||||
const existingPath = 'C:\\NonExistent\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(existingPath)
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false)
|
||||
vi.mocked(execFileSync).mockImplementation(() => {
|
||||
throw new Error('Not found')
|
||||
})
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
// Both validation and discovery failed
|
||||
expect(result).toBeNull()
|
||||
// Should not persist when discovery fails
|
||||
expect(configManager.set).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('config persistence verification', () => {
|
||||
it('should persist discovered path with correct config key', () => {
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(undefined)
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
mockExistingPaths(gitPath, bashPath)
|
||||
|
||||
autoDiscoverGitBash()
|
||||
|
||||
// Verify the exact call to configManager.set
|
||||
expect(configManager.set).toHaveBeenCalledTimes(1)
|
||||
expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
|
||||
})
|
||||
|
||||
it('should persist on each discovery when config remains undefined', () => {
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(undefined)
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
mockExistingPaths(gitPath, bashPath)
|
||||
|
||||
autoDiscoverGitBash()
|
||||
autoDiscoverGitBash()
|
||||
|
||||
// Each call discovers and persists since config remains undefined (mocked)
|
||||
expect(configManager.set).toHaveBeenCalledTimes(2)
|
||||
})
|
||||
})
|
||||
|
||||
describe('real-world scenarios', () => {
|
||||
it('should discover and persist standard Git for Windows installation', () => {
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(undefined)
|
||||
process.env.ProgramFiles = 'C:\\Program Files'
|
||||
mockExistingPaths(gitPath, bashPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
|
||||
})
|
||||
|
||||
it('should discover portable Git via where.exe and persist', () => {
|
||||
const gitPath = 'D:\\PortableApps\\Git\\bin\\git.exe'
|
||||
const bashPath = 'D:\\PortableApps\\Git\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(undefined)
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => {
|
||||
const pathStr = p?.toString() || ''
|
||||
// Common git paths don't exist
|
||||
if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
|
||||
if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
|
||||
// Portable bash path exists
|
||||
if (pathStr === bashPath) return true
|
||||
return false
|
||||
})
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(gitPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
|
||||
})
|
||||
|
||||
it('should respect user-configured path over auto-discovery', () => {
|
||||
const userConfiguredPath = 'D:\\MyGit\\bin\\bash.exe'
|
||||
const systemPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(configManager.get).mockReturnValue(userConfiguredPath)
|
||||
mockExistingPaths(userConfiguredPath, systemPath)
|
||||
|
||||
const result = autoDiscoverGitBash()
|
||||
|
||||
expect(result).toBe(userConfiguredPath)
|
||||
expect(configManager.set).not.toHaveBeenCalled()
|
||||
// Verify findGitBash was not called for discovery
|
||||
expect(execFileSync).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -1,10 +1,13 @@
import { loggerService } from '@logger'
import type { GitBashPathInfo, GitBashPathSource } from '@shared/config/constant'
import { HOME_CHERRY_DIR } from '@shared/config/constant'
import { spawn } from 'child_process'
import { execFileSync, spawn } from 'child_process'
import fs from 'fs'
import os from 'os'
import path from 'path'

import { isWin } from '../constant'
import { ConfigKeys, configManager } from '../services/ConfigManager'
import { getResourcePath } from '.'

const logger = loggerService.withContext('Utils:Process')
@ -39,7 +42,7 @@ export function runInstallScript(scriptPath: string): Promise<void> {
}

export async function getBinaryName(name: string): Promise<string> {
  if (process.platform === 'win32') {
  if (isWin) {
    return `${name}.exe`
  }
  return name
@ -58,5 +61,243 @@ export async function getBinaryPath(name?: string): Promise<string> {

export async function isBinaryExists(name: string): Promise<boolean> {
  const cmd = await getBinaryPath(name)
  return await fs.existsSync(cmd)
  return fs.existsSync(cmd)
}

/**
 * Find executable in common paths or PATH environment variable
 * Based on Claude Code's implementation with security checks
 * @param name - Name of the executable to find (without .exe extension)
 * @returns Full path to the executable or null if not found
 */
export function findExecutable(name: string): string | null {
  // This implementation uses where.exe which is Windows-only
  if (!isWin) {
    return null
  }

  // Special handling for git - check common installation paths first
  if (name === 'git') {
    const commonGitPaths = [
      path.join(process.env.ProgramFiles || 'C:\\Program Files', 'Git', 'cmd', 'git.exe'),
      path.join(process.env['ProgramFiles(x86)'] || 'C:\\Program Files (x86)', 'Git', 'cmd', 'git.exe')
    ]

    for (const gitPath of commonGitPaths) {
      if (fs.existsSync(gitPath)) {
        logger.debug(`Found ${name} at common path`, { path: gitPath })
        return gitPath
      }
    }
  }

  // Use where.exe to find executable in PATH
  // Use execFileSync to prevent command injection
  try {
    // Add .exe extension for more precise matching on Windows
    const executableName = `${name}.exe`
    const result = execFileSync('where.exe', [executableName], {
      encoding: 'utf8',
      stdio: ['pipe', 'pipe', 'pipe']
    })

    // Handle both Windows (\r\n) and Unix (\n) line endings
    const paths = result.trim().split(/\r?\n/).filter(Boolean)
    const currentDir = process.cwd().toLowerCase()

    // Security check: skip executables in current directory
    for (const exePath of paths) {
      // Trim whitespace from where.exe output
      const cleanPath = exePath.trim()
      const resolvedPath = path.resolve(cleanPath).toLowerCase()
      const execDir = path.dirname(resolvedPath).toLowerCase()

      // Skip if in current directory or subdirectory (potential malware)
      if (execDir === currentDir || execDir.startsWith(currentDir + path.sep)) {
        logger.warn('Skipping potentially malicious executable in current directory', {
          path: cleanPath
        })
        continue
      }

      logger.debug(`Found ${name} via where.exe`, { path: cleanPath })
      return cleanPath
    }

    return null
  } catch (error) {
    logger.debug(`where.exe ${name} failed`, { error })
    return null
  }
}

/**
 * Find Git Bash executable on Windows
 * @param customPath - Optional custom path from config
 * @returns Full path to bash.exe or null if not found
 */
export function findGitBash(customPath?: string | null): string | null {
  // Git Bash is Windows-only
  if (!isWin) {
    return null
  }

  // 1. Check custom path from config first
  if (customPath) {
    const validated = validateGitBashPath(customPath)
    if (validated) {
      logger.debug('Using custom Git Bash path from config', { path: validated })
      return validated
    }
    logger.warn('Custom Git Bash path provided but invalid', { path: customPath })
  }

  // 2. Check environment variable override
  const envOverride = process.env.CLAUDE_CODE_GIT_BASH_PATH
  if (envOverride) {
    const validated = validateGitBashPath(envOverride)
    if (validated) {
      logger.debug('Using CLAUDE_CODE_GIT_BASH_PATH override for bash.exe', { path: validated })
      return validated
    }
    logger.warn('CLAUDE_CODE_GIT_BASH_PATH provided but path is invalid', { path: envOverride })
  }

  // 3. Find git.exe and derive bash.exe path
  const gitPath = findExecutable('git')
  if (gitPath) {
    // Try multiple possible locations for bash.exe relative to git.exe
    // Different Git installations have different directory structures
    const possibleBashPaths = [
      path.join(gitPath, '..', '..', 'bin', 'bash.exe'), // Standard Git: git.exe at Git/cmd/ -> navigate up 2 levels -> then bin/bash.exe
      path.join(gitPath, '..', 'bash.exe'), // Portable Git: git.exe at Git/bin/ -> bash.exe in same directory
      path.join(gitPath, '..', '..', 'usr', 'bin', 'bash.exe') // MSYS2 Git: git.exe at msys64/usr/bin/ -> navigate up 2 levels -> then usr/bin/bash.exe
    ]

    for (const bashPath of possibleBashPaths) {
      const resolvedBashPath = path.resolve(bashPath)
      if (fs.existsSync(resolvedBashPath)) {
        logger.debug('Found bash.exe via git.exe path derivation', { path: resolvedBashPath })
        return resolvedBashPath
      }
    }

    logger.debug('bash.exe not found at expected locations relative to git.exe', {
      gitPath,
      checkedPaths: possibleBashPaths.map((p) => path.resolve(p))
    })
  }

  // 4. Fallback: check common Git Bash paths directly
  const commonBashPaths = [
    path.join(process.env.ProgramFiles || 'C:\\Program Files', 'Git', 'bin', 'bash.exe'),
    path.join(process.env['ProgramFiles(x86)'] || 'C:\\Program Files (x86)', 'Git', 'bin', 'bash.exe'),
    ...(process.env.LOCALAPPDATA ? [path.join(process.env.LOCALAPPDATA, 'Programs', 'Git', 'bin', 'bash.exe')] : [])
  ]

  for (const bashPath of commonBashPaths) {
    if (fs.existsSync(bashPath)) {
      logger.debug('Found bash.exe at common path', { path: bashPath })
      return bashPath
    }
  }

  logger.debug('Git Bash not found - checked git derivation and common paths')
  return null
}

export function validateGitBashPath(customPath?: string | null): string | null {
  if (!customPath) {
    return null
  }

  const resolved = path.resolve(customPath)

  if (!fs.existsSync(resolved)) {
    logger.warn('Custom Git Bash path does not exist', { path: resolved })
    return null
  }

  const isExe = resolved.toLowerCase().endsWith('bash.exe')
  if (!isExe) {
    logger.warn('Custom Git Bash path is not bash.exe', { path: resolved })
    return null
  }

  logger.debug('Validated custom Git Bash path', { path: resolved })
  return resolved
}

/**
 * Auto-discover and persist Git Bash path if not already configured
 * Only called when Git Bash is actually needed
 *
 * Precedence order:
 * 1. CLAUDE_CODE_GIT_BASH_PATH environment variable (highest - runtime override)
 * 2. Configured path from settings (manual or auto)
 * 3. Auto-discovery via findGitBash (only if no valid config exists)
 */
export function autoDiscoverGitBash(): string | null {
  if (!isWin) {
    return null
  }

  // 1. Check environment variable override first (highest priority)
  const envOverride = process.env.CLAUDE_CODE_GIT_BASH_PATH
  if (envOverride) {
    const validated = validateGitBashPath(envOverride)
    if (validated) {
      logger.debug('Using CLAUDE_CODE_GIT_BASH_PATH override', { path: validated })
      return validated
    }
    logger.warn('CLAUDE_CODE_GIT_BASH_PATH provided but path is invalid', { path: envOverride })
  }

  // 2. Check if a path is already configured
  const existingPath = configManager.get<string | undefined>(ConfigKeys.GitBashPath)
  const existingSource = configManager.get<GitBashPathSource | undefined>(ConfigKeys.GitBashPathSource)

  if (existingPath) {
    const validated = validateGitBashPath(existingPath)
    if (validated) {
      return validated
    }
    // Existing path is invalid, try to auto-discover
    logger.warn('Existing Git Bash path is invalid, attempting auto-discovery', {
      path: existingPath,
      source: existingSource
    })
  }

  // 3. Try to find Git Bash via auto-discovery
  const discoveredPath = findGitBash()
  if (discoveredPath) {
    // Persist the discovered path with 'auto' source
    configManager.set(ConfigKeys.GitBashPath, discoveredPath)
    configManager.set(ConfigKeys.GitBashPathSource, 'auto')
    logger.info('Auto-discovered Git Bash path', { path: discoveredPath })
  }

  return discoveredPath
}

/**
 * Get Git Bash path info including source
 * If no path is configured, triggers auto-discovery first
 */
export function getGitBashPathInfo(): GitBashPathInfo {
  if (!isWin) {
    return { path: null, source: null }
  }

  let path = configManager.get<string | null>(ConfigKeys.GitBashPath) ?? null
  let source = configManager.get<GitBashPathSource | null>(ConfigKeys.GitBashPathSource) ?? null

  // If no path configured, trigger auto-discovery (handles upgrade from old versions)
  if (!path) {
    path = autoDiscoverGitBash()
    source = path ? 'auto' : null
  }

  return { path, source }
}
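Taken together, the helpers above resolve Git Bash with a fixed precedence: the CLAUDE_CODE_GIT_BASH_PATH override, then the validated configured path, then discovery from git.exe or common install locations, with the discovered path persisted. A hedged sketch of how a main-process caller might consume this (the import alias and the cmd.exe fallback are assumptions for illustration):

// Illustrative caller-side sketch; import path and fallback behaviour are assumed.
import { autoDiscoverGitBash, getGitBashPathInfo } from '@main/utils/process'

function resolveWindowsShell(): string {
  // Returns the env override, a validated configured path, or a freshly
  // discovered (and persisted) bash.exe path; null means nothing was found.
  const bashPath = autoDiscoverGitBash()
  if (bashPath) {
    return bashPath
  }
  // Assumed fallback when Git Bash is unavailable
  return process.env.ComSpec ?? 'cmd.exe'
}

// getGitBashPathInfo() additionally reports where the path came from,
// e.g. { path: 'C:\\Program Files\\Git\\bin\\bash.exe', source: 'auto' }
const info = getGitBashPathInfo()
console.log(info.path, info.source)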
@ -2,9 +2,10 @@ import type { PermissionUpdate } from '@anthropic-ai/claude-agent-sdk'
import { electronAPI } from '@electron-toolkit/preload'
import type { SpanEntity, TokenUsage } from '@mcp-trace/trace-core'
import type { SpanContext } from '@opentelemetry/api'
import type { TerminalConfig, UpgradeChannel } from '@shared/config/constant'
import type { GitBashPathInfo, TerminalConfig, UpgradeChannel } from '@shared/config/constant'
import type { LogLevel, LogSourceWithContext } from '@shared/config/logger'
import type { FileChangeEvent, WebviewKeyEvent } from '@shared/config/types'
import type { MCPServerLogEntry } from '@shared/config/types'
import { IpcChannel } from '@shared/IpcChannel'
import type { Notification } from '@types'
import type {
@ -123,7 +124,11 @@ const api = {
    getDeviceType: () => ipcRenderer.invoke(IpcChannel.System_GetDeviceType),
    getHostname: () => ipcRenderer.invoke(IpcChannel.System_GetHostname),
    getCpuName: () => ipcRenderer.invoke(IpcChannel.System_GetCpuName),
    checkGitBash: (): Promise<boolean> => ipcRenderer.invoke(IpcChannel.System_CheckGitBash)
    checkGitBash: (): Promise<boolean> => ipcRenderer.invoke(IpcChannel.System_CheckGitBash),
    getGitBashPath: (): Promise<string | null> => ipcRenderer.invoke(IpcChannel.System_GetGitBashPath),
    getGitBashPathInfo: (): Promise<GitBashPathInfo> => ipcRenderer.invoke(IpcChannel.System_GetGitBashPathInfo),
    setGitBashPath: (newPath: string | null): Promise<boolean> =>
      ipcRenderer.invoke(IpcChannel.System_SetGitBashPath, newPath)
  },
  devTools: {
    toggle: () => ipcRenderer.invoke(IpcChannel.System_ToggleDevTools)
@ -221,6 +226,10 @@ const api = {
    startFileWatcher: (dirPath: string, config?: any) =>
      ipcRenderer.invoke(IpcChannel.File_StartWatcher, dirPath, config),
    stopFileWatcher: () => ipcRenderer.invoke(IpcChannel.File_StopWatcher),
    pauseFileWatcher: () => ipcRenderer.invoke(IpcChannel.File_PauseWatcher),
    resumeFileWatcher: () => ipcRenderer.invoke(IpcChannel.File_ResumeWatcher),
    batchUploadMarkdown: (filePaths: string[], targetPath: string) =>
      ipcRenderer.invoke(IpcChannel.File_BatchUploadMarkdown, filePaths, targetPath),
    onFileChange: (callback: (data: FileChangeEvent) => void) => {
      const listener = (_event: Electron.IpcRendererEvent, data: any) => {
        if (data && typeof data === 'object') {
@ -368,7 +377,16 @@ const api = {
    },
    abortTool: (callId: string) => ipcRenderer.invoke(IpcChannel.Mcp_AbortTool, callId),
    getServerVersion: (server: MCPServer): Promise<string | null> =>
      ipcRenderer.invoke(IpcChannel.Mcp_GetServerVersion, server)
      ipcRenderer.invoke(IpcChannel.Mcp_GetServerVersion, server),
    getServerLogs: (server: MCPServer): Promise<MCPServerLogEntry[]> =>
      ipcRenderer.invoke(IpcChannel.Mcp_GetServerLogs, server),
    onServerLog: (callback: (log: MCPServerLogEntry & { serverId?: string }) => void) => {
      const listener = (_event: Electron.IpcRendererEvent, log: MCPServerLogEntry & { serverId?: string }) => {
        callback(log)
      }
      ipcRenderer.on(IpcChannel.Mcp_ServerLog, listener)
      return () => ipcRenderer.off(IpcChannel.Mcp_ServerLog, listener)
    }
  },
  python: {
    execute: (script: string, context?: Record<string, any>, timeout?: number) =>
@ -420,6 +438,8 @@ const api = {
      ipcRenderer.invoke(IpcChannel.Webview_SetOpenLinkExternal, webviewId, isExternal),
    setSpellCheckEnabled: (webviewId: number, isEnable: boolean) =>
      ipcRenderer.invoke(IpcChannel.Webview_SetSpellCheckEnabled, webviewId, isEnable),
    printToPDF: (webviewId: number) => ipcRenderer.invoke(IpcChannel.Webview_PrintToPDF, webviewId),
    saveAsHTML: (webviewId: number) => ipcRenderer.invoke(IpcChannel.Webview_SaveAsHTML, webviewId),
    onFindShortcut: (callback: (payload: WebviewKeyEvent) => void) => {
      const listener = (_event: Electron.IpcRendererEvent, payload: WebviewKeyEvent) => {
        callback(payload)
@ -452,7 +472,10 @@ const api = {
      ipcRenderer.invoke(IpcChannel.Selection_ProcessAction, actionItem, isFullScreen),
    closeActionWindow: () => ipcRenderer.invoke(IpcChannel.Selection_ActionWindowClose),
    minimizeActionWindow: () => ipcRenderer.invoke(IpcChannel.Selection_ActionWindowMinimize),
    pinActionWindow: (isPinned: boolean) => ipcRenderer.invoke(IpcChannel.Selection_ActionWindowPin, isPinned)
    pinActionWindow: (isPinned: boolean) => ipcRenderer.invoke(IpcChannel.Selection_ActionWindowPin, isPinned),
    // [Windows only] Electron bug workaround - can be removed once https://github.com/electron/electron/issues/48554 is fixed
    resizeActionWindow: (deltaX: number, deltaY: number, direction: string) =>
      ipcRenderer.invoke(IpcChannel.Selection_ActionWindowResize, deltaX, deltaY, direction)
  },
  agentTools: {
    respondToPermission: (payload: {
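The new system entries expose Git Bash resolution to the renderer over IPC. A hedged sketch of renderer-side usage (it assumes the methods are reachable through the preload bridge object shown above; the settings wiring is illustrative):

// Illustrative renderer-side sketch; the bridge object shape mirrors the preload `api` above.
const api = (window as any).api // typed via the preload bridge in the real app

async function loadGitBashSetting() {
  // GitBashPathInfo is assumed to be { path: string | null, source: string | null }
  const info = await api.system.getGitBashPathInfo()
  if (!info.path) {
    console.warn('Git Bash not found; Windows-only agent features may be limited')
  }
  return info
}

async function saveGitBashSetting(newPath: string | null) {
  // Passing null is assumed to clear the manual override; the main process validates the path.
  const ok = await api.system.setGitBashPath(newPath)
  if (!ok) {
    throw new Error('Selected path is not a valid bash.exe')
  }
}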
@ -91,7 +91,9 @@ export default class ModernAiProvider {
    if (this.isModel(modelOrProvider)) {
      // The argument is a Model
      this.model = modelOrProvider
      this.actualProvider = provider ? adaptProvider({ provider }) : getActualProvider(modelOrProvider)
      this.actualProvider = provider
        ? adaptProvider({ provider, model: modelOrProvider })
        : getActualProvider(modelOrProvider)
      // Only store the config here; do not create the executor ahead of time
      this.config = providerToAiSdkConfig(this.actualProvider, modelOrProvider)
    } else {
@ -2,9 +2,10 @@ import { loggerService } from '@logger'
import {
  getModelSupportedVerbosity,
  isFunctionCallingModel,
  isNotSupportTemperatureAndTopP,
  isOpenAIModel,
  isSupportFlexServiceTierModel
  isSupportFlexServiceTierModel,
  isSupportTemperatureModel,
  isSupportTopPModel
} from '@renderer/config/models'
import { REFERENCE_PROMPT } from '@renderer/config/prompts'
import { getLMStudioKeepAliveTime } from '@renderer/hooks/useLMStudio'
@ -200,7 +201,7 @@ export abstract class BaseApiClient<
  }

  public getTemperature(assistant: Assistant, model: Model): number | undefined {
    if (isNotSupportTemperatureAndTopP(model)) {
    if (!isSupportTemperatureModel(model)) {
      return undefined
    }
    const assistantSettings = getAssistantSettings(assistant)
@ -208,7 +209,7 @@ export abstract class BaseApiClient<
  }

  public getTopP(assistant: Assistant, model: Model): number | undefined {
    if (isNotSupportTemperatureAndTopP(model)) {
    if (!isSupportTopPModel(model)) {
      return undefined
    }
    const assistantSettings = getAssistantSettings(assistant)
@ -124,7 +124,8 @@ export class AnthropicAPIClient extends BaseApiClient<

  override async listModels(): Promise<Anthropic.ModelInfo[]> {
    const sdk = (await this.getSdkInstance()) as Anthropic
    const response = await sdk.models.list()
    // prevent auto appended /v1. It's included in baseUrl.
    const response = await sdk.models.list({ path: '/models' })
    return response.data
  }
@ -173,13 +173,15 @@ export class GeminiAPIClient extends BaseApiClient<
      return this.sdkInstance
    }

    const apiVersion = this.getApiVersion()

    this.sdkInstance = new GoogleGenAI({
      vertexai: false,
      apiKey: this.apiKey,
      apiVersion: this.getApiVersion(),
      apiVersion,
      httpOptions: {
        baseUrl: this.getBaseURL(),
        apiVersion: this.getApiVersion(),
        apiVersion,
        headers: {
          ...this.provider.extra_headers
        }
@ -200,7 +202,7 @@ export class GeminiAPIClient extends BaseApiClient<
      return trailingVersion
    }

    return 'v1beta'
    return ''
  }

  /**
@ -10,7 +10,7 @@ import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
import {
  findTokenLimit,
  GEMINI_FLASH_MODEL_REGEX,
  getThinkModelType,
  getModelSupportedReasoningEffortOptions,
  isDeepSeekHybridInferenceModel,
  isDoubaoThinkingAutoModel,
  isGPT5SeriesModel,
@ -33,7 +33,6 @@ import {
  isSupportedThinkingTokenQwenModel,
  isSupportedThinkingTokenZhipuModel,
  isVisionModel,
  MODEL_SUPPORTED_REASONING_EFFORT,
  ZHIPU_RESULT_TOKENS
} from '@renderer/config/models'
import { mapLanguageToQwenMTModel } from '@renderer/config/translate'
@ -143,6 +142,10 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
      return { thinking: { type: reasoningEffort ? 'enabled' : 'disabled' } }
    }

    if (reasoningEffort === 'default') {
      return {}
    }

    if (!reasoningEffort) {
      // DeepSeek hybrid inference models, v3.1 and maybe more in the future
      // Different providers control thinking in different ways; handle them uniformly here
@ -304,16 +307,15 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
    // Grok models/Perplexity models/OpenAI models
    if (isSupportedReasoningEffortModel(model)) {
      // Check whether the model supports the selected option
      const modelType = getThinkModelType(model)
      const supportedOptions = MODEL_SUPPORTED_REASONING_EFFORT[modelType]
      if (supportedOptions.includes(reasoningEffort)) {
      const supportedOptions = getModelSupportedReasoningEffortOptions(model)?.filter((option) => option !== 'default')
      if (supportedOptions?.includes(reasoningEffort)) {
        return {
          reasoning_effort: reasoningEffort
        }
      } else {
        // If the option is not supported, fall back to the first supported value
        return {
          reasoning_effort: supportedOptions[0]
          reasoning_effort: supportedOptions?.[0]
        }
      }
    }
@ -25,7 +25,7 @@ import type {
  OpenAISdkRawOutput,
  ReasoningEffortOptionalParams
} from '@renderer/types/sdk'
import { formatApiHost, withoutTrailingSlash } from '@renderer/utils/api'
import { withoutTrailingSlash } from '@renderer/utils/api'
import { isOllamaProvider } from '@renderer/utils/provider'

import { BaseApiClient } from '../BaseApiClient'
@ -49,8 +49,9 @@ export abstract class OpenAIBaseClient<
  }

  // Only applies to OpenAI
  override getBaseURL(isSupportedAPIVerion: boolean = true): string {
    return formatApiHost(this.provider.apiHost, isSupportedAPIVerion)
  override getBaseURL(): string {
    // apiHost is formatted when called by AiProvider
    return this.provider.apiHost
  }

  override async generateImage({
@ -68,7 +69,7 @@ export abstract class OpenAIBaseClient<
    const sdk = await this.getSdkInstance()
    const response = (await sdk.request({
      method: 'post',
      path: '/images/generations',
      path: '/v1/images/generations',
      signal,
      body: {
        model,
@ -87,7 +88,11 @@ export abstract class OpenAIBaseClient<
  }

  override async getEmbeddingDimensions(model: Model): Promise<number> {
    const sdk = await this.getSdkInstance()
    let sdk: OpenAI = await this.getSdkInstance()
    if (isOllamaProvider(this.provider)) {
      const embedBaseUrl = `${this.provider.apiHost.replace(/(\/(api|v1))\/?$/, '')}/v1`
      sdk = sdk.withOptions({ baseURL: embedBaseUrl })
    }

    const data = await sdk.embeddings.create({
      model: model.id,
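The Ollama branch above rewrites the configured host so embeddings always hit the OpenAI-compatible /v1 endpoint. A small standalone sketch of that URL normalization (example hosts are illustrative):

// Strip a trailing /api or /v1 (with an optional trailing slash), then re-append /v1.
function toOllamaOpenAiBaseUrl(apiHost: string): string {
  return `${apiHost.replace(/(\/(api|v1))\/?$/, '')}/v1`
}

// toOllamaOpenAiBaseUrl('http://localhost:11434')     -> 'http://localhost:11434/v1'
// toOllamaOpenAiBaseUrl('http://localhost:11434/api') -> 'http://localhost:11434/v1'
// toOllamaOpenAiBaseUrl('http://localhost:11434/v1/') -> 'http://localhost:11434/v1'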
@ -100,6 +105,17 @@ export abstract class OpenAIBaseClient<
|
||||
override async listModels(): Promise<OpenAI.Models.Model[]> {
|
||||
try {
|
||||
const sdk = await this.getSdkInstance()
|
||||
if (this.provider.id === 'openrouter') {
|
||||
// https://openrouter.ai/docs/api/api-reference/embeddings/list-embeddings-models
|
||||
const embedBaseUrl = 'https://openrouter.ai/api/v1/embeddings'
|
||||
const embedSdk = sdk.withOptions({ baseURL: embedBaseUrl })
|
||||
const modelPromise = sdk.models.list()
|
||||
const embedModelPromise = embedSdk.models.list()
|
||||
const [modelResponse, embedModelResponse] = await Promise.all([modelPromise, embedModelPromise])
|
||||
const models = [...modelResponse.data, ...embedModelResponse.data]
|
||||
const uniqueModels = Array.from(new Map(models.map((model) => [model.id, model])).values())
|
||||
return uniqueModels.filter(isSupportedModel)
|
||||
}
|
||||
if (this.provider.id === 'github') {
|
||||
// GitHub Models 其 models 和 chat completions 两个接口的 baseUrl 不一样
|
||||
const baseUrl = 'https://models.github.ai/catalog/'
|
||||
@ -118,7 +134,7 @@ export abstract class OpenAIBaseClient<
|
||||
}
|
||||
|
||||
if (isOllamaProvider(this.provider)) {
|
||||
const baseUrl = withoutTrailingSlash(this.getBaseURL(false))
|
||||
const baseUrl = withoutTrailingSlash(this.getBaseURL())
|
||||
.replace(/\/v1$/, '')
|
||||
.replace(/\/api$/, '')
|
||||
const response = await fetch(`${baseUrl}/api/tags`, {
|
||||
@ -173,6 +189,7 @@ export abstract class OpenAIBaseClient<
|
||||
|
||||
let apiKeyForSdkInstance = this.apiKey
|
||||
let baseURLForSdkInstance = this.getBaseURL()
|
||||
logger.debug('baseURLForSdkInstance', { baseURLForSdkInstance })
|
||||
let headersForSdkInstance = {
|
||||
...this.defaultHeaders(),
|
||||
...this.provider.extra_headers
|
||||
@ -184,7 +201,7 @@ export abstract class OpenAIBaseClient<
|
||||
// this.provider.apiKey不允许修改
|
||||
// this.provider.apiKey = token
|
||||
apiKeyForSdkInstance = token
|
||||
baseURLForSdkInstance = this.getBaseURL(false)
|
||||
baseURLForSdkInstance = this.getBaseURL()
|
||||
headersForSdkInstance = {
|
||||
...headersForSdkInstance,
|
||||
...COPILOT_DEFAULT_HEADERS
|
||||
|
||||
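For context, the OpenRouter branch above merges the chat-model and embedding-model lists and de-duplicates them by id. A minimal standalone sketch of that dedupe step, assuming a simplified id-only shape rather than the SDK's real Model type:

type ModelLike = { id: string }
function dedupeById<T extends ModelLike>(lists: T[][]): T[] {
  // Later entries with the same id overwrite earlier ones, mirroring new Map(...).values()
  return Array.from(new Map(lists.flat().map((m) => [m.id, m])).values())
}
// dedupeById([[{ id: 'a' }], [{ id: 'a' }, { id: 'b' }]]) -> [{ id: 'a' }, { id: 'b' }]
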
@ -122,6 +122,7 @@ export class OpenAIResponseAPIClient extends OpenAIBaseClient<
|
||||
if (this.sdkInstance) {
|
||||
return this.sdkInstance
|
||||
}
|
||||
const baseUrl = this.getBaseURL()
|
||||
|
||||
if (this.provider.id === 'azure-openai' || this.provider.type === 'azure-openai') {
|
||||
return new AzureOpenAI({
|
||||
@ -134,7 +135,7 @@ export class OpenAIResponseAPIClient extends OpenAIBaseClient<
|
||||
return new OpenAI({
|
||||
dangerouslyAllowBrowser: true,
|
||||
apiKey: this.apiKey,
|
||||
baseURL: this.getBaseURL(),
|
||||
baseURL: baseUrl,
|
||||
defaultHeaders: {
|
||||
...this.defaultHeaders(),
|
||||
...this.provider.extra_headers
|
||||
|
||||
@ -2,7 +2,6 @@ import { loggerService } from '@logger'
|
||||
import { ApiClientFactory } from '@renderer/aiCore/legacy/clients/ApiClientFactory'
|
||||
import type { BaseApiClient } from '@renderer/aiCore/legacy/clients/BaseApiClient'
|
||||
import { isDedicatedImageGenerationModel, isFunctionCallingModel } from '@renderer/config/models'
|
||||
import { getProviderByModel } from '@renderer/services/AssistantService'
|
||||
import { withSpanResult } from '@renderer/services/SpanManagerService'
|
||||
import type { StartSpanParams } from '@renderer/trace/types/ModelSpanEntity'
|
||||
import type { GenerateImageParams, Model, Provider } from '@renderer/types'
|
||||
@ -160,9 +159,6 @@ export default class AiProvider {
|
||||
public async getEmbeddingDimensions(model: Model): Promise<number> {
|
||||
try {
|
||||
// Use the SDK instance to test embedding capabilities
|
||||
if (this.apiClient instanceof OpenAIResponseAPIClient && getProviderByModel(model).type === 'azure-openai') {
|
||||
this.apiClient = this.apiClient.getClient(model) as BaseApiClient
|
||||
}
|
||||
const dimensions = await this.apiClient.getEmbeddingDimensions(model)
|
||||
return dimensions
|
||||
} catch (error) {
|
||||
|
||||
@ -7,7 +7,6 @@ import type { Chunk } from '@renderer/types/chunk'
|
||||
import { isOllamaProvider, isSupportEnableThinkingProvider } from '@renderer/utils/provider'
|
||||
import type { LanguageModelMiddleware } from 'ai'
|
||||
import { extractReasoningMiddleware, simulateStreamingMiddleware } from 'ai'
|
||||
import { isEmpty } from 'lodash'
|
||||
|
||||
import { getAiSdkProviderId } from '../provider/factory'
|
||||
import { isOpenRouterGeminiGenerateImageModel } from '../utils/image'
|
||||
@ -16,7 +15,6 @@ import { openrouterGenerateImageMiddleware } from './openrouterGenerateImageMidd
|
||||
import { openrouterReasoningMiddleware } from './openrouterReasoningMiddleware'
|
||||
import { qwenThinkingMiddleware } from './qwenThinkingMiddleware'
|
||||
import { skipGeminiThoughtSignatureMiddleware } from './skipGeminiThoughtSignatureMiddleware'
|
||||
import { toolChoiceMiddleware } from './toolChoiceMiddleware'
|
||||
|
||||
const logger = loggerService.withContext('AiSdkMiddlewareBuilder')
|
||||
|
||||
@ -136,15 +134,6 @@ export class AiSdkMiddlewareBuilder {
export function buildAiSdkMiddlewares(config: AiSdkMiddlewareConfig): LanguageModelMiddleware[] {
const builder = new AiSdkMiddlewareBuilder()

// 0. Forced knowledge-base middleware (must come first so the knowledge base is called on the first round)
if (!isEmpty(config.assistant?.knowledge_bases?.map((base) => base.id)) && config.knowledgeRecognition !== 'on') {
builder.add({
name: 'force-knowledge-first',
middleware: toolChoiceMiddleware('builtin_knowledge_search')
})
logger.debug('Added toolChoice middleware to force knowledge base search on first round')
}

// 1. Add provider-specific middlewares
if (config.provider) {
addProviderSpecificMiddlewares(builder, config)

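Taken together with the search orchestration plugin changes below, the behavior is: when knowledgeRecognition is not 'on', the (removed) middleware forced the knowledge tool on the first round; when it is 'on', the plugin registers the tool only if intent analysis calls for it. A rough sketch of that decision, with simplified types that are not the project's actual config shape:

type KnowledgeMode = 'on' | 'off'
function knowledgeStrategy(hasKnowledgeBase: boolean, mode: KnowledgeMode): 'forced' | 'intent-based' | 'none' {
  if (!hasKnowledgeBase) return 'none'
  // 'off' -> force builtin_knowledge_search via the toolChoice middleware
  // 'on'  -> let intent analysis decide whether to register the tool
  return mode === 'on' ? 'intent-based' : 'forced'
}
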
@ -31,7 +31,7 @@ import { webSearchToolWithPreExtractedKeywords } from '../tools/WebSearchTool'
|
||||
|
||||
const logger = loggerService.withContext('SearchOrchestrationPlugin')
|
||||
|
||||
const getMessageContent = (message: ModelMessage) => {
|
||||
export const getMessageContent = (message: ModelMessage) => {
|
||||
if (typeof message.content === 'string') return message.content
|
||||
return message.content.reduce((acc, part) => {
|
||||
if (part.type === 'text') {
|
||||
@ -266,14 +266,14 @@ export const searchOrchestrationPlugin = (assistant: Assistant, topicId: string)
// Decide which kinds of search are needed
const knowledgeBaseIds = assistant.knowledge_bases?.map((base) => base.id)
const hasKnowledgeBase = !isEmpty(knowledgeBaseIds)
const knowledgeRecognition = assistant.knowledgeRecognition || 'on'
const knowledgeRecognition = assistant.knowledgeRecognition || 'off'
const globalMemoryEnabled = selectGlobalMemoryEnabled(store.getState())
const shouldWebSearch = !!assistant.webSearchProviderId
const shouldKnowledgeSearch = hasKnowledgeBase && knowledgeRecognition === 'on'
const shouldMemorySearch = globalMemoryEnabled && assistant.enableMemory

// Run intent analysis
if (shouldWebSearch || hasKnowledgeBase) {
if (shouldWebSearch || shouldKnowledgeSearch) {
const analysisResult = await analyzeSearchIntent(lastUserMessage, assistant, {
shouldWebSearch,
shouldKnowledgeSearch,
@ -330,41 +330,25 @@ export const searchOrchestrationPlugin = (assistant: Assistant, topicId: string)
// 📚 Knowledge base search tool configuration
const knowledgeBaseIds = assistant.knowledge_bases?.map((base) => base.id)
const hasKnowledgeBase = !isEmpty(knowledgeBaseIds)
const knowledgeRecognition = assistant.knowledgeRecognition || 'on'
const knowledgeRecognition = assistant.knowledgeRecognition || 'off'
const shouldKnowledgeSearch = hasKnowledgeBase && knowledgeRecognition === 'on'

if (hasKnowledgeBase) {
if (knowledgeRecognition === 'off') {
// off mode: add the knowledge search tool directly, using the user message as the search keywords
if (shouldKnowledgeSearch) {
// on mode: decide whether to add the tool based on the intent analysis result
const needsKnowledgeSearch =
analysisResult?.knowledge &&
analysisResult.knowledge.question &&
analysisResult.knowledge.question[0] !== 'not_needed'

if (needsKnowledgeSearch && analysisResult.knowledge) {
// logger.info('📚 Adding knowledge search tool (intent-based)')
const userMessage = userMessages[context.requestId]
const fallbackKeywords = {
question: [getMessageContent(userMessage) || 'search'],
rewrite: getMessageContent(userMessage) || 'search'
}
// logger.info('📚 Adding knowledge search tool (force mode)')
params.tools['builtin_knowledge_search'] = knowledgeSearchTool(
assistant,
fallbackKeywords,
analysisResult.knowledge,
getMessageContent(userMessage),
topicId
)
// params.toolChoice = { type: 'tool', toolName: 'builtin_knowledge_search' }
} else {
// on mode: decide whether to add the tool based on the intent analysis result
const needsKnowledgeSearch =
analysisResult?.knowledge &&
analysisResult.knowledge.question &&
analysisResult.knowledge.question[0] !== 'not_needed'

if (needsKnowledgeSearch && analysisResult.knowledge) {
// logger.info('📚 Adding knowledge search tool (intent-based)')
const userMessage = userMessages[context.requestId]
params.tools['builtin_knowledge_search'] = knowledgeSearchTool(
assistant,
analysisResult.knowledge,
getMessageContent(userMessage),
topicId
)
}
}
}

@ -109,6 +109,20 @@ const createImageBlock = (
|
||||
...overrides
|
||||
})
|
||||
|
||||
const createThinkingBlock = (
|
||||
messageId: string,
|
||||
overrides: Partial<Omit<ThinkingMessageBlock, 'type' | 'messageId'>> = {}
|
||||
): ThinkingMessageBlock => ({
|
||||
id: overrides.id ?? `thinking-block-${++blockCounter}`,
|
||||
messageId,
|
||||
type: MessageBlockType.THINKING,
|
||||
createdAt: overrides.createdAt ?? new Date(2024, 0, 1, 0, 0, blockCounter).toISOString(),
|
||||
status: overrides.status ?? MessageBlockStatus.SUCCESS,
|
||||
content: overrides.content ?? 'Let me think...',
|
||||
thinking_millsec: overrides.thinking_millsec ?? 1000,
|
||||
...overrides
|
||||
})
|
||||
|
||||
describe('messageConverter', () => {
|
||||
beforeEach(() => {
|
||||
convertFileBlockToFilePartMock.mockReset()
|
||||
@ -137,6 +151,73 @@ describe('messageConverter', () => {
|
||||
})
|
||||
})
|
||||
|
||||
it('extracts base64 data from data URLs and preserves mediaType', async () => {
|
||||
const model = createModel()
|
||||
const message = createMessage('user')
|
||||
message.__mockContent = 'Check this image'
|
||||
message.__mockImageBlocks = [createImageBlock(message.id, { url: 'data:image/png;base64,iVBORw0KGgoAAAANS' })]
|
||||
|
||||
const result = await convertMessageToSdkParam(message, true, model)
|
||||
|
||||
expect(result).toEqual({
|
||||
role: 'user',
|
||||
content: [
|
||||
{ type: 'text', text: 'Check this image' },
|
||||
{ type: 'image', image: 'iVBORw0KGgoAAAANS', mediaType: 'image/png' }
|
||||
]
|
||||
})
|
||||
})
|
||||
|
||||
it('handles data URLs without mediaType gracefully', async () => {
|
||||
const model = createModel()
|
||||
const message = createMessage('user')
|
||||
message.__mockContent = 'Check this'
|
||||
message.__mockImageBlocks = [createImageBlock(message.id, { url: 'data:;base64,AAABBBCCC' })]
|
||||
|
||||
const result = await convertMessageToSdkParam(message, true, model)
|
||||
|
||||
expect(result).toEqual({
|
||||
role: 'user',
|
||||
content: [
|
||||
{ type: 'text', text: 'Check this' },
|
||||
{ type: 'image', image: 'AAABBBCCC' }
|
||||
]
|
||||
})
|
||||
})
|
||||
|
||||
it('skips malformed data URLs without comma separator', async () => {
|
||||
const model = createModel()
|
||||
const message = createMessage('user')
|
||||
message.__mockContent = 'Malformed data url'
|
||||
message.__mockImageBlocks = [createImageBlock(message.id, { url: 'data:image/pngAAABBB' })]
|
||||
|
||||
const result = await convertMessageToSdkParam(message, true, model)
|
||||
|
||||
expect(result).toEqual({
|
||||
role: 'user',
|
||||
content: [
|
||||
{ type: 'text', text: 'Malformed data url' }
|
||||
// Malformed data URL is excluded from the content
|
||||
]
|
||||
})
|
||||
})
|
||||
|
||||
it('handles multiple large base64 images without stack overflow', async () => {
|
||||
const model = createModel()
|
||||
const message = createMessage('user')
|
||||
// Create large base64 strings (~500KB each) to simulate real-world large images
|
||||
const largeBase64 = 'A'.repeat(500_000)
|
||||
message.__mockContent = 'Check these images'
|
||||
message.__mockImageBlocks = [
|
||||
createImageBlock(message.id, { url: `data:image/png;base64,${largeBase64}` }),
|
||||
createImageBlock(message.id, { url: `data:image/png;base64,${largeBase64}` }),
|
||||
createImageBlock(message.id, { url: `data:image/png;base64,${largeBase64}` })
|
||||
]
|
||||
|
||||
// Should not throw RangeError: Maximum call stack size exceeded
|
||||
await expect(convertMessageToSdkParam(message, true, model)).resolves.toBeDefined()
|
||||
})
|
||||
|
||||
it('returns file instructions as a system message when native uploads succeed', async () => {
|
||||
const model = createModel()
|
||||
const message = createMessage('user')
|
||||
@ -162,10 +243,27 @@ describe('messageConverter', () => {
|
||||
}
|
||||
])
|
||||
})
|
||||
|
||||
it('includes reasoning parts for assistant messages with thinking blocks', async () => {
|
||||
const model = createModel()
|
||||
const message = createMessage('assistant')
|
||||
message.__mockContent = 'Here is my answer'
|
||||
message.__mockThinkingBlocks = [createThinkingBlock(message.id, { content: 'Let me think...' })]
|
||||
|
||||
const result = await convertMessageToSdkParam(message, false, model)
|
||||
|
||||
expect(result).toEqual({
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{ type: 'text', text: 'Here is my answer' },
|
||||
{ type: 'reasoning', text: 'Let me think...' }
|
||||
]
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('convertMessagesToSdkMessages', () => {
|
||||
it('appends assistant images to the final user message for image enhancement models', async () => {
|
||||
it('collapses to [system?, user(image)] for image enhancement models', async () => {
|
||||
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
||||
const initialUser = createMessage('user')
|
||||
initialUser.__mockContent = 'Start editing'
|
||||
@ -180,14 +278,6 @@ describe('messageConverter', () => {
|
||||
const result = await convertMessagesToSdkMessages([initialUser, assistant, finalUser], model)
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: 'user',
|
||||
content: [{ type: 'text', text: 'Start editing' }]
|
||||
},
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [{ type: 'text', text: 'Here is the current preview' }]
|
||||
},
|
||||
{
|
||||
role: 'user',
|
||||
content: [
|
||||
@ -198,7 +288,7 @@ describe('messageConverter', () => {
|
||||
])
|
||||
})
|
||||
|
||||
it('preserves preceding system instructions when building enhancement payloads', async () => {
|
||||
it('preserves system messages and collapses others for enhancement payloads', async () => {
|
||||
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
||||
const fileUser = createMessage('user')
|
||||
fileUser.__mockContent = 'Use this document as inspiration'
|
||||
@ -221,11 +311,6 @@ describe('messageConverter', () => {
|
||||
|
||||
expect(result).toEqual([
|
||||
{ role: 'system', content: 'fileid://reference' },
|
||||
{ role: 'user', content: [{ type: 'text', text: 'Use this document as inspiration' }] },
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [{ type: 'text', text: 'Generated previews ready' }]
|
||||
},
|
||||
{
|
||||
role: 'user',
|
||||
content: [
|
||||
@ -235,5 +320,120 @@ describe('messageConverter', () => {
|
||||
}
|
||||
])
|
||||
})
|
||||
|
||||
it('handles no previous assistant message with images', async () => {
|
||||
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
||||
const user1 = createMessage('user')
|
||||
user1.__mockContent = 'Start'
|
||||
|
||||
const user2 = createMessage('user')
|
||||
user2.__mockContent = 'Continue without images'
|
||||
|
||||
const result = await convertMessagesToSdkMessages([user1, user2], model)
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: 'user',
|
||||
content: [{ type: 'text', text: 'Continue without images' }]
|
||||
}
|
||||
])
|
||||
})
|
||||
|
||||
it('handles assistant message without images', async () => {
|
||||
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
||||
const user1 = createMessage('user')
|
||||
user1.__mockContent = 'Start'
|
||||
|
||||
const assistant = createMessage('assistant')
|
||||
assistant.__mockContent = 'Text only response'
|
||||
assistant.__mockImageBlocks = []
|
||||
|
||||
const user2 = createMessage('user')
|
||||
user2.__mockContent = 'Follow up'
|
||||
|
||||
const result = await convertMessagesToSdkMessages([user1, assistant, user2], model)
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: 'user',
|
||||
content: [{ type: 'text', text: 'Follow up' }]
|
||||
}
|
||||
])
|
||||
})
|
||||
|
||||
it('handles multiple assistant messages by using the most recent one', async () => {
|
||||
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
||||
const user1 = createMessage('user')
|
||||
user1.__mockContent = 'Start'
|
||||
|
||||
const assistant1 = createMessage('assistant')
|
||||
assistant1.__mockContent = 'First response'
|
||||
assistant1.__mockImageBlocks = [createImageBlock(assistant1.id, { url: 'https://example.com/old.png' })]
|
||||
|
||||
const user2 = createMessage('user')
|
||||
user2.__mockContent = 'Continue'
|
||||
|
||||
const assistant2 = createMessage('assistant')
|
||||
assistant2.__mockContent = 'Second response'
|
||||
assistant2.__mockImageBlocks = [createImageBlock(assistant2.id, { url: 'https://example.com/new.png' })]
|
||||
|
||||
const user3 = createMessage('user')
|
||||
user3.__mockContent = 'Final request'
|
||||
|
||||
const result = await convertMessagesToSdkMessages([user1, assistant1, user2, assistant2, user3], model)
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: 'user',
|
||||
content: [
|
||||
{ type: 'text', text: 'Final request' },
|
||||
{ type: 'image', image: 'https://example.com/new.png' }
|
||||
]
|
||||
}
|
||||
])
|
||||
})
|
||||
|
||||
it('handles conversation ending with assistant message', async () => {
|
||||
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
||||
const user = createMessage('user')
|
||||
user.__mockContent = 'Start'
|
||||
|
||||
const assistant = createMessage('assistant')
|
||||
assistant.__mockContent = 'Response with image'
|
||||
assistant.__mockImageBlocks = [createImageBlock(assistant.id, { url: 'https://example.com/image.png' })]
|
||||
|
||||
const result = await convertMessagesToSdkMessages([user, assistant], model)
|
||||
|
||||
// The user message is the last user message, but since the assistant comes after,
|
||||
// there's no "previous" assistant message (search starts from messages.length-2 backwards)
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: 'user',
|
||||
content: [{ type: 'text', text: 'Start' }]
|
||||
}
|
||||
])
|
||||
})
|
||||
|
||||
it('handles empty content in last user message', async () => {
|
||||
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
||||
const user1 = createMessage('user')
|
||||
user1.__mockContent = 'Start'
|
||||
|
||||
const assistant = createMessage('assistant')
|
||||
assistant.__mockContent = 'Here is the preview'
|
||||
assistant.__mockImageBlocks = [createImageBlock(assistant.id, { url: 'https://example.com/preview.png' })]
|
||||
|
||||
const user2 = createMessage('user')
|
||||
user2.__mockContent = ''
|
||||
|
||||
const result = await convertMessagesToSdkMessages([user1, assistant, user2], model)
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: 'user',
|
||||
content: [{ type: 'image', image: 'https://example.com/preview.png' }]
|
||||
}
|
||||
])
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@ -18,7 +18,7 @@ vi.mock('@renderer/services/AssistantService', () => ({
|
||||
toolUseMode: assistant.settings?.toolUseMode ?? 'prompt',
|
||||
defaultModel: assistant.defaultModel,
|
||||
customParameters: assistant.settings?.customParameters ?? [],
|
||||
reasoning_effort: assistant.settings?.reasoning_effort,
|
||||
reasoning_effort: assistant.settings?.reasoning_effort ?? 'default',
|
||||
reasoning_effort_cache: assistant.settings?.reasoning_effort_cache,
|
||||
qwenThinkMode: assistant.settings?.qwenThinkMode
|
||||
})
|
||||
|
||||
@ -3,10 +3,12 @@
* Convert Cherry Studio message format to AI SDK message format
*/

import type { ReasoningPart } from '@ai-sdk/provider-utils'
import { loggerService } from '@logger'
import { isImageEnhancementModel, isVisionModel } from '@renderer/config/models'
import type { Message, Model } from '@renderer/types'
import type { FileMessageBlock, ImageMessageBlock, ThinkingMessageBlock } from '@renderer/types/newMessage'
import { parseDataUrlMediaType } from '@renderer/utils/image'
import {
findFileBlocks,
findImageBlocks,
@ -59,23 +61,29 @@ async function convertImageBlockToImagePart(imageBlocks: ImageMessageBlock[]): P
mediaType: image.mime
})
} catch (error) {
logger.warn('Failed to load image:', error as Error)
logger.error('Failed to load image file, image will be excluded from message:', {
fileId: imageBlock.file.id,
fileName: imageBlock.file.origin_name,
error: error as Error
})
}
} else if (imageBlock.url) {
const isBase64 = imageBlock.url.startsWith('data:')
if (isBase64) {
const base64 = imageBlock.url.match(/^data:[^;]*;base64,(.+)$/)![1]
const mimeMatch = imageBlock.url.match(/^data:([^;]+)/)
parts.push({
type: 'image',
image: base64,
mediaType: mimeMatch ? mimeMatch[1] : 'image/png'
})
const url = imageBlock.url
const isDataUrl = url.startsWith('data:')
if (isDataUrl) {
const { mediaType } = parseDataUrlMediaType(url)
const commaIndex = url.indexOf(',')
if (commaIndex === -1) {
logger.error('Malformed data URL detected (missing comma separator), image will be excluded:', {
urlPrefix: url.slice(0, 50) + '...'
})
continue
}
const base64Data = url.slice(commaIndex + 1)
parts.push({ type: 'image', image: base64Data, ...(mediaType ? { mediaType } : {}) })
} else {
parts.push({
type: 'image',
image: imageBlock.url
})
// For remote URLs we keep payload minimal to match existing expectations.
parts.push({ type: 'image', image: url })
}
}
}
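For reference, the data-URL handling above amounts to splitting on the first comma and reading the media type from the header. A minimal sketch of that split (a hypothetical helper, not the project's parseDataUrlMediaType implementation):

function splitDataUrl(url: string): { base64?: string; mediaType?: string } {
  if (!url.startsWith('data:')) return {}
  const commaIndex = url.indexOf(',')
  if (commaIndex === -1) return {} // malformed: no payload separator
  const header = url.slice(5, commaIndex) // strip "data:", e.g. "image/png;base64"
  const mediaType = header.split(';')[0] || undefined
  return { base64: url.slice(commaIndex + 1), mediaType }
}
// splitDataUrl('data:image/png;base64,iVBORw0KGgo') -> { base64: 'iVBORw0KGgo', mediaType: 'image/png' }
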
@ -156,13 +164,13 @@ async function convertMessageToAssistantModelMessage(
thinkingBlocks: ThinkingMessageBlock[],
model?: Model
): Promise<AssistantModelMessage> {
const parts: Array<TextPart | FilePart> = []
const parts: Array<TextPart | ReasoningPart | FilePart> = []
if (content) {
parts.push({ type: 'text', text: content })
}

for (const thinkingBlock of thinkingBlocks) {
parts.push({ type: 'text', text: thinkingBlock.content })
parts.push({ type: 'reasoning', text: thinkingBlock.content })
}

for (const fileBlock of fileBlocks) {
@ -194,17 +202,20 @@ async function convertMessageToAssistantModelMessage(
* This function processes messages and transforms them into the format required by the SDK.
* It handles special cases for vision models and image enhancement models.
*
* @param messages - Array of messages to convert. Must contain at least 3 messages when using image enhancement models for special handling.
* @param messages - Array of messages to convert.
* @param model - The model configuration that determines conversion behavior
*
* @returns A promise that resolves to an array of SDK-compatible model messages
*
* @remarks
* For image enhancement models with 3+ messages:
* - Examines the last 2 messages to find an assistant message containing image blocks
* - If found, extracts images from the assistant message and appends them to the last user message content
* - Returns all converted messages (not just the last two) with the images merged into the user message
* - Typical pattern: [system?, assistant(image), user] -> [system?, assistant, user(image)]
* For image enhancement models:
* - Collapses the conversation into [system?, user(image)] format
* - Searches backwards through all messages to find the most recent assistant message with images
* - Preserves all system messages (including ones generated from file uploads like 'fileid://...')
* - Extracts the last user message content and merges images from the previous assistant message
* - Returns only the collapsed messages: system messages (if any) followed by a single user message
* - If no user message is found, returns only system messages
* - Typical pattern: [system?, user, assistant(image), user] -> [system?, user(image)]
*
* For other models:
* - Returns all converted messages in order without special image handling
@ -220,25 +231,66 @@ export async function convertMessagesToSdkMessages(messages: Message[], model: M
sdkMessages.push(...(Array.isArray(sdkMessage) ? sdkMessage : [sdkMessage]))
}
// Special handling for image enhancement models
// Only merge images into the user message
// [system?, assistant(image), user] -> [system?, assistant, user(image)]
if (isImageEnhancementModel(model) && messages.length >= 3) {
const needUpdatedMessages = messages.slice(-2)
const assistantMessage = needUpdatedMessages.find((m) => m.role === 'assistant')
const userSdkMessage = sdkMessages[sdkMessages.length - 1]
// Target behavior: collapse the conversation into [system?, user(image)].
// Why we don't simply use slice:
// 1) We need to preserve all system messages: during convertMessageToSdkParam, native file uploads may insert `system(fileid://...)`.
//    Directly slicing the original messages or the already converted sdkMessages could easily drop these system instructions.
//    Therefore, we first perform a full conversion and then aggregate the system messages afterward.
// 2) The conversion process may split messages: a single user message might be broken into two SDK messages, [system, user].
//    Slicing either side could yield semantically incorrect fragments (e.g., only the split-out system message).
// 3) The "previous assistant message" is not necessarily the second-to-last one: system messages or other message blocks may sit in between,
//    making a simple slice(-2) assumption too rigid. Here, we trace back from the end of the original messages to locate the most recent assistant message, which better matches the business semantics.
// 4) This is a "collapse" rather than a simple "slice": ultimately we need to synthesize a new user message
//    (with text from the last user message and images from the previous assistant message). slice can only extract subarrays,
//    which still require reassembly; constructing directly according to the target structure is clearer and more reliable.
if (isImageEnhancementModel(model)) {
// Collect all system messages (including ones generated from file uploads)
const systemMessages = sdkMessages.filter((m): m is SystemModelMessage => m.role === 'system')

if (assistantMessage && userSdkMessage?.role === 'user') {
const imageBlocks = findImageBlocks(assistantMessage)
const imageParts = await convertImageBlockToImagePart(imageBlocks)
// Find the last user message (SDK converted)
const lastUserSdkIndex = (() => {
for (let i = sdkMessages.length - 1; i >= 0; i--) {
if (sdkMessages[i].role === 'user') return i
}
return -1
})()

if (imageParts.length > 0) {
if (typeof userSdkMessage.content === 'string') {
userSdkMessage.content = [{ type: 'text', text: userSdkMessage.content }, ...imageParts]
} else if (Array.isArray(userSdkMessage.content)) {
userSdkMessage.content.push(...imageParts)
}
const lastUserSdk = lastUserSdkIndex >= 0 ? (sdkMessages[lastUserSdkIndex] as UserModelMessage) : null

// Find the nearest preceding assistant message in original messages
let prevAssistant: Message | null = null
for (let i = messages.length - 2; i >= 0; i--) {
if (messages[i].role === 'assistant') {
prevAssistant = messages[i]
break
}
}

// Build the final user content parts
let finalUserParts: Array<TextPart | FilePart | ImagePart> = []
if (lastUserSdk) {
if (typeof lastUserSdk.content === 'string') {
finalUserParts.push({ type: 'text', text: lastUserSdk.content })
} else if (Array.isArray(lastUserSdk.content)) {
finalUserParts = [...lastUserSdk.content]
}
}

// Append images from the previous assistant message if any
if (prevAssistant) {
const imageBlocks = findImageBlocks(prevAssistant)
const imageParts = await convertImageBlockToImagePart(imageBlocks)
if (imageParts.length > 0) {
finalUserParts.push(...imageParts)
}
}

// If we couldn't find a last user message, fall back to returning collected system messages only
if (!lastUserSdk) {
return systemMessages
}

return [...systemMessages, { role: 'user', content: finalUserParts }]
}

return sdkMessages

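To make the collapse concrete, a hypothetical conversation for an image-enhancement model (shapes simplified, not the actual SDK types):

// Input (already converted): [system('fileid://ref'), user('Start'), assistant(text + image), user('Make it brighter')]
// Collapsed output:
// [
//   { role: 'system', content: 'fileid://ref' },
//   { role: 'user', content: [{ type: 'text', text: 'Make it brighter' }, { type: 'image', image: '<assistant image>' }] }
// ]
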
@ -4,60 +4,90 @@
|
||||
*/
|
||||
|
||||
import {
|
||||
isClaude45ReasoningModel,
|
||||
isClaudeReasoningModel,
|
||||
isMaxTemperatureOneModel,
|
||||
isNotSupportTemperatureAndTopP,
|
||||
isSupportedFlexServiceTier,
|
||||
isSupportedThinkingTokenClaudeModel
|
||||
isSupportedThinkingTokenClaudeModel,
|
||||
isSupportTemperatureModel,
|
||||
isSupportTopPModel,
|
||||
isTemperatureTopPMutuallyExclusiveModel
|
||||
} from '@renderer/config/models'
|
||||
import { getAssistantSettings, getProviderByModel } from '@renderer/services/AssistantService'
|
||||
import {
|
||||
DEFAULT_ASSISTANT_SETTINGS,
|
||||
getAssistantSettings,
|
||||
getProviderByModel
|
||||
} from '@renderer/services/AssistantService'
|
||||
import type { Assistant, Model } from '@renderer/types'
|
||||
import { defaultTimeout } from '@shared/config/constant'
|
||||
|
||||
import { getAnthropicThinkingBudget } from '../utils/reasoning'
|
||||
|
||||
/**
* Claude 4.5 reasoning models:
* - Only temperature enabled → use temperature
* - Only top_p enabled → use top_p
* - Both enabled → temperature takes effect, top_p is ignored
* - Neither enabled → use neither
* Get the temperature parameter
* Retrieves the temperature parameter, adapting it based on assistant.settings and model capabilities.
* - Disabled for Claude reasoning models when reasoning effort is set.
* - Disabled for models that do not support temperature.
* - Disabled for Claude 4.5 reasoning models when TopP is enabled and temperature is disabled.
* Otherwise, returns the temperature value if the assistant has temperature enabled.

*/
export function getTemperature(assistant: Assistant, model: Model): number | undefined {
if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
return undefined
}

if (!isSupportTemperatureModel(model, assistant)) {
return undefined
}

if (
isNotSupportTemperatureAndTopP(model) ||
(isClaude45ReasoningModel(model) && assistant.settings?.enableTopP && !assistant.settings?.enableTemperature)
isTemperatureTopPMutuallyExclusiveModel(model) &&
assistant.settings?.enableTopP &&
!assistant.settings?.enableTemperature
) {
return undefined
}

return getTemperatureValue(assistant, model)
}

function getTemperatureValue(assistant: Assistant, model: Model): number | undefined {
const assistantSettings = getAssistantSettings(assistant)
let temperature = assistantSettings?.temperature
if (temperature && isMaxTemperatureOneModel(model)) {
temperature = Math.min(1, temperature)
}
return assistantSettings?.enableTemperature ? temperature : undefined

// FIXME: assistant.settings.enableTemperature should be always a boolean value.
const enableTemperature = assistantSettings?.enableTemperature ?? DEFAULT_ASSISTANT_SETTINGS.enableTemperature
return enableTemperature ? temperature : undefined
}

/**
* Get the TopP parameter
* Retrieves the TopP parameter, adapting it based on assistant.settings and model capabilities.
* - Disabled for Claude reasoning models when reasoning effort is set.
* - Disabled for models that do not support TopP.
* - Disabled for Claude 4.5 reasoning models when temperature is explicitly enabled.
* Otherwise, returns the TopP value if the assistant has TopP enabled.
*/
export function getTopP(assistant: Assistant, model: Model): number | undefined {
if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
return undefined
}
if (
isNotSupportTemperatureAndTopP(model) ||
(isClaude45ReasoningModel(model) && assistant.settings?.enableTemperature)
) {
if (!isSupportTopPModel(model, assistant)) {
return undefined
}
if (isTemperatureTopPMutuallyExclusiveModel(model) && assistant.settings?.enableTemperature) {
return undefined
}

return getTopPValue(assistant)
}

function getTopPValue(assistant: Assistant): number | undefined {
const assistantSettings = getAssistantSettings(assistant)
return assistantSettings?.enableTopP ? assistantSettings?.topP : undefined
// FIXME: assistant.settings.enableTopP should be always a boolean value.
const enableTopP = assistantSettings.enableTopP ?? DEFAULT_ASSISTANT_SETTINGS.enableTopP
return enableTopP ? assistantSettings?.topP : undefined
}
||||
|
||||
/**
|
||||
|
||||
@ -42,7 +42,8 @@ vi.mock('@renderer/utils/api', () => ({
|
||||
routeToEndpoint: vi.fn((host) => ({
|
||||
baseURL: host,
|
||||
endpoint: '/chat/completions'
|
||||
}))
|
||||
})),
|
||||
isWithTrailingSharp: vi.fn((host) => host?.endsWith('#') || false)
|
||||
}))
|
||||
|
||||
vi.mock('@renderer/utils/provider', async (importOriginal) => {
|
||||
@ -227,12 +228,19 @@ describe('CherryAI provider configuration', () => {
|
||||
// Mock the functions to simulate non-CherryAI provider
|
||||
vi.mocked(isCherryAIProvider).mockReturnValue(false)
|
||||
vi.mocked(getProviderByModel).mockReturnValue(provider)
|
||||
// Mock isWithTrailingSharp to return false for this test
|
||||
vi.mocked(formatApiHost as any).mockImplementation((host, isSupportedAPIVersion = true) => {
|
||||
if (isSupportedAPIVersion === false) {
|
||||
return host
|
||||
}
|
||||
return `${host}/v1`
|
||||
})
|
||||
|
||||
// Call getActualProvider
|
||||
const actualProvider = getActualProvider(model)
|
||||
|
||||
// Verify that formatApiHost was called with default parameters (true)
|
||||
expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com')
|
||||
// Verify that formatApiHost was called with appendApiVersion parameter
|
||||
expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com', true)
|
||||
expect(actualProvider.apiHost).toBe('https://api.openai.com/v1')
|
||||
})
|
||||
|
||||
@ -303,12 +311,19 @@ describe('Perplexity provider configuration', () => {
|
||||
vi.mocked(isCherryAIProvider).mockReturnValue(false)
|
||||
vi.mocked(isPerplexityProvider).mockReturnValue(false)
|
||||
vi.mocked(getProviderByModel).mockReturnValue(provider)
|
||||
// Mock isWithTrailingSharp to return false for this test
|
||||
vi.mocked(formatApiHost as any).mockImplementation((host, isSupportedAPIVersion = true) => {
|
||||
if (isSupportedAPIVersion === false) {
|
||||
return host
|
||||
}
|
||||
return `${host}/v1`
|
||||
})
|
||||
|
||||
// Call getActualProvider
|
||||
const actualProvider = getActualProvider(model)
|
||||
|
||||
// Verify that formatApiHost was called with default parameters (true)
|
||||
expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com')
|
||||
// Verify that formatApiHost was called with appendApiVersion parameter
|
||||
expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com', true)
|
||||
expect(actualProvider.apiHost).toBe('https://api.openai.com/v1')
|
||||
})
|
||||
|
||||
|
||||
@ -9,6 +9,7 @@ import {
|
||||
} from '@renderer/hooks/useAwsBedrock'
|
||||
import { createVertexProvider, isVertexAIConfigured } from '@renderer/hooks/useVertexAI'
|
||||
import { getProviderByModel } from '@renderer/services/AssistantService'
|
||||
import { getProviderById } from '@renderer/services/ProviderService'
|
||||
import store from '@renderer/store'
|
||||
import { isSystemProvider, type Model, type Provider, SystemProviderIds } from '@renderer/types'
|
||||
import type { OpenAICompletionsStreamOptions } from '@renderer/types/aiCoreTypes'
|
||||
@ -17,6 +18,7 @@ import {
|
||||
formatAzureOpenAIApiHost,
|
||||
formatOllamaApiHost,
|
||||
formatVertexApiHost,
|
||||
isWithTrailingSharp,
|
||||
routeToEndpoint
|
||||
} from '@renderer/utils/api'
|
||||
import {
|
||||
@ -30,6 +32,7 @@ import {
|
||||
isSupportStreamOptionsProvider,
|
||||
isVertexProvider
|
||||
} from '@renderer/utils/provider'
|
||||
import { defaultAppHeaders } from '@shared/utils'
|
||||
import { cloneDeep, isEmpty } from 'lodash'
|
||||
|
||||
import type { AiSdkConfig } from '../types'
|
||||
@ -69,14 +72,15 @@ function handleSpecialProviders(model: Model, provider: Provider): Provider {
|
||||
*/
|
||||
export function formatProviderApiHost(provider: Provider): Provider {
|
||||
const formatted = { ...provider }
|
||||
const appendApiVersion = !isWithTrailingSharp(provider.apiHost)
|
||||
if (formatted.anthropicApiHost) {
|
||||
formatted.anthropicApiHost = formatApiHost(formatted.anthropicApiHost)
|
||||
formatted.anthropicApiHost = formatApiHost(formatted.anthropicApiHost, appendApiVersion)
|
||||
}
|
||||
|
||||
if (isAnthropicProvider(provider)) {
|
||||
const baseHost = formatted.anthropicApiHost || formatted.apiHost
|
||||
// AI SDK needs /v1 in baseURL, Anthropic SDK will strip it in getSdkClient
|
||||
formatted.apiHost = formatApiHost(baseHost)
|
||||
formatted.apiHost = formatApiHost(baseHost, appendApiVersion)
|
||||
if (!formatted.anthropicApiHost) {
|
||||
formatted.anthropicApiHost = formatted.apiHost
|
||||
}
|
||||
@ -85,7 +89,7 @@ export function formatProviderApiHost(provider: Provider): Provider {
|
||||
} else if (isOllamaProvider(formatted)) {
|
||||
formatted.apiHost = formatOllamaApiHost(formatted.apiHost)
|
||||
} else if (isGeminiProvider(formatted)) {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, true, 'v1beta')
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, appendApiVersion, 'v1beta')
|
||||
} else if (isAzureOpenAIProvider(formatted)) {
|
||||
formatted.apiHost = formatAzureOpenAIApiHost(formatted.apiHost)
|
||||
} else if (isVertexProvider(formatted)) {
|
||||
@ -95,7 +99,7 @@ export function formatProviderApiHost(provider: Provider): Provider {
|
||||
} else if (isPerplexityProvider(formatted)) {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, false)
|
||||
} else {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost)
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, appendApiVersion)
|
||||
}
|
||||
return formatted
|
||||
}
|
||||
@ -201,18 +205,13 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A
|
||||
extraOptions.mode = 'chat'
|
||||
}
|
||||
|
||||
// 添加额外headers
|
||||
if (actualProvider.extra_headers) {
|
||||
extraOptions.headers = actualProvider.extra_headers
|
||||
// copy from openaiBaseClient/openaiResponseApiClient
|
||||
if (aiSdkProviderId === 'openai') {
|
||||
extraOptions.headers = {
|
||||
...extraOptions.headers,
|
||||
'HTTP-Referer': 'https://cherry-ai.com',
|
||||
'X-Title': 'Cherry Studio',
|
||||
'X-Api-Key': baseConfig.apiKey
|
||||
}
|
||||
}
|
||||
extraOptions.headers = {
|
||||
...defaultAppHeaders(),
|
||||
...actualProvider.extra_headers
|
||||
}
|
||||
|
||||
if (aiSdkProviderId === 'openai') {
|
||||
extraOptions.headers['X-Api-Key'] = baseConfig.apiKey
|
||||
}
|
||||
// azure
|
||||
// https://learn.microsoft.com/en-us/azure/ai-foundry/openai/latest
|
||||
@ -255,6 +254,12 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A
|
||||
if (model.endpoint_type) {
|
||||
extraOptions.endpointType = model.endpoint_type
|
||||
}
|
||||
// CherryIN API Host
|
||||
const cherryinProvider = getProviderById(SystemProviderIds.cherryin)
|
||||
if (cherryinProvider) {
|
||||
extraOptions.anthropicBaseURL = cherryinProvider.anthropicApiHost + '/v1'
|
||||
extraOptions.geminiBaseURL = cherryinProvider.apiHost + '/v1beta/models'
|
||||
}
|
||||
}
|
||||
|
||||
if (hasProviderConfig(aiSdkProviderId) && aiSdkProviderId !== 'openai-compatible') {
|
||||
|
||||
@ -11,6 +11,7 @@ import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import {
|
||||
getAnthropicReasoningParams,
|
||||
getAnthropicThinkingBudget,
|
||||
getBedrockReasoningParams,
|
||||
getCustomParameters,
|
||||
getGeminiReasoningParams,
|
||||
@ -89,7 +90,8 @@ vi.mock('@renderer/config/models', async (importOriginal) => {
|
||||
isQwenAlwaysThinkModel: vi.fn(() => false),
|
||||
isSupportedThinkingTokenHunyuanModel: vi.fn(() => false),
|
||||
isSupportedThinkingTokenModel: vi.fn(() => false),
|
||||
isGPT51SeriesModel: vi.fn(() => false)
|
||||
isGPT51SeriesModel: vi.fn(() => false),
|
||||
findTokenLimit: vi.fn(actual.findTokenLimit)
|
||||
}
|
||||
})
|
||||
|
||||
@ -596,7 +598,7 @@ describe('reasoning utils', () => {
|
||||
expect(result).toEqual({})
|
||||
})
|
||||
|
||||
it('should return disabled thinking when no reasoning effort', async () => {
|
||||
it('should return disabled thinking when reasoning effort is none', async () => {
|
||||
const { isReasoningModel, isSupportedThinkingTokenClaudeModel } = await import('@renderer/config/models')
|
||||
|
||||
vi.mocked(isReasoningModel).mockReturnValue(true)
|
||||
@ -611,7 +613,9 @@ describe('reasoning utils', () => {
|
||||
const assistant: Assistant = {
|
||||
id: 'test',
|
||||
name: 'Test',
|
||||
settings: {}
|
||||
settings: {
|
||||
reasoning_effort: 'none'
|
||||
}
|
||||
} as Assistant
|
||||
|
||||
const result = getAnthropicReasoningParams(assistant, model)
|
||||
@ -647,7 +651,7 @@ describe('reasoning utils', () => {
|
||||
expect(result).toEqual({
|
||||
thinking: {
|
||||
type: 'enabled',
|
||||
budgetTokens: 2048
|
||||
budgetTokens: 4096
|
||||
}
|
||||
})
|
||||
})
|
||||
@ -675,7 +679,7 @@ describe('reasoning utils', () => {
|
||||
expect(result).toEqual({})
|
||||
})
|
||||
|
||||
it('should disable thinking for Flash models without reasoning effort', async () => {
|
||||
it('should disable thinking for Flash models when reasoning effort is none', async () => {
|
||||
const { isReasoningModel, isSupportedThinkingTokenGeminiModel } = await import('@renderer/config/models')
|
||||
|
||||
vi.mocked(isReasoningModel).mockReturnValue(true)
|
||||
@ -690,7 +694,9 @@ describe('reasoning utils', () => {
|
||||
const assistant: Assistant = {
|
||||
id: 'test',
|
||||
name: 'Test',
|
||||
settings: {}
|
||||
settings: {
|
||||
reasoning_effort: 'none'
|
||||
}
|
||||
} as Assistant
|
||||
|
||||
const result = getGeminiReasoningParams(assistant, model)
|
||||
@ -725,7 +731,7 @@ describe('reasoning utils', () => {
|
||||
const result = getGeminiReasoningParams(assistant, model)
|
||||
expect(result).toEqual({
|
||||
thinkingConfig: {
|
||||
thinkingBudget: 16448,
|
||||
thinkingBudget: expect.any(Number),
|
||||
includeThoughts: true
|
||||
}
|
||||
})
|
||||
@ -754,7 +760,8 @@ describe('reasoning utils', () => {
|
||||
const result = getGeminiReasoningParams(assistant, model)
|
||||
expect(result).toEqual({
|
||||
thinkingConfig: {
|
||||
includeThoughts: true
|
||||
includeThoughts: true,
|
||||
thinkingBudget: -1
|
||||
}
|
||||
})
|
||||
})
|
||||
@ -888,7 +895,7 @@ describe('reasoning utils', () => {
|
||||
expect(result).toEqual({
|
||||
reasoningConfig: {
|
||||
type: 'enabled',
|
||||
budgetTokens: 2048
|
||||
budgetTokens: 4096
|
||||
}
|
||||
})
|
||||
})
|
||||
@ -989,4 +996,89 @@ describe('reasoning utils', () => {
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('getAnthropicThinkingBudget', () => {
|
||||
it('should return undefined when reasoningEffort is undefined', async () => {
|
||||
const result = getAnthropicThinkingBudget(4096, undefined, 'claude-3-7-sonnet')
|
||||
expect(result).toBeUndefined()
|
||||
})
|
||||
|
||||
it('should return undefined when reasoningEffort is none', async () => {
|
||||
const result = getAnthropicThinkingBudget(4096, 'none', 'claude-3-7-sonnet')
|
||||
expect(result).toBeUndefined()
|
||||
})
|
||||
|
||||
it('should return undefined when tokenLimit is not found', async () => {
|
||||
const { findTokenLimit } = await import('@renderer/config/models')
|
||||
vi.mocked(findTokenLimit).mockReturnValue(undefined)
|
||||
|
||||
const result = getAnthropicThinkingBudget(4096, 'medium', 'unknown-model')
|
||||
expect(result).toBeUndefined()
|
||||
})
|
||||
|
||||
it('should calculate budget correctly when maxTokens is provided', async () => {
|
||||
const { findTokenLimit } = await import('@renderer/config/models')
|
||||
vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
|
||||
|
||||
const result = getAnthropicThinkingBudget(4096, 'medium', 'claude-3-7-sonnet')
|
||||
// EFFORT_RATIO['medium'] = 0.5
|
||||
// budget = Math.floor((32768 - 1024) * 0.5 + 1024)
|
||||
// = Math.floor(31744 * 0.5 + 1024) = Math.floor(15872 + 1024) = 16896
|
||||
// budgetTokens = Math.min(16896, 4096) = 4096
|
||||
// result = Math.max(1024, 4096) = 4096
|
||||
expect(result).toBe(4096)
|
||||
})
|
||||
|
||||
it('should use tokenLimit.max when maxTokens is undefined', async () => {
|
||||
const { findTokenLimit } = await import('@renderer/config/models')
|
||||
vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
|
||||
|
||||
const result = getAnthropicThinkingBudget(undefined, 'medium', 'claude-3-7-sonnet')
|
||||
// When maxTokens is undefined, budget is not constrained by maxTokens
|
||||
// EFFORT_RATIO['medium'] = 0.5
|
||||
// budget = Math.floor((32768 - 1024) * 0.5 + 1024)
|
||||
// = Math.floor(31744 * 0.5 + 1024) = Math.floor(15872 + 1024) = 16896
|
||||
// result = Math.max(1024, 16896) = 16896
|
||||
expect(result).toBe(16896)
|
||||
})
|
||||
|
||||
it('should enforce minimum budget of 1024', async () => {
|
||||
const { findTokenLimit } = await import('@renderer/config/models')
|
||||
vi.mocked(findTokenLimit).mockReturnValue({ min: 100, max: 1000 })
|
||||
|
||||
const result = getAnthropicThinkingBudget(500, 'low', 'claude-3-7-sonnet')
|
||||
// EFFORT_RATIO['low'] = 0.05
|
||||
// budget = Math.floor((1000 - 100) * 0.05 + 100)
|
||||
// = Math.floor(900 * 0.05 + 100) = Math.floor(45 + 100) = 145
|
||||
// budgetTokens = Math.min(145, 500) = 145
|
||||
// result = Math.max(1024, 145) = 1024
|
||||
expect(result).toBe(1024)
|
||||
})
|
||||
|
||||
it('should respect effort ratio for high reasoning effort', async () => {
|
||||
const { findTokenLimit } = await import('@renderer/config/models')
|
||||
vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
|
||||
|
||||
const result = getAnthropicThinkingBudget(8192, 'high', 'claude-3-7-sonnet')
|
||||
// EFFORT_RATIO['high'] = 0.8
|
||||
// budget = Math.floor((32768 - 1024) * 0.8 + 1024)
|
||||
// = Math.floor(31744 * 0.8 + 1024) = Math.floor(25395.2 + 1024) = 26419
|
||||
// budgetTokens = Math.min(26419, 8192) = 8192
|
||||
// result = Math.max(1024, 8192) = 8192
|
||||
expect(result).toBe(8192)
|
||||
})
|
||||
|
||||
it('should use full token limit when maxTokens is undefined and reasoning effort is high', async () => {
|
||||
const { findTokenLimit } = await import('@renderer/config/models')
|
||||
vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
|
||||
|
||||
const result = getAnthropicThinkingBudget(undefined, 'high', 'claude-3-7-sonnet')
|
||||
// When maxTokens is undefined, budget is not constrained by maxTokens
|
||||
// EFFORT_RATIO['high'] = 0.8
|
||||
// budget = Math.floor((32768 - 1024) * 0.8 + 1024)
|
||||
// = Math.floor(31744 * 0.8 + 1024) = Math.floor(25395.2 + 1024) = 26419
|
||||
// result = Math.max(1024, 26419) = 26419
|
||||
expect(result).toBe(26419)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@ -259,7 +259,7 @@ describe('websearch utils', () => {
|
||||
|
||||
expect(result).toEqual({
|
||||
xai: {
|
||||
maxSearchResults: 50,
|
||||
maxSearchResults: 30,
|
||||
returnCitations: true,
|
||||
sources: [{ type: 'web', excludedWebsites: [] }, { type: 'news' }, { type: 'x' }],
|
||||
mode: 'on'
|
||||
|
||||
@ -11,6 +11,7 @@ import {
|
||||
isGeminiModel,
|
||||
isGrokModel,
|
||||
isOpenAIModel,
|
||||
isOpenAIOpenWeightModel,
|
||||
isQwenMTModel,
|
||||
isSupportFlexServiceTierModel,
|
||||
isSupportVerbosityModel
|
||||
@ -244,7 +245,7 @@ export function buildProviderOptions(
|
||||
providerSpecificOptions = buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier)
|
||||
break
|
||||
case SystemProviderIds.ollama:
|
||||
providerSpecificOptions = buildOllamaProviderOptions(assistant, capabilities)
|
||||
providerSpecificOptions = buildOllamaProviderOptions(assistant, model, capabilities)
|
||||
break
|
||||
case SystemProviderIds.gateway:
|
||||
providerSpecificOptions = buildAIGatewayOptions(assistant, model, capabilities, serviceTier, textVerbosity)
|
||||
@ -564,6 +565,7 @@ function buildBedrockProviderOptions(
|
||||
|
||||
function buildOllamaProviderOptions(
|
||||
assistant: Assistant,
|
||||
model: Model,
|
||||
capabilities: {
|
||||
enableReasoning: boolean
|
||||
enableWebSearch: boolean
|
||||
@ -574,7 +576,12 @@ function buildOllamaProviderOptions(
|
||||
const providerOptions: OllamaCompletionProviderOptions = {}
|
||||
const reasoningEffort = assistant.settings?.reasoning_effort
|
||||
if (enableReasoning) {
|
||||
providerOptions.think = !['none', undefined].includes(reasoningEffort)
|
||||
if (isOpenAIOpenWeightModel(model)) {
|
||||
// @ts-ignore upstream type error
|
||||
providerOptions.think = reasoningEffort as any
|
||||
} else {
|
||||
providerOptions.think = !['none', undefined].includes(reasoningEffort)
|
||||
}
|
||||
}
|
||||
return {
|
||||
ollama: providerOptions
|
||||
|
||||
@ -8,16 +8,17 @@ import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
|
||||
import {
|
||||
findTokenLimit,
|
||||
GEMINI_FLASH_MODEL_REGEX,
|
||||
getThinkModelType,
|
||||
getModelSupportedReasoningEffortOptions,
|
||||
isDeepSeekHybridInferenceModel,
|
||||
isDoubaoSeed18Model,
|
||||
isDoubaoSeedAfter251015,
|
||||
isDoubaoThinkingAutoModel,
|
||||
isGemini3ThinkingTokenModel,
|
||||
isGPT5SeriesModel,
|
||||
isGPT51SeriesModel,
|
||||
isGrok4FastReasoningModel,
|
||||
isOpenAIDeepResearchModel,
|
||||
isOpenAIModel,
|
||||
isOpenAIReasoningModel,
|
||||
isQwenAlwaysThinkModel,
|
||||
isQwenReasoningModel,
|
||||
isReasoningModel,
|
||||
@ -28,14 +29,14 @@ import {
|
||||
isSupportedThinkingTokenDoubaoModel,
|
||||
isSupportedThinkingTokenGeminiModel,
|
||||
isSupportedThinkingTokenHunyuanModel,
|
||||
isSupportedThinkingTokenMiMoModel,
|
||||
isSupportedThinkingTokenModel,
|
||||
isSupportedThinkingTokenQwenModel,
|
||||
isSupportedThinkingTokenZhipuModel,
|
||||
MODEL_SUPPORTED_REASONING_EFFORT
|
||||
isSupportedThinkingTokenZhipuModel
|
||||
} from '@renderer/config/models'
|
||||
import { getStoreSetting } from '@renderer/hooks/useSettings'
|
||||
import { getAssistantSettings, getProviderByModel } from '@renderer/services/AssistantService'
|
||||
import type { Assistant, Model } from '@renderer/types'
|
||||
import type { Assistant, Model, ReasoningEffortOption } from '@renderer/types'
|
||||
import { EFFORT_RATIO, isSystemProvider, SystemProviderIds } from '@renderer/types'
|
||||
import type { OpenAIReasoningSummary } from '@renderer/types/aiCoreTypes'
|
||||
import type { ReasoningEffortOptionalParams } from '@renderer/types/sdk'
|
||||
@ -65,7 +66,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
// reasoningEffort is not set, no extra reasoning setting
|
||||
// Generally, for every model which supports reasoning control, the reasoning effort won't be undefined.
|
||||
// It's for some reasoning models that don't support reasoning control, such as deepseek reasoner.
|
||||
if (!reasoningEffort) {
|
||||
if (!reasoningEffort || reasoningEffort === 'default') {
|
||||
return {}
|
||||
}
|
||||
|
||||
@ -134,8 +135,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
// https://creator.poe.com/docs/external-applications/openai-compatible-api#additional-considerations
// Poe provider - supports custom bot parameters via extra_body
if (provider.id === SystemProviderIds.poe) {
// GPT-5 series models use reasoning_effort parameter in extra_body
if (isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) {
if (isOpenAIReasoningModel(model)) {
return {
extra_body: {
reasoning_effort: reasoningEffort === 'auto' ? 'medium' : reasoningEffort
@ -331,16 +331,15 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
// Grok models/Perplexity models/OpenAI models, use reasoning_effort
if (isSupportedReasoningEffortModel(model)) {
// Check whether the model supports the selected option
const modelType = getThinkModelType(model)
const supportedOptions = MODEL_SUPPORTED_REASONING_EFFORT[modelType]
if (supportedOptions.includes(reasoningEffort)) {
const supportedOptions = getModelSupportedReasoningEffortOptions(model)?.filter((option) => option !== 'default')
if (supportedOptions?.includes(reasoningEffort)) {
return {
reasoningEffort
}
} else {
// If the selected option is not supported, fall back to the first supported value
return {
reasoning_effort: supportedOptions[0]
reasoning_effort: supportedOptions?.[0]
}
}
}
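A minimal sketch of the supported-option fallback above (the option list here is assumed for illustration only):

type Effort = 'minimal' | 'low' | 'medium' | 'high'
function resolveEffort(requested: Effort, supported: Effort[] | undefined): Effort | undefined {
  if (!supported || supported.length === 0) return undefined
  return supported.includes(requested) ? requested : supported[0]
}
// resolveEffort('minimal', ['low', 'medium', 'high']) -> 'low'
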
@ -392,7 +391,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
|
||||
// Use thinking, doubao, zhipu, etc.
|
||||
if (isSupportedThinkingTokenDoubaoModel(model)) {
|
||||
if (isDoubaoSeedAfter251015(model)) {
|
||||
if (isDoubaoSeedAfter251015(model) || isDoubaoSeed18Model(model)) {
|
||||
return { reasoningEffort }
|
||||
}
|
||||
if (reasoningEffort === 'high') {
|
||||
@ -411,6 +410,12 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
return { thinking: { type: 'enabled' } }
|
||||
}
|
||||
|
||||
if (isSupportedThinkingTokenMiMoModel(model)) {
|
||||
return {
|
||||
thinking: { type: 'enabled' }
|
||||
}
|
||||
}
|
||||
|
||||
// Default case: no special thinking settings
|
||||
return {}
|
||||
}
|
||||
@ -430,7 +435,7 @@ export function getOpenAIReasoningParams(
|
||||
|
||||
let reasoningEffort = assistant?.settings?.reasoning_effort
|
||||
|
||||
if (!reasoningEffort) {
|
||||
if (!reasoningEffort || reasoningEffort === 'default') {
|
||||
return {}
|
||||
}
|
||||
|
||||
@ -482,16 +487,14 @@ export function getAnthropicThinkingBudget(
return undefined
}

const budgetTokens = Math.max(
1024,
Math.floor(
Math.min(
(tokenLimit.max - tokenLimit.min) * effortRatio + tokenLimit.min,
(maxTokens || DEFAULT_MAX_TOKENS) * effortRatio
)
)
)
return budgetTokens
const budget = Math.floor((tokenLimit.max - tokenLimit.min) * effortRatio + tokenLimit.min)

let budgetTokens = budget
if (maxTokens !== undefined) {
budgetTokens = Math.min(budget, maxTokens)
}

return Math.max(1024, budgetTokens)
}

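A worked example of the rewritten budget calculation, using an assumed effort ratio of 0.5 for 'medium' and a token limit of { min: 1024, max: 32768 } (the same figures the tests above use):

// budget = floor((32768 - 1024) * 0.5 + 1024) = 16896
// with maxTokens = 4096: min(16896, 4096) = 4096; max(1024, 4096) = 4096
// with maxTokens undefined: budget is not capped; max(1024, 16896) = 16896
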
/**
|
||||
@ -508,7 +511,11 @@ export function getAnthropicReasoningParams(
|
||||
|
||||
const reasoningEffort = assistant?.settings?.reasoning_effort
|
||||
|
||||
if (reasoningEffort === undefined || reasoningEffort === 'none') {
|
||||
if (!reasoningEffort || reasoningEffort === 'default') {
|
||||
return {}
|
||||
}
|
||||
|
||||
if (reasoningEffort === 'none') {
|
||||
return {
|
||||
thinking: {
|
||||
type: 'disabled'
|
||||
@ -532,20 +539,25 @@ export function getAnthropicReasoningParams(
|
||||
return {}
|
||||
}
|
||||
|
||||
// type GoogleThinkingLevel = NonNullable<GoogleGenerativeAIProviderOptions['thinkingConfig']>['thinkingLevel']
|
||||
type GoogleThinkingLevel = NonNullable<GoogleGenerativeAIProviderOptions['thinkingConfig']>['thinkingLevel']
|
||||
|
||||
// function mapToGeminiThinkingLevel(reasoningEffort: ReasoningEffortOption): GoogelThinkingLevel {
|
||||
// switch (reasoningEffort) {
|
||||
// case 'low':
|
||||
// return 'low'
|
||||
// case 'medium':
|
||||
// return 'medium'
|
||||
// case 'high':
|
||||
// return 'high'
|
||||
// default:
|
||||
// return 'medium'
|
||||
// }
|
||||
// }
|
||||
function mapToGeminiThinkingLevel(reasoningEffort: ReasoningEffortOption): GoogleThinkingLevel {
|
||||
switch (reasoningEffort) {
|
||||
case 'default':
|
||||
return undefined
|
||||
case 'minimal':
|
||||
return 'minimal'
|
||||
case 'low':
|
||||
return 'low'
|
||||
case 'medium':
|
||||
return 'medium'
|
||||
case 'high':
|
||||
return 'high'
|
||||
default:
|
||||
logger.warn('Unknown thinking level for Gemini. Fallback to medium instead.', { reasoningEffort })
|
||||
return 'medium'
|
||||
}
|
||||
}
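Reviewer note: the same mapping written as a lookup table, purely for reference. The real types come from the provider options; the names below are illustrative.

// Equivalent lookup-table view of the mapping above.
type SketchThinkingLevel = 'minimal' | 'low' | 'medium' | 'high' | undefined

const GEMINI_THINKING_LEVEL_BY_EFFORT: Record<string, SketchThinkingLevel> = {
  default: undefined, // 'default' means: omit thinkingLevel entirely
  minimal: 'minimal',
  low: 'low',
  medium: 'medium',
  high: 'high'
}

// Unknown values (e.g. 'xhigh') are not in the table; the real function logs a warning and uses 'medium'.
const requested = 'xhigh'
const level = requested in GEMINI_THINKING_LEVEL_BY_EFFORT ? GEMINI_THINKING_LEVEL_BY_EFFORT[requested] : 'medium'
console.log(level) // -> 'medium'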

/**
 * Get Gemini reasoning parameters
@ -563,6 +575,10 @@ export function getGeminiReasoningParams(

  const reasoningEffort = assistant?.settings?.reasoning_effort

  if (!reasoningEffort || reasoningEffort === 'default') {
    return {}
  }

  // Gemini reasoning parameters
  if (isSupportedThinkingTokenGeminiModel(model)) {
    if (reasoningEffort === undefined || reasoningEffort === 'none') {
@ -574,21 +590,22 @@ export function getGeminiReasoningParams(
    }
  }

  // TODO: many relay providers do not support this yet
  // https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#new_api_features_in_gemini_3
  // if (isGemini3ThinkingTokenModel(model)) {
  //   return {
  //     thinkingConfig: {
  //       thinkingLevel: mapToGeminiThinkingLevel(reasoningEffort)
  //     }
  //   }
  // }
  if (isGemini3ThinkingTokenModel(model)) {
    return {
      thinkingConfig: {
        includeThoughts: true,
        thinkingLevel: mapToGeminiThinkingLevel(reasoningEffort)
      }
    }
  }

  const effortRatio = EFFORT_RATIO[reasoningEffort]

  if (effortRatio > 1) {
    return {
      thinkingConfig: {
        thinkingBudget: -1,
        includeThoughts: true
      }
    }
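Reviewer note: a rough sketch of the two thinkingConfig shapes this path can now emit. Field names are taken from the hunks above; the branch conditions are simplified stand-ins for the real model checks, and the concrete budget computation is outside the shown hunk.

// Sketch of the two result shapes; not the actual implementation.
type GeminiThinkingSketch =
  | { thinkingConfig: { includeThoughts: true; thinkingLevel?: 'minimal' | 'low' | 'medium' | 'high' } }
  | { thinkingConfig: { includeThoughts: true; thinkingBudget: number } }

function sketchGeminiParams(
  isGemini3: boolean,
  level: 'low' | 'medium' | 'high',
  effortRatio: number,
  computedBudget: number
): GeminiThinkingSketch {
  if (isGemini3) {
    // Gemini 3 models take a discrete thinking level instead of a token budget.
    return { thinkingConfig: { includeThoughts: true, thinkingLevel: level } }
  }
  if (effortRatio > 1) {
    // Per the hunk above, an effort ratio above 1 maps to an unlimited budget (-1).
    return { thinkingConfig: { includeThoughts: true, thinkingBudget: -1 } }
  }
  // Otherwise a concrete budget is used; its computation is not shown here, so it is a parameter.
  return { thinkingConfig: { includeThoughts: true, thinkingBudget: computedBudget } }
}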
@ -622,10 +639,6 @@ export function getXAIReasoningParams(assistant: Assistant, model: Model): Pick<

  const { reasoning_effort: reasoningEffort } = getAssistantSettings(assistant)

  if (!reasoningEffort || reasoningEffort === 'none') {
    return {}
  }

  switch (reasoningEffort) {
    case 'auto':
    case 'minimal':
@ -634,6 +647,12 @@ export function getXAIReasoningParams(assistant: Assistant, model: Model): Pick<
    case 'low':
    case 'high':
      return { reasoningEffort }
    case 'xhigh':
      return { reasoningEffort: 'high' }
    case 'default':
    case 'none':
    default:
      return {}
  }
}
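Reviewer note: a condensed view of the visible mapping. The 'auto'/'minimal' branch is cut off by the hunk, so it is left out of this sketch.

// Sketch: how the visible cases map assistant reasoning_effort to the xAI parameter.
function sketchXaiEffort(effort: 'low' | 'high' | 'xhigh' | 'default' | 'none'): { reasoningEffort?: 'low' | 'high' } {
  switch (effort) {
    case 'low':
    case 'high':
      return { reasoningEffort: effort } // passed through unchanged
    case 'xhigh':
      return { reasoningEffort: 'high' } // mapped down to 'high'
    case 'default':
    case 'none':
      return {} // no reasoning parameter sent
  }
}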

@ -650,7 +669,7 @@ export function getBedrockReasoningParams(

  const reasoningEffort = assistant?.settings?.reasoning_effort

  if (reasoningEffort === undefined) {
  if (reasoningEffort === undefined || reasoningEffort === 'default') {
    return {}
  }

@ -9,6 +9,8 @@ import type { CherryWebSearchConfig } from '@renderer/store/websearch'
import type { Model } from '@renderer/types'
import { mapRegexToPatterns } from '@renderer/utils/blacklistMatchPattern'

const X_AI_MAX_SEARCH_RESULT = 30

export function getWebSearchParams(model: Model): Record<string, any> {
  if (model.provider === 'hunyuan') {
    return { enable_enhancement: true, citation: true, search_info: true }
@ -82,7 +84,7 @@ export function buildProviderBuiltinWebSearchConfig(
  const excludeDomains = mapRegexToPatterns(webSearchConfig.excludeDomains)
  return {
    xai: {
      maxSearchResults: webSearchConfig.maxResults,
      maxSearchResults: Math.min(webSearchConfig.maxResults, X_AI_MAX_SEARCH_RESULT),
      returnCitations: true,
      sources: [
        {
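Reviewer note: the clamp itself is trivial but easy to sanity-check in isolation; the constant is copied from the hunk above.

// Clamp the user-configured result count to the ceiling the new constant encodes.
const X_AI_MAX_SEARCH_RESULT = 30
const clampResults = (maxResults: number) => Math.min(maxResults, X_AI_MAX_SEARCH_RESULT)

console.log(clampResults(10)) // -> 10 (within the limit)
console.log(clampResults(100)) // -> 30 (capped)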
Binary file not shown.
Before: 3.2 KiB, After: 19 KiB
Binary file not shown.
Before: 3.2 KiB, After: 19 KiB
17
src/renderer/src/assets/images/models/mimo.svg
Normal file
17
src/renderer/src/assets/images/models/mimo.svg
Normal file
@ -0,0 +1,17 @@
<svg width="100" height="100" viewBox="0 0 100 100" fill="none" xmlns="http://www.w3.org/2000/svg">
<g transform="translate(10, 42) scale(1.35)">
<!-- m -->
<path d="M1.2683 15.9987C0.9317 15.998 0.6091 15.8638 0.3713 15.6256C0.1335 15.3873 0 15.0644 0 14.7278V7.165C0.0148 6.83757 0.1554 6.52848 0.3924 6.30203C0.6293 6.07559 0.9445 5.94922 1.2722 5.94922C1.6 5.94922 1.9152 6.07559 2.1521 6.30203C2.3891 6.52848 2.5296 6.83757 2.5445 7.165V14.7278C2.5442 14.895 2.5109 15.0606 2.4466 15.215C2.3822 15.3693 2.2881 15.5095 2.1696 15.6276C2.0511 15.7456 1.9105 15.8391 1.7559 15.9028C1.6012 15.9665 1.4356 15.9991 1.2683 15.9987Z" fill="currentColor"/>
<path d="M14.8841 15.9993C14.5468 15.9993 14.2232 15.8655 13.9845 15.6272C13.7457 15.389 13.6112 15.0657 13.6105 14.7284V4.67881L8.9888 9.45281C8.7538 9.69657 8.4315 9.83697 8.0929 9.84312C7.7544 9.84928 7.4272 9.72069 7.1835 9.48563C6.9397 9.25058 6.7993 8.92832 6.7931 8.58976C6.7901 8.42211 6.8201 8.25551 6.8814 8.09947C6.9428 7.94342 7.0342 7.80098 7.1506 7.68028L13.9703 0.661082C14.1463 0.478921 14.3728 0.35354 14.6207 0.301033C14.8685 0.248526 15.1264 0.271291 15.3612 0.366403C15.5961 0.461516 15.7971 0.624637 15.9385 0.834827C16.08 1.04502 16.1554 1.29268 16.1551 1.54603V14.7284C16.1551 15.0655 16.0212 15.3887 15.7828 15.6271C15.5444 15.8654 15.2212 15.9993 14.8841 15.9993Z" fill="currentColor"/>
<path d="M8.0748 9.82621C7.9058 9.82749 7.7383 9.79518 7.5818 9.73117C7.4254 9.66716 7.2833 9.57272 7.1636 9.45332L0.3571 2.4315C0.1224 2.18948 -0.0065 1.86414 -0.0014 1.52705C0.0038 1.18996 0.1427 0.868726 0.3847 0.634023C0.6267 0.399319 0.9521 0.270369 1.2892 0.27554C1.6262 0.280711 1.9475 0.419579 2.1822 0.661595L8.9887 7.66767C9.1623 7.84735 9.2792 8.07413 9.3249 8.31977C9.3706 8.56541 9.343 8.81906 9.2456 9.04914C9.1482 9.27922 8.9852 9.47557 8.7771 9.61374C8.5689 9.75191 8.3247 9.8258 8.0748 9.82621Z" fill="currentColor"/>
<!-- i -->
<path d="M20.3539 15.9997C20.0169 15.9997 19.6936 15.8658 19.4552 15.6274C19.2169 15.3891 19.083 15.0658 19.083 14.7287V1.54636C19.083 1.20928 19.2169 0.886001 19.4552 0.647648C19.6936 0.409296 20.0169 0.275391 20.3539 0.275391C20.691 0.275391 21.0143 0.409296 21.2526 0.647648C21.491 0.886001 21.6249 1.20928 21.6249 1.54636V14.7287C21.6249 14.8956 21.592 15.0609 21.5282 15.2151C21.4643 15.3693 21.3707 15.5094 21.2526 15.6274C21.1346 15.7454 20.9945 15.839 20.8403 15.9029C20.6861 15.9668 20.5208 15.9997 20.3539 15.9997Z" fill="currentColor"/>
<!-- m -->
<path d="M25.8263 15.9992C25.4893 15.9992 25.166 15.8653 24.9276 15.627C24.6893 15.3886 24.5554 15.0654 24.5554 14.7283V7.1655C24.5554 6.82842 24.6893 6.50514 24.9276 6.26679C25.166 6.02844 25.4893 5.89453 25.8263 5.89453C26.1634 5.89453 26.4867 6.02844 26.7251 6.26679C26.9634 6.50514 27.0973 6.82842 27.0973 7.1655V14.7283C27.0973 15.0654 26.9634 15.3886 26.7251 15.627C26.4867 15.8653 26.1634 15.9992 25.8263 15.9992Z" fill="currentColor"/>
<path d="M39.4394 16.0004C39.1023 16.0004 38.779 15.8664 38.5406 15.6281C38.3023 15.3897 38.1684 15.0665 38.1684 14.7294V4.67982L33.5467 9.45382C33.3117 9.69584 32.9901 9.83457 32.6523 9.83949C32.3156 9.84442 31.9894 9.71513 31.7474 9.48008C31.5054 9.24503 31.3674 8.92346 31.3623 8.58613C31.3573 8.24879 31.4863 7.92331 31.7214 7.6813L38.5284 0.662093C38.7044 0.483575 38.9304 0.361405 39.1767 0.311007C39.4233 0.260609 39.6787 0.284243 39.9114 0.378925C40.1437 0.473608 40.3427 0.635093 40.4837 0.842994C40.6247 1.05089 40.7007 1.29589 40.7027 1.54704V14.7294C40.7017 15.0649 40.5687 15.3866 40.3327 15.6246C40.0957 15.8625 39.7747 15.9976 39.4394 16.0004Z" fill="currentColor"/>
<path d="M32.6324 9.82618C32.4634 9.82746 32.2964 9.79516 32.1394 9.73115C31.9834 9.66713 31.8414 9.57269 31.7214 9.45329L24.9151 2.43147C24.7921 2.31326 24.6942 2.1715 24.6271 2.01463C24.5601 1.85777 24.5253 1.68901 24.5249 1.51842C24.5244 1.34783 24.5583 1.1789 24.6246 1.02169C24.6908 0.864476 24.788 0.722207 24.9104 0.603357C25.0327 0.484507 25.1778 0.391509 25.3369 0.329905C25.4959 0.268302 25.6658 0.239353 25.8363 0.244785C26.0068 0.250217 26.1745 0.289918 26.3293 0.361522C26.4841 0.433126 26.623 0.535168 26.7375 0.661566L33.5467 7.66764C33.7204 7.84732 33.8374 8.0741 33.8824 8.31974C33.9284 8.56538 33.9014 8.81903 33.8034 9.04911C33.7064 9.27919 33.5434 9.47554 33.3354 9.61371C33.1267 9.75189 32.8824 9.82577 32.6324 9.82618Z" fill="currentColor"/>
<!-- o -->
<path d="M50.9434 15.9814C49.5534 15.9865 48.1864 15.6287 46.9774 14.9433C45.7674 14.2579 44.7584 13.2687 44.0484 12.0735C43.3384 10.8783 42.9534 9.5185 42.9304 8.12863C42.9074 6.73875 43.2474 5.36692 43.9164 4.1488C44.0844 3.86356 44.3564 3.65487 44.6754 3.56707C44.9944 3.47927 45.3344 3.51928 45.6244 3.67859C45.9144 3.8379 46.1314 4.10397 46.2274 4.42026C46.3244 4.73656 46.2944 5.07816 46.1434 5.3725C45.5764 6.40664 45.3594 7.59693 45.5264 8.76468C45.6924 9.93243 46.2334 11.0147 47.0674 11.8489C47.9014 12.6831 48.9834 13.2244 50.1514 13.3914C51.3184 13.5584 52.5094 13.3421 53.5434 12.7751C53.8384 12.6125 54.1864 12.5738 54.5104 12.6676C54.8344 12.7614 55.1074 12.98 55.2704 13.2753C55.4324 13.5706 55.4714 13.9184 55.3774 14.2422C55.2834 14.566 55.0654 14.8393 54.7694 15.0019C53.5974 15.6455 52.2814 15.9824 50.9434 15.9814Z" fill="currentColor"/>
<path d="M56.8104 12.5052C56.5944 12.5044 56.3834 12.4484 56.1954 12.3424C55.9014 12.1795 55.6824 11.9066 55.5894 11.5833C55.4954 11.26 55.5324 10.9126 55.6944 10.6171C56.2614 9.58297 56.4784 8.39268 56.3114 7.22493C56.1454 6.05718 55.6044 4.97496 54.7704 4.14073C53.9364 3.30649 52.8544 2.76525 51.6864 2.59825C50.5194 2.43125 49.3284 2.64749 48.2944 3.21452C48.1474 3.30059 47.9854 3.3564 47.8164 3.37863C47.6484 3.40087 47.4774 3.38908 47.3134 3.34397C47.1494 3.29886 46.9964 3.22134 46.8624 3.116C46.7294 3.01066 46.6184 2.87964 46.5364 2.73069C46.4544 2.58174 46.4034 2.41788 46.3864 2.24882C46.3684 2.07975 46.3854 1.90891 46.4354 1.7464C46.4854 1.58389 46.5674 1.43301 46.6764 1.3027C46.7854 1.17238 46.9194 1.06527 47.0704 0.987704C48.5874 0.155491 50.3324 -0.162266 52.0454 0.0821474C53.7574 0.326561 55.3454 1.11995 56.5684 2.34319C57.7914 3.56642 58.5844 5.15347 58.8294 6.86604C59.0734 8.5786 58.7554 10.3242 57.9234 11.8408C57.8144 12.0411 57.6534 12.2084 57.4574 12.3253C57.2624 12.4422 57.0384 12.5043 56.8104 12.5052Z" fill="currentColor"/>
</g>
</svg>
After: 6.2 KiB
Some files were not shown because too many files have changed in this diff.