Mirror of https://github.com/CherryHQ/cherry-studio.git (synced 2025-12-25 11:20:07 +08:00)

Merge branch 'main' into feat/bonjour
Commit c2c416ea93
.github/workflows/auto-i18n.yml (vendored, 2 changes)

@@ -54,7 +54,7 @@ jobs:
          yarn install

      - name: 🏃♀️ Translate
        run: yarn sync:i18n && yarn auto:i18n
        run: yarn i18n:sync && yarn i18n:translate

      - name: 🔍 Format
        run: yarn format
.github/workflows/pr-ci.yml (vendored, 2 changes)

@@ -58,7 +58,7 @@ jobs:
        run: yarn typecheck

      - name: i18n Check
        run: yarn check:i18n
        run: yarn i18n:check

      - name: Test
        run: yarn test
.github/workflows/sync-to-gitcode.yml (vendored, new file, 305 lines)

@@ -0,0 +1,305 @@
name: Sync Release to GitCode

on:
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      tag:
        description: 'Release tag (e.g. v1.0.0)'
        required: true
      clean:
        description: 'Clean node_modules before build'
        type: boolean
        default: false

permissions:
  contents: read

jobs:
  build-and-sync-to-gitcode:
    runs-on: [self-hosted, windows-signing]
    steps:
      - name: Get tag name
        id: get-tag
        shell: bash
        run: |
          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
            echo "tag=${{ github.event.inputs.tag }}" >> $GITHUB_OUTPUT
          else
            echo "tag=${{ github.event.release.tag_name }}" >> $GITHUB_OUTPUT
          fi

      - name: Check out Git repository
        uses: actions/checkout@v6
        with:
          fetch-depth: 0
          ref: ${{ steps.get-tag.outputs.tag }}

      - name: Set package.json version
        shell: bash
        run: |
          TAG="${{ steps.get-tag.outputs.tag }}"
          VERSION="${TAG#v}"
          npm version "$VERSION" --no-git-tag-version --allow-same-version

      - name: Install Node.js
        uses: actions/setup-node@v6
        with:
          node-version: 22

      - name: Install corepack
        shell: bash
        run: corepack enable && corepack prepare yarn@4.9.1 --activate

      - name: Clean node_modules
        if: ${{ github.event.inputs.clean == 'true' }}
        shell: bash
        run: rm -rf node_modules

      - name: Install Dependencies
        shell: bash
        run: yarn install

      - name: Build Windows with code signing
        shell: bash
        run: yarn build:win
        env:
          WIN_SIGN: true
          CHERRY_CERT_PATH: ${{ secrets.CHERRY_CERT_PATH }}
          CHERRY_CERT_KEY: ${{ secrets.CHERRY_CERT_KEY }}
          CHERRY_CERT_CSP: ${{ secrets.CHERRY_CERT_CSP }}
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          NODE_OPTIONS: --max-old-space-size=8192
          MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }}
          MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }}
          RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }}
          RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }}

      - name: List built Windows artifacts
        shell: bash
        run: |
          echo "Built Windows artifacts:"
          ls -la dist/*.exe dist/*.blockmap dist/latest*.yml

      - name: Download GitHub release assets
        shell: bash
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          TAG_NAME: ${{ steps.get-tag.outputs.tag }}
        run: |
          echo "Downloading release assets for $TAG_NAME..."
          mkdir -p release-assets
          cd release-assets

          # Download all assets from the release
          gh release download "$TAG_NAME" \
            --repo "${{ github.repository }}" \
            --pattern "*" \
            --skip-existing

          echo "Downloaded GitHub release assets:"
          ls -la

      - name: Replace Windows files with signed versions
        shell: bash
        run: |
          echo "Replacing Windows files with signed versions..."

          # Verify signed files exist first
          if ! ls dist/*.exe 1>/dev/null 2>&1; then
            echo "ERROR: No signed .exe files found in dist/"
            exit 1
          fi

          # Remove unsigned Windows files from downloaded assets
          # *.exe, *.exe.blockmap, latest.yml (Windows only)
          rm -f release-assets/*.exe release-assets/*.exe.blockmap release-assets/latest.yml 2>/dev/null || true

          # Copy signed Windows files with error checking
          cp dist/*.exe release-assets/ || { echo "ERROR: Failed to copy .exe files"; exit 1; }
          cp dist/*.exe.blockmap release-assets/ || { echo "ERROR: Failed to copy .blockmap files"; exit 1; }
          cp dist/latest.yml release-assets/ || { echo "ERROR: Failed to copy latest.yml"; exit 1; }

          echo "Final release assets:"
          ls -la release-assets/

      - name: Get release info
        id: release-info
        shell: bash
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          TAG_NAME: ${{ steps.get-tag.outputs.tag }}
          LANG: C.UTF-8
          LC_ALL: C.UTF-8
        run: |
          # Always use gh cli to avoid special character issues
          RELEASE_NAME=$(gh release view "$TAG_NAME" --repo "${{ github.repository }}" --json name -q '.name')
          # Use delimiter to safely handle special characters in release name
          {
            echo 'name<<EOF'
            echo "$RELEASE_NAME"
            echo 'EOF'
          } >> $GITHUB_OUTPUT
          # Extract releaseNotes from electron-builder.yml (from "releaseNotes: |" to end of file, remove 4-space indent)
          sed -n '/releaseNotes: |/,$ { /releaseNotes: |/d; s/^    //; p }' electron-builder.yml > release_body.txt

      - name: Create GitCode release and upload files
        shell: bash
        env:
          GITCODE_TOKEN: ${{ secrets.GITCODE_TOKEN }}
          GITCODE_OWNER: ${{ vars.GITCODE_OWNER }}
          GITCODE_REPO: ${{ vars.GITCODE_REPO }}
          GITCODE_API_URL: ${{ vars.GITCODE_API_URL }}
          TAG_NAME: ${{ steps.get-tag.outputs.tag }}
          RELEASE_NAME: ${{ steps.release-info.outputs.name }}
          LANG: C.UTF-8
          LC_ALL: C.UTF-8
        run: |
          # Validate required environment variables
          if [ -z "$GITCODE_TOKEN" ]; then
            echo "ERROR: GITCODE_TOKEN is not set"
            exit 1
          fi
          if [ -z "$GITCODE_OWNER" ]; then
            echo "ERROR: GITCODE_OWNER is not set"
            exit 1
          fi
          if [ -z "$GITCODE_REPO" ]; then
            echo "ERROR: GITCODE_REPO is not set"
            exit 1
          fi

          API_URL="${GITCODE_API_URL:-https://api.gitcode.com/api/v5}"

          echo "Creating GitCode release..."
          echo "Tag: $TAG_NAME"
          echo "Repo: $GITCODE_OWNER/$GITCODE_REPO"

          # Step 1: Create release
          # Use --rawfile to read body directly from file, avoiding shell variable encoding issues
          jq -n \
            --arg tag "$TAG_NAME" \
            --arg name "$RELEASE_NAME" \
            --rawfile body release_body.txt \
            '{
              tag_name: $tag,
              name: $name,
              body: $body,
              target_commitish: "main"
            }' > /tmp/release_payload.json

          RELEASE_RESPONSE=$(curl -s -w "\n%{http_code}" -X POST \
            --connect-timeout 30 --max-time 60 \
            "${API_URL}/repos/${GITCODE_OWNER}/${GITCODE_REPO}/releases" \
            -H "Content-Type: application/json; charset=utf-8" \
            -H "Authorization: Bearer ${GITCODE_TOKEN}" \
            --data-binary "@/tmp/release_payload.json")

          HTTP_CODE=$(echo "$RELEASE_RESPONSE" | tail -n1)
          RESPONSE_BODY=$(echo "$RELEASE_RESPONSE" | sed '$d')

          if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ]; then
            echo "Release created successfully"
          else
            echo "Warning: Release creation returned HTTP $HTTP_CODE"
            echo "$RESPONSE_BODY"
            exit 1
          fi

          # Step 2: Upload files to release
          echo "Uploading files to GitCode release..."

          # Function to upload a single file with retry
          upload_file() {
            local file="$1"
            local filename=$(basename "$file")
            local max_retries=3
            local retry=0
            local curl_status=0

            echo "Uploading: $filename"

            # URL encode the filename
            encoded_filename=$(printf '%s' "$filename" | jq -sRr @uri)

            while [ $retry -lt $max_retries ]; do
              # Get upload URL
              curl_status=0
              UPLOAD_INFO=$(curl -s --connect-timeout 30 --max-time 60 \
                -H "Authorization: Bearer ${GITCODE_TOKEN}" \
                "${API_URL}/repos/${GITCODE_OWNER}/${GITCODE_REPO}/releases/${TAG_NAME}/upload_url?file_name=${encoded_filename}") || curl_status=$?

              if [ $curl_status -eq 0 ]; then
                UPLOAD_URL=$(echo "$UPLOAD_INFO" | jq -r '.url // empty')

                if [ -n "$UPLOAD_URL" ]; then
                  # Write headers to temp file to avoid shell escaping issues
                  echo "$UPLOAD_INFO" | jq -r '.headers | to_entries[] | "header = \"" + .key + ": " + .value + "\""' > /tmp/upload_headers.txt

                  # Upload file using PUT with headers from file
                  curl_status=0
                  UPLOAD_RESPONSE=$(curl -s -w "\n%{http_code}" -X PUT \
                    -K /tmp/upload_headers.txt \
                    --data-binary "@${file}" \
                    "$UPLOAD_URL") || curl_status=$?

                  if [ $curl_status -eq 0 ]; then
                    HTTP_CODE=$(echo "$UPLOAD_RESPONSE" | tail -n1)
                    RESPONSE_BODY=$(echo "$UPLOAD_RESPONSE" | sed '$d')

                    if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ]; then
                      echo " Uploaded: $filename"
                      return 0
                    else
                      echo " Failed (HTTP $HTTP_CODE), retry $((retry + 1))/$max_retries"
                      echo " Response: $RESPONSE_BODY"
                    fi
                  else
                    echo " Upload request failed (curl exit $curl_status), retry $((retry + 1))/$max_retries"
                  fi
                else
                  echo " Failed to get upload URL, retry $((retry + 1))/$max_retries"
                  echo " Response: $UPLOAD_INFO"
                fi
              else
                echo " Failed to get upload URL (curl exit $curl_status), retry $((retry + 1))/$max_retries"
                echo " Response: $UPLOAD_INFO"
              fi

              retry=$((retry + 1))
              [ $retry -lt $max_retries ] && sleep 3
            done

            echo " Failed: $filename after $max_retries retries"
            exit 1
          }

          # Upload non-yml/json files first
          for file in release-assets/*; do
            if [ -f "$file" ]; then
              filename=$(basename "$file")
              if [[ ! "$filename" =~ \.(yml|yaml|json)$ ]]; then
                upload_file "$file"
              fi
            fi
          done

          # Upload yml/json files last
          for file in release-assets/*; do
            if [ -f "$file" ]; then
              filename=$(basename "$file")
              if [[ "$filename" =~ \.(yml|yaml|json)$ ]]; then
                upload_file "$file"
              fi
            fi
          done

          echo "GitCode release sync completed!"

      - name: Cleanup temp files
        if: always()
        shell: bash
        run: |
          rm -f /tmp/release_payload.json /tmp/upload_headers.txt release_body.txt
          rm -rf release-assets/
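The upload step above talks to GitCode in two phases: it first asks the release API for a pre-signed upload URL for each asset, then PUTs the file bytes to that URL using the headers the API returns, retrying up to three times and saving yml/json metadata files for last, presumably so update manifests only appear after the binaries they reference. A minimal TypeScript sketch of the same two-phase call is shown below; the endpoint path and the `{ url, headers }` response shape come from the script itself, while the fetch-based client, the `uploadReleaseAsset` name, and the error handling are illustrative assumptions rather than project code.

```ts
import { readFile } from 'node:fs/promises'
import { basename } from 'node:path'

// Hypothetical helper mirroring the workflow's upload_file(): request an upload URL
// from the GitCode release API, then PUT the file to it with the returned headers.
async function uploadReleaseAsset(
  apiUrl: string,
  owner: string,
  repo: string,
  tag: string,
  token: string,
  filePath: string
): Promise<void> {
  const fileName = encodeURIComponent(basename(filePath))
  const infoRes = await fetch(
    `${apiUrl}/repos/${owner}/${repo}/releases/${tag}/upload_url?file_name=${fileName}`,
    { headers: { Authorization: `Bearer ${token}` } }
  )
  if (!infoRes.ok) throw new Error(`Failed to get upload URL: HTTP ${infoRes.status}`)

  // The workflow reads `.url` and `.headers` from this response via jq
  const info = (await infoRes.json()) as { url: string; headers: Record<string, string> }

  const putRes = await fetch(info.url, {
    method: 'PUT',
    headers: info.headers,
    body: await readFile(filePath)
  })
  if (!putRes.ok) throw new Error(`Upload failed: HTTP ${putRes.status}`)
}
```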
@@ -1,8 +1,8 @@
diff --git a/dist/index.js b/dist/index.js
index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a70ea2b5a2 100644
index 130094d194ea1e8e7d3027d07d82465741192124..4d13dcee8c962ca9ee8f1c3d748f8ffe6a3cfb47 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -274,6 +274,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
@@ -290,6 +290,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
message: import_v42.z.object({
role: import_v42.z.literal("assistant").nullish(),
content: import_v42.z.string().nullish(),
@@ -10,7 +10,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
tool_calls: import_v42.z.array(
import_v42.z.object({
id: import_v42.z.string().nullish(),
@@ -340,6 +341,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
@@ -356,6 +357,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
delta: import_v42.z.object({
role: import_v42.z.enum(["assistant"]).nullish(),
content: import_v42.z.string().nullish(),
@@ -18,7 +18,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
tool_calls: import_v42.z.array(
import_v42.z.object({
index: import_v42.z.number(),
@@ -795,6 +797,13 @@ var OpenAIChatLanguageModel = class {
@@ -814,6 +816,13 @@ var OpenAIChatLanguageModel = class {
if (text != null && text.length > 0) {
content.push({ type: "text", text });
}
@@ -32,7 +32,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
content.push({
type: "tool-call",
@@ -876,6 +885,7 @@ var OpenAIChatLanguageModel = class {
@@ -895,6 +904,7 @@ var OpenAIChatLanguageModel = class {
};
let metadataExtracted = false;
let isActiveText = false;
@@ -40,7 +40,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
const providerMetadata = { openai: {} };
return {
stream: response.pipeThrough(
@@ -933,6 +943,21 @@ var OpenAIChatLanguageModel = class {
@@ -952,6 +962,21 @@ var OpenAIChatLanguageModel = class {
return;
}
const delta = choice.delta;
@@ -62,7 +62,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
if (delta.content != null) {
if (!isActiveText) {
controller.enqueue({ type: "text-start", id: "0" });
@@ -1045,6 +1070,9 @@ var OpenAIChatLanguageModel = class {
@@ -1064,6 +1089,9 @@ var OpenAIChatLanguageModel = class {
}
},
flush(controller) {
@@ -1,5 +1,5 @@
diff --git a/sdk.mjs b/sdk.mjs
index bf429a344b7d59f70aead16b639f949b07688a81..f77d50cc5d3fb04292cb3ac7fa7085d02dcc628f 100755
index dea7766a3432a1e809f12d6daba4f2834a219689..e0b02ef73da177ba32b903887d7bbbeaa08cc6d3 100755
--- a/sdk.mjs
+++ b/sdk.mjs
@@ -6250,7 +6250,7 @@ function createAbortController(maxListeners = DEFAULT_MAX_LISTENERS) {
@@ -11,7 +11,7 @@ index bf429a344b7d59f70aead16b639f949b07688a81..f77d50cc5d3fb04292cb3ac7fa7085d0
import { createInterface } from "readline";

// ../src/utils/fsOperations.ts
@@ -6619,18 +6619,11 @@ class ProcessTransport {
@@ -6644,18 +6644,11 @@ class ProcessTransport {
const errorMessage = isNativeBinary(pathToClaudeCodeExecutable) ? `Claude Code native binary not found at ${pathToClaudeCodeExecutable}. Please ensure Claude Code is installed via native installer or specify a valid path with options.pathToClaudeCodeExecutable.` : `Claude Code executable not found at ${pathToClaudeCodeExecutable}. Is options.pathToClaudeCodeExecutable set?`;
throw new ReferenceError(errorMessage);
}
.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch (vendored, new file, 145 lines)

@@ -0,0 +1,145 @@
diff --git a/dist/index.d.ts b/dist/index.d.ts
index 8dd9b498050dbecd8dd6b901acf1aa8ca38a49af..ed644349c9d38fe2a66b2fb44214f7c18eb97f89 100644
--- a/dist/index.d.ts
+++ b/dist/index.d.ts
@@ -4,7 +4,7 @@ import { z } from 'zod/v4';

type OllamaChatModelId = "athene-v2" | "athene-v2:72b" | "aya-expanse" | "aya-expanse:8b" | "aya-expanse:32b" | "codegemma" | "codegemma:2b" | "codegemma:7b" | "codellama" | "codellama:7b" | "codellama:13b" | "codellama:34b" | "codellama:70b" | "codellama:code" | "codellama:python" | "command-r" | "command-r:35b" | "command-r-plus" | "command-r-plus:104b" | "command-r7b" | "command-r7b:7b" | "deepseek-r1" | "deepseek-r1:1.5b" | "deepseek-r1:7b" | "deepseek-r1:8b" | "deepseek-r1:14b" | "deepseek-r1:32b" | "deepseek-r1:70b" | "deepseek-r1:671b" | "deepseek-coder-v2" | "deepseek-coder-v2:16b" | "deepseek-coder-v2:236b" | "deepseek-v3" | "deepseek-v3:671b" | "devstral" | "devstral:24b" | "dolphin3" | "dolphin3:8b" | "exaone3.5" | "exaone3.5:2.4b" | "exaone3.5:7.8b" | "exaone3.5:32b" | "falcon2" | "falcon2:11b" | "falcon3" | "falcon3:1b" | "falcon3:3b" | "falcon3:7b" | "falcon3:10b" | "firefunction-v2" | "firefunction-v2:70b" | "gemma" | "gemma:2b" | "gemma:7b" | "gemma2" | "gemma2:2b" | "gemma2:9b" | "gemma2:27b" | "gemma3" | "gemma3:1b" | "gemma3:4b" | "gemma3:12b" | "gemma3:27b" | "granite3-dense" | "granite3-dense:2b" | "granite3-dense:8b" | "granite3-guardian" | "granite3-guardian:2b" | "granite3-guardian:8b" | "granite3-moe" | "granite3-moe:1b" | "granite3-moe:3b" | "granite3.1-dense" | "granite3.1-dense:2b" | "granite3.1-dense:8b" | "granite3.1-moe" | "granite3.1-moe:1b" | "granite3.1-moe:3b" | "llama2" | "llama2:7b" | "llama2:13b" | "llama2:70b" | "llama3" | "llama3:8b" | "llama3:70b" | "llama3-chatqa" | "llama3-chatqa:8b" | "llama3-chatqa:70b" | "llama3-gradient" | "llama3-gradient:8b" | "llama3-gradient:70b" | "llama3.1" | "llama3.1:8b" | "llama3.1:70b" | "llama3.1:405b" | "llama3.2" | "llama3.2:1b" | "llama3.2:3b" | "llama3.2-vision" | "llama3.2-vision:11b" | "llama3.2-vision:90b" | "llama3.3" | "llama3.3:70b" | "llama4" | "llama4:16x17b" | "llama4:128x17b" | "llama-guard3" | "llama-guard3:1b" | "llama-guard3:8b" | "llava" | "llava:7b" | "llava:13b" | "llava:34b" | "llava-llama3" | "llava-llama3:8b" | "llava-phi3" | "llava-phi3:3.8b" | "marco-o1" | "marco-o1:7b" | "mistral" | "mistral:7b" | "mistral-large" | "mistral-large:123b" | "mistral-nemo" | "mistral-nemo:12b" | "mistral-small" | "mistral-small:22b" | "mixtral" | "mixtral:8x7b" | "mixtral:8x22b" | "moondream" | "moondream:1.8b" | "openhermes" | "openhermes:v2.5" | "nemotron" | "nemotron:70b" | "nemotron-mini" | "nemotron-mini:4b" | "olmo" | "olmo:7b" | "olmo:13b" | "opencoder" | "opencoder:1.5b" | "opencoder:8b" | "phi3" | "phi3:3.8b" | "phi3:14b" | "phi3.5" | "phi3.5:3.8b" | "phi4" | "phi4:14b" | "qwen" | "qwen:7b" | "qwen:14b" | "qwen:32b" | "qwen:72b" | "qwen:110b" | "qwen2" | "qwen2:0.5b" | "qwen2:1.5b" | "qwen2:7b" | "qwen2:72b" | "qwen2.5" | "qwen2.5:0.5b" | "qwen2.5:1.5b" | "qwen2.5:3b" | "qwen2.5:7b" | "qwen2.5:14b" | "qwen2.5:32b" | "qwen2.5:72b" | "qwen2.5-coder" | "qwen2.5-coder:0.5b" | "qwen2.5-coder:1.5b" | "qwen2.5-coder:3b" | "qwen2.5-coder:7b" | "qwen2.5-coder:14b" | "qwen2.5-coder:32b" | "qwen3" | "qwen3:0.6b" | "qwen3:1.7b" | "qwen3:4b" | "qwen3:8b" | "qwen3:14b" | "qwen3:30b" | "qwen3:32b" | "qwen3:235b" | "qwq" | "qwq:32b" | "sailor2" | "sailor2:1b" | "sailor2:8b" | "sailor2:20b" | "shieldgemma" | "shieldgemma:2b" | "shieldgemma:9b" | "shieldgemma:27b" | "smallthinker" | "smallthinker:3b" | "smollm" | "smollm:135m" | "smollm:360m" | "smollm:1.7b" | "tinyllama" | "tinyllama:1.1b" | "tulu3" | "tulu3:8b" | "tulu3:70b" | (string & {});
declare const ollamaProviderOptions: z.ZodObject<{
- think: z.ZodOptional<z.ZodBoolean>;
+ think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodEnum<['low', 'medium', 'high']>]>>;
options: z.ZodOptional<z.ZodObject<{
num_ctx: z.ZodOptional<z.ZodNumber>;
repeat_last_n: z.ZodOptional<z.ZodNumber>;
@@ -27,9 +27,11 @@ interface OllamaCompletionSettings {
* the model's thinking from the model's output. When disabled, the model will not think
* and directly output the content.
*
+ * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking.
+ *
* Only supported by certain models like DeepSeek R1 and Qwen 3.
*/
- think?: boolean;
+ think?: boolean | 'low' | 'medium' | 'high';
/**
* Echo back the prompt in addition to the completion.
*/
@@ -146,7 +148,7 @@ declare const ollamaEmbeddingProviderOptions: z.ZodObject<{
type OllamaEmbeddingProviderOptions = z.infer<typeof ollamaEmbeddingProviderOptions>;

declare const ollamaCompletionProviderOptions: z.ZodObject<{
- think: z.ZodOptional<z.ZodBoolean>;
+ think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodEnum<['low', 'medium', 'high']>]>>;
user: z.ZodOptional<z.ZodString>;
suffix: z.ZodOptional<z.ZodString>;
echo: z.ZodOptional<z.ZodBoolean>;
diff --git a/dist/index.js b/dist/index.js
index 35b5142ce8476ce2549ed7c2ec48e7d8c46c90d9..2ef64dc9a4c2be043e6af608241a6a8309a5a69f 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -158,7 +158,7 @@ function getResponseMetadata({

// src/completion/ollama-completion-language-model.ts
var ollamaCompletionProviderOptions = import_v42.z.object({
- think: import_v42.z.boolean().optional(),
+ think: import_v42.z.union([import_v42.z.boolean(), import_v42.z.enum(['low', 'medium', 'high'])]).optional(),
user: import_v42.z.string().optional(),
suffix: import_v42.z.string().optional(),
echo: import_v42.z.boolean().optional()
@@ -662,7 +662,7 @@ function convertToOllamaChatMessages({
const images = content.filter((part) => part.type === "file" && part.mediaType.startsWith("image/")).map((part) => part.data);
messages.push({
role: "user",
- content: userText.length > 0 ? userText : [],
+ content: userText.length > 0 ? userText : '',
images: images.length > 0 ? images : void 0
});
break;
@@ -813,9 +813,11 @@ var ollamaProviderOptions = import_v44.z.object({
* the model's thinking from the model's output. When disabled, the model will not think
* and directly output the content.
*
+ * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking.
+ *
* Only supported by certain models like DeepSeek R1 and Qwen 3.
*/
- think: import_v44.z.boolean().optional(),
+ think: import_v44.z.union([import_v44.z.boolean(), import_v44.z.enum(['low', 'medium', 'high'])]).optional(),
options: import_v44.z.object({
num_ctx: import_v44.z.number().optional(),
repeat_last_n: import_v44.z.number().optional(),
@@ -929,14 +931,16 @@ var OllamaRequestBuilder = class {
prompt,
systemMessageMode: "system"
}),
- temperature,
- top_p: topP,
max_output_tokens: maxOutputTokens,
...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
format: responseFormat.schema != null ? responseFormat.schema : "json"
},
think: (_a = ollamaOptions == null ? void 0 : ollamaOptions.think) != null ? _a : false,
- options: (_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : void 0
+ options: {
+ ...temperature !== void 0 && { temperature },
+ ...topP !== void 0 && { top_p: topP },
+ ...((_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : {})
+ }
};
}
};
diff --git a/dist/index.mjs b/dist/index.mjs
index e2a634a78d80ac9542f2cc4f96cf2291094b10cf..67b23efce3c1cf4f026693d3ff9246988a3ef26e 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -144,7 +144,7 @@ function getResponseMetadata({

// src/completion/ollama-completion-language-model.ts
var ollamaCompletionProviderOptions = z2.object({
- think: z2.boolean().optional(),
+ think: z2.union([z2.boolean(), z2.enum(['low', 'medium', 'high'])]).optional(),
user: z2.string().optional(),
suffix: z2.string().optional(),
echo: z2.boolean().optional()
@@ -662,7 +662,7 @@ function convertToOllamaChatMessages({
const images = content.filter((part) => part.type === "file" && part.mediaType.startsWith("image/")).map((part) => part.data);
messages.push({
role: "user",
- content: userText.length > 0 ? userText : [],
+ content: userText.length > 0 ? userText : '',
images: images.length > 0 ? images : void 0
});
break;
@@ -815,9 +815,11 @@ var ollamaProviderOptions = z4.object({
* the model's thinking from the model's output. When disabled, the model will not think
* and directly output the content.
*
+ * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking.
+ *
* Only supported by certain models like DeepSeek R1 and Qwen 3.
*/
- think: z4.boolean().optional(),
+ think: z4.union([z4.boolean(), z4.enum(['low', 'medium', 'high'])]).optional(),
options: z4.object({
num_ctx: z4.number().optional(),
repeat_last_n: z4.number().optional(),
@@ -931,14 +933,16 @@ var OllamaRequestBuilder = class {
prompt,
systemMessageMode: "system"
}),
- temperature,
- top_p: topP,
max_output_tokens: maxOutputTokens,
...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
format: responseFormat.schema != null ? responseFormat.schema : "json"
},
think: (_a = ollamaOptions == null ? void 0 : ollamaOptions.think) != null ? _a : false,
- options: (_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : void 0
+ options: {
+ ...temperature !== void 0 && { temperature },
+ ...topP !== void 0 && { top_p: topP },
+ ...((_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : {})
+ }
};
}
};
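The heart of this patch is widening the provider's `think` option from a plain boolean to `boolean | 'low' | 'medium' | 'high'`. The standalone sketch below shows just the widened schema, using the same `zod/v4` import the patched files use; the surrounding provider wiring is omitted.

```ts
import { z } from 'zod/v4'

// Widened `think` option as introduced by the patch: an on/off boolean,
// or a reasoning-depth level for models such as gpt-oss.
const thinkSchema = z.union([z.boolean(), z.enum(['low', 'medium', 'high'])]).optional()

thinkSchema.parse(true) // valid before and after the patch
thinkSchema.parse('medium') // only valid with the patched union
// thinkSchema.parse('max') would throw: 'max' is not part of the enum
```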
CLAUDE.md (11 changes)

@@ -28,7 +28,7 @@ When creating a Pull Request, you MUST:
- **Development**: `yarn dev` - Runs Electron app in development mode with hot reload
- **Debug**: `yarn debug` - Starts with debugging enabled, use `chrome://inspect` to attach debugger
- **Build Check**: `yarn build:check` - **REQUIRED** before commits (lint + test + typecheck)
  - If having i18n sort issues, run `yarn sync:i18n` first to sync template
  - If having i18n sort issues, run `yarn i18n:sync` first to sync template
  - If having formatting issues, run `yarn format` first
- **Test**: `yarn test` - Run all tests (Vitest) across main and renderer processes
- **Single Test**:
@@ -40,20 +40,23 @@ When creating a Pull Request, you MUST:
## Project Architecture

### Electron Structure

- **Main Process** (`src/main/`): Node.js backend with services (MCP, Knowledge, Storage, etc.)
- **Renderer Process** (`src/renderer/`): React UI with Redux state management
- **Preload Scripts** (`src/preload/`): Secure IPC bridge

### Key Components

- **AI Core** (`src/renderer/src/aiCore/`): Middleware pipeline for multiple AI providers.
- **Services** (`src/main/services/`): MCPService, KnowledgeService, WindowService, etc.
- **Build System**: Electron-Vite with experimental rolldown-vite, yarn workspaces.
- **State Management**: Redux Toolkit (`src/renderer/src/store/`) for predictable state.

### Logging

```typescript
import { loggerService } from '@logger'
const logger = loggerService.withContext('moduleName')
import { loggerService } from "@logger";
const logger = loggerService.withContext("moduleName");
// Renderer: loggerService.initWindowSource('windowName') first
logger.info('message', CONTEXT)
logger.info("message", CONTEXT);
```
@@ -23,7 +23,7 @@
  },
  "files": {
    "ignoreUnknown": false,
    "includes": ["**", "!**/.claude/**", "!**/.vscode/**"],
    "includes": ["**", "!**/.claude/**", "!**/.vscode/**", "!**/.conductor/**"],
    "maxSize": 2097152
  },
  "formatter": {
@@ -12,8 +12,13 @@

; https://github.com/electron-userland/electron-builder/issues/1122
!ifndef BUILD_UNINSTALLER
; Check VC++ Redistributable based on architecture stored in $1
Function checkVCRedist
ReadRegDWORD $0 HKLM "SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\x64" "Installed"
${If} $1 == "arm64"
ReadRegDWORD $0 HKLM "SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\ARM64" "Installed"
${Else}
ReadRegDWORD $0 HKLM "SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\x64" "Installed"
${EndIf}
FunctionEnd

Function checkArchitectureCompatibility
@@ -97,29 +102,47 @@

Call checkVCRedist
${If} $0 != "1"
MessageBox MB_YESNO "\
NOTE: ${PRODUCT_NAME} requires $\r$\n\
'Microsoft Visual C++ Redistributable'$\r$\n\
to function properly.$\r$\n$\r$\n\
Download and install now?" /SD IDYES IDYES InstallVCRedist IDNO DontInstall
InstallVCRedist:
inetc::get /CAPTION " " /BANNER "Downloading Microsoft Visual C++ Redistributable..." "https://aka.ms/vs/17/release/vc_redist.x64.exe" "$TEMP\vc_redist.x64.exe"
ExecWait "$TEMP\vc_redist.x64.exe /install /norestart"
;IfErrors InstallError ContinueInstall ; vc_redist exit code is unreliable :(
Call checkVCRedist
${If} $0 == "1"
Goto ContinueInstall
${EndIf}
; VC++ is required - install automatically since declining would abort anyway
; Select download URL based on system architecture (stored in $1)
${If} $1 == "arm64"
StrCpy $2 "https://aka.ms/vs/17/release/vc_redist.arm64.exe"
StrCpy $3 "$TEMP\vc_redist.arm64.exe"
${Else}
StrCpy $2 "https://aka.ms/vs/17/release/vc_redist.x64.exe"
StrCpy $3 "$TEMP\vc_redist.x64.exe"
${EndIf}

;InstallError:
MessageBox MB_ICONSTOP "\
There was an unexpected error installing$\r$\n\
Microsoft Visual C++ Redistributable.$\r$\n\
The installation of ${PRODUCT_NAME} cannot continue."
DontInstall:
inetc::get /CAPTION " " /BANNER "Downloading Microsoft Visual C++ Redistributable..." \
$2 $3 /END
Pop $0 ; Get download status from inetc::get
${If} $0 != "OK"
MessageBox MB_ICONSTOP|MB_YESNO "\
Failed to download Microsoft Visual C++ Redistributable.$\r$\n$\r$\n\
Error: $0$\r$\n$\r$\n\
Would you like to open the download page in your browser?$\r$\n\
$2" IDYES openDownloadUrl IDNO skipDownloadUrl
openDownloadUrl:
ExecShell "open" $2
skipDownloadUrl:
Abort
${EndIf}

ExecWait "$3 /install /quiet /norestart"
; Note: vc_redist exit code is unreliable, verify via registry check instead

Call checkVCRedist
${If} $0 != "1"
MessageBox MB_ICONSTOP|MB_YESNO "\
Microsoft Visual C++ Redistributable installation failed.$\r$\n$\r$\n\
Would you like to open the download page in your browser?$\r$\n\
$2$\r$\n$\r$\n\
The installation of ${PRODUCT_NAME} cannot continue." IDYES openInstallUrl IDNO skipInstallUrl
openInstallUrl:
ExecShell "open" $2
skipInstallUrl:
Abort
${EndIf}
${EndIf}
ContinueInstall:
Pop $4
Pop $3
Pop $2
@@ -71,7 +71,7 @@ Tools like i18n Ally cannot parse dynamic content within template strings, resul

```javascript
// Not recommended - Plugin cannot resolve
const message = t(`fruits.${fruit}`)
const message = t(`fruits.${fruit}`);
```

#### 2. **No Real-time Rendering in Editor**
@@ -91,14 +91,14 @@ For example:
```ts
// src/renderer/src/i18n/label.ts
const themeModeKeyMap = {
  dark: 'settings.theme.dark',
  light: 'settings.theme.light',
  system: 'settings.theme.system'
} as const
  dark: "settings.theme.dark",
  light: "settings.theme.light",
  system: "settings.theme.system",
} as const;

export const getThemeModeLabel = (key: string): string => {
  return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key
}
  return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key;
};
```

By avoiding template strings, you gain better developer experience, more reliable translation checks, and a more maintainable codebase.
@@ -107,7 +107,7 @@ By avoiding template strings, you gain better developer experience, more reliabl

The project includes several scripts to automate i18n-related tasks:

### `check:i18n` - Validate i18n Structure
### `i18n:check` - Validate i18n Structure

This script checks:

@@ -116,10 +116,10 @@ This script checks:
- Whether keys are properly sorted

```bash
yarn check:i18n
yarn i18n:check
```

### `sync:i18n` - Synchronize JSON Structure and Sort Order
### `i18n:sync` - Synchronize JSON Structure and Sort Order

This script uses `zh-cn.json` as the source of truth to sync structure across all language files, including:

@@ -128,14 +128,14 @@ This script uses `zh-cn.json` as the source of truth to sync structure across al
3. Sorting keys automatically

```bash
yarn sync:i18n
yarn i18n:sync
```

### `auto:i18n` - Automatically Translate Pending Texts
### `i18n:translate` - Automatically Translate Pending Texts

This script fills in texts marked as `[to be translated]` using machine translation.

Typically, after adding new texts in `zh-cn.json`, run `sync:i18n`, then `auto:i18n` to complete translations.
Typically, after adding new texts in `zh-cn.json`, run `i18n:sync`, then `i18n:translate` to complete translations.

Before using this script, set the required environment variables:

@@ -148,30 +148,20 @@ MODEL="qwen-plus-latest"
Alternatively, add these variables directly to your `.env` file.

```bash
yarn auto:i18n
```

### `update:i18n` - Object-level Translation Update

Updates translations in language files under `src/renderer/src/i18n/translate` at the object level, preserving existing translations and only updating new content.

**Not recommended** — prefer `auto:i18n` for translation tasks.

```bash
yarn update:i18n
yarn i18n:translate
```

### Workflow

1. During development, first add the required text in `zh-cn.json`
2. Confirm it displays correctly in the Chinese environment
3. Run `yarn sync:i18n` to propagate the keys to other language files
4. Run `yarn auto:i18n` to perform machine translation
3. Run `yarn i18n:sync` to propagate the keys to other language files
4. Run `yarn i18n:translate` to perform machine translation
5. Grab a coffee and let the magic happen!

## Best Practices

1. **Use Chinese as Source Language**: All development starts in Chinese, then translates to other languages.
2. **Run Check Script Before Commit**: Use `yarn check:i18n` to catch i18n issues early.
2. **Run Check Script Before Commit**: Use `yarn i18n:check` to catch i18n issues early.
3. **Translate in Small Increments**: Avoid accumulating a large backlog of untranslated content.
4. **Keep Keys Semantically Clear**: Keys should clearly express their purpose, e.g., `user.profile.avatar.upload.error`
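As a rough illustration of the structural validation this document describes for `i18n:check`, the sketch below flattens nested locale objects into dotted key paths and diffs them against `zh-cn.json`. It is a hedged sketch of the idea only, not the actual `scripts/check-i18n.ts`.

```ts
// Hedged sketch of an i18n structure check: flatten nested keys and diff them
// against the zh-cn source of truth. Function and type names are illustrative.
type LocaleTree = { [key: string]: string | LocaleTree }

function flattenKeys(tree: LocaleTree, prefix = ''): string[] {
  return Object.entries(tree).flatMap(([key, value]) => {
    const path = prefix ? `${prefix}.${key}` : key
    return typeof value === 'string' ? [path] : flattenKeys(value, path)
  })
}

function diffLocale(source: LocaleTree, target: LocaleTree) {
  const sourceKeys = new Set(flattenKeys(source))
  const targetKeys = new Set(flattenKeys(target))
  return {
    missing: [...sourceKeys].filter((k) => !targetKeys.has(k)), // keys absent from the target locale
    extra: [...targetKeys].filter((k) => !sourceKeys.has(k)) // keys the target has but zh-cn does not
  }
}

// Example: diffLocale(zhCn, en) would report `settings.theme.dark` as missing
// if en.json lacks that nested key.
```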
@@ -1,17 +1,17 @@
# 如何优雅地做好 i18n

## 使用i18n ally插件提升开发体验
## 使用 i18n ally 插件提升开发体验

i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反馈,帮助开发者更早发现文案缺失和错译问题。
i18n ally 是一个强大的 VSCode 插件,它能在开发阶段提供实时反馈,帮助开发者更早发现文案缺失和错译问题。

项目中已经配置好了插件设置,直接安装即可。

### 开发时优势

- **实时预览**:翻译文案会直接显示在编辑器中
- **错误检测**:自动追踪标记出缺失的翻译或未使用的key
- **快速跳转**:可通过key直接跳转到定义处(Ctrl/Cmd + click)
- **自动补全**:输入i18n key时提供自动补全建议
- **错误检测**:自动追踪标记出缺失的翻译或未使用的 key
- **快速跳转**:可通过 key 直接跳转到定义处(Ctrl/Cmd + click)
- **自动补全**:输入 i18n key 时提供自动补全建议

### 效果展示

@@ -23,9 +23,9 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反

## i18n 约定

### **绝对避免使用flat格式**
### **绝对避免使用 flat 格式**

绝对避免使用flat格式,如`"add.button.tip": "添加"`。应采用清晰的嵌套结构:
绝对避免使用 flat 格式,如`"add.button.tip": "添加"`。应采用清晰的嵌套结构:

```json
// 错误示例 - flat结构
@@ -52,14 +52,14 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反
#### 为什么要使用嵌套结构

1. **自然分组**:通过对象结构天然能将相关上下文的文案分到一个组别中
2. **插件要求**:i18n ally 插件需要嵌套或flat格式其一的文件才能正常分析
2. **插件要求**:i18n ally 插件需要嵌套或 flat 格式其一的文件才能正常分析

### **避免在`t()`中使用模板字符串**

**强烈建议避免使用模板字符串**进行动态插值。虽然模板字符串在JavaScript开发中非常方便,但在国际化场景下会带来一系列问题。
**强烈建议避免使用模板字符串**进行动态插值。虽然模板字符串在 JavaScript 开发中非常方便,但在国际化场景下会带来一系列问题。

1. **插件无法跟踪**
   i18n ally等工具无法解析模板字符串中的动态内容,导致:
   i18n ally 等工具无法解析模板字符串中的动态内容,导致:

- 无法正确显示实时预览
- 无法检测翻译缺失
@@ -67,11 +67,11 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反

```javascript
// 不推荐 - 插件无法解析
const message = t(`fruits.${fruit}`)
const message = t(`fruits.${fruit}`);
```

2. **编辑器无法实时渲染**
   在IDE中,模板字符串会显示为原始代码而非最终翻译结果,降低了开发体验。
   在 IDE 中,模板字符串会显示为原始代码而非最终翻译结果,降低了开发体验。

3. **更难以维护**
   由于插件无法跟踪这样的文案,编辑器中也无法渲染,开发者必须人工确认语言文件中是否存在相应的文案。
@@ -85,36 +85,36 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反
```ts
// src/renderer/src/i18n/label.ts
const themeModeKeyMap = {
  dark: 'settings.theme.dark',
  light: 'settings.theme.light',
  system: 'settings.theme.system'
} as const
  dark: "settings.theme.dark",
  light: "settings.theme.light",
  system: "settings.theme.system",
} as const;

export const getThemeModeLabel = (key: string): string => {
  return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key
}
  return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key;
};
```

通过避免模板字符串,可以获得更好的开发体验、更可靠的翻译检查以及更易维护的代码库。

## 自动化脚本

项目中有一系列脚本来自动化i18n相关任务:
项目中有一系列脚本来自动化 i18n 相关任务:

### `check:i18n` - 检查i18n结构
### `i18n:check` - 检查 i18n 结构

此脚本会检查:

- 所有语言文件是否为嵌套结构
- 是否存在缺失的key
- 是否存在多余的key
- 是否存在缺失的 key
- 是否存在多余的 key
- 是否已经有序

```bash
yarn check:i18n
yarn i18n:check
```

### `sync:i18n` - 同步json结构与排序
### `i18n:sync` - 同步 json 结构与排序

此脚本以`zh-cn.json`文件为基准,将结构同步到其他语言文件,包括:

@@ -123,14 +123,14 @@ yarn check:i18n
3. 自动排序

```bash
yarn sync:i18n
yarn i18n:sync
```

### `auto:i18n` - 自动翻译待翻译文本
### `i18n:translate` - 自动翻译待翻译文本

次脚本自动将标记为待翻译的文本通过机器翻译填充。

通常,在`zh-cn.json`中添加所需文案后,执行`sync:i18n`即可自动完成翻译。
通常,在`zh-cn.json`中添加所需文案后,执行`i18n:sync`即可自动完成翻译。

使用该脚本前,需要配置环境变量,例如:

@@ -143,29 +143,19 @@ MODEL="qwen-plus-latest"
你也可以通过直接编辑`.env`文件来添加环境变量。

```bash
yarn auto:i18n
```

### `update:i18n` - 对象级别翻译更新

对`src/renderer/src/i18n/translate`中的语言文件进行对象级别的翻译更新,保留已有翻译,只更新新增内容。

**不建议**使用该脚本,更推荐使用`auto:i18n`进行翻译。

```bash
yarn update:i18n
yarn i18n:translate
```

### 工作流

1. 开发阶段,先在`zh-cn.json`中添加所需文案
2. 确认在中文环境下显示无误后,使用`yarn sync:i18n`将文案同步到其他语言文件
3. 使用`yarn auto:i18n`进行自动翻译
2. 确认在中文环境下显示无误后,使用`yarn i18n:sync`将文案同步到其他语言文件
3. 使用`yarn i18n:translate`进行自动翻译
4. 喝杯咖啡,等翻译完成吧!

## 最佳实践

1. **以中文为源语言**:所有开发首先使用中文,再翻译为其他语言
2. **提交前运行检查脚本**:使用`yarn check:i18n`检查i18n是否有问题
2. **提交前运行检查脚本**:使用`yarn i18n:check`检查 i18n 是否有问题
3. **小步提交翻译**:避免积累大量未翻译文本
4. **保持key语义明确**:key应能清晰表达其用途,如`user.profile.avatar.upload.error`
4. **保持 key 语义明确**:key 应能清晰表达其用途,如`user.profile.avatar.upload.error`
@@ -134,66 +134,54 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
releaseInfo:
  releaseNotes: |
    <!--LANG:en-->
    Cherry Studio 1.7.2 - Stability & Enhancement Update
    Cherry Studio 1.7.4 - New Browser MCP & Model Updates

    This release focuses on stability improvements, bug fixes, and quality-of-life enhancements.
    This release adds a powerful browser automation MCP server, new web search provider, and model support updates.

    ✨ New Features
    - [MCP] Add @cherry/browser CDP MCP server with session management for browser automation
    - [Web Search] Add ExaMCP free web search provider (no API key required)
    - [Model] Support GPT 5.2 series models
    - [Model] Add capabilities support for Doubao Seed Code models (tool calling, reasoning, vision)

    🔧 Improvements
    - Enhanced update dialog functionality and state management
    - Improved ImageViewer context menu UX
    - Better temperature and top_p parameter handling
    - User-configurable stream options for OpenAI API
    - Translation feature now supports document files

    🤖 AI & Models
    - Added explicit thinking token support for Gemini 3 Pro Image
    - Updated DeepSeek logic to match DeepSeek v3.2
    - Updated AiOnly default models
    - Updated AI model configurations to latest versions

    ♿ Accessibility
    - Improved screen reader (NVDA) support with aria-label attributes
    - Added Slovak language support for spell check
    - [Translate] Add reasoning effort option to translate service
    - [i18n] Improve zh-TW Traditional Chinese locale
    - [Settings] Update MCP Settings layout and styling

    🐛 Bug Fixes
    - Fixed Quick Assistant shortcut registration issue
    - Fixed UI freeze on multi-file selection via batch processing
    - Fixed assistant default model update when editing model capabilities
    - Fixed provider handling and API key rotation logic
    - Fixed OVMS API URL path formation
    - Fixed custom parameters placement for Vercel AI Gateway
    - Fixed topic message blocks clearing
    - Fixed input bar blocking enter send while generating
    - [Chat] Fix line numbers being wrongly copied from code blocks
    - [Translate] Fix default to first supported reasoning effort when translating
    - [Chat] Fix preserve thinking block in assistant messages
    - [Web Search] Fix max search result limit
    - [Embedding] Fix embedding dimensions retrieval for ModernAiProvider
    - [Chat] Fix token calculation in prompt tool use plugin
    - [Model] Fix Ollama provider options for Qwen model support
    - [UI] Fix Chat component marginRight calculation for improved layout

    <!--LANG:zh-CN-->
    Cherry Studio 1.7.2 - 稳定性与功能增强更新
    Cherry Studio 1.7.4 - 新增浏览器 MCP 与模型更新

    本次更新专注于稳定性改进、问题修复和用户体验提升。
    本次更新新增强大的浏览器自动化 MCP 服务器、新的网页搜索提供商以及模型支持更新。

    ✨ 新功能
    - [MCP] 新增 @cherry/browser CDP MCP 服务器,支持会话管理的浏览器自动化
    - [网页搜索] 新增 ExaMCP 免费网页搜索提供商(无需 API 密钥)
    - [模型] 支持 GPT 5.2 系列模型
    - [模型] 为豆包 Seed Code 模型添加能力支持(工具调用、推理、视觉)

    🔧 功能改进
    - 增强更新对话框功能和状态管理
    - 优化图片查看器右键菜单体验
    - 改进温度和 top_p 参数处理逻辑
    - 支持用户自定义 OpenAI API 流式选项
    - 翻译功能现已支持文档文件

    🤖 AI 与模型
    - 为 Gemini 3 Pro Image 添加显式思考 token 支持
    - 更新 DeepSeek 逻辑以适配 DeepSeek v3.2
    - 更新 AiOnly 默认模型
    - 更新 AI 模型配置至最新版本

    ♿ 无障碍支持
    - 改进屏幕阅读器 (NVDA) 支持,添加 aria-label 属性
    - 新增斯洛伐克语拼写检查支持
    - [翻译] 为翻译服务添加推理强度选项
    - [国际化] 改进繁体中文(zh-TW)本地化
    - [设置] 优化 MCP 设置布局和样式

    🐛 问题修复
    - 修复快捷助手无法注册快捷键的问题
    - 修复多文件选择时 UI 冻结问题(通过批处理优化)
    - 修复编辑模型能力时助手默认模型更新问题
    - 修复服务商处理和 API 密钥轮换逻辑
    - 修复 OVMS API URL 路径格式问题
    - 修复 Vercel AI Gateway 自定义参数位置问题
    - 修复话题消息块清理问题
    - 修复生成时输入框阻止回车发送的问题
    - [聊天] 修复代码块中行号被错误复制的问题
    - [翻译] 修复翻译时默认使用第一个支持的推理强度
    - [聊天] 修复助手消息中思考块的保留问题
    - [网页搜索] 修复最大搜索结果数限制
    - [嵌入] 修复 ModernAiProvider 嵌入维度获取问题
    - [聊天] 修复提示词工具使用插件的 token 计算问题
    - [模型] 修复 Ollama 提供商对 Qwen 模型的支持选项
    - [界面] 修复聊天组件右边距计算以改善布局
    <!--LANG:END-->
@@ -61,6 +61,7 @@ export default defineConfig([
      'tests/**',
      '.yarn/**',
      '.gitignore',
      '.conductor/**',
      'scripts/cloudflare-worker.js',
      'src/main/integration/nutstore/sso/lib/**',
      'src/main/integration/cherryai/index.js',
package.json (25 changes)

@@ -1,6 +1,6 @@
{
  "name": "CherryStudio",
  "version": "1.7.2",
  "version": "1.7.4",
  "private": true,
  "description": "A powerful AI assistant for producer.",
  "main": "./out/main/index.js",
@@ -50,13 +50,13 @@
    "generate:icons": "electron-icon-builder --input=./build/logo.png --output=build",
    "analyze:renderer": "VISUALIZER_RENDERER=true yarn build",
    "analyze:main": "VISUALIZER_MAIN=true yarn build",
    "typecheck": "npm run typecheck:node && npm run typecheck:web",
    "typecheck": "concurrently -n \"node,web\" -c \"cyan,magenta\" \"npm run typecheck:node\" \"npm run typecheck:web\"",
    "typecheck:node": "tsgo --noEmit -p tsconfig.node.json --composite false",
    "typecheck:web": "tsgo --noEmit -p tsconfig.web.json --composite false",
    "check:i18n": "dotenv -e .env -- tsx scripts/check-i18n.ts",
    "sync:i18n": "dotenv -e .env -- tsx scripts/sync-i18n.ts",
    "update:i18n": "dotenv -e .env -- tsx scripts/update-i18n.ts",
    "auto:i18n": "dotenv -e .env -- tsx scripts/auto-translate-i18n.ts",
    "i18n:check": "dotenv -e .env -- tsx scripts/check-i18n.ts",
    "i18n:sync": "dotenv -e .env -- tsx scripts/sync-i18n.ts",
    "i18n:translate": "dotenv -e .env -- tsx scripts/auto-translate-i18n.ts",
    "i18n:all": "yarn i18n:check && yarn i18n:sync && yarn i18n:translate",
    "update:languages": "tsx scripts/update-languages.ts",
    "update:upgrade-config": "tsx scripts/update-app-upgrade-config.ts",
    "test": "vitest run --silent",
@@ -70,7 +70,7 @@
    "test:e2e": "yarn playwright test",
    "test:lint": "oxlint --deny-warnings && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --cache",
    "test:scripts": "vitest scripts",
    "lint": "oxlint --fix && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix --cache && yarn typecheck && yarn check:i18n && yarn format:check",
    "lint": "oxlint --fix && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix --cache && yarn typecheck && yarn i18n:check && yarn format:check",
    "format": "biome format --write && biome lint --write",
    "format:check": "biome format && biome lint",
    "prepare": "git config blame.ignoreRevsFile .git-blame-ignore-revs && husky",
@@ -81,7 +81,7 @@
    "release:ai-sdk-provider": "yarn workspace @cherrystudio/ai-sdk-provider version patch --immediate && yarn workspace @cherrystudio/ai-sdk-provider build && yarn workspace @cherrystudio/ai-sdk-provider npm publish --access public"
  },
  "dependencies": {
    "@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.53#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.53-4b77f4cf29.patch",
    "@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.62#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.62-23ae56f8c8.patch",
    "@libsql/client": "0.14.0",
    "@libsql/win32-x64-msvc": "^0.4.7",
    "@napi-rs/system-ocr": "patch:@napi-rs/system-ocr@npm%3A1.0.2#~/.yarn/patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch",
@@ -119,7 +119,7 @@
    "@ai-sdk/google-vertex": "^3.0.79",
    "@ai-sdk/huggingface": "^0.0.10",
    "@ai-sdk/mistral": "^2.0.24",
    "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
    "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
    "@ai-sdk/perplexity": "^2.0.20",
    "@ai-sdk/test-server": "^0.0.1",
    "@ant-design/v5-patch-for-react-19": "^1.0.3",
@@ -143,7 +143,7 @@
    "@cherrystudio/embedjs-ollama": "^0.1.31",
    "@cherrystudio/embedjs-openai": "^0.1.31",
    "@cherrystudio/extension-table-plus": "workspace:^",
    "@cherrystudio/openai": "^6.9.0",
    "@cherrystudio/openai": "^6.12.0",
    "@dnd-kit/core": "^6.3.1",
    "@dnd-kit/modifiers": "^9.0.0",
    "@dnd-kit/sortable": "^10.0.0",
@@ -258,6 +258,7 @@
    "clsx": "^2.1.1",
    "code-inspector-plugin": "^0.20.14",
    "color": "^5.0.0",
    "concurrently": "^9.2.1",
    "country-flag-emoji-polyfill": "0.1.8",
    "dayjs": "^1.11.11",
    "dexie": "^4.0.8",
@@ -318,7 +319,7 @@
    "motion": "^12.10.5",
    "notion-helper": "^1.3.22",
    "npx-scope-finder": "^1.2.0",
    "ollama-ai-provider-v2": "^1.5.5",
    "ollama-ai-provider-v2": "patch:ollama-ai-provider-v2@npm%3A1.5.5#~/.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch",
    "oxlint": "^1.22.0",
    "oxlint-tsgolint": "^0.2.0",
    "p-queue": "^8.1.0",
@@ -414,7 +415,7 @@
    "@langchain/openai@npm:>=0.1.0 <0.6.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
    "@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
    "@langchain/openai@npm:>=0.2.0 <0.7.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
    "@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
    "@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
    "@ai-sdk/google@npm:^2.0.40": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
    "@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
  },
@@ -40,7 +40,7 @@
  },
  "dependencies": {
    "@ai-sdk/anthropic": "^2.0.49",
    "@ai-sdk/azure": "^2.0.74",
    "@ai-sdk/azure": "^2.0.87",
    "@ai-sdk/deepseek": "^1.0.31",
    "@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
    "@ai-sdk/provider": "^2.0.0",
@@ -62,7 +62,7 @@ export class StreamEventManager {
      const recursiveResult = await context.recursiveCall(recursiveParams)

      if (recursiveResult && recursiveResult.fullStream) {
        await this.pipeRecursiveStream(controller, recursiveResult.fullStream, context)
        await this.pipeRecursiveStream(controller, recursiveResult.fullStream)
      } else {
        console.warn('[MCP Prompt] No fullstream found in recursive result:', recursiveResult)
      }
@@ -74,11 +74,7 @@ export class StreamEventManager {
  /**
   * Pipe data from the recursive stream into the current stream
   */
  private async pipeRecursiveStream(
    controller: StreamController,
    recursiveStream: ReadableStream,
    context?: AiRequestContext
  ): Promise<void> {
  private async pipeRecursiveStream(controller: StreamController, recursiveStream: ReadableStream): Promise<void> {
    const reader = recursiveStream.getReader()
    try {
      while (true) {
@@ -86,18 +82,14 @@ export class StreamEventManager {
        if (done) {
          break
        }
        if (value.type === 'start') {
          continue
        }

        if (value.type === 'finish') {
          // The iterated stream does not emit finish, but its usage still needs to be accumulated
          if (value.usage && context?.accumulatedUsage) {
            this.accumulateUsage(context.accumulatedUsage, value.usage)
          }
          break
        }
        // For finish-step chunks, accumulate their usage
        if (value.type === 'finish-step' && value.usage && context?.accumulatedUsage) {
          this.accumulateUsage(context.accumulatedUsage, value.usage)
        }
        // Pipe the recursive stream's data into the current stream

        controller.enqueue(value)
      }
    } finally {
@@ -135,10 +127,8 @@ export class StreamEventManager {
    // Build the new conversation messages
    const newMessages: ModelMessage[] = [
      ...(context.originalParams.messages || []),
      {
        role: 'assistant',
        content: textBuffer
      },
      // Only add the assistant message when textBuffer is non-empty, to avoid API errors caused by empty messages
      ...(textBuffer ? [{ role: 'assistant' as const, content: textBuffer }] : []),
      {
        role: 'user',
        content: toolResultsText
@@ -161,7 +151,7 @@ export class StreamEventManager {
  /**
   * Accumulate usage data
   */
  private accumulateUsage(target: any, source: any): void {
  accumulateUsage(target: any, source: any): void {
    if (!target || !source) return

    // Accumulate the various token types
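The hunk above cuts off inside `accumulateUsage`, which this change promotes from a private helper to a method the prompt-tool-use plugin can call directly. A minimal sketch of what such accumulation typically does is shown below; the exact field names on the usage object are assumptions, not taken from the diff.

```ts
// Hedged sketch of usage accumulation: add each numeric token counter from `source`
// onto `target`. Field names (inputTokens, outputTokens, totalTokens) are assumptions.
type TokenUsage = { inputTokens?: number; outputTokens?: number; totalTokens?: number }

function accumulateUsage(target: TokenUsage, source: TokenUsage): void {
  if (!target || !source) return
  for (const key of ['inputTokens', 'outputTokens', 'totalTokens'] as const) {
    const value = source[key]
    if (typeof value === 'number') {
      target[key] = (target[key] ?? 0) + value
    }
  }
}
```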
@@ -411,7 +411,10 @@ export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => {
            }
          }

          // If no tool call was executed, pass the original finish-step event through unchanged
          // If no tool call was executed, accumulate usage and then pass the finish-step event through
          if (chunk.usage && context.accumulatedUsage) {
            streamEventManager.accumulateUsage(context.accumulatedUsage, chunk.usage)
          }
          controller.enqueue(chunk)

          // Clean up state

@ -6,6 +6,7 @@ import { type Tool } from 'ai'
|
||||
|
||||
import { createOpenRouterOptions, createXaiOptions, mergeProviderOptions } from '../../../options'
|
||||
import type { ProviderOptionsMap } from '../../../options/types'
|
||||
import type { AiRequestContext } from '../../'
|
||||
import type { OpenRouterSearchConfig } from './openrouter'
|
||||
|
||||
/**
|
||||
@ -95,28 +96,84 @@ export type WebSearchToolInputSchema = {
|
||||
'openai-chat': InferToolInput<OpenAIChatWebSearchTool>
|
||||
}
|
||||
|
||||
export const switchWebSearchTool = (config: WebSearchPluginConfig, params: any) => {
|
||||
if (config.openai) {
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search = openai.tools.webSearch(config.openai)
|
||||
} else if (config['openai-chat']) {
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search_preview = openai.tools.webSearchPreview(config['openai-chat'])
|
||||
} else if (config.anthropic) {
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search = anthropic.tools.webSearch_20250305(config.anthropic)
|
||||
} else if (config.google) {
|
||||
// case 'google-vertex':
|
||||
if (!params.tools) params.tools = {}
|
||||
params.tools.web_search = google.tools.googleSearch(config.google || {})
|
||||
} else if (config.xai) {
|
||||
const searchOptions = createXaiOptions({
|
||||
searchParameters: { ...config.xai, mode: 'on' }
|
||||
})
|
||||
params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
|
||||
} else if (config.openrouter) {
|
||||
const searchOptions = createOpenRouterOptions(config.openrouter)
|
||||
params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
|
||||
/**
|
||||
* Helper function to ensure params.tools object exists
|
||||
*/
|
||||
const ensureToolsObject = (params: any) => {
|
||||
if (!params.tools) params.tools = {}
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper function to apply tool-based web search configuration
|
||||
*/
|
||||
const applyToolBasedSearch = (params: any, toolName: string, toolInstance: any) => {
|
||||
ensureToolsObject(params)
|
||||
params.tools[toolName] = toolInstance
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper function to apply provider options-based web search configuration
|
||||
*/
|
||||
const applyProviderOptionsSearch = (params: any, searchOptions: any) => {
|
||||
params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
|
||||
}
|
||||
|
||||
export const switchWebSearchTool = (config: WebSearchPluginConfig, params: any, context?: AiRequestContext) => {
|
||||
const providerId = context?.providerId
|
||||
|
||||
// Provider-specific configuration map
|
||||
const providerHandlers: Record<string, () => void> = {
|
||||
openai: () => {
|
||||
const cfg = config.openai ?? DEFAULT_WEB_SEARCH_CONFIG.openai
|
||||
applyToolBasedSearch(params, 'web_search', openai.tools.webSearch(cfg))
|
||||
},
|
||||
'openai-chat': () => {
|
||||
const cfg = (config['openai-chat'] ?? DEFAULT_WEB_SEARCH_CONFIG['openai-chat']) as OpenAISearchPreviewConfig
|
||||
applyToolBasedSearch(params, 'web_search_preview', openai.tools.webSearchPreview(cfg))
|
||||
},
|
||||
anthropic: () => {
|
||||
const cfg = config.anthropic ?? DEFAULT_WEB_SEARCH_CONFIG.anthropic
|
||||
applyToolBasedSearch(params, 'web_search', anthropic.tools.webSearch_20250305(cfg))
|
||||
},
|
||||
google: () => {
|
||||
const cfg = (config.google ?? DEFAULT_WEB_SEARCH_CONFIG.google) as GoogleSearchConfig
|
||||
applyToolBasedSearch(params, 'web_search', google.tools.googleSearch(cfg))
|
||||
},
|
||||
xai: () => {
|
||||
const cfg = config.xai ?? DEFAULT_WEB_SEARCH_CONFIG.xai
|
||||
const searchOptions = createXaiOptions({ searchParameters: { ...cfg, mode: 'on' } })
|
||||
applyProviderOptionsSearch(params, searchOptions)
|
||||
},
|
||||
openrouter: () => {
|
||||
const cfg = (config.openrouter ?? DEFAULT_WEB_SEARCH_CONFIG.openrouter) as OpenRouterSearchConfig
|
||||
const searchOptions = createOpenRouterOptions(cfg)
|
||||
applyProviderOptionsSearch(params, searchOptions)
|
||||
}
|
||||
}
|
||||
|
||||
// Try provider-specific handler first
|
||||
const handler = providerId && providerHandlers[providerId]
|
||||
if (handler) {
|
||||
handler()
|
||||
return params
|
||||
}
|
||||
|
||||
// Fallback: apply based on available config keys (prioritized order)
|
||||
const fallbackOrder: Array<keyof WebSearchPluginConfig> = [
|
||||
'openai',
|
||||
'openai-chat',
|
||||
'anthropic',
|
||||
'google',
|
||||
'xai',
|
||||
'openrouter'
|
||||
]
|
||||
|
||||
for (const key of fallbackOrder) {
|
||||
if (config[key]) {
|
||||
providerHandlers[key]()
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return params
|
||||
}
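A brief usage sketch of the refactored dispatcher above (the config value and context object are illustrative only): when the request context carries a known providerId, the matching handler runs directly; otherwise the first configured key in fallbackOrder is applied.

// Illustrative call; config contents and context shape are assumptions
const params: any = {}
switchWebSearchTool({ openrouter: {} } as any, params, { providerId: 'openrouter' } as any)
// params.providerOptions now carries the OpenRouter search options.
// With no providerId in context, the same config would still be picked up via fallbackOrder.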
|
||||
|
||||
@ -17,8 +17,22 @@ export const webSearchPlugin = (config: WebSearchPluginConfig = DEFAULT_WEB_SEAR
|
||||
name: 'webSearch',
|
||||
enforce: 'pre',
|
||||
|
||||
transformParams: async (params: any) => {
|
||||
switchWebSearchTool(config, params)
|
||||
transformParams: async (params: any, context) => {
|
||||
let { providerId } = context
|
||||
|
||||
// For cherryin providers, extract the actual provider from the model's provider string
|
||||
// Expected format: "cherryin.{actualProvider}" (e.g., "cherryin.gemini")
|
||||
if (providerId === 'cherryin' || providerId === 'cherryin-chat') {
|
||||
const provider = params.model?.provider
|
||||
if (provider && typeof provider === 'string' && provider.includes('.')) {
|
||||
const extractedProviderId = provider.split('.')[1]
|
||||
if (extractedProviderId) {
|
||||
providerId = extractedProviderId
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switchWebSearchTool(config, params, { ...context, providerId })
|
||||
return params
|
||||
}
|
||||
})
|
||||
|
||||
@ -55,6 +55,8 @@ export enum IpcChannel {
|
||||
Webview_SetOpenLinkExternal = 'webview:set-open-link-external',
|
||||
Webview_SetSpellCheckEnabled = 'webview:set-spell-check-enabled',
|
||||
Webview_SearchHotkey = 'webview:search-hotkey',
|
||||
Webview_PrintToPDF = 'webview:print-to-pdf',
|
||||
Webview_SaveAsHTML = 'webview:save-as-html',
|
||||
|
||||
// Open
|
||||
Open_Path = 'open:path',
|
||||
@ -90,6 +92,8 @@ export enum IpcChannel {
|
||||
Mcp_AbortTool = 'mcp:abort-tool',
|
||||
Mcp_GetServerVersion = 'mcp:get-server-version',
|
||||
Mcp_Progress = 'mcp:progress',
|
||||
Mcp_GetServerLogs = 'mcp:get-server-logs',
|
||||
Mcp_ServerLog = 'mcp:server-log',
|
||||
// Python
|
||||
Python_Execute = 'python:execute',
|
||||
|
||||
@ -241,6 +245,8 @@ export enum IpcChannel {
|
||||
System_GetHostname = 'system:getHostname',
|
||||
System_GetCpuName = 'system:getCpuName',
|
||||
System_CheckGitBash = 'system:checkGitBash',
|
||||
System_GetGitBashPath = 'system:getGitBashPath',
|
||||
System_SetGitBashPath = 'system:setGitBashPath',
|
||||
|
||||
// DevTools
|
||||
System_ToggleDevTools = 'system:toggleDevTools',
|
||||
@ -295,6 +301,8 @@ export enum IpcChannel {
|
||||
Selection_ActionWindowClose = 'selection:action-window-close',
|
||||
Selection_ActionWindowMinimize = 'selection:action-window-minimize',
|
||||
Selection_ActionWindowPin = 'selection:action-window-pin',
|
||||
// [Windows only] Electron bug workaround - can be removed once https://github.com/electron/electron/issues/48554 is fixed
|
||||
Selection_ActionWindowResize = 'selection:action-window-resize',
|
||||
Selection_ProcessAction = 'selection:process-action',
|
||||
Selection_UpdateActionData = 'selection:update-action-data',
|
||||
|
||||
|
||||
@ -88,16 +88,11 @@ export function getSdkClient(
|
||||
}
|
||||
})
|
||||
}
|
||||
let baseURL =
|
||||
const baseURL =
|
||||
provider.type === 'anthropic'
|
||||
? provider.apiHost
|
||||
: (provider.anthropicApiHost && provider.anthropicApiHost.trim()) || provider.apiHost
|
||||
|
||||
// Anthropic SDK automatically appends /v1 to all endpoints (like /v1/messages, /v1/models)
|
||||
// We need to strip api version from baseURL to avoid duplication (e.g., /v3/v1/models)
|
||||
// formatProviderApiHost adds /v1 for AI SDK compatibility, but Anthropic SDK needs it removed
|
||||
baseURL = baseURL.replace(/\/v\d+(?:alpha|beta)?(?=\/|$)/i, '')
|
||||
|
||||
logger.debug('Anthropic API baseURL', { baseURL, providerId: provider.id })
|
||||
|
||||
if (provider.id === 'aihubmix') {
|
||||
|
||||
@ -23,6 +23,14 @@ export type MCPProgressEvent = {
|
||||
progress: number // 0-1 range
|
||||
}
|
||||
|
||||
export type MCPServerLogEntry = {
|
||||
timestamp: number
|
||||
level: 'debug' | 'info' | 'warn' | 'error' | 'stderr' | 'stdout'
|
||||
message: string
|
||||
data?: any
|
||||
source?: string
|
||||
}
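An illustrative value of the new MCPServerLogEntry type (the contents are made up for the example):

const exampleLog: MCPServerLogEntry = {
  timestamp: Date.now(),
  level: 'stderr',
  message: 'server started on stdio',
  source: 'stdio'
}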
|
||||
|
||||
export type WebviewKeyEvent = {
|
||||
webviewId: number
|
||||
key: string
|
||||
|
||||
@ -11,7 +11,7 @@ const OVMS_EX_URL = 'https://gitcode.com/gcw_ggDjjkY3/kjfile/releases/download/d
|
||||
|
||||
/**
|
||||
* error code:
|
||||
* 101: Unsupported CPU (not Intel Ultra)
|
||||
* 101: Unsupported CPU (not Intel)
|
||||
* 102: Unsupported platform (not Windows)
|
||||
* 103: Download failed
|
||||
* 104: Installation failed
|
||||
@ -213,8 +213,8 @@ async function installOvms() {
|
||||
console.log(`CPU Name: ${cpuName}`)
|
||||
|
||||
// Check if CPU name contains "Ultra"
|
||||
if (!cpuName.toLowerCase().includes('intel') || !cpuName.toLowerCase().includes('ultra')) {
|
||||
console.error('OVMS installation requires an Intel(R) Core(TM) Ultra CPU.')
|
||||
if (!cpuName.toLowerCase().includes('intel')) {
|
||||
console.error('OVMS installation requires an Intel CPU.')
|
||||
return 101
|
||||
}
|
||||
|
||||
|
||||
@ -50,7 +50,7 @@ Usage Instructions:
|
||||
- pt-pt (Portuguese)
|
||||
|
||||
Run Command:
|
||||
yarn auto:i18n
|
||||
yarn i18n:translate
|
||||
|
||||
Performance Optimization Recommendations:
|
||||
- For stable API services: MAX_CONCURRENT_TRANSLATIONS=8, TRANSLATION_DELAY_MS=50
|
||||
|
||||
@ -145,7 +145,7 @@ export function main() {
|
||||
console.log('i18n 检查已通过')
|
||||
} catch (e) {
|
||||
console.error(e)
|
||||
throw new Error(`检查未通过。尝试运行 yarn sync:i18n 以解决问题。`)
|
||||
throw new Error(`检查未通过。尝试运行 yarn i18n:sync 以解决问题。`)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -5,9 +5,17 @@ exports.default = async function (configuration) {
|
||||
const { path } = configuration
|
||||
if (configuration.path) {
|
||||
try {
|
||||
const certPath = process.env.CHERRY_CERT_PATH
|
||||
const keyContainer = process.env.CHERRY_CERT_KEY
|
||||
const csp = process.env.CHERRY_CERT_CSP
|
||||
|
||||
if (!certPath || !keyContainer || !csp) {
|
||||
throw new Error('CHERRY_CERT_PATH, CHERRY_CERT_KEY or CHERRY_CERT_CSP is not set')
|
||||
}
|
||||
|
||||
console.log('Start code signing...')
|
||||
console.log('Signing file:', path)
|
||||
const signCommand = `signtool sign /tr http://timestamp.comodoca.com /td sha256 /fd sha256 /a /v "${path}"`
|
||||
const signCommand = `signtool sign /tr http://timestamp.comodoca.com /td sha256 /fd sha256 /v /f "${certPath}" /csp "${csp}" /k "${keyContainer}" "${path}"`
|
||||
execSync(signCommand, { stdio: 'inherit' })
|
||||
console.log('Code signing completed')
|
||||
} catch (error) {
|
||||
|
||||
@ -6,7 +6,7 @@ import { loggerService } from '@logger'
|
||||
import { isLinux, isMac, isPortable, isWin } from '@main/constant'
|
||||
import { generateSignature } from '@main/integration/cherryai'
|
||||
import anthropicService from '@main/services/AnthropicService'
|
||||
import { findGitBash, getBinaryPath, isBinaryExists, runInstallScript } from '@main/utils/process'
|
||||
import { findGitBash, getBinaryPath, isBinaryExists, runInstallScript, validateGitBashPath } from '@main/utils/process'
|
||||
import { handleZoomFactor } from '@main/utils/zoom'
|
||||
import type { SpanEntity, TokenUsage } from '@mcp-trace/trace-core'
|
||||
import type { UpgradeChannel } from '@shared/config/constant'
|
||||
@ -36,7 +36,7 @@ import appService from './services/AppService'
|
||||
import AppUpdater from './services/AppUpdater'
|
||||
import BackupManager from './services/BackupManager'
|
||||
import { codeToolsService } from './services/CodeToolsService'
|
||||
import { configManager } from './services/ConfigManager'
|
||||
import { ConfigKeys, configManager } from './services/ConfigManager'
|
||||
import CopilotService from './services/CopilotService'
|
||||
import DxtService from './services/DxtService'
|
||||
import { ExportService } from './services/ExportService'
|
||||
@ -502,7 +502,8 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
}
|
||||
|
||||
try {
|
||||
const bashPath = findGitBash()
|
||||
const customPath = configManager.get(ConfigKeys.GitBashPath) as string | undefined
|
||||
const bashPath = findGitBash(customPath)
|
||||
|
||||
if (bashPath) {
|
||||
logger.info('Git Bash is available', { path: bashPath })
|
||||
@ -516,6 +517,35 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
return false
|
||||
}
|
||||
})
|
||||
|
||||
ipcMain.handle(IpcChannel.System_GetGitBashPath, () => {
|
||||
if (!isWin) {
|
||||
return null
|
||||
}
|
||||
|
||||
const customPath = configManager.get(ConfigKeys.GitBashPath) as string | undefined
|
||||
return customPath ?? null
|
||||
})
|
||||
|
||||
ipcMain.handle(IpcChannel.System_SetGitBashPath, (_, newPath: string | null) => {
|
||||
if (!isWin) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (!newPath) {
|
||||
configManager.set(ConfigKeys.GitBashPath, null)
|
||||
return true
|
||||
}
|
||||
|
||||
const validated = validateGitBashPath(newPath)
|
||||
if (!validated) {
|
||||
return false
|
||||
}
|
||||
|
||||
configManager.set(ConfigKeys.GitBashPath, validated)
|
||||
return true
|
||||
})
|
||||
|
||||
ipcMain.handle(IpcChannel.System_ToggleDevTools, (e) => {
|
||||
const win = BrowserWindow.fromWebContents(e.sender)
|
||||
win && win.webContents.toggleDevTools()
|
||||
@ -770,6 +800,7 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
ipcMain.handle(IpcChannel.Mcp_CheckConnectivity, mcpService.checkMcpConnectivity)
|
||||
ipcMain.handle(IpcChannel.Mcp_AbortTool, mcpService.abortTool)
|
||||
ipcMain.handle(IpcChannel.Mcp_GetServerVersion, mcpService.getServerVersion)
|
||||
ipcMain.handle(IpcChannel.Mcp_GetServerLogs, mcpService.getServerLogs)
|
||||
|
||||
// DXT upload handler
|
||||
ipcMain.handle(IpcChannel.Mcp_UploadDxt, async (event, fileBuffer: ArrayBuffer, fileName: string) => {
|
||||
@ -848,6 +879,17 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
|
||||
webview.session.setSpellCheckerEnabled(isEnable)
|
||||
})
|
||||
|
||||
// Webview print and save handlers
|
||||
ipcMain.handle(IpcChannel.Webview_PrintToPDF, async (_, webviewId: number) => {
|
||||
const { printWebviewToPDF } = await import('./services/WebviewService')
|
||||
return await printWebviewToPDF(webviewId)
|
||||
})
|
||||
|
||||
ipcMain.handle(IpcChannel.Webview_SaveAsHTML, async (_, webviewId: number) => {
|
||||
const { saveWebviewAsHTML } = await import('./services/WebviewService')
|
||||
return await saveWebviewAsHTML(webviewId)
|
||||
})
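A hedged sketch of how a renderer might drive the two new webview handlers; the direct ipcRenderer usage is for brevity, and the actual preload bridge wiring is an assumption:

import { IpcChannel } from '@shared/IpcChannel'
import { ipcRenderer } from 'electron'

// webviewId is the guest page's webContents id, e.g. from <webview>.getWebContentsId()
async function exportWebview(webviewId: number) {
  const pdfPath = await ipcRenderer.invoke(IpcChannel.Webview_PrintToPDF, webviewId)
  const htmlPath = await ipcRenderer.invoke(IpcChannel.Webview_SaveAsHTML, webviewId)
  return { pdfPath, htmlPath } // either may be null if the user cancels the save dialog
}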
|
||||
|
||||
// store sync
|
||||
storeSyncService.registerIpcHandler()
|
||||
|
||||
|
||||
134
src/main/mcpServers/__tests__/browser.test.ts
Normal file
@ -0,0 +1,134 @@
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
|
||||
vi.mock('electron', () => {
|
||||
const sendCommand = vi.fn(async (command: string, params?: { expression?: string }) => {
|
||||
if (command === 'Runtime.evaluate') {
|
||||
if (params?.expression === 'document.documentElement.outerHTML') {
|
||||
return { result: { value: '<html><body><h1>Test</h1><p>Content</p></body></html>' } }
|
||||
}
|
||||
if (params?.expression === 'document.body.innerText') {
|
||||
return { result: { value: 'Test\nContent' } }
|
||||
}
|
||||
return { result: { value: 'ok' } }
|
||||
}
|
||||
return {}
|
||||
})
|
||||
|
||||
const debuggerObj = {
|
||||
isAttached: vi.fn(() => true),
|
||||
attach: vi.fn(),
|
||||
detach: vi.fn(),
|
||||
sendCommand
|
||||
}
|
||||
|
||||
const webContents = {
|
||||
debugger: debuggerObj,
|
||||
setUserAgent: vi.fn(),
|
||||
getURL: vi.fn(() => 'https://example.com/'),
|
||||
getTitle: vi.fn(async () => 'Example Title'),
|
||||
once: vi.fn(),
|
||||
removeListener: vi.fn(),
|
||||
on: vi.fn()
|
||||
}
|
||||
|
||||
const loadURL = vi.fn(async () => {})
|
||||
|
||||
const windows: any[] = []
|
||||
|
||||
class MockBrowserWindow {
|
||||
private destroyed = false
|
||||
public webContents = webContents
|
||||
public loadURL = loadURL
|
||||
public isDestroyed = vi.fn(() => this.destroyed)
|
||||
public close = vi.fn(() => {
|
||||
this.destroyed = true
|
||||
})
|
||||
public destroy = vi.fn(() => {
|
||||
this.destroyed = true
|
||||
})
|
||||
public on = vi.fn()
|
||||
|
||||
constructor() {
|
||||
windows.push(this)
|
||||
}
|
||||
}
|
||||
|
||||
const app = {
|
||||
isReady: vi.fn(() => true),
|
||||
whenReady: vi.fn(async () => {}),
|
||||
on: vi.fn()
|
||||
}
|
||||
|
||||
return {
|
||||
BrowserWindow: MockBrowserWindow as any,
|
||||
app,
|
||||
__mockDebugger: debuggerObj,
|
||||
__mockSendCommand: sendCommand,
|
||||
__mockLoadURL: loadURL,
|
||||
__mockWindows: windows
|
||||
}
|
||||
})
|
||||
|
||||
import * as electron from 'electron'
|
||||
const { __mockWindows } = electron as typeof electron & { __mockWindows: any[] }
|
||||
|
||||
import { CdpBrowserController } from '../browser'
|
||||
|
||||
describe('CdpBrowserController', () => {
|
||||
it('executes single-line code via Runtime.evaluate', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.execute('1+1')
|
||||
expect(result).toBe('ok')
|
||||
})
|
||||
|
||||
it('opens a URL (hidden) and returns current page info', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.open('https://foo.bar/', 5000, false)
|
||||
expect(result.currentUrl).toBe('https://example.com/')
|
||||
expect(result.title).toBe('Example Title')
|
||||
})
|
||||
|
||||
it('opens a URL (visible) when show=true', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.open('https://foo.bar/', 5000, true, 'session-a')
|
||||
expect(result.currentUrl).toBe('https://example.com/')
|
||||
expect(result.title).toBe('Example Title')
|
||||
})
|
||||
|
||||
it('reuses session for execute and supports multiline', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
await controller.open('https://foo.bar/', 5000, false, 'session-b')
|
||||
const result = await controller.execute('const a=1; const b=2; a+b;', 5000, 'session-b')
|
||||
expect(result).toBe('ok')
|
||||
})
|
||||
|
||||
it('evicts least recently used session when exceeding maxSessions', async () => {
|
||||
const controller = new CdpBrowserController({ maxSessions: 2, idleTimeoutMs: 1000 * 60 })
|
||||
await controller.open('https://foo.bar/', 5000, false, 's1')
|
||||
await controller.open('https://foo.bar/', 5000, false, 's2')
|
||||
await controller.open('https://foo.bar/', 5000, false, 's3')
|
||||
const destroyedCount = __mockWindows.filter(
|
||||
(w: any) => w.destroy.mock.calls.length > 0 || w.close.mock.calls.length > 0
|
||||
).length
|
||||
expect(destroyedCount).toBeGreaterThanOrEqual(1)
|
||||
})
|
||||
|
||||
it('fetches URL and returns html format', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.fetch('https://example.com/', 'html')
|
||||
expect(result).toBe('<html><body><h1>Test</h1><p>Content</p></body></html>')
|
||||
})
|
||||
|
||||
it('fetches URL and returns txt format', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.fetch('https://example.com/', 'txt')
|
||||
expect(result).toBe('Test\nContent')
|
||||
})
|
||||
|
||||
it('fetches URL and returns markdown format (default)', async () => {
|
||||
const controller = new CdpBrowserController()
|
||||
const result = await controller.fetch('https://example.com/')
|
||||
expect(typeof result).toBe('string')
|
||||
expect(result).toContain('Test')
|
||||
})
|
||||
})
|
||||
307
src/main/mcpServers/browser/controller.ts
Normal file
@ -0,0 +1,307 @@
|
||||
import { app, BrowserWindow } from 'electron'
|
||||
import TurndownService from 'turndown'
|
||||
|
||||
import { logger, userAgent } from './types'
|
||||
|
||||
/**
|
||||
* Controller for managing browser windows via Chrome DevTools Protocol (CDP).
|
||||
* Supports multiple sessions with LRU eviction and idle timeout cleanup.
|
||||
*/
|
||||
export class CdpBrowserController {
|
||||
private windows: Map<string, { win: BrowserWindow; lastActive: number }> = new Map()
|
||||
private readonly maxSessions: number
|
||||
private readonly idleTimeoutMs: number
|
||||
|
||||
constructor(options?: { maxSessions?: number; idleTimeoutMs?: number }) {
|
||||
this.maxSessions = options?.maxSessions ?? 5
|
||||
this.idleTimeoutMs = options?.idleTimeoutMs ?? 5 * 60 * 1000
|
||||
}
|
||||
|
||||
private async ensureAppReady() {
|
||||
if (!app.isReady()) {
|
||||
await app.whenReady()
|
||||
}
|
||||
}
|
||||
|
||||
private touch(sessionId: string) {
|
||||
const entry = this.windows.get(sessionId)
|
||||
if (entry) entry.lastActive = Date.now()
|
||||
}
|
||||
|
||||
private closeWindow(win: BrowserWindow, sessionId: string) {
|
||||
try {
|
||||
if (!win.isDestroyed()) {
|
||||
if (win.webContents.debugger.isAttached()) {
|
||||
win.webContents.debugger.detach()
|
||||
}
|
||||
win.close()
|
||||
}
|
||||
} catch (error) {
|
||||
logger.warn('Error closing window', { error, sessionId })
|
||||
}
|
||||
}
|
||||
|
||||
private async ensureDebuggerAttached(dbg: Electron.Debugger, sessionId: string) {
|
||||
if (!dbg.isAttached()) {
|
||||
try {
|
||||
logger.info('Attaching debugger', { sessionId })
|
||||
dbg.attach('1.3')
|
||||
await dbg.sendCommand('Page.enable')
|
||||
await dbg.sendCommand('Runtime.enable')
|
||||
logger.info('Debugger attached and domains enabled')
|
||||
} catch (error) {
|
||||
logger.error('Failed to attach debugger', { error })
|
||||
throw error
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private sweepIdle() {
|
||||
const now = Date.now()
|
||||
for (const [id, entry] of this.windows.entries()) {
|
||||
if (now - entry.lastActive > this.idleTimeoutMs) {
|
||||
this.closeWindow(entry.win, id)
|
||||
this.windows.delete(id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private evictIfNeeded(newSessionId: string) {
|
||||
if (this.windows.size < this.maxSessions) return
|
||||
let lruId: string | null = null
|
||||
let lruTime = Number.POSITIVE_INFINITY
|
||||
for (const [id, entry] of this.windows.entries()) {
|
||||
if (id === newSessionId) continue
|
||||
if (entry.lastActive < lruTime) {
|
||||
lruTime = entry.lastActive
|
||||
lruId = id
|
||||
}
|
||||
}
|
||||
if (lruId) {
|
||||
const entry = this.windows.get(lruId)
|
||||
if (entry) {
|
||||
this.closeWindow(entry.win, lruId)
|
||||
}
|
||||
this.windows.delete(lruId)
|
||||
logger.info('Evicted session to respect maxSessions', { evicted: lruId })
|
||||
}
|
||||
}
|
||||
|
||||
private async getWindow(sessionId = 'default', forceNew = false, show = false): Promise<BrowserWindow> {
|
||||
await this.ensureAppReady()
|
||||
|
||||
this.sweepIdle()
|
||||
|
||||
const existing = this.windows.get(sessionId)
|
||||
if (existing && !existing.win.isDestroyed() && !forceNew) {
|
||||
this.touch(sessionId)
|
||||
return existing.win
|
||||
}
|
||||
|
||||
if (existing && !existing.win.isDestroyed() && forceNew) {
|
||||
try {
|
||||
if (existing.win.webContents.debugger.isAttached()) {
|
||||
existing.win.webContents.debugger.detach()
|
||||
}
|
||||
} catch (error) {
|
||||
logger.warn('Error detaching debugger before recreate', { error, sessionId })
|
||||
}
|
||||
existing.win.destroy()
|
||||
this.windows.delete(sessionId)
|
||||
}
|
||||
|
||||
this.evictIfNeeded(sessionId)
|
||||
|
||||
const win = new BrowserWindow({
|
||||
show,
|
||||
webPreferences: {
|
||||
contextIsolation: true,
|
||||
sandbox: true,
|
||||
nodeIntegration: false,
|
||||
devTools: true
|
||||
}
|
||||
})
|
||||
|
||||
// Use a standard Chrome UA to avoid some anti-bot blocks
|
||||
win.webContents.setUserAgent(userAgent)
|
||||
|
||||
// Log navigation lifecycle to help diagnose slow loads
|
||||
win.webContents.on('did-start-loading', () => logger.info(`did-start-loading`, { sessionId }))
|
||||
win.webContents.on('dom-ready', () => logger.info(`dom-ready`, { sessionId }))
|
||||
win.webContents.on('did-finish-load', () => logger.info(`did-finish-load`, { sessionId }))
|
||||
win.webContents.on('did-fail-load', (_e, code, desc) => logger.warn('Navigation failed', { code, desc }))
|
||||
|
||||
win.on('closed', () => {
|
||||
this.windows.delete(sessionId)
|
||||
})
|
||||
|
||||
this.windows.set(sessionId, { win, lastActive: Date.now() })
|
||||
return win
|
||||
}
|
||||
|
||||
/**
|
||||
* Opens a URL in a browser window and waits for navigation to complete.
|
||||
* @param url - The URL to navigate to
|
||||
* @param timeout - Navigation timeout in milliseconds (default: 10000)
|
||||
* @param show - Whether to show the browser window (default: false)
|
||||
* @param sessionId - Session identifier for window reuse (default: 'default')
|
||||
* @returns Object containing the current URL and page title after navigation
|
||||
*/
|
||||
public async open(url: string, timeout = 10000, show = false, sessionId = 'default') {
|
||||
const win = await this.getWindow(sessionId, true, show)
|
||||
logger.info('Loading URL', { url, sessionId })
|
||||
const { webContents } = win
|
||||
this.touch(sessionId)
|
||||
|
||||
// Track resolution state to prevent multiple handlers from firing
|
||||
let resolved = false
|
||||
let onFinish: () => void
|
||||
let onDomReady: () => void
|
||||
let onFail: (_event: Electron.Event, code: number, desc: string) => void
|
||||
|
||||
// Define cleanup outside Promise to ensure it's callable in finally block,
|
||||
// preventing memory leaks when timeout occurs before navigation completes
|
||||
const cleanup = () => {
|
||||
webContents.removeListener('did-finish-load', onFinish)
|
||||
webContents.removeListener('did-fail-load', onFail)
|
||||
webContents.removeListener('dom-ready', onDomReady)
|
||||
}
|
||||
|
||||
const loadPromise = new Promise<void>((resolve, reject) => {
|
||||
onFinish = () => {
|
||||
if (resolved) return
|
||||
resolved = true
|
||||
cleanup()
|
||||
resolve()
|
||||
}
|
||||
onDomReady = () => {
|
||||
if (resolved) return
|
||||
resolved = true
|
||||
cleanup()
|
||||
resolve()
|
||||
}
|
||||
onFail = (_event: Electron.Event, code: number, desc: string) => {
|
||||
if (resolved) return
|
||||
resolved = true
|
||||
cleanup()
|
||||
reject(new Error(`Navigation failed (${code}): ${desc}`))
|
||||
}
|
||||
webContents.once('did-finish-load', onFinish)
|
||||
webContents.once('dom-ready', onDomReady)
|
||||
webContents.once('did-fail-load', onFail)
|
||||
})
|
||||
|
||||
const timeoutPromise = new Promise<void>((_, reject) => {
|
||||
setTimeout(() => reject(new Error('Navigation timed out')), timeout)
|
||||
})
|
||||
|
||||
try {
|
||||
await Promise.race([win.loadURL(url), loadPromise, timeoutPromise])
|
||||
} finally {
|
||||
// Always cleanup listeners to prevent memory leaks on timeout
|
||||
cleanup()
|
||||
}
|
||||
|
||||
const currentUrl = webContents.getURL()
|
||||
const title = await webContents.getTitle()
|
||||
return { currentUrl, title }
|
||||
}
|
||||
|
||||
public async execute(code: string, timeout = 5000, sessionId = 'default') {
|
||||
const win = await this.getWindow(sessionId)
|
||||
this.touch(sessionId)
|
||||
const dbg = win.webContents.debugger
|
||||
|
||||
await this.ensureDebuggerAttached(dbg, sessionId)
|
||||
|
||||
const evalPromise = dbg.sendCommand('Runtime.evaluate', {
|
||||
expression: code,
|
||||
awaitPromise: true,
|
||||
returnByValue: true
|
||||
})
|
||||
|
||||
const result = await Promise.race([
|
||||
evalPromise,
|
||||
new Promise((_, reject) => setTimeout(() => reject(new Error('Execution timed out')), timeout))
|
||||
])
|
||||
|
||||
const evalResult = result as any
|
||||
|
||||
if (evalResult?.exceptionDetails) {
|
||||
const message = evalResult.exceptionDetails.exception?.description || 'Unknown script error'
|
||||
logger.warn('Runtime.evaluate raised exception', { message })
|
||||
throw new Error(message)
|
||||
}
|
||||
|
||||
const value = evalResult?.result?.value ?? evalResult?.result?.description ?? null
|
||||
return value
|
||||
}
|
||||
|
||||
public async reset(sessionId?: string) {
|
||||
if (sessionId) {
|
||||
const entry = this.windows.get(sessionId)
|
||||
if (entry) {
|
||||
this.closeWindow(entry.win, sessionId)
|
||||
}
|
||||
this.windows.delete(sessionId)
|
||||
logger.info('Browser CDP context reset', { sessionId })
|
||||
return
|
||||
}
|
||||
|
||||
for (const [id, entry] of this.windows.entries()) {
|
||||
this.closeWindow(entry.win, id)
|
||||
this.windows.delete(id)
|
||||
}
|
||||
logger.info('Browser CDP context reset (all sessions)')
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetches a URL and returns content in the specified format.
|
||||
* @param url - The URL to fetch
|
||||
* @param format - Output format: 'html', 'txt', 'markdown', or 'json' (default: 'markdown')
|
||||
* @param timeout - Navigation timeout in milliseconds (default: 10000)
|
||||
* @param sessionId - Session identifier (default: 'default')
|
||||
* @returns Content in the requested format. For 'json', returns parsed object or { data: rawContent } if parsing fails
|
||||
*/
|
||||
public async fetch(
|
||||
url: string,
|
||||
format: 'html' | 'txt' | 'markdown' | 'json' = 'markdown',
|
||||
timeout = 10000,
|
||||
sessionId = 'default'
|
||||
) {
|
||||
await this.open(url, timeout, false, sessionId)
|
||||
|
||||
const win = await this.getWindow(sessionId)
|
||||
const dbg = win.webContents.debugger
|
||||
|
||||
await this.ensureDebuggerAttached(dbg, sessionId)
|
||||
|
||||
let expression: string
|
||||
if (format === 'json' || format === 'txt') {
|
||||
expression = 'document.body.innerText'
|
||||
} else {
|
||||
expression = 'document.documentElement.outerHTML'
|
||||
}
|
||||
|
||||
const result = (await dbg.sendCommand('Runtime.evaluate', {
|
||||
expression,
|
||||
returnByValue: true
|
||||
})) as { result?: { value?: string } }
|
||||
|
||||
const content = result?.result?.value ?? ''
|
||||
|
||||
if (format === 'markdown') {
|
||||
const turndownService = new TurndownService()
|
||||
return turndownService.turndown(content)
|
||||
}
|
||||
if (format === 'json') {
|
||||
// Attempt to parse as JSON; if content is not valid JSON, wrap it in a data object
|
||||
try {
|
||||
return JSON.parse(content)
|
||||
} catch {
|
||||
return { data: content }
|
||||
}
|
||||
}
|
||||
return content
|
||||
}
|
||||
}
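A usage sketch of the controller from main-process code (the import path is an assumption; the method signatures match the class above):

import { CdpBrowserController } from './mcpServers/browser' // path assumed relative to src/main

async function demo() {
  const browser = new CdpBrowserController({ maxSessions: 2 })
  const { title, currentUrl } = await browser.open('https://example.com/', 10000, false, 'docs')
  const heading = await browser.execute('document.querySelector("h1")?.textContent', 5000, 'docs')
  const markdown = await browser.fetch('https://example.com/', 'markdown', 10000, 'docs')
  console.log(title, currentUrl, heading)
  console.log(markdown)
  await browser.reset('docs')
}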
|
||||
3
src/main/mcpServers/browser/index.ts
Normal file
@ -0,0 +1,3 @@
|
||||
export { CdpBrowserController } from './controller'
|
||||
export { BrowserServer } from './server'
|
||||
export { BrowserServer as default } from './server'
|
||||
50
src/main/mcpServers/browser/server.ts
Normal file
@ -0,0 +1,50 @@
|
||||
import type { Server } from '@modelcontextprotocol/sdk/server/index.js'
|
||||
import { Server as MCServer } from '@modelcontextprotocol/sdk/server/index.js'
|
||||
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
|
||||
import { app } from 'electron'
|
||||
|
||||
import { CdpBrowserController } from './controller'
|
||||
import { toolDefinitions, toolHandlers } from './tools'
|
||||
|
||||
export class BrowserServer {
|
||||
public server: Server
|
||||
private controller = new CdpBrowserController()
|
||||
|
||||
constructor() {
|
||||
const server = new MCServer(
|
||||
{
|
||||
name: '@cherry/browser',
|
||||
version: '0.1.0'
|
||||
},
|
||||
{
|
||||
capabilities: {
|
||||
resources: {},
|
||||
tools: {}
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
server.setRequestHandler(ListToolsRequestSchema, async () => {
|
||||
return {
|
||||
tools: toolDefinitions
|
||||
}
|
||||
})
|
||||
|
||||
server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
||||
const { name, arguments: args } = request.params
|
||||
const handler = toolHandlers[name]
|
||||
if (!handler) {
|
||||
throw new Error('Tool not found')
|
||||
}
|
||||
return handler(this.controller, args)
|
||||
})
|
||||
|
||||
app.on('before-quit', () => {
|
||||
void this.controller.reset()
|
||||
})
|
||||
|
||||
this.server = server
|
||||
}
|
||||
}
|
||||
|
||||
export default BrowserServer
|
||||
48
src/main/mcpServers/browser/tools/execute.ts
Normal file
@ -0,0 +1,48 @@
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { CdpBrowserController } from '../controller'
|
||||
import { errorResponse, successResponse } from './utils'
|
||||
|
||||
export const ExecuteSchema = z.object({
|
||||
code: z
|
||||
.string()
|
||||
.describe(
|
||||
'JavaScript evaluated via Chrome DevTools Runtime.evaluate. Keep it short; prefer one-line with semicolons for multiple statements.'
|
||||
),
|
||||
timeout: z.number().default(5000).describe('Timeout in milliseconds for code execution (default: 5000ms)'),
|
||||
sessionId: z.string().optional().describe('Session identifier to target a specific page (default: default)')
|
||||
})
|
||||
|
||||
export const executeToolDefinition = {
|
||||
name: 'execute',
|
||||
description:
|
||||
'Run JavaScript in the current page via Runtime.evaluate. Prefer short, single-line snippets; use semicolons for multiple statements.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
code: {
|
||||
type: 'string',
|
||||
description: 'One-line JS to evaluate in page context'
|
||||
},
|
||||
timeout: {
|
||||
type: 'number',
|
||||
description: 'Timeout in milliseconds (default 5000)'
|
||||
},
|
||||
sessionId: {
|
||||
type: 'string',
|
||||
description: 'Session identifier; targets a specific page (default: default)'
|
||||
}
|
||||
},
|
||||
required: ['code']
|
||||
}
|
||||
}
|
||||
|
||||
export async function handleExecute(controller: CdpBrowserController, args: unknown) {
|
||||
const { code, timeout, sessionId } = ExecuteSchema.parse(args)
|
||||
try {
|
||||
const value = await controller.execute(code, timeout, sessionId ?? 'default')
|
||||
return successResponse(typeof value === 'string' ? value : JSON.stringify(value))
|
||||
} catch (error) {
|
||||
return errorResponse(error as Error)
|
||||
}
|
||||
}
|
||||
49
src/main/mcpServers/browser/tools/fetch.ts
Normal file
@ -0,0 +1,49 @@
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { CdpBrowserController } from '../controller'
|
||||
import { errorResponse, successResponse } from './utils'
|
||||
|
||||
export const FetchSchema = z.object({
|
||||
url: z.url().describe('URL to fetch'),
|
||||
format: z.enum(['html', 'txt', 'markdown', 'json']).default('markdown').describe('Output format (default: markdown)'),
|
||||
timeout: z.number().optional().describe('Timeout in milliseconds for navigation (default: 10000)'),
|
||||
sessionId: z.string().optional().describe('Session identifier (default: default)')
|
||||
})
|
||||
|
||||
export const fetchToolDefinition = {
|
||||
name: 'fetch',
|
||||
description: 'Fetch a URL using the browser and return content in specified format (html, txt, markdown, json)',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
url: {
|
||||
type: 'string',
|
||||
description: 'URL to fetch'
|
||||
},
|
||||
format: {
|
||||
type: 'string',
|
||||
enum: ['html', 'txt', 'markdown', 'json'],
|
||||
description: 'Output format (default: markdown)'
|
||||
},
|
||||
timeout: {
|
||||
type: 'number',
|
||||
description: 'Navigation timeout in milliseconds (default: 10000)'
|
||||
},
|
||||
sessionId: {
|
||||
type: 'string',
|
||||
description: 'Session identifier (default: default)'
|
||||
}
|
||||
},
|
||||
required: ['url']
|
||||
}
|
||||
}
|
||||
|
||||
export async function handleFetch(controller: CdpBrowserController, args: unknown) {
|
||||
const { url, format, timeout, sessionId } = FetchSchema.parse(args)
|
||||
try {
|
||||
const content = await controller.fetch(url, format, timeout ?? 10000, sessionId ?? 'default')
|
||||
return successResponse(typeof content === 'string' ? content : JSON.stringify(content))
|
||||
} catch (error) {
|
||||
return errorResponse(error as Error)
|
||||
}
|
||||
}
|
||||
25
src/main/mcpServers/browser/tools/index.ts
Normal file
@ -0,0 +1,25 @@
|
||||
export { ExecuteSchema, executeToolDefinition, handleExecute } from './execute'
|
||||
export { FetchSchema, fetchToolDefinition, handleFetch } from './fetch'
|
||||
export { handleOpen, OpenSchema, openToolDefinition } from './open'
|
||||
export { handleReset, resetToolDefinition } from './reset'
|
||||
|
||||
import type { CdpBrowserController } from '../controller'
|
||||
import { executeToolDefinition, handleExecute } from './execute'
|
||||
import { fetchToolDefinition, handleFetch } from './fetch'
|
||||
import { handleOpen, openToolDefinition } from './open'
|
||||
import { handleReset, resetToolDefinition } from './reset'
|
||||
|
||||
export const toolDefinitions = [openToolDefinition, executeToolDefinition, resetToolDefinition, fetchToolDefinition]
|
||||
|
||||
export const toolHandlers: Record<
|
||||
string,
|
||||
(
|
||||
controller: CdpBrowserController,
|
||||
args: unknown
|
||||
) => Promise<{ content: { type: string; text: string }[]; isError: boolean }>
|
||||
> = {
|
||||
open: handleOpen,
|
||||
execute: handleExecute,
|
||||
reset: handleReset,
|
||||
fetch: handleFetch
|
||||
}
|
||||
47
src/main/mcpServers/browser/tools/open.ts
Normal file
@ -0,0 +1,47 @@
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { CdpBrowserController } from '../controller'
|
||||
import { successResponse } from './utils'
|
||||
|
||||
export const OpenSchema = z.object({
|
||||
url: z.url().describe('URL to open in the controlled Electron window'),
|
||||
timeout: z.number().optional().describe('Timeout in milliseconds for navigation (default: 10000)'),
|
||||
show: z.boolean().optional().describe('Whether to show the browser window (default: false)'),
|
||||
sessionId: z
|
||||
.string()
|
||||
.optional()
|
||||
.describe('Session identifier; separate sessions keep separate pages (default: default)')
|
||||
})
|
||||
|
||||
export const openToolDefinition = {
|
||||
name: 'open',
|
||||
description: 'Open a URL in a hidden Electron window controlled via Chrome DevTools Protocol',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
url: {
|
||||
type: 'string',
|
||||
description: 'URL to load'
|
||||
},
|
||||
timeout: {
|
||||
type: 'number',
|
||||
description: 'Navigation timeout in milliseconds (default 10000)'
|
||||
},
|
||||
show: {
|
||||
type: 'boolean',
|
||||
description: 'Whether to show the browser window (default false)'
|
||||
},
|
||||
sessionId: {
|
||||
type: 'string',
|
||||
description: 'Session identifier; separate sessions keep separate pages (default: default)'
|
||||
}
|
||||
},
|
||||
required: ['url']
|
||||
}
|
||||
}
|
||||
|
||||
export async function handleOpen(controller: CdpBrowserController, args: unknown) {
|
||||
const { url, timeout, show, sessionId } = OpenSchema.parse(args)
|
||||
const res = await controller.open(url, timeout ?? 10000, show ?? false, sessionId ?? 'default')
|
||||
return successResponse(JSON.stringify(res))
|
||||
}
|
||||
34
src/main/mcpServers/browser/tools/reset.ts
Normal file
@ -0,0 +1,34 @@
|
||||
import * as z from 'zod'
|
||||
|
||||
import type { CdpBrowserController } from '../controller'
|
||||
import { successResponse } from './utils'
|
||||
|
||||
/** Zod schema for validating reset tool arguments */
|
||||
export const ResetSchema = z.object({
|
||||
sessionId: z.string().optional().describe('Session identifier to reset; omit to reset all sessions')
|
||||
})
|
||||
|
||||
/** MCP tool definition for the reset tool */
|
||||
export const resetToolDefinition = {
|
||||
name: 'reset',
|
||||
description: 'Reset the controlled window and detach debugger',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
sessionId: {
|
||||
type: 'string',
|
||||
description: 'Session identifier to reset; omit to reset all sessions'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handler for the reset MCP tool.
|
||||
* Closes browser window(s) and detaches debugger for the specified session or all sessions.
|
||||
*/
|
||||
export async function handleReset(controller: CdpBrowserController, args: unknown) {
|
||||
const { sessionId } = ResetSchema.parse(args)
|
||||
await controller.reset(sessionId)
|
||||
return successResponse('reset')
|
||||
}
|
||||
13
src/main/mcpServers/browser/tools/utils.ts
Normal file
@ -0,0 +1,13 @@
|
||||
export function successResponse(text: string) {
|
||||
return {
|
||||
content: [{ type: 'text', text }],
|
||||
isError: false
|
||||
}
|
||||
}
|
||||
|
||||
export function errorResponse(error: Error) {
|
||||
return {
|
||||
content: [{ type: 'text', text: error.message }],
|
||||
isError: true
|
||||
}
|
||||
}
|
||||
4
src/main/mcpServers/browser/types.ts
Normal file
@ -0,0 +1,4 @@
|
||||
import { loggerService } from '@logger'
|
||||
|
||||
export const logger = loggerService.withContext('MCPBrowserCDP')
|
||||
export const userAgent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:145.0) Gecko/20100101 Firefox/145.0'
|
||||
@ -4,6 +4,7 @@ import type { BuiltinMCPServerName } from '@types'
|
||||
import { BuiltinMCPServerNames } from '@types'
|
||||
|
||||
import BraveSearchServer from './brave-search'
|
||||
import BrowserServer from './browser'
|
||||
import DiDiMcpServer from './didi-mcp'
|
||||
import DifyKnowledgeServer from './dify-knowledge'
|
||||
import FetchServer from './fetch'
|
||||
@ -48,6 +49,9 @@ export function createInMemoryMCPServer(
|
||||
const apiKey = envs.DIDI_API_KEY
|
||||
return new DiDiMcpServer(apiKey).server
|
||||
}
|
||||
case BuiltinMCPServerNames.browser: {
|
||||
return new BrowserServer().server
|
||||
}
|
||||
default:
|
||||
throw new Error(`Unknown in-memory MCP server: ${name}`)
|
||||
}
|
||||
|
||||
@ -31,7 +31,8 @@ export enum ConfigKeys {
|
||||
DisableHardwareAcceleration = 'disableHardwareAcceleration',
|
||||
Proxy = 'proxy',
|
||||
EnableDeveloperMode = 'enableDeveloperMode',
|
||||
ClientId = 'clientId'
|
||||
ClientId = 'clientId',
|
||||
GitBashPath = 'gitBashPath'
|
||||
}
|
||||
|
||||
export class ConfigManager {
|
||||
|
||||
@ -163,7 +163,7 @@ class FileStorage {
|
||||
fs.mkdirSync(this.storageDir, { recursive: true })
|
||||
}
|
||||
if (!fs.existsSync(this.notesDir)) {
|
||||
fs.mkdirSync(this.storageDir, { recursive: true })
|
||||
fs.mkdirSync(this.notesDir, { recursive: true })
|
||||
}
|
||||
if (!fs.existsSync(this.tempDir)) {
|
||||
fs.mkdirSync(this.tempDir, { recursive: true })
|
||||
|
||||
@ -33,6 +33,7 @@ import {
|
||||
import { nanoid } from '@reduxjs/toolkit'
|
||||
import { HOME_CHERRY_DIR } from '@shared/config/constant'
|
||||
import type { MCPProgressEvent } from '@shared/config/types'
|
||||
import type { MCPServerLogEntry } from '@shared/config/types'
|
||||
import { IpcChannel } from '@shared/IpcChannel'
|
||||
import { defaultAppHeaders } from '@shared/utils'
|
||||
import {
|
||||
@ -56,6 +57,7 @@ import { CacheService } from './CacheService'
|
||||
import DxtService from './DxtService'
|
||||
import { CallBackServer } from './mcp/oauth/callback'
|
||||
import { McpOAuthClientProvider } from './mcp/oauth/provider'
|
||||
import { ServerLogBuffer } from './mcp/ServerLogBuffer'
|
||||
import { windowService } from './WindowService'
|
||||
|
||||
// Generic type for caching wrapped functions
|
||||
@ -142,6 +144,7 @@ class McpService {
|
||||
private pendingClients: Map<string, Promise<Client>> = new Map()
|
||||
private dxtService = new DxtService()
|
||||
private activeToolCalls: Map<string, AbortController> = new Map()
|
||||
private serverLogs = new ServerLogBuffer(200)
|
||||
|
||||
constructor() {
|
||||
this.initClient = this.initClient.bind(this)
|
||||
@ -159,6 +162,7 @@ class McpService {
|
||||
this.cleanup = this.cleanup.bind(this)
|
||||
this.checkMcpConnectivity = this.checkMcpConnectivity.bind(this)
|
||||
this.getServerVersion = this.getServerVersion.bind(this)
|
||||
this.getServerLogs = this.getServerLogs.bind(this)
|
||||
}
|
||||
|
||||
private getServerKey(server: MCPServer): string {
|
||||
@ -172,6 +176,19 @@ class McpService {
|
||||
})
|
||||
}
|
||||
|
||||
private emitServerLog(server: MCPServer, entry: MCPServerLogEntry) {
|
||||
const serverKey = this.getServerKey(server)
|
||||
this.serverLogs.append(serverKey, entry)
|
||||
const mainWindow = windowService.getMainWindow()
|
||||
if (mainWindow) {
|
||||
mainWindow.webContents.send(IpcChannel.Mcp_ServerLog, { ...entry, serverId: server.id })
|
||||
}
|
||||
}
|
||||
|
||||
public getServerLogs(_: Electron.IpcMainInvokeEvent, server: MCPServer): MCPServerLogEntry[] {
|
||||
return this.serverLogs.get(this.getServerKey(server))
|
||||
}
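On the renderer side these entries presumably arrive over the Mcp_ServerLog channel emitted above; a hedged sketch of a consumer (direct ipcRenderer use is for brevity, the real preload bridge is an assumption):

import type { MCPServerLogEntry } from '@shared/config/types'
import { IpcChannel } from '@shared/IpcChannel'
import { ipcRenderer } from 'electron'

ipcRenderer.on(IpcChannel.Mcp_ServerLog, (_event, entry: MCPServerLogEntry & { serverId: string }) => {
  // Append to a per-server log view, keyed by serverId
  console.log(`[${entry.serverId}] ${entry.level}: ${entry.message}`)
})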
|
||||
|
||||
async initClient(server: MCPServer): Promise<Client> {
|
||||
const serverKey = this.getServerKey(server)
|
||||
|
||||
@ -366,9 +383,18 @@ class McpService {
|
||||
}
|
||||
|
||||
const stdioTransport = new StdioClientTransport(transportOptions)
|
||||
stdioTransport.stderr?.on('data', (data) =>
|
||||
getServerLogger(server).debug(`Stdio stderr`, { data: data.toString() })
|
||||
)
|
||||
stdioTransport.stderr?.on('data', (data) => {
|
||||
const msg = data.toString()
|
||||
getServerLogger(server).debug(`Stdio stderr`, { data: msg })
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'stderr',
|
||||
message: msg.trim(),
|
||||
source: 'stdio'
|
||||
})
|
||||
})
|
||||
// StdioClientTransport does not expose stdout as a readable stream for raw logging
|
||||
// (stdout is reserved for JSON-RPC). Avoid attaching a listener that would never fire.
|
||||
return stdioTransport
|
||||
} else {
|
||||
throw new Error('Either baseUrl or command must be provided')
|
||||
@ -436,6 +462,13 @@ class McpService {
|
||||
}
|
||||
}
|
||||
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'info',
|
||||
message: 'Server connected',
|
||||
source: 'client'
|
||||
})
|
||||
|
||||
// Store the new client in the cache
|
||||
this.clients.set(serverKey, client)
|
||||
|
||||
@ -446,9 +479,22 @@ class McpService {
|
||||
this.clearServerCache(serverKey)
|
||||
|
||||
logger.debug(`Activated server: ${server.name}`)
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'info',
|
||||
message: 'Server activated',
|
||||
source: 'client'
|
||||
})
|
||||
return client
|
||||
} catch (error) {
|
||||
getServerLogger(server).error(`Error activating server ${server.name}`, error as Error)
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'error',
|
||||
message: `Error activating server: ${(error as Error)?.message}`,
|
||||
data: redactSensitive(error),
|
||||
source: 'client'
|
||||
})
|
||||
throw error
|
||||
}
|
||||
} finally {
|
||||
@ -506,6 +552,16 @@ class McpService {
|
||||
// Set up logging message notification handler
|
||||
client.setNotificationHandler(LoggingMessageNotificationSchema, async (notification) => {
|
||||
logger.debug(`Message from server ${server.name}:`, notification.params)
|
||||
const msg = notification.params?.message
|
||||
if (msg) {
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: (notification.params?.level as MCPServerLogEntry['level']) || 'info',
|
||||
message: typeof msg === 'string' ? msg : JSON.stringify(msg),
|
||||
data: redactSensitive(notification.params?.data),
|
||||
source: notification.params?.logger || 'server'
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
getServerLogger(server).debug(`Set up notification handlers`)
|
||||
@ -540,6 +596,7 @@ class McpService {
|
||||
this.clients.delete(serverKey)
|
||||
// Clear all caches for this server
|
||||
this.clearServerCache(serverKey)
|
||||
this.serverLogs.remove(serverKey)
|
||||
} else {
|
||||
logger.warn(`No client found for server`, { serverKey })
|
||||
}
|
||||
@ -548,6 +605,12 @@ class McpService {
|
||||
async stopServer(_: Electron.IpcMainInvokeEvent, server: MCPServer) {
|
||||
const serverKey = this.getServerKey(server)
|
||||
getServerLogger(server).debug(`Stopping server`)
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'info',
|
||||
message: 'Stopping server',
|
||||
source: 'client'
|
||||
})
|
||||
await this.closeClient(serverKey)
|
||||
}
|
||||
|
||||
@ -574,6 +637,12 @@ class McpService {
|
||||
async restartServer(_: Electron.IpcMainInvokeEvent, server: MCPServer) {
|
||||
getServerLogger(server).debug(`Restarting server`)
|
||||
const serverKey = this.getServerKey(server)
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'info',
|
||||
message: 'Restarting server',
|
||||
source: 'client'
|
||||
})
|
||||
await this.closeClient(serverKey)
|
||||
// Clear cache before restarting to ensure fresh data
|
||||
this.clearServerCache(serverKey)
|
||||
@ -606,9 +675,22 @@ class McpService {
|
||||
// Attempt to list tools as a way to check connectivity
|
||||
await client.listTools()
|
||||
getServerLogger(server).debug(`Connectivity check successful`)
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'info',
|
||||
message: 'Connectivity check successful',
|
||||
source: 'connectivity'
|
||||
})
|
||||
return true
|
||||
} catch (error) {
|
||||
getServerLogger(server).error(`Connectivity check failed`, error as Error)
|
||||
this.emitServerLog(server, {
|
||||
timestamp: Date.now(),
|
||||
level: 'error',
|
||||
message: `Connectivity check failed: ${(error as Error).message}`,
|
||||
data: redactSensitive(error),
|
||||
source: 'connectivity'
|
||||
})
|
||||
// Close the client if connectivity check fails to ensure a clean state for the next attempt
|
||||
const serverKey = this.getServerKey(server)
|
||||
await this.closeClient(serverKey)
|
||||
|
||||
@ -1393,6 +1393,50 @@ export class SelectionService {
|
||||
actionWindow.setAlwaysOnTop(isPinned)
|
||||
}
|
||||
|
||||
/**
|
||||
* [Windows only] Manual window resize handler
|
||||
*
|
||||
* ELECTRON BUG WORKAROUND:
|
||||
* In Electron, when using `frame: false` + `transparent: true`, the native window
|
||||
* resize functionality is broken on Windows. This is a known Electron bug.
|
||||
* See: https://github.com/electron/electron/issues/48554
|
||||
*
|
||||
* This method can be removed once the Electron bug is fixed.
|
||||
*/
|
||||
public resizeActionWindow(actionWindow: BrowserWindow, deltaX: number, deltaY: number, direction: string): void {
|
||||
const bounds = actionWindow.getBounds()
|
||||
const minWidth = 300
|
||||
const minHeight = 200
|
||||
|
||||
let { x, y, width, height } = bounds
|
||||
|
||||
// Handle horizontal resize
|
||||
if (direction.includes('e')) {
|
||||
width = Math.max(minWidth, width + deltaX)
|
||||
}
|
||||
if (direction.includes('w')) {
|
||||
const newWidth = Math.max(minWidth, width - deltaX)
|
||||
if (newWidth !== width) {
|
||||
x = x + (width - newWidth)
|
||||
width = newWidth
|
||||
}
|
||||
}
|
||||
|
||||
// Handle vertical resize
|
||||
if (direction.includes('s')) {
|
||||
height = Math.max(minHeight, height + deltaY)
|
||||
}
|
||||
if (direction.includes('n')) {
|
||||
const newHeight = Math.max(minHeight, height - deltaY)
|
||||
if (newHeight !== height) {
|
||||
y = y + (height - newHeight)
|
||||
height = newHeight
|
||||
}
|
||||
}
|
||||
|
||||
actionWindow.setBounds({ x, y, width, height })
|
||||
}
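A hedged sketch of the renderer-side call that would drive this resize handler during a drag (the exact preload wiring is an assumption):

import { IpcChannel } from '@shared/IpcChannel'
import { ipcRenderer } from 'electron'

// e.g. while dragging the south-east corner by (12, 8) pixels
void ipcRenderer.invoke(IpcChannel.Selection_ActionWindowResize, 12, 8, 'se')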
|
||||
|
||||
/**
|
||||
* Update trigger mode behavior
|
||||
* Switches between selection-based and alt-key based triggering
|
||||
@ -1510,6 +1554,18 @@ export class SelectionService {
|
||||
}
|
||||
})
|
||||
|
||||
// [Windows only] Electron bug workaround - can be removed once fixed
|
||||
// See: https://github.com/electron/electron/issues/48554
|
||||
ipcMain.handle(
|
||||
IpcChannel.Selection_ActionWindowResize,
|
||||
(event, deltaX: number, deltaY: number, direction: string) => {
|
||||
const actionWindow = BrowserWindow.fromWebContents(event.sender)
|
||||
if (actionWindow) {
|
||||
selectionService?.resizeActionWindow(actionWindow, deltaX, deltaY, direction)
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
this.isIpcHandlerRegistered = true
|
||||
}
|
||||
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
import { IpcChannel } from '@shared/IpcChannel'
|
||||
import { app, session, shell, webContents } from 'electron'
|
||||
import { app, dialog, session, shell, webContents } from 'electron'
|
||||
import { promises as fs } from 'fs'
|
||||
|
||||
/**
|
||||
* init the useragent of the webview session
|
||||
@ -53,11 +54,17 @@ const attachKeyboardHandler = (contents: Electron.WebContents) => {
|
||||
return
|
||||
}
|
||||
|
||||
const isFindShortcut = (input.control || input.meta) && key === 'f'
|
||||
const isEscape = key === 'escape'
|
||||
const isEnter = key === 'enter'
|
||||
// Helper to check if this is a shortcut we handle
|
||||
const isHandledShortcut = (k: string) => {
|
||||
const isFindShortcut = (input.control || input.meta) && k === 'f'
|
||||
const isPrintShortcut = (input.control || input.meta) && k === 'p'
|
||||
const isSaveShortcut = (input.control || input.meta) && k === 's'
|
||||
const isEscape = k === 'escape'
|
||||
const isEnter = k === 'enter'
|
||||
return isFindShortcut || isPrintShortcut || isSaveShortcut || isEscape || isEnter
|
||||
}
|
||||
|
||||
if (!isFindShortcut && !isEscape && !isEnter) {
|
||||
if (!isHandledShortcut(key)) {
|
||||
return
|
||||
}
|
||||
|
||||
@ -66,11 +73,20 @@ const attachKeyboardHandler = (contents: Electron.WebContents) => {
|
||||
return
|
||||
}
|
||||
|
||||
const isFindShortcut = (input.control || input.meta) && key === 'f'
|
||||
const isPrintShortcut = (input.control || input.meta) && key === 'p'
|
||||
const isSaveShortcut = (input.control || input.meta) && key === 's'
|
||||
|
||||
// Always prevent Cmd/Ctrl+F to override the guest page's native find dialog
|
||||
if (isFindShortcut) {
|
||||
event.preventDefault()
|
||||
}
|
||||
|
||||
// Prevent default print/save dialogs and handle them with custom logic
|
||||
if (isPrintShortcut || isSaveShortcut) {
|
||||
event.preventDefault()
|
||||
}
|
||||
|
||||
// Send the hotkey event to the renderer
|
||||
// The renderer will decide whether to preventDefault for Escape and Enter
|
||||
// based on whether the search bar is visible
|
||||
@ -100,3 +116,130 @@ export function initWebviewHotkeys() {
|
||||
attachKeyboardHandler(contents)
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Print webview content to PDF
|
||||
* @param webviewId The webview webContents id
|
||||
 * @returns Path to saved PDF file or null if user cancelled
 */
export async function printWebviewToPDF(webviewId: number): Promise<string | null> {
  const webview = webContents.fromId(webviewId)
  if (!webview) {
    throw new Error('Webview not found')
  }

  try {
    // Get the page title for default filename
    const pageTitle = await webview.executeJavaScript('document.title || "webpage"').catch(() => 'webpage')
    // Sanitize filename by removing invalid characters
    const sanitizedTitle = pageTitle.replace(/[<>:"/\\|?*]/g, '-').substring(0, 100)
    const defaultFilename = sanitizedTitle ? `${sanitizedTitle}.pdf` : `webpage-${Date.now()}.pdf`

    // Show save dialog
    const { canceled, filePath } = await dialog.showSaveDialog({
      title: 'Save as PDF',
      defaultPath: defaultFilename,
      filters: [{ name: 'PDF Files', extensions: ['pdf'] }]
    })

    if (canceled || !filePath) {
      return null
    }

    // Generate PDF with settings to capture full page
    const pdfData = await webview.printToPDF({
      margins: {
        marginType: 'default'
      },
      printBackground: true,
      landscape: false,
      pageSize: 'A4',
      preferCSSPageSize: true
    })

    // Save PDF to file
    await fs.writeFile(filePath, pdfData)

    return filePath
  } catch (error) {
    throw new Error(`Failed to print to PDF: ${(error as Error).message}`)
  }
}

/**
 * Save webview content as HTML
 * @param webviewId The webview webContents id
 * @returns Path to saved HTML file or null if user cancelled
 */
export async function saveWebviewAsHTML(webviewId: number): Promise<string | null> {
  const webview = webContents.fromId(webviewId)
  if (!webview) {
    throw new Error('Webview not found')
  }

  try {
    // Get the page title for default filename
    const pageTitle = await webview.executeJavaScript('document.title || "webpage"').catch(() => 'webpage')
    // Sanitize filename by removing invalid characters
    const sanitizedTitle = pageTitle.replace(/[<>:"/\\|?*]/g, '-').substring(0, 100)
    const defaultFilename = sanitizedTitle ? `${sanitizedTitle}.html` : `webpage-${Date.now()}.html`

    // Show save dialog
    const { canceled, filePath } = await dialog.showSaveDialog({
      title: 'Save as HTML',
      defaultPath: defaultFilename,
      filters: [
        { name: 'HTML Files', extensions: ['html', 'htm'] },
        { name: 'All Files', extensions: ['*'] }
      ]
    })

    if (canceled || !filePath) {
      return null
    }

    // Get the HTML content with safe error handling
    const html = await webview.executeJavaScript(`
      (() => {
        try {
          // Build complete DOCTYPE string if present
          let doctype = '';
          if (document.doctype) {
            const dt = document.doctype;
            doctype = '<!DOCTYPE ' + (dt.name || 'html');

            // Add PUBLIC identifier if publicId is present
            if (dt.publicId) {
              // Escape single quotes in publicId
              const escapedPublicId = String(dt.publicId).replace(/'/g, "\\'");
              doctype += " PUBLIC '" + escapedPublicId + "'";

              // Add systemId if present (required when publicId is present)
              if (dt.systemId) {
                const escapedSystemId = String(dt.systemId).replace(/'/g, "\\'");
                doctype += " '" + escapedSystemId + "'";
              }
            } else if (dt.systemId) {
              // SYSTEM identifier (without PUBLIC)
              const escapedSystemId = String(dt.systemId).replace(/'/g, "\\'");
              doctype += " SYSTEM '" + escapedSystemId + "'";
            }

            doctype += '>';
          }
          return doctype + (document.documentElement?.outerHTML || '');
        } catch (error) {
          // Fallback: just return the HTML without DOCTYPE if there's an error
          return document.documentElement?.outerHTML || '';
        }
      })()
    `)

    // Save HTML to file
    await fs.writeFile(filePath, html, 'utf-8')

    return filePath
  } catch (error) {
    throw new Error(`Failed to save as HTML: ${(error as Error).message}`)
  }
}
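For context, a minimal sketch of how the renderer could drive these handlers over the preload bridge. It assumes the bridge is exposed as `window.api` and that the caller holds an Electron `<webview>` element; both names are assumptions, not part of this diff:

// Hypothetical renderer-side usage of the PDF/HTML export handlers.
async function exportCurrentPage(webviewRef: Electron.WebviewTag): Promise<void> {
  const webviewId = webviewRef.getWebContentsId()

  // Both calls resolve to the saved file path, or null if the user cancelled the dialog.
  const pdfPath = await window.api.webview.printToPDF(webviewId)
  if (pdfPath) {
    console.log(`PDF saved to ${pdfPath}`)
  }

  const htmlPath = await window.api.webview.saveAsHTML(webviewId)
  if (htmlPath) {
    console.log(`HTML saved to ${htmlPath}`)
  }
}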
29
src/main/services/__tests__/ServerLogBuffer.test.ts
Normal file
@ -0,0 +1,29 @@
import { describe, expect, it } from 'vitest'

import { ServerLogBuffer } from '../mcp/ServerLogBuffer'

describe('ServerLogBuffer', () => {
  it('keeps a bounded number of entries per server', () => {
    const buffer = new ServerLogBuffer(3)
    const key = 'srv'

    buffer.append(key, { timestamp: 1, level: 'info', message: 'a' })
    buffer.append(key, { timestamp: 2, level: 'info', message: 'b' })
    buffer.append(key, { timestamp: 3, level: 'info', message: 'c' })
    buffer.append(key, { timestamp: 4, level: 'info', message: 'd' })

    const logs = buffer.get(key)
    expect(logs).toHaveLength(3)
    expect(logs[0].message).toBe('b')
    expect(logs[2].message).toBe('d')
  })

  it('isolates entries by server key', () => {
    const buffer = new ServerLogBuffer(5)
    buffer.append('one', { timestamp: 1, level: 'info', message: 'a' })
    buffer.append('two', { timestamp: 2, level: 'info', message: 'b' })

    expect(buffer.get('one')).toHaveLength(1)
    expect(buffer.get('two')).toHaveLength(1)
  })
})
@ -15,6 +15,8 @@ import { query } from '@anthropic-ai/claude-agent-sdk'
import { loggerService } from '@logger'
import { config as apiConfigService } from '@main/apiServer/config'
import { validateModelId } from '@main/apiServer/utils'
import { ConfigKeys, configManager } from '@main/services/ConfigManager'
import { validateGitBashPath } from '@main/utils/process'
import getLoginShellEnvironment from '@main/utils/shell-env'
import { app } from 'electron'

@ -107,6 +109,8 @@ class ClaudeCodeService implements AgentServiceInterface {
      Object.entries(loginShellEnv).filter(([key]) => !key.toLowerCase().endsWith('_proxy'))
    ) as Record<string, string>

    const customGitBashPath = validateGitBashPath(configManager.get(ConfigKeys.GitBashPath) as string | undefined)

    const env = {
      ...loginShellEnvWithoutProxies,
      // TODO: fix the proxy api server
@ -126,7 +130,8 @@ class ClaudeCodeService implements AgentServiceInterface {
      // Set CLAUDE_CONFIG_DIR to app's userData directory to avoid path encoding issues
      // on Windows when the username contains non-ASCII characters (e.g., Chinese characters)
      // This prevents the SDK from using the user's home directory which may have encoding problems
      CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude')
      CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude'),
      ...(customGitBashPath ? { CLAUDE_CODE_GIT_BASH_PATH: customGitBashPath } : {})
    }

    const errorChunks: string[] = []
36
src/main/services/mcp/ServerLogBuffer.ts
Normal file
@ -0,0 +1,36 @@
export type MCPServerLogEntry = {
  timestamp: number
  level: 'debug' | 'info' | 'warn' | 'error' | 'stderr' | 'stdout'
  message: string
  data?: any
  source?: string
}

/**
 * Lightweight ring buffer for per-server MCP logs.
 */
export class ServerLogBuffer {
  private maxEntries: number
  private logs: Map<string, MCPServerLogEntry[]> = new Map()

  constructor(maxEntries = 200) {
    this.maxEntries = maxEntries
  }

  append(serverKey: string, entry: MCPServerLogEntry) {
    const list = this.logs.get(serverKey) ?? []
    list.push(entry)
    if (list.length > this.maxEntries) {
      list.splice(0, list.length - this.maxEntries)
    }
    this.logs.set(serverKey, list)
  }

  get(serverKey: string): MCPServerLogEntry[] {
    return [...(this.logs.get(serverKey) ?? [])]
  }

  remove(serverKey: string) {
    this.logs.delete(serverKey)
  }
}
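A minimal usage sketch for the ring buffer above, as it might be wired into the MCP service; the stderr hook and `serverKey` handling are illustrative assumptions, not code from this change:

import { ServerLogBuffer, type MCPServerLogEntry } from './ServerLogBuffer'

// One shared buffer; each server keeps at most 200 entries (the class default).
const serverLogs = new ServerLogBuffer()

// Hypothetical stderr hook for a stdio MCP server process.
function onServerStderr(serverKey: string, chunk: Buffer) {
  serverLogs.append(serverKey, {
    timestamp: Date.now(),
    level: 'stderr',
    message: chunk.toString('utf-8').trimEnd()
  })
}

// Later, e.g. when the renderer asks for logs via Mcp_GetServerLogs:
function getLogsForServer(serverKey: string): MCPServerLogEntry[] {
  // get() returns a copy, so callers cannot mutate the buffer's internal state.
  return serverLogs.get(serverKey)
}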
@ -128,8 +128,8 @@ export class CallBackServer {
    })

    return new Promise<http.Server>((resolve, reject) => {
      server.listen(port, () => {
        logger.info(`OAuth callback server listening on port ${port}`)
      server.listen(port, '127.0.0.1', () => {
        logger.info(`OAuth callback server listening on 127.0.0.1:${port}`)
        resolve(server)
      })
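Binding the callback listener to 127.0.0.1 keeps the OAuth redirect endpoint off external interfaces. The same pattern in isolation, as a generic sketch (the port and handler are arbitrary):

import http from 'node:http'

const server = http.createServer((_req, res) => {
  res.end('OK')
})

// Passing a host restricts the listener to the loopback interface; omitting it
// would bind to all interfaces and expose the callback URL on the network.
server.listen(12345, '127.0.0.1', () => {
  console.log('listening on 127.0.0.1:12345')
})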
@ -3,7 +3,7 @@ import fs from 'fs'
|
||||
import path from 'path'
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
import { findExecutable, findGitBash } from '../process'
|
||||
import { findExecutable, findGitBash, validateGitBashPath } from '../process'
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('child_process')
|
||||
@ -289,7 +289,133 @@ describe.skipIf(process.platform !== 'win32')('process utilities', () => {
|
||||
})
|
||||
})
|
||||
|
||||
describe('validateGitBashPath', () => {
|
||||
it('returns null when path is null', () => {
|
||||
const result = validateGitBashPath(null)
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it('returns null when path is undefined', () => {
|
||||
const result = validateGitBashPath(undefined)
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it('returns normalized path when valid bash.exe exists', () => {
|
||||
const customPath = 'C:\\PortableGit\\bin\\bash.exe'
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => p === 'C:\\PortableGit\\bin\\bash.exe')
|
||||
|
||||
const result = validateGitBashPath(customPath)
|
||||
|
||||
expect(result).toBe('C:\\PortableGit\\bin\\bash.exe')
|
||||
})
|
||||
|
||||
it('returns null when file does not exist', () => {
|
||||
vi.mocked(fs.existsSync).mockReturnValue(false)
|
||||
|
||||
const result = validateGitBashPath('C:\\missing\\bash.exe')
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
|
||||
it('returns null when path is not bash.exe', () => {
|
||||
const customPath = 'C:\\PortableGit\\bin\\git.exe'
|
||||
vi.mocked(fs.existsSync).mockReturnValue(true)
|
||||
|
||||
const result = validateGitBashPath(customPath)
|
||||
|
||||
expect(result).toBeNull()
|
||||
})
|
||||
})
|
||||
|
||||
describe('findGitBash', () => {
|
||||
describe('customPath parameter', () => {
|
||||
beforeEach(() => {
|
||||
delete process.env.CLAUDE_CODE_GIT_BASH_PATH
|
||||
})
|
||||
|
||||
it('uses customPath when valid', () => {
|
||||
const customPath = 'C:\\CustomGit\\bin\\bash.exe'
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => p === customPath)
|
||||
|
||||
const result = findGitBash(customPath)
|
||||
|
||||
expect(result).toBe(customPath)
|
||||
expect(execFileSync).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('falls back when customPath is invalid', () => {
|
||||
const customPath = 'C:\\Invalid\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => {
|
||||
if (p === customPath) return false
|
||||
if (p === gitPath) return true
|
||||
if (p === bashPath) return true
|
||||
return false
|
||||
})
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(gitPath)
|
||||
|
||||
const result = findGitBash(customPath)
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
})
|
||||
|
||||
it('prioritizes customPath over env override', () => {
|
||||
const customPath = 'C:\\CustomGit\\bin\\bash.exe'
|
||||
const envPath = 'C:\\EnvGit\\bin\\bash.exe'
|
||||
process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => p === customPath || p === envPath)
|
||||
|
||||
const result = findGitBash(customPath)
|
||||
|
||||
expect(result).toBe(customPath)
|
||||
})
|
||||
})
|
||||
|
||||
describe('env override', () => {
|
||||
beforeEach(() => {
|
||||
delete process.env.CLAUDE_CODE_GIT_BASH_PATH
|
||||
})
|
||||
|
||||
it('uses CLAUDE_CODE_GIT_BASH_PATH when valid', () => {
|
||||
const envPath = 'C:\\OverrideGit\\bin\\bash.exe'
|
||||
process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => p === envPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBe(envPath)
|
||||
expect(execFileSync).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('falls back when CLAUDE_CODE_GIT_BASH_PATH is invalid', () => {
|
||||
const envPath = 'C:\\Invalid\\bash.exe'
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
|
||||
|
||||
process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
|
||||
|
||||
vi.mocked(fs.existsSync).mockImplementation((p) => {
|
||||
if (p === envPath) return false
|
||||
if (p === gitPath) return true
|
||||
if (p === bashPath) return true
|
||||
return false
|
||||
})
|
||||
|
||||
vi.mocked(execFileSync).mockReturnValue(gitPath)
|
||||
|
||||
const result = findGitBash()
|
||||
|
||||
expect(result).toBe(bashPath)
|
||||
})
|
||||
})
|
||||
|
||||
describe('git.exe path derivation', () => {
|
||||
it('should derive bash.exe from standard Git installation (Git/cmd/git.exe)', () => {
|
||||
const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
|
||||
|
||||
@ -131,15 +131,37 @@ export function findExecutable(name: string): string | null {

/**
 * Find Git Bash executable on Windows
 * @param customPath - Optional custom path from config
 * @returns Full path to bash.exe or null if not found
 */
export function findGitBash(): string | null {
export function findGitBash(customPath?: string | null): string | null {
  // Git Bash is Windows-only
  if (!isWin) {
    return null
  }

  // 1. Find git.exe and derive bash.exe path
  // 1. Check custom path from config first
  if (customPath) {
    const validated = validateGitBashPath(customPath)
    if (validated) {
      logger.debug('Using custom Git Bash path from config', { path: validated })
      return validated
    }
    logger.warn('Custom Git Bash path provided but invalid', { path: customPath })
  }

  // 2. Check environment variable override
  const envOverride = process.env.CLAUDE_CODE_GIT_BASH_PATH
  if (envOverride) {
    const validated = validateGitBashPath(envOverride)
    if (validated) {
      logger.debug('Using CLAUDE_CODE_GIT_BASH_PATH override for bash.exe', { path: validated })
      return validated
    }
    logger.warn('CLAUDE_CODE_GIT_BASH_PATH provided but path is invalid', { path: envOverride })
  }

  // 3. Find git.exe and derive bash.exe path
  const gitPath = findExecutable('git')
  if (gitPath) {
    // Try multiple possible locations for bash.exe relative to git.exe
@ -164,7 +186,7 @@ export function findGitBash(): string | null {
    })
  }

  // 2. Fallback: check common Git Bash paths directly
  // 4. Fallback: check common Git Bash paths directly
  const commonBashPaths = [
    path.join(process.env.ProgramFiles || 'C:\\Program Files', 'Git', 'bin', 'bash.exe'),
    path.join(process.env['ProgramFiles(x86)'] || 'C:\\Program Files (x86)', 'Git', 'bin', 'bash.exe'),
@ -181,3 +203,25 @@ export function findGitBash(): string | null {
  logger.debug('Git Bash not found - checked git derivation and common paths')
  return null
}

export function validateGitBashPath(customPath?: string | null): string | null {
  if (!customPath) {
    return null
  }

  const resolved = path.resolve(customPath)

  if (!fs.existsSync(resolved)) {
    logger.warn('Custom Git Bash path does not exist', { path: resolved })
    return null
  }

  const isExe = resolved.toLowerCase().endsWith('bash.exe')
  if (!isExe) {
    logger.warn('Custom Git Bash path is not bash.exe', { path: resolved })
    return null
  }

  logger.debug('Validated custom Git Bash path', { path: resolved })
  return resolved
}
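With this change the resolution order is: validated custom path from settings, then the CLAUDE_CODE_GIT_BASH_PATH environment override, then derivation from git.exe, then the common install locations. A sketch of how a caller might combine this with the stored setting; it mirrors the ClaudeCodeService change above, and the warning message is illustrative:

import { ConfigKeys, configManager } from '@main/services/ConfigManager'
import { findGitBash, validateGitBashPath } from '@main/utils/process'

// Validate whatever the user configured; null if the path is missing or not bash.exe.
const configured = validateGitBashPath(configManager.get(ConfigKeys.GitBashPath) as string | undefined)

// findGitBash() applies the full precedence chain, starting from the custom path.
const bashPath = findGitBash(configured)
if (!bashPath) {
  console.warn('Git Bash not found; features that need bash.exe will be limited')
}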
@ -13,6 +13,7 @@ import type {
  LocalTransferState,
  WebviewKeyEvent
} from '@shared/config/types'
import type { MCPServerLogEntry } from '@shared/config/types'
import { IpcChannel } from '@shared/IpcChannel'
import type { Notification } from '@types'
import type {
@ -131,7 +132,10 @@ const api = {
    getDeviceType: () => ipcRenderer.invoke(IpcChannel.System_GetDeviceType),
    getHostname: () => ipcRenderer.invoke(IpcChannel.System_GetHostname),
    getCpuName: () => ipcRenderer.invoke(IpcChannel.System_GetCpuName),
    checkGitBash: (): Promise<boolean> => ipcRenderer.invoke(IpcChannel.System_CheckGitBash)
    checkGitBash: (): Promise<boolean> => ipcRenderer.invoke(IpcChannel.System_CheckGitBash),
    getGitBashPath: (): Promise<string | null> => ipcRenderer.invoke(IpcChannel.System_GetGitBashPath),
    setGitBashPath: (newPath: string | null): Promise<boolean> =>
      ipcRenderer.invoke(IpcChannel.System_SetGitBashPath, newPath)
  },
  devTools: {
    toggle: () => ipcRenderer.invoke(IpcChannel.System_ToggleDevTools)
@ -384,7 +388,16 @@ const api = {
    },
    abortTool: (callId: string) => ipcRenderer.invoke(IpcChannel.Mcp_AbortTool, callId),
    getServerVersion: (server: MCPServer): Promise<string | null> =>
      ipcRenderer.invoke(IpcChannel.Mcp_GetServerVersion, server)
      ipcRenderer.invoke(IpcChannel.Mcp_GetServerVersion, server),
    getServerLogs: (server: MCPServer): Promise<MCPServerLogEntry[]> =>
      ipcRenderer.invoke(IpcChannel.Mcp_GetServerLogs, server),
    onServerLog: (callback: (log: MCPServerLogEntry & { serverId?: string }) => void) => {
      const listener = (_event: Electron.IpcRendererEvent, log: MCPServerLogEntry & { serverId?: string }) => {
        callback(log)
      }
      ipcRenderer.on(IpcChannel.Mcp_ServerLog, listener)
      return () => ipcRenderer.off(IpcChannel.Mcp_ServerLog, listener)
    }
  },
  python: {
    execute: (script: string, context?: Record<string, any>, timeout?: number) =>
@ -436,6 +449,8 @@ const api = {
      ipcRenderer.invoke(IpcChannel.Webview_SetOpenLinkExternal, webviewId, isExternal),
    setSpellCheckEnabled: (webviewId: number, isEnable: boolean) =>
      ipcRenderer.invoke(IpcChannel.Webview_SetSpellCheckEnabled, webviewId, isEnable),
    printToPDF: (webviewId: number) => ipcRenderer.invoke(IpcChannel.Webview_PrintToPDF, webviewId),
    saveAsHTML: (webviewId: number) => ipcRenderer.invoke(IpcChannel.Webview_SaveAsHTML, webviewId),
    onFindShortcut: (callback: (payload: WebviewKeyEvent) => void) => {
      const listener = (_event: Electron.IpcRendererEvent, payload: WebviewKeyEvent) => {
        callback(payload)
@ -468,7 +483,10 @@ const api = {
      ipcRenderer.invoke(IpcChannel.Selection_ProcessAction, actionItem, isFullScreen),
    closeActionWindow: () => ipcRenderer.invoke(IpcChannel.Selection_ActionWindowClose),
    minimizeActionWindow: () => ipcRenderer.invoke(IpcChannel.Selection_ActionWindowMinimize),
    pinActionWindow: (isPinned: boolean) => ipcRenderer.invoke(IpcChannel.Selection_ActionWindowPin, isPinned)
    pinActionWindow: (isPinned: boolean) => ipcRenderer.invoke(IpcChannel.Selection_ActionWindowPin, isPinned),
    // [Windows only] Electron bug workaround - can be removed once https://github.com/electron/electron/issues/48554 is fixed
    resizeActionWindow: (deltaX: number, deltaY: number, direction: string) =>
      ipcRenderer.invoke(IpcChannel.Selection_ActionWindowResize, deltaX, deltaY, direction)
  },
  agentTools: {
    respondToPermission: (payload: {
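A minimal sketch of consuming the new log bridge from the renderer. It assumes these methods are exposed under `window.api.mcp`, and `currentServerId` / `appendLogLine` are placeholders for whatever the log panel provides:

// Subscribe to streamed MCP server logs and keep the cleanup function.
const unsubscribe = window.api.mcp.onServerLog((log) => {
  if (log.serverId === currentServerId) {
    appendLogLine(`[${log.level}] ${log.message}`)
  }
})

// Later, e.g. when the log panel unmounts:
unsubscribe()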
@ -91,7 +91,9 @@ export default class ModernAiProvider {
|
||||
if (this.isModel(modelOrProvider)) {
|
||||
// 传入的是 Model
|
||||
this.model = modelOrProvider
|
||||
this.actualProvider = provider ? adaptProvider({ provider }) : getActualProvider(modelOrProvider)
|
||||
this.actualProvider = provider
|
||||
? adaptProvider({ provider, model: modelOrProvider })
|
||||
: getActualProvider(modelOrProvider)
|
||||
// 只保存配置,不预先创建executor
|
||||
this.config = providerToAiSdkConfig(this.actualProvider, modelOrProvider)
|
||||
} else {
|
||||
|
||||
@ -124,7 +124,8 @@ export class AnthropicAPIClient extends BaseApiClient<
|
||||
|
||||
override async listModels(): Promise<Anthropic.ModelInfo[]> {
|
||||
const sdk = (await this.getSdkInstance()) as Anthropic
|
||||
const response = await sdk.models.list()
|
||||
// prevent auto appended /v1. It's included in baseUrl.
|
||||
const response = await sdk.models.list({ path: '/models' })
|
||||
return response.data
|
||||
}
|
||||
|
||||
|
||||
@ -173,13 +173,15 @@ export class GeminiAPIClient extends BaseApiClient<
|
||||
return this.sdkInstance
|
||||
}
|
||||
|
||||
const apiVersion = this.getApiVersion()
|
||||
|
||||
this.sdkInstance = new GoogleGenAI({
|
||||
vertexai: false,
|
||||
apiKey: this.apiKey,
|
||||
apiVersion: this.getApiVersion(),
|
||||
apiVersion,
|
||||
httpOptions: {
|
||||
baseUrl: this.getBaseURL(),
|
||||
apiVersion: this.getApiVersion(),
|
||||
apiVersion,
|
||||
headers: {
|
||||
...this.provider.extra_headers
|
||||
}
|
||||
@ -200,7 +202,7 @@ export class GeminiAPIClient extends BaseApiClient<
|
||||
return trailingVersion
|
||||
}
|
||||
|
||||
return 'v1beta'
|
||||
return ''
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@ -10,7 +10,7 @@ import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
|
||||
import {
|
||||
findTokenLimit,
|
||||
GEMINI_FLASH_MODEL_REGEX,
|
||||
getThinkModelType,
|
||||
getModelSupportedReasoningEffortOptions,
|
||||
isDeepSeekHybridInferenceModel,
|
||||
isDoubaoThinkingAutoModel,
|
||||
isGPT5SeriesModel,
|
||||
@ -33,7 +33,6 @@ import {
|
||||
isSupportedThinkingTokenQwenModel,
|
||||
isSupportedThinkingTokenZhipuModel,
|
||||
isVisionModel,
|
||||
MODEL_SUPPORTED_REASONING_EFFORT,
|
||||
ZHIPU_RESULT_TOKENS
|
||||
} from '@renderer/config/models'
|
||||
import { mapLanguageToQwenMTModel } from '@renderer/config/translate'
|
||||
@ -304,16 +303,15 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
|
||||
// Grok models/Perplexity models/OpenAI models
|
||||
if (isSupportedReasoningEffortModel(model)) {
|
||||
// 检查模型是否支持所选选项
|
||||
const modelType = getThinkModelType(model)
|
||||
const supportedOptions = MODEL_SUPPORTED_REASONING_EFFORT[modelType]
|
||||
if (supportedOptions.includes(reasoningEffort)) {
|
||||
const supportedOptions = getModelSupportedReasoningEffortOptions(model)
|
||||
if (supportedOptions?.includes(reasoningEffort)) {
|
||||
return {
|
||||
reasoning_effort: reasoningEffort
|
||||
}
|
||||
} else {
|
||||
// 如果不支持,fallback到第一个支持的值
|
||||
return {
|
||||
reasoning_effort: supportedOptions[0]
|
||||
reasoning_effort: supportedOptions?.[0]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -25,7 +25,7 @@ import type {
|
||||
OpenAISdkRawOutput,
|
||||
ReasoningEffortOptionalParams
|
||||
} from '@renderer/types/sdk'
|
||||
import { formatApiHost, withoutTrailingSlash } from '@renderer/utils/api'
|
||||
import { withoutTrailingSlash } from '@renderer/utils/api'
|
||||
import { isOllamaProvider } from '@renderer/utils/provider'
|
||||
|
||||
import { BaseApiClient } from '../BaseApiClient'
|
||||
@ -49,8 +49,9 @@ export abstract class OpenAIBaseClient<
|
||||
}
|
||||
|
||||
// 仅适用于openai
|
||||
override getBaseURL(isSupportedAPIVerion: boolean = true): string {
|
||||
return formatApiHost(this.provider.apiHost, isSupportedAPIVerion)
|
||||
override getBaseURL(): string {
|
||||
// apiHost is formatted when called by AiProvider
|
||||
return this.provider.apiHost
|
||||
}
|
||||
|
||||
override async generateImage({
|
||||
@ -87,7 +88,11 @@ export abstract class OpenAIBaseClient<
|
||||
}
|
||||
|
||||
override async getEmbeddingDimensions(model: Model): Promise<number> {
|
||||
const sdk = await this.getSdkInstance()
|
||||
let sdk: OpenAI = await this.getSdkInstance()
|
||||
if (isOllamaProvider(this.provider)) {
|
||||
const embedBaseUrl = `${this.provider.apiHost.replace(/(\/(api|v1))\/?$/, '')}/v1`
|
||||
sdk = sdk.withOptions({ baseURL: embedBaseUrl })
|
||||
}
|
||||
|
||||
const data = await sdk.embeddings.create({
|
||||
model: model.id,
|
||||
@ -100,6 +105,17 @@ export abstract class OpenAIBaseClient<
|
||||
override async listModels(): Promise<OpenAI.Models.Model[]> {
|
||||
try {
|
||||
const sdk = await this.getSdkInstance()
|
||||
if (this.provider.id === 'openrouter') {
|
||||
// https://openrouter.ai/docs/api/api-reference/embeddings/list-embeddings-models
|
||||
const embedBaseUrl = 'https://openrouter.ai/api/v1/embeddings'
|
||||
const embedSdk = sdk.withOptions({ baseURL: embedBaseUrl })
|
||||
const modelPromise = sdk.models.list()
|
||||
const embedModelPromise = embedSdk.models.list()
|
||||
const [modelResponse, embedModelResponse] = await Promise.all([modelPromise, embedModelPromise])
|
||||
const models = [...modelResponse.data, ...embedModelResponse.data]
|
||||
const uniqueModels = Array.from(new Map(models.map((model) => [model.id, model])).values())
|
||||
return uniqueModels.filter(isSupportedModel)
|
||||
}
|
||||
if (this.provider.id === 'github') {
|
||||
// GitHub Models 其 models 和 chat completions 两个接口的 baseUrl 不一样
|
||||
const baseUrl = 'https://models.github.ai/catalog/'
|
||||
@ -118,7 +134,7 @@ export abstract class OpenAIBaseClient<
|
||||
}
|
||||
|
||||
if (isOllamaProvider(this.provider)) {
|
||||
const baseUrl = withoutTrailingSlash(this.getBaseURL(false))
|
||||
const baseUrl = withoutTrailingSlash(this.getBaseURL())
|
||||
.replace(/\/v1$/, '')
|
||||
.replace(/\/api$/, '')
|
||||
const response = await fetch(`${baseUrl}/api/tags`, {
|
||||
@ -173,6 +189,7 @@ export abstract class OpenAIBaseClient<
|
||||
|
||||
let apiKeyForSdkInstance = this.apiKey
|
||||
let baseURLForSdkInstance = this.getBaseURL()
|
||||
logger.debug('baseURLForSdkInstance', { baseURLForSdkInstance })
|
||||
let headersForSdkInstance = {
|
||||
...this.defaultHeaders(),
|
||||
...this.provider.extra_headers
|
||||
@ -184,7 +201,7 @@ export abstract class OpenAIBaseClient<
|
||||
// this.provider.apiKey不允许修改
|
||||
// this.provider.apiKey = token
|
||||
apiKeyForSdkInstance = token
|
||||
baseURLForSdkInstance = this.getBaseURL(false)
|
||||
baseURLForSdkInstance = this.getBaseURL()
|
||||
headersForSdkInstance = {
|
||||
...headersForSdkInstance,
|
||||
...COPILOT_DEFAULT_HEADERS
|
||||
|
||||
@ -122,6 +122,7 @@ export class OpenAIResponseAPIClient extends OpenAIBaseClient<
|
||||
if (this.sdkInstance) {
|
||||
return this.sdkInstance
|
||||
}
|
||||
const baseUrl = this.getBaseURL()
|
||||
|
||||
if (this.provider.id === 'azure-openai' || this.provider.type === 'azure-openai') {
|
||||
return new AzureOpenAI({
|
||||
@ -134,7 +135,7 @@ export class OpenAIResponseAPIClient extends OpenAIBaseClient<
|
||||
return new OpenAI({
|
||||
dangerouslyAllowBrowser: true,
|
||||
apiKey: this.apiKey,
|
||||
baseURL: this.getBaseURL(),
|
||||
baseURL: baseUrl,
|
||||
defaultHeaders: {
|
||||
...this.defaultHeaders(),
|
||||
...this.provider.extra_headers
|
||||
|
||||
@ -2,7 +2,6 @@ import { loggerService } from '@logger'
|
||||
import { ApiClientFactory } from '@renderer/aiCore/legacy/clients/ApiClientFactory'
|
||||
import type { BaseApiClient } from '@renderer/aiCore/legacy/clients/BaseApiClient'
|
||||
import { isDedicatedImageGenerationModel, isFunctionCallingModel } from '@renderer/config/models'
|
||||
import { getProviderByModel } from '@renderer/services/AssistantService'
|
||||
import { withSpanResult } from '@renderer/services/SpanManagerService'
|
||||
import type { StartSpanParams } from '@renderer/trace/types/ModelSpanEntity'
|
||||
import type { GenerateImageParams, Model, Provider } from '@renderer/types'
|
||||
@ -160,9 +159,6 @@ export default class AiProvider {
|
||||
public async getEmbeddingDimensions(model: Model): Promise<number> {
|
||||
try {
|
||||
// Use the SDK instance to test embedding capabilities
|
||||
if (this.apiClient instanceof OpenAIResponseAPIClient && getProviderByModel(model).type === 'azure-openai') {
|
||||
this.apiClient = this.apiClient.getClient(model) as BaseApiClient
|
||||
}
|
||||
const dimensions = await this.apiClient.getEmbeddingDimensions(model)
|
||||
return dimensions
|
||||
} catch (error) {
|
||||
|
||||
@ -109,6 +109,20 @@ const createImageBlock = (
|
||||
...overrides
|
||||
})
|
||||
|
||||
const createThinkingBlock = (
|
||||
messageId: string,
|
||||
overrides: Partial<Omit<ThinkingMessageBlock, 'type' | 'messageId'>> = {}
|
||||
): ThinkingMessageBlock => ({
|
||||
id: overrides.id ?? `thinking-block-${++blockCounter}`,
|
||||
messageId,
|
||||
type: MessageBlockType.THINKING,
|
||||
createdAt: overrides.createdAt ?? new Date(2024, 0, 1, 0, 0, blockCounter).toISOString(),
|
||||
status: overrides.status ?? MessageBlockStatus.SUCCESS,
|
||||
content: overrides.content ?? 'Let me think...',
|
||||
thinking_millsec: overrides.thinking_millsec ?? 1000,
|
||||
...overrides
|
||||
})
|
||||
|
||||
describe('messageConverter', () => {
|
||||
beforeEach(() => {
|
||||
convertFileBlockToFilePartMock.mockReset()
|
||||
@ -137,6 +151,73 @@ describe('messageConverter', () => {
|
||||
})
|
||||
})
|
||||
|
||||
it('extracts base64 data from data URLs and preserves mediaType', async () => {
|
||||
const model = createModel()
|
||||
const message = createMessage('user')
|
||||
message.__mockContent = 'Check this image'
|
||||
message.__mockImageBlocks = [createImageBlock(message.id, { url: 'data:image/png;base64,iVBORw0KGgoAAAANS' })]
|
||||
|
||||
const result = await convertMessageToSdkParam(message, true, model)
|
||||
|
||||
expect(result).toEqual({
|
||||
role: 'user',
|
||||
content: [
|
||||
{ type: 'text', text: 'Check this image' },
|
||||
{ type: 'image', image: 'iVBORw0KGgoAAAANS', mediaType: 'image/png' }
|
||||
]
|
||||
})
|
||||
})
|
||||
|
||||
it('handles data URLs without mediaType gracefully', async () => {
|
||||
const model = createModel()
|
||||
const message = createMessage('user')
|
||||
message.__mockContent = 'Check this'
|
||||
message.__mockImageBlocks = [createImageBlock(message.id, { url: 'data:;base64,AAABBBCCC' })]
|
||||
|
||||
const result = await convertMessageToSdkParam(message, true, model)
|
||||
|
||||
expect(result).toEqual({
|
||||
role: 'user',
|
||||
content: [
|
||||
{ type: 'text', text: 'Check this' },
|
||||
{ type: 'image', image: 'AAABBBCCC' }
|
||||
]
|
||||
})
|
||||
})
|
||||
|
||||
it('skips malformed data URLs without comma separator', async () => {
|
||||
const model = createModel()
|
||||
const message = createMessage('user')
|
||||
message.__mockContent = 'Malformed data url'
|
||||
message.__mockImageBlocks = [createImageBlock(message.id, { url: 'data:image/pngAAABBB' })]
|
||||
|
||||
const result = await convertMessageToSdkParam(message, true, model)
|
||||
|
||||
expect(result).toEqual({
|
||||
role: 'user',
|
||||
content: [
|
||||
{ type: 'text', text: 'Malformed data url' }
|
||||
// Malformed data URL is excluded from the content
|
||||
]
|
||||
})
|
||||
})
|
||||
|
||||
it('handles multiple large base64 images without stack overflow', async () => {
|
||||
const model = createModel()
|
||||
const message = createMessage('user')
|
||||
// Create large base64 strings (~500KB each) to simulate real-world large images
|
||||
const largeBase64 = 'A'.repeat(500_000)
|
||||
message.__mockContent = 'Check these images'
|
||||
message.__mockImageBlocks = [
|
||||
createImageBlock(message.id, { url: `data:image/png;base64,${largeBase64}` }),
|
||||
createImageBlock(message.id, { url: `data:image/png;base64,${largeBase64}` }),
|
||||
createImageBlock(message.id, { url: `data:image/png;base64,${largeBase64}` })
|
||||
]
|
||||
|
||||
// Should not throw RangeError: Maximum call stack size exceeded
|
||||
await expect(convertMessageToSdkParam(message, true, model)).resolves.toBeDefined()
|
||||
})
|
||||
|
||||
it('returns file instructions as a system message when native uploads succeed', async () => {
|
||||
const model = createModel()
|
||||
const message = createMessage('user')
|
||||
@ -162,10 +243,27 @@ describe('messageConverter', () => {
|
||||
}
|
||||
])
|
||||
})
|
||||
|
||||
it('includes reasoning parts for assistant messages with thinking blocks', async () => {
|
||||
const model = createModel()
|
||||
const message = createMessage('assistant')
|
||||
message.__mockContent = 'Here is my answer'
|
||||
message.__mockThinkingBlocks = [createThinkingBlock(message.id, { content: 'Let me think...' })]
|
||||
|
||||
const result = await convertMessageToSdkParam(message, false, model)
|
||||
|
||||
expect(result).toEqual({
|
||||
role: 'assistant',
|
||||
content: [
|
||||
{ type: 'text', text: 'Here is my answer' },
|
||||
{ type: 'reasoning', text: 'Let me think...' }
|
||||
]
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('convertMessagesToSdkMessages', () => {
|
||||
it('appends assistant images to the final user message for image enhancement models', async () => {
|
||||
it('collapses to [system?, user(image)] for image enhancement models', async () => {
|
||||
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
||||
const initialUser = createMessage('user')
|
||||
initialUser.__mockContent = 'Start editing'
|
||||
@ -180,14 +278,6 @@ describe('messageConverter', () => {
|
||||
const result = await convertMessagesToSdkMessages([initialUser, assistant, finalUser], model)
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: 'user',
|
||||
content: [{ type: 'text', text: 'Start editing' }]
|
||||
},
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [{ type: 'text', text: 'Here is the current preview' }]
|
||||
},
|
||||
{
|
||||
role: 'user',
|
||||
content: [
|
||||
@ -198,7 +288,7 @@ describe('messageConverter', () => {
|
||||
])
|
||||
})
|
||||
|
||||
it('preserves preceding system instructions when building enhancement payloads', async () => {
|
||||
it('preserves system messages and collapses others for enhancement payloads', async () => {
|
||||
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
||||
const fileUser = createMessage('user')
|
||||
fileUser.__mockContent = 'Use this document as inspiration'
|
||||
@ -221,11 +311,6 @@ describe('messageConverter', () => {
|
||||
|
||||
expect(result).toEqual([
|
||||
{ role: 'system', content: 'fileid://reference' },
|
||||
{ role: 'user', content: [{ type: 'text', text: 'Use this document as inspiration' }] },
|
||||
{
|
||||
role: 'assistant',
|
||||
content: [{ type: 'text', text: 'Generated previews ready' }]
|
||||
},
|
||||
{
|
||||
role: 'user',
|
||||
content: [
|
||||
@ -235,5 +320,120 @@ describe('messageConverter', () => {
|
||||
}
|
||||
])
|
||||
})
|
||||
|
||||
it('handles no previous assistant message with images', async () => {
|
||||
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
||||
const user1 = createMessage('user')
|
||||
user1.__mockContent = 'Start'
|
||||
|
||||
const user2 = createMessage('user')
|
||||
user2.__mockContent = 'Continue without images'
|
||||
|
||||
const result = await convertMessagesToSdkMessages([user1, user2], model)
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: 'user',
|
||||
content: [{ type: 'text', text: 'Continue without images' }]
|
||||
}
|
||||
])
|
||||
})
|
||||
|
||||
it('handles assistant message without images', async () => {
|
||||
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
||||
const user1 = createMessage('user')
|
||||
user1.__mockContent = 'Start'
|
||||
|
||||
const assistant = createMessage('assistant')
|
||||
assistant.__mockContent = 'Text only response'
|
||||
assistant.__mockImageBlocks = []
|
||||
|
||||
const user2 = createMessage('user')
|
||||
user2.__mockContent = 'Follow up'
|
||||
|
||||
const result = await convertMessagesToSdkMessages([user1, assistant, user2], model)
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: 'user',
|
||||
content: [{ type: 'text', text: 'Follow up' }]
|
||||
}
|
||||
])
|
||||
})
|
||||
|
||||
it('handles multiple assistant messages by using the most recent one', async () => {
|
||||
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
||||
const user1 = createMessage('user')
|
||||
user1.__mockContent = 'Start'
|
||||
|
||||
const assistant1 = createMessage('assistant')
|
||||
assistant1.__mockContent = 'First response'
|
||||
assistant1.__mockImageBlocks = [createImageBlock(assistant1.id, { url: 'https://example.com/old.png' })]
|
||||
|
||||
const user2 = createMessage('user')
|
||||
user2.__mockContent = 'Continue'
|
||||
|
||||
const assistant2 = createMessage('assistant')
|
||||
assistant2.__mockContent = 'Second response'
|
||||
assistant2.__mockImageBlocks = [createImageBlock(assistant2.id, { url: 'https://example.com/new.png' })]
|
||||
|
||||
const user3 = createMessage('user')
|
||||
user3.__mockContent = 'Final request'
|
||||
|
||||
const result = await convertMessagesToSdkMessages([user1, assistant1, user2, assistant2, user3], model)
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: 'user',
|
||||
content: [
|
||||
{ type: 'text', text: 'Final request' },
|
||||
{ type: 'image', image: 'https://example.com/new.png' }
|
||||
]
|
||||
}
|
||||
])
|
||||
})
|
||||
|
||||
it('handles conversation ending with assistant message', async () => {
|
||||
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
||||
const user = createMessage('user')
|
||||
user.__mockContent = 'Start'
|
||||
|
||||
const assistant = createMessage('assistant')
|
||||
assistant.__mockContent = 'Response with image'
|
||||
assistant.__mockImageBlocks = [createImageBlock(assistant.id, { url: 'https://example.com/image.png' })]
|
||||
|
||||
const result = await convertMessagesToSdkMessages([user, assistant], model)
|
||||
|
||||
// The user message is the last user message, but since the assistant comes after,
|
||||
// there's no "previous" assistant message (search starts from messages.length-2 backwards)
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: 'user',
|
||||
content: [{ type: 'text', text: 'Start' }]
|
||||
}
|
||||
])
|
||||
})
|
||||
|
||||
it('handles empty content in last user message', async () => {
|
||||
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
|
||||
const user1 = createMessage('user')
|
||||
user1.__mockContent = 'Start'
|
||||
|
||||
const assistant = createMessage('assistant')
|
||||
assistant.__mockContent = 'Here is the preview'
|
||||
assistant.__mockImageBlocks = [createImageBlock(assistant.id, { url: 'https://example.com/preview.png' })]
|
||||
|
||||
const user2 = createMessage('user')
|
||||
user2.__mockContent = ''
|
||||
|
||||
const result = await convertMessagesToSdkMessages([user1, assistant, user2], model)
|
||||
|
||||
expect(result).toEqual([
|
||||
{
|
||||
role: 'user',
|
||||
content: [{ type: 'image', image: 'https://example.com/preview.png' }]
|
||||
}
|
||||
])
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@ -3,10 +3,12 @@
|
||||
* 将 Cherry Studio 消息格式转换为 AI SDK 消息格式
|
||||
*/
|
||||
|
||||
import type { ReasoningPart } from '@ai-sdk/provider-utils'
|
||||
import { loggerService } from '@logger'
|
||||
import { isImageEnhancementModel, isVisionModel } from '@renderer/config/models'
|
||||
import type { Message, Model } from '@renderer/types'
|
||||
import type { FileMessageBlock, ImageMessageBlock, ThinkingMessageBlock } from '@renderer/types/newMessage'
|
||||
import { parseDataUrlMediaType } from '@renderer/utils/image'
|
||||
import {
|
||||
findFileBlocks,
|
||||
findImageBlocks,
|
||||
@ -59,23 +61,29 @@ async function convertImageBlockToImagePart(imageBlocks: ImageMessageBlock[]): P
|
||||
mediaType: image.mime
|
||||
})
|
||||
} catch (error) {
|
||||
logger.warn('Failed to load image:', error as Error)
|
||||
logger.error('Failed to load image file, image will be excluded from message:', {
|
||||
fileId: imageBlock.file.id,
|
||||
fileName: imageBlock.file.origin_name,
|
||||
error: error as Error
|
||||
})
|
||||
}
|
||||
} else if (imageBlock.url) {
|
||||
const isBase64 = imageBlock.url.startsWith('data:')
|
||||
if (isBase64) {
|
||||
const base64 = imageBlock.url.match(/^data:[^;]*;base64,(.+)$/)![1]
|
||||
const mimeMatch = imageBlock.url.match(/^data:([^;]+)/)
|
||||
parts.push({
|
||||
type: 'image',
|
||||
image: base64,
|
||||
mediaType: mimeMatch ? mimeMatch[1] : 'image/png'
|
||||
})
|
||||
const url = imageBlock.url
|
||||
const isDataUrl = url.startsWith('data:')
|
||||
if (isDataUrl) {
|
||||
const { mediaType } = parseDataUrlMediaType(url)
|
||||
const commaIndex = url.indexOf(',')
|
||||
if (commaIndex === -1) {
|
||||
logger.error('Malformed data URL detected (missing comma separator), image will be excluded:', {
|
||||
urlPrefix: url.slice(0, 50) + '...'
|
||||
})
|
||||
continue
|
||||
}
|
||||
const base64Data = url.slice(commaIndex + 1)
|
||||
parts.push({ type: 'image', image: base64Data, ...(mediaType ? { mediaType } : {}) })
|
||||
} else {
|
||||
parts.push({
|
||||
type: 'image',
|
||||
image: imageBlock.url
|
||||
})
|
||||
// For remote URLs we keep payload minimal to match existing expectations.
|
||||
parts.push({ type: 'image', image: url })
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -156,13 +164,13 @@ async function convertMessageToAssistantModelMessage(
|
||||
thinkingBlocks: ThinkingMessageBlock[],
|
||||
model?: Model
|
||||
): Promise<AssistantModelMessage> {
|
||||
const parts: Array<TextPart | FilePart> = []
|
||||
const parts: Array<TextPart | ReasoningPart | FilePart> = []
|
||||
if (content) {
|
||||
parts.push({ type: 'text', text: content })
|
||||
}
|
||||
|
||||
for (const thinkingBlock of thinkingBlocks) {
|
||||
parts.push({ type: 'text', text: thinkingBlock.content })
|
||||
parts.push({ type: 'reasoning', text: thinkingBlock.content })
|
||||
}
|
||||
|
||||
for (const fileBlock of fileBlocks) {
|
||||
@ -194,17 +202,20 @@ async function convertMessageToAssistantModelMessage(
|
||||
* This function processes messages and transforms them into the format required by the SDK.
|
||||
* It handles special cases for vision models and image enhancement models.
|
||||
*
|
||||
* @param messages - Array of messages to convert. Must contain at least 3 messages when using image enhancement models for special handling.
|
||||
* @param messages - Array of messages to convert.
|
||||
* @param model - The model configuration that determines conversion behavior
|
||||
*
|
||||
* @returns A promise that resolves to an array of SDK-compatible model messages
|
||||
*
|
||||
* @remarks
|
||||
* For image enhancement models with 3+ messages:
|
||||
* - Examines the last 2 messages to find an assistant message containing image blocks
|
||||
* - If found, extracts images from the assistant message and appends them to the last user message content
|
||||
* - Returns all converted messages (not just the last two) with the images merged into the user message
|
||||
* - Typical pattern: [system?, assistant(image), user] -> [system?, assistant, user(image)]
|
||||
* For image enhancement models:
|
||||
* - Collapses the conversation into [system?, user(image)] format
|
||||
* - Searches backwards through all messages to find the most recent assistant message with images
|
||||
* - Preserves all system messages (including ones generated from file uploads like 'fileid://...')
|
||||
* - Extracts the last user message content and merges images from the previous assistant message
|
||||
* - Returns only the collapsed messages: system messages (if any) followed by a single user message
|
||||
* - If no user message is found, returns only system messages
|
||||
* - Typical pattern: [system?, user, assistant(image), user] -> [system?, user(image)]
|
||||
*
|
||||
* For other models:
|
||||
* - Returns all converted messages in order without special image handling
|
||||
@ -220,25 +231,66 @@ export async function convertMessagesToSdkMessages(messages: Message[], model: M
|
||||
sdkMessages.push(...(Array.isArray(sdkMessage) ? sdkMessage : [sdkMessage]))
|
||||
}
|
||||
// Special handling for image enhancement models
|
||||
// Only merge images into the user message
|
||||
// [system?, assistant(image), user] -> [system?, assistant, user(image)]
|
||||
if (isImageEnhancementModel(model) && messages.length >= 3) {
|
||||
const needUpdatedMessages = messages.slice(-2)
|
||||
const assistantMessage = needUpdatedMessages.find((m) => m.role === 'assistant')
|
||||
const userSdkMessage = sdkMessages[sdkMessages.length - 1]
|
||||
// Target behavior: Collapse the conversation into [system?, user(image)].
|
||||
// Explanation of why we don't simply use slice:
|
||||
// 1) We need to preserve all system messages: During the convertMessageToSdkParam process, native file uploads may insert `system(fileid://...)`.
|
||||
// Directly slicing the original messages or already converted sdkMessages could easily result in missing these system instructions.
|
||||
// Therefore, we first perform a full conversion and then aggregate the system messages afterward.
|
||||
// 2) The conversion process may split messages: A single user message might be broken into two SDK messages—[system, user].
|
||||
// Slicing either side could lead to obtaining semantically incorrect fragments (e.g., only the split-out system message).
|
||||
// 3) The “previous assistant message” is not necessarily the second-to-last one: There might be system messages or other message blocks inserted in between,
|
||||
// making a simple slice(-2) assumption too rigid. Here, we trace back from the end of the original messages to locate the most recent assistant message, which better aligns with business semantics.
|
||||
// 4) This is a “collapse” rather than a simple “slice”: Ultimately, we need to synthesize a new user message
|
||||
// (with text from the last user message and images from the previous assistant message). Using slice can only extract subarrays,
|
||||
// which still require reassembly; constructing directly according to the target structure is clearer and more reliable.
|
||||
if (isImageEnhancementModel(model)) {
|
||||
// Collect all system messages (including ones generated from file uploads)
|
||||
const systemMessages = sdkMessages.filter((m): m is SystemModelMessage => m.role === 'system')
|
||||
|
||||
if (assistantMessage && userSdkMessage?.role === 'user') {
|
||||
const imageBlocks = findImageBlocks(assistantMessage)
|
||||
const imageParts = await convertImageBlockToImagePart(imageBlocks)
|
||||
// Find the last user message (SDK converted)
|
||||
const lastUserSdkIndex = (() => {
|
||||
for (let i = sdkMessages.length - 1; i >= 0; i--) {
|
||||
if (sdkMessages[i].role === 'user') return i
|
||||
}
|
||||
return -1
|
||||
})()
|
||||
|
||||
if (imageParts.length > 0) {
|
||||
if (typeof userSdkMessage.content === 'string') {
|
||||
userSdkMessage.content = [{ type: 'text', text: userSdkMessage.content }, ...imageParts]
|
||||
} else if (Array.isArray(userSdkMessage.content)) {
|
||||
userSdkMessage.content.push(...imageParts)
|
||||
}
|
||||
const lastUserSdk = lastUserSdkIndex >= 0 ? (sdkMessages[lastUserSdkIndex] as UserModelMessage) : null
|
||||
|
||||
// Find the nearest preceding assistant message in original messages
|
||||
let prevAssistant: Message | null = null
|
||||
for (let i = messages.length - 2; i >= 0; i--) {
|
||||
if (messages[i].role === 'assistant') {
|
||||
prevAssistant = messages[i]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Build the final user content parts
|
||||
let finalUserParts: Array<TextPart | FilePart | ImagePart> = []
|
||||
if (lastUserSdk) {
|
||||
if (typeof lastUserSdk.content === 'string') {
|
||||
finalUserParts.push({ type: 'text', text: lastUserSdk.content })
|
||||
} else if (Array.isArray(lastUserSdk.content)) {
|
||||
finalUserParts = [...lastUserSdk.content]
|
||||
}
|
||||
}
|
||||
|
||||
// Append images from the previous assistant message if any
|
||||
if (prevAssistant) {
|
||||
const imageBlocks = findImageBlocks(prevAssistant)
|
||||
const imageParts = await convertImageBlockToImagePart(imageBlocks)
|
||||
if (imageParts.length > 0) {
|
||||
finalUserParts.push(...imageParts)
|
||||
}
|
||||
}
|
||||
|
||||
// If we couldn't find a last user message, fall back to returning collected system messages only
|
||||
if (!lastUserSdk) {
|
||||
return systemMessages
|
||||
}
|
||||
|
||||
return [...systemMessages, { role: 'user', content: finalUserParts }]
|
||||
}
|
||||
|
||||
return sdkMessages
|
||||
|
||||
@ -28,13 +28,14 @@ import { getAnthropicThinkingBudget } from '../utils/reasoning'
|
||||
* - Disabled for models that do not support temperature.
|
||||
* - Disabled for Claude 4.5 reasoning models when TopP is enabled and temperature is disabled.
|
||||
* Otherwise, returns the temperature value if the assistant has temperature enabled.
|
||||
|
||||
*/
|
||||
export function getTemperature(assistant: Assistant, model: Model): number | undefined {
|
||||
if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
if (!isSupportTemperatureModel(model)) {
|
||||
if (!isSupportTemperatureModel(model, assistant)) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
@ -46,6 +47,10 @@ export function getTemperature(assistant: Assistant, model: Model): number | und
|
||||
return undefined
|
||||
}
|
||||
|
||||
return getTemperatureValue(assistant, model)
|
||||
}
|
||||
|
||||
function getTemperatureValue(assistant: Assistant, model: Model): number | undefined {
|
||||
const assistantSettings = getAssistantSettings(assistant)
|
||||
let temperature = assistantSettings?.temperature
|
||||
if (temperature && isMaxTemperatureOneModel(model)) {
|
||||
@ -68,13 +73,17 @@ export function getTopP(assistant: Assistant, model: Model): number | undefined
|
||||
if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
|
||||
return undefined
|
||||
}
|
||||
if (!isSupportTopPModel(model)) {
|
||||
if (!isSupportTopPModel(model, assistant)) {
|
||||
return undefined
|
||||
}
|
||||
if (isTemperatureTopPMutuallyExclusiveModel(model) && assistant.settings?.enableTemperature) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
return getTopPValue(assistant)
|
||||
}
|
||||
|
||||
function getTopPValue(assistant: Assistant): number | undefined {
|
||||
const assistantSettings = getAssistantSettings(assistant)
|
||||
// FIXME: assistant.settings.enableTopP should be always a boolean value.
|
||||
const enableTopP = assistantSettings.enableTopP ?? DEFAULT_ASSISTANT_SETTINGS.enableTopP
|
||||
|
||||
@ -42,7 +42,8 @@ vi.mock('@renderer/utils/api', () => ({
|
||||
routeToEndpoint: vi.fn((host) => ({
|
||||
baseURL: host,
|
||||
endpoint: '/chat/completions'
|
||||
}))
|
||||
})),
|
||||
isWithTrailingSharp: vi.fn((host) => host?.endsWith('#') || false)
|
||||
}))
|
||||
|
||||
vi.mock('@renderer/utils/provider', async (importOriginal) => {
|
||||
@ -227,12 +228,19 @@ describe('CherryAI provider configuration', () => {
|
||||
// Mock the functions to simulate non-CherryAI provider
|
||||
vi.mocked(isCherryAIProvider).mockReturnValue(false)
|
||||
vi.mocked(getProviderByModel).mockReturnValue(provider)
|
||||
// Mock isWithTrailingSharp to return false for this test
|
||||
vi.mocked(formatApiHost as any).mockImplementation((host, isSupportedAPIVersion = true) => {
|
||||
if (isSupportedAPIVersion === false) {
|
||||
return host
|
||||
}
|
||||
return `${host}/v1`
|
||||
})
|
||||
|
||||
// Call getActualProvider
|
||||
const actualProvider = getActualProvider(model)
|
||||
|
||||
// Verify that formatApiHost was called with default parameters (true)
|
||||
expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com')
|
||||
// Verify that formatApiHost was called with appendApiVersion parameter
|
||||
expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com', true)
|
||||
expect(actualProvider.apiHost).toBe('https://api.openai.com/v1')
|
||||
})
|
||||
|
||||
@ -303,12 +311,19 @@ describe('Perplexity provider configuration', () => {
|
||||
vi.mocked(isCherryAIProvider).mockReturnValue(false)
|
||||
vi.mocked(isPerplexityProvider).mockReturnValue(false)
|
||||
vi.mocked(getProviderByModel).mockReturnValue(provider)
|
||||
// Mock isWithTrailingSharp to return false for this test
|
||||
vi.mocked(formatApiHost as any).mockImplementation((host, isSupportedAPIVersion = true) => {
|
||||
if (isSupportedAPIVersion === false) {
|
||||
return host
|
||||
}
|
||||
return `${host}/v1`
|
||||
})
|
||||
|
||||
// Call getActualProvider
|
||||
const actualProvider = getActualProvider(model)
|
||||
|
||||
// Verify that formatApiHost was called with default parameters (true)
|
||||
expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com')
|
||||
// Verify that formatApiHost was called with appendApiVersion parameter
|
||||
expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com', true)
|
||||
expect(actualProvider.apiHost).toBe('https://api.openai.com/v1')
|
||||
})
|
||||
|
||||
|
||||
@ -9,6 +9,7 @@ import {
|
||||
} from '@renderer/hooks/useAwsBedrock'
|
||||
import { createVertexProvider, isVertexAIConfigured } from '@renderer/hooks/useVertexAI'
|
||||
import { getProviderByModel } from '@renderer/services/AssistantService'
|
||||
import { getProviderById } from '@renderer/services/ProviderService'
|
||||
import store from '@renderer/store'
|
||||
import { isSystemProvider, type Model, type Provider, SystemProviderIds } from '@renderer/types'
|
||||
import type { OpenAICompletionsStreamOptions } from '@renderer/types/aiCoreTypes'
|
||||
@ -17,6 +18,7 @@ import {
|
||||
formatAzureOpenAIApiHost,
|
||||
formatOllamaApiHost,
|
||||
formatVertexApiHost,
|
||||
isWithTrailingSharp,
|
||||
routeToEndpoint
|
||||
} from '@renderer/utils/api'
|
||||
import {
|
||||
@ -30,6 +32,7 @@ import {
|
||||
isSupportStreamOptionsProvider,
|
||||
isVertexProvider
|
||||
} from '@renderer/utils/provider'
|
||||
import { defaultAppHeaders } from '@shared/utils'
|
||||
import { cloneDeep, isEmpty } from 'lodash'
|
||||
|
||||
import type { AiSdkConfig } from '../types'
|
||||
@ -69,14 +72,15 @@ function handleSpecialProviders(model: Model, provider: Provider): Provider {
|
||||
*/
|
||||
export function formatProviderApiHost(provider: Provider): Provider {
|
||||
const formatted = { ...provider }
|
||||
const appendApiVersion = !isWithTrailingSharp(provider.apiHost)
|
||||
if (formatted.anthropicApiHost) {
|
||||
formatted.anthropicApiHost = formatApiHost(formatted.anthropicApiHost)
|
||||
formatted.anthropicApiHost = formatApiHost(formatted.anthropicApiHost, appendApiVersion)
|
||||
}
|
||||
|
||||
if (isAnthropicProvider(provider)) {
|
||||
const baseHost = formatted.anthropicApiHost || formatted.apiHost
|
||||
// AI SDK needs /v1 in baseURL, Anthropic SDK will strip it in getSdkClient
|
||||
formatted.apiHost = formatApiHost(baseHost)
|
||||
formatted.apiHost = formatApiHost(baseHost, appendApiVersion)
|
||||
if (!formatted.anthropicApiHost) {
|
||||
formatted.anthropicApiHost = formatted.apiHost
|
||||
}
|
||||
@ -85,7 +89,7 @@ export function formatProviderApiHost(provider: Provider): Provider {
|
||||
} else if (isOllamaProvider(formatted)) {
|
||||
formatted.apiHost = formatOllamaApiHost(formatted.apiHost)
|
||||
} else if (isGeminiProvider(formatted)) {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, true, 'v1beta')
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, appendApiVersion, 'v1beta')
|
||||
} else if (isAzureOpenAIProvider(formatted)) {
|
||||
formatted.apiHost = formatAzureOpenAIApiHost(formatted.apiHost)
|
||||
} else if (isVertexProvider(formatted)) {
|
||||
@ -95,7 +99,7 @@ export function formatProviderApiHost(provider: Provider): Provider {
|
||||
} else if (isPerplexityProvider(formatted)) {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, false)
|
||||
} else {
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost)
|
||||
formatted.apiHost = formatApiHost(formatted.apiHost, appendApiVersion)
|
||||
}
|
||||
return formatted
|
||||
}
|
||||
@ -194,18 +198,13 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A
|
||||
extraOptions.mode = 'chat'
|
||||
}
|
||||
|
||||
// 添加额外headers
|
||||
if (actualProvider.extra_headers) {
|
||||
extraOptions.headers = actualProvider.extra_headers
|
||||
// copy from openaiBaseClient/openaiResponseApiClient
|
||||
if (aiSdkProviderId === 'openai') {
|
||||
extraOptions.headers = {
|
||||
...extraOptions.headers,
|
||||
'HTTP-Referer': 'https://cherry-ai.com',
|
||||
'X-Title': 'Cherry Studio',
|
||||
'X-Api-Key': baseConfig.apiKey
|
||||
}
|
||||
}
|
||||
extraOptions.headers = {
|
||||
...defaultAppHeaders(),
|
||||
...actualProvider.extra_headers
|
||||
}
|
||||
|
||||
if (aiSdkProviderId === 'openai') {
|
||||
extraOptions.headers['X-Api-Key'] = baseConfig.apiKey
|
||||
}
|
||||
// azure
|
||||
// https://learn.microsoft.com/en-us/azure/ai-foundry/openai/latest
|
||||
@ -248,6 +247,12 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A
|
||||
if (model.endpoint_type) {
|
||||
extraOptions.endpointType = model.endpoint_type
|
||||
}
|
||||
// CherryIN API Host
|
||||
const cherryinProvider = getProviderById(SystemProviderIds.cherryin)
|
||||
if (cherryinProvider) {
|
||||
extraOptions.anthropicBaseURL = cherryinProvider.anthropicApiHost + '/v1'
|
||||
extraOptions.geminiBaseURL = cherryinProvider.apiHost + '/v1beta/models'
|
||||
}
|
||||
}
|
||||
|
||||
if (hasProviderConfig(aiSdkProviderId) && aiSdkProviderId !== 'openai-compatible') {
|
||||
|
||||
@ -754,7 +754,8 @@ describe('reasoning utils', () => {
|
||||
const result = getGeminiReasoningParams(assistant, model)
|
||||
expect(result).toEqual({
|
||||
thinkingConfig: {
|
||||
includeThoughts: true
|
||||
includeThoughts: true,
|
||||
thinkingBudget: -1
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
@ -259,7 +259,7 @@ describe('websearch utils', () => {
|
||||
|
||||
expect(result).toEqual({
|
||||
xai: {
|
||||
maxSearchResults: 50,
|
||||
maxSearchResults: 30,
|
||||
returnCitations: true,
|
||||
sources: [{ type: 'web', excludedWebsites: [] }, { type: 'news' }, { type: 'x' }],
|
||||
mode: 'on'
|
||||
|
||||
@@ -11,6 +11,7 @@ import {
  isGeminiModel,
  isGrokModel,
  isOpenAIModel,
  isOpenAIOpenWeightModel,
  isQwenMTModel,
  isSupportFlexServiceTierModel,
  isSupportVerbosityModel
@@ -244,7 +245,7 @@ export function buildProviderOptions(
      providerSpecificOptions = buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier)
      break
    case SystemProviderIds.ollama:
      providerSpecificOptions = buildOllamaProviderOptions(assistant, capabilities)
      providerSpecificOptions = buildOllamaProviderOptions(assistant, model, capabilities)
      break
    case SystemProviderIds.gateway:
      providerSpecificOptions = buildAIGatewayOptions(assistant, model, capabilities, serviceTier, textVerbosity)
@@ -564,6 +565,7 @@ function buildBedrockProviderOptions(

function buildOllamaProviderOptions(
  assistant: Assistant,
  model: Model,
  capabilities: {
    enableReasoning: boolean
    enableWebSearch: boolean
@@ -574,7 +576,12 @@ function buildOllamaProviderOptions(
  const providerOptions: OllamaCompletionProviderOptions = {}
  const reasoningEffort = assistant.settings?.reasoning_effort
  if (enableReasoning) {
    providerOptions.think = !['none', undefined].includes(reasoningEffort)
    if (isOpenAIOpenWeightModel(model)) {
      // @ts-ignore upstream type error
      providerOptions.think = reasoningEffort as any
    } else {
      providerOptions.think = !['none', undefined].includes(reasoningEffort)
    }
  }
  return {
    ollama: providerOptions
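With the added `model` parameter, `buildOllamaProviderOptions` can pass the raw reasoning effort string straight through for gpt-oss models while every other model keeps the boolean `think` flag. A condensed sketch of that branch, not part of this diff; the types here are simplified assumptions:

```ts
// Sketch only: types are simplified assumptions, not the real OllamaCompletionProviderOptions.
type ReasoningEffort = 'none' | 'low' | 'medium' | 'high' | undefined

function resolveOllamaThink(reasoningEffort: ReasoningEffort, isOpenAIOpenWeight: boolean): boolean | ReasoningEffort {
  if (isOpenAIOpenWeight) {
    // gpt-oss models accept the effort level itself, so it is passed through unchanged.
    return reasoningEffort
  }
  // Every other Ollama model only understands a boolean think toggle.
  const disabled: ReasoningEffort[] = ['none', undefined]
  return !disabled.includes(reasoningEffort)
}

resolveOllamaThink('high', true) // => 'high'
resolveOllamaThink('none', false) // => false
```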
@@ -8,16 +8,16 @@ import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
import {
  findTokenLimit,
  GEMINI_FLASH_MODEL_REGEX,
  getThinkModelType,
  getModelSupportedReasoningEffortOptions,
  isDeepSeekHybridInferenceModel,
  isDoubaoSeedAfter251015,
  isDoubaoThinkingAutoModel,
  isGemini3ThinkingTokenModel,
  isGPT5SeriesModel,
  isGPT51SeriesModel,
  isGrok4FastReasoningModel,
  isOpenAIDeepResearchModel,
  isOpenAIModel,
  isOpenAIReasoningModel,
  isQwenAlwaysThinkModel,
  isQwenReasoningModel,
  isReasoningModel,
@@ -30,8 +30,7 @@ import {
  isSupportedThinkingTokenHunyuanModel,
  isSupportedThinkingTokenModel,
  isSupportedThinkingTokenQwenModel,
  isSupportedThinkingTokenZhipuModel,
  MODEL_SUPPORTED_REASONING_EFFORT
  isSupportedThinkingTokenZhipuModel
} from '@renderer/config/models'
import { getStoreSetting } from '@renderer/hooks/useSettings'
import { getAssistantSettings, getProviderByModel } from '@renderer/services/AssistantService'
@@ -134,8 +133,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
  // https://creator.poe.com/docs/external-applications/openai-compatible-api#additional-considerations
  // Poe provider - supports custom bot parameters via extra_body
  if (provider.id === SystemProviderIds.poe) {
    // GPT-5 series models use reasoning_effort parameter in extra_body
    if (isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) {
    if (isOpenAIReasoningModel(model)) {
      return {
        extra_body: {
          reasoning_effort: reasoningEffort === 'auto' ? 'medium' : reasoningEffort
@@ -331,16 +329,15 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
  // Grok models/Perplexity models/OpenAI models, use reasoning_effort
  if (isSupportedReasoningEffortModel(model)) {
    // Check whether the model supports the selected option
    const modelType = getThinkModelType(model)
    const supportedOptions = MODEL_SUPPORTED_REASONING_EFFORT[modelType]
    if (supportedOptions.includes(reasoningEffort)) {
    const supportedOptions = getModelSupportedReasoningEffortOptions(model)
    if (supportedOptions?.includes(reasoningEffort)) {
      return {
        reasoningEffort
      }
    } else {
      // If unsupported, fall back to the first supported value
      return {
        reasoningEffort: supportedOptions[0]
        reasoningEffort: supportedOptions?.[0]
      }
    }
  }
@@ -589,6 +586,7 @@ export function getGeminiReasoningParams(
  if (effortRatio > 1) {
    return {
      thinkingConfig: {
        thinkingBudget: -1,
        includeThoughts: true
      }
    }
@@ -634,6 +632,8 @@ export function getXAIReasoningParams(assistant: Assistant, model: Model): Pick<
    case 'low':
    case 'high':
      return { reasoningEffort }
    case 'xhigh':
      return { reasoningEffort: 'high' }
  }
}

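The change above swaps the direct `MODEL_SUPPORTED_REASONING_EFFORT[modelType]` lookup for `getModelSupportedReasoningEffortOptions(model)`, so the fallback now has to tolerate an `undefined` options list. A condensed sketch of the selection logic, not part of this diff, with the lookup stubbed out:

```ts
// Sketch only: the options lookup is stubbed; the real code resolves it from the model type.
type ReasoningEffortOption = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh' | 'auto'

function pickReasoningEffort(
  requested: ReasoningEffortOption,
  supported: ReasoningEffortOption[] | undefined
): { reasoningEffort?: ReasoningEffortOption } {
  if (supported?.includes(requested)) {
    return { reasoningEffort: requested }
  }
  // Fall back to the first supported value; undefined when the model exposes no options.
  return { reasoningEffort: supported?.[0] }
}

pickReasoningEffort('xhigh', ['low', 'medium', 'high']) // => { reasoningEffort: 'low' }
pickReasoningEffort('high', undefined) // => { reasoningEffort: undefined }
```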

@@ -9,6 +9,8 @@ import type { CherryWebSearchConfig } from '@renderer/store/websearch'
import type { Model } from '@renderer/types'
import { mapRegexToPatterns } from '@renderer/utils/blacklistMatchPattern'

const X_AI_MAX_SEARCH_RESULT = 30

export function getWebSearchParams(model: Model): Record<string, any> {
  if (model.provider === 'hunyuan') {
    return { enable_enhancement: true, citation: true, search_info: true }
@@ -82,7 +84,7 @@ export function buildProviderBuiltinWebSearchConfig(
  const excludeDomains = mapRegexToPatterns(webSearchConfig.excludeDomains)
  return {
    xai: {
      maxSearchResults: webSearchConfig.maxResults,
      maxSearchResults: Math.min(webSearchConfig.maxResults, X_AI_MAX_SEARCH_RESULT),
      returnCitations: true,
      sources: [
        {
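The `Math.min` clamp caps the user-configured result count at the new `X_AI_MAX_SEARCH_RESULT` constant (30), which is what the updated websearch test earlier in this diff now expects. A quick illustration:

```ts
const X_AI_MAX_SEARCH_RESULT = 30

const clampXaiResults = (maxResults: number): number => Math.min(maxResults, X_AI_MAX_SEARCH_RESULT)

clampXaiResults(50) // => 30 (clamped to the xAI limit)
clampXaiResults(10) // => 10 (unchanged)
```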
34
src/renderer/src/components/Avatar/AssistantAvatar.tsx
Normal file
@@ -0,0 +1,34 @@
import EmojiIcon from '@renderer/components/EmojiIcon'
import { useSettings } from '@renderer/hooks/useSettings'
import { getDefaultModel } from '@renderer/services/AssistantService'
import type { Assistant } from '@renderer/types'
import { getLeadingEmoji } from '@renderer/utils'
import type { FC } from 'react'
import { useMemo } from 'react'

import ModelAvatar from './ModelAvatar'

interface AssistantAvatarProps {
  assistant: Assistant
  size?: number
  className?: string
}

const AssistantAvatar: FC<AssistantAvatarProps> = ({ assistant, size = 24, className }) => {
  const { assistantIconType } = useSettings()
  const defaultModel = getDefaultModel()

  const assistantName = useMemo(() => assistant.name || '', [assistant.name])

  if (assistantIconType === 'model') {
    return <ModelAvatar model={assistant.model || defaultModel} size={size} className={className} />
  }

  if (assistantIconType === 'emoji') {
    return <EmojiIcon emoji={assistant.emoji || getLeadingEmoji(assistantName)} size={size} className={className} />
  }

  return null
}

export default AssistantAvatar
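The new `AssistantAvatar` component picks between a model avatar and an emoji avatar based on the `assistantIconType` setting, returning `null` otherwise. A hypothetical usage sketch; the consuming list item component is an assumption, not part of this diff:

```tsx
// Sketch only: AssistantListItem is a hypothetical consumer for illustration.
import AssistantAvatar from '@renderer/components/Avatar/AssistantAvatar'
import type { Assistant } from '@renderer/types'
import type { FC } from 'react'

const AssistantListItem: FC<{ assistant: Assistant }> = ({ assistant }) => (
  <div className="assistant-list-item">
    {/* Renders a model logo, an emoji, or nothing, depending on the icon-type setting */}
    <AssistantAvatar assistant={assistant} size={20} />
    <span>{assistant.name}</span>
  </div>
)

export default AssistantListItem
```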
@ -25,7 +25,7 @@ type ViewMode = 'split' | 'code' | 'preview'
|
||||
const HtmlArtifactsPopup: React.FC<HtmlArtifactsPopupProps> = ({ open, title, html, onSave, onClose }) => {
|
||||
const { t } = useTranslation()
|
||||
const [viewMode, setViewMode] = useState<ViewMode>('split')
|
||||
const [isFullscreen, setIsFullscreen] = useState(false)
|
||||
const [isFullscreen, setIsFullscreen] = useState(true)
|
||||
const [saved, setSaved] = useTemporaryValue(false, 2000)
|
||||
const codeEditorRef = useRef<CodeEditorHandles>(null)
|
||||
const previewFrameRef = useRef<HTMLIFrameElement>(null)
|
||||
@ -78,7 +78,7 @@ const HtmlArtifactsPopup: React.FC<HtmlArtifactsPopupProps> = ({ open, title, ht
|
||||
</HeaderLeft>
|
||||
|
||||
<HeaderCenter>
|
||||
<ViewControls onDoubleClick={(e) => e.stopPropagation()}>
|
||||
<ViewControls onDoubleClick={(e) => e.stopPropagation()} className="nodrag">
|
||||
<ViewButton
|
||||
size="small"
|
||||
type={viewMode === 'split' ? 'primary' : 'default'}
|
||||
|
||||
@@ -6,6 +6,61 @@ interface ContextMenuProps {
  children: React.ReactNode
}

/**
 * Extract text content from selection, filtering out line numbers in code viewers.
 * Preserves all content including plain text and code blocks, only removing line numbers.
 * This ensures right-click copy in code blocks doesn't include line numbers while preserving indentation.
 */
function extractSelectedText(selection: Selection): string {
  // Validate selection
  if (selection.rangeCount === 0 || selection.isCollapsed) {
    return ''
  }

  const range = selection.getRangeAt(0)
  const fragment = range.cloneContents()

  // Check if the selection contains code viewer elements
  const hasLineNumbers = fragment.querySelectorAll('.line-number').length > 0

  // If no line numbers, return the original text (preserves formatting)
  if (!hasLineNumbers) {
    return selection.toString()
  }

  // Remove all line number elements
  fragment.querySelectorAll('.line-number').forEach((el) => el.remove())

  // Handle all content using optimized TreeWalker with precise node filtering
  // This approach handles mixed content correctly while improving performance
  const walker = document.createTreeWalker(fragment, NodeFilter.SHOW_TEXT | NodeFilter.SHOW_ELEMENT, null)

  let result = ''
  let node = walker.nextNode()

  while (node) {
    if (node.nodeType === Node.TEXT_NODE) {
      // Preserve text content including whitespace
      result += node.textContent
    } else if (node.nodeType === Node.ELEMENT_NODE) {
      const element = node as Element

      // Add newline after block elements and code lines to preserve structure
      if (['H1', 'H2', 'H3', 'H4', 'H5', 'H6'].includes(element.tagName)) {
        result += '\n'
      } else if (element.classList.contains('line')) {
        // Add newline after code lines to preserve code structure
        result += '\n'
      }
    }

    node = walker.nextNode()
  }

  // Clean up excessive newlines but preserve code structure
  return result.trim()
}

// FIXME: Why does this component name look like a generic component but is not customizable at all?
const ContextMenu: React.FC<ContextMenuProps> = ({ children }) => {
  const { t } = useTranslation()
@@ -45,8 +100,12 @@ const ContextMenu: React.FC<ContextMenuProps> = ({ children }) => {

  const onOpenChange = (open: boolean) => {
    if (open) {
      const selectedText = window.getSelection()?.toString()
      setSelectedText(selectedText)
      const selection = window.getSelection()
      if (!selection || selection.rangeCount === 0 || selection.isCollapsed) {
        setSelectedText(undefined)
        return
      }
      setSelectedText(extractSelectedText(selection) || undefined)
    }
  }

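`extractSelectedText` only strips `.line-number` elements and re-adds newlines after headings and `.line` rows; ordinary selections still go through `selection.toString()`. A rough, runnable illustration of the markup it targets; the HTML snippet is an assumption about the code viewer's DOM, and the map/join simplifies the TreeWalker pass:

```ts
// Sketch only: assumes a DOM environment (renderer process or a jsdom test).
const container = document.createElement('div')
container.innerHTML = `
  <div class="line"><span class="line-number">1</span><span>const a = 1</span></div>
  <div class="line"><span class="line-number">2</span><span>const b = 2</span></div>
`

// Mimic what extractSelectedText does once line numbers are detected:
container.querySelectorAll('.line-number').forEach((el) => el.remove())
const copied = Array.from(container.querySelectorAll('.line'))
  .map((line) => line.textContent ?? '')
  .join('\n')

console.log(copied)
// const a = 1
// const b = 2
```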
@ -1,5 +1,4 @@
|
||||
import { loggerService } from '@logger'
|
||||
import AiProvider from '@renderer/aiCore'
|
||||
import { RefreshIcon } from '@renderer/components/Icons'
|
||||
import { useProvider } from '@renderer/hooks/useProvider'
|
||||
import type { Model } from '@renderer/types'
|
||||
@ -8,6 +7,8 @@ import { Button, InputNumber, Space, Tooltip } from 'antd'
|
||||
import { memo, useCallback, useMemo, useState } from 'react'
|
||||
import { useTranslation } from 'react-i18next'
|
||||
|
||||
import AiProviderNew from '../aiCore/index_new'
|
||||
|
||||
const logger = loggerService.withContext('DimensionsInput')
|
||||
|
||||
interface InputEmbeddingDimensionProps {
|
||||
@ -47,7 +48,7 @@ const InputEmbeddingDimension = ({
|
||||
|
||||
setLoading(true)
|
||||
try {
|
||||
const aiProvider = new AiProvider(provider)
|
||||
const aiProvider = new AiProviderNew(provider)
|
||||
const dimension = await aiProvider.getEmbeddingDimensions(model)
|
||||
// for controlled input
|
||||
if (ref?.current) {
|
||||
|
||||
@@ -106,6 +106,51 @@ const WebviewContainer = memo(
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [appid, url])

  // Setup keyboard shortcuts handler for print and save
  useEffect(() => {
    if (!webviewRef.current) return

    const unsubscribe = window.api?.webview?.onFindShortcut?.(async (payload) => {
      // Get webviewId when event is triggered
      const webviewId = webviewRef.current?.getWebContentsId()

      // Only handle events for this webview
      if (!webviewId || payload.webviewId !== webviewId) return

      const key = payload.key?.toLowerCase()
      const isModifier = payload.control || payload.meta

      if (!isModifier || !key) return

      try {
        if (key === 'p') {
          // Print to PDF
          logger.info(`Printing webview ${appid} to PDF`)
          const filePath = await window.api.webview.printToPDF(webviewId)
          if (filePath) {
            window.toast?.success?.(`PDF saved to: ${filePath}`)
            logger.info(`PDF saved to: ${filePath}`)
          }
        } else if (key === 's') {
          // Save as HTML
          logger.info(`Saving webview ${appid} as HTML`)
          const filePath = await window.api.webview.saveAsHTML(webviewId)
          if (filePath) {
            window.toast?.success?.(`HTML saved to: ${filePath}`)
            logger.info(`HTML saved to: ${filePath}`)
          }
        }
      } catch (error) {
        logger.error(`Failed to handle shortcut for webview ${appid}:`, error as Error)
        window.toast?.error?.(`Failed: ${(error as Error).message}`)
      }
    })

    return () => {
      unsubscribe?.()
    }
  }, [appid])

  // Update webview settings when they change
  useEffect(() => {
    if (!webviewRef.current) return

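The effect above assumes a preload bridge that forwards Ctrl/Cmd+P and Ctrl/Cmd+S events from the embedded webview. A sketch of the payload and API shape implied by the renderer-side usage; the exact preload typings are an assumption, not confirmed by this diff:

```ts
// Sketch only: inferred from the renderer-side calls; the real preload typings may differ.
interface WebviewShortcutPayload {
  webviewId: number
  key?: string
  control?: boolean
  meta?: boolean
}

interface WebviewApi {
  // Subscribes to shortcut events; returns an unsubscribe function.
  onFindShortcut?: (handler: (payload: WebviewShortcutPayload) => void | Promise<void>) => () => void
  // Both resolve to the saved file path, or a falsy value when the user cancels.
  printToPDF: (webviewId: number) => Promise<string | undefined>
  saveAsHTML: (webviewId: number) => Promise<string | undefined>
}
```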
@ -60,6 +60,7 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
||||
|
||||
const [form, setForm] = useState<BaseAgentForm>(() => buildAgentForm(agent))
|
||||
const [hasGitBash, setHasGitBash] = useState<boolean>(true)
|
||||
const [customGitBashPath, setCustomGitBashPath] = useState<string>('')
|
||||
|
||||
useEffect(() => {
|
||||
if (open) {
|
||||
@ -70,7 +71,11 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
||||
const checkGitBash = useCallback(
|
||||
async (showToast = false) => {
|
||||
try {
|
||||
const gitBashInstalled = await window.api.system.checkGitBash()
|
||||
const [gitBashInstalled, savedPath] = await Promise.all([
|
||||
window.api.system.checkGitBash(),
|
||||
window.api.system.getGitBashPath().catch(() => null)
|
||||
])
|
||||
setCustomGitBashPath(savedPath ?? '')
|
||||
setHasGitBash(gitBashInstalled)
|
||||
if (showToast) {
|
||||
if (gitBashInstalled) {
|
||||
@ -93,6 +98,46 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
||||
|
||||
const selectedPermissionMode = form.configuration?.permission_mode ?? 'default'
|
||||
|
||||
const handlePickGitBash = useCallback(async () => {
|
||||
try {
|
||||
const selected = await window.api.file.select({
|
||||
title: t('agent.gitBash.pick.title', 'Select Git Bash executable'),
|
||||
filters: [{ name: 'Executable', extensions: ['exe'] }],
|
||||
properties: ['openFile']
|
||||
})
|
||||
|
||||
if (!selected || selected.length === 0) {
|
||||
return
|
||||
}
|
||||
|
||||
const pickedPath = selected[0].path
|
||||
const ok = await window.api.system.setGitBashPath(pickedPath)
|
||||
if (!ok) {
|
||||
window.toast.error(
|
||||
t('agent.gitBash.pick.invalidPath', 'Selected file is not a valid Git Bash executable (bash.exe).')
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
setCustomGitBashPath(pickedPath)
|
||||
await checkGitBash(true)
|
||||
} catch (error) {
|
||||
logger.error('Failed to pick Git Bash path', error as Error)
|
||||
window.toast.error(t('agent.gitBash.pick.failed', 'Failed to set Git Bash path'))
|
||||
}
|
||||
}, [checkGitBash, t])
|
||||
|
||||
const handleClearGitBash = useCallback(async () => {
|
||||
try {
|
||||
await window.api.system.setGitBashPath(null)
|
||||
setCustomGitBashPath('')
|
||||
await checkGitBash(true)
|
||||
} catch (error) {
|
||||
logger.error('Failed to clear Git Bash path', error as Error)
|
||||
window.toast.error(t('agent.gitBash.pick.failed', 'Failed to set Git Bash path'))
|
||||
}
|
||||
}, [checkGitBash, t])
|
||||
|
||||
const onPermissionModeChange = useCallback((value: PermissionMode) => {
|
||||
setForm((prev) => {
|
||||
const parsedConfiguration = AgentConfigurationSchema.parse(prev.configuration ?? {})
|
||||
@ -324,6 +369,9 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
||||
<Button size="small" onClick={() => checkGitBash(true)}>
|
||||
{t('agent.gitBash.error.recheck', 'Recheck Git Bash Installation')}
|
||||
</Button>
|
||||
<Button size="small" style={{ marginLeft: 8 }} onClick={handlePickGitBash}>
|
||||
{t('agent.gitBash.pick.button', 'Select Git Bash Path')}
|
||||
</Button>
|
||||
</div>
|
||||
}
|
||||
type="error"
|
||||
@ -331,6 +379,33 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
||||
style={{ marginBottom: 16 }}
|
||||
/>
|
||||
)}
|
||||
|
||||
{hasGitBash && customGitBashPath && (
|
||||
<Alert
|
||||
message={t('agent.gitBash.found.title', 'Git Bash configured')}
|
||||
description={
|
||||
<div style={{ display: 'flex', flexDirection: 'column', gap: 8 }}>
|
||||
<div>
|
||||
{t('agent.gitBash.customPath', {
|
||||
defaultValue: 'Using custom path: {{path}}',
|
||||
path: customGitBashPath
|
||||
})}
|
||||
</div>
|
||||
<div style={{ display: 'flex', gap: 8 }}>
|
||||
<Button size="small" onClick={handlePickGitBash}>
|
||||
{t('agent.gitBash.pick.button', 'Select Git Bash Path')}
|
||||
</Button>
|
||||
<Button size="small" onClick={handleClearGitBash}>
|
||||
{t('agent.gitBash.clear.button', 'Clear custom path')}
|
||||
</Button>
|
||||
</div>
|
||||
</div>
|
||||
}
|
||||
type="success"
|
||||
showIcon
|
||||
style={{ marginBottom: 16 }}
|
||||
/>
|
||||
)}
|
||||
<FormRow>
|
||||
<FormItem style={{ flex: 1 }}>
|
||||
<Label>
|
||||
|
||||
@ -79,7 +79,7 @@ vi.mock('antd', () => {
|
||||
})
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('@renderer/aiCore', () => ({
|
||||
vi.mock('@renderer/aiCore/index_new', () => ({
|
||||
default: vi.fn().mockImplementation(() => ({
|
||||
getEmbeddingDimensions: mocks.aiCore.getEmbeddingDimensions
|
||||
}))
|
||||
|
||||
@ -89,10 +89,7 @@ const Sidebar: FC = () => {
|
||||
)}
|
||||
</MainMenusContainer>
|
||||
<Menus>
|
||||
<Tooltip
|
||||
title={t('settings.theme.title') + ': ' + getThemeModeLabel(settedTheme)}
|
||||
mouseEnterDelay={0.8}
|
||||
placement="right">
|
||||
<Tooltip title={t('settings.theme.title') + ': ' + getThemeModeLabel(settedTheme)} placement="right">
|
||||
<Icon theme={theme} onClick={toggleTheme}>
|
||||
{settedTheme === ThemeMode.dark ? (
|
||||
<Moon size={20} className="icon" />
|
||||
|
||||
@ -5,6 +5,7 @@ import { isEmbeddingModel, isRerankModel } from '../embedding'
|
||||
import { isOpenAIReasoningModel, isSupportedReasoningEffortOpenAIModel } from '../openai'
|
||||
import {
|
||||
findTokenLimit,
|
||||
getModelSupportedReasoningEffortOptions,
|
||||
getThinkModelType,
|
||||
isClaude4SeriesModel,
|
||||
isClaude45ReasoningModel,
|
||||
@ -1651,3 +1652,355 @@ describe('isGemini3ThinkingTokenModel', () => {
|
||||
).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('getModelSupportedReasoningEffortOptions', () => {
|
||||
describe('Edge cases', () => {
|
||||
it('should return undefined for undefined model', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(undefined)).toBeUndefined()
|
||||
})
|
||||
|
||||
it('should return undefined for null model', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(null)).toBeUndefined()
|
||||
})
|
||||
|
||||
it('should return undefined for non-reasoning models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-4o' }))).toBeUndefined()
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'claude-3-opus' }))).toBeUndefined()
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'random-model' }))).toBeUndefined()
|
||||
})
|
||||
})
|
||||
|
||||
describe('OpenAI models', () => {
|
||||
it('should return correct options for o-series models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3' }))).toEqual(['low', 'medium', 'high'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3-mini' }))).toEqual(['low', 'medium', 'high'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o4' }))).toEqual(['low', 'medium', 'high'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-oss-reasoning' }))).toEqual([
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
})
|
||||
|
||||
it('should return correct options for deep research models', () => {
|
||||
// Note: Deep research models need to be actual OpenAI reasoning models to be detected
|
||||
// 'sonar-deep-research' from Perplexity is the primary deep research model
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual(['medium'])
|
||||
})
|
||||
|
||||
it('should return correct options for GPT-5 models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5' }))).toEqual([
|
||||
'minimal',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-preview' }))).toEqual([
|
||||
'minimal',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
})
|
||||
|
||||
it('should return correct options for GPT-5 Pro models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro' }))).toEqual(['high'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro-preview' }))).toEqual(['high'])
|
||||
})
|
||||
|
||||
it('should return correct options for GPT-5 Codex models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-codex' }))).toEqual([
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-codex-mini' }))).toEqual([
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
})
|
||||
|
||||
it('should return correct options for GPT-5.1 models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1' }))).toEqual([
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-preview' }))).toEqual([
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-mini' }))).toEqual([
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
})
|
||||
|
||||
it('should return correct options for GPT-5.1 Codex models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-codex' }))).toEqual([
|
||||
'none',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-codex-mini' }))).toEqual([
|
||||
'none',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
})
|
||||
})
|
||||
|
||||
describe('Grok models', () => {
|
||||
it('should return correct options for Grok 3 mini', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'grok-3-mini' }))).toEqual(['low', 'high'])
|
||||
})
|
||||
|
||||
it('should return correct options for Grok 4 Fast', () => {
|
||||
expect(
|
||||
getModelSupportedReasoningEffortOptions(createModel({ id: 'grok-4-fast', provider: 'openrouter' }))
|
||||
).toEqual(['none', 'auto'])
|
||||
})
|
||||
})
|
||||
|
||||
describe('Gemini models', () => {
|
||||
it('should return correct options for Gemini Flash models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-2.5-flash-latest' }))).toEqual([
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high',
|
||||
'auto'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-flash-latest' }))).toEqual([
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high',
|
||||
'auto'
|
||||
])
|
||||
})
|
||||
|
||||
it('should return correct options for Gemini Pro models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-2.5-pro-latest' }))).toEqual([
|
||||
'low',
|
||||
'medium',
|
||||
'high',
|
||||
'auto'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-pro-latest' }))).toEqual([
|
||||
'low',
|
||||
'medium',
|
||||
'high',
|
||||
'auto'
|
||||
])
|
||||
})
|
||||
|
||||
it('should return correct options for Gemini 3 models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-flash' }))).toEqual([
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-pro-preview' }))).toEqual([
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
})
|
||||
})
|
||||
|
||||
describe('Qwen models', () => {
|
||||
it('should return correct options for controllable Qwen models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen-plus' }))).toEqual([
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen-turbo' }))).toEqual([
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen-flash' }))).toEqual([
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen3-8b' }))).toEqual([
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
})
|
||||
|
||||
it('should return undefined for always-thinking Qwen models', () => {
|
||||
// These models always think and don't support thinking token control
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen3-thinking' }))).toBeUndefined()
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen3-vl-235b-thinking' }))).toBeUndefined()
|
||||
})
|
||||
})
|
||||
|
||||
describe('Doubao models', () => {
|
||||
it('should return correct options for auto-thinking Doubao models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-seed-1.6' }))).toEqual([
|
||||
'none',
|
||||
'auto',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-1-5-thinking-pro-m' }))).toEqual([
|
||||
'none',
|
||||
'auto',
|
||||
'high'
|
||||
])
|
||||
})
|
||||
|
||||
it('should return correct options for Doubao models after 251015', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-seed-1-6-251015' }))).toEqual([
|
||||
'minimal',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-seed-1-6-lite-251015' }))).toEqual([
|
||||
'minimal',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
})
|
||||
|
||||
it('should return correct options for other Doubao thinking models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toEqual([
|
||||
'none',
|
||||
'high'
|
||||
])
|
||||
})
|
||||
})
|
||||
|
||||
describe('Other providers', () => {
|
||||
it('should return correct options for Hunyuan models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'hunyuan-a13b' }))).toEqual(['none', 'auto'])
|
||||
})
|
||||
|
||||
it('should return correct options for Zhipu models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.5' }))).toEqual(['none', 'auto'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.6' }))).toEqual(['none', 'auto'])
|
||||
})
|
||||
|
||||
it('should return correct options for Perplexity models', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual(['medium'])
|
||||
})
|
||||
|
||||
it('should return correct options for DeepSeek hybrid models', () => {
|
||||
expect(
|
||||
getModelSupportedReasoningEffortOptions(createModel({ id: 'deepseek-v3.1', provider: 'deepseek' }))
|
||||
).toEqual(['none', 'auto'])
|
||||
expect(
|
||||
getModelSupportedReasoningEffortOptions(createModel({ id: 'deepseek-v3.2', provider: 'openrouter' }))
|
||||
).toEqual(['none', 'auto'])
|
||||
expect(
|
||||
getModelSupportedReasoningEffortOptions(createModel({ id: 'deepseek-chat', provider: 'deepseek' }))
|
||||
).toEqual(['none', 'auto'])
|
||||
})
|
||||
})
|
||||
|
||||
describe('Name-based fallback', () => {
|
||||
it('should fall back to name when id does not match', () => {
|
||||
// Grok 4 Fast requires openrouter provider to be recognized
|
||||
expect(
|
||||
getModelSupportedReasoningEffortOptions(
|
||||
createModel({
|
||||
id: 'custom-id',
|
||||
name: 'grok-4-fast',
|
||||
provider: 'openrouter'
|
||||
})
|
||||
)
|
||||
).toEqual(['none', 'auto'])
|
||||
|
||||
expect(
|
||||
getModelSupportedReasoningEffortOptions(
|
||||
createModel({
|
||||
id: 'custom-id',
|
||||
name: 'gpt-5.1'
|
||||
})
|
||||
)
|
||||
).toEqual(['none', 'low', 'medium', 'high'])
|
||||
|
||||
// Qwen models work well for name-based fallback
|
||||
expect(
|
||||
getModelSupportedReasoningEffortOptions(
|
||||
createModel({
|
||||
id: 'custom-id',
|
||||
name: 'qwen-plus'
|
||||
})
|
||||
)
|
||||
).toEqual(['none', 'low', 'medium', 'high'])
|
||||
})
|
||||
|
||||
it('should use id result when id matches', () => {
|
||||
expect(
|
||||
getModelSupportedReasoningEffortOptions(
|
||||
createModel({
|
||||
id: 'gpt-5.1',
|
||||
name: 'Different Name'
|
||||
})
|
||||
)
|
||||
).toEqual(['none', 'low', 'medium', 'high'])
|
||||
|
||||
expect(
|
||||
getModelSupportedReasoningEffortOptions(
|
||||
createModel({
|
||||
id: 'o3-mini',
|
||||
name: 'Some other name'
|
||||
})
|
||||
)
|
||||
).toEqual(['low', 'medium', 'high'])
|
||||
})
|
||||
})
|
||||
|
||||
describe('Case sensitivity', () => {
|
||||
it('should handle case insensitive model IDs', () => {
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'GPT-5.1' }))).toEqual([
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high'
|
||||
])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'O3-MINI' }))).toEqual(['low', 'medium', 'high'])
|
||||
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'Gemini-2.5-Flash-Latest' }))).toEqual([
|
||||
'none',
|
||||
'low',
|
||||
'medium',
|
||||
'high',
|
||||
'auto'
|
||||
])
|
||||
})
|
||||
})
|
||||
|
||||
describe('Integration with MODEL_SUPPORTED_OPTIONS', () => {
|
||||
it('should return values that match MODEL_SUPPORTED_OPTIONS configuration', () => {
|
||||
// Verify that returned values match the configuration
|
||||
const model = createModel({ id: 'o3' })
|
||||
const result = getModelSupportedReasoningEffortOptions(model)
|
||||
expect(result).toEqual(MODEL_SUPPORTED_OPTIONS.o)
|
||||
|
||||
const gpt5Model = createModel({ id: 'gpt-5' })
|
||||
const gpt5Result = getModelSupportedReasoningEffortOptions(gpt5Model)
|
||||
expect(gpt5Result).toEqual(MODEL_SUPPORTED_OPTIONS.gpt5)
|
||||
|
||||
const geminiModel = createModel({ id: 'gemini-2.5-flash-latest' })
|
||||
const geminiResult = getModelSupportedReasoningEffortOptions(geminiModel)
|
||||
expect(geminiResult).toEqual(MODEL_SUPPORTED_OPTIONS.gemini)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
@ -193,7 +193,7 @@ export function getModelLogoById(modelId: string): string | undefined {
|
||||
'gpt-5.1': GPT51ModelLogo,
|
||||
'gpt-5': GPT5ModelLogo,
|
||||
gpts: isLight ? ChatGPT4ModelLogo : ChatGPT4ModelLogoDark,
|
||||
'gpt-oss(?:-[\\w-]+)': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
|
||||
'gpt-oss(?::|-[\\w-]+)': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
|
||||
'text-moderation': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
|
||||
'babbage-': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
|
||||
'(sora-|sora_)': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
|
||||
|
||||
@@ -35,6 +35,16 @@ export const isGPT5ProModel = (model: Model) => {
  return modelId.includes('gpt-5-pro')
}

export const isGPT52ProModel = (model: Model) => {
  const modelId = getLowerBaseModelName(model.id)
  return modelId.includes('gpt-5.2-pro')
}

export const isGPT51CodexMaxModel = (model: Model) => {
  const modelId = getLowerBaseModelName(model.id)
  return modelId.includes('gpt-5.1-codex-max')
}

export const isOpenAIOpenWeightModel = (model: Model) => {
  const modelId = getLowerBaseModelName(model.id)
  return modelId.includes('gpt-oss')
@@ -42,7 +52,7 @@ export const isOpenAIOpenWeightModel = (model: Model) => {

export const isGPT5SeriesModel = (model: Model) => {
  const modelId = getLowerBaseModelName(model.id)
  return modelId.includes('gpt-5') && !modelId.includes('gpt-5.1')
  return modelId.includes('gpt-5') && !modelId.includes('gpt-5.1') && !modelId.includes('gpt-5.2')
}

export const isGPT5SeriesReasoningModel = (model: Model) => {
@@ -55,6 +65,11 @@ export const isGPT51SeriesModel = (model: Model) => {
  return modelId.includes('gpt-5.1')
}

export const isGPT52SeriesModel = (model: Model) => {
  const modelId = getLowerBaseModelName(model.id)
  return modelId.includes('gpt-5.2')
}

export function isSupportVerbosityModel(model: Model): boolean {
  const modelId = getLowerBaseModelName(model.id)
  return (isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) && !modelId.includes('chat')
@@ -86,7 +101,7 @@ export function isSupportedReasoningEffortOpenAIModel(model: Model): boolean {
    modelId.includes('o3') ||
    modelId.includes('o4') ||
    modelId.includes('gpt-oss') ||
    ((isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) && !modelId.includes('chat'))
    ((isGPT5SeriesModel(model) || isGPT51SeriesModel(model) || isGPT52SeriesModel(model)) && !modelId.includes('chat'))
  )
}

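With `isGPT5SeriesModel` now excluding both `gpt-5.1` and `gpt-5.2` ids, the three family checks are mutually exclusive. A quick sketch of how the ids fall out, using only the substring logic shown above:

```ts
// Sketch only: operates on plain id strings instead of the real Model objects.
const classify = (id: string) => ({
  gpt5: id.includes('gpt-5') && !id.includes('gpt-5.1') && !id.includes('gpt-5.2'),
  gpt51: id.includes('gpt-5.1'),
  gpt52: id.includes('gpt-5.2')
})

classify('gpt-5-codex') // => { gpt5: true,  gpt51: false, gpt52: false }
classify('gpt-5.1-codex-max') // => { gpt5: false, gpt51: true,  gpt52: false }
classify('gpt-5.2-pro') // => { gpt5: false, gpt51: false, gpt52: true }
```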
@@ -1,6 +1,7 @@
import type {
  Model,
  ReasoningEffortConfig,
  ReasoningEffortOption,
  SystemProviderId,
  ThinkingModelType,
  ThinkingOptionConfig
@@ -11,7 +12,10 @@ import { isEmbeddingModel, isRerankModel } from './embedding'
import {
  isGPT5ProModel,
  isGPT5SeriesModel,
  isGPT51CodexMaxModel,
  isGPT51SeriesModel,
  isGPT52ProModel,
  isGPT52SeriesModel,
  isOpenAIDeepResearchModel,
  isOpenAIReasoningModel,
  isSupportedReasoningEffortOpenAIModel
@@ -25,7 +29,7 @@ export const REASONING_REGEX =

// Mapping from model type to the reasoning_effort values it supports
// TODO: refactor this. too many identical options
export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
export const MODEL_SUPPORTED_REASONING_EFFORT = {
  default: ['low', 'medium', 'high'] as const,
  o: ['low', 'medium', 'high'] as const,
  openai_deep_research: ['medium'] as const,
@@ -33,7 +37,10 @@ export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
  gpt5_codex: ['low', 'medium', 'high'] as const,
  gpt5_1: ['none', 'low', 'medium', 'high'] as const,
  gpt5_1_codex: ['none', 'medium', 'high'] as const,
  gpt5_1_codex_max: ['none', 'medium', 'high', 'xhigh'] as const,
  gpt5_2: ['none', 'low', 'medium', 'high', 'xhigh'] as const,
  gpt5pro: ['high'] as const,
  gpt52pro: ['medium', 'high', 'xhigh'] as const,
  grok: ['low', 'high'] as const,
  grok4_fast: ['auto'] as const,
  gemini: ['low', 'medium', 'high', 'auto'] as const,
@@ -48,7 +55,7 @@ export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
  zhipu: ['auto'] as const,
  perplexity: ['low', 'medium', 'high'] as const,
  deepseek_hybrid: ['auto'] as const
} as const
} as const satisfies ReasoningEffortConfig

// Mapping from model type to the thinking options it supports
export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
@@ -60,6 +67,9 @@ export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
  gpt5_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_codex,
  gpt5_1: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1,
  gpt5_1_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex,
  gpt5_2: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_2,
  gpt5_1_codex_max: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex_max,
  gpt52pro: MODEL_SUPPORTED_REASONING_EFFORT.gpt52pro,
  grok: MODEL_SUPPORTED_REASONING_EFFORT.grok,
  grok4_fast: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
  gemini: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
@@ -84,6 +94,7 @@ const withModelIdAndNameAsId = <T>(model: Model, fn: (model: Model) => T): { idR
  }
}

// TODO: add ut
const _getThinkModelType = (model: Model): ThinkingModelType => {
  let thinkingModelType: ThinkingModelType = 'default'
  const modelId = getLowerBaseModelName(model.id)
@@ -93,9 +104,17 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
  if (isGPT51SeriesModel(model)) {
    if (modelId.includes('codex')) {
      thinkingModelType = 'gpt5_1_codex'
      if (isGPT51CodexMaxModel(model)) {
        thinkingModelType = 'gpt5_1_codex_max'
      }
    } else {
      thinkingModelType = 'gpt5_1'
    }
  } else if (isGPT52SeriesModel(model)) {
    thinkingModelType = 'gpt5_2'
    if (isGPT52ProModel(model)) {
      thinkingModelType = 'gpt52pro'
    }
  } else if (isGPT5SeriesModel(model)) {
    if (modelId.includes('codex')) {
      thinkingModelType = 'gpt5_codex'
@@ -148,6 +167,64 @@ export const getThinkModelType = (model: Model): ThinkingModelType => {
  }
}

const _getModelSupportedReasoningEffortOptions = (model: Model): ReasoningEffortOption[] | undefined => {
  if (!isSupportedReasoningEffortModel(model) && !isSupportedThinkingTokenModel(model)) {
    return undefined
  }
  // use private function to avoid redundant function calling
  const thinkingType = _getThinkModelType(model)
  return MODEL_SUPPORTED_OPTIONS[thinkingType]
}

/**
 * Gets the supported reasoning effort options for a given model.
 *
 * This function determines which reasoning effort levels a model supports based on its type.
 * It works with models that support either `reasoning_effort` parameter (like OpenAI o-series)
 * or thinking token control (like Claude, Gemini, Qwen, etc.).
 *
 * The function implements a fallback mechanism: it first checks the model's `id`, and if that
 * doesn't match any known patterns, it falls back to checking the model's `name`.
 *
 * @param model - The model to check for reasoning effort support. Can be undefined or null.
 * @returns An array of supported reasoning effort options, or undefined if:
 *   - The model is null/undefined
 *   - The model doesn't support reasoning effort or thinking tokens
 *
 * @example
 * // OpenAI o-series models support low, medium, high
 * getModelSupportedReasoningEffortOptions({ id: 'o3-mini', ... })
 * // Returns: ['low', 'medium', 'high']
 *
 * @example
 * // GPT-5.1 models support none, low, medium, high
 * getModelSupportedReasoningEffortOptions({ id: 'gpt-5.1', ... })
 * // Returns: ['none', 'low', 'medium', 'high']
 *
 * @example
 * // Gemini Flash models support none, low, medium, high, auto
 * getModelSupportedReasoningEffortOptions({ id: 'gemini-2.5-flash-latest', ... })
 * // Returns: ['none', 'low', 'medium', 'high', 'auto']
 *
 * @example
 * // Non-reasoning models return undefined
 * getModelSupportedReasoningEffortOptions({ id: 'gpt-4o', ... })
 * // Returns: undefined
 *
 * @example
 * // Name fallback when id doesn't match
 * getModelSupportedReasoningEffortOptions({ id: 'custom-id', name: 'gpt-5.1', ... })
 * // Returns: ['none', 'low', 'medium', 'high']
 */
export const getModelSupportedReasoningEffortOptions = (
  model: Model | undefined | null
): ReasoningEffortOption[] | undefined => {
  if (!model) return undefined

  const { idResult, nameResult } = withModelIdAndNameAsId(model, _getModelSupportedReasoningEffortOptions)
  return idResult ?? nameResult
}

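A hedged sketch of how a settings UI could consume the exported helper to populate a reasoning-effort selector; the consumer function is an assumption and not part of this commit, but the import path matches the `@renderer/config/models` export used elsewhere in the diff:

```ts
// Sketch only: illustrative consumer; not part of this diff.
import { getModelSupportedReasoningEffortOptions } from '@renderer/config/models'
import type { Model } from '@renderer/types'

function reasoningEffortChoices(model: Model | undefined): string[] {
  const options = getModelSupportedReasoningEffortOptions(model)
  // Non-reasoning models return undefined, so the selector can simply be hidden.
  return options ? [...options] : []
}
```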
function _isSupportedThinkingTokenModel(model: Model): boolean {
|
||||
// Specifically for DeepSeek V3.1. White list for now
|
||||
if (isDeepSeekHybridInferenceModel(model)) {
|
||||
@ -183,12 +260,14 @@ function _isSupportedThinkingTokenModel(model: Model): boolean {
|
||||
}
|
||||
|
||||
/** 用于判断是否支持控制思考,但不一定以reasoning_effort的方式 */
|
||||
// TODO: rename it
|
||||
export function isSupportedThinkingTokenModel(model?: Model): boolean {
|
||||
if (!model) return false
|
||||
const { idResult, nameResult } = withModelIdAndNameAsId(model, _isSupportedThinkingTokenModel)
|
||||
return idResult || nameResult
|
||||
}
|
||||
|
||||
// TODO: it should be merged in isSupportedThinkingTokenModel
|
||||
export function isSupportedReasoningEffortModel(model?: Model): boolean {
|
||||
if (!model) {
|
||||
return false
|
||||
@ -370,7 +449,7 @@ export function isQwenAlwaysThinkModel(model?: Model): boolean {
|
||||
|
||||
// Doubao 支持思考模式的模型正则
|
||||
export const DOUBAO_THINKING_MODEL_REGEX =
|
||||
/doubao-(?:1[.-]5-thinking-vision-pro|1[.-]5-thinking-pro-m|seed-1[.-]6(?:-flash)?(?!-(?:thinking)(?:-|$)))(?:-[\w-]+)*/i
|
||||
/doubao-(?:1[.-]5-thinking-vision-pro|1[.-]5-thinking-pro-m|seed-1[.-]6(?:-flash)?(?!-(?:thinking)(?:-|$))|seed-code(?:-preview)?(?:-\d+)?)(?:-[\w-]+)*/i
|
||||
|
||||
// 支持 auto 的 Doubao 模型 doubao-seed-1.6-xxx doubao-seed-1-6-xxx doubao-1-5-thinking-pro-m-xxx
|
||||
// Auto thinking is no longer supported after version 251015, see https://console.volcengine.com/ark/region:ark+cn-beijing/model/detail?Id=doubao-seed-1-6
|
||||
|
||||
@ -26,6 +26,7 @@ export const FUNCTION_CALLING_MODELS = [
|
||||
'gemini(?:-[\\w-]+)?', // 提前排除了gemini的嵌入模型
|
||||
'grok-3(?:-[\\w-]+)?',
|
||||
'doubao-seed-1[.-]6(?:-[\\w-]+)?',
|
||||
'doubao-seed-code(?:-[\\w-]+)?',
|
||||
'kimi-k2(?:-[\\w-]+)?',
|
||||
'ling-\\w+(?:-[\\w-]+)?',
|
||||
'ring-\\w+(?:-[\\w-]+)?',
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
import type OpenAI from '@cherrystudio/openai'
|
||||
import { isEmbeddingModel, isRerankModel } from '@renderer/config/models/embedding'
|
||||
import type { Assistant } from '@renderer/types'
|
||||
import { type Model, SystemProviderIds } from '@renderer/types'
|
||||
import type { OpenAIVerbosity, ValidOpenAIVerbosity } from '@renderer/types/aiCoreTypes'
|
||||
import { getLowerBaseModelName } from '@renderer/utils'
|
||||
@ -8,6 +9,7 @@ import {
|
||||
isGPT5ProModel,
|
||||
isGPT5SeriesModel,
|
||||
isGPT51SeriesModel,
|
||||
isGPT52SeriesModel,
|
||||
isOpenAIChatCompletionOnlyModel,
|
||||
isOpenAIOpenWeightModel,
|
||||
isOpenAIReasoningModel,
|
||||
@ -48,13 +50,16 @@ export function isSupportedModel(model: OpenAI.Models.Model): boolean {
|
||||
* @param model - The model to check
|
||||
* @returns true if the model supports temperature parameter
|
||||
*/
|
||||
export function isSupportTemperatureModel(model: Model | undefined | null): boolean {
|
||||
export function isSupportTemperatureModel(model: Model | undefined | null, assistant?: Assistant): boolean {
|
||||
if (!model) {
|
||||
return false
|
||||
}
|
||||
|
||||
// OpenAI reasoning models (except open weight) don't support temperature
|
||||
if (isOpenAIReasoningModel(model) && !isOpenAIOpenWeightModel(model)) {
|
||||
if (isGPT52SeriesModel(model) && assistant?.settings?.reasoning_effort === 'none') {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@ -76,13 +81,16 @@ export function isSupportTemperatureModel(model: Model | undefined | null): bool
|
||||
* @param model - The model to check
|
||||
* @returns true if the model supports top_p parameter
|
||||
*/
|
||||
export function isSupportTopPModel(model: Model | undefined | null): boolean {
|
||||
export function isSupportTopPModel(model: Model | undefined | null, assistant?: Assistant): boolean {
|
||||
if (!model) {
|
||||
return false
|
||||
}
|
||||
|
||||
// OpenAI reasoning models (except open weight) don't support top_p
|
||||
if (isOpenAIReasoningModel(model) && !isOpenAIOpenWeightModel(model)) {
|
||||
if (isGPT52SeriesModel(model) && assistant?.settings?.reasoning_effort === 'none') {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
@ -46,6 +46,7 @@ const visionAllowedModels = [
|
||||
'kimi-latest',
|
||||
'gemma-3(?:-[\\w-]+)',
|
||||
'doubao-seed-1[.-]6(?:-[\\w-]+)?',
|
||||
'doubao-seed-code(?:-[\\w-]+)?',
|
||||
'kimi-thinking-preview',
|
||||
`gemma3(?:[-:\\w]+)?`,
|
||||
'kimi-vl-a3b-thinking(?:-[\\w-]+)?',
|
||||
|
||||
@ -306,7 +306,7 @@ export const SEARCH_SUMMARY_PROMPT_KNOWLEDGE_ONLY = `
|
||||
**Use user's language to rephrase the question.**
|
||||
Follow these guidelines:
|
||||
1. If the question is a simple writing task, greeting (e.g., Hi, Hello, How are you), or does not require searching for information (unless the greeting contains a follow-up question), return 'not_needed' in the 'question' XML block. This indicates that no search is required.
|
||||
2. For knowledge, You need rewrite user query into 'rewrite' XML block with one alternative version while preserving the original intent and meaning. Also include the original question in the 'question' block.
|
||||
2. For knowledge, You need rewrite user query into 'rewrite' XML block with one alternative version while preserving the original intent and meaning. Also include the rephrased or decomposed question(s) in the 'question' block.
|
||||
3. Always return the rephrased question inside the 'question' XML block.
|
||||
4. Always wrap the rephrased question in the appropriate XML blocks: use <knowledge></knowledge> for queries that can be answered from a pre-existing knowledge base. Ensure that the rephrased question is always contained within a <question></question> block inside the wrapper.
|
||||
5. *use knowledge to rephrase the question*
|
||||
|
||||
@ -31,6 +31,11 @@ export const WEB_SEARCH_PROVIDER_CONFIG: Record<WebSearchProviderId, WebSearchPr
|
||||
apiKey: 'https://dashboard.exa.ai/api-keys'
|
||||
}
|
||||
},
|
||||
'exa-mcp': {
|
||||
websites: {
|
||||
official: 'https://exa.ai'
|
||||
}
|
||||
},
|
||||
bocha: {
|
||||
websites: {
|
||||
official: 'https://bochaai.com',
|
||||
@ -80,6 +85,11 @@ export const WEB_SEARCH_PROVIDERS: WebSearchProvider[] = [
|
||||
apiHost: 'https://api.exa.ai',
|
||||
apiKey: ''
|
||||
},
|
||||
{
|
||||
id: 'exa-mcp',
|
||||
name: 'ExaMCP',
|
||||
apiHost: 'https://mcp.exa.ai/mcp'
|
||||
},
|
||||
{
|
||||
id: 'bocha',
|
||||
name: 'Bocha',
|
||||
|
||||
@ -163,6 +163,7 @@ export const useKnowledge = (baseId: string) => {
|
||||
processingProgress: 0,
|
||||
processingError: '',
|
||||
uniqueId: undefined,
|
||||
retryCount: 0,
|
||||
updated_at: Date.now()
|
||||
})
|
||||
checkAllBases()
|
||||
@ -182,6 +183,7 @@ export const useKnowledge = (baseId: string) => {
|
||||
processingProgress: 0,
|
||||
processingError: '',
|
||||
uniqueId: undefined,
|
||||
retryCount: 0,
|
||||
updated_at: Date.now()
|
||||
})
|
||||
setTimeout(() => KnowledgeQueue.checkAllBases(), 0)
|
||||
|
||||
@ -316,7 +316,8 @@ const reasoningEffortOptionsKeyMap: Record<ThinkingOption, string> = {
|
||||
high: 'assistants.settings.reasoning_effort.high',
|
||||
low: 'assistants.settings.reasoning_effort.low',
|
||||
medium: 'assistants.settings.reasoning_effort.medium',
|
||||
auto: 'assistants.settings.reasoning_effort.default'
|
||||
auto: 'assistants.settings.reasoning_effort.default',
|
||||
xhigh: 'assistants.settings.reasoning_effort.xhigh'
|
||||
} as const
|
||||
|
||||
export const getReasoningEffortOptionsLabel = (key: string): string => {
|
||||
@ -342,7 +343,8 @@ const builtInMcpDescriptionKeyMap: Record<BuiltinMCPServerName, string> = {
|
||||
[BuiltinMCPServerNames.filesystem]: 'settings.mcp.builtinServersDescriptions.filesystem',
|
||||
[BuiltinMCPServerNames.difyKnowledge]: 'settings.mcp.builtinServersDescriptions.dify_knowledge',
|
||||
[BuiltinMCPServerNames.python]: 'settings.mcp.builtinServersDescriptions.python',
|
||||
[BuiltinMCPServerNames.didiMCP]: 'settings.mcp.builtinServersDescriptions.didi_mcp'
|
||||
[BuiltinMCPServerNames.didiMCP]: 'settings.mcp.builtinServersDescriptions.didi_mcp',
|
||||
[BuiltinMCPServerNames.browser]: 'settings.mcp.builtinServersDescriptions.browser'
|
||||
} as const
|
||||
|
||||
export const getBuiltInMcpServerDescriptionLabel = (key: string): string => {
|
||||
|
||||
@ -31,12 +31,26 @@
|
||||
}
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "Using auto-detected Git Bash",
|
||||
"clear": {
|
||||
"button": "Clear custom path"
|
||||
},
|
||||
"customPath": "Using custom path: {{path}}",
|
||||
"error": {
|
||||
"description": "Git Bash is required to run agents on Windows. The agent cannot function without it. Please install Git for Windows from",
|
||||
"recheck": "Recheck Git Bash Installation",
|
||||
"title": "Git Bash Required"
|
||||
},
|
||||
"found": {
|
||||
"title": "Git Bash configured"
|
||||
},
|
||||
"notFound": "Git Bash not found. Please install it first.",
|
||||
"pick": {
|
||||
"button": "Select Git Bash Path",
|
||||
"failed": "Failed to set Git Bash path",
|
||||
"invalidPath": "Selected file is not a valid Git Bash executable (bash.exe).",
|
||||
"title": "Select Git Bash executable"
|
||||
},
|
||||
"success": "Git Bash detected successfully!"
|
||||
},
|
||||
"input": {
|
||||
@ -458,6 +472,7 @@
|
||||
"button": "Import",
|
||||
"error": {
|
||||
"fetch_failed": "Failed to fetch from URL",
|
||||
"file_required": "Please select a file first",
|
||||
"invalid_format": "Invalid assistant format: missing required fields",
|
||||
"url_required": "Please enter a URL"
|
||||
},
|
||||
@ -471,6 +486,17 @@
|
||||
"url_placeholder": "Enter JSON URL"
|
||||
},
|
||||
"manage": {
|
||||
"batch_delete": {
|
||||
"button": "Delete",
|
||||
"confirm": "Are you sure you want to delete the selected {{count}} assistants?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "Export"
|
||||
},
|
||||
"mode": {
|
||||
"manage": "Manage",
|
||||
"sort": "Sort"
|
||||
},
|
||||
"title": "Manage Assistants"
|
||||
},
|
||||
"my_agents": "My Assistants",
|
||||
@ -524,7 +550,8 @@
|
||||
"low": "Low",
|
||||
"medium": "Medium",
|
||||
"minimal": "Minimal",
|
||||
"off": "Off"
|
||||
"off": "Off",
|
||||
"xhigh": "Extra High"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "Add Phrase",
|
||||
@ -1004,6 +1031,29 @@
|
||||
"yuque": "Export to Yuque"
|
||||
},
|
||||
"list": "Topic List",
|
||||
"manage": {
|
||||
"clear_selection": "Clear Selection",
|
||||
"delete": {
|
||||
"confirm": {
|
||||
"content": "Are you sure you want to delete {{count}} selected topic(s)? This action cannot be undone.",
|
||||
"title": "Delete Topics"
|
||||
},
|
||||
"success": "Deleted {{count}} topic(s)"
|
||||
},
|
||||
"deselect_all": "Deselect All",
|
||||
"error": {
|
||||
"at_least_one": "At least one topic must be kept"
|
||||
},
|
||||
"move": {
|
||||
"button": "Move",
|
||||
"placeholder": "Select target assistant",
|
||||
"success": "Moved {{count}} topic(s)"
|
||||
},
|
||||
"pinned": "Pinned Topics",
|
||||
"selected_count": "{{count}} selected",
|
||||
"title": "Manage Topics",
|
||||
"unpinned": "Unpinned Topics"
|
||||
},
|
||||
"move_to": "Move to",
|
||||
"new": "New Topic",
|
||||
"pin": "Pin Topic",
|
||||
@ -1014,6 +1064,10 @@
|
||||
"label": "Topic Prompts",
|
||||
"tips": "Topic Prompts: Additional supplementary prompts provided for the current topic"
|
||||
},
|
||||
"search": {
|
||||
"placeholder": "Search topics...",
|
||||
"title": "Search"
|
||||
},
|
||||
"title": "Topics",
|
||||
"unpin": "Unpin Topic"
|
||||
},
|
||||
@ -1185,6 +1239,7 @@
|
||||
"saved": "Saved",
|
||||
"search": "Search",
|
||||
"select": "Select",
|
||||
"select_all": "Select All",
|
||||
"selected": "Selected",
|
||||
"selectedItems": "Selected {{count}} items",
|
||||
"selectedMessages": "Selected {{count}} messages",
|
||||
@ -1197,11 +1252,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "Stop",
|
||||
"subscribe": "Subscribe",
|
||||
"success": "Success",
|
||||
"swap": "Swap",
|
||||
"topics": "Topics",
|
||||
"unknown": "Unknown",
|
||||
"unnamed": "Unnamed",
|
||||
"unsubscribe": "Unsubscribe",
|
||||
"update_success": "Update successfully",
|
||||
"upload_files": "Upload file",
|
||||
"warning": "Warning",
|
||||
@ -1696,7 +1753,7 @@
|
||||
"import": {
|
||||
"error": "Import failed"
|
||||
},
|
||||
"imported": "Imported successfully"
|
||||
"imported": "Successfully imported {{count}} assistant(s)"
|
||||
},
|
||||
"api": {
|
||||
"check": {
|
||||
@ -2128,6 +2185,7 @@
|
||||
"collapse": "Collapse",
|
||||
"content_placeholder": "Please enter the note content...",
|
||||
"copyContent": "Copy Content",
|
||||
"crossPlatformRestoreWarning": "Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
|
||||
"delete": "delete",
|
||||
"delete_confirm": "Are you sure you want to delete this {{type}}?",
|
||||
"delete_folder_confirm": "Are you sure you want to delete the folder \"{{name}}\" and all of its contents?",
|
||||
@ -2303,7 +2361,7 @@
|
||||
"failed": {
|
||||
"install": "Install OVMS failed:",
|
||||
"install_code_100": "Unknown Error",
|
||||
"install_code_101": "Only supports Intel(R) Core(TM) Ultra CPU",
|
||||
"install_code_101": "Only supports Intel(R) CPU",
|
||||
"install_code_102": "Only supports Windows",
|
||||
"install_code_103": "Download OVMS runtime failed",
|
||||
"install_code_104": "Failed to install OVMS runtime",
|
||||
@ -3891,6 +3949,7 @@
|
||||
"builtinServers": "Builtin Servers",
|
||||
"builtinServersDescriptions": {
|
||||
"brave_search": "An MCP server implementation integrating the Brave Search API, providing both web and local search functionalities. Requires configuring the BRAVE_API_KEY environment variable",
|
||||
"browser": "Control a headless Electron window via Chrome DevTools Protocol. Tools: open URL, execute single-line JS, reset session.",
|
||||
"didi_mcp": "DiDi MCP server providing ride-hailing services including map search, price estimation, order management, and driver tracking. Only available in Mainland China. Requires configuring the DIDI_API_KEY environment variable",
|
||||
"dify_knowledge": "Dify's MCP server implementation provides a simple API to interact with Dify. Requires configuring the Dify Key",
|
||||
"fetch": "MCP server for retrieving URL web content",
|
||||
@ -3943,6 +4002,7 @@
|
||||
"jsonSaveError": "Failed to save JSON configuration.",
|
||||
"jsonSaveSuccess": "JSON configuration has been saved.",
|
||||
"logoUrl": "Logo URL",
|
||||
"logs": "Logs",
|
||||
"longRunning": "Long Running Mode",
|
||||
"longRunningTooltip": "When enabled, the server supports long-running tasks. When receiving progress notifications, the timeout will be reset and the maximum execution time will be extended to 10 minutes.",
|
||||
"marketplaces": "Marketplaces",
|
||||
@ -3962,6 +4022,7 @@
|
||||
"name": "Name",
|
||||
"newServer": "MCP Server",
|
||||
"noDescriptionAvailable": "No description available",
|
||||
"noLogs": "No logs yet",
|
||||
"noServers": "No servers configured",
|
||||
"not_support": "Model not supported",
|
||||
"npx_list": {
|
||||
|
||||
@ -31,12 +31,26 @@
|
||||
}
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "使用自动检测的 Git Bash",
|
||||
"clear": {
|
||||
"button": "清除自定义路径"
|
||||
},
|
||||
"customPath": "使用自定义路径:{{path}}",
|
||||
"error": {
|
||||
"description": "在 Windows 上运行智能体需要 Git Bash。没有它智能体无法运行。请从以下地址安装 Git for Windows",
|
||||
"recheck": "重新检测 Git Bash 安装",
|
||||
"title": "需要 Git Bash"
|
||||
},
|
||||
"found": {
|
||||
"title": "已配置 Git Bash"
|
||||
},
|
||||
"notFound": "未找到 Git Bash。请先安装。",
|
||||
"pick": {
|
||||
"button": "选择 Git Bash 路径",
|
||||
"failed": "设置 Git Bash 路径失败",
|
||||
"invalidPath": "选择的文件不是有效的 Git Bash 可执行文件(bash.exe)。",
|
||||
"title": "选择 Git Bash 可执行文件"
|
||||
},
|
||||
"success": "成功检测到 Git Bash!"
|
||||
},
|
||||
"input": {
|
||||
@ -458,6 +472,7 @@
|
||||
"button": "导入",
|
||||
"error": {
|
||||
"fetch_failed": "从 URL 获取数据失败",
|
||||
"file_required": "请先选择文件",
|
||||
"invalid_format": "无效的助手格式:缺少必填字段",
|
||||
"url_required": "请输入 URL"
|
||||
},
|
||||
@ -471,6 +486,17 @@
|
||||
"url_placeholder": "输入 JSON URL"
|
||||
},
|
||||
"manage": {
|
||||
"batch_delete": {
|
||||
"button": "删除",
|
||||
"confirm": "确定要删除选中的 {{count}} 个助手吗?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "导出"
|
||||
},
|
||||
"mode": {
|
||||
"manage": "管理",
|
||||
"sort": "排序"
|
||||
},
|
||||
"title": "管理助手"
|
||||
},
|
||||
"my_agents": "我的助手",
|
||||
@ -524,7 +550,8 @@
|
||||
"low": "浮想",
|
||||
"medium": "斟酌",
|
||||
"minimal": "微念",
|
||||
"off": "关闭"
|
||||
"off": "关闭",
|
||||
"xhigh": "穷究"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "添加短语",
|
||||
@ -1004,6 +1031,29 @@
|
||||
"yuque": "导出到语雀"
|
||||
},
|
||||
"list": "话题列表",
|
||||
"manage": {
|
||||
"clear_selection": "取消选择",
|
||||
"delete": {
|
||||
"confirm": {
|
||||
"content": "确定要删除选中的 {{count}} 个话题吗?此操作不可撤销。",
|
||||
"title": "删除话题"
|
||||
},
|
||||
"success": "已删除 {{count}} 个话题"
|
||||
},
|
||||
"deselect_all": "取消全选",
|
||||
"error": {
|
||||
"at_least_one": "至少需要保留一个话题"
|
||||
},
|
||||
"move": {
|
||||
"button": "移动",
|
||||
"placeholder": "选择目标助手",
|
||||
"success": "已移动 {{count}} 个话题"
|
||||
},
|
||||
"pinned": "已固定的话题",
|
||||
"selected_count": "已选择 {{count}} 个",
|
||||
"title": "管理话题",
|
||||
"unpinned": "未固定的话题"
|
||||
},
|
||||
"move_to": "移动到",
|
||||
"new": "开始新对话",
|
||||
"pin": "固定话题",
|
||||
@ -1014,6 +1064,10 @@
|
||||
"label": "话题提示词",
|
||||
"tips": "话题提示词:针对当前话题提供额外的补充提示词"
|
||||
},
|
||||
"search": {
|
||||
"placeholder": "搜索话题...",
|
||||
"title": "搜索"
|
||||
},
|
||||
"title": "话题",
|
||||
"unpin": "取消固定"
|
||||
},
|
||||
@ -1185,6 +1239,7 @@
|
||||
"saved": "已保存",
|
||||
"search": "搜索",
|
||||
"select": "选择",
|
||||
"select_all": "全选",
|
||||
"selected": "已选择",
|
||||
"selectedItems": "已选择 {{count}} 项",
|
||||
"selectedMessages": "选中 {{count}} 条消息",
|
||||
@ -1197,11 +1252,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "停止",
|
||||
"subscribe": "订阅",
|
||||
"success": "成功",
|
||||
"swap": "交换",
|
||||
"topics": "话题",
|
||||
"unknown": "未知",
|
||||
"unnamed": "未命名",
|
||||
"unsubscribe": "退订",
|
||||
"update_success": "更新成功",
|
||||
"upload_files": "上传文件",
|
||||
"warning": "警告",
|
||||
@ -1696,7 +1753,7 @@
|
||||
"import": {
|
||||
"error": "导入失败"
|
||||
},
|
||||
"imported": "导入成功"
|
||||
"imported": "成功导入 {{count}} 个助手"
|
||||
},
|
||||
"api": {
|
||||
"check": {
|
||||
@ -2128,6 +2185,7 @@
|
||||
"collapse": "收起",
|
||||
"content_placeholder": "请输入笔记内容...",
|
||||
"copyContent": "复制内容",
|
||||
"crossPlatformRestoreWarning": "检测到从其他设备恢复配置,但笔记目录为空。请将笔记文件复制到: {{path}}",
|
||||
"delete": "删除",
|
||||
"delete_confirm": "确定要删除这个{{type}}吗?",
|
||||
"delete_folder_confirm": "确定要删除文件夹 \"{{name}}\" 及其所有内容吗?",
|
||||
@ -2303,7 +2361,7 @@
|
||||
"failed": {
|
||||
"install": "安装 OVMS 失败:",
|
||||
"install_code_100": "未知错误",
|
||||
"install_code_101": "仅支持 Intel(R) Core(TM) Ultra CPU",
|
||||
"install_code_101": "仅支持 Intel(R) CPU",
|
||||
"install_code_102": "仅支持 Windows",
|
||||
"install_code_103": "下载 OVMS runtime 失败",
|
||||
"install_code_104": "安装 OVMS runtime 失败",
|
||||
@ -3891,6 +3949,7 @@
|
||||
"builtinServers": "内置服务器",
|
||||
"builtinServersDescriptions": {
|
||||
"brave_search": "一个集成了Brave 搜索 API 的 MCP 服务器实现,提供网页与本地搜索双重功能。需要配置 BRAVE_API_KEY 环境变量",
|
||||
"browser": "通过 Chrome DevTools 协议控制隐藏的 Electron 窗口,支持打开 URL、执行单行 JS、重置会话",
|
||||
"didi_mcp": "一个集成了滴滴 MCP 服务器实现,提供网约车服务包括地图搜索、价格预估、订单管理和司机跟踪。仅支持中国大陆地区。需要配置 DIDI_API_KEY 环境变量",
|
||||
"dify_knowledge": "Dify 的 MCP 服务器实现,提供了一个简单的 API 来与 Dify 进行交互。需要配置 Dify Key",
|
||||
"fetch": "用于获取 URL 网页内容的 MCP 服务器",
|
||||
@ -3943,6 +4002,7 @@
|
||||
"jsonSaveError": "保存 JSON 配置失败",
|
||||
"jsonSaveSuccess": "JSON 配置已保存",
|
||||
"logoUrl": "标志网址",
|
||||
"logs": "日志",
|
||||
"longRunning": "长时间运行模式",
|
||||
"longRunningTooltip": "启用后,服务器支持长时间任务,接收到进度通知时会重置超时计时器,并延长最大超时时间至10分钟",
|
||||
"marketplaces": "市场",
|
||||
@ -3962,6 +4022,7 @@
|
||||
"name": "名称",
|
||||
"newServer": "MCP 服务器",
|
||||
"noDescriptionAvailable": "暂无描述",
|
||||
"noLogs": "暂无日志",
|
||||
"noServers": "未配置服务器",
|
||||
"not_support": "模型不支持",
|
||||
"npx_list": {
File diff suppressed because it is too large
@ -31,12 +31,26 @@
|
||||
}
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "Automatisch ermitteltes Git Bash wird verwendet",
|
||||
"clear": {
|
||||
"button": "Benutzerdefinierten Pfad löschen"
|
||||
},
|
||||
"customPath": "Benutzerdefinierter Pfad: {{path}}",
|
||||
"error": {
|
||||
"description": "Git Bash ist erforderlich, um Agents unter Windows auszuführen. Der Agent kann ohne es nicht funktionieren. Bitte installieren Sie Git für Windows von",
|
||||
"recheck": "Überprüfe die Git Bash-Installation erneut",
|
||||
"title": "Git Bash erforderlich"
|
||||
},
|
||||
"found": {
|
||||
"title": "Git Bash konfiguriert"
|
||||
},
|
||||
"notFound": "Git Bash nicht gefunden. Bitte installieren Sie es zuerst.",
|
||||
"pick": {
|
||||
"button": "Git Bash Pfad auswählen",
|
||||
"failed": "Git Bash Pfad konnte nicht gesetzt werden",
|
||||
"invalidPath": "Die ausgewählte Datei ist keine gültige Git Bash ausführbare Datei (bash.exe).",
|
||||
"title": "Git Bash ausführbare Datei auswählen"
|
||||
},
|
||||
"success": "Git Bash erfolgreich erkannt!"
|
||||
},
|
||||
"input": {
|
||||
@ -458,6 +472,7 @@
|
||||
"button": "Importieren",
|
||||
"error": {
|
||||
"fetch_failed": "Daten von URL abrufen fehlgeschlagen",
|
||||
"file_required": "Bitte wählen Sie zuerst eine Datei aus",
|
||||
"invalid_format": "Ungültiges Assistentenformat: Pflichtfelder fehlen",
|
||||
"url_required": "Bitte geben Sie eine URL ein"
|
||||
},
|
||||
@ -471,6 +486,17 @@
|
||||
"url_placeholder": "JSON-URL eingeben"
|
||||
},
|
||||
"manage": {
|
||||
"batch_delete": {
|
||||
"button": "Stapel löschen",
|
||||
"confirm": "Sind Sie sicher, dass Sie die ausgewählten {{count}} Assistenten löschen möchten?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "Exportieren"
|
||||
},
|
||||
"mode": {
|
||||
"manage": "Verwalten",
|
||||
"sort": "Sortieren"
|
||||
},
|
||||
"title": "Assistenten verwalten"
|
||||
},
|
||||
"my_agents": "Meine Assistenten",
|
||||
@ -524,7 +550,8 @@
|
||||
"low": "Spontan",
|
||||
"medium": "Überlegt",
|
||||
"minimal": "Minimal",
|
||||
"off": "Aus"
|
||||
"off": "Aus",
|
||||
"xhigh": "Extra hoch"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "Phrase hinzufügen",
|
||||
@ -1004,6 +1031,29 @@
|
||||
"yuque": "Nach Yuque exportieren"
|
||||
},
|
||||
"list": "Themenliste",
|
||||
"manage": {
|
||||
"clear_selection": "Auswahl aufheben",
|
||||
"delete": {
|
||||
"confirm": {
|
||||
"content": "Sind Sie sicher, dass Sie {{count}} ausgewähltes Thema bzw. ausgewählte Themen löschen möchten? Diese Aktion kann nicht rückgängig gemacht werden.",
|
||||
"title": "Themen löschen"
|
||||
},
|
||||
"success": "{{count}} Thema/Themen gelöscht"
|
||||
},
|
||||
"deselect_all": "Alle abwählen",
|
||||
"error": {
|
||||
"at_least_one": "Mindestens ein Thema muss beibehalten werden"
|
||||
},
|
||||
"move": {
|
||||
"button": "Bewegen",
|
||||
"placeholder": "Ziel auswählen",
|
||||
"success": "{{count}} Thema(s) verschoben"
|
||||
},
|
||||
"pinned": "Angepinnte Themen",
|
||||
"selected_count": "{{count}} ausgewählt",
|
||||
"title": "Themen verwalten",
|
||||
"unpinned": "Losgelöste Themen"
|
||||
},
|
||||
"move_to": "Verschieben nach",
|
||||
"new": "Neues Gespräch starten",
|
||||
"pin": "Thema anheften",
|
||||
@ -1014,6 +1064,10 @@
|
||||
"label": "Themen-Prompt",
|
||||
"tips": "Themen-Prompt: Bietet zusätzliche ergänzende Prompts für das aktuelle Thema"
|
||||
},
|
||||
"search": {
|
||||
"placeholder": "Themen durchsuchen...",
|
||||
"title": "Suche"
|
||||
},
|
||||
"title": "Thema",
|
||||
"unpin": "Anheften aufheben"
|
||||
},
|
||||
@ -1185,6 +1239,7 @@
|
||||
"saved": "Gespeichert",
|
||||
"search": "Suchen",
|
||||
"select": "Auswählen",
|
||||
"select_all": "Alle auswählen",
|
||||
"selected": "Ausgewählt",
|
||||
"selectedItems": "{{count}} Elemente ausgewählt",
|
||||
"selectedMessages": "{{count}} Nachrichten ausgewählt",
|
||||
@ -1197,11 +1252,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "Stoppen",
|
||||
"subscribe": "Abonnieren",
|
||||
"success": "Erfolgreich",
|
||||
"swap": "Tauschen",
|
||||
"topics": "Themen",
|
||||
"unknown": "Unbekannt",
|
||||
"unnamed": "Unbenannt",
|
||||
"unsubscribe": "Abmelden",
|
||||
"update_success": "Erfolgreich aktualisiert",
|
||||
"upload_files": "Dateien hochladen",
|
||||
"warning": "Warnung",
|
||||
@ -2128,6 +2185,7 @@
|
||||
"collapse": "Einklappen",
|
||||
"content_placeholder": "Bitte Notizinhalt eingeben...",
|
||||
"copyContent": "Inhalt kopieren",
|
||||
"crossPlatformRestoreWarning": "[to be translated]:Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
|
||||
"delete": "Löschen",
|
||||
"delete_confirm": "Möchten Sie diesen {{type}} wirklich löschen?",
|
||||
"delete_folder_confirm": "Möchten Sie Ordner \"{{name}}\" und alle seine Inhalte wirklich löschen?",
|
||||
@ -3891,6 +3949,7 @@
|
||||
"builtinServers": "Integrierter Server",
|
||||
"builtinServersDescriptions": {
|
||||
"brave_search": "MCP-Server-Implementierung mit Brave-Search-API, die sowohl Web- als auch lokale Suchfunktionen bietet. BRAVE_API_KEY-Umgebungsvariable muss konfiguriert werden",
|
||||
"browser": "Steuert ein headless Electron-Fenster über das Chrome DevTools Protocol. Tools: URL öffnen, einzeiligen JS ausführen, Sitzung zurücksetzen.",
|
||||
"didi_mcp": "An integrated Didi MCP server implementation that provides ride-hailing services including map search, price estimation, order management, and driver tracking. Only available in mainland China. Requires the DIDI_API_KEY environment variable to be configured.",
|
||||
"dify_knowledge": "MCP-Server-Implementierung von Dify, die einen einfachen API-Zugriff auf Dify bietet. Dify Key muss konfiguriert werden",
|
||||
"fetch": "MCP-Server zum Abrufen von Webseiteninhalten",
|
||||
@ -3943,6 +4002,7 @@
|
||||
"jsonSaveError": "JSON-Konfiguration speichern fehlgeschlagen",
|
||||
"jsonSaveSuccess": "JSON-Konfiguration erfolgreich gespeichert",
|
||||
"logoUrl": "Logo-URL",
|
||||
"logs": "Protokolle",
|
||||
"longRunning": "Lang laufender Modus",
|
||||
"longRunningTooltip": "Nach Aktivierung unterstützt der Server lange Aufgaben. Wenn ein Fortschrittsbenachrichtigung empfangen wird, wird der Timeout-Timer zurückgesetzt und die maximale Timeout-Zeit auf 10 Minuten verlängert",
|
||||
"marketplaces": "Marktplätze",
|
||||
@ -3962,6 +4022,7 @@
|
||||
"name": "Name",
|
||||
"newServer": "MCP-Server",
|
||||
"noDescriptionAvailable": "Keine Beschreibung",
|
||||
"noLogs": "Noch keine Protokolle",
|
||||
"noServers": "Server nicht konfiguriert",
|
||||
"not_support": "Modell nicht unterstützt",
|
||||
"npx_list": {
|
||||
|
||||
@ -31,12 +31,26 @@
|
||||
}
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "Χρησιμοποιείται αυτόματα εντοπισμένο Git Bash",
|
||||
"clear": {
|
||||
"button": "Διαγραφή προσαρμοσμένης διαδρομής"
|
||||
},
|
||||
"customPath": "Χρησιμοποιείται προσαρμοσμένη διαδρομή: {{path}}",
|
||||
"error": {
|
||||
"description": "Το Git Bash απαιτείται για την εκτέλεση πρακτόρων στα Windows. Ο πράκτορας δεν μπορεί να λειτουργήσει χωρίς αυτό. Παρακαλούμε εγκαταστήστε το Git για Windows από",
|
||||
"recheck": "Επανέλεγχος Εγκατάστασης του Git Bash",
|
||||
"title": "Απαιτείται Git Bash"
|
||||
},
|
||||
"found": {
|
||||
"title": "Το Git Bash διαμορφώθηκε"
|
||||
},
|
||||
"notFound": "Το Git Bash δεν βρέθηκε. Παρακαλώ εγκαταστήστε το πρώτα.",
|
||||
"pick": {
|
||||
"button": "Επιλογή διαδρομής Git Bash",
|
||||
"failed": "Αποτυχία ορισμού διαδρομής Git Bash",
|
||||
"invalidPath": "Το επιλεγμένο αρχείο δεν είναι έγκυρο εκτελέσιμο Git Bash (bash.exe).",
|
||||
"title": "Επιλογή εκτελέσιμου Git Bash"
|
||||
},
|
||||
"success": "Το Git Bash εντοπίστηκε με επιτυχία!"
|
||||
},
|
||||
"input": {
|
||||
@ -458,6 +472,7 @@
|
||||
"button": "Εισαγωγή",
|
||||
"error": {
|
||||
"fetch_failed": "Αποτυχία λήψης δεδομένων από το URL",
|
||||
"file_required": "Παρακαλώ επιλέξτε πρώτα ένα αρχείο",
|
||||
"invalid_format": "Μη έγκυρη μορφή βοηθού: λείπουν υποχρεωτικά πεδία",
|
||||
"url_required": "Παρακαλώ εισάγετε ένα URL"
|
||||
},
|
||||
@ -471,6 +486,17 @@
|
||||
"url_placeholder": "Εισάγετε JSON URL"
|
||||
},
|
||||
"manage": {
|
||||
"batch_delete": {
|
||||
"button": "Μαζική Διαγραφή",
|
||||
"confirm": "Είστε βέβαιοι ότι θέλετε να διαγράψετε τους επιλεγμένους {{count}} βοηθούς;"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "Εξαγωγή"
|
||||
},
|
||||
"mode": {
|
||||
"manage": "Διαχειριστείτε",
|
||||
"sort": "Ταξινόμηση"
|
||||
},
|
||||
"title": "Διαχείριση βοηθών"
|
||||
},
|
||||
"my_agents": "Οι βοηθοί μου",
|
||||
@ -524,7 +550,8 @@
|
||||
"low": "Μικρό",
|
||||
"medium": "Μεσαίο",
|
||||
"minimal": "ελάχιστος",
|
||||
"off": "Απενεργοποίηση"
|
||||
"off": "Απενεργοποίηση",
|
||||
"xhigh": "Εξαιρετικά Υψηλή"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "Προσθήκη φράσης",
|
||||
@ -1004,6 +1031,29 @@
|
||||
"yuque": "Εξαγωγή στο Yuque"
|
||||
},
|
||||
"list": "Λίστα θεμάτων",
|
||||
"manage": {
|
||||
"clear_selection": "Καθαρισμός Επιλογής",
|
||||
"delete": {
|
||||
"confirm": {
|
||||
"content": "Είσαι βέβαιος ότι θέλεις να διαγράψεις {{count}} επιλεγμένο(α) θέμα(τα); Αυτή η ενέργεια δεν μπορεί να αναιρεθεί.",
|
||||
"title": "Διαγραφή Θεμάτων"
|
||||
},
|
||||
"success": "Διαγράφηκαν {{count}} θέμα(τα)"
|
||||
},
|
||||
"deselect_all": "Αποεπιλογή όλων",
|
||||
"error": {
|
||||
"at_least_one": "Τουλάχιστον ένα θέμα πρέπει να διατηρηθεί"
|
||||
},
|
||||
"move": {
|
||||
"button": "Μετακίνηση",
|
||||
"placeholder": "Επιλέξτε στόχο",
|
||||
"success": "Μετακινήθηκαν {{count}} θέματα"
|
||||
},
|
||||
"pinned": "Καρφιτσωμένα Θέματα",
|
||||
"selected_count": "{{count}} επιλεγμένα",
|
||||
"title": "Διαχείριση Θεμάτων",
|
||||
"unpinned": "Ξεκαρφωμένα Θέματα"
|
||||
},
|
||||
"move_to": "Μετακίνηση στο",
|
||||
"new": "Ξεκινήστε νέα συζήτηση",
|
||||
"pin": "Σταθερά θέματα",
|
||||
@ -1014,6 +1064,10 @@
|
||||
"label": "Προσδοκώμενα όρια",
|
||||
"tips": "Προσδοκώμενα όρια: προσθέτει επιπλέον επιστημονικές προσθήκες για το παρόν θέμα"
|
||||
},
|
||||
"search": {
|
||||
"placeholder": "Αναζήτηση θεμάτων...",
|
||||
"title": "Αναζήτηση"
|
||||
},
|
||||
"title": "Θέματα",
|
||||
"unpin": "Ξεκαρφίτσωμα"
|
||||
},
|
||||
@ -1185,6 +1239,7 @@
|
||||
"saved": "Αποθηκεύτηκε",
|
||||
"search": "Αναζήτηση",
|
||||
"select": "Επιλογή",
|
||||
"select_all": "Επιλογή Όλων",
|
||||
"selected": "Επιλεγμένο",
|
||||
"selectedItems": "Επιλέχθηκαν {{count}} αντικείμενα",
|
||||
"selectedMessages": "Επιλέχθηκαν {{count}} μηνύματα",
|
||||
@ -1197,11 +1252,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "σταματήστε",
|
||||
"subscribe": "Εγγραφείτε",
|
||||
"success": "Επιτυχία",
|
||||
"swap": "Εναλλαγή",
|
||||
"topics": "Θέματα",
|
||||
"unknown": "Άγνωστο",
|
||||
"unnamed": "Χωρίς όνομα",
|
||||
"unsubscribe": "Απεγγραφή",
|
||||
"update_success": "Επιτυχής ενημέρωση",
|
||||
"upload_files": "Ανέβασμα αρχείου",
|
||||
"warning": "Προσοχή",
|
||||
@ -2128,6 +2185,7 @@
|
||||
"collapse": "σύμπτυξη",
|
||||
"content_placeholder": "Παρακαλώ εισαγάγετε το περιεχόμενο των σημειώσεων...",
|
||||
"copyContent": "αντιγραφή περιεχομένου",
|
||||
"crossPlatformRestoreWarning": "[to be translated]:Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
|
||||
"delete": "διαγραφή",
|
||||
"delete_confirm": "Είστε βέβαιοι ότι θέλετε να διαγράψετε αυτό το {{type}};",
|
||||
"delete_folder_confirm": "Θέλετε να διαγράψετε τον φάκελο «{{name}}» και όλο το περιεχόμενό του;",
|
||||
@ -3891,6 +3949,7 @@
|
||||
"builtinServers": "Ενσωματωμένοι Διακομιστές",
|
||||
"builtinServersDescriptions": {
|
||||
"brave_search": "μια εφαρμογή διακομιστή MCP που ενσωματώνει το Brave Search API, παρέχοντας δυνατότητες αναζήτησης στον ιστό και τοπικής αναζήτησης. Απαιτείται η ρύθμιση της μεταβλητής περιβάλλοντος BRAVE_API_KEY",
|
||||
"browser": "Ελέγχει ένα headless παράθυρο Electron μέσω του Chrome DevTools Protocol. Εργαλεία: άνοιγμα URL, εκτέλεση JS μίας γραμμής, επαναφορά συνεδρίας.",
|
||||
"didi_mcp": "Διακομιστής DiDi MCP που παρέχει υπηρεσίες μεταφοράς συμπεριλαμβανομένης της αναζήτησης χαρτών, εκτίμησης τιμών, διαχείρισης παραγγελιών και παρακολούθησης οδηγών. Διαθέσιμο μόνο στην ηπειρωτική Κίνα. Απαιτεί διαμόρφωση της μεταβλητής περιβάλλοντος DIDI_API_KEY",
|
||||
"dify_knowledge": "Η υλοποίηση του Dify για τον διακομιστή MCP, παρέχει μια απλή API για να αλληλεπιδρά με το Dify. Απαιτείται η ρύθμιση του κλειδιού Dify",
|
||||
"fetch": "Εξυπηρετητής MCP για λήψη περιεχομένου ιστοσελίδας URL",
|
||||
@ -3943,6 +4002,7 @@
|
||||
"jsonSaveError": "Αποτυχία αποθήκευσης της διαμορφωτικής ρύθμισης JSON",
|
||||
"jsonSaveSuccess": "Η διαμορφωτική ρύθμιση JSON αποθηκεύτηκε επιτυχώς",
|
||||
"logoUrl": "URL Λογότυπου",
|
||||
"logs": "Αρχεία καταγραφής",
|
||||
"longRunning": "Μακροχρόνια λειτουργία",
|
||||
"longRunningTooltip": "Όταν ενεργοποιηθεί, ο διακομιστής υποστηρίζει μακροχρόνιες εργασίες, επαναφέρει το χρονικό όριο μετά από λήψη ειδοποίησης προόδου και επεκτείνει το μέγιστο χρονικό όριο σε 10 λεπτά.",
|
||||
"marketplaces": "Αγορές",
|
||||
@ -3962,6 +4022,7 @@
|
||||
"name": "Όνομα",
|
||||
"newServer": "Διακομιστής MCP",
|
||||
"noDescriptionAvailable": "Δεν υπάρχει διαθέσιμη περιγραφή",
|
||||
"noLogs": "Δεν υπάρχουν αρχεία καταγραφής ακόμα",
|
||||
"noServers": "Δεν έχουν ρυθμιστεί διακομιστές",
|
||||
"not_support": "Το μοντέλο δεν υποστηρίζεται",
|
||||
"npx_list": {
|
||||
|
||||
@ -31,12 +31,26 @@
|
||||
}
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "Usando Git Bash detectado automáticamente",
|
||||
"clear": {
|
||||
"button": "Borrar ruta personalizada"
|
||||
},
|
||||
"customPath": "Usando ruta personalizada: {{path}}",
|
||||
"error": {
|
||||
"description": "Se requiere Git Bash para ejecutar agentes en Windows. El agente no puede funcionar sin él. Instale Git para Windows desde",
|
||||
"recheck": "Volver a verificar la instalación de Git Bash",
|
||||
"title": "Git Bash Requerido"
|
||||
},
|
||||
"found": {
|
||||
"title": "Git Bash configurado"
|
||||
},
|
||||
"notFound": "Git Bash no encontrado. Por favor, instálalo primero.",
|
||||
"pick": {
|
||||
"button": "Seleccionar ruta de Git Bash",
|
||||
"failed": "No se pudo configurar la ruta de Git Bash",
|
||||
"invalidPath": "El archivo seleccionado no es un ejecutable válido de Git Bash (bash.exe).",
|
||||
"title": "Seleccionar ejecutable de Git Bash"
|
||||
},
|
||||
"success": "¡Git Bash detectado con éxito!"
|
||||
},
|
||||
"input": {
|
||||
@ -458,6 +472,7 @@
|
||||
"button": "Importar",
|
||||
"error": {
|
||||
"fetch_failed": "Error al obtener datos desde la URL",
|
||||
"file_required": "Por favor, selecciona primero un archivo",
|
||||
"invalid_format": "Formato de asistente inválido: faltan campos obligatorios",
|
||||
"url_required": "Por favor introduce una URL"
|
||||
},
|
||||
@ -471,6 +486,17 @@
|
||||
"url_placeholder": "Introducir URL JSON"
|
||||
},
|
||||
"manage": {
|
||||
"batch_delete": {
|
||||
"button": "Eliminación por lotes",
|
||||
"confirm": "¿Estás seguro de que quieres eliminar los {{count}} asistentes seleccionados?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "Exportar"
|
||||
},
|
||||
"mode": {
|
||||
"manage": "Gestionar",
|
||||
"sort": "Ordenar"
|
||||
},
|
||||
"title": "Gestionar asistentes"
|
||||
},
|
||||
"my_agents": "Mis asistentes",
|
||||
@ -524,7 +550,8 @@
|
||||
"low": "Corto",
|
||||
"medium": "Medio",
|
||||
"minimal": "minimal",
|
||||
"off": "Apagado"
|
||||
"off": "Apagado",
|
||||
"xhigh": "Extra Alta"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "Agregar frase",
|
||||
@ -1004,6 +1031,29 @@
|
||||
"yuque": "Exportar a Yuque"
|
||||
},
|
||||
"list": "Lista de temas",
|
||||
"manage": {
|
||||
"clear_selection": "Borrar selección",
|
||||
"delete": {
|
||||
"confirm": {
|
||||
"content": "¿Estás seguro de que quieres eliminar {{count}} tema(s) seleccionado(s)? Esta acción no se puede deshacer.",
|
||||
"title": "Eliminar Temas"
|
||||
},
|
||||
"success": "Eliminado(s) {{count}} tema(s)"
|
||||
},
|
||||
"deselect_all": "Deseleccionar todo",
|
||||
"error": {
|
||||
"at_least_one": "Al menos se debe mantener un tema."
|
||||
},
|
||||
"move": {
|
||||
"button": "Mover",
|
||||
"placeholder": "Seleccionar asistente de destino",
|
||||
"success": "Movido(s) {{count}} tema(s)"
|
||||
},
|
||||
"pinned": "Temas Fijados",
|
||||
"selected_count": "{{count}} seleccionado",
|
||||
"title": "Administrar Temas",
|
||||
"unpinned": "Temas no fijados"
|
||||
},
|
||||
"move_to": "Mover a",
|
||||
"new": "Iniciar nueva conversación",
|
||||
"pin": "Fijar tema",
|
||||
@ -1014,6 +1064,10 @@
|
||||
"label": "Palabras clave del tema",
|
||||
"tips": "Palabras clave del tema: proporcionar indicaciones adicionales para el tema actual"
|
||||
},
|
||||
"search": {
|
||||
"placeholder": "Buscar temas...",
|
||||
"title": "Buscar"
|
||||
},
|
||||
"title": "Tema",
|
||||
"unpin": "Quitar fijación"
|
||||
},
|
||||
@ -1185,6 +1239,7 @@
|
||||
"saved": "Guardado",
|
||||
"search": "Buscar",
|
||||
"select": "Seleccionar",
|
||||
"select_all": "Seleccionar todo",
|
||||
"selected": "Seleccionado",
|
||||
"selectedItems": "{{count}} elementos seleccionados",
|
||||
"selectedMessages": "{{count}} mensajes seleccionados",
|
||||
@ -1197,11 +1252,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "Detener",
|
||||
"subscribe": "Suscribirse",
|
||||
"success": "Éxito",
|
||||
"swap": "Intercambiar",
|
||||
"topics": "Temas",
|
||||
"unknown": "Desconocido",
|
||||
"unnamed": "Sin nombre",
|
||||
"unsubscribe": "Cancelar suscripción",
|
||||
"update_success": "Actualización exitosa",
|
||||
"upload_files": "Subir archivo",
|
||||
"warning": "Advertencia",
|
||||
@ -2128,6 +2185,7 @@
|
||||
"collapse": "ocultar",
|
||||
"content_placeholder": "Introduzca el contenido de la nota...",
|
||||
"copyContent": "copiar contenido",
|
||||
"crossPlatformRestoreWarning": "[to be translated]:Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
|
||||
"delete": "eliminar",
|
||||
"delete_confirm": "¿Estás seguro de que deseas eliminar este {{type}}?",
|
||||
"delete_folder_confirm": "¿Está seguro de que desea eliminar la carpeta \"{{name}}\" y todo su contenido?",
|
||||
@ -3891,6 +3949,7 @@
|
||||
"builtinServers": "Servidores integrados",
|
||||
"builtinServersDescriptions": {
|
||||
"brave_search": "Una implementación de servidor MCP que integra la API de búsqueda de Brave, proporcionando funciones de búsqueda web y búsqueda local. Requiere configurar la variable de entorno BRAVE_API_KEY",
|
||||
"browser": "Controla una ventana Electron headless mediante Chrome DevTools Protocol. Herramientas: abrir URL, ejecutar JS de una línea, reiniciar sesión.",
|
||||
"didi_mcp": "Servidor DiDi MCP que proporciona servicios de transporte incluyendo búsqueda de mapas, estimación de precios, gestión de pedidos y seguimiento de conductores. Disponible solo en China Continental. Requiere configurar la variable de entorno DIDI_API_KEY",
|
||||
"dify_knowledge": "Implementación del servidor MCP de Dify, que proporciona una API sencilla para interactuar con Dify. Se requiere configurar la clave de Dify.",
|
||||
"fetch": "Servidor MCP para obtener el contenido de la página web de una URL",
|
||||
@ -3943,6 +4002,7 @@
|
||||
"jsonSaveError": "Fallo al guardar la configuración JSON",
|
||||
"jsonSaveSuccess": "Configuración JSON guardada exitosamente",
|
||||
"logoUrl": "URL del logotipo",
|
||||
"logs": "Registros",
|
||||
"longRunning": "Modo de ejecución prolongada",
|
||||
"longRunningTooltip": "Una vez habilitado, el servidor admite tareas de larga duración, reinicia el temporizador de tiempo de espera al recibir notificaciones de progreso y amplía el tiempo máximo de espera hasta 10 minutos.",
|
||||
"marketplaces": "Mercados",
|
||||
@ -3962,6 +4022,7 @@
|
||||
"name": "Nombre",
|
||||
"newServer": "Servidor MCP",
|
||||
"noDescriptionAvailable": "Sin descripción disponible por ahora",
|
||||
"noLogs": "Aún no hay registros",
|
||||
"noServers": "No se han configurado servidores",
|
||||
"not_support": "El modelo no es compatible",
|
||||
"npx_list": {
|
||||
|
||||
@ -31,12 +31,26 @@
|
||||
}
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "Utilisation de Git Bash détecté automatiquement",
|
||||
"clear": {
|
||||
"button": "Effacer le chemin personnalisé"
|
||||
},
|
||||
"customPath": "Utilisation du chemin personnalisé : {{path}}",
|
||||
"error": {
|
||||
"description": "Git Bash est requis pour exécuter des agents sur Windows. L'agent ne peut pas fonctionner sans. Veuillez installer Git pour Windows depuis",
|
||||
"recheck": "Revérifier l'installation de Git Bash",
|
||||
"title": "Git Bash requis"
|
||||
},
|
||||
"notFound": "Git Bash introuvable. Veuillez l’installer d’abord.",
|
||||
"found": {
|
||||
"title": "Git Bash configuré"
|
||||
},
|
||||
"notFound": "Git Bash non trouvé. Veuillez l'installer d'abord.",
|
||||
"pick": {
|
||||
"button": "Sélectionner le chemin Git Bash",
|
||||
"failed": "Échec de la configuration du chemin Git Bash",
|
||||
"invalidPath": "Le fichier sélectionné n'est pas un exécutable Git Bash valide (bash.exe).",
|
||||
"title": "Sélectionner l'exécutable Git Bash"
|
||||
},
|
||||
"success": "Git Bash détecté avec succès !"
|
||||
},
|
||||
"input": {
|
||||
@ -458,6 +472,7 @@
|
||||
"button": "Importer",
|
||||
"error": {
|
||||
"fetch_failed": "Échec de la récupération des données depuis l'URL",
|
||||
"file_required": "Veuillez d'abord sélectionner un fichier",
|
||||
"invalid_format": "Format d'assistant invalide : champs obligatoires manquants",
|
||||
"url_required": "Veuillez saisir une URL"
|
||||
},
|
||||
@ -471,6 +486,17 @@
|
||||
"url_placeholder": "Saisir l'URL JSON"
|
||||
},
|
||||
"manage": {
|
||||
"batch_delete": {
|
||||
"button": "Suppression par lot",
|
||||
"confirm": "Êtes-vous sûr de vouloir supprimer les {{count}} assistants sélectionnés ?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "Exporter"
|
||||
},
|
||||
"mode": {
|
||||
"manage": "Gérer",
|
||||
"sort": "Trier"
|
||||
},
|
||||
"title": "Gérer les assistants"
|
||||
},
|
||||
"my_agents": "Mes assistants",
|
||||
@ -524,7 +550,8 @@
|
||||
"low": "Court",
|
||||
"medium": "Moyen",
|
||||
"minimal": "minimal",
|
||||
"off": "Off"
|
||||
"off": "Off",
|
||||
"xhigh": "Très élevée"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "Добавить фразу",
|
||||
@ -1004,6 +1031,29 @@
|
||||
"yuque": "Exporter vers Yuque"
|
||||
},
|
||||
"list": "Liste des sujets",
|
||||
"manage": {
|
||||
"clear_selection": "Effacer la sélection",
|
||||
"delete": {
|
||||
"confirm": {
|
||||
"content": "Êtes-vous sûr de vouloir supprimer {{count}} sujet(s) sélectionné(s) ? Cette action est irréversible.",
|
||||
"title": "Supprimer des sujets"
|
||||
},
|
||||
"success": "Supprimé {{count}} sujet(s)"
|
||||
},
|
||||
"deselect_all": "Tout désélectionner",
|
||||
"error": {
|
||||
"at_least_one": "Au moins un sujet doit être conservé"
|
||||
},
|
||||
"move": {
|
||||
"button": "Déplacer",
|
||||
"placeholder": "Sélectionner la cible",
|
||||
"success": "Déplacé {{count}} sujet(s)"
|
||||
},
|
||||
"pinned": "Sujets épinglés",
|
||||
"selected_count": "{{count}} sélectionné",
|
||||
"title": "Gérer les sujets",
|
||||
"unpinned": "Sujets non épinglés"
|
||||
},
|
||||
"move_to": "Déplacer vers",
|
||||
"new": "Commencer une nouvelle conversation",
|
||||
"pin": "Fixer le sujet",
|
||||
@ -1014,6 +1064,10 @@
|
||||
"label": "Indicateurs de sujet",
|
||||
"tips": "Indicateurs de sujet : fournir des indications supplémentaires pour le sujet actuel"
|
||||
},
|
||||
"search": {
|
||||
"placeholder": "Rechercher des sujets...",
|
||||
"title": "Rechercher"
|
||||
},
|
||||
"title": "Sujet",
|
||||
"unpin": "Annuler le fixage"
|
||||
},
|
||||
@ -1185,6 +1239,7 @@
|
||||
"saved": "enregistré",
|
||||
"search": "Rechercher",
|
||||
"select": "Sélectionner",
|
||||
"select_all": "Tout sélectionner",
|
||||
"selected": "Sélectionné",
|
||||
"selectedItems": "{{count}} éléments sélectionnés",
|
||||
"selectedMessages": "{{count}} messages sélectionnés",
|
||||
@ -1197,11 +1252,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "Arrêter",
|
||||
"subscribe": "S'abonner",
|
||||
"success": "Succès",
|
||||
"swap": "Échanger",
|
||||
"topics": "Sujets",
|
||||
"unknown": "Inconnu",
|
||||
"unnamed": "Sans nom",
|
||||
"unsubscribe": "Se désabonner",
|
||||
"update_success": "Mise à jour réussie",
|
||||
"upload_files": "Uploader des fichiers",
|
||||
"warning": "Avertissement",
|
||||
@ -2128,6 +2185,7 @@
|
||||
"collapse": "réduire",
|
||||
"content_placeholder": "Veuillez saisir le contenu de la note...",
|
||||
"copyContent": "contenu copié",
|
||||
"crossPlatformRestoreWarning": "[to be translated]:Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
|
||||
"delete": "supprimer",
|
||||
"delete_confirm": "Êtes-vous sûr de vouloir supprimer ce {{type}} ?",
|
||||
"delete_folder_confirm": "Êtes-vous sûr de vouloir supprimer le dossier \"{{name}}\" et tout son contenu ?",
|
||||
@ -3891,6 +3949,7 @@
|
||||
"builtinServers": "Serveurs intégrés",
|
||||
"builtinServersDescriptions": {
|
||||
"brave_search": "Une implémentation de serveur MCP intégrant l'API de recherche Brave, offrant des fonctionnalités de recherche web et locale. Nécessite la configuration de la variable d'environnement BRAVE_API_KEY",
|
||||
"browser": "Contrôle une fenêtre Electron headless via Chrome DevTools Protocol. Outils : ouvrir une URL, exécuter du JS en une ligne, réinitialiser la session.",
|
||||
"didi_mcp": "Serveur DiDi MCP fournissant des services de transport incluant la recherche de cartes, l'estimation des prix, la gestion des commandes et le suivi des conducteurs. Disponible uniquement en Chine continentale. Nécessite la configuration de la variable d'environnement DIDI_API_KEY",
|
||||
"dify_knowledge": "Implémentation du serveur MCP de Dify, fournissant une API simple pour interagir avec Dify. Nécessite la configuration de la clé Dify",
|
||||
"fetch": "serveur MCP utilisé pour récupérer le contenu des pages web URL",
|
||||
@ -3943,6 +4002,7 @@
|
||||
"jsonSaveError": "Échec de la sauvegarde de la configuration JSON",
|
||||
"jsonSaveSuccess": "Configuration JSON sauvegardée",
|
||||
"logoUrl": "Адрес логотипа",
|
||||
"logs": "Journaux",
|
||||
"longRunning": "Mode d'exécution prolongée",
|
||||
"longRunningTooltip": "Une fois activé, le serveur prend en charge les tâches de longue durée, réinitialise le minuteur de temporisation à la réception des notifications de progression, et prolonge le délai d'expiration maximal à 10 minutes.",
|
||||
"marketplaces": "Places de marché",
|
||||
@ -3962,6 +4022,7 @@
|
||||
"name": "Nom",
|
||||
"newServer": "Сервер MCP",
|
||||
"noDescriptionAvailable": "Aucune description disponible pour le moment",
|
||||
"noLogs": "Aucun journal pour le moment",
|
||||
"noServers": "Aucun serveur configuré",
|
||||
"not_support": "Модель не поддерживается",
|
||||
"npx_list": {
|
||||
|
||||
@ -31,12 +31,26 @@
|
||||
}
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "自動検出されたGit Bashを使用中",
|
||||
"clear": {
|
||||
"button": "カスタムパスをクリア"
|
||||
},
|
||||
"customPath": "カスタムパスを使用中: {{path}}",
|
||||
"error": {
|
||||
"description": "Windowsでエージェントを実行するにはGit Bashが必要です。これがないとエージェントは動作しません。以下からGit for Windowsをインストールしてください。",
|
||||
"recheck": "Git Bashのインストールを再確認してください",
|
||||
"title": "Git Bashが必要です"
|
||||
},
|
||||
"found": {
|
||||
"title": "Git Bashが設定されました"
|
||||
},
|
||||
"notFound": "Git Bash が見つかりません。先にインストールしてください。",
|
||||
"pick": {
|
||||
"button": "Git Bashパスを選択",
|
||||
"failed": "Git Bashパスの設定に失敗しました",
|
||||
"invalidPath": "選択されたファイルは有効なGit Bash実行ファイル(bash.exe)ではありません。",
|
||||
"title": "Git Bash実行ファイルを選択"
|
||||
},
|
||||
"success": "Git Bashが正常に検出されました!"
|
||||
},
|
||||
"input": {
|
||||
@ -458,6 +472,7 @@
|
||||
"button": "インポート",
|
||||
"error": {
|
||||
"fetch_failed": "URLからのデータ取得に失敗しました",
|
||||
"file_required": "まずファイルを選択してください",
|
||||
"invalid_format": "無効なアシスタント形式:必須フィールドが不足しています",
|
||||
"url_required": "URLを入力してください"
|
||||
},
|
||||
@ -471,6 +486,17 @@
|
||||
"url_placeholder": "JSON URLを入力"
|
||||
},
|
||||
"manage": {
|
||||
"batch_delete": {
|
||||
"button": "バッチ削除",
|
||||
"confirm": "選択した{{count}}件のアシスタントを削除してもよろしいですか?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "エクスポート"
|
||||
},
|
||||
"mode": {
|
||||
"manage": "管理",
|
||||
"sort": "並べ替え"
|
||||
},
|
||||
"title": "アシスタントを管理"
|
||||
},
|
||||
"my_agents": "マイアシスタント",
|
||||
@ -524,7 +550,8 @@
|
||||
"low": "少しの思考",
|
||||
"medium": "普通の思考",
|
||||
"minimal": "最小限の思考",
|
||||
"off": "オフ"
|
||||
"off": "オフ",
|
||||
"xhigh": "超高"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "プロンプトを追加",
|
||||
@ -1004,6 +1031,29 @@
|
||||
"yuque": "語雀にエクスポート"
|
||||
},
|
||||
"list": "トピックリスト",
|
||||
"manage": {
|
||||
"clear_selection": "選択をクリア",
|
||||
"delete": {
|
||||
"confirm": {
|
||||
"content": "{{count}}件の選択したトピックを削除してもよろしいですか?この操作は元に戻せません。",
|
||||
"title": "トピックを削除"
|
||||
},
|
||||
"success": "{{count}}件のトピックを削除しました"
|
||||
},
|
||||
"deselect_all": "すべての選択を解除",
|
||||
"error": {
|
||||
"at_least_one": "少なくとも1つのトピックは保持されなければなりません"
|
||||
},
|
||||
"move": {
|
||||
"button": "移動",
|
||||
"placeholder": "対象を選択",
|
||||
"success": "{{count}}件のトピックを移動しました"
|
||||
},
|
||||
"pinned": "ピン留めされたトピック",
|
||||
"selected_count": "{{count}} 選択済み",
|
||||
"title": "トピックを管理",
|
||||
"unpinned": "ピン留めされていないトピック"
|
||||
},
|
||||
"move_to": "移動先",
|
||||
"new": "新しいトピック",
|
||||
"pin": "トピックを固定",
|
||||
@ -1014,6 +1064,10 @@
|
||||
"label": "トピック提示語",
|
||||
"tips": "トピック提示語:現在のトピックに対して追加の補足提示語を提供"
|
||||
},
|
||||
"search": {
|
||||
"placeholder": "トピックを検索...",
|
||||
"title": "検索"
|
||||
},
|
||||
"title": "トピック",
|
||||
"unpin": "固定解除"
|
||||
},
|
||||
@ -1185,6 +1239,7 @@
|
||||
"saved": "保存されました",
|
||||
"search": "検索",
|
||||
"select": "選択",
|
||||
"select_all": "すべて選択",
|
||||
"selected": "選択済み",
|
||||
"selectedItems": "{{count}}件の項目を選択しました",
|
||||
"selectedMessages": "{{count}}件のメッセージを選択しました",
|
||||
@ -1197,11 +1252,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "停止",
|
||||
"subscribe": "購読",
|
||||
"success": "成功",
|
||||
"swap": "交換",
|
||||
"topics": "トピック",
|
||||
"unknown": "Unknown",
|
||||
"unnamed": "無題",
|
||||
"unsubscribe": "配信停止",
|
||||
"update_success": "更新成功",
|
||||
"upload_files": "ファイルをアップロードする",
|
||||
"warning": "警告",
|
||||
@ -2128,6 +2185,7 @@
|
||||
"collapse": "閉じる",
|
||||
"content_placeholder": "メモの内容を入力してください...",
|
||||
"copyContent": "コンテンツをコピーします",
|
||||
"crossPlatformRestoreWarning": "[to be translated]:Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
|
||||
"delete": "削除",
|
||||
"delete_confirm": "この{{type}}を本当に削除しますか?",
|
||||
"delete_folder_confirm": "「{{name}}」フォルダーとそのすべての内容を削除してもよろしいですか?",
|
||||
@ -3891,6 +3949,7 @@
|
||||
"builtinServers": "組み込みサーバー",
|
||||
"builtinServersDescriptions": {
|
||||
"brave_search": "Brave検索APIを統合したMCPサーバーの実装で、ウェブ検索とローカル検索の両機能を提供します。BRAVE_API_KEY環境変数の設定が必要です",
|
||||
"browser": "Chrome DevTools Protocolを介してheadless Electronウィンドウを制御します。ツール:URLを開く、単一行JSを実行、セッションをリセット。",
|
||||
"didi_mcp": "DiDi MCPサーバーは、地図検索、料金見積もり、注文管理、ドライバー追跡を含むライドシェアサービスを提供します。中国本土でのみ利用可能です。DIDI_API_KEY環境変数の設定が必要です",
|
||||
"dify_knowledge": "DifyのMCPサーバー実装は、Difyと対話するためのシンプルなAPIを提供します。Dify Keyの設定が必要です。",
|
||||
"fetch": "URLのウェブページコンテンツを取得するためのMCPサーバー",
|
||||
@ -3943,6 +4002,7 @@
|
||||
"jsonSaveError": "JSON設定の保存に失敗しました",
|
||||
"jsonSaveSuccess": "JSON設定が保存されました。",
|
||||
"logoUrl": "ロゴURL",
|
||||
"logs": "ログ",
|
||||
"longRunning": "長時間運行モード",
|
||||
"longRunningTooltip": "このオプションを有効にすると、サーバーは長時間のタスクをサポートします。進行状況通知を受信すると、タイムアウトがリセットされ、最大実行時間が10分に延長されます。",
|
||||
"marketplaces": "マーケットプレイス",
|
||||
@ -3962,6 +4022,7 @@
|
||||
"name": "名前",
|
||||
"newServer": "MCP サーバー",
|
||||
"noDescriptionAvailable": "説明がありません",
|
||||
"noLogs": "ログはまだありません",
|
||||
"noServers": "サーバーが設定されていません",
|
||||
"not_support": "モデルはサポートされていません",
|
||||
"npx_list": {
|
||||
|
||||
@ -31,12 +31,26 @@
|
||||
}
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "Usando Git Bash detectado automaticamente",
|
||||
"clear": {
|
||||
"button": "Limpar caminho personalizado"
|
||||
},
|
||||
"customPath": "Usando caminho personalizado: {{path}}",
|
||||
"error": {
|
||||
"description": "O Git Bash é necessário para executar agentes no Windows. O agente não pode funcionar sem ele. Por favor, instale o Git para Windows a partir de",
|
||||
"recheck": "Reverificar a Instalação do Git Bash",
|
||||
"title": "Git Bash Necessário"
|
||||
},
|
||||
"found": {
|
||||
"title": "Git Bash configurado"
|
||||
},
|
||||
"notFound": "Git Bash não encontrado. Por favor, instale-o primeiro.",
|
||||
"pick": {
|
||||
"button": "Selecionar caminho do Git Bash",
|
||||
"failed": "Falha ao configurar o caminho do Git Bash",
|
||||
"invalidPath": "O arquivo selecionado não é um executável válido do Git Bash (bash.exe).",
|
||||
"title": "Selecionar executável do Git Bash"
|
||||
},
|
||||
"success": "Git Bash detectado com sucesso!"
|
||||
},
|
||||
"input": {
|
||||
@ -458,6 +472,7 @@
|
||||
"button": "Importar",
|
||||
"error": {
|
||||
"fetch_failed": "Falha ao obter dados do URL",
|
||||
"file_required": "Por favor, selecione um arquivo primeiro",
|
||||
"invalid_format": "Formato de assistente inválido: campos obrigatórios em falta",
|
||||
"url_required": "Por favor insere um URL"
|
||||
},
|
||||
@ -471,6 +486,17 @@
|
||||
"url_placeholder": "Inserir URL JSON"
|
||||
},
|
||||
"manage": {
|
||||
"batch_delete": {
|
||||
"button": "Exclusão em Lote",
|
||||
"confirm": "Tem certeza de que deseja excluir os {{count}} assistentes selecionados?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "Exportar"
|
||||
},
|
||||
"mode": {
|
||||
"manage": "Gerenciar",
|
||||
"sort": "Ordenar"
|
||||
},
|
||||
"title": "Gerir assistentes"
|
||||
},
|
||||
"my_agents": "Os meus assistentes",
|
||||
@ -524,7 +550,8 @@
|
||||
"low": "Curto",
|
||||
"medium": "Médio",
|
||||
"minimal": "mínimo",
|
||||
"off": "Desligado"
|
||||
"off": "Desligado",
|
||||
"xhigh": "Extra Alta"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "Adicionar Frase",
|
||||
@ -1004,6 +1031,29 @@
|
||||
"yuque": "Exportar para Yuque"
|
||||
},
|
||||
"list": "Lista de tópicos",
|
||||
"manage": {
|
||||
"clear_selection": "Limpar Seleção",
|
||||
"delete": {
|
||||
"confirm": {
|
||||
"content": "Tem certeza de que deseja excluir {{count}} tópico(s) selecionado(s)? Esta ação não pode ser desfeita.",
|
||||
"title": "Excluir Tópicos"
|
||||
},
|
||||
"success": "Excluído(s) {{count}} tópico(s)"
|
||||
},
|
||||
"deselect_all": "Desmarcar Todos",
|
||||
"error": {
|
||||
"at_least_one": "Pelo menos um tópico deve ser mantido"
|
||||
},
|
||||
"move": {
|
||||
"button": "Mover",
|
||||
"placeholder": "Selecionar assistente de destino",
|
||||
"success": "Movido(s) {{count}} tópico(s)"
|
||||
},
|
||||
"pinned": "Tópicos Fixados",
|
||||
"selected_count": "{{count}} selecionado",
|
||||
"title": "Gerenciar Tópicos",
|
||||
"unpinned": "Tópicos Desafixados"
|
||||
},
|
||||
"move_to": "Mover para",
|
||||
"new": "Começar nova conversa",
|
||||
"pin": "Fixar tópico",
|
||||
@ -1014,6 +1064,10 @@
|
||||
"label": "Prompt do tópico",
|
||||
"tips": "Prompt do tópico: fornecer prompts adicionais para o tópico atual"
|
||||
},
|
||||
"search": {
|
||||
"placeholder": "Pesquisar tópicos...",
|
||||
"title": "Pesquisar"
|
||||
},
|
||||
"title": "Tópicos",
|
||||
"unpin": "Desfixar"
|
||||
},
|
||||
@ -1185,6 +1239,7 @@
|
||||
"saved": "Guardado",
|
||||
"search": "Pesquisar",
|
||||
"select": "Selecionar",
|
||||
"select_all": "Selecionar Tudo",
|
||||
"selected": "Selecionado",
|
||||
"selectedItems": "{{count}} itens selecionados",
|
||||
"selectedMessages": "{{count}} mensagens selecionadas",
|
||||
@ -1197,11 +1252,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "Parar",
|
||||
"subscribe": "Subscrever",
|
||||
"success": "Sucesso",
|
||||
"swap": "Trocar",
|
||||
"topics": "Tópicos",
|
||||
"unknown": "Desconhecido",
|
||||
"unnamed": "Sem nome",
|
||||
"unsubscribe": "Cancelar inscrição",
|
||||
"update_success": "Atualização bem-sucedida",
|
||||
"upload_files": "Carregar arquivo",
|
||||
"warning": "Aviso",
|
||||
@ -2128,6 +2185,7 @@
|
||||
"collapse": "[minimizar]",
|
||||
"content_placeholder": "Introduza o conteúdo da nota...",
|
||||
"copyContent": "copiar conteúdo",
|
||||
"crossPlatformRestoreWarning": "[to be translated]:Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
|
||||
"delete": "eliminar",
|
||||
"delete_confirm": "Tem a certeza de que deseja eliminar este {{type}}?",
|
||||
"delete_folder_confirm": "Tem a certeza de que deseja eliminar a pasta \"{{name}}\" e todos os seus conteúdos?",
|
||||
@ -3891,6 +3949,7 @@
|
||||
"builtinServers": "Servidores integrados",
|
||||
"builtinServersDescriptions": {
|
||||
"brave_search": "uma implementação de servidor MCP integrada com a API de pesquisa Brave, fornecendo funcionalidades de pesquisa web e local. Requer a configuração da variável de ambiente BRAVE_API_KEY",
|
||||
"browser": "Controla uma janela Electron headless via Chrome DevTools Protocol. Ferramentas: abrir URL, executar JS de linha única, reiniciar sessão.",
|
||||
"didi_mcp": "Servidor DiDi MCP que fornece serviços de transporte incluindo pesquisa de mapas, estimativa de preços, gestão de pedidos e rastreamento de motoristas. Disponível apenas na China Continental. Requer configuração da variável de ambiente DIDI_API_KEY",
|
||||
"dify_knowledge": "Implementação do servidor MCP do Dify, que fornece uma API simples para interagir com o Dify. Requer a configuração da chave Dify",
|
||||
"fetch": "servidor MCP para obter o conteúdo da página web do URL",
|
||||
@ -3943,6 +4002,7 @@
|
||||
"jsonSaveError": "Falha ao salvar configuração JSON",
|
||||
"jsonSaveSuccess": "Configuração JSON salva com sucesso",
|
||||
"logoUrl": "URL do Logotipo",
|
||||
"logs": "Registros",
|
||||
"longRunning": "Modo de execução prolongada",
|
||||
"longRunningTooltip": "Quando ativado, o servidor suporta tarefas de longa duração, redefinindo o temporizador de tempo limite ao receber notificações de progresso e estendendo o tempo máximo de tempo limite para 10 minutos.",
|
||||
"marketplaces": "Mercados",
|
||||
@ -3962,6 +4022,7 @@
|
||||
"name": "Nome",
|
||||
"newServer": "Servidor MCP",
|
||||
"noDescriptionAvailable": "Nenhuma descrição disponível no momento",
|
||||
"noLogs": "Ainda sem registos",
|
||||
"noServers": "Nenhum servidor configurado",
|
||||
"not_support": "Modelo Não Suportado",
|
||||
"npx_list": {
|
||||
|
||||
@ -31,12 +31,26 @@
|
||||
}
|
||||
},
|
||||
"gitBash": {
|
||||
"autoDetected": "Используется автоматически обнаруженный Git Bash",
|
||||
"clear": {
|
||||
"button": "Очистить пользовательский путь"
|
||||
},
|
||||
"customPath": "Используется пользовательский путь: {{path}}",
|
||||
"error": {
|
||||
"description": "Для запуска агентов в Windows требуется Git Bash. Без него агент не может работать. Пожалуйста, установите Git для Windows с",
|
||||
"recheck": "Повторная проверка установки Git Bash",
|
||||
"title": "Требуется Git Bash"
|
||||
},
|
||||
"found": {
|
||||
"title": "Git Bash настроен"
|
||||
},
|
||||
"notFound": "Git Bash не найден. Пожалуйста, сначала установите его.",
|
||||
"pick": {
|
||||
"button": "Выбрать путь Git Bash",
|
||||
"failed": "Не удалось настроить путь Git Bash",
|
||||
"invalidPath": "Выбранный файл не является допустимым исполняемым файлом Git Bash (bash.exe).",
|
||||
"title": "Выберите исполняемый файл Git Bash"
|
||||
},
|
||||
"success": "Git Bash успешно обнаружен!"
|
||||
},
|
||||
"input": {
|
||||
@ -458,6 +472,7 @@
|
||||
"button": "Импортировать",
|
||||
"error": {
|
||||
"fetch_failed": "Ошибка получения данных с URL",
|
||||
"file_required": "Сначала выберите файл",
|
||||
"invalid_format": "Неверный формат помощника: отсутствуют обязательные поля",
|
||||
"url_required": "Пожалуйста, введите URL"
|
||||
},
|
||||
@ -471,6 +486,17 @@
|
||||
"url_placeholder": "Введите JSON URL"
|
||||
},
|
||||
"manage": {
|
||||
"batch_delete": {
|
||||
"button": "Массовое удаление",
|
||||
"confirm": "Вы уверены, что хотите удалить выбранных {{count}} ассистентов?"
|
||||
},
|
||||
"batch_export": {
|
||||
"button": "Экспорт"
|
||||
},
|
||||
"mode": {
|
||||
"manage": "Управлять",
|
||||
"sort": "Сортировать"
|
||||
},
|
||||
"title": "Управление помощниками"
|
||||
},
|
||||
"my_agents": "Мои помощники",
|
||||
@ -524,7 +550,8 @@
|
||||
"low": "Меньше думать",
|
||||
"medium": "Среднее",
|
||||
"minimal": "минимальный",
|
||||
"off": "Выключить"
|
||||
"off": "Выключить",
|
||||
"xhigh": "Сверхвысокое"
|
||||
},
|
||||
"regular_phrases": {
|
||||
"add": "Добавить подсказку",
|
||||
@ -1004,6 +1031,29 @@
|
||||
"yuque": "Экспорт в Yuque"
|
||||
},
|
||||
"list": "Список топиков",
|
||||
"manage": {
|
||||
"clear_selection": "Очистить выбор",
|
||||
"delete": {
|
||||
"confirm": {
|
||||
"content": "Вы уверены, что хотите удалить выбранные темы ({{count}})? Это действие нельзя отменить.",
|
||||
"title": "Удалить темы"
|
||||
},
|
||||
"success": "Удалено {{count}} тем(ы)"
|
||||
},
|
||||
"deselect_all": "Снять выделение со всех",
|
||||
"error": {
|
||||
"at_least_one": "Должна быть сохранена хотя бы одна тема"
|
||||
},
|
||||
"move": {
|
||||
"button": "Переместить",
|
||||
"placeholder": "Выберите цель",
|
||||
"success": "Перемещено {{count}} тем(ы)"
|
||||
},
|
||||
"pinned": "Закреплённые темы",
|
||||
"selected_count": "{{count}} выбрано",
|
||||
"title": "Управление темами",
|
||||
"unpinned": "Откреплённые темы"
|
||||
},
|
||||
"move_to": "Переместить в",
|
||||
"new": "Новый топик",
|
||||
"pin": "Закрепленные темы",
|
||||
@ -1014,6 +1064,10 @@
|
||||
"label": "Тематические подсказки",
|
||||
"tips": "Тематические подсказки: Дополнительные подсказки, предоставленные для текущей темы"
|
||||
},
|
||||
"search": {
|
||||
"placeholder": "Искать темы...",
|
||||
"title": "Поиск"
|
||||
},
|
||||
"title": "Топики",
|
||||
"unpin": "Открепленные темы"
|
||||
},
|
||||
@ -1185,6 +1239,7 @@
|
||||
"saved": "Сохранено",
|
||||
"search": "Поиск",
|
||||
"select": "Выбрать",
|
||||
"select_all": "Выбрать все",
|
||||
"selected": "Выбрано",
|
||||
"selectedItems": "Выбрано {{count}} элементов",
|
||||
"selectedMessages": "Выбрано {{count}} сообщений",
|
||||
@ -1197,11 +1252,13 @@
|
||||
}
|
||||
},
|
||||
"stop": "остановить",
|
||||
"subscribe": "Подписаться",
|
||||
"success": "Успешно",
|
||||
"swap": "Поменять местами",
|
||||
"topics": "Топики",
|
||||
"unknown": "Неизвестно",
|
||||
"unnamed": "Без имени",
|
||||
"unsubscribe": "Отписаться",
|
||||
"update_success": "Обновление выполнено успешно",
|
||||
"upload_files": "Загрузить файл",
|
||||
"warning": "Предупреждение",
|
||||
@ -2128,6 +2185,7 @@
|
||||
"collapse": "Свернуть",
|
||||
"content_placeholder": "Введите содержимое заметки...",
|
||||
"copyContent": "Копировать контент",
|
||||
"crossPlatformRestoreWarning": "[to be translated]:Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
|
||||
"delete": "удалить",
|
||||
"delete_confirm": "Вы уверены, что хотите удалить этот объект {{type}}?",
|
||||
"delete_folder_confirm": "Вы уверены, что хотите удалить папку \"{{name}}\" со всем ее содержимым?",
|
||||
@ -3891,6 +3949,7 @@
|
||||
"builtinServers": "Встроенные серверы",
|
||||
"builtinServersDescriptions": {
|
||||
"brave_search": "реализация сервера MCP с интеграцией API поиска Brave, обеспечивающая функции веб-поиска и локального поиска. Требуется настройка переменной среды BRAVE_API_KEY",
|
||||
"browser": "Управление headless-окном Electron через Chrome DevTools Protocol. Инструменты: открытие URL, выполнение однострочного JS, сброс сессии.",
|
||||
"didi_mcp": "Сервер DiDi MCP, предоставляющий услуги такси, включая поиск на карте, оценку стоимости, управление заказами и отслеживание водителей. Доступен только в материковом Китае. Требует настройки переменной окружения DIDI_API_KEY",
|
||||
"dify_knowledge": "Реализация сервера MCP Dify, предоставляющая простой API для взаимодействия с Dify. Требуется настройка ключа Dify",
|
||||
"fetch": "MCP-сервер для получения содержимого веб-страниц по URL",
|
||||
@ -3943,6 +4002,7 @@
|
||||
"jsonSaveError": "Не удалось сохранить конфигурацию JSON",
|
||||
"jsonSaveSuccess": "JSON конфигурация сохранена",
|
||||
"logoUrl": "URL логотипа",
|
||||
"logs": "Журналы",
|
||||
"longRunning": "Длительный режим работы",
|
||||
"longRunningTooltip": "Включив эту опцию, сервер будет поддерживать длительные задачи. При получении уведомлений о ходе выполнения будет сброшен тайм-аут и максимальное время выполнения будет увеличено до 10 минут.",
|
||||
"marketplaces": "Торговые площадки",
|
||||
@ -3962,6 +4022,7 @@
|
||||
"name": "Имя",
|
||||
"newServer": "MCP сервер",
|
||||
"noDescriptionAvailable": "Описание отсутствует",
|
||||
"noLogs": "Логов пока нет",
|
||||
"noServers": "Серверы не настроены",
|
||||
"not_support": "Модель не поддерживается",
|
||||
"npx_list": {
|
||||
|
||||
@ -180,7 +180,8 @@ const Chat: FC<Props> = (props) => {
<HStack>
<motion.div
animate={{
marginRight: topicPosition === 'right' && showTopics ? 'var(--assistants-width)' : 0
marginRight:
topicPosition === 'right' && showTopics ? 'calc(var(--assistants-width) + var(--border-width))' : 0
}}
transition={{ duration: 0.3, ease: 'easeInOut' }}
style={{ flex: 1, display: 'flex', minWidth: 0 }}>

@ -19,6 +19,7 @@ import styled from 'styled-components'

import AssistantsDrawer from './components/AssistantsDrawer'
import ChatNavbarContent from './components/ChatNavbarContent'
import SettingsButton from './components/SettingsButton'
import UpdateAppButton from './components/UpdateAppButton'

interface Props {
@ -65,14 +66,6 @@ const HeaderNavbar: FC<Props> = ({ activeAssistant, setActiveAssistant, activeTo
})
}

// const handleUpdateModel = useCallback(
// async (model: ApiModel) => {
// if (!activeSession || !activeAgent) return
// return updateModel(activeSession.id, model.id, { showSuccessToast: false })
// },
// [activeAgent, activeSession, updateModel]
// )

return (
<NavbarHeader className="home-navbar" style={{ height: 'var(--navbar-height)' }}>
<div className="flex h-full min-w-0 flex-1 shrink items-center overflow-auto">
@ -107,6 +100,7 @@ const HeaderNavbar: FC<Props> = ({ activeAssistant, setActiveAssistant, activeTo
</div>
<HStack alignItems="center" gap={8}>
{isTopNavbar && <UpdateAppButton />}
<SettingsButton assistant={assistant} />
{isTopNavbar && (
<Tooltip title={t('navbar.expand')} mouseEnterDelay={0.8}>
<NarrowIcon onClick={handleNarrowModeToggle}>

@ -526,7 +526,8 @@ export const InputbarCore: FC<InputbarCoreProps> = ({
const handleFocus = useCallback(() => {
setInputFocus(true)
dispatch(setSearching(false))
if (quickPanel.isVisible && quickPanel.triggerInfo?.type !== 'input') {
// Don't close panel in multiple selection mode, or if triggered by input
if (quickPanel.isVisible && quickPanel.triggerInfo?.type !== 'input' && !quickPanel.multiple) {
quickPanel.close()
}
PasteService.setLastFocusedComponent('inputbar')

Some files were not shown because too many files have changed in this diff