diff --git a/.github/workflows/sync-to-gitcode.yml b/.github/workflows/sync-to-gitcode.yml new file mode 100644 index 000000000..4462ff637 --- /dev/null +++ b/.github/workflows/sync-to-gitcode.yml @@ -0,0 +1,293 @@ +name: Sync Release to GitCode + +on: + release: + types: [published] + workflow_dispatch: + inputs: + tag: + description: 'Release tag (e.g. v1.0.0)' + required: true + clean: + description: 'Clean node_modules before build' + type: boolean + default: false + +permissions: + contents: read + +jobs: + build-and-sync-to-gitcode: + runs-on: [self-hosted, windows-signing] + steps: + - name: Get tag name + id: get-tag + shell: bash + run: | + if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then + echo "tag=${{ github.event.inputs.tag }}" >> $GITHUB_OUTPUT + else + echo "tag=${{ github.event.release.tag_name }}" >> $GITHUB_OUTPUT + fi + + - name: Check out Git repository + uses: actions/checkout@v6 + with: + fetch-depth: 0 + ref: ${{ steps.get-tag.outputs.tag }} + + - name: Set package.json version + shell: bash + run: | + TAG="${{ steps.get-tag.outputs.tag }}" + VERSION="${TAG#v}" + npm version "$VERSION" --no-git-tag-version --allow-same-version + + - name: Install Node.js + uses: actions/setup-node@v6 + with: + node-version: 22 + + - name: Install corepack + shell: bash + run: corepack enable && corepack prepare yarn@4.9.1 --activate + + - name: Clean node_modules + if: ${{ github.event.inputs.clean == 'true' }} + shell: bash + run: rm -rf node_modules + + - name: Install Dependencies + shell: bash + run: yarn install + + - name: Build Windows with code signing + shell: bash + run: yarn build:win + env: + WIN_SIGN: true + CHERRY_CERT_PATH: ${{ secrets.CHERRY_CERT_PATH }} + CHERRY_CERT_KEY: ${{ secrets.CHERRY_CERT_KEY }} + CHERRY_CERT_CSP: ${{ secrets.CHERRY_CERT_CSP }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + NODE_OPTIONS: --max-old-space-size=8192 + MAIN_VITE_CHERRYAI_CLIENT_SECRET: ${{ secrets.MAIN_VITE_CHERRYAI_CLIENT_SECRET }} + MAIN_VITE_MINERU_API_KEY: ${{ secrets.MAIN_VITE_MINERU_API_KEY }} + RENDERER_VITE_AIHUBMIX_SECRET: ${{ secrets.RENDERER_VITE_AIHUBMIX_SECRET }} + RENDERER_VITE_PPIO_APP_SECRET: ${{ secrets.RENDERER_VITE_PPIO_APP_SECRET }} + + - name: List built Windows artifacts + shell: bash + run: | + echo "Built Windows artifacts:" + ls -la dist/*.exe dist/*.blockmap dist/latest*.yml + + - name: Download GitHub release assets + shell: bash + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TAG_NAME: ${{ steps.get-tag.outputs.tag }} + run: | + echo "Downloading release assets for $TAG_NAME..." + mkdir -p release-assets + cd release-assets + + # Download all assets from the release + gh release download "$TAG_NAME" \ + --repo "${{ github.repository }}" \ + --pattern "*" \ + --skip-existing + + echo "Downloaded GitHub release assets:" + ls -la + + - name: Replace Windows files with signed versions + shell: bash + run: | + echo "Replacing Windows files with signed versions..." + + # Verify signed files exist first + if ! 
ls dist/*.exe 1>/dev/null 2>&1; then + echo "ERROR: No signed .exe files found in dist/" + exit 1 + fi + + # Remove unsigned Windows files from downloaded assets + # *.exe, *.exe.blockmap, latest.yml (Windows only) + rm -f release-assets/*.exe release-assets/*.exe.blockmap release-assets/latest.yml 2>/dev/null || true + + # Copy signed Windows files with error checking + cp dist/*.exe release-assets/ || { echo "ERROR: Failed to copy .exe files"; exit 1; } + cp dist/*.exe.blockmap release-assets/ || { echo "ERROR: Failed to copy .blockmap files"; exit 1; } + cp dist/latest.yml release-assets/ || { echo "ERROR: Failed to copy latest.yml"; exit 1; } + + echo "Final release assets:" + ls -la release-assets/ + + - name: Get release info + id: release-info + shell: bash + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TAG_NAME: ${{ steps.get-tag.outputs.tag }} + LANG: C.UTF-8 + LC_ALL: C.UTF-8 + run: | + # Always use gh cli to avoid special character issues + RELEASE_NAME=$(gh release view "$TAG_NAME" --repo "${{ github.repository }}" --json name -q '.name') + # Use delimiter to safely handle special characters in release name + { + echo 'name<<EOF' + echo "$RELEASE_NAME" + echo 'EOF' + } >> $GITHUB_OUTPUT + # Extract releaseNotes from electron-builder.yml (from releaseNotes: | to end of file, remove 4-space indent) + sed -n '/releaseNotes: |/,$ { /releaseNotes: |/d; s/^    //; p }' electron-builder.yml > release_body.txt + + - name: Create GitCode release and upload files + shell: bash + env: + GITCODE_TOKEN: ${{ secrets.GITCODE_TOKEN }} + GITCODE_OWNER: ${{ vars.GITCODE_OWNER }} + GITCODE_REPO: ${{ vars.GITCODE_REPO }} + GITCODE_API_URL: ${{ vars.GITCODE_API_URL }} + TAG_NAME: ${{ steps.get-tag.outputs.tag }} + RELEASE_NAME: ${{ steps.release-info.outputs.name }} + LANG: C.UTF-8 + LC_ALL: C.UTF-8 + run: | + # Validate required environment variables + if [ -z "$GITCODE_TOKEN" ]; then + echo "ERROR: GITCODE_TOKEN is not set" + exit 1 + fi + if [ -z "$GITCODE_OWNER" ]; then + echo "ERROR: GITCODE_OWNER is not set" + exit 1 + fi + if [ -z "$GITCODE_REPO" ]; then + echo "ERROR: GITCODE_REPO is not set" + exit 1 + fi + + API_URL="${GITCODE_API_URL:-https://api.gitcode.com/api/v5}" + + echo "Creating GitCode release..." + echo "Tag: $TAG_NAME" + echo "Repo: $GITCODE_OWNER/$GITCODE_REPO" + + # Step 1: Create release + # Use --rawfile to read body directly from file, avoiding shell variable encoding issues + jq -n \ + --arg tag "$TAG_NAME" \ + --arg name "$RELEASE_NAME" \ + --rawfile body release_body.txt \ + '{ + tag_name: $tag, + name: $name, + body: $body, + target_commitish: "main" + }' > /tmp/release_payload.json + + RELEASE_RESPONSE=$(curl -s -w "\n%{http_code}" -X POST \ + --connect-timeout 30 --max-time 60 \ + "${API_URL}/repos/${GITCODE_OWNER}/${GITCODE_REPO}/releases" \ + -H "Content-Type: application/json; charset=utf-8" \ + -H "Authorization: Bearer ${GITCODE_TOKEN}" \ + --data-binary "@/tmp/release_payload.json") + + HTTP_CODE=$(echo "$RELEASE_RESPONSE" | tail -n1) + RESPONSE_BODY=$(echo "$RELEASE_RESPONSE" | sed '$d') + + if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ]; then + echo "Release created successfully" + else + echo "Warning: Release creation returned HTTP $HTTP_CODE" + echo "$RESPONSE_BODY" + exit 1 + fi + + # Step 2: Upload files to release + echo "Uploading files to GitCode release..."
+ + # Function to upload a single file with retry + upload_file() { + local file="$1" + local filename=$(basename "$file") + local max_retries=3 + local retry=0 + + echo "Uploading: $filename" + + # URL encode the filename + encoded_filename=$(printf '%s' "$filename" | jq -sRr @uri) + + while [ $retry -lt $max_retries ]; do + # Get upload URL + UPLOAD_INFO=$(curl -s --connect-timeout 30 --max-time 60 \ + -H "Authorization: Bearer ${GITCODE_TOKEN}" \ + "${API_URL}/repos/${GITCODE_OWNER}/${GITCODE_REPO}/releases/${TAG_NAME}/upload_url?file_name=${encoded_filename}") + + UPLOAD_URL=$(echo "$UPLOAD_INFO" | jq -r '.url // empty') + + if [ -n "$UPLOAD_URL" ]; then + # Write headers to temp file to avoid shell escaping issues + echo "$UPLOAD_INFO" | jq -r '.headers | to_entries[] | "header = \"" + .key + ": " + .value + "\""' > /tmp/upload_headers.txt + + # Upload file using PUT with headers from file + UPLOAD_RESPONSE=$(curl -s -w "\n%{http_code}" -X PUT \ + -K /tmp/upload_headers.txt \ + --data-binary "@${file}" \ + "$UPLOAD_URL") + + HTTP_CODE=$(echo "$UPLOAD_RESPONSE" | tail -n1) + RESPONSE_BODY=$(echo "$UPLOAD_RESPONSE" | sed '$d') + + if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ]; then + echo " Uploaded: $filename" + return 0 + else + echo " Failed (HTTP $HTTP_CODE), retry $((retry + 1))/$max_retries" + echo " Response: $RESPONSE_BODY" + fi + else + echo " Failed to get upload URL, retry $((retry + 1))/$max_retries" + echo " Response: $UPLOAD_INFO" + fi + + retry=$((retry + 1)) + [ $retry -lt $max_retries ] && sleep 3 + done + + echo " Failed: $filename after $max_retries retries" + exit 1 + } + + # Upload non-yml/json files first + for file in release-assets/*; do + if [ -f "$file" ]; then + filename=$(basename "$file") + if [[ ! "$filename" =~ \.(yml|yaml|json)$ ]]; then + upload_file "$file" + fi + fi + done + + # Upload yml/json files last + for file in release-assets/*; do + if [ -f "$file" ]; then + filename=$(basename "$file") + if [[ "$filename" =~ \.(yml|yaml|json)$ ]]; then + upload_file "$file" + fi + fi + done + + echo "GitCode release sync completed!" 
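The upload step above leans on two quoting-safe tricks: jq -sRr @uri percent-encodes the asset filename for the upload_url query string, and curl's -K option reads the API-supplied upload headers from a config file rather than the command line, so arbitrary header values never pass through shell quoting. As a hypothetical illustration (the actual GitCode response shape may differ), a response of {"headers":{"Content-Type":"application/octet-stream"}} would make the jq filter write the single line header = "Content-Type: application/octet-stream" into /tmp/upload_headers.txt, which is native curl config-file syntax.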
+ + - name: Cleanup temp files + if: always() + shell: bash + run: | + rm -f /tmp/release_payload.json /tmp/upload_headers.txt release_body.txt + rm -rf release-assets/ diff --git a/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch b/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch similarity index 84% rename from .yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch rename to .yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch index 973ddc62a..6fbe30e08 100644 --- a/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch +++ b/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch @@ -1,8 +1,8 @@ diff --git a/dist/index.js b/dist/index.js -index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a70ea2b5a2 100644 +index 130094d194ea1e8e7d3027d07d82465741192124..4d13dcee8c962ca9ee8f1c3d748f8ffe6a3cfb47 100644 --- a/dist/index.js +++ b/dist/index.js -@@ -274,6 +274,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)( +@@ -290,6 +290,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)( message: import_v42.z.object({ role: import_v42.z.literal("assistant").nullish(), content: import_v42.z.string().nullish(), @@ -10,7 +10,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7 tool_calls: import_v42.z.array( import_v42.z.object({ id: import_v42.z.string().nullish(), -@@ -340,6 +341,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)( +@@ -356,6 +357,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)( delta: import_v42.z.object({ role: import_v42.z.enum(["assistant"]).nullish(), content: import_v42.z.string().nullish(), @@ -18,7 +18,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7 tool_calls: import_v42.z.array( import_v42.z.object({ index: import_v42.z.number(), -@@ -795,6 +797,13 @@ var OpenAIChatLanguageModel = class { +@@ -814,6 +816,13 @@ var OpenAIChatLanguageModel = class { if (text != null && text.length > 0) { content.push({ type: "text", text }); } @@ -32,7 +32,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7 for (const toolCall of (_a = choice.message.tool_calls) != null ? 
_a : []) { content.push({ type: "tool-call", -@@ -876,6 +885,7 @@ var OpenAIChatLanguageModel = class { +@@ -895,6 +904,7 @@ var OpenAIChatLanguageModel = class { }; let metadataExtracted = false; let isActiveText = false; @@ -40,7 +40,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7 const providerMetadata = { openai: {} }; return { stream: response.pipeThrough( -@@ -933,6 +943,21 @@ var OpenAIChatLanguageModel = class { +@@ -952,6 +962,21 @@ var OpenAIChatLanguageModel = class { return; } const delta = choice.delta; @@ -62,7 +62,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7 if (delta.content != null) { if (!isActiveText) { controller.enqueue({ type: "text-start", id: "0" }); -@@ -1045,6 +1070,9 @@ var OpenAIChatLanguageModel = class { +@@ -1064,6 +1089,9 @@ var OpenAIChatLanguageModel = class { } }, flush(controller) { diff --git a/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.53-4b77f4cf29.patch b/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.62-23ae56f8c8.patch similarity index 92% rename from .yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.53-4b77f4cf29.patch rename to .yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.62-23ae56f8c8.patch index 4481b58f3..62ab76757 100644 --- a/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.53-4b77f4cf29.patch +++ b/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.62-23ae56f8c8.patch @@ -1,5 +1,5 @@ diff --git a/sdk.mjs b/sdk.mjs -index bf429a344b7d59f70aead16b639f949b07688a81..f77d50cc5d3fb04292cb3ac7fa7085d02dcc628f 100755 +index dea7766a3432a1e809f12d6daba4f2834a219689..e0b02ef73da177ba32b903887d7bbbeaa08cc6d3 100755 --- a/sdk.mjs +++ b/sdk.mjs @@ -6250,7 +6250,7 @@ function createAbortController(maxListeners = DEFAULT_MAX_LISTENERS) { @@ -11,7 +11,7 @@ index bf429a344b7d59f70aead16b639f949b07688a81..f77d50cc5d3fb04292cb3ac7fa7085d0 import { createInterface } from "readline"; // ../src/utils/fsOperations.ts -@@ -6619,18 +6619,11 @@ class ProcessTransport { +@@ -6644,18 +6644,11 @@ class ProcessTransport { const errorMessage = isNativeBinary(pathToClaudeCodeExecutable) ? `Claude Code native binary not found at ${pathToClaudeCodeExecutable}. Please ensure Claude Code is installed via native installer or specify a valid path with options.pathToClaudeCodeExecutable.` : `Claude Code executable not found at ${pathToClaudeCodeExecutable}. 
Is options.pathToClaudeCodeExecutable set?`; throw new ReferenceError(errorMessage); } diff --git a/.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch b/.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch new file mode 100644 index 000000000..ea1438153 --- /dev/null +++ b/.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch @@ -0,0 +1,145 @@ +diff --git a/dist/index.d.ts b/dist/index.d.ts +index 8dd9b498050dbecd8dd6b901acf1aa8ca38a49af..ed644349c9d38fe2a66b2fb44214f7c18eb97f89 100644 +--- a/dist/index.d.ts ++++ b/dist/index.d.ts +@@ -4,7 +4,7 @@ import { z } from 'zod/v4'; + + type OllamaChatModelId = "athene-v2" | "athene-v2:72b" | "aya-expanse" | "aya-expanse:8b" | "aya-expanse:32b" | "codegemma" | "codegemma:2b" | "codegemma:7b" | "codellama" | "codellama:7b" | "codellama:13b" | "codellama:34b" | "codellama:70b" | "codellama:code" | "codellama:python" | "command-r" | "command-r:35b" | "command-r-plus" | "command-r-plus:104b" | "command-r7b" | "command-r7b:7b" | "deepseek-r1" | "deepseek-r1:1.5b" | "deepseek-r1:7b" | "deepseek-r1:8b" | "deepseek-r1:14b" | "deepseek-r1:32b" | "deepseek-r1:70b" | "deepseek-r1:671b" | "deepseek-coder-v2" | "deepseek-coder-v2:16b" | "deepseek-coder-v2:236b" | "deepseek-v3" | "deepseek-v3:671b" | "devstral" | "devstral:24b" | "dolphin3" | "dolphin3:8b" | "exaone3.5" | "exaone3.5:2.4b" | "exaone3.5:7.8b" | "exaone3.5:32b" | "falcon2" | "falcon2:11b" | "falcon3" | "falcon3:1b" | "falcon3:3b" | "falcon3:7b" | "falcon3:10b" | "firefunction-v2" | "firefunction-v2:70b" | "gemma" | "gemma:2b" | "gemma:7b" | "gemma2" | "gemma2:2b" | "gemma2:9b" | "gemma2:27b" | "gemma3" | "gemma3:1b" | "gemma3:4b" | "gemma3:12b" | "gemma3:27b" | "granite3-dense" | "granite3-dense:2b" | "granite3-dense:8b" | "granite3-guardian" | "granite3-guardian:2b" | "granite3-guardian:8b" | "granite3-moe" | "granite3-moe:1b" | "granite3-moe:3b" | "granite3.1-dense" | "granite3.1-dense:2b" | "granite3.1-dense:8b" | "granite3.1-moe" | "granite3.1-moe:1b" | "granite3.1-moe:3b" | "llama2" | "llama2:7b" | "llama2:13b" | "llama2:70b" | "llama3" | "llama3:8b" | "llama3:70b" | "llama3-chatqa" | "llama3-chatqa:8b" | "llama3-chatqa:70b" | "llama3-gradient" | "llama3-gradient:8b" | "llama3-gradient:70b" | "llama3.1" | "llama3.1:8b" | "llama3.1:70b" | "llama3.1:405b" | "llama3.2" | "llama3.2:1b" | "llama3.2:3b" | "llama3.2-vision" | "llama3.2-vision:11b" | "llama3.2-vision:90b" | "llama3.3" | "llama3.3:70b" | "llama4" | "llama4:16x17b" | "llama4:128x17b" | "llama-guard3" | "llama-guard3:1b" | "llama-guard3:8b" | "llava" | "llava:7b" | "llava:13b" | "llava:34b" | "llava-llama3" | "llava-llama3:8b" | "llava-phi3" | "llava-phi3:3.8b" | "marco-o1" | "marco-o1:7b" | "mistral" | "mistral:7b" | "mistral-large" | "mistral-large:123b" | "mistral-nemo" | "mistral-nemo:12b" | "mistral-small" | "mistral-small:22b" | "mixtral" | "mixtral:8x7b" | "mixtral:8x22b" | "moondream" | "moondream:1.8b" | "openhermes" | "openhermes:v2.5" | "nemotron" | "nemotron:70b" | "nemotron-mini" | "nemotron-mini:4b" | "olmo" | "olmo:7b" | "olmo:13b" | "opencoder" | "opencoder:1.5b" | "opencoder:8b" | "phi3" | "phi3:3.8b" | "phi3:14b" | "phi3.5" | "phi3.5:3.8b" | "phi4" | "phi4:14b" | "qwen" | "qwen:7b" | "qwen:14b" | "qwen:32b" | "qwen:72b" | "qwen:110b" | "qwen2" | "qwen2:0.5b" | "qwen2:1.5b" | "qwen2:7b" | "qwen2:72b" | "qwen2.5" | "qwen2.5:0.5b" | "qwen2.5:1.5b" | "qwen2.5:3b" | "qwen2.5:7b" | "qwen2.5:14b" | "qwen2.5:32b" | "qwen2.5:72b" | "qwen2.5-coder" | "qwen2.5-coder:0.5b" | "qwen2.5-coder:1.5b" | 
"qwen2.5-coder:3b" | "qwen2.5-coder:7b" | "qwen2.5-coder:14b" | "qwen2.5-coder:32b" | "qwen3" | "qwen3:0.6b" | "qwen3:1.7b" | "qwen3:4b" | "qwen3:8b" | "qwen3:14b" | "qwen3:30b" | "qwen3:32b" | "qwen3:235b" | "qwq" | "qwq:32b" | "sailor2" | "sailor2:1b" | "sailor2:8b" | "sailor2:20b" | "shieldgemma" | "shieldgemma:2b" | "shieldgemma:9b" | "shieldgemma:27b" | "smallthinker" | "smallthinker:3b" | "smollm" | "smollm:135m" | "smollm:360m" | "smollm:1.7b" | "tinyllama" | "tinyllama:1.1b" | "tulu3" | "tulu3:8b" | "tulu3:70b" | (string & {}); + declare const ollamaProviderOptions: z.ZodObject<{ +- think: z.ZodOptional; ++ think: z.ZodOptional]>>; + options: z.ZodOptional; + repeat_last_n: z.ZodOptional; +@@ -27,9 +27,11 @@ interface OllamaCompletionSettings { + * the model's thinking from the model's output. When disabled, the model will not think + * and directly output the content. + * ++ * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking. ++ * + * Only supported by certain models like DeepSeek R1 and Qwen 3. + */ +- think?: boolean; ++ think?: boolean | 'low' | 'medium' | 'high'; + /** + * Echo back the prompt in addition to the completion. + */ +@@ -146,7 +148,7 @@ declare const ollamaEmbeddingProviderOptions: z.ZodObject<{ + type OllamaEmbeddingProviderOptions = z.infer; + + declare const ollamaCompletionProviderOptions: z.ZodObject<{ +- think: z.ZodOptional; ++ think: z.ZodOptional]>>; + user: z.ZodOptional; + suffix: z.ZodOptional; + echo: z.ZodOptional; +diff --git a/dist/index.js b/dist/index.js +index 35b5142ce8476ce2549ed7c2ec48e7d8c46c90d9..2ef64dc9a4c2be043e6af608241a6a8309a5a69f 100644 +--- a/dist/index.js ++++ b/dist/index.js +@@ -158,7 +158,7 @@ function getResponseMetadata({ + + // src/completion/ollama-completion-language-model.ts + var ollamaCompletionProviderOptions = import_v42.z.object({ +- think: import_v42.z.boolean().optional(), ++ think: import_v42.z.union([import_v42.z.boolean(), import_v42.z.enum(['low', 'medium', 'high'])]).optional(), + user: import_v42.z.string().optional(), + suffix: import_v42.z.string().optional(), + echo: import_v42.z.boolean().optional() +@@ -662,7 +662,7 @@ function convertToOllamaChatMessages({ + const images = content.filter((part) => part.type === "file" && part.mediaType.startsWith("image/")).map((part) => part.data); + messages.push({ + role: "user", +- content: userText.length > 0 ? userText : [], ++ content: userText.length > 0 ? userText : '', + images: images.length > 0 ? images : void 0 + }); + break; +@@ -813,9 +813,11 @@ var ollamaProviderOptions = import_v44.z.object({ + * the model's thinking from the model's output. When disabled, the model will not think + * and directly output the content. + * ++ * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking. ++ * + * Only supported by certain models like DeepSeek R1 and Qwen 3. + */ +- think: import_v44.z.boolean().optional(), ++ think: import_v44.z.union([import_v44.z.boolean(), import_v44.z.enum(['low', 'medium', 'high'])]).optional(), + options: import_v44.z.object({ + num_ctx: import_v44.z.number().optional(), + repeat_last_n: import_v44.z.number().optional(), +@@ -929,14 +931,16 @@ var OllamaRequestBuilder = class { + prompt, + systemMessageMode: "system" + }), +- temperature, +- top_p: topP, + max_output_tokens: maxOutputTokens, + ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && { + format: responseFormat.schema != null ? 
responseFormat.schema : "json" + }, + think: (_a = ollamaOptions == null ? void 0 : ollamaOptions.think) != null ? _a : false, +- options: (_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : void 0 ++ options: { ++ ...temperature !== void 0 && { temperature }, ++ ...topP !== void 0 && { top_p: topP }, ++ ...((_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : {}) ++ } + }; + } + }; +diff --git a/dist/index.mjs b/dist/index.mjs +index e2a634a78d80ac9542f2cc4f96cf2291094b10cf..67b23efce3c1cf4f026693d3ff9246988a3ef26e 100644 +--- a/dist/index.mjs ++++ b/dist/index.mjs +@@ -144,7 +144,7 @@ function getResponseMetadata({ + + // src/completion/ollama-completion-language-model.ts + var ollamaCompletionProviderOptions = z2.object({ +- think: z2.boolean().optional(), ++ think: z2.union([z2.boolean(), z2.enum(['low', 'medium', 'high'])]).optional(), + user: z2.string().optional(), + suffix: z2.string().optional(), + echo: z2.boolean().optional() +@@ -662,7 +662,7 @@ function convertToOllamaChatMessages({ + const images = content.filter((part) => part.type === "file" && part.mediaType.startsWith("image/")).map((part) => part.data); + messages.push({ + role: "user", +- content: userText.length > 0 ? userText : [], ++ content: userText.length > 0 ? userText : '', + images: images.length > 0 ? images : void 0 + }); + break; +@@ -815,9 +815,11 @@ var ollamaProviderOptions = z4.object({ + * the model's thinking from the model's output. When disabled, the model will not think + * and directly output the content. + * ++ * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking. ++ * + * Only supported by certain models like DeepSeek R1 and Qwen 3. + */ +- think: z4.boolean().optional(), ++ think: z4.union([z4.boolean(), z4.enum(['low', 'medium', 'high'])]).optional(), + options: z4.object({ + num_ctx: z4.number().optional(), + repeat_last_n: z4.number().optional(), +@@ -931,14 +933,16 @@ var OllamaRequestBuilder = class { + prompt, + systemMessageMode: "system" + }), +- temperature, +- top_p: topP, + max_output_tokens: maxOutputTokens, + ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && { + format: responseFormat.schema != null ? responseFormat.schema : "json" + }, + think: (_a = ollamaOptions == null ? void 0 : ollamaOptions.think) != null ? _a : false, +- options: (_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : void 0 ++ options: { ++ ...temperature !== void 0 && { temperature }, ++ ...topP !== void 0 && { top_p: topP }, ++ ...((_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : {}) ++ } + }; + } + }; diff --git a/electron-builder.yml b/electron-builder.yml index d736d99b3..7d1b76ade 100644 --- a/electron-builder.yml +++ b/electron-builder.yml @@ -135,66 +135,60 @@ artifactBuildCompleted: scripts/artifact-build-completed.js releaseInfo: releaseNotes: | - Cherry Studio 1.7.2 - Stability & Enhancement Update + Cherry Studio 1.7.3 - Feature & Stability Update - This release focuses on stability improvements, bug fixes, and quality-of-life enhancements. + This release brings new features, UI improvements, and important bug fixes. 
+ + ✨ New Features + - Add MCP server log viewer for better debugging + - Support custom Git Bash path configuration + - Add print to PDF and save as HTML for mini program webviews + - Add CherryIN API host selection settings + - Enhance assistant presets with sort and batch delete modes + - Open URL directly for SelectionAssistant search action + - Enhance web search tool switching with provider-specific context 🔧 Improvements - - Enhanced update dialog functionality and state management - - Improved ImageViewer context menu UX - - Better temperature and top_p parameter handling - - User-configurable stream options for OpenAI API - - Translation feature now supports document files - - 🤖 AI & Models - - Added explicit thinking token support for Gemini 3 Pro Image - - Updated DeepSeek logic to match DeepSeek v3.2 - - Updated AiOnly default models - - Updated AI model configurations to latest versions - - ♿ Accessibility - - Improved screen reader (NVDA) support with aria-label attributes - - Added Slovak language support for spell check + - Remove Intel Ultra limit for OVMS + - Improve settings tab and assistant item UI 🐛 Bug Fixes - - Fixed Quick Assistant shortcut registration issue - - Fixed UI freeze on multi-file selection via batch processing - - Fixed assistant default model update when editing model capabilities - - Fixed provider handling and API key rotation logic - - Fixed OVMS API URL path formation - - Fixed custom parameters placement for Vercel AI Gateway - - Fixed topic message blocks clearing - - Fixed input bar blocking enter send while generating + - Fix stack overflow with base64 images + - Fix infinite loop in knowledge queue processing + - Fix quick panel closing in multiple selection mode + - Fix thinking timer not stopping when reply is aborted + - Fix ThinkingButton icon display for fixed reasoning mode + - Fix knowledge query prioritization and intent prompt + - Fix OpenRouter embeddings support + - Fix SelectionAction window resize on Windows + - Add gpustack provider support for qwen3 thinking mode - Cherry Studio 1.7.2 - 稳定性与功能增强更新 + Cherry Studio 1.7.3 - 功能与稳定性更新 - 本次更新专注于稳定性改进、问题修复和用户体验提升。 + 本次更新带来新功能、界面改进和重要的问题修复。 + + ✨ 新功能 + - 新增 MCP 服务器日志查看器,便于调试 + - 支持自定义 Git Bash 路径配置 + - 小程序 webview 支持打印 PDF 和保存为 HTML + - 新增 CherryIN API 主机选择设置 + - 助手预设增强:支持排序和批量删除模式 + - 划词助手搜索操作直接打开 URL + - 增强网页搜索工具切换逻辑,支持服务商特定上下文 🔧 功能改进 - - 增强更新对话框功能和状态管理 - - 优化图片查看器右键菜单体验 - - 改进温度和 top_p 参数处理逻辑 - - 支持用户自定义 OpenAI API 流式选项 - - 翻译功能现已支持文档文件 - - 🤖 AI 与模型 - - 为 Gemini 3 Pro Image 添加显式思考 token 支持 - - 更新 DeepSeek 逻辑以适配 DeepSeek v3.2 - - 更新 AiOnly 默认模型 - - 更新 AI 模型配置至最新版本 - - ♿ 无障碍支持 - - 改进屏幕阅读器 (NVDA) 支持,添加 aria-label 属性 - - 新增斯洛伐克语拼写检查支持 + - 移除 OVMS 的 Intel Ultra 限制 + - 优化设置标签页和助手项目 UI 🐛 问题修复 - - 修复快捷助手无法注册快捷键的问题 - - 修复多文件选择时 UI 冻结问题(通过批处理优化) - - 修复编辑模型能力时助手默认模型更新问题 - - 修复服务商处理和 API 密钥轮换逻辑 - - 修复 OVMS API URL 路径格式问题 - - 修复 Vercel AI Gateway 自定义参数位置问题 - - 修复话题消息块清理问题 - - 修复生成时输入框阻止回车发送的问题 + - 修复 base64 图片导致的栈溢出问题 + - 修复知识库队列处理的无限循环问题 + - 修复多选模式下快捷面板意外关闭的问题 + - 修复回复中止时思考计时器未停止的问题 + - 修复固定推理模式下思考按钮图标显示问题 + - 修复知识库查询优先级和意图提示 + - 修复 OpenRouter 嵌入模型支持 + - 修复 Windows 上划词助手窗口大小调整问题 + - 为 gpustack 服务商添加 qwen3 思考模式支持 diff --git a/package.json b/package.json index 0c588cf08..82fe1dce5 100644 --- a/package.json +++ b/package.json @@ -84,7 +84,7 @@ "release:ai-sdk-provider": "yarn workspace @cherrystudio/ai-sdk-provider version patch --immediate && yarn workspace @cherrystudio/ai-sdk-provider build && yarn workspace @cherrystudio/ai-sdk-provider npm publish --access public" }, "dependencies": { - 
"@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.53#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.53-4b77f4cf29.patch", + "@anthropic-ai/claude-agent-sdk": "patch:@anthropic-ai/claude-agent-sdk@npm%3A0.1.62#~/.yarn/patches/@anthropic-ai-claude-agent-sdk-npm-0.1.62-23ae56f8c8.patch", "@libsql/client": "0.14.0", "@libsql/win32-x64-msvc": "^0.4.7", "@napi-rs/system-ocr": "patch:@napi-rs/system-ocr@npm%3A1.0.2#~/.yarn/patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch", @@ -122,7 +122,7 @@ "@ai-sdk/google-vertex": "^3.0.79", "@ai-sdk/huggingface": "^0.0.10", "@ai-sdk/mistral": "^2.0.24", - "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch", + "@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch", "@ai-sdk/perplexity": "^2.0.20", "@ai-sdk/test-server": "^0.0.1", "@ant-design/v5-patch-for-react-19": "^1.0.3", @@ -146,7 +146,7 @@ "@cherrystudio/embedjs-ollama": "^0.1.31", "@cherrystudio/embedjs-openai": "^0.1.31", "@cherrystudio/extension-table-plus": "workspace:^", - "@cherrystudio/openai": "^6.9.0", + "@cherrystudio/openai": "^6.12.0", "@cherrystudio/ui": "workspace:*", "@dnd-kit/core": "^6.3.1", "@dnd-kit/modifiers": "^9.0.0", @@ -324,7 +324,7 @@ "motion": "^12.10.5", "notion-helper": "^1.3.22", "npx-scope-finder": "^1.2.0", - "ollama-ai-provider-v2": "^1.5.5", + "ollama-ai-provider-v2": "patch:ollama-ai-provider-v2@npm%3A1.5.5#~/.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch", "oxlint": "^1.22.0", "oxlint-tsgolint": "^0.2.0", "p-queue": "^8.1.0", @@ -420,7 +420,7 @@ "@langchain/openai@npm:>=0.1.0 <0.6.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch", "@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch", "@langchain/openai@npm:>=0.2.0 <0.7.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch", - "@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch", + "@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch", "@ai-sdk/google@npm:^2.0.40": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch", "@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch" }, diff --git a/packages/aiCore/package.json b/packages/aiCore/package.json index a648dcf3c..6fc0f5334 100644 --- a/packages/aiCore/package.json +++ b/packages/aiCore/package.json @@ -40,7 +40,7 @@ }, "dependencies": { "@ai-sdk/anthropic": "^2.0.49", - "@ai-sdk/azure": "^2.0.74", + "@ai-sdk/azure": "^2.0.87", "@ai-sdk/deepseek": "^1.0.31", "@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch", "@ai-sdk/provider": "^2.0.0", diff --git a/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/StreamEventManager.ts b/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/StreamEventManager.ts index 59a425712..c30c2015f 100644 --- a/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/StreamEventManager.ts +++ b/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/StreamEventManager.ts @@ -62,7 +62,7 @@ export class 
StreamEventManager { const recursiveResult = await context.recursiveCall(recursiveParams) if (recursiveResult && recursiveResult.fullStream) { - await this.pipeRecursiveStream(controller, recursiveResult.fullStream, context) + await this.pipeRecursiveStream(controller, recursiveResult.fullStream) } else { console.warn('[MCP Prompt] No fullstream found in recursive result:', recursiveResult) } @@ -74,11 +74,7 @@ export class StreamEventManager { /** * 将递归流的数据传递到当前流 */ - private async pipeRecursiveStream( - controller: StreamController, - recursiveStream: ReadableStream, - context?: AiRequestContext - ): Promise<void> { + private async pipeRecursiveStream(controller: StreamController, recursiveStream: ReadableStream): Promise<void> { const reader = recursiveStream.getReader() try { while (true) { @@ -86,18 +82,14 @@ export class StreamEventManager { if (done) { break } + if (value.type === 'start') { + continue + } + if (value.type === 'finish') { - // 迭代的流不发finish,但需要累加其 usage - if (value.usage && context?.accumulatedUsage) { - this.accumulateUsage(context.accumulatedUsage, value.usage) - } break } - // 对于 finish-step 类型,累加其 usage - if (value.type === 'finish-step' && value.usage && context?.accumulatedUsage) { - this.accumulateUsage(context.accumulatedUsage, value.usage) - } - // 将递归流的数据传递到当前流 + controller.enqueue(value) } } finally { @@ -135,10 +127,8 @@ export class StreamEventManager { // 构建新的对话消息 const newMessages: ModelMessage[] = [ ...(context.originalParams.messages || []), - { - role: 'assistant', - content: textBuffer - }, + // 只有当 textBuffer 有内容时才添加 assistant 消息,避免空消息导致 API 错误 + ...(textBuffer ? [{ role: 'assistant' as const, content: textBuffer }] : []), { role: 'user', content: toolResultsText @@ -161,7 +151,7 @@ export class StreamEventManager { /** * 累加 usage 数据 */ - private accumulateUsage(target: any, source: any): void { + accumulateUsage(target: any, source: any): void { if (!target || !source) return // 累加各种 token 类型 diff --git a/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/promptToolUsePlugin.ts b/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/promptToolUsePlugin.ts index 274fdcee5..22e8b5a60 100644 --- a/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/promptToolUsePlugin.ts +++ b/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/promptToolUsePlugin.ts @@ -411,7 +411,10 @@ export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => { } } - // 如果没有执行工具调用,直接传递原始finish-step事件 + // 如果没有执行工具调用,累加 usage 后透传 finish-step 事件 + if (chunk.usage && context.accumulatedUsage) { + streamEventManager.accumulateUsage(context.accumulatedUsage, chunk.usage) + } controller.enqueue(chunk) // 清理状态 diff --git a/packages/aiCore/src/core/plugins/built-in/webSearchPlugin/helper.ts b/packages/aiCore/src/core/plugins/built-in/webSearchPlugin/helper.ts index 30ea887b8..7fe193d40 100644 --- a/packages/aiCore/src/core/plugins/built-in/webSearchPlugin/helper.ts +++ b/packages/aiCore/src/core/plugins/built-in/webSearchPlugin/helper.ts @@ -5,6 +5,7 @@ import type { InferToolInput, InferToolOutput, Tool } from 'ai' import { createOpenRouterOptions, createXaiOptions, mergeProviderOptions } from '../../../options' import type { ProviderOptionsMap } from '../../../options/types' +import type { AiRequestContext } from '../../' import type { OpenRouterSearchConfig } from './openrouter' /** @@ -94,28 +95,84 @@ export type WebSearchToolInputSchema = { 'openai-chat': InferToolInput } -export const switchWebSearchTool = (config: WebSearchPluginConfig, params: any) => { - 
if (config.openai) { - if (!params.tools) params.tools = {} - params.tools.web_search = openai.tools.webSearch(config.openai) - } else if (config['openai-chat']) { - if (!params.tools) params.tools = {} - params.tools.web_search_preview = openai.tools.webSearchPreview(config['openai-chat']) - } else if (config.anthropic) { - if (!params.tools) params.tools = {} - params.tools.web_search = anthropic.tools.webSearch_20250305(config.anthropic) - } else if (config.google) { - // case 'google-vertex': - if (!params.tools) params.tools = {} - params.tools.web_search = google.tools.googleSearch(config.google || {}) - } else if (config.xai) { - const searchOptions = createXaiOptions({ - searchParameters: { ...config.xai, mode: 'on' } - }) - params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions) - } else if (config.openrouter) { - const searchOptions = createOpenRouterOptions(config.openrouter) - params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions) +/** + * Helper function to ensure params.tools object exists + */ +const ensureToolsObject = (params: any) => { + if (!params.tools) params.tools = {} +} + +/** + * Helper function to apply tool-based web search configuration + */ +const applyToolBasedSearch = (params: any, toolName: string, toolInstance: any) => { + ensureToolsObject(params) + params.tools[toolName] = toolInstance +} + +/** + * Helper function to apply provider options-based web search configuration + */ +const applyProviderOptionsSearch = (params: any, searchOptions: any) => { + params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions) +} + +export const switchWebSearchTool = (config: WebSearchPluginConfig, params: any, context?: AiRequestContext) => { + const providerId = context?.providerId + + // Provider-specific configuration map + const providerHandlers: Record<string, () => void> = { + openai: () => { + const cfg = config.openai ?? DEFAULT_WEB_SEARCH_CONFIG.openai + applyToolBasedSearch(params, 'web_search', openai.tools.webSearch(cfg)) + }, + 'openai-chat': () => { + const cfg = (config['openai-chat'] ?? DEFAULT_WEB_SEARCH_CONFIG['openai-chat']) as OpenAISearchPreviewConfig + applyToolBasedSearch(params, 'web_search_preview', openai.tools.webSearchPreview(cfg)) + }, + anthropic: () => { + const cfg = config.anthropic ?? DEFAULT_WEB_SEARCH_CONFIG.anthropic + applyToolBasedSearch(params, 'web_search', anthropic.tools.webSearch_20250305(cfg)) + }, + google: () => { + const cfg = (config.google ?? DEFAULT_WEB_SEARCH_CONFIG.google) as GoogleSearchConfig + applyToolBasedSearch(params, 'web_search', google.tools.googleSearch(cfg)) + }, + xai: () => { + const cfg = config.xai ?? DEFAULT_WEB_SEARCH_CONFIG.xai + const searchOptions = createXaiOptions({ searchParameters: { ...cfg, mode: 'on' } }) + applyProviderOptionsSearch(params, searchOptions) + }, + openrouter: () => { + const cfg = (config.openrouter ?? 
DEFAULT_WEB_SEARCH_CONFIG.openrouter) as OpenRouterSearchConfig + const searchOptions = createOpenRouterOptions(cfg) + applyProviderOptionsSearch(params, searchOptions) + } } + + // Try provider-specific handler first + const handler = providerId && providerHandlers[providerId] + if (handler) { + handler() + return params + } + + // Fallback: apply based on available config keys (prioritized order) + const fallbackOrder: Array<keyof WebSearchPluginConfig> = [ + 'openai', + 'openai-chat', + 'anthropic', + 'google', + 'xai', + 'openrouter' + ] + + for (const key of fallbackOrder) { + if (config[key]) { + providerHandlers[key]() + break + } + } + return params } diff --git a/packages/aiCore/src/core/plugins/built-in/webSearchPlugin/index.ts b/packages/aiCore/src/core/plugins/built-in/webSearchPlugin/index.ts index a46df7dd4..e02fd179f 100644 --- a/packages/aiCore/src/core/plugins/built-in/webSearchPlugin/index.ts +++ b/packages/aiCore/src/core/plugins/built-in/webSearchPlugin/index.ts @@ -17,8 +17,22 @@ export const webSearchPlugin = (config: WebSearchPluginConfig = DEFAULT_WEB_SEAR name: 'webSearch', enforce: 'pre', - transformParams: async (params: any) => { - switchWebSearchTool(config, params) + transformParams: async (params: any, context) => { + let { providerId } = context + + // For cherryin providers, extract the actual provider from the model's provider string + // Expected format: "cherryin.{actualProvider}" (e.g., "cherryin.gemini") + if (providerId === 'cherryin' || providerId === 'cherryin-chat') { + const provider = params.model?.provider + if (provider && typeof provider === 'string' && provider.includes('.')) { + const extractedProviderId = provider.split('.')[1] + if (extractedProviderId) { + providerId = extractedProviderId + } + } + } + + switchWebSearchTool(config, params, { ...context, providerId }) return params } }) diff --git a/packages/shared/IpcChannel.ts b/packages/shared/IpcChannel.ts index 38a979135..75a52534b 100644 --- a/packages/shared/IpcChannel.ts +++ b/packages/shared/IpcChannel.ts @@ -55,6 +55,8 @@ export enum IpcChannel { Webview_SetOpenLinkExternal = 'webview:set-open-link-external', Webview_SetSpellCheckEnabled = 'webview:set-spell-check-enabled', Webview_SearchHotkey = 'webview:search-hotkey', + Webview_PrintToPDF = 'webview:print-to-pdf', + Webview_SaveAsHTML = 'webview:save-as-html', // Open Open_Path = 'open:path', @@ -90,6 +92,8 @@ export enum IpcChannel { Mcp_AbortTool = 'mcp:abort-tool', Mcp_GetServerVersion = 'mcp:get-server-version', Mcp_Progress = 'mcp:progress', + Mcp_GetServerLogs = 'mcp:get-server-logs', + Mcp_ServerLog = 'mcp:server-log', // Python Python_Execute = 'python:execute', @@ -255,6 +259,8 @@ export enum IpcChannel { System_GetHostname = 'system:getHostname', System_GetCpuName = 'system:getCpuName', System_CheckGitBash = 'system:checkGitBash', + System_GetGitBashPath = 'system:getGitBashPath', + System_SetGitBashPath = 'system:setGitBashPath', // DevTools System_ToggleDevTools = 'system:toggleDevTools', diff --git a/packages/shared/anthropic/index.ts b/packages/shared/anthropic/index.ts index bff143d11..b9e9cb884 100644 --- a/packages/shared/anthropic/index.ts +++ b/packages/shared/anthropic/index.ts @@ -88,16 +88,11 @@ export function getSdkClient( } }) } - let baseURL = + const baseURL = provider.type === 'anthropic' ? 
provider.apiHost : (provider.anthropicApiHost && provider.anthropicApiHost.trim()) || provider.apiHost - // Anthropic SDK automatically appends /v1 to all endpoints (like /v1/messages, /v1/models) - // We need to strip api version from baseURL to avoid duplication (e.g., /v3/v1/models) - // formatProviderApiHost adds /v1 for AI SDK compatibility, but Anthropic SDK needs it removed - baseURL = baseURL.replace(/\/v\d+(?:alpha|beta)?(?=\/|$)/i, '') - logger.debug('Anthropic API baseURL', { baseURL, providerId: provider.id }) if (provider.id === 'aihubmix') { diff --git a/packages/shared/config/prompts.ts b/packages/shared/config/prompts.ts index 7083cd8c5..d4c625f72 100644 --- a/packages/shared/config/prompts.ts +++ b/packages/shared/config/prompts.ts @@ -306,7 +306,7 @@ export const SEARCH_SUMMARY_PROMPT_KNOWLEDGE_ONLY = ` **Use user's language to rephrase the question.** Follow these guidelines: 1. If the question is a simple writing task, greeting (e.g., Hi, Hello, How are you), or does not require searching for information (unless the greeting contains a follow-up question), return 'not_needed' in the 'question' XML block. This indicates that no search is required. - 2. For knowledge, You need rewrite user query into 'rewrite' XML block with one alternative version while preserving the original intent and meaning. Also include the original question in the 'question' block. + 2. For knowledge, You need rewrite user query into 'rewrite' XML block with one alternative version while preserving the original intent and meaning. Also include the rephrased or decomposed question(s) in the 'question' block. 3. Always return the rephrased question inside the 'question' XML block. 4. Always wrap the rephrased question in the appropriate XML blocks: use <knowledge> for queries that can be answered from a pre-existing knowledge base. Ensure that the rephrased question is always contained within a <question> block inside the wrapper. 5.
*use knowledge to rephrase the question* diff --git a/packages/shared/config/types.ts b/packages/shared/config/types.ts index 8fba6399f..7dff53c75 100644 --- a/packages/shared/config/types.ts +++ b/packages/shared/config/types.ts @@ -23,6 +23,14 @@ export type MCPProgressEvent = { progress: number // 0-1 range } +export type MCPServerLogEntry = { + timestamp: number + level: 'debug' | 'info' | 'warn' | 'error' | 'stderr' | 'stdout' + message: string + data?: any + source?: string +} + export type WebviewKeyEvent = { webviewId: number key: string diff --git a/resources/scripts/install-ovms.js b/resources/scripts/install-ovms.js index e4a5cf044..f2be80bff 100644 --- a/resources/scripts/install-ovms.js +++ b/resources/scripts/install-ovms.js @@ -11,7 +11,7 @@ const OVMS_EX_URL = 'https://gitcode.com/gcw_ggDjjkY3/kjfile/releases/download/d /** * error code: - * 101: Unsupported CPU (not Intel Ultra) + * 101: Unsupported CPU (not Intel) * 102: Unsupported platform (not Windows) * 103: Download failed * 104: Installation failed @@ -213,8 +213,8 @@ async function installOvms() { console.log(`CPU Name: ${cpuName}`) // Check if CPU name contains "Ultra" - if (!cpuName.toLowerCase().includes('intel') || !cpuName.toLowerCase().includes('ultra')) { - console.error('OVMS installation requires an Intel(R) Core(TM) Ultra CPU.') + if (!cpuName.toLowerCase().includes('intel')) { + console.error('OVMS installation requires an Intel CPU.') return 101 } diff --git a/scripts/win-sign.js b/scripts/win-sign.js index f9b37c3ae..cdbfe11e1 100644 --- a/scripts/win-sign.js +++ b/scripts/win-sign.js @@ -5,9 +5,17 @@ exports.default = async function (configuration) { const { path } = configuration if (configuration.path) { try { + const certPath = process.env.CHERRY_CERT_PATH + const keyContainer = process.env.CHERRY_CERT_KEY + const csp = process.env.CHERRY_CERT_CSP + + if (!certPath || !keyContainer || !csp) { + throw new Error('CHERRY_CERT_PATH, CHERRY_CERT_KEY or CHERRY_CERT_CSP is not set') + } + console.log('Start code signing...') console.log('Signing file:', path) - const signCommand = `signtool sign /tr http://timestamp.comodoca.com /td sha256 /fd sha256 /a /v "${path}"` + const signCommand = `signtool sign /tr http://timestamp.comodoca.com /td sha256 /fd sha256 /v /f "${certPath}" /csp "${csp}" /k "${keyContainer}" "${path}"` execSync(signCommand, { stdio: 'inherit' }) console.log('Code signing completed') } catch (error) { diff --git a/src/main/index.ts b/src/main/index.ts index ef6ec3ab8..0456797ab 100644 --- a/src/main/index.ts +++ b/src/main/index.ts @@ -20,8 +20,8 @@ import { registerIpc } from './ipc' import { agentService } from './services/agents' import { apiServerService } from './services/ApiServerService' import { appMenuService } from './services/AppMenuService' -import mcpService from './services/MCPService' import { nodeTraceService } from './services/NodeTraceService' +import mcpService from './services/MCPService' import powerMonitorService from './services/PowerMonitorService' import { CHERRY_STUDIO_PROTOCOL, diff --git a/src/main/ipc.ts b/src/main/ipc.ts index f04a91743..55fa3a17b 100644 --- a/src/main/ipc.ts +++ b/src/main/ipc.ts @@ -8,7 +8,7 @@ import { loggerService } from '@logger' import { isLinux, isMac, isPortable, isWin } from '@main/constant' import { generateSignature } from '@main/integration/cherryai' import anthropicService from '@main/services/AnthropicService' -import { findGitBash, getBinaryPath, isBinaryExists, runInstallScript } from '@main/utils/process' +import { 
findGitBash, getBinaryPath, isBinaryExists, runInstallScript, validateGitBashPath } from '@main/utils/process' import { handleZoomFactor } from '@main/utils/zoom' import type { SpanEntity, TokenUsage } from '@mcp-trace/trace-core' import { MIN_WINDOW_HEIGHT, MIN_WINDOW_WIDTH } from '@shared/config/constant' @@ -36,7 +36,7 @@ import appService from './services/AppService' import AppUpdater from './services/AppUpdater' import BackupManager from './services/BackupManager' import { codeToolsService } from './services/CodeToolsService' -import { configManager } from './services/ConfigManager' +import { ConfigKeys, configManager } from './services/ConfigManager' import CopilotService from './services/CopilotService' import DxtService from './services/DxtService' import { ExportService } from './services/ExportService' @@ -500,7 +500,8 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) { } try { - const bashPath = findGitBash() + const customPath = configManager.get(ConfigKeys.GitBashPath) as string | undefined + const bashPath = findGitBash(customPath) if (bashPath) { logger.info('Git Bash is available', { path: bashPath }) @@ -514,6 +515,35 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) { return false } }) + + ipcMain.handle(IpcChannel.System_GetGitBashPath, () => { + if (!isWin) { + return null + } + + const customPath = configManager.get(ConfigKeys.GitBashPath) as string | undefined + return customPath ?? null + }) + + ipcMain.handle(IpcChannel.System_SetGitBashPath, (_, newPath: string | null) => { + if (!isWin) { + return false + } + + if (!newPath) { + configManager.set(ConfigKeys.GitBashPath, null) + return true + } + + const validated = validateGitBashPath(newPath) + if (!validated) { + return false + } + + configManager.set(ConfigKeys.GitBashPath, validated) + return true + }) + ipcMain.handle(IpcChannel.System_ToggleDevTools, (e) => { const win = BrowserWindow.fromWebContents(e.sender) win && win.webContents.toggleDevTools() @@ -766,6 +796,7 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) { ipcMain.handle(IpcChannel.Mcp_CheckConnectivity, mcpService.checkMcpConnectivity) ipcMain.handle(IpcChannel.Mcp_AbortTool, mcpService.abortTool) ipcMain.handle(IpcChannel.Mcp_GetServerVersion, mcpService.getServerVersion) + ipcMain.handle(IpcChannel.Mcp_GetServerLogs, mcpService.getServerLogs) // DXT upload handler ipcMain.handle(IpcChannel.Mcp_UploadDxt, async (event, fileBuffer: ArrayBuffer, fileName: string) => { @@ -844,6 +875,17 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) { webview.session.setSpellCheckerEnabled(isEnable) }) + // Webview print and save handlers + ipcMain.handle(IpcChannel.Webview_PrintToPDF, async (_, webviewId: number) => { + const { printWebviewToPDF } = await import('./services/WebviewService') + return await printWebviewToPDF(webviewId) + }) + + ipcMain.handle(IpcChannel.Webview_SaveAsHTML, async (_, webviewId: number) => { + const { saveWebviewAsHTML } = await import('./services/WebviewService') + return await saveWebviewAsHTML(webviewId) + }) + // store sync storeSyncService.registerIpcHandler() diff --git a/src/main/services/ConfigManager.ts b/src/main/services/ConfigManager.ts index b6f4e877e..1767ce7b4 100644 --- a/src/main/services/ConfigManager.ts +++ b/src/main/services/ConfigManager.ts @@ -26,7 +26,8 @@ export enum ConfigKeys { DisableHardwareAcceleration = 'disableHardwareAcceleration', Proxy = 'proxy', EnableDeveloperMode = 'enableDeveloperMode', 
- ClientId = 'clientId' + ClientId = 'clientId', + GitBashPath = 'gitBashPath' } export class ConfigManager { diff --git a/src/main/services/MCPService.ts b/src/main/services/MCPService.ts index 7eb5330a2..ec636dd22 100644 --- a/src/main/services/MCPService.ts +++ b/src/main/services/MCPService.ts @@ -34,6 +34,7 @@ import { import { nanoid } from '@reduxjs/toolkit' import { HOME_CHERRY_DIR } from '@shared/config/constant' import type { MCPProgressEvent } from '@shared/config/types' +import type { MCPServerLogEntry } from '@shared/config/types' import { IpcChannel } from '@shared/IpcChannel' import { defaultAppHeaders } from '@shared/utils' import { @@ -56,6 +57,7 @@ import * as z from 'zod' import DxtService from './DxtService' import { CallBackServer } from './mcp/oauth/callback' import { McpOAuthClientProvider } from './mcp/oauth/provider' +import { ServerLogBuffer } from './mcp/ServerLogBuffer' import { windowService } from './WindowService' // Generic type for caching wrapped functions @@ -142,6 +144,7 @@ class McpService { private pendingClients: Map<string, Promise<Client>> = new Map() private dxtService = new DxtService() private activeToolCalls: Map<string, AbortController> = new Map() + private serverLogs = new ServerLogBuffer(200) constructor() { this.initClient = this.initClient.bind(this) @@ -159,6 +162,7 @@ class McpService { this.cleanup = this.cleanup.bind(this) this.checkMcpConnectivity = this.checkMcpConnectivity.bind(this) this.getServerVersion = this.getServerVersion.bind(this) + this.getServerLogs = this.getServerLogs.bind(this) } private getServerKey(server: MCPServer): string { @@ -172,6 +176,19 @@ class McpService { }) } + private emitServerLog(server: MCPServer, entry: MCPServerLogEntry) { + const serverKey = this.getServerKey(server) + this.serverLogs.append(serverKey, entry) + const mainWindow = windowService.getMainWindow() + if (mainWindow) { + mainWindow.webContents.send(IpcChannel.Mcp_ServerLog, { ...entry, serverId: server.id }) + } + } + + public getServerLogs(_: Electron.IpcMainInvokeEvent, server: MCPServer): MCPServerLogEntry[] { + return this.serverLogs.get(this.getServerKey(server)) + } + async initClient(server: MCPServer): Promise<Client> { const serverKey = this.getServerKey(server) @@ -366,9 +383,18 @@ class McpService { } const stdioTransport = new StdioClientTransport(transportOptions) - stdioTransport.stderr?.on('data', (data) => - getServerLogger(server).debug(`Stdio stderr`, { data: data.toString() }) - ) + stdioTransport.stderr?.on('data', (data) => { + const msg = data.toString() + getServerLogger(server).debug(`Stdio stderr`, { data: msg }) + this.emitServerLog(server, { + timestamp: Date.now(), + level: 'stderr', + message: msg.trim(), + source: 'stdio' + }) + }) + // StdioClientTransport does not expose stdout as a readable stream for raw logging + // (stdout is reserved for JSON-RPC). Avoid attaching a listener that would never fire. 
return stdioTransport } else { throw new Error('Either baseUrl or command must be provided') @@ -436,6 +462,13 @@ class McpService { } } + this.emitServerLog(server, { + timestamp: Date.now(), + level: 'info', + message: 'Server connected', + source: 'client' + }) + // Store the new client in the cache this.clients.set(serverKey, client) @@ -446,9 +479,22 @@ class McpService { this.clearServerCache(serverKey) logger.debug(`Activated server: ${server.name}`) + this.emitServerLog(server, { + timestamp: Date.now(), + level: 'info', + message: 'Server activated', + source: 'client' + }) return client } catch (error) { getServerLogger(server).error(`Error activating server ${server.name}`, error as Error) + this.emitServerLog(server, { + timestamp: Date.now(), + level: 'error', + message: `Error activating server: ${(error as Error)?.message}`, + data: redactSensitive(error), + source: 'client' + }) throw error } } finally { @@ -506,6 +552,16 @@ class McpService { // Set up logging message notification handler client.setNotificationHandler(LoggingMessageNotificationSchema, async (notification) => { logger.debug(`Message from server ${server.name}:`, notification.params) + const msg = notification.params?.message + if (msg) { + this.emitServerLog(server, { + timestamp: Date.now(), + level: (notification.params?.level as MCPServerLogEntry['level']) || 'info', + message: typeof msg === 'string' ? msg : JSON.stringify(msg), + data: redactSensitive(notification.params?.data), + source: notification.params?.logger || 'server' + }) + } }) getServerLogger(server).debug(`Set up notification handlers`) @@ -540,6 +596,7 @@ class McpService { this.clients.delete(serverKey) // Clear all caches for this server this.clearServerCache(serverKey) + this.serverLogs.remove(serverKey) } else { logger.warn(`No client found for server`, { serverKey }) } @@ -548,6 +605,12 @@ class McpService { async stopServer(_: Electron.IpcMainInvokeEvent, server: MCPServer) { const serverKey = this.getServerKey(server) getServerLogger(server).debug(`Stopping server`) + this.emitServerLog(server, { + timestamp: Date.now(), + level: 'info', + message: 'Stopping server', + source: 'client' + }) await this.closeClient(serverKey) } @@ -574,6 +637,12 @@ class McpService { async restartServer(_: Electron.IpcMainInvokeEvent, server: MCPServer) { getServerLogger(server).debug(`Restarting server`) const serverKey = this.getServerKey(server) + this.emitServerLog(server, { + timestamp: Date.now(), + level: 'info', + message: 'Restarting server', + source: 'client' + }) await this.closeClient(serverKey) // Clear cache before restarting to ensure fresh data this.clearServerCache(serverKey) @@ -606,9 +675,22 @@ class McpService { // Attempt to list tools as a way to check connectivity await client.listTools() getServerLogger(server).debug(`Connectivity check successful`) + this.emitServerLog(server, { + timestamp: Date.now(), + level: 'info', + message: 'Connectivity check successful', + source: 'connectivity' + }) return true } catch (error) { getServerLogger(server).error(`Connectivity check failed`, error as Error) + this.emitServerLog(server, { + timestamp: Date.now(), + level: 'error', + message: `Connectivity check failed: ${(error as Error).message}`, + data: redactSensitive(error), + source: 'connectivity' + }) // Close the client if connectivity check fails to ensure a clean state for the next attempt const serverKey = this.getServerKey(server) await this.closeClient(serverKey) diff --git a/src/main/services/WebviewService.ts 
b/src/main/services/WebviewService.ts index fb2049de7..7af008bd7 100644 --- a/src/main/services/WebviewService.ts +++ b/src/main/services/WebviewService.ts @@ -1,5 +1,6 @@ import { IpcChannel } from '@shared/IpcChannel' -import { app, session, shell, webContents } from 'electron' +import { app, dialog, session, shell, webContents } from 'electron' +import { promises as fs } from 'fs' /** * init the useragent of the webview session @@ -53,11 +54,17 @@ const attachKeyboardHandler = (contents: Electron.WebContents) => { return } - const isFindShortcut = (input.control || input.meta) && key === 'f' - const isEscape = key === 'escape' - const isEnter = key === 'enter' + // Helper to check if this is a shortcut we handle + const isHandledShortcut = (k: string) => { + const isFindShortcut = (input.control || input.meta) && k === 'f' + const isPrintShortcut = (input.control || input.meta) && k === 'p' + const isSaveShortcut = (input.control || input.meta) && k === 's' + const isEscape = k === 'escape' + const isEnter = k === 'enter' + return isFindShortcut || isPrintShortcut || isSaveShortcut || isEscape || isEnter + } - if (!isFindShortcut && !isEscape && !isEnter) { + if (!isHandledShortcut(key)) { return } @@ -66,11 +73,20 @@ const attachKeyboardHandler = (contents: Electron.WebContents) => { return } + const isFindShortcut = (input.control || input.meta) && key === 'f' + const isPrintShortcut = (input.control || input.meta) && key === 'p' + const isSaveShortcut = (input.control || input.meta) && key === 's' + // Always prevent Cmd/Ctrl+F to override the guest page's native find dialog if (isFindShortcut) { event.preventDefault() } + // Prevent default print/save dialogs and handle them with custom logic + if (isPrintShortcut || isSaveShortcut) { + event.preventDefault() + } + // Send the hotkey event to the renderer // The renderer will decide whether to preventDefault for Escape and Enter // based on whether the search bar is visible @@ -100,3 +116,130 @@ export function initWebviewHotkeys() { attachKeyboardHandler(contents) }) } + +/** + * Print webview content to PDF + * @param webviewId The webview webContents id + * @returns Path to saved PDF file or null if user cancelled + */ +export async function printWebviewToPDF(webviewId: number): Promise<string | null> { + const webview = webContents.fromId(webviewId) + if (!webview) { + throw new Error('Webview not found') + } + + try { + // Get the page title for default filename + const pageTitle = await webview.executeJavaScript('document.title || "webpage"').catch(() => 'webpage') + // Sanitize filename by removing invalid characters + const sanitizedTitle = pageTitle.replace(/[<>:"/\\|?*]/g, '-').substring(0, 100) + const defaultFilename = sanitizedTitle ? 
`${sanitizedTitle}.pdf` : `webpage-${Date.now()}.pdf` + + // Show save dialog + const { canceled, filePath } = await dialog.showSaveDialog({ + title: 'Save as PDF', + defaultPath: defaultFilename, + filters: [{ name: 'PDF Files', extensions: ['pdf'] }] + }) + + if (canceled || !filePath) { + return null + } + + // Generate PDF with settings to capture full page + const pdfData = await webview.printToPDF({ + margins: { + marginType: 'default' + }, + printBackground: true, + landscape: false, + pageSize: 'A4', + preferCSSPageSize: true + }) + + // Save PDF to file + await fs.writeFile(filePath, pdfData) + + return filePath + } catch (error) { + throw new Error(`Failed to print to PDF: ${(error as Error).message}`) + } +} + +/** + * Save webview content as HTML + * @param webviewId The webview webContents id + * @returns Path to saved HTML file or null if user cancelled + */ +export async function saveWebviewAsHTML(webviewId: number): Promise<string | null> { + const webview = webContents.fromId(webviewId) + if (!webview) { + throw new Error('Webview not found') + } + + try { + // Get the page title for default filename + const pageTitle = await webview.executeJavaScript('document.title || "webpage"').catch(() => 'webpage') + // Sanitize filename by removing invalid characters + const sanitizedTitle = pageTitle.replace(/[<>:"/\\|?*]/g, '-').substring(0, 100) + const defaultFilename = sanitizedTitle ? `${sanitizedTitle}.html` : `webpage-${Date.now()}.html` + + // Show save dialog + const { canceled, filePath } = await dialog.showSaveDialog({ + title: 'Save as HTML', + defaultPath: defaultFilename, + filters: [ + { name: 'HTML Files', extensions: ['html', 'htm'] }, + { name: 'All Files', extensions: ['*'] } + ] + }) + + if (canceled || !filePath) { + return null + } + + // Get the HTML content with safe error handling + const html = await webview.executeJavaScript(` + (() => { + try { + // Build complete DOCTYPE string if present + let doctype = ''; + if (document.doctype) { + const dt = document.doctype; + doctype = '<!DOCTYPE ' + dt.name + (dt.publicId ? ' PUBLIC "' + dt.publicId + '"' : '') + (!dt.publicId && dt.systemId ? ' SYSTEM' : '') + (dt.systemId ? ' "' + dt.systemId + '"' : '') + '>'; + } + return doctype + (document.documentElement?.outerHTML || ''); + } catch (error) { + // Fallback: just return the HTML without DOCTYPE if there's an error + return document.documentElement?.outerHTML || ''; + } + })() + `) + + // Save HTML to file + await fs.writeFile(filePath, html, 'utf-8') + + return filePath + } catch (error) { + throw new Error(`Failed to save as HTML: ${(error as Error).message}`) + } +} diff --git a/src/main/services/__tests__/ServerLogBuffer.test.ts b/src/main/services/__tests__/ServerLogBuffer.test.ts new file mode 100644 index 000000000..0b7abe91e --- /dev/null +++ b/src/main/services/__tests__/ServerLogBuffer.test.ts @@ -0,0 +1,29 @@ +import { describe, expect, it } from 'vitest' + +import { ServerLogBuffer } from '../mcp/ServerLogBuffer' + +describe('ServerLogBuffer', () => { + it('keeps a bounded number of entries per server', () => { + const buffer = new ServerLogBuffer(3) + const key = 'srv' + + buffer.append(key, { timestamp: 1, level: 'info', message: 'a' }) + buffer.append(key, { timestamp: 2, level: 'info', message: 'b' }) + buffer.append(key, { timestamp: 3, level: 'info', message: 'c' }) + buffer.append(key, { timestamp: 4, level: 'info', message: 'd' }) + + const logs = buffer.get(key) + expect(logs).toHaveLength(3) + expect(logs[0].message).toBe('b') + expect(logs[2].message).toBe('d') + }) + + it('isolates entries by server key', () => { + const buffer = new ServerLogBuffer(5) + buffer.append('one', { timestamp: 1, level: 'info', message: 'a' }) +
buffer.append('two', { timestamp: 2, level: 'info', message: 'b' }) + + expect(buffer.get('one')).toHaveLength(1) + expect(buffer.get('two')).toHaveLength(1) + }) +}) diff --git a/src/main/services/agents/services/claudecode/index.ts b/src/main/services/agents/services/claudecode/index.ts index 31f6a2c6b..7b1a43ab8 100644 --- a/src/main/services/agents/services/claudecode/index.ts +++ b/src/main/services/agents/services/claudecode/index.ts @@ -15,6 +15,8 @@ import { query } from '@anthropic-ai/claude-agent-sdk' import { preferenceService } from '@data/PreferenceService' import { loggerService } from '@logger' import { validateModelId } from '@main/apiServer/utils' +import { ConfigKeys, configManager } from '@main/services/ConfigManager' +import { validateGitBashPath } from '@main/utils/process' import getLoginShellEnvironment from '@main/utils/shell-env' import { app } from 'electron' @@ -111,6 +113,8 @@ class ClaudeCodeService implements AgentServiceInterface { Object.entries(loginShellEnv).filter(([key]) => !key.toLowerCase().endsWith('_proxy')) ) as Record<string, string> + const customGitBashPath = validateGitBashPath(configManager.get(ConfigKeys.GitBashPath) as string | undefined) + const env = { ...loginShellEnvWithoutProxies, // TODO: fix the proxy api server @@ -130,7 +134,8 @@ class ClaudeCodeService implements AgentServiceInterface { // Set CLAUDE_CONFIG_DIR to app's userData directory to avoid path encoding issues // on Windows when the username contains non-ASCII characters (e.g., Chinese characters) // This prevents the SDK from using the user's home directory which may have encoding problems - CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude') + CLAUDE_CONFIG_DIR: path.join(app.getPath('userData'), '.claude'), + ...(customGitBashPath ? { CLAUDE_CODE_GIT_BASH_PATH: customGitBashPath } : {}) } const errorChunks: string[] = [] diff --git a/src/main/services/mcp/ServerLogBuffer.ts b/src/main/services/mcp/ServerLogBuffer.ts new file mode 100644 index 000000000..01c45f373 --- /dev/null +++ b/src/main/services/mcp/ServerLogBuffer.ts @@ -0,0 +1,36 @@ +export type MCPServerLogEntry = { + timestamp: number + level: 'debug' | 'info' | 'warn' | 'error' | 'stderr' | 'stdout' + message: string + data?: any + source?: string +} + +/** + * Lightweight ring buffer for per-server MCP logs. + */ +export class ServerLogBuffer { + private maxEntries: number + private logs: Map<string, MCPServerLogEntry[]> = new Map() + + constructor(maxEntries = 200) { + this.maxEntries = maxEntries + } + + append(serverKey: string, entry: MCPServerLogEntry) { + const list = this.logs.get(serverKey) ?? [] + list.push(entry) + if (list.length > this.maxEntries) { + list.splice(0, list.length - this.maxEntries) + } + this.logs.set(serverKey, list) + } + + get(serverKey: string): MCPServerLogEntry[] { + return [...(this.logs.get(serverKey) ??
[])] + } + + remove(serverKey: string) { + this.logs.delete(serverKey) + } +} diff --git a/src/main/utils/__tests__/process.test.ts b/src/main/utils/__tests__/process.test.ts index 45c0f8b42..0485ec5fa 100644 --- a/src/main/utils/__tests__/process.test.ts +++ b/src/main/utils/__tests__/process.test.ts @@ -3,7 +3,7 @@ import fs from 'fs' import path from 'path' import { beforeEach, describe, expect, it, vi } from 'vitest' -import { findExecutable, findGitBash } from '../process' +import { findExecutable, findGitBash, validateGitBashPath } from '../process' // Mock dependencies vi.mock('child_process') @@ -289,7 +289,133 @@ describe.skipIf(process.platform !== 'win32')('process utilities', () => { }) }) + describe('validateGitBashPath', () => { + it('returns null when path is null', () => { + const result = validateGitBashPath(null) + + expect(result).toBeNull() + }) + + it('returns null when path is undefined', () => { + const result = validateGitBashPath(undefined) + + expect(result).toBeNull() + }) + + it('returns normalized path when valid bash.exe exists', () => { + const customPath = 'C:\\PortableGit\\bin\\bash.exe' + vi.mocked(fs.existsSync).mockImplementation((p) => p === 'C:\\PortableGit\\bin\\bash.exe') + + const result = validateGitBashPath(customPath) + + expect(result).toBe('C:\\PortableGit\\bin\\bash.exe') + }) + + it('returns null when file does not exist', () => { + vi.mocked(fs.existsSync).mockReturnValue(false) + + const result = validateGitBashPath('C:\\missing\\bash.exe') + + expect(result).toBeNull() + }) + + it('returns null when path is not bash.exe', () => { + const customPath = 'C:\\PortableGit\\bin\\git.exe' + vi.mocked(fs.existsSync).mockReturnValue(true) + + const result = validateGitBashPath(customPath) + + expect(result).toBeNull() + }) + }) + describe('findGitBash', () => { + describe('customPath parameter', () => { + beforeEach(() => { + delete process.env.CLAUDE_CODE_GIT_BASH_PATH + }) + + it('uses customPath when valid', () => { + const customPath = 'C:\\CustomGit\\bin\\bash.exe' + vi.mocked(fs.existsSync).mockImplementation((p) => p === customPath) + + const result = findGitBash(customPath) + + expect(result).toBe(customPath) + expect(execFileSync).not.toHaveBeenCalled() + }) + + it('falls back when customPath is invalid', () => { + const customPath = 'C:\\Invalid\\bash.exe' + const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe' + const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe' + + vi.mocked(fs.existsSync).mockImplementation((p) => { + if (p === customPath) return false + if (p === gitPath) return true + if (p === bashPath) return true + return false + }) + + vi.mocked(execFileSync).mockReturnValue(gitPath) + + const result = findGitBash(customPath) + + expect(result).toBe(bashPath) + }) + + it('prioritizes customPath over env override', () => { + const customPath = 'C:\\CustomGit\\bin\\bash.exe' + const envPath = 'C:\\EnvGit\\bin\\bash.exe' + process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath + + vi.mocked(fs.existsSync).mockImplementation((p) => p === customPath || p === envPath) + + const result = findGitBash(customPath) + + expect(result).toBe(customPath) + }) + }) + + describe('env override', () => { + beforeEach(() => { + delete process.env.CLAUDE_CODE_GIT_BASH_PATH + }) + + it('uses CLAUDE_CODE_GIT_BASH_PATH when valid', () => { + const envPath = 'C:\\OverrideGit\\bin\\bash.exe' + process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath + + vi.mocked(fs.existsSync).mockImplementation((p) => p === envPath) + + const result = findGitBash() + + 
expect(result).toBe(envPath) + expect(execFileSync).not.toHaveBeenCalled() + }) + + it('falls back when CLAUDE_CODE_GIT_BASH_PATH is invalid', () => { + const envPath = 'C:\\Invalid\\bash.exe' + const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe' + const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe' + + process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath + + vi.mocked(fs.existsSync).mockImplementation((p) => { + if (p === envPath) return false + if (p === gitPath) return true + if (p === bashPath) return true + return false + }) + + vi.mocked(execFileSync).mockReturnValue(gitPath) + + const result = findGitBash() + + expect(result).toBe(bashPath) + }) + }) + describe('git.exe path derivation', () => { it('should derive bash.exe from standard Git installation (Git/cmd/git.exe)', () => { const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe' diff --git a/src/main/utils/process.ts b/src/main/utils/process.ts index b59a37a04..7175af7e7 100644 --- a/src/main/utils/process.ts +++ b/src/main/utils/process.ts @@ -131,15 +131,37 @@ export function findExecutable(name: string): string | null { /** * Find Git Bash executable on Windows + * @param customPath - Optional custom path from config * @returns Full path to bash.exe or null if not found */ -export function findGitBash(): string | null { +export function findGitBash(customPath?: string | null): string | null { // Git Bash is Windows-only if (!isWin) { return null } - // 1. Find git.exe and derive bash.exe path + // 1. Check custom path from config first + if (customPath) { + const validated = validateGitBashPath(customPath) + if (validated) { + logger.debug('Using custom Git Bash path from config', { path: validated }) + return validated + } + logger.warn('Custom Git Bash path provided but invalid', { path: customPath }) + } + + // 2. Check environment variable override + const envOverride = process.env.CLAUDE_CODE_GIT_BASH_PATH + if (envOverride) { + const validated = validateGitBashPath(envOverride) + if (validated) { + logger.debug('Using CLAUDE_CODE_GIT_BASH_PATH override for bash.exe', { path: validated }) + return validated + } + logger.warn('CLAUDE_CODE_GIT_BASH_PATH provided but path is invalid', { path: envOverride }) + } + + // 3. Find git.exe and derive bash.exe path const gitPath = findExecutable('git') if (gitPath) { // Try multiple possible locations for bash.exe relative to git.exe @@ -164,7 +186,7 @@ export function findGitBash(): string | null { }) } - // 2. Fallback: check common Git Bash paths directly + // 4. 
Fallback: check common Git Bash paths directly const commonBashPaths = [ path.join(process.env.ProgramFiles || 'C:\\Program Files', 'Git', 'bin', 'bash.exe'), path.join(process.env['ProgramFiles(x86)'] || 'C:\\Program Files (x86)', 'Git', 'bin', 'bash.exe'), @@ -181,3 +203,25 @@ export function findGitBash(): string | null { logger.debug('Git Bash not found - checked git derivation and common paths') return null } + +export function validateGitBashPath(customPath?: string | null): string | null { + if (!customPath) { + return null + } + + const resolved = path.resolve(customPath) + + if (!fs.existsSync(resolved)) { + logger.warn('Custom Git Bash path does not exist', { path: resolved }) + return null + } + + const isExe = resolved.toLowerCase().endsWith('bash.exe') + if (!isExe) { + logger.warn('Custom Git Bash path is not bash.exe', { path: resolved }) + return null + } + + logger.debug('Validated custom Git Bash path', { path: resolved }) + return resolved +} diff --git a/src/preload/index.ts index fe8441676..0d50ea5f6 100644 --- a/src/preload/index.ts +++ b/src/preload/index.ts @@ -5,6 +5,7 @@ import type { SpanContext } from '@opentelemetry/api' import type { TerminalConfig } from '@shared/config/constant' import type { LogLevel, LogSourceWithContext } from '@shared/config/logger' import type { FileChangeEvent, WebviewKeyEvent } from '@shared/config/types' +import type { MCPServerLogEntry } from '@shared/config/types' import type { CacheSyncMessage } from '@shared/data/cache/cacheTypes' import type { PreferenceDefaultScopeType, @@ -129,7 +130,10 @@ const api = { getDeviceType: () => ipcRenderer.invoke(IpcChannel.System_GetDeviceType), getHostname: () => ipcRenderer.invoke(IpcChannel.System_GetHostname), getCpuName: () => ipcRenderer.invoke(IpcChannel.System_GetCpuName), - checkGitBash: (): Promise<boolean> => ipcRenderer.invoke(IpcChannel.System_CheckGitBash) + checkGitBash: (): Promise<boolean> => ipcRenderer.invoke(IpcChannel.System_CheckGitBash), + getGitBashPath: (): Promise<string | null> => ipcRenderer.invoke(IpcChannel.System_GetGitBashPath), + setGitBashPath: (newPath: string | null): Promise<boolean> => + ipcRenderer.invoke(IpcChannel.System_SetGitBashPath, newPath) }, devTools: { toggle: () => ipcRenderer.invoke(IpcChannel.System_ToggleDevTools) }, @@ -378,7 +382,16 @@ }, abortTool: (callId: string) => ipcRenderer.invoke(IpcChannel.Mcp_AbortTool, callId), getServerVersion: (server: MCPServer): Promise<string | null> => - ipcRenderer.invoke(IpcChannel.Mcp_GetServerVersion, server) + ipcRenderer.invoke(IpcChannel.Mcp_GetServerVersion, server), + getServerLogs: (server: MCPServer): Promise<MCPServerLogEntry[]> => + ipcRenderer.invoke(IpcChannel.Mcp_GetServerLogs, server), + onServerLog: (callback: (log: MCPServerLogEntry & { serverId?: string }) => void) => { + const listener = (_event: Electron.IpcRendererEvent, log: MCPServerLogEntry & { serverId?: string }) => { + callback(log) + } + ipcRenderer.on(IpcChannel.Mcp_ServerLog, listener) + return () => ipcRenderer.off(IpcChannel.Mcp_ServerLog, listener) + } }, python: { execute: (script: string, context?: Record<string, any>, timeout?: number) => @@ -430,6 +443,8 @@ ipcRenderer.invoke(IpcChannel.Webview_SetOpenLinkExternal, webviewId, isExternal), setSpellCheckEnabled: (webviewId: number, isEnable: boolean) => ipcRenderer.invoke(IpcChannel.Webview_SetSpellCheckEnabled, webviewId, isEnable), + printToPDF: (webviewId: number) => ipcRenderer.invoke(IpcChannel.Webview_PrintToPDF, webviewId), + saveAsHTML: (webviewId: number) =>
ipcRenderer.invoke(IpcChannel.Webview_SaveAsHTML, webviewId), onFindShortcut: (callback: (payload: WebviewKeyEvent) => void) => { const listener = (_event: Electron.IpcRendererEvent, payload: WebviewKeyEvent) => { callback(payload) diff --git a/src/renderer/src/aiCore/index_new.ts index cd1e4411a..91f509847 100644 --- a/src/renderer/src/aiCore/index_new.ts +++ b/src/renderer/src/aiCore/index_new.ts @@ -91,7 +91,9 @@ export default class ModernAiProvider { if (this.isModel(modelOrProvider)) { // A Model instance was passed in this.model = modelOrProvider - this.actualProvider = provider ? adaptProvider({ provider }) : getActualProvider(modelOrProvider) + this.actualProvider = provider + ? adaptProvider({ provider, model: modelOrProvider }) + : getActualProvider(modelOrProvider) // Only save the config here; the executor is not created ahead of time this.config = providerToAiSdkConfig(this.actualProvider, modelOrProvider) } else { diff --git a/src/renderer/src/aiCore/legacy/clients/anthropic/AnthropicAPIClient.ts index 15f3cf100..9b63b77dd 100644 --- a/src/renderer/src/aiCore/legacy/clients/anthropic/AnthropicAPIClient.ts +++ b/src/renderer/src/aiCore/legacy/clients/anthropic/AnthropicAPIClient.ts @@ -124,7 +124,8 @@ export class AnthropicAPIClient extends BaseApiClient< override async listModels(): Promise<Anthropic.ModelInfo[]> { const sdk = (await this.getSdkInstance()) as Anthropic - const response = await sdk.models.list() + // Prevent an auto-appended /v1; it is already included in baseUrl. + const response = await sdk.models.list({ path: '/models' }) return response.data } diff --git a/src/renderer/src/aiCore/legacy/clients/gemini/GeminiAPIClient.ts index 9c930a33e..ac10106f3 100644 --- a/src/renderer/src/aiCore/legacy/clients/gemini/GeminiAPIClient.ts +++ b/src/renderer/src/aiCore/legacy/clients/gemini/GeminiAPIClient.ts @@ -173,13 +173,15 @@ export class GeminiAPIClient extends BaseApiClient< return this.sdkInstance } + const apiVersion = this.getApiVersion() + this.sdkInstance = new GoogleGenAI({ vertexai: false, apiKey: this.apiKey, - apiVersion: this.getApiVersion(), + apiVersion, httpOptions: { baseUrl: this.getBaseURL(), - apiVersion: this.getApiVersion(), + apiVersion, headers: { ...this.provider.extra_headers } @@ -200,7 +202,7 @@ export class GeminiAPIClient extends BaseApiClient< return trailingVersion } - return 'v1beta' + return '' } /** diff --git a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIBaseClient.ts index dc97e74a3..c51f8aac8 100644 --- a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIBaseClient.ts +++ b/src/renderer/src/aiCore/legacy/clients/openai/OpenAIBaseClient.ts @@ -25,7 +25,7 @@ import type { OpenAISdkRawOutput, ReasoningEffortOptionalParams } from '@renderer/types/sdk' -import { formatApiHost, withoutTrailingSlash } from '@renderer/utils/api' +import { withoutTrailingSlash } from '@renderer/utils/api' import { isOllamaProvider } from '@renderer/utils/provider' import { BaseApiClient } from '../BaseApiClient' @@ -49,8 +49,9 @@ export abstract class OpenAIBaseClient< } // Only applies to openai - override getBaseURL(isSupportedAPIVerion: boolean = true): string { - return formatApiHost(this.provider.apiHost, isSupportedAPIVerion) + override getBaseURL(): string { + // apiHost is formatted when called by AiProvider + return this.provider.apiHost } override async generateImage({ @@ -100,6 +101,17 @@ export abstract
class OpenAIBaseClient< override async listModels(): Promise<OpenAI.Models.Model[]> { try { const sdk = await this.getSdkInstance() + if (this.provider.id === 'openrouter') { + // https://openrouter.ai/docs/api/api-reference/embeddings/list-embeddings-models + const embedBaseUrl = 'https://openrouter.ai/api/v1/embeddings' + const embedSdk = sdk.withOptions({ baseURL: embedBaseUrl }) + const modelPromise = sdk.models.list() + const embedModelPromise = embedSdk.models.list() + const [modelResponse, embedModelResponse] = await Promise.all([modelPromise, embedModelPromise]) + const models = [...modelResponse.data, ...embedModelResponse.data] + const uniqueModels = Array.from(new Map(models.map((model) => [model.id, model])).values()) + return uniqueModels.filter(isSupportedModel) + } if (this.provider.id === 'github') { // GitHub Models uses different baseUrls for its models and chat completions endpoints const baseUrl = 'https://models.github.ai/catalog/' @@ -118,7 +130,7 @@ export abstract class OpenAIBaseClient< } if (isOllamaProvider(this.provider)) { - const baseUrl = withoutTrailingSlash(this.getBaseURL(false)) + const baseUrl = withoutTrailingSlash(this.getBaseURL()) .replace(/\/v1$/, '') .replace(/\/api$/, '') const response = await fetch(`${baseUrl}/api/tags`, { @@ -173,6 +185,7 @@ export abstract class OpenAIBaseClient< let apiKeyForSdkInstance = this.apiKey let baseURLForSdkInstance = this.getBaseURL() + logger.debug('baseURLForSdkInstance', { baseURLForSdkInstance }) let headersForSdkInstance = { ...this.defaultHeaders(), ...this.provider.extra_headers } @@ -184,7 +197,7 @@ export abstract class OpenAIBaseClient< // this.provider.apiKey must not be modified // this.provider.apiKey = token apiKeyForSdkInstance = token - baseURLForSdkInstance = this.getBaseURL(false) + baseURLForSdkInstance = this.getBaseURL() headersForSdkInstance = { ...headersForSdkInstance, ...COPILOT_DEFAULT_HEADERS diff --git a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIResponseAPIClient.ts b/src/renderer/src/aiCore/legacy/clients/openai/OpenAIResponseAPIClient.ts index 8356826e2..b4f63e2bc 100644 --- a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIResponseAPIClient.ts +++ b/src/renderer/src/aiCore/legacy/clients/openai/OpenAIResponseAPIClient.ts @@ -122,6 +122,7 @@ export class OpenAIResponseAPIClient extends OpenAIBaseClient< if (this.sdkInstance) { return this.sdkInstance } + const baseUrl = this.getBaseURL() if (this.provider.id === 'azure-openai' || this.provider.type === 'azure-openai') { return new AzureOpenAI({ @@ -134,7 +135,7 @@ export class OpenAIResponseAPIClient extends OpenAIBaseClient< return new OpenAI({ dangerouslyAllowBrowser: true, apiKey: this.apiKey, - baseURL: this.getBaseURL(), + baseURL: baseUrl, defaultHeaders: { ...this.defaultHeaders(), ...this.provider.extra_headers diff --git a/src/renderer/src/aiCore/legacy/index.ts index da6cdb672..7c5f5211d 100644 --- a/src/renderer/src/aiCore/legacy/index.ts +++ b/src/renderer/src/aiCore/legacy/index.ts @@ -2,7 +2,6 @@ import { loggerService } from '@logger' import { ApiClientFactory } from '@renderer/aiCore/legacy/clients/ApiClientFactory' import type { BaseApiClient } from '@renderer/aiCore/legacy/clients/BaseApiClient' import { isDedicatedImageGenerationModel, isFunctionCallingModel } from '@renderer/config/models' -import { getProviderByModel } from '@renderer/services/AssistantService' import { withSpanResult } from '@renderer/services/SpanManagerService' import type { StartSpanParams } from '@renderer/trace/types/ModelSpanEntity' import type {
GenerateImageParams, Model, Provider } from '@renderer/types' @@ -160,9 +159,6 @@ export default class AiProvider { public async getEmbeddingDimensions(model: Model): Promise { try { // Use the SDK instance to test embedding capabilities - if (this.apiClient instanceof OpenAIResponseAPIClient && getProviderByModel(model).type === 'azure-openai') { - this.apiClient = this.apiClient.getClient(model) as BaseApiClient - } const dimensions = await this.apiClient.getEmbeddingDimensions(model) return dimensions } catch (error) { diff --git a/src/renderer/src/aiCore/prepareParams/__tests__/message-converter.test.ts b/src/renderer/src/aiCore/prepareParams/__tests__/message-converter.test.ts index 2433192cd..cb0c5cf9a 100644 --- a/src/renderer/src/aiCore/prepareParams/__tests__/message-converter.test.ts +++ b/src/renderer/src/aiCore/prepareParams/__tests__/message-converter.test.ts @@ -137,6 +137,73 @@ describe('messageConverter', () => { }) }) + it('extracts base64 data from data URLs and preserves mediaType', async () => { + const model = createModel() + const message = createMessage('user') + message.__mockContent = 'Check this image' + message.__mockImageBlocks = [createImageBlock(message.id, { url: 'data:image/png;base64,iVBORw0KGgoAAAANS' })] + + const result = await convertMessageToSdkParam(message, true, model) + + expect(result).toEqual({ + role: 'user', + content: [ + { type: 'text', text: 'Check this image' }, + { type: 'image', image: 'iVBORw0KGgoAAAANS', mediaType: 'image/png' } + ] + }) + }) + + it('handles data URLs without mediaType gracefully', async () => { + const model = createModel() + const message = createMessage('user') + message.__mockContent = 'Check this' + message.__mockImageBlocks = [createImageBlock(message.id, { url: 'data:;base64,AAABBBCCC' })] + + const result = await convertMessageToSdkParam(message, true, model) + + expect(result).toEqual({ + role: 'user', + content: [ + { type: 'text', text: 'Check this' }, + { type: 'image', image: 'AAABBBCCC' } + ] + }) + }) + + it('skips malformed data URLs without comma separator', async () => { + const model = createModel() + const message = createMessage('user') + message.__mockContent = 'Malformed data url' + message.__mockImageBlocks = [createImageBlock(message.id, { url: 'data:image/pngAAABBB' })] + + const result = await convertMessageToSdkParam(message, true, model) + + expect(result).toEqual({ + role: 'user', + content: [ + { type: 'text', text: 'Malformed data url' } + // Malformed data URL is excluded from the content + ] + }) + }) + + it('handles multiple large base64 images without stack overflow', async () => { + const model = createModel() + const message = createMessage('user') + // Create large base64 strings (~500KB each) to simulate real-world large images + const largeBase64 = 'A'.repeat(500_000) + message.__mockContent = 'Check these images' + message.__mockImageBlocks = [ + createImageBlock(message.id, { url: `data:image/png;base64,${largeBase64}` }), + createImageBlock(message.id, { url: `data:image/png;base64,${largeBase64}` }), + createImageBlock(message.id, { url: `data:image/png;base64,${largeBase64}` }) + ] + + // Should not throw RangeError: Maximum call stack size exceeded + await expect(convertMessageToSdkParam(message, true, model)).resolves.toBeDefined() + }) + it('returns file instructions as a system message when native uploads succeed', async () => { const model = createModel() const message = createMessage('user') @@ -165,7 +232,7 @@ describe('messageConverter', () => { }) 
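Note on the helper assumed by the data-URL tests above: messageConverter.ts imports a parseDataUrlMediaType helper from '@renderer/utils/image', but its implementation is not part of this diff. A minimal sketch of what such a helper could look like, matching the behavior these tests expect (the named export and exact signature are assumptions, not code from this PR):

export function parseDataUrlMediaType(url: string): { mediaType?: string } {
  // A data URL has the shape: data:[<mediatype>][;base64],<data>
  if (!url.startsWith('data:')) return {}
  const commaIndex = url.indexOf(',')
  // The header is everything between 'data:' and the first comma (or end of string)
  const header = url.slice(5, commaIndex === -1 ? undefined : commaIndex)
  // Drop parameters such as ';base64' so only the bare media type remains
  const mediaType = header.split(';')[0].trim()
  // 'data:;base64,...' has an empty media type, so no mediaType field is emitted,
  // which matches the test expecting { type: 'image', image: 'AAABBBCCC' } with no mediaType
  return mediaType ? { mediaType } : {}
}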
describe('convertMessagesToSdkMessages', () => { - it('appends assistant images to the final user message for image enhancement models', async () => { + it('collapses to [system?, user(image)] for image enhancement models', async () => { const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' }) const initialUser = createMessage('user') initialUser.__mockContent = 'Start editing' @@ -180,14 +247,6 @@ describe('messageConverter', () => { const result = await convertMessagesToSdkMessages([initialUser, assistant, finalUser], model) expect(result).toEqual([ - { - role: 'user', - content: [{ type: 'text', text: 'Start editing' }] - }, - { - role: 'assistant', - content: [{ type: 'text', text: 'Here is the current preview' }] - }, { role: 'user', content: [ @@ -198,7 +257,7 @@ describe('messageConverter', () => { ]) }) - it('preserves preceding system instructions when building enhancement payloads', async () => { + it('preserves system messages and collapses others for enhancement payloads', async () => { const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' }) const fileUser = createMessage('user') fileUser.__mockContent = 'Use this document as inspiration' @@ -221,11 +280,6 @@ describe('messageConverter', () => { expect(result).toEqual([ { role: 'system', content: 'fileid://reference' }, - { role: 'user', content: [{ type: 'text', text: 'Use this document as inspiration' }] }, - { - role: 'assistant', - content: [{ type: 'text', text: 'Generated previews ready' }] - }, { role: 'user', content: [ @@ -235,5 +289,120 @@ describe('messageConverter', () => { } ]) }) + + it('handles no previous assistant message with images', async () => { + const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' }) + const user1 = createMessage('user') + user1.__mockContent = 'Start' + + const user2 = createMessage('user') + user2.__mockContent = 'Continue without images' + + const result = await convertMessagesToSdkMessages([user1, user2], model) + + expect(result).toEqual([ + { + role: 'user', + content: [{ type: 'text', text: 'Continue without images' }] + } + ]) + }) + + it('handles assistant message without images', async () => { + const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' }) + const user1 = createMessage('user') + user1.__mockContent = 'Start' + + const assistant = createMessage('assistant') + assistant.__mockContent = 'Text only response' + assistant.__mockImageBlocks = [] + + const user2 = createMessage('user') + user2.__mockContent = 'Follow up' + + const result = await convertMessagesToSdkMessages([user1, assistant, user2], model) + + expect(result).toEqual([ + { + role: 'user', + content: [{ type: 'text', text: 'Follow up' }] + } + ]) + }) + + it('handles multiple assistant messages by using the most recent one', async () => { + const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' }) + const user1 = createMessage('user') + user1.__mockContent = 'Start' + + const assistant1 = createMessage('assistant') + assistant1.__mockContent = 'First response' + assistant1.__mockImageBlocks = [createImageBlock(assistant1.id, { url: 'https://example.com/old.png' })] + + const user2 = createMessage('user') + user2.__mockContent = 'Continue' + + const assistant2 = createMessage('assistant') + assistant2.__mockContent = 'Second response' + 
assistant2.__mockImageBlocks = [createImageBlock(assistant2.id, { url: 'https://example.com/new.png' })] + + const user3 = createMessage('user') + user3.__mockContent = 'Final request' + + const result = await convertMessagesToSdkMessages([user1, assistant1, user2, assistant2, user3], model) + + expect(result).toEqual([ + { + role: 'user', + content: [ + { type: 'text', text: 'Final request' }, + { type: 'image', image: 'https://example.com/new.png' } + ] + } + ]) + }) + + it('handles conversation ending with assistant message', async () => { + const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' }) + const user = createMessage('user') + user.__mockContent = 'Start' + + const assistant = createMessage('assistant') + assistant.__mockContent = 'Response with image' + assistant.__mockImageBlocks = [createImageBlock(assistant.id, { url: 'https://example.com/image.png' })] + + const result = await convertMessagesToSdkMessages([user, assistant], model) + + // The user message is the last user message, but since the assistant comes after, + // there's no "previous" assistant message (search starts from messages.length-2 backwards) + expect(result).toEqual([ + { + role: 'user', + content: [{ type: 'text', text: 'Start' }] + } + ]) + }) + + it('handles empty content in last user message', async () => { + const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' }) + const user1 = createMessage('user') + user1.__mockContent = 'Start' + + const assistant = createMessage('assistant') + assistant.__mockContent = 'Here is the preview' + assistant.__mockImageBlocks = [createImageBlock(assistant.id, { url: 'https://example.com/preview.png' })] + + const user2 = createMessage('user') + user2.__mockContent = '' + + const result = await convertMessagesToSdkMessages([user1, assistant, user2], model) + + expect(result).toEqual([ + { + role: 'user', + content: [{ type: 'image', image: 'https://example.com/preview.png' }] + } + ]) + }) }) }) diff --git a/src/renderer/src/aiCore/prepareParams/messageConverter.ts b/src/renderer/src/aiCore/prepareParams/messageConverter.ts index b0c432ef8..328a10b94 100644 --- a/src/renderer/src/aiCore/prepareParams/messageConverter.ts +++ b/src/renderer/src/aiCore/prepareParams/messageConverter.ts @@ -7,6 +7,7 @@ import { loggerService } from '@logger' import { isImageEnhancementModel, isVisionModel } from '@renderer/config/models' import type { Message, Model } from '@renderer/types' import type { FileMessageBlock, ImageMessageBlock, ThinkingMessageBlock } from '@renderer/types/newMessage' +import { parseDataUrlMediaType } from '@renderer/utils/image' import { findFileBlocks, findImageBlocks, @@ -59,23 +60,29 @@ async function convertImageBlockToImagePart(imageBlocks: ImageMessageBlock[]): P mediaType: image.mime }) } catch (error) { - logger.warn('Failed to load image:', error as Error) + logger.error('Failed to load image file, image will be excluded from message:', { + fileId: imageBlock.file.id, + fileName: imageBlock.file.origin_name, + error: error as Error + }) } } else if (imageBlock.url) { - const isBase64 = imageBlock.url.startsWith('data:') - if (isBase64) { - const base64 = imageBlock.url.match(/^data:[^;]*;base64,(.+)$/)![1] - const mimeMatch = imageBlock.url.match(/^data:([^;]+)/) - parts.push({ - type: 'image', - image: base64, - mediaType: mimeMatch ? 
mimeMatch[1] : 'image/png' - }) + const url = imageBlock.url + const isDataUrl = url.startsWith('data:') + if (isDataUrl) { + const { mediaType } = parseDataUrlMediaType(url) + const commaIndex = url.indexOf(',') + if (commaIndex === -1) { + logger.error('Malformed data URL detected (missing comma separator), image will be excluded:', { + urlPrefix: url.slice(0, 50) + '...' + }) + continue + } + const base64Data = url.slice(commaIndex + 1) + parts.push({ type: 'image', image: base64Data, ...(mediaType ? { mediaType } : {}) }) } else { - parts.push({ - type: 'image', - image: imageBlock.url - }) + // For remote URLs we keep payload minimal to match existing expectations. + parts.push({ type: 'image', image: url }) } } } @@ -194,17 +201,20 @@ async function convertMessageToAssistantModelMessage( * This function processes messages and transforms them into the format required by the SDK. * It handles special cases for vision models and image enhancement models. * - * @param messages - Array of messages to convert. Must contain at least 3 messages when using image enhancement models for special handling. + * @param messages - Array of messages to convert. * @param model - The model configuration that determines conversion behavior * * @returns A promise that resolves to an array of SDK-compatible model messages * * @remarks - * For image enhancement models with 3+ messages: - * - Examines the last 2 messages to find an assistant message containing image blocks - * - If found, extracts images from the assistant message and appends them to the last user message content - * - Returns all converted messages (not just the last two) with the images merged into the user message - * - Typical pattern: [system?, assistant(image), user] -> [system?, assistant, user(image)] + * For image enhancement models: + * - Collapses the conversation into [system?, user(image)] format + * - Searches backwards through all messages to find the most recent assistant message with images + * - Preserves all system messages (including ones generated from file uploads like 'fileid://...') + * - Extracts the last user message content and merges images from the previous assistant message + * - Returns only the collapsed messages: system messages (if any) followed by a single user message + * - If no user message is found, returns only system messages + * - Typical pattern: [system?, user, assistant(image), user] -> [system?, user(image)] * * For other models: * - Returns all converted messages in order without special image handling @@ -220,25 +230,66 @@ export async function convertMessagesToSdkMessages(messages: Message[], model: M sdkMessages.push(...(Array.isArray(sdkMessage) ? sdkMessage : [sdkMessage])) } // Special handling for image enhancement models - // Only merge images into the user message - // [system?, assistant(image), user] -> [system?, assistant, user(image)] - if (isImageEnhancementModel(model) && messages.length >= 3) { - const needUpdatedMessages = messages.slice(-2) - const assistantMessage = needUpdatedMessages.find((m) => m.role === 'assistant') - const userSdkMessage = sdkMessages[sdkMessages.length - 1] + // Target behavior: Collapse the conversation into [system?, user(image)]. + // Explanation of why we don't simply use slice: + // 1) We need to preserve all system messages: During the convertMessageToSdkParam process, native file uploads may insert `system(fileid://...)`. 
+ // Directly slicing the original messages or already converted sdkMessages could easily result in missing these system instructions. + // Therefore, we first perform a full conversion and then aggregate the system messages afterward. + // 2) The conversion process may split messages: A single user message might be broken into two SDK messages—[system, user]. + // Slicing either side could lead to obtaining semantically incorrect fragments (e.g., only the split-out system message). + // 3) The “previous assistant message” is not necessarily the second-to-last one: There might be system messages or other message blocks inserted in between, + // making a simple slice(-2) assumption too rigid. Here, we trace back from the end of the original messages to locate the most recent assistant message, which better aligns with business semantics. + // 4) This is a “collapse” rather than a simple “slice”: Ultimately, we need to synthesize a new user message + // (with text from the last user message and images from the previous assistant message). Using slice can only extract subarrays, + // which still require reassembly; constructing directly according to the target structure is clearer and more reliable. + if (isImageEnhancementModel(model)) { + // Collect all system messages (including ones generated from file uploads) + const systemMessages = sdkMessages.filter((m): m is SystemModelMessage => m.role === 'system') - if (assistantMessage && userSdkMessage?.role === 'user') { - const imageBlocks = findImageBlocks(assistantMessage) - const imageParts = await convertImageBlockToImagePart(imageBlocks) + // Find the last user message (SDK converted) + const lastUserSdkIndex = (() => { + for (let i = sdkMessages.length - 1; i >= 0; i--) { + if (sdkMessages[i].role === 'user') return i + } + return -1 + })() - if (imageParts.length > 0) { - if (typeof userSdkMessage.content === 'string') { - userSdkMessage.content = [{ type: 'text', text: userSdkMessage.content }, ...imageParts] - } else if (Array.isArray(userSdkMessage.content)) { - userSdkMessage.content.push(...imageParts) - } + const lastUserSdk = lastUserSdkIndex >= 0 ? 
(sdkMessages[lastUserSdkIndex] as UserModelMessage) : null + + // Find the nearest preceding assistant message in original messages + let prevAssistant: Message | null = null + for (let i = messages.length - 2; i >= 0; i--) { + if (messages[i].role === 'assistant') { + prevAssistant = messages[i] + break } } + + // Build the final user content parts + let finalUserParts: Array<TextPart | ImagePart | FilePart> = [] + if (lastUserSdk) { + if (typeof lastUserSdk.content === 'string') { + finalUserParts.push({ type: 'text', text: lastUserSdk.content }) + } else if (Array.isArray(lastUserSdk.content)) { + finalUserParts = [...lastUserSdk.content] + } + } + + // Append images from the previous assistant message if any + if (prevAssistant) { + const imageBlocks = findImageBlocks(prevAssistant) + const imageParts = await convertImageBlockToImagePart(imageBlocks) + if (imageParts.length > 0) { + finalUserParts.push(...imageParts) + } + } + + // If we couldn't find a last user message, fall back to returning collected system messages only + if (!lastUserSdk) { + return systemMessages + } + + return [...systemMessages, { role: 'user', content: finalUserParts }] } return sdkMessages diff --git a/src/renderer/src/aiCore/prepareParams/modelParameters.ts index 34c341828..58b4834f5 100644 --- a/src/renderer/src/aiCore/prepareParams/modelParameters.ts +++ b/src/renderer/src/aiCore/prepareParams/modelParameters.ts @@ -28,13 +28,14 @@ import { getAnthropicThinkingBudget } from '../utils/reasoning' * - Disabled for models that do not support temperature. * - Disabled for Claude 4.5 reasoning models when TopP is enabled and temperature is disabled. * Otherwise, returns the temperature value if the assistant has temperature enabled. + */ export function getTemperature(assistant: Assistant, model: Model): number | undefined { if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) { return undefined } - if (!isSupportTemperatureModel(model)) { + if (!isSupportTemperatureModel(model, assistant)) { return undefined } @@ -46,6 +47,10 @@ export function getTemperature(assistant: Assistant, model: Model): number | und return undefined } + return getTemperatureValue(assistant, model) +} + +function getTemperatureValue(assistant: Assistant, model: Model): number | undefined { const assistantSettings = getAssistantSettings(assistant) let temperature = assistantSettings?.temperature if (temperature && isMaxTemperatureOneModel(model)) { @@ -68,13 +73,17 @@ export function getTopP(assistant: Assistant, model: Model): number | undefined if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) { return undefined } - if (!isSupportTopPModel(model)) { + if (!isSupportTopPModel(model, assistant)) { return undefined } if (isTemperatureTopPMutuallyExclusiveModel(model) && assistant.settings?.enableTemperature) { return undefined } + return getTopPValue(assistant) +} + +function getTopPValue(assistant: Assistant): number | undefined { const assistantSettings = getAssistantSettings(assistant) // FIXME: assistant.settings.enableTopP should be always a boolean value. const enableTopP = assistantSettings.enableTopP ??
DEFAULT_ASSISTANT_SETTINGS.enableTopP diff --git a/src/renderer/src/aiCore/provider/__tests__/providerConfig.test.ts b/src/renderer/src/aiCore/provider/__tests__/providerConfig.test.ts index 43d3cc52b..20aa78dcb 100644 --- a/src/renderer/src/aiCore/provider/__tests__/providerConfig.test.ts +++ b/src/renderer/src/aiCore/provider/__tests__/providerConfig.test.ts @@ -42,7 +42,8 @@ vi.mock('@renderer/utils/api', () => ({ routeToEndpoint: vi.fn((host) => ({ baseURL: host, endpoint: '/chat/completions' - })) + })), + isWithTrailingSharp: vi.fn((host) => host?.endsWith('#') || false) })) vi.mock('@renderer/utils/provider', async (importOriginal) => { @@ -227,12 +228,19 @@ describe('CherryAI provider configuration', () => { // Mock the functions to simulate non-CherryAI provider vi.mocked(isCherryAIProvider).mockReturnValue(false) vi.mocked(getProviderByModel).mockReturnValue(provider) + // Mock isWithTrailingSharp to return false for this test + vi.mocked(formatApiHost as any).mockImplementation((host, isSupportedAPIVersion = true) => { + if (isSupportedAPIVersion === false) { + return host + } + return `${host}/v1` + }) // Call getActualProvider const actualProvider = getActualProvider(model) - // Verify that formatApiHost was called with default parameters (true) - expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com') + // Verify that formatApiHost was called with appendApiVersion parameter + expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com', true) expect(actualProvider.apiHost).toBe('https://api.openai.com/v1') }) @@ -303,12 +311,19 @@ describe('Perplexity provider configuration', () => { vi.mocked(isCherryAIProvider).mockReturnValue(false) vi.mocked(isPerplexityProvider).mockReturnValue(false) vi.mocked(getProviderByModel).mockReturnValue(provider) + // Mock isWithTrailingSharp to return false for this test + vi.mocked(formatApiHost as any).mockImplementation((host, isSupportedAPIVersion = true) => { + if (isSupportedAPIVersion === false) { + return host + } + return `${host}/v1` + }) // Call getActualProvider const actualProvider = getActualProvider(model) - // Verify that formatApiHost was called with default parameters (true) - expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com') + // Verify that formatApiHost was called with appendApiVersion parameter + expect(formatApiHost).toHaveBeenCalledWith('https://api.openai.com', true) expect(actualProvider.apiHost).toBe('https://api.openai.com/v1') }) diff --git a/src/renderer/src/aiCore/provider/providerConfig.ts b/src/renderer/src/aiCore/provider/providerConfig.ts index 99e4fbd1c..1c410bf12 100644 --- a/src/renderer/src/aiCore/provider/providerConfig.ts +++ b/src/renderer/src/aiCore/provider/providerConfig.ts @@ -9,6 +9,7 @@ import { } from '@renderer/hooks/useAwsBedrock' import { createVertexProvider, isVertexAIConfigured } from '@renderer/hooks/useVertexAI' import { getProviderByModel } from '@renderer/services/AssistantService' +import { getProviderById } from '@renderer/services/ProviderService' import store from '@renderer/store' import { isSystemProvider, type Model, type Provider, SystemProviderIds } from '@renderer/types' import type { OpenAICompletionsStreamOptions } from '@renderer/types/aiCoreTypes' @@ -17,6 +18,7 @@ import { formatAzureOpenAIApiHost, formatOllamaApiHost, formatVertexApiHost, + isWithTrailingSharp, routeToEndpoint } from '@renderer/utils/api' import { @@ -69,14 +71,15 @@ function handleSpecialProviders(model: Model, provider: Provider): Provider { */ export function 
formatProviderApiHost(provider: Provider): Provider { const formatted = { ...provider } + const appendApiVersion = !isWithTrailingSharp(provider.apiHost) if (formatted.anthropicApiHost) { - formatted.anthropicApiHost = formatApiHost(formatted.anthropicApiHost) + formatted.anthropicApiHost = formatApiHost(formatted.anthropicApiHost, appendApiVersion) } if (isAnthropicProvider(provider)) { const baseHost = formatted.anthropicApiHost || formatted.apiHost // AI SDK needs /v1 in baseURL, Anthropic SDK will strip it in getSdkClient - formatted.apiHost = formatApiHost(baseHost) + formatted.apiHost = formatApiHost(baseHost, appendApiVersion) if (!formatted.anthropicApiHost) { formatted.anthropicApiHost = formatted.apiHost } @@ -85,7 +88,7 @@ export function formatProviderApiHost(provider: Provider): Provider { } else if (isOllamaProvider(formatted)) { formatted.apiHost = formatOllamaApiHost(formatted.apiHost) } else if (isGeminiProvider(formatted)) { - formatted.apiHost = formatApiHost(formatted.apiHost, true, 'v1beta') + formatted.apiHost = formatApiHost(formatted.apiHost, appendApiVersion, 'v1beta') } else if (isAzureOpenAIProvider(formatted)) { formatted.apiHost = formatAzureOpenAIApiHost(formatted.apiHost) } else if (isVertexProvider(formatted)) { @@ -95,7 +98,7 @@ export function formatProviderApiHost(provider: Provider): Provider { } else if (isPerplexityProvider(formatted)) { formatted.apiHost = formatApiHost(formatted.apiHost, false) } else { - formatted.apiHost = formatApiHost(formatted.apiHost) + formatted.apiHost = formatApiHost(formatted.apiHost, appendApiVersion) } return formatted } @@ -248,6 +251,12 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A if (model.endpoint_type) { extraOptions.endpointType = model.endpoint_type } + // CherryIN API Host + const cherryinProvider = getProviderById(SystemProviderIds.cherryin) + if (cherryinProvider) { + extraOptions.anthropicBaseURL = cherryinProvider.anthropicApiHost + '/v1' + extraOptions.geminiBaseURL = cherryinProvider.apiHost + '/v1beta/models' + } } if (hasProviderConfig(aiSdkProviderId) && aiSdkProviderId !== 'openai-compatible') { diff --git a/src/renderer/src/aiCore/utils/__tests__/reasoning.test.ts b/src/renderer/src/aiCore/utils/__tests__/reasoning.test.ts index 36253e5c1..fec4d197e 100644 --- a/src/renderer/src/aiCore/utils/__tests__/reasoning.test.ts +++ b/src/renderer/src/aiCore/utils/__tests__/reasoning.test.ts @@ -754,7 +754,8 @@ describe('reasoning utils', () => { const result = getGeminiReasoningParams(assistant, model) expect(result).toEqual({ thinkingConfig: { - includeThoughts: true + includeThoughts: true, + thinkingBudget: -1 } }) }) diff --git a/src/renderer/src/aiCore/utils/options.ts b/src/renderer/src/aiCore/utils/options.ts index 8ec46c9df..fd9bc590c 100644 --- a/src/renderer/src/aiCore/utils/options.ts +++ b/src/renderer/src/aiCore/utils/options.ts @@ -11,6 +11,7 @@ import { isGeminiModel, isGrokModel, isOpenAIModel, + isOpenAIOpenWeightModel, isQwenMTModel, isSupportFlexServiceTierModel, isSupportVerbosityModel @@ -244,7 +245,7 @@ export function buildProviderOptions( providerSpecificOptions = buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier) break case SystemProviderIds.ollama: - providerSpecificOptions = buildOllamaProviderOptions(assistant, capabilities) + providerSpecificOptions = buildOllamaProviderOptions(assistant, model, capabilities) break case SystemProviderIds.gateway: providerSpecificOptions = buildAIGatewayOptions(assistant, model, capabilities, 
serviceTier, textVerbosity) @@ -564,6 +565,7 @@ function buildBedrockProviderOptions( function buildOllamaProviderOptions( assistant: Assistant, + model: Model, capabilities: { enableReasoning: boolean enableWebSearch: boolean @@ -574,7 +576,12 @@ function buildOllamaProviderOptions( const providerOptions: OllamaCompletionProviderOptions = {} const reasoningEffort = assistant.settings?.reasoning_effort if (enableReasoning) { - providerOptions.think = !['none', undefined].includes(reasoningEffort) + if (isOpenAIOpenWeightModel(model)) { + // @ts-ignore upstream type error + providerOptions.think = reasoningEffort as any + } else { + providerOptions.think = !['none', undefined].includes(reasoningEffort) + } } return { ollama: providerOptions diff --git a/src/renderer/src/aiCore/utils/reasoning.ts b/src/renderer/src/aiCore/utils/reasoning.ts index 1e74db24d..996d67676 100644 --- a/src/renderer/src/aiCore/utils/reasoning.ts +++ b/src/renderer/src/aiCore/utils/reasoning.ts @@ -13,11 +13,11 @@ import { isDoubaoSeedAfter251015, isDoubaoThinkingAutoModel, isGemini3ThinkingTokenModel, - isGPT5SeriesModel, isGPT51SeriesModel, isGrok4FastReasoningModel, isOpenAIDeepResearchModel, isOpenAIModel, + isOpenAIReasoningModel, isQwenAlwaysThinkModel, isQwenReasoningModel, isReasoningModel, @@ -134,8 +134,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin // https://creator.poe.com/docs/external-applications/openai-compatible-api#additional-considerations // Poe provider - supports custom bot parameters via extra_body if (provider.id === SystemProviderIds.poe) { - // GPT-5 series models use reasoning_effort parameter in extra_body - if (isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) { + if (isOpenAIReasoningModel(model)) { return { extra_body: { reasoning_effort: reasoningEffort === 'auto' ? 
'medium' : reasoningEffort @@ -589,6 +588,7 @@ export function getGeminiReasoningParams( if (effortRatio > 1) { return { thinkingConfig: { + thinkingBudget: -1, includeThoughts: true } } @@ -634,6 +634,8 @@ export function getXAIReasoningParams(assistant: Assistant, model: Model): Pick< case 'low': case 'high': return { reasoningEffort } + case 'xhigh': + return { reasoningEffort: 'high' } } } diff --git a/src/renderer/src/components/CodeBlockView/HtmlArtifactsPopup.tsx b/src/renderer/src/components/CodeBlockView/HtmlArtifactsPopup.tsx index 8e155d8be..36bfc559d 100644 --- a/src/renderer/src/components/CodeBlockView/HtmlArtifactsPopup.tsx +++ b/src/renderer/src/components/CodeBlockView/HtmlArtifactsPopup.tsx @@ -29,7 +29,7 @@ const HtmlArtifactsPopup: React.FC = ({ open, title, ht const [fontSize] = usePreference('chat.message.font_size') const { activeCmTheme } = useCodeStyle() const [viewMode, setViewMode] = useState('split') - const [isFullscreen, setIsFullscreen] = useState(false) + const [isFullscreen, setIsFullscreen] = useState(true) const [saved, setSaved] = useTemporaryValue(false, 2000) const codeEditorRef = useRef(null) const previewFrameRef = useRef(null) @@ -82,7 +82,7 @@ const HtmlArtifactsPopup: React.FC = ({ open, title, ht - e.stopPropagation()}> + e.stopPropagation()} className="nodrag"> { + if (!webviewRef.current) return + + const unsubscribe = window.api?.webview?.onFindShortcut?.(async (payload) => { + // Get webviewId when event is triggered + const webviewId = webviewRef.current?.getWebContentsId() + + // Only handle events for this webview + if (!webviewId || payload.webviewId !== webviewId) return + + const key = payload.key?.toLowerCase() + const isModifier = payload.control || payload.meta + + if (!isModifier || !key) return + + try { + if (key === 'p') { + // Print to PDF + logger.info(`Printing webview ${appid} to PDF`) + const filePath = await window.api.webview.printToPDF(webviewId) + if (filePath) { + window.toast?.success?.(`PDF saved to: ${filePath}`) + logger.info(`PDF saved to: ${filePath}`) + } + } else if (key === 's') { + // Save as HTML + logger.info(`Saving webview ${appid} as HTML`) + const filePath = await window.api.webview.saveAsHTML(webviewId) + if (filePath) { + window.toast?.success?.(`HTML saved to: ${filePath}`) + logger.info(`HTML saved to: ${filePath}`) + } + } + } catch (error) { + logger.error(`Failed to handle shortcut for webview ${appid}:`, error as Error) + window.toast?.error?.(`Failed: ${(error as Error).message}`) + } + }) + + return () => { + unsubscribe?.() + } + }, [appid]) + // Update webview settings when they change useEffect(() => { if (!webviewRef.current) return diff --git a/src/renderer/src/components/Popups/agent/AgentModal.tsx b/src/renderer/src/components/Popups/agent/AgentModal.tsx index d3e288454..1e8b2a6fa 100644 --- a/src/renderer/src/components/Popups/agent/AgentModal.tsx +++ b/src/renderer/src/components/Popups/agent/AgentModal.tsx @@ -60,6 +60,7 @@ const PopupContainer: React.FC = ({ agent, afterSubmit, resolve }) => { const [form, setForm] = useState(() => buildAgentForm(agent)) const [hasGitBash, setHasGitBash] = useState(true) + const [customGitBashPath, setCustomGitBashPath] = useState('') useEffect(() => { if (open) { @@ -70,7 +71,11 @@ const PopupContainer: React.FC = ({ agent, afterSubmit, resolve }) => { const checkGitBash = useCallback( async (showToast = false) => { try { - const gitBashInstalled = await window.api.system.checkGitBash() + const [gitBashInstalled, savedPath] = await Promise.all([ + 
window.api.system.checkGitBash(), + window.api.system.getGitBashPath().catch(() => null) + ]) + setCustomGitBashPath(savedPath ?? '') setHasGitBash(gitBashInstalled) if (showToast) { if (gitBashInstalled) { @@ -93,6 +98,46 @@ const PopupContainer: React.FC = ({ agent, afterSubmit, resolve }) => { const selectedPermissionMode = form.configuration?.permission_mode ?? 'default' + const handlePickGitBash = useCallback(async () => { + try { + const selected = await window.api.file.select({ + title: t('agent.gitBash.pick.title', 'Select Git Bash executable'), + filters: [{ name: 'Executable', extensions: ['exe'] }], + properties: ['openFile'] + }) + + if (!selected || selected.length === 0) { + return + } + + const pickedPath = selected[0].path + const ok = await window.api.system.setGitBashPath(pickedPath) + if (!ok) { + window.toast.error( + t('agent.gitBash.pick.invalidPath', 'Selected file is not a valid Git Bash executable (bash.exe).') + ) + return + } + + setCustomGitBashPath(pickedPath) + await checkGitBash(true) + } catch (error) { + logger.error('Failed to pick Git Bash path', error as Error) + window.toast.error(t('agent.gitBash.pick.failed', 'Failed to set Git Bash path')) + } + }, [checkGitBash, t]) + + const handleClearGitBash = useCallback(async () => { + try { + await window.api.system.setGitBashPath(null) + setCustomGitBashPath('') + await checkGitBash(true) + } catch (error) { + logger.error('Failed to clear Git Bash path', error as Error) + window.toast.error(t('agent.gitBash.pick.failed', 'Failed to set Git Bash path')) + } + }, [checkGitBash, t]) + const onPermissionModeChange = useCallback((value: PermissionMode) => { setForm((prev) => { const parsedConfiguration = AgentConfigurationSchema.parse(prev.configuration ?? {}) @@ -324,6 +369,9 @@ const PopupContainer: React.FC = ({ agent, afterSubmit, resolve }) => { + } type="error" @@ -331,6 +379,33 @@ const PopupContainer: React.FC = ({ agent, afterSubmit, resolve }) => { style={{ marginBottom: 16 }} /> )} + + {hasGitBash && customGitBashPath && ( + +
<Alert + message={ + <div> + {t('agent.gitBash.customPath', { + defaultValue: 'Using custom path: {{path}}', + path: customGitBashPath + })} + </div> + } + action={ + <Space> + <Button size="small" onClick={handlePickGitBash}> + {t('agent.gitBash.pick.change', 'Change')} + </Button> + <Button size="small" onClick={handleClearGitBash}> + {t('agent.gitBash.pick.clear', 'Clear')} + </Button> + </Space> + } + type="success" + showIcon + style={{ marginBottom: 16 }} + /> + )}
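Taken together, the MCP logging changes in this PR give the renderer a pull-plus-push API: Mcp_GetServerLogs backfills from the bounded ServerLogBuffer, while Mcp_ServerLog streams new entries and onServerLog returns an unsubscribe function. A hypothetical renderer-side sketch of how the two could be combined; the attachServerLogView name, the setLogs callback shape, and filtering on serverId === server.id are assumptions, not code from this diff:

import type { MCPServer } from '@renderer/types'
import type { MCPServerLogEntry } from '@shared/config/types'

async function attachServerLogView(
  server: MCPServer,
  setLogs: (update: (prev: MCPServerLogEntry[]) => MCPServerLogEntry[]) => void
): Promise<() => void> {
  // Backfill with whatever the per-server ring buffer currently holds
  const history = await window.api.mcp.getServerLogs(server)
  setLogs(() => history)

  // Stream subsequent entries; the channel carries logs for every server,
  // so drop entries whose serverId does not match this view
  const unsubscribe = window.api.mcp.onServerLog((log) => {
    if (log.serverId && log.serverId !== server.id) return
    setLogs((prev) => [...prev, log])
  })

  // The caller invokes the returned function when the view unmounts
  return unsubscribe
}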