Mirror of https://github.com/CherryHQ/cherry-studio.git
Synced 2025-12-19 22:52:08 +08:00

Compare commits · 44 commits
Commit SHA1s:

3045f924ce, a6ba5d34e0, 8ab375161d, 42260710d8, 5e8646c6a5, 7e93e8b9b2, eb7a2cc85a, fd6986076a,
6309cc179d, c04529a23c, 0f1b3afa72, 0cf0072b51, 150bb3e3a0, 739096deca, 1d5dafa325, bdfda7afb1,
ef25eef0eb, c676a93595, e85009fcd6, 99d7223a0a, bdd272b7cd, 782f8496e0, bfeef7ef91, 784fdd4fed,
432b31c7b1, f2b4a2382b, b66787280a, d41229c69b, aeebd343d7, 71df9d61fd, 4d3d5ae4ce, a1f0addafb,
e78f25ff91, 68f70e3b16, fd921103dd, a1e44a6827, ee7eee24da, f0ec2354dc, 5bd550bfb4, dc0c47c64d,
66feee714b, 96aba33077, 97f6275104, b906849c17
.github/workflows/auto-i18n.yml (vendored), 2 changed lines

@@ -54,7 +54,7 @@ jobs:
 yarn install
 
 - name: 🏃♀️ Translate
-run: yarn sync:i18n && yarn auto:i18n
+run: yarn i18n:sync && yarn i18n:translate
 
 - name: 🔍 Format
 run: yarn format
.github/workflows/pr-ci.yml (vendored), 2 changed lines

@@ -58,7 +58,7 @@ jobs:
 run: yarn typecheck
 
 - name: i18n Check
-run: yarn check:i18n
+run: yarn i18n:check
 
 - name: Test
 run: yarn test
.github/workflows/sync-to-gitcode.yml (vendored), 16 changed lines

@@ -216,6 +216,7 @@ jobs:
 local filename=$(basename "$file")
 local max_retries=3
 local retry=0
+local curl_status=0
 
 echo "Uploading: $filename"
 
@@ -224,10 +225,12 @@
 
 while [ $retry -lt $max_retries ]; do
 # Get upload URL
+curl_status=0
 UPLOAD_INFO=$(curl -s --connect-timeout 30 --max-time 60 \
 -H "Authorization: Bearer ${GITCODE_TOKEN}" \
-"${API_URL}/repos/${GITCODE_OWNER}/${GITCODE_REPO}/releases/${TAG_NAME}/upload_url?file_name=${encoded_filename}")
+"${API_URL}/repos/${GITCODE_OWNER}/${GITCODE_REPO}/releases/${TAG_NAME}/upload_url?file_name=${encoded_filename}") || curl_status=$?
 
+if [ $curl_status -eq 0 ]; then
 UPLOAD_URL=$(echo "$UPLOAD_INFO" | jq -r '.url // empty')
 
 if [ -n "$UPLOAD_URL" ]; then
@@ -235,11 +238,13 @@
 echo "$UPLOAD_INFO" | jq -r '.headers | to_entries[] | "header = \"" + .key + ": " + .value + "\""' > /tmp/upload_headers.txt
 
 # Upload file using PUT with headers from file
+curl_status=0
 UPLOAD_RESPONSE=$(curl -s -w "\n%{http_code}" -X PUT \
 -K /tmp/upload_headers.txt \
 --data-binary "@${file}" \
-"$UPLOAD_URL")
+"$UPLOAD_URL") || curl_status=$?
 
+if [ $curl_status -eq 0 ]; then
 HTTP_CODE=$(echo "$UPLOAD_RESPONSE" | tail -n1)
 RESPONSE_BODY=$(echo "$UPLOAD_RESPONSE" | sed '$d')
 
@@ -250,10 +255,17 @@
 echo " Failed (HTTP $HTTP_CODE), retry $((retry + 1))/$max_retries"
 echo " Response: $RESPONSE_BODY"
 fi
+else
+echo " Upload request failed (curl exit $curl_status), retry $((retry + 1))/$max_retries"
+fi
 else
 echo " Failed to get upload URL, retry $((retry + 1))/$max_retries"
 echo " Response: $UPLOAD_INFO"
 fi
+else
+echo " Failed to get upload URL (curl exit $curl_status), retry $((retry + 1))/$max_retries"
+echo " Response: $UPLOAD_INFO"
+fi
 
 retry=$((retry + 1))
 [ $retry -lt $max_retries ] && sleep 3
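The change above retries not only on a bad HTTP status but also when curl itself fails, by capturing the exit status with `|| curl_status=$?`. A rough TypeScript sketch of the same control flow, written only to illustrate the two failure paths (the function name, URL handling, and fetch-based transport are illustrative, not part of the workflow):

```typescript
// Illustrative sketch: the real workflow is bash + curl; this mirrors its retry logic.
async function uploadWithRetry(url: string, body: Blob, maxRetries = 3): Promise<boolean> {
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      const res = await fetch(url, { method: 'PUT', body })
      if (res.ok) return true
      // HTTP-level failure: equivalent of the HTTP_CODE check in the workflow.
      console.warn(`Failed (HTTP ${res.status}), retry ${attempt}/${maxRetries}`)
    } catch (err) {
      // Transport failure: equivalent of `|| curl_status=$?` catching a non-zero curl exit.
      console.warn(`Upload request failed (${err}), retry ${attempt}/${maxRetries}`)
    }
    if (attempt < maxRetries) await new Promise((resolve) => setTimeout(resolve, 3000))
  }
  return false
}
```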
Yarn patch for @ai-sdk/google:

@@ -1,5 +1,5 @@
 diff --git a/dist/index.js b/dist/index.js
-index 51ce7e423934fb717cb90245cdfcdb3dae6780e6..0f7f7009e2f41a79a8669d38c8a44867bbff5e1f 100644
+index d004b415c5841a1969705823614f395265ea5a8a..6b1e0dad4610b0424393ecc12e9114723bbe316b 100644
 --- a/dist/index.js
 +++ b/dist/index.js
 @@ -474,7 +474,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
@@ -12,7 +12,7 @@ index 51ce7e423934fb717cb90245cdfcdb3dae6780e6..0f7f7009e2f41a79a8669d38c8a44867
 
 // src/google-generative-ai-options.ts
 diff --git a/dist/index.mjs b/dist/index.mjs
-index f4b77e35c0cbfece85a3ef0d4f4e67aa6dde6271..8d2fecf8155a226006a0bde72b00b6036d4014b6 100644
+index 1780dd2391b7f42224a0b8048c723d2f81222c44..1f12ed14399d6902107ce9b435d7d8e6cc61e06b 100644
 --- a/dist/index.mjs
 +++ b/dist/index.mjs
 @@ -480,7 +480,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
@@ -24,3 +24,14 @@ index f4b77e35c0cbfece85a3ef0d4f4e67aa6dde6271..8d2fecf8155a226006a0bde72b00b603
 }
 
 // src/google-generative-ai-options.ts
+@@ -1909,8 +1909,7 @@ function createGoogleGenerativeAI(options = {}) {
+}
+var google = createGoogleGenerativeAI();
+export {
+- VERSION,
+createGoogleGenerativeAI,
+- google
++ google, VERSION
+};
+//# sourceMappingURL=index.mjs.map
+\ No newline at end of file
Yarn patch for @ai-sdk/openai:

@@ -1,8 +1,8 @@
 diff --git a/dist/index.js b/dist/index.js
-index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a70ea2b5a2 100644
+index 130094d194ea1e8e7d3027d07d82465741192124..4d13dcee8c962ca9ee8f1c3d748f8ffe6a3cfb47 100644
 --- a/dist/index.js
 +++ b/dist/index.js
-@@ -274,6 +274,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
+@@ -290,6 +290,7 @@ var openaiChatResponseSchema = (0, import_provider_utils3.lazyValidator)(
 message: import_v42.z.object({
 role: import_v42.z.literal("assistant").nullish(),
 content: import_v42.z.string().nullish(),
@@ -10,7 +10,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
 tool_calls: import_v42.z.array(
 import_v42.z.object({
 id: import_v42.z.string().nullish(),
-@@ -340,6 +341,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
+@@ -356,6 +357,7 @@ var openaiChatChunkSchema = (0, import_provider_utils3.lazyValidator)(
 delta: import_v42.z.object({
 role: import_v42.z.enum(["assistant"]).nullish(),
 content: import_v42.z.string().nullish(),
@@ -18,7 +18,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
 tool_calls: import_v42.z.array(
 import_v42.z.object({
 index: import_v42.z.number(),
-@@ -795,6 +797,13 @@ var OpenAIChatLanguageModel = class {
+@@ -814,6 +816,13 @@ var OpenAIChatLanguageModel = class {
 if (text != null && text.length > 0) {
 content.push({ type: "text", text });
 }
@@ -32,7 +32,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
 for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
 content.push({
 type: "tool-call",
-@@ -876,6 +885,7 @@ var OpenAIChatLanguageModel = class {
+@@ -895,6 +904,7 @@ var OpenAIChatLanguageModel = class {
 };
 let metadataExtracted = false;
 let isActiveText = false;
@@ -40,7 +40,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
 const providerMetadata = { openai: {} };
 return {
 stream: response.pipeThrough(
-@@ -933,6 +943,21 @@ var OpenAIChatLanguageModel = class {
+@@ -952,6 +962,21 @@ var OpenAIChatLanguageModel = class {
 return;
 }
 const delta = choice.delta;
@@ -62,7 +62,7 @@ index bf900591bf2847a3253fe441aad24c06da19c6c1..c1d9bb6fefa2df1383339324073db0a7
 if (delta.content != null) {
 if (!isActiveText) {
 controller.enqueue({ type: "text-start", id: "0" });
-@@ -1045,6 +1070,9 @@ var OpenAIChatLanguageModel = class {
+@@ -1064,6 +1089,9 @@ var OpenAIChatLanguageModel = class {
 }
 },
 flush(controller) {
.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch (vendored, new file), 145 lines

@@ -0,0 +1,145 @@
diff --git a/dist/index.d.ts b/dist/index.d.ts
index 8dd9b498050dbecd8dd6b901acf1aa8ca38a49af..ed644349c9d38fe2a66b2fb44214f7c18eb97f89 100644
--- a/dist/index.d.ts
+++ b/dist/index.d.ts
@@ -4,7 +4,7 @@ import { z } from 'zod/v4';
 
type OllamaChatModelId = "athene-v2" | "athene-v2:72b" | "aya-expanse" | "aya-expanse:8b" | "aya-expanse:32b" | "codegemma" | "codegemma:2b" | "codegemma:7b" | "codellama" | "codellama:7b" | "codellama:13b" | "codellama:34b" | "codellama:70b" | "codellama:code" | "codellama:python" | "command-r" | "command-r:35b" | "command-r-plus" | "command-r-plus:104b" | "command-r7b" | "command-r7b:7b" | "deepseek-r1" | "deepseek-r1:1.5b" | "deepseek-r1:7b" | "deepseek-r1:8b" | "deepseek-r1:14b" | "deepseek-r1:32b" | "deepseek-r1:70b" | "deepseek-r1:671b" | "deepseek-coder-v2" | "deepseek-coder-v2:16b" | "deepseek-coder-v2:236b" | "deepseek-v3" | "deepseek-v3:671b" | "devstral" | "devstral:24b" | "dolphin3" | "dolphin3:8b" | "exaone3.5" | "exaone3.5:2.4b" | "exaone3.5:7.8b" | "exaone3.5:32b" | "falcon2" | "falcon2:11b" | "falcon3" | "falcon3:1b" | "falcon3:3b" | "falcon3:7b" | "falcon3:10b" | "firefunction-v2" | "firefunction-v2:70b" | "gemma" | "gemma:2b" | "gemma:7b" | "gemma2" | "gemma2:2b" | "gemma2:9b" | "gemma2:27b" | "gemma3" | "gemma3:1b" | "gemma3:4b" | "gemma3:12b" | "gemma3:27b" | "granite3-dense" | "granite3-dense:2b" | "granite3-dense:8b" | "granite3-guardian" | "granite3-guardian:2b" | "granite3-guardian:8b" | "granite3-moe" | "granite3-moe:1b" | "granite3-moe:3b" | "granite3.1-dense" | "granite3.1-dense:2b" | "granite3.1-dense:8b" | "granite3.1-moe" | "granite3.1-moe:1b" | "granite3.1-moe:3b" | "llama2" | "llama2:7b" | "llama2:13b" | "llama2:70b" | "llama3" | "llama3:8b" | "llama3:70b" | "llama3-chatqa" | "llama3-chatqa:8b" | "llama3-chatqa:70b" | "llama3-gradient" | "llama3-gradient:8b" | "llama3-gradient:70b" | "llama3.1" | "llama3.1:8b" | "llama3.1:70b" | "llama3.1:405b" | "llama3.2" | "llama3.2:1b" | "llama3.2:3b" | "llama3.2-vision" | "llama3.2-vision:11b" | "llama3.2-vision:90b" | "llama3.3" | "llama3.3:70b" | "llama4" | "llama4:16x17b" | "llama4:128x17b" | "llama-guard3" | "llama-guard3:1b" | "llama-guard3:8b" | "llava" | "llava:7b" | "llava:13b" | "llava:34b" | "llava-llama3" | "llava-llama3:8b" | "llava-phi3" | "llava-phi3:3.8b" | "marco-o1" | "marco-o1:7b" | "mistral" | "mistral:7b" | "mistral-large" | "mistral-large:123b" | "mistral-nemo" | "mistral-nemo:12b" | "mistral-small" | "mistral-small:22b" | "mixtral" | "mixtral:8x7b" | "mixtral:8x22b" | "moondream" | "moondream:1.8b" | "openhermes" | "openhermes:v2.5" | "nemotron" | "nemotron:70b" | "nemotron-mini" | "nemotron-mini:4b" | "olmo" | "olmo:7b" | "olmo:13b" | "opencoder" | "opencoder:1.5b" | "opencoder:8b" | "phi3" | "phi3:3.8b" | "phi3:14b" | "phi3.5" | "phi3.5:3.8b" | "phi4" | "phi4:14b" | "qwen" | "qwen:7b" | "qwen:14b" | "qwen:32b" | "qwen:72b" | "qwen:110b" | "qwen2" | "qwen2:0.5b" | "qwen2:1.5b" | "qwen2:7b" | "qwen2:72b" | "qwen2.5" | "qwen2.5:0.5b" | "qwen2.5:1.5b" | "qwen2.5:3b" | "qwen2.5:7b" | "qwen2.5:14b" | "qwen2.5:32b" | "qwen2.5:72b" | "qwen2.5-coder" | "qwen2.5-coder:0.5b" | "qwen2.5-coder:1.5b" | "qwen2.5-coder:3b" | "qwen2.5-coder:7b" | "qwen2.5-coder:14b" | "qwen2.5-coder:32b" | "qwen3" | "qwen3:0.6b" | "qwen3:1.7b" | "qwen3:4b" | "qwen3:8b" | "qwen3:14b" | "qwen3:30b" | "qwen3:32b" | "qwen3:235b" | "qwq" | "qwq:32b" | "sailor2" | "sailor2:1b" | "sailor2:8b" | "sailor2:20b" | "shieldgemma" | "shieldgemma:2b" | "shieldgemma:9b" | "shieldgemma:27b" | "smallthinker" | "smallthinker:3b" | "smollm" | "smollm:135m" | "smollm:360m" | "smollm:1.7b" | "tinyllama" | "tinyllama:1.1b" | "tulu3" | "tulu3:8b" | "tulu3:70b" | (string & {});
declare const ollamaProviderOptions: z.ZodObject<{
- think: z.ZodOptional<z.ZodBoolean>;
+ think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodEnum<['low', 'medium', 'high']>]>>;
options: z.ZodOptional<z.ZodObject<{
num_ctx: z.ZodOptional<z.ZodNumber>;
repeat_last_n: z.ZodOptional<z.ZodNumber>;
@@ -27,9 +27,11 @@ interface OllamaCompletionSettings {
* the model's thinking from the model's output. When disabled, the model will not think
* and directly output the content.
*
+ * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking.
+ *
* Only supported by certain models like DeepSeek R1 and Qwen 3.
*/
- think?: boolean;
+ think?: boolean | 'low' | 'medium' | 'high';
/**
* Echo back the prompt in addition to the completion.
*/
@@ -146,7 +148,7 @@ declare const ollamaEmbeddingProviderOptions: z.ZodObject<{
type OllamaEmbeddingProviderOptions = z.infer<typeof ollamaEmbeddingProviderOptions>;
 
declare const ollamaCompletionProviderOptions: z.ZodObject<{
- think: z.ZodOptional<z.ZodBoolean>;
+ think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodEnum<['low', 'medium', 'high']>]>>;
user: z.ZodOptional<z.ZodString>;
suffix: z.ZodOptional<z.ZodString>;
echo: z.ZodOptional<z.ZodBoolean>;
diff --git a/dist/index.js b/dist/index.js
index 35b5142ce8476ce2549ed7c2ec48e7d8c46c90d9..2ef64dc9a4c2be043e6af608241a6a8309a5a69f 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -158,7 +158,7 @@ function getResponseMetadata({
 
// src/completion/ollama-completion-language-model.ts
var ollamaCompletionProviderOptions = import_v42.z.object({
- think: import_v42.z.boolean().optional(),
+ think: import_v42.z.union([import_v42.z.boolean(), import_v42.z.enum(['low', 'medium', 'high'])]).optional(),
user: import_v42.z.string().optional(),
suffix: import_v42.z.string().optional(),
echo: import_v42.z.boolean().optional()
@@ -662,7 +662,7 @@ function convertToOllamaChatMessages({
const images = content.filter((part) => part.type === "file" && part.mediaType.startsWith("image/")).map((part) => part.data);
messages.push({
role: "user",
- content: userText.length > 0 ? userText : [],
+ content: userText.length > 0 ? userText : '',
images: images.length > 0 ? images : void 0
});
break;
@@ -813,9 +813,11 @@ var ollamaProviderOptions = import_v44.z.object({
* the model's thinking from the model's output. When disabled, the model will not think
* and directly output the content.
*
+ * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking.
+ *
* Only supported by certain models like DeepSeek R1 and Qwen 3.
*/
- think: import_v44.z.boolean().optional(),
+ think: import_v44.z.union([import_v44.z.boolean(), import_v44.z.enum(['low', 'medium', 'high'])]).optional(),
options: import_v44.z.object({
num_ctx: import_v44.z.number().optional(),
repeat_last_n: import_v44.z.number().optional(),
@@ -929,14 +931,16 @@ var OllamaRequestBuilder = class {
prompt,
systemMessageMode: "system"
}),
- temperature,
- top_p: topP,
max_output_tokens: maxOutputTokens,
...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
format: responseFormat.schema != null ? responseFormat.schema : "json"
},
think: (_a = ollamaOptions == null ? void 0 : ollamaOptions.think) != null ? _a : false,
- options: (_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : void 0
+ options: {
+ ...temperature !== void 0 && { temperature },
+ ...topP !== void 0 && { top_p: topP },
+ ...((_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : {})
+ }
};
}
};
diff --git a/dist/index.mjs b/dist/index.mjs
index e2a634a78d80ac9542f2cc4f96cf2291094b10cf..67b23efce3c1cf4f026693d3ff9246988a3ef26e 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -144,7 +144,7 @@ function getResponseMetadata({
 
// src/completion/ollama-completion-language-model.ts
var ollamaCompletionProviderOptions = z2.object({
- think: z2.boolean().optional(),
+ think: z2.union([z2.boolean(), z2.enum(['low', 'medium', 'high'])]).optional(),
user: z2.string().optional(),
suffix: z2.string().optional(),
echo: z2.boolean().optional()
@@ -662,7 +662,7 @@ function convertToOllamaChatMessages({
const images = content.filter((part) => part.type === "file" && part.mediaType.startsWith("image/")).map((part) => part.data);
messages.push({
role: "user",
- content: userText.length > 0 ? userText : [],
+ content: userText.length > 0 ? userText : '',
images: images.length > 0 ? images : void 0
});
break;
@@ -815,9 +815,11 @@ var ollamaProviderOptions = z4.object({
* the model's thinking from the model's output. When disabled, the model will not think
* and directly output the content.
*
+ * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking.
+ *
* Only supported by certain models like DeepSeek R1 and Qwen 3.
*/
- think: z4.boolean().optional(),
+ think: z4.union([z4.boolean(), z4.enum(['low', 'medium', 'high'])]).optional(),
options: z4.object({
num_ctx: z4.number().optional(),
repeat_last_n: z4.number().optional(),
@@ -931,14 +933,16 @@ var OllamaRequestBuilder = class {
prompt,
systemMessageMode: "system"
}),
- temperature,
- top_p: topP,
max_output_tokens: maxOutputTokens,
...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
format: responseFormat.schema != null ? responseFormat.schema : "json"
},
think: (_a = ollamaOptions == null ? void 0 : ollamaOptions.think) != null ? _a : false,
- options: (_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : void 0
+ options: {
+ ...temperature !== void 0 && { temperature },
+ ...topP !== void 0 && { top_p: topP },
+ ...((_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : {})
+ }
};
}
};
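The new patch widens `think` from a boolean to `boolean | 'low' | 'medium' | 'high'` and folds `temperature`/`top_p` into Ollama's `options` object. A hedged usage sketch follows; it assumes the package exposes a `createOllama` factory and that the AI SDK routes these settings through `providerOptions.ollama` (the model id and base URL are placeholders):

```typescript
import { generateText } from 'ai'
import { createOllama } from 'ollama-ai-provider-v2' // factory name assumed

async function main() {
  const ollama = createOllama({ baseURL: 'http://localhost:11434/api' }) // placeholder URL

  const { text } = await generateText({
    model: ollama('gpt-oss:20b'), // placeholder model id
    prompt: 'Summarize the latest release notes in one sentence.',
    temperature: 0.2, // with the patch, forwarded inside Ollama's `options` instead of the top level
    providerOptions: {
      ollama: {
        think: 'high', // patched type: boolean | 'low' | 'medium' | 'high'
        options: { num_ctx: 8192 }
      }
    }
  })

  console.log(text)
}

main()
```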
CLAUDE.md, 11 changed lines

@@ -28,7 +28,7 @@ When creating a Pull Request, you MUST:
 - **Development**: `yarn dev` - Runs Electron app in development mode with hot reload
 - **Debug**: `yarn debug` - Starts with debugging enabled, use `chrome://inspect` to attach debugger
 - **Build Check**: `yarn build:check` - **REQUIRED** before commits (lint + test + typecheck)
-- If having i18n sort issues, run `yarn sync:i18n` first to sync template
+- If having i18n sort issues, run `yarn i18n:sync` first to sync template
 - If having formatting issues, run `yarn format` first
 - **Test**: `yarn test` - Run all tests (Vitest) across main and renderer processes
 - **Single Test**:
@@ -40,20 +40,23 @@ When creating a Pull Request, you MUST:
 ## Project Architecture
 
 ### Electron Structure
 
 - **Main Process** (`src/main/`): Node.js backend with services (MCP, Knowledge, Storage, etc.)
 - **Renderer Process** (`src/renderer/`): React UI with Redux state management
 - **Preload Scripts** (`src/preload/`): Secure IPC bridge
 
 ### Key Components
 
 - **AI Core** (`src/renderer/src/aiCore/`): Middleware pipeline for multiple AI providers.
 - **Services** (`src/main/services/`): MCPService, KnowledgeService, WindowService, etc.
 - **Build System**: Electron-Vite with experimental rolldown-vite, yarn workspaces.
 - **State Management**: Redux Toolkit (`src/renderer/src/store/`) for predictable state.
 
 ### Logging
 
 ```typescript
-import { loggerService } from '@logger'
-const logger = loggerService.withContext('moduleName')
+import { loggerService } from "@logger";
+const logger = loggerService.withContext("moduleName");
 // Renderer: loggerService.initWindowSource('windowName') first
-logger.info('message', CONTEXT)
+logger.info("message", CONTEXT);
 ```
Biome configuration:

@@ -23,7 +23,7 @@
 },
 "files": {
 "ignoreUnknown": false,
-"includes": ["**", "!**/.claude/**", "!**/.vscode/**"],
+"includes": ["**", "!**/.claude/**", "!**/.vscode/**", "!**/.conductor/**"],
 "maxSize": 2097152
 },
 "formatter": {
NSIS installer script (VC++ Redistributable check):

@@ -12,8 +12,13 @@
 
 ; https://github.com/electron-userland/electron-builder/issues/1122
 !ifndef BUILD_UNINSTALLER
+; Check VC++ Redistributable based on architecture stored in $1
 Function checkVCRedist
+${If} $1 == "arm64"
+ReadRegDWORD $0 HKLM "SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\ARM64" "Installed"
+${Else}
 ReadRegDWORD $0 HKLM "SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\x64" "Installed"
+${EndIf}
 FunctionEnd
 
 Function checkArchitectureCompatibility
@@ -97,29 +102,47 @@
 
 Call checkVCRedist
 ${If} $0 != "1"
-MessageBox MB_YESNO "\
-NOTE: ${PRODUCT_NAME} requires $\r$\n\
-'Microsoft Visual C++ Redistributable'$\r$\n\
-to function properly.$\r$\n$\r$\n\
-Download and install now?" /SD IDYES IDYES InstallVCRedist IDNO DontInstall
-InstallVCRedist:
-inetc::get /CAPTION " " /BANNER "Downloading Microsoft Visual C++ Redistributable..." "https://aka.ms/vs/17/release/vc_redist.x64.exe" "$TEMP\vc_redist.x64.exe"
-ExecWait "$TEMP\vc_redist.x64.exe /install /norestart"
-;IfErrors InstallError ContinueInstall ; vc_redist exit code is unreliable :(
-Call checkVCRedist
-${If} $0 == "1"
-Goto ContinueInstall
+; VC++ is required - install automatically since declining would abort anyway
+; Select download URL based on system architecture (stored in $1)
+${If} $1 == "arm64"
+StrCpy $2 "https://aka.ms/vs/17/release/vc_redist.arm64.exe"
+StrCpy $3 "$TEMP\vc_redist.arm64.exe"
+${Else}
+StrCpy $2 "https://aka.ms/vs/17/release/vc_redist.x64.exe"
+StrCpy $3 "$TEMP\vc_redist.x64.exe"
 ${EndIf}
 
-;InstallError:
-MessageBox MB_ICONSTOP "\
-There was an unexpected error installing$\r$\n\
-Microsoft Visual C++ Redistributable.$\r$\n\
-The installation of ${PRODUCT_NAME} cannot continue."
-DontInstall:
+inetc::get /CAPTION " " /BANNER "Downloading Microsoft Visual C++ Redistributable..." \
+$2 $3 /END
+Pop $0 ; Get download status from inetc::get
+${If} $0 != "OK"
+MessageBox MB_ICONSTOP|MB_YESNO "\
+Failed to download Microsoft Visual C++ Redistributable.$\r$\n$\r$\n\
+Error: $0$\r$\n$\r$\n\
+Would you like to open the download page in your browser?$\r$\n\
+$2" IDYES openDownloadUrl IDNO skipDownloadUrl
+openDownloadUrl:
+ExecShell "open" $2
+skipDownloadUrl:
 Abort
 ${EndIf}
-ContinueInstall:
+
+ExecWait "$3 /install /quiet /norestart"
+; Note: vc_redist exit code is unreliable, verify via registry check instead
+
+Call checkVCRedist
+${If} $0 != "1"
+MessageBox MB_ICONSTOP|MB_YESNO "\
+Microsoft Visual C++ Redistributable installation failed.$\r$\n$\r$\n\
+Would you like to open the download page in your browser?$\r$\n\
+$2$\r$\n$\r$\n\
+The installation of ${PRODUCT_NAME} cannot continue." IDYES openInstallUrl IDNO skipInstallUrl
+openInstallUrl:
+ExecShell "open" $2
+skipInstallUrl:
+Abort
+${EndIf}
+${EndIf}
 Pop $4
 Pop $3
 Pop $2
i18n guide (English):

@@ -71,7 +71,7 @@ Tools like i18n Ally cannot parse dynamic content within template strings, resul
 
 ```javascript
 // Not recommended - Plugin cannot resolve
-const message = t(`fruits.${fruit}`)
+const message = t(`fruits.${fruit}`);
 ```
 
 #### 2. **No Real-time Rendering in Editor**
@@ -91,14 +91,14 @@ For example:
 ```ts
 // src/renderer/src/i18n/label.ts
 const themeModeKeyMap = {
-dark: 'settings.theme.dark',
-light: 'settings.theme.light',
-system: 'settings.theme.system'
-} as const
+dark: "settings.theme.dark",
+light: "settings.theme.light",
+system: "settings.theme.system",
+} as const;
 
 export const getThemeModeLabel = (key: string): string => {
-return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key
-}
+return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key;
+};
 ```
 
 By avoiding template strings, you gain better developer experience, more reliable translation checks, and a more maintainable codebase.
@@ -107,7 +107,7 @@ By avoiding template strings, you gain better developer experience, more reliabl
 
 The project includes several scripts to automate i18n-related tasks:
 
-### `check:i18n` - Validate i18n Structure
+### `i18n:check` - Validate i18n Structure
 
 This script checks:
 
@@ -116,10 +116,10 @@ This script checks:
 - Whether keys are properly sorted
 
 ```bash
-yarn check:i18n
+yarn i18n:check
 ```
 
-### `sync:i18n` - Synchronize JSON Structure and Sort Order
+### `i18n:sync` - Synchronize JSON Structure and Sort Order
 
 This script uses `zh-cn.json` as the source of truth to sync structure across all language files, including:
 
@@ -128,14 +128,14 @@ This script uses `zh-cn.json` as the source of truth to sync structure across al
 3. Sorting keys automatically
 
 ```bash
-yarn sync:i18n
+yarn i18n:sync
 ```
 
-### `auto:i18n` - Automatically Translate Pending Texts
+### `i18n:translate` - Automatically Translate Pending Texts
 
 This script fills in texts marked as `[to be translated]` using machine translation.
 
-Typically, after adding new texts in `zh-cn.json`, run `sync:i18n`, then `auto:i18n` to complete translations.
+Typically, after adding new texts in `zh-cn.json`, run `i18n:sync`, then `i18n:translate` to complete translations.
 
 Before using this script, set the required environment variables:
 
@@ -148,30 +148,20 @@ MODEL="qwen-plus-latest"
 Alternatively, add these variables directly to your `.env` file.
 
 ```bash
-yarn auto:i18n
+yarn i18n:translate
-```
-
-### `update:i18n` - Object-level Translation Update
-
-Updates translations in language files under `src/renderer/src/i18n/translate` at the object level, preserving existing translations and only updating new content.
-
-**Not recommended** — prefer `auto:i18n` for translation tasks.
-
-```bash
-yarn update:i18n
 ```
 
 ### Workflow
 
 1. During development, first add the required text in `zh-cn.json`
 2. Confirm it displays correctly in the Chinese environment
-3. Run `yarn sync:i18n` to propagate the keys to other language files
-4. Run `yarn auto:i18n` to perform machine translation
+3. Run `yarn i18n:sync` to propagate the keys to other language files
+4. Run `yarn i18n:translate` to perform machine translation
 5. Grab a coffee and let the magic happen!
 
 ## Best Practices
 
 1. **Use Chinese as Source Language**: All development starts in Chinese, then translates to other languages.
-2. **Run Check Script Before Commit**: Use `yarn check:i18n` to catch i18n issues early.
+2. **Run Check Script Before Commit**: Use `yarn i18n:check` to catch i18n issues early.
 3. **Translate in Small Increments**: Avoid accumulating a large backlog of untranslated content.
 4. **Keep Keys Semantically Clear**: Keys should clearly express their purpose, e.g., `user.profile.avatar.upload.error`
i18n guide (Chinese):

@@ -1,17 +1,17 @@
 # 如何优雅地做好 i18n
 
-## 使用i18n ally插件提升开发体验
+## 使用 i18n ally 插件提升开发体验
 
-i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反馈,帮助开发者更早发现文案缺失和错译问题。
+i18n ally 是一个强大的 VSCode 插件,它能在开发阶段提供实时反馈,帮助开发者更早发现文案缺失和错译问题。
 
 项目中已经配置好了插件设置,直接安装即可。
 
 ### 开发时优势
 
 - **实时预览**:翻译文案会直接显示在编辑器中
-- **错误检测**:自动追踪标记出缺失的翻译或未使用的key
-- **快速跳转**:可通过key直接跳转到定义处(Ctrl/Cmd + click)
-- **自动补全**:输入i18n key时提供自动补全建议
+- **错误检测**:自动追踪标记出缺失的翻译或未使用的 key
+- **快速跳转**:可通过 key 直接跳转到定义处(Ctrl/Cmd + click)
+- **自动补全**:输入 i18n key 时提供自动补全建议
 
 ### 效果展示
 
@@ -23,9 +23,9 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反
 
 ## i18n 约定
 
-### **绝对避免使用flat格式**
+### **绝对避免使用 flat 格式**
 
-绝对避免使用flat格式,如`"add.button.tip": "添加"`。应采用清晰的嵌套结构:
+绝对避免使用 flat 格式,如`"add.button.tip": "添加"`。应采用清晰的嵌套结构:
 
 ```json
 // 错误示例 - flat结构
@@ -52,14 +52,14 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反
 #### 为什么要使用嵌套结构
 
 1. **自然分组**:通过对象结构天然能将相关上下文的文案分到一个组别中
-2. **插件要求**:i18n ally 插件需要嵌套或flat格式其一的文件才能正常分析
+2. **插件要求**:i18n ally 插件需要嵌套或 flat 格式其一的文件才能正常分析
 
 ### **避免在`t()`中使用模板字符串**
 
-**强烈建议避免使用模板字符串**进行动态插值。虽然模板字符串在JavaScript开发中非常方便,但在国际化场景下会带来一系列问题。
+**强烈建议避免使用模板字符串**进行动态插值。虽然模板字符串在 JavaScript 开发中非常方便,但在国际化场景下会带来一系列问题。
 
 1. **插件无法跟踪**
-i18n ally等工具无法解析模板字符串中的动态内容,导致:
+i18n ally 等工具无法解析模板字符串中的动态内容,导致:
 
 - 无法正确显示实时预览
 - 无法检测翻译缺失
@@ -67,11 +67,11 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反
 
 ```javascript
 // 不推荐 - 插件无法解析
-const message = t(`fruits.${fruit}`)
+const message = t(`fruits.${fruit}`);
 ```
 
 2. **编辑器无法实时渲染**
-在IDE中,模板字符串会显示为原始代码而非最终翻译结果,降低了开发体验。
+在 IDE 中,模板字符串会显示为原始代码而非最终翻译结果,降低了开发体验。
 
 3. **更难以维护**
 由于插件无法跟踪这样的文案,编辑器中也无法渲染,开发者必须人工确认语言文件中是否存在相应的文案。
@@ -85,36 +85,36 @@ i18n ally是一个强大的VSCode插件,它能在开发阶段提供实时反
 ```ts
 // src/renderer/src/i18n/label.ts
 const themeModeKeyMap = {
-dark: 'settings.theme.dark',
-light: 'settings.theme.light',
-system: 'settings.theme.system'
-} as const
+dark: "settings.theme.dark",
+light: "settings.theme.light",
+system: "settings.theme.system",
+} as const;
 
 export const getThemeModeLabel = (key: string): string => {
-return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key
-}
+return themeModeKeyMap[key] ? t(themeModeKeyMap[key]) : key;
+};
 ```
 
 通过避免模板字符串,可以获得更好的开发体验、更可靠的翻译检查以及更易维护的代码库。
 
 ## 自动化脚本
 
-项目中有一系列脚本来自动化i18n相关任务:
+项目中有一系列脚本来自动化 i18n 相关任务:
 
-### `check:i18n` - 检查i18n结构
+### `i18n:check` - 检查 i18n 结构
 
 此脚本会检查:
 
 - 所有语言文件是否为嵌套结构
-- 是否存在缺失的key
-- 是否存在多余的key
+- 是否存在缺失的 key
+- 是否存在多余的 key
 - 是否已经有序
 
 ```bash
-yarn check:i18n
+yarn i18n:check
 ```
 
-### `sync:i18n` - 同步json结构与排序
+### `i18n:sync` - 同步 json 结构与排序
 
 此脚本以`zh-cn.json`文件为基准,将结构同步到其他语言文件,包括:
 
@@ -123,14 +123,14 @@ yarn check:i18n
 3. 自动排序
 
 ```bash
-yarn sync:i18n
+yarn i18n:sync
 ```
 
-### `auto:i18n` - 自动翻译待翻译文本
+### `i18n:translate` - 自动翻译待翻译文本
 
 次脚本自动将标记为待翻译的文本通过机器翻译填充。
 
-通常,在`zh-cn.json`中添加所需文案后,执行`sync:i18n`即可自动完成翻译。
+通常,在`zh-cn.json`中添加所需文案后,执行`i18n:sync`即可自动完成翻译。
 
 使用该脚本前,需要配置环境变量,例如:
 
@@ -143,29 +143,19 @@ MODEL="qwen-plus-latest"
 你也可以通过直接编辑`.env`文件来添加环境变量。
 
 ```bash
-yarn auto:i18n
+yarn i18n:translate
-```
-
-### `update:i18n` - 对象级别翻译更新
-
-对`src/renderer/src/i18n/translate`中的语言文件进行对象级别的翻译更新,保留已有翻译,只更新新增内容。
-
-**不建议**使用该脚本,更推荐使用`auto:i18n`进行翻译。
-
-```bash
-yarn update:i18n
 ```
 
 ### 工作流
 
 1. 开发阶段,先在`zh-cn.json`中添加所需文案
-2. 确认在中文环境下显示无误后,使用`yarn sync:i18n`将文案同步到其他语言文件
-3. 使用`yarn auto:i18n`进行自动翻译
+2. 确认在中文环境下显示无误后,使用`yarn i18n:sync`将文案同步到其他语言文件
+3. 使用`yarn i18n:translate`进行自动翻译
 4. 喝杯咖啡,等翻译完成吧!
 
 ## 最佳实践
 
 1. **以中文为源语言**:所有开发首先使用中文,再翻译为其他语言
-2. **提交前运行检查脚本**:使用`yarn check:i18n`检查i18n是否有问题
+2. **提交前运行检查脚本**:使用`yarn i18n:check`检查 i18n 是否有问题
 3. **小步提交翻译**:避免积累大量未翻译文本
-4. **保持key语义明确**:key应能清晰表达其用途,如`user.profile.avatar.upload.error`
+4. **保持 key 语义明确**:key 应能清晰表达其用途,如`user.profile.avatar.upload.error`
electron-builder release notes (releaseInfo):

@@ -134,60 +134,38 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
 releaseInfo:
 releaseNotes: |
 <!--LANG:en-->
-Cherry Studio 1.7.3 - Feature & Stability Update
+Cherry Studio 1.7.6 - New Models & MCP Enhancements
 
-This release brings new features, UI improvements, and important bug fixes.
+This release adds support for new AI models and includes a new MCP server for memory management.
 
 ✨ New Features
-- Add MCP server log viewer for better debugging
-- Support custom Git Bash path configuration
-- Add print to PDF and save as HTML for mini program webviews
-- Add CherryIN API host selection settings
-- Enhance assistant presets with sort and batch delete modes
-- Open URL directly for SelectionAssistant search action
-- Enhance web search tool switching with provider-specific context
+- [Models] Add support for Xiaomi MiMo model
+- [Models] Add support for Gemini 3 Flash and Pro model detection
+- [Models] Add support for Volcengine Doubao-Seed-1.8 model
+- [MCP] Add Nowledge Mem builtin MCP server for memory management
+- [Settings] Add default reasoning effort option to resolve confusion between undefined and none
 
-🔧 Improvements
-- Remove Intel Ultra limit for OVMS
-- Improve settings tab and assistant item UI
-
 🐛 Bug Fixes
-- Fix stack overflow with base64 images
-- Fix infinite loop in knowledge queue processing
-- Fix quick panel closing in multiple selection mode
-- Fix thinking timer not stopping when reply is aborted
-- Fix ThinkingButton icon display for fixed reasoning mode
-- Fix knowledge query prioritization and intent prompt
-- Fix OpenRouter embeddings support
-- Fix SelectionAction window resize on Windows
-- Add gpustack provider support for qwen3 thinking mode
+- [Azure] Restore deployment-based URLs for non-v1 apiVersion
+- [Translation] Disable reasoning mode for translation to improve efficiency
+- [Image] Update API path for image generation requests in OpenAIBaseClient
+- [Windows] Auto-discover and persist Git Bash path on Windows for scoop users
 
 <!--LANG:zh-CN-->
-Cherry Studio 1.7.3 - 功能与稳定性更新
+Cherry Studio 1.7.6 - 新模型与 MCP 增强
 
-本次更新带来新功能、界面改进和重要的问题修复。
+本次更新添加了多个新 AI 模型支持,并新增记忆管理 MCP 服务器。
 
 ✨ 新功能
-- 新增 MCP 服务器日志查看器,便于调试
-- 支持自定义 Git Bash 路径配置
-- 小程序 webview 支持打印 PDF 和保存为 HTML
-- 新增 CherryIN API 主机选择设置
-- 助手预设增强:支持排序和批量删除模式
-- 划词助手搜索操作直接打开 URL
-- 增强网页搜索工具切换逻辑,支持服务商特定上下文
+- [模型] 添加小米 MiMo 模型支持
+- [模型] 添加 Gemini 3 Flash 和 Pro 模型检测支持
+- [模型] 添加火山引擎 Doubao-Seed-1.8 模型支持
+- [MCP] 新增 Nowledge Mem 内置 MCP 服务器,用于记忆管理
+- [设置] 添加默认推理强度选项,解决 undefined 和 none 之间的混淆
 
-🔧 功能改进
-- 移除 OVMS 的 Intel Ultra 限制
-- 优化设置标签页和助手项目 UI
-
 🐛 问题修复
-- 修复 base64 图片导致的栈溢出问题
-- 修复知识库队列处理的无限循环问题
-- 修复多选模式下快捷面板意外关闭的问题
-- 修复回复中止时思考计时器未停止的问题
-- 修复固定推理模式下思考按钮图标显示问题
-- 修复知识库查询优先级和意图提示
-- 修复 OpenRouter 嵌入模型支持
-- 修复 Windows 上划词助手窗口大小调整问题
-- 为 gpustack 服务商添加 qwen3 思考模式支持
+- [Azure] 修复非 v1 apiVersion 的部署 URL 问题
+- [翻译] 禁用翻译时的推理模式以提高效率
+- [图像] 更新 OpenAIBaseClient 中图像生成请求的 API 路径
+- [Windows] 自动发现并保存 Windows scoop 用户的 Git Bash 路径
 <!--LANG:END-->
ESLint configuration (ignore list):

@@ -61,6 +61,7 @@ export default defineConfig([
 'tests/**',
 '.yarn/**',
 '.gitignore',
+'.conductor/**',
 'scripts/cloudflare-worker.js',
 'src/main/integration/nutstore/sso/lib/**',
 'src/main/integration/cherryai/index.js',
package.json, 27 changed lines

@@ -1,6 +1,6 @@
 {
 "name": "CherryStudio",
-"version": "1.7.3",
+"version": "1.7.6",
 "private": true,
 "description": "A powerful AI assistant for producer.",
 "main": "./out/main/index.js",
@@ -53,10 +53,10 @@
 "typecheck": "concurrently -n \"node,web\" -c \"cyan,magenta\" \"npm run typecheck:node\" \"npm run typecheck:web\"",
 "typecheck:node": "tsgo --noEmit -p tsconfig.node.json --composite false",
 "typecheck:web": "tsgo --noEmit -p tsconfig.web.json --composite false",
-"check:i18n": "dotenv -e .env -- tsx scripts/check-i18n.ts",
-"sync:i18n": "dotenv -e .env -- tsx scripts/sync-i18n.ts",
-"update:i18n": "dotenv -e .env -- tsx scripts/update-i18n.ts",
-"auto:i18n": "dotenv -e .env -- tsx scripts/auto-translate-i18n.ts",
+"i18n:check": "dotenv -e .env -- tsx scripts/check-i18n.ts",
+"i18n:sync": "dotenv -e .env -- tsx scripts/sync-i18n.ts",
+"i18n:translate": "dotenv -e .env -- tsx scripts/auto-translate-i18n.ts",
+"i18n:all": "yarn i18n:check && yarn i18n:sync && yarn i18n:translate",
 "update:languages": "tsx scripts/update-languages.ts",
 "update:upgrade-config": "tsx scripts/update-app-upgrade-config.ts",
 "test": "vitest run --silent",
@@ -70,7 +70,7 @@
 "test:e2e": "yarn playwright test",
 "test:lint": "oxlint --deny-warnings && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --cache",
 "test:scripts": "vitest scripts",
-"lint": "oxlint --fix && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix --cache && yarn typecheck && yarn check:i18n && yarn format:check",
+"lint": "oxlint --fix && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix --cache && yarn typecheck && yarn i18n:check && yarn format:check",
 "format": "biome format --write && biome lint --write",
 "format:check": "biome format && biome lint",
 "prepare": "git config blame.ignoreRevsFile .git-blame-ignore-revs && husky",
@@ -114,11 +114,11 @@
 "@ai-sdk/anthropic": "^2.0.49",
 "@ai-sdk/cerebras": "^1.0.31",
 "@ai-sdk/gateway": "^2.0.15",
-"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.43#~/.yarn/patches/@ai-sdk-google-npm-2.0.43-689ed559b3.patch",
-"@ai-sdk/google-vertex": "^3.0.79",
+"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.49#~/.yarn/patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch",
+"@ai-sdk/google-vertex": "^3.0.94",
 "@ai-sdk/huggingface": "^0.0.10",
 "@ai-sdk/mistral": "^2.0.24",
-"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
+"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
 "@ai-sdk/perplexity": "^2.0.20",
 "@ai-sdk/test-server": "^0.0.1",
 "@ant-design/v5-patch-for-react-19": "^1.0.3",
@@ -142,7 +142,7 @@
 "@cherrystudio/embedjs-ollama": "^0.1.31",
 "@cherrystudio/embedjs-openai": "^0.1.31",
 "@cherrystudio/extension-table-plus": "workspace:^",
-"@cherrystudio/openai": "^6.9.0",
+"@cherrystudio/openai": "^6.12.0",
 "@dnd-kit/core": "^6.3.1",
 "@dnd-kit/modifiers": "^9.0.0",
 "@dnd-kit/sortable": "^10.0.0",
@@ -318,7 +318,7 @@
 "motion": "^12.10.5",
 "notion-helper": "^1.3.22",
 "npx-scope-finder": "^1.2.0",
-"ollama-ai-provider-v2": "^1.5.5",
+"ollama-ai-provider-v2": "patch:ollama-ai-provider-v2@npm%3A1.5.5#~/.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch",
 "oxlint": "^1.22.0",
 "oxlint-tsgolint": "^0.2.0",
 "p-queue": "^8.1.0",
@@ -414,9 +414,10 @@
 "@langchain/openai@npm:>=0.1.0 <0.6.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
 "@langchain/openai@npm:^0.3.16": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
 "@langchain/openai@npm:>=0.2.0 <0.7.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
-"@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.72#~/.yarn/patches/@ai-sdk-openai-npm-2.0.72-234e68da87.patch",
+"@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
 "@ai-sdk/google@npm:^2.0.40": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
-"@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
+"@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
+"@ai-sdk/google@npm:2.0.49": "patch:@ai-sdk/google@npm%3A2.0.49#~/.yarn/patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch"
 },
 "packageManager": "yarn@4.9.1",
 "lint-staged": {
Workspace package dependencies:

@@ -40,7 +40,7 @@
 },
 "dependencies": {
 "@ai-sdk/anthropic": "^2.0.49",
-"@ai-sdk/azure": "^2.0.74",
+"@ai-sdk/azure": "^2.0.87",
 "@ai-sdk/deepseek": "^1.0.31",
 "@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
 "@ai-sdk/provider": "^2.0.0",
StreamEventManager:

@@ -62,7 +62,7 @@ export class StreamEventManager {
 const recursiveResult = await context.recursiveCall(recursiveParams)
 
 if (recursiveResult && recursiveResult.fullStream) {
-await this.pipeRecursiveStream(controller, recursiveResult.fullStream, context)
+await this.pipeRecursiveStream(controller, recursiveResult.fullStream)
 } else {
 console.warn('[MCP Prompt] No fullstream found in recursive result:', recursiveResult)
 }
@@ -74,11 +74,7 @@ export class StreamEventManager {
 /**
 * 将递归流的数据传递到当前流
 */
-private async pipeRecursiveStream(
-controller: StreamController,
-recursiveStream: ReadableStream,
-context?: AiRequestContext
-): Promise<void> {
+private async pipeRecursiveStream(controller: StreamController, recursiveStream: ReadableStream): Promise<void> {
 const reader = recursiveStream.getReader()
 try {
 while (true) {
@@ -86,18 +82,14 @@ export class StreamEventManager {
 if (done) {
 break
 }
-if (value.type === 'finish') {
-// 迭代的流不发finish,但需要累加其 usage
-if (value.usage && context?.accumulatedUsage) {
-this.accumulateUsage(context.accumulatedUsage, value.usage)
+if (value.type === 'start') {
+continue
 }
 
+if (value.type === 'finish') {
 break
 }
-// 对于 finish-step 类型,累加其 usage
-if (value.type === 'finish-step' && value.usage && context?.accumulatedUsage) {
-this.accumulateUsage(context.accumulatedUsage, value.usage)
-}
-// 将递归流的数据传递到当前流
 controller.enqueue(value)
 }
 } finally {
@@ -159,7 +151,7 @@ export class StreamEventManager {
 /**
 * 累加 usage 数据
 */
-private accumulateUsage(target: any, source: any): void {
+accumulateUsage(target: any, source: any): void {
 if (!target || !source) return
 
 // 累加各种 token 类型
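With `accumulateUsage` made public, the accumulation of per-step token usage moves out of `pipeRecursiveStream` and into callers such as the prompt-tool-use plugin in the next hunk. A minimal sketch of that accumulation pattern, with field names taken from the usual AI SDK usage shape (an assumption, not the project's exact type):

```typescript
// Sketch only: the real method is typed `accumulateUsage(target: any, source: any)`.
type UsageLike = { inputTokens?: number; outputTokens?: number; totalTokens?: number }

function accumulateUsage(target: UsageLike, source: UsageLike): void {
  if (!target || !source) return
  // Add each token counter onto the running total, treating missing values as 0.
  target.inputTokens = (target.inputTokens ?? 0) + (source.inputTokens ?? 0)
  target.outputTokens = (target.outputTokens ?? 0) + (source.outputTokens ?? 0)
  target.totalTokens = (target.totalTokens ?? 0) + (source.totalTokens ?? 0)
}
```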
@@ -411,7 +411,10 @@ export const createPromptToolUsePlugin = (config: PromptToolUseConfig = {}) => {
            }
          }

-         // If no tool call was executed, pass the original finish-step event straight through
+         // If no tool call was executed, accumulate usage and then pass the finish-step event through
+         if (chunk.usage && context.accumulatedUsage) {
+           streamEventManager.accumulateUsage(context.accumulatedUsage, chunk.usage)
+         }
          controller.enqueue(chunk)

          // Clean up state
@@ -244,6 +244,7 @@ export enum IpcChannel {
  System_GetCpuName = 'system:getCpuName',
  System_CheckGitBash = 'system:checkGitBash',
  System_GetGitBashPath = 'system:getGitBashPath',
+ System_GetGitBashPathInfo = 'system:getGitBashPathInfo',
  System_SetGitBashPath = 'system:setGitBashPath',

  // DevTools
@@ -488,3 +488,11 @@ export const MACOS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [

// resources/scripts should be maintained manually
export const HOME_CHERRY_DIR = '.cherrystudio'
+
+// Git Bash path configuration types
+export type GitBashPathSource = 'manual' | 'auto'
+
+export interface GitBashPathInfo {
+  path: string | null
+  source: GitBashPathSource | null
+}
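For illustration only (not part of the diff), the two states this type is meant to capture; the import path is a guess at where the type lives.

import type { GitBashPathInfo } from '@shared/config/constant' // import path is an assumption

// A path the user set by hand
const manual: GitBashPathInfo = { path: 'C:\\Program Files\\Git\\bin\\bash.exe', source: 'manual' }
// Nothing configured or discovered yet
const unset: GitBashPathInfo = { path: null, source: null }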
@@ -50,7 +50,7 @@ Usage Instructions:
  - pt-pt (Portuguese)

  Run Command:
- yarn auto:i18n
+ yarn i18n:translate

  Performance Optimization Recommendations:
  - For stable API services: MAX_CONCURRENT_TRANSLATIONS=8, TRANSLATION_DELAY_MS=50
@@ -145,7 +145,7 @@ export function main() {
    console.log('i18n 检查已通过')
  } catch (e) {
    console.error(e)
-   throw new Error(`检查未通过。尝试运行 yarn sync:i18n 以解决问题。`)
+   throw new Error(`检查未通过。尝试运行 yarn i18n:sync 以解决问题。`)
  }
}

@@ -6,7 +6,14 @@ import { loggerService } from '@logger'
import { isLinux, isMac, isPortable, isWin } from '@main/constant'
import { generateSignature } from '@main/integration/cherryai'
import anthropicService from '@main/services/AnthropicService'
-import { findGitBash, getBinaryPath, isBinaryExists, runInstallScript, validateGitBashPath } from '@main/utils/process'
+import {
+  autoDiscoverGitBash,
+  getBinaryPath,
+  getGitBashPathInfo,
+  isBinaryExists,
+  runInstallScript,
+  validateGitBashPath
+} from '@main/utils/process'
import { handleZoomFactor } from '@main/utils/zoom'
import type { SpanEntity, TokenUsage } from '@mcp-trace/trace-core'
import type { UpgradeChannel } from '@shared/config/constant'
@@ -499,9 +506,8 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
    }

    try {
-     const customPath = configManager.get(ConfigKeys.GitBashPath) as string | undefined
-     const bashPath = findGitBash(customPath)
+     // Use autoDiscoverGitBash to handle auto-discovery and persistence
+     const bashPath = autoDiscoverGitBash()

      if (bashPath) {
        logger.info('Git Bash is available', { path: bashPath })
        return true
@@ -524,13 +530,22 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
    return customPath ?? null
  })

+ // Returns { path, source } where source is 'manual' | 'auto' | null
+ ipcMain.handle(IpcChannel.System_GetGitBashPathInfo, () => {
+   return getGitBashPathInfo()
+ })
+
  ipcMain.handle(IpcChannel.System_SetGitBashPath, (_, newPath: string | null) => {
    if (!isWin) {
      return false
    }

    if (!newPath) {
+     // Clear manual setting and re-run auto-discovery
      configManager.set(ConfigKeys.GitBashPath, null)
+     configManager.set(ConfigKeys.GitBashPathSource, null)
+     // Re-run auto-discovery to restore auto-discovered path if available
+     autoDiscoverGitBash()
      return true
    }

@@ -539,7 +554,9 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
      return false
    }

+   // Set path with 'manual' source
    configManager.set(ConfigKeys.GitBashPath, validated)
+   configManager.set(ConfigKeys.GitBashPathSource, 'manual')
    return true
  })

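A minimal renderer-side sketch of calling the new handlers through ipcRenderer.invoke. The channel names come from the diff above; the import paths and helper names here are assumptions, not part of this change.

// Hypothetical renderer helper; only the IPC channel names are taken from the diff.
import { ipcRenderer } from 'electron'

import { IpcChannel } from '@shared/IpcChannel' // assumed import path
import type { GitBashPathInfo } from '@shared/config/constant' // assumed import path

export async function readGitBashConfig(): Promise<GitBashPathInfo> {
  // Resolves to { path, source } where source is 'manual' | 'auto' | null
  return ipcRenderer.invoke(IpcChannel.System_GetGitBashPathInfo)
}

export async function setGitBashPath(newPath: string | null): Promise<boolean> {
  // Passing null clears the manual override and re-runs auto-discovery
  return ipcRenderer.invoke(IpcChannel.System_SetGitBashPath, newPath)
}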
134 src/main/mcpServers/__tests__/browser.test.ts Normal file
@@ -0,0 +1,134 @@
import { describe, expect, it, vi } from 'vitest'

vi.mock('electron', () => {
  const sendCommand = vi.fn(async (command: string, params?: { expression?: string }) => {
    if (command === 'Runtime.evaluate') {
      if (params?.expression === 'document.documentElement.outerHTML') {
        return { result: { value: '<html><body><h1>Test</h1><p>Content</p></body></html>' } }
      }
      if (params?.expression === 'document.body.innerText') {
        return { result: { value: 'Test\nContent' } }
      }
      return { result: { value: 'ok' } }
    }
    return {}
  })

  const debuggerObj = {
    isAttached: vi.fn(() => true),
    attach: vi.fn(),
    detach: vi.fn(),
    sendCommand
  }

  const webContents = {
    debugger: debuggerObj,
    setUserAgent: vi.fn(),
    getURL: vi.fn(() => 'https://example.com/'),
    getTitle: vi.fn(async () => 'Example Title'),
    once: vi.fn(),
    removeListener: vi.fn(),
    on: vi.fn()
  }

  const loadURL = vi.fn(async () => {})

  const windows: any[] = []

  class MockBrowserWindow {
    private destroyed = false
    public webContents = webContents
    public loadURL = loadURL
    public isDestroyed = vi.fn(() => this.destroyed)
    public close = vi.fn(() => {
      this.destroyed = true
    })
    public destroy = vi.fn(() => {
      this.destroyed = true
    })
    public on = vi.fn()

    constructor() {
      windows.push(this)
    }
  }

  const app = {
    isReady: vi.fn(() => true),
    whenReady: vi.fn(async () => {}),
    on: vi.fn()
  }

  return {
    BrowserWindow: MockBrowserWindow as any,
    app,
    __mockDebugger: debuggerObj,
    __mockSendCommand: sendCommand,
    __mockLoadURL: loadURL,
    __mockWindows: windows
  }
})

import * as electron from 'electron'

const { __mockWindows } = electron as typeof electron & { __mockWindows: any[] }

import { CdpBrowserController } from '../browser'

describe('CdpBrowserController', () => {
  it('executes single-line code via Runtime.evaluate', async () => {
    const controller = new CdpBrowserController()
    const result = await controller.execute('1+1')
    expect(result).toBe('ok')
  })

  it('opens a URL (hidden) and returns current page info', async () => {
    const controller = new CdpBrowserController()
    const result = await controller.open('https://foo.bar/', 5000, false)
    expect(result.currentUrl).toBe('https://example.com/')
    expect(result.title).toBe('Example Title')
  })

  it('opens a URL (visible) when show=true', async () => {
    const controller = new CdpBrowserController()
    const result = await controller.open('https://foo.bar/', 5000, true, 'session-a')
    expect(result.currentUrl).toBe('https://example.com/')
    expect(result.title).toBe('Example Title')
  })

  it('reuses session for execute and supports multiline', async () => {
    const controller = new CdpBrowserController()
    await controller.open('https://foo.bar/', 5000, false, 'session-b')
    const result = await controller.execute('const a=1; const b=2; a+b;', 5000, 'session-b')
    expect(result).toBe('ok')
  })

  it('evicts least recently used session when exceeding maxSessions', async () => {
    const controller = new CdpBrowserController({ maxSessions: 2, idleTimeoutMs: 1000 * 60 })
    await controller.open('https://foo.bar/', 5000, false, 's1')
    await controller.open('https://foo.bar/', 5000, false, 's2')
    await controller.open('https://foo.bar/', 5000, false, 's3')
    const destroyedCount = __mockWindows.filter(
      (w: any) => w.destroy.mock.calls.length > 0 || w.close.mock.calls.length > 0
    ).length
    expect(destroyedCount).toBeGreaterThanOrEqual(1)
  })

  it('fetches URL and returns html format', async () => {
    const controller = new CdpBrowserController()
    const result = await controller.fetch('https://example.com/', 'html')
    expect(result).toBe('<html><body><h1>Test</h1><p>Content</p></body></html>')
  })

  it('fetches URL and returns txt format', async () => {
    const controller = new CdpBrowserController()
    const result = await controller.fetch('https://example.com/', 'txt')
    expect(result).toBe('Test\nContent')
  })

  it('fetches URL and returns markdown format (default)', async () => {
    const controller = new CdpBrowserController()
    const result = await controller.fetch('https://example.com/')
    expect(typeof result).toBe('string')
    expect(result).toContain('Test')
  })
})
307 src/main/mcpServers/browser/controller.ts Normal file
@@ -0,0 +1,307 @@
import { app, BrowserWindow } from 'electron'
import TurndownService from 'turndown'

import { logger, userAgent } from './types'

/**
 * Controller for managing browser windows via Chrome DevTools Protocol (CDP).
 * Supports multiple sessions with LRU eviction and idle timeout cleanup.
 */
export class CdpBrowserController {
  private windows: Map<string, { win: BrowserWindow; lastActive: number }> = new Map()
  private readonly maxSessions: number
  private readonly idleTimeoutMs: number

  constructor(options?: { maxSessions?: number; idleTimeoutMs?: number }) {
    this.maxSessions = options?.maxSessions ?? 5
    this.idleTimeoutMs = options?.idleTimeoutMs ?? 5 * 60 * 1000
  }

  private async ensureAppReady() {
    if (!app.isReady()) {
      await app.whenReady()
    }
  }

  private touch(sessionId: string) {
    const entry = this.windows.get(sessionId)
    if (entry) entry.lastActive = Date.now()
  }

  private closeWindow(win: BrowserWindow, sessionId: string) {
    try {
      if (!win.isDestroyed()) {
        if (win.webContents.debugger.isAttached()) {
          win.webContents.debugger.detach()
        }
        win.close()
      }
    } catch (error) {
      logger.warn('Error closing window', { error, sessionId })
    }
  }

  private async ensureDebuggerAttached(dbg: Electron.Debugger, sessionId: string) {
    if (!dbg.isAttached()) {
      try {
        logger.info('Attaching debugger', { sessionId })
        dbg.attach('1.3')
        await dbg.sendCommand('Page.enable')
        await dbg.sendCommand('Runtime.enable')
        logger.info('Debugger attached and domains enabled')
      } catch (error) {
        logger.error('Failed to attach debugger', { error })
        throw error
      }
    }
  }

  private sweepIdle() {
    const now = Date.now()
    for (const [id, entry] of this.windows.entries()) {
      if (now - entry.lastActive > this.idleTimeoutMs) {
        this.closeWindow(entry.win, id)
        this.windows.delete(id)
      }
    }
  }

  private evictIfNeeded(newSessionId: string) {
    if (this.windows.size < this.maxSessions) return
    let lruId: string | null = null
    let lruTime = Number.POSITIVE_INFINITY
    for (const [id, entry] of this.windows.entries()) {
      if (id === newSessionId) continue
      if (entry.lastActive < lruTime) {
        lruTime = entry.lastActive
        lruId = id
      }
    }
    if (lruId) {
      const entry = this.windows.get(lruId)
      if (entry) {
        this.closeWindow(entry.win, lruId)
      }
      this.windows.delete(lruId)
      logger.info('Evicted session to respect maxSessions', { evicted: lruId })
    }
  }

  private async getWindow(sessionId = 'default', forceNew = false, show = false): Promise<BrowserWindow> {
    await this.ensureAppReady()

    this.sweepIdle()

    const existing = this.windows.get(sessionId)
    if (existing && !existing.win.isDestroyed() && !forceNew) {
      this.touch(sessionId)
      return existing.win
    }

    if (existing && !existing.win.isDestroyed() && forceNew) {
      try {
        if (existing.win.webContents.debugger.isAttached()) {
          existing.win.webContents.debugger.detach()
        }
      } catch (error) {
        logger.warn('Error detaching debugger before recreate', { error, sessionId })
      }
      existing.win.destroy()
      this.windows.delete(sessionId)
    }

    this.evictIfNeeded(sessionId)

    const win = new BrowserWindow({
      show,
      webPreferences: {
        contextIsolation: true,
        sandbox: true,
        nodeIntegration: false,
        devTools: true
      }
    })

    // Use a standard Chrome UA to avoid some anti-bot blocks
    win.webContents.setUserAgent(userAgent)

    // Log navigation lifecycle to help diagnose slow loads
    win.webContents.on('did-start-loading', () => logger.info(`did-start-loading`, { sessionId }))
    win.webContents.on('dom-ready', () => logger.info(`dom-ready`, { sessionId }))
    win.webContents.on('did-finish-load', () => logger.info(`did-finish-load`, { sessionId }))
    win.webContents.on('did-fail-load', (_e, code, desc) => logger.warn('Navigation failed', { code, desc }))

    win.on('closed', () => {
      this.windows.delete(sessionId)
    })

    this.windows.set(sessionId, { win, lastActive: Date.now() })
    return win
  }

  /**
   * Opens a URL in a browser window and waits for navigation to complete.
   * @param url - The URL to navigate to
   * @param timeout - Navigation timeout in milliseconds (default: 10000)
   * @param show - Whether to show the browser window (default: false)
   * @param sessionId - Session identifier for window reuse (default: 'default')
   * @returns Object containing the current URL and page title after navigation
   */
  public async open(url: string, timeout = 10000, show = false, sessionId = 'default') {
    const win = await this.getWindow(sessionId, true, show)
    logger.info('Loading URL', { url, sessionId })
    const { webContents } = win
    this.touch(sessionId)

    // Track resolution state to prevent multiple handlers from firing
    let resolved = false
    let onFinish: () => void
    let onDomReady: () => void
    let onFail: (_event: Electron.Event, code: number, desc: string) => void

    // Define cleanup outside Promise to ensure it's callable in finally block,
    // preventing memory leaks when timeout occurs before navigation completes
    const cleanup = () => {
      webContents.removeListener('did-finish-load', onFinish)
      webContents.removeListener('did-fail-load', onFail)
      webContents.removeListener('dom-ready', onDomReady)
    }

    const loadPromise = new Promise<void>((resolve, reject) => {
      onFinish = () => {
        if (resolved) return
        resolved = true
        cleanup()
        resolve()
      }
      onDomReady = () => {
        if (resolved) return
        resolved = true
        cleanup()
        resolve()
      }
      onFail = (_event: Electron.Event, code: number, desc: string) => {
        if (resolved) return
        resolved = true
        cleanup()
        reject(new Error(`Navigation failed (${code}): ${desc}`))
      }
      webContents.once('did-finish-load', onFinish)
      webContents.once('dom-ready', onDomReady)
      webContents.once('did-fail-load', onFail)
    })

    const timeoutPromise = new Promise<void>((_, reject) => {
      setTimeout(() => reject(new Error('Navigation timed out')), timeout)
    })

    try {
      await Promise.race([win.loadURL(url), loadPromise, timeoutPromise])
    } finally {
      // Always cleanup listeners to prevent memory leaks on timeout
      cleanup()
    }

    const currentUrl = webContents.getURL()
    const title = await webContents.getTitle()
    return { currentUrl, title }
  }

  public async execute(code: string, timeout = 5000, sessionId = 'default') {
    const win = await this.getWindow(sessionId)
    this.touch(sessionId)
    const dbg = win.webContents.debugger

    await this.ensureDebuggerAttached(dbg, sessionId)

    const evalPromise = dbg.sendCommand('Runtime.evaluate', {
      expression: code,
      awaitPromise: true,
      returnByValue: true
    })

    const result = await Promise.race([
      evalPromise,
      new Promise((_, reject) => setTimeout(() => reject(new Error('Execution timed out')), timeout))
    ])

    const evalResult = result as any

    if (evalResult?.exceptionDetails) {
      const message = evalResult.exceptionDetails.exception?.description || 'Unknown script error'
      logger.warn('Runtime.evaluate raised exception', { message })
      throw new Error(message)
    }

    const value = evalResult?.result?.value ?? evalResult?.result?.description ?? null
    return value
  }

  public async reset(sessionId?: string) {
    if (sessionId) {
      const entry = this.windows.get(sessionId)
      if (entry) {
        this.closeWindow(entry.win, sessionId)
      }
      this.windows.delete(sessionId)
      logger.info('Browser CDP context reset', { sessionId })
      return
    }

    for (const [id, entry] of this.windows.entries()) {
      this.closeWindow(entry.win, id)
      this.windows.delete(id)
    }
    logger.info('Browser CDP context reset (all sessions)')
  }

  /**
   * Fetches a URL and returns content in the specified format.
   * @param url - The URL to fetch
   * @param format - Output format: 'html', 'txt', 'markdown', or 'json' (default: 'markdown')
   * @param timeout - Navigation timeout in milliseconds (default: 10000)
   * @param sessionId - Session identifier (default: 'default')
   * @returns Content in the requested format. For 'json', returns parsed object or { data: rawContent } if parsing fails
   */
  public async fetch(
    url: string,
    format: 'html' | 'txt' | 'markdown' | 'json' = 'markdown',
    timeout = 10000,
    sessionId = 'default'
  ) {
    await this.open(url, timeout, false, sessionId)

    const win = await this.getWindow(sessionId)
    const dbg = win.webContents.debugger

    await this.ensureDebuggerAttached(dbg, sessionId)

    let expression: string
    if (format === 'json' || format === 'txt') {
      expression = 'document.body.innerText'
    } else {
      expression = 'document.documentElement.outerHTML'
    }

    const result = (await dbg.sendCommand('Runtime.evaluate', {
      expression,
      returnByValue: true
    })) as { result?: { value?: string } }

    const content = result?.result?.value ?? ''

    if (format === 'markdown') {
      const turndownService = new TurndownService()
      return turndownService.turndown(content)
    }
    if (format === 'json') {
      // Attempt to parse as JSON; if content is not valid JSON, wrap it in a data object
      try {
        return JSON.parse(content)
      } catch {
        return { data: content }
      }
    }
    return content
  }
}
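A minimal usage sketch of the controller above, based on the method signatures in this file; the snippet itself is illustrative main-process code and not part of the change.

import { CdpBrowserController } from './controller'

async function demo() {
  const browser = new CdpBrowserController({ maxSessions: 2 })
  // Navigate a hidden window and read back where we landed
  const { currentUrl, title } = await browser.open('https://example.com/', 10000, false, 'demo')
  // Evaluate a one-liner in the page via Runtime.evaluate
  const linkCount = await browser.execute('document.querySelectorAll("a").length', 5000, 'demo')
  // Grab the page as markdown, then tear the session down
  const markdown = await browser.fetch('https://example.com/', 'markdown', 10000, 'demo')
  await browser.reset('demo')
  return { currentUrl, title, linkCount, markdown }
}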
3 src/main/mcpServers/browser/index.ts Normal file
@@ -0,0 +1,3 @@
export { CdpBrowserController } from './controller'
export { BrowserServer } from './server'
export { BrowserServer as default } from './server'
50 src/main/mcpServers/browser/server.ts Normal file
@@ -0,0 +1,50 @@
import type { Server } from '@modelcontextprotocol/sdk/server/index.js'
import { Server as MCServer } from '@modelcontextprotocol/sdk/server/index.js'
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
import { app } from 'electron'

import { CdpBrowserController } from './controller'
import { toolDefinitions, toolHandlers } from './tools'

export class BrowserServer {
  public server: Server
  private controller = new CdpBrowserController()

  constructor() {
    const server = new MCServer(
      {
        name: '@cherry/browser',
        version: '0.1.0'
      },
      {
        capabilities: {
          resources: {},
          tools: {}
        }
      }
    )

    server.setRequestHandler(ListToolsRequestSchema, async () => {
      return {
        tools: toolDefinitions
      }
    })

    server.setRequestHandler(CallToolRequestSchema, async (request) => {
      const { name, arguments: args } = request.params
      const handler = toolHandlers[name]
      if (!handler) {
        throw new Error('Tool not found')
      }
      return handler(this.controller, args)
    })

    app.on('before-quit', () => {
      void this.controller.reset()
    })

    this.server = server
  }
}

export default BrowserServer
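To see the request flow end to end, here is a rough sketch of driving this server over the MCP SDK's in-memory transport. The client-side API calls are my assumption of the SDK surface and are not part of the diff.

import { Client } from '@modelcontextprotocol/sdk/client/index.js'
import { InMemoryTransport } from '@modelcontextprotocol/sdk/inMemory.js'

import { BrowserServer } from './server'

async function listAndCall() {
  const { server } = new BrowserServer()
  // Linked in-memory transports: one end for the server, one for the client
  const [clientTransport, serverTransport] = InMemoryTransport.createLinkedPair()
  await server.connect(serverTransport)

  const client = new Client({ name: 'demo-client', version: '0.0.1' })
  await client.connect(clientTransport)

  const { tools } = await client.listTools() // expected: open, execute, reset, fetch
  const result = await client.callTool({ name: 'fetch', arguments: { url: 'https://example.com/', format: 'txt' } })
  return { tools, result }
}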
48 src/main/mcpServers/browser/tools/execute.ts Normal file
@@ -0,0 +1,48 @@
import * as z from 'zod'

import type { CdpBrowserController } from '../controller'
import { errorResponse, successResponse } from './utils'

export const ExecuteSchema = z.object({
  code: z
    .string()
    .describe(
      'JavaScript evaluated via Chrome DevTools Runtime.evaluate. Keep it short; prefer one-line with semicolons for multiple statements.'
    ),
  timeout: z.number().default(5000).describe('Timeout in milliseconds for code execution (default: 5000ms)'),
  sessionId: z.string().optional().describe('Session identifier to target a specific page (default: default)')
})

export const executeToolDefinition = {
  name: 'execute',
  description:
    'Run JavaScript in the current page via Runtime.evaluate. Prefer short, single-line snippets; use semicolons for multiple statements.',
  inputSchema: {
    type: 'object',
    properties: {
      code: {
        type: 'string',
        description: 'One-line JS to evaluate in page context'
      },
      timeout: {
        type: 'number',
        description: 'Timeout in milliseconds (default 5000)'
      },
      sessionId: {
        type: 'string',
        description: 'Session identifier; targets a specific page (default: default)'
      }
    },
    required: ['code']
  }
}

export async function handleExecute(controller: CdpBrowserController, args: unknown) {
  const { code, timeout, sessionId } = ExecuteSchema.parse(args)
  try {
    const value = await controller.execute(code, timeout, sessionId ?? 'default')
    return successResponse(typeof value === 'string' ? value : JSON.stringify(value))
  } catch (error) {
    return errorResponse(error as Error)
  }
}
49 src/main/mcpServers/browser/tools/fetch.ts Normal file
@@ -0,0 +1,49 @@
import * as z from 'zod'

import type { CdpBrowserController } from '../controller'
import { errorResponse, successResponse } from './utils'

export const FetchSchema = z.object({
  url: z.url().describe('URL to fetch'),
  format: z.enum(['html', 'txt', 'markdown', 'json']).default('markdown').describe('Output format (default: markdown)'),
  timeout: z.number().optional().describe('Timeout in milliseconds for navigation (default: 10000)'),
  sessionId: z.string().optional().describe('Session identifier (default: default)')
})

export const fetchToolDefinition = {
  name: 'fetch',
  description: 'Fetch a URL using the browser and return content in specified format (html, txt, markdown, json)',
  inputSchema: {
    type: 'object',
    properties: {
      url: {
        type: 'string',
        description: 'URL to fetch'
      },
      format: {
        type: 'string',
        enum: ['html', 'txt', 'markdown', 'json'],
        description: 'Output format (default: markdown)'
      },
      timeout: {
        type: 'number',
        description: 'Navigation timeout in milliseconds (default: 10000)'
      },
      sessionId: {
        type: 'string',
        description: 'Session identifier (default: default)'
      }
    },
    required: ['url']
  }
}

export async function handleFetch(controller: CdpBrowserController, args: unknown) {
  const { url, format, timeout, sessionId } = FetchSchema.parse(args)
  try {
    const content = await controller.fetch(url, format, timeout ?? 10000, sessionId ?? 'default')
    return successResponse(typeof content === 'string' ? content : JSON.stringify(content))
  } catch (error) {
    return errorResponse(error as Error)
  }
}
25 src/main/mcpServers/browser/tools/index.ts Normal file
@@ -0,0 +1,25 @@
export { ExecuteSchema, executeToolDefinition, handleExecute } from './execute'
export { FetchSchema, fetchToolDefinition, handleFetch } from './fetch'
export { handleOpen, OpenSchema, openToolDefinition } from './open'
export { handleReset, resetToolDefinition } from './reset'

import type { CdpBrowserController } from '../controller'
import { executeToolDefinition, handleExecute } from './execute'
import { fetchToolDefinition, handleFetch } from './fetch'
import { handleOpen, openToolDefinition } from './open'
import { handleReset, resetToolDefinition } from './reset'

export const toolDefinitions = [openToolDefinition, executeToolDefinition, resetToolDefinition, fetchToolDefinition]

export const toolHandlers: Record<
  string,
  (
    controller: CdpBrowserController,
    args: unknown
  ) => Promise<{ content: { type: string; text: string }[]; isError: boolean }>
> = {
  open: handleOpen,
  execute: handleExecute,
  reset: handleReset,
  fetch: handleFetch
}
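The handler table above gives every tool a uniform (controller, args) => result contract. A small illustrative dispatch, bypassing the MCP transport, just to show that contract; this snippet is not part of the change.

import { CdpBrowserController } from '../controller'
import { toolHandlers } from './index'

async function runTool(name: 'open' | 'execute' | 'reset' | 'fetch', args: unknown) {
  const controller = new CdpBrowserController()
  // Each handler validates args with its zod schema and wraps the outcome
  // as { content: [{ type: 'text', text }], isError }
  return toolHandlers[name](controller, args)
}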
47 src/main/mcpServers/browser/tools/open.ts Normal file
@@ -0,0 +1,47 @@
import * as z from 'zod'

import type { CdpBrowserController } from '../controller'
import { successResponse } from './utils'

export const OpenSchema = z.object({
  url: z.url().describe('URL to open in the controlled Electron window'),
  timeout: z.number().optional().describe('Timeout in milliseconds for navigation (default: 10000)'),
  show: z.boolean().optional().describe('Whether to show the browser window (default: false)'),
  sessionId: z
    .string()
    .optional()
    .describe('Session identifier; separate sessions keep separate pages (default: default)')
})

export const openToolDefinition = {
  name: 'open',
  description: 'Open a URL in a hidden Electron window controlled via Chrome DevTools Protocol',
  inputSchema: {
    type: 'object',
    properties: {
      url: {
        type: 'string',
        description: 'URL to load'
      },
      timeout: {
        type: 'number',
        description: 'Navigation timeout in milliseconds (default 10000)'
      },
      show: {
        type: 'boolean',
        description: 'Whether to show the browser window (default false)'
      },
      sessionId: {
        type: 'string',
        description: 'Session identifier; separate sessions keep separate pages (default: default)'
      }
    },
    required: ['url']
  }
}

export async function handleOpen(controller: CdpBrowserController, args: unknown) {
  const { url, timeout, show, sessionId } = OpenSchema.parse(args)
  const res = await controller.open(url, timeout ?? 10000, show ?? false, sessionId ?? 'default')
  return successResponse(JSON.stringify(res))
}
34 src/main/mcpServers/browser/tools/reset.ts Normal file
@@ -0,0 +1,34 @@
import * as z from 'zod'

import type { CdpBrowserController } from '../controller'
import { successResponse } from './utils'

/** Zod schema for validating reset tool arguments */
export const ResetSchema = z.object({
  sessionId: z.string().optional().describe('Session identifier to reset; omit to reset all sessions')
})

/** MCP tool definition for the reset tool */
export const resetToolDefinition = {
  name: 'reset',
  description: 'Reset the controlled window and detach debugger',
  inputSchema: {
    type: 'object',
    properties: {
      sessionId: {
        type: 'string',
        description: 'Session identifier to reset; omit to reset all sessions'
      }
    }
  }
}

/**
 * Handler for the reset MCP tool.
 * Closes browser window(s) and detaches debugger for the specified session or all sessions.
 */
export async function handleReset(controller: CdpBrowserController, args: unknown) {
  const { sessionId } = ResetSchema.parse(args)
  await controller.reset(sessionId)
  return successResponse('reset')
}
13 src/main/mcpServers/browser/tools/utils.ts Normal file
@@ -0,0 +1,13 @@
export function successResponse(text: string) {
  return {
    content: [{ type: 'text', text }],
    isError: false
  }
}

export function errorResponse(error: Error) {
  return {
    content: [{ type: 'text', text: error.message }],
    isError: true
  }
}
4 src/main/mcpServers/browser/types.ts Normal file
@@ -0,0 +1,4 @@
import { loggerService } from '@logger'

export const logger = loggerService.withContext('MCPBrowserCDP')
export const userAgent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:145.0) Gecko/20100101 Firefox/145.0'
@@ -4,6 +4,7 @@ import type { BuiltinMCPServerName } from '@types'
import { BuiltinMCPServerNames } from '@types'

import BraveSearchServer from './brave-search'
+import BrowserServer from './browser'
import DiDiMcpServer from './didi-mcp'
import DifyKnowledgeServer from './dify-knowledge'
import FetchServer from './fetch'
@@ -35,7 +36,7 @@ export function createInMemoryMCPServer(
      return new FetchServer().server
    }
    case BuiltinMCPServerNames.filesystem: {
-     return new FileSystemServer(args).server
+     return new FileSystemServer(envs.WORKSPACE_ROOT).server
    }
    case BuiltinMCPServerNames.difyKnowledge: {
      const difyKey = envs.DIFY_KEY
@@ -48,6 +49,9 @@ export function createInMemoryMCPServer(
      const apiKey = envs.DIDI_API_KEY
      return new DiDiMcpServer(apiKey).server
    }
+   case BuiltinMCPServerNames.browser: {
+     return new BrowserServer().server
+   }
    default:
      throw new Error(`Unknown in-memory MCP server: ${name}`)
  }
@@ -1,652 +0,0 @@
// port https://github.com/modelcontextprotocol/servers/blob/main/src/filesystem/index.ts

import { loggerService } from '@logger'
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
import { createTwoFilesPatch } from 'diff'
import fs from 'fs/promises'
import { minimatch } from 'minimatch'
import os from 'os'
import path from 'path'
import * as z from 'zod'

const logger = loggerService.withContext('MCP:FileSystemServer')

// Normalize all paths consistently
function normalizePath(p: string): string {
  return path.normalize(p)
}

function expandHome(filepath: string): string {
  if (filepath.startsWith('~/') || filepath === '~') {
    return path.join(os.homedir(), filepath.slice(1))
  }
  return filepath
}

// Security utilities
async function validatePath(allowedDirectories: string[], requestedPath: string): Promise<string> {
  const expandedPath = expandHome(requestedPath)
  const absolute = path.isAbsolute(expandedPath)
    ? path.resolve(expandedPath)
    : path.resolve(process.cwd(), expandedPath)

  const normalizedRequested = normalizePath(absolute)

  // Check if path is within allowed directories
  const isAllowed = allowedDirectories.some((dir) => normalizedRequested.startsWith(dir))
  if (!isAllowed) {
    throw new Error(
      `Access denied - path outside allowed directories: ${absolute} not in ${allowedDirectories.join(', ')}`
    )
  }

  // Handle symlinks by checking their real path
  try {
    const realPath = await fs.realpath(absolute)
    const normalizedReal = normalizePath(realPath)
    const isRealPathAllowed = allowedDirectories.some((dir) => normalizedReal.startsWith(dir))
    if (!isRealPathAllowed) {
      throw new Error('Access denied - symlink target outside allowed directories')
    }
    return realPath
  } catch (error) {
    // For new files that don't exist yet, verify parent directory
    const parentDir = path.dirname(absolute)
    try {
      const realParentPath = await fs.realpath(parentDir)
      const normalizedParent = normalizePath(realParentPath)
      const isParentAllowed = allowedDirectories.some((dir) => normalizedParent.startsWith(dir))
      if (!isParentAllowed) {
        throw new Error('Access denied - parent directory outside allowed directories')
      }
      return absolute
    } catch {
      throw new Error(`Parent directory does not exist: ${parentDir}`)
    }
  }
}

// Schema definitions
const ReadFileArgsSchema = z.object({
  path: z.string()
})

const ReadMultipleFilesArgsSchema = z.object({
  paths: z.array(z.string())
})

const WriteFileArgsSchema = z.object({
  path: z.string(),
  content: z.string()
})

const EditOperation = z.object({
  oldText: z.string().describe('Text to search for - must match exactly'),
  newText: z.string().describe('Text to replace with')
})

const EditFileArgsSchema = z.object({
  path: z.string(),
  edits: z.array(EditOperation),
  dryRun: z.boolean().default(false).describe('Preview changes using git-style diff format')
})

const CreateDirectoryArgsSchema = z.object({
  path: z.string()
})

const ListDirectoryArgsSchema = z.object({
  path: z.string()
})

const DirectoryTreeArgsSchema = z.object({
  path: z.string()
})

const MoveFileArgsSchema = z.object({
  source: z.string(),
  destination: z.string()
})

const SearchFilesArgsSchema = z.object({
  path: z.string(),
  pattern: z.string(),
  excludePatterns: z.array(z.string()).optional().default([])
})

const GetFileInfoArgsSchema = z.object({
  path: z.string()
})

interface FileInfo {
  size: number
  created: Date
  modified: Date
  accessed: Date
  isDirectory: boolean
  isFile: boolean
  permissions: string
}

// Tool implementations
async function getFileStats(filePath: string): Promise<FileInfo> {
  const stats = await fs.stat(filePath)
  return {
    size: stats.size,
    created: stats.birthtime,
    modified: stats.mtime,
    accessed: stats.atime,
    isDirectory: stats.isDirectory(),
    isFile: stats.isFile(),
    permissions: stats.mode.toString(8).slice(-3)
  }
}

async function searchFiles(
  allowedDirectories: string[],
  rootPath: string,
  pattern: string,
  excludePatterns: string[] = []
): Promise<string[]> {
  const results: string[] = []

  async function search(currentPath: string) {
    const entries = await fs.readdir(currentPath, { withFileTypes: true })

    for (const entry of entries) {
      const fullPath = path.join(currentPath, entry.name)

      try {
        // Validate each path before processing
        await validatePath(allowedDirectories, fullPath)

        // Check if path matches any exclude pattern
        const relativePath = path.relative(rootPath, fullPath)
        const shouldExclude = excludePatterns.some((pattern) => {
          const globPattern = pattern.includes('*') ? pattern : `**/${pattern}/**`
          return minimatch(relativePath, globPattern, { dot: true })
        })

        if (shouldExclude) {
          continue
        }

        if (entry.name.toLowerCase().includes(pattern.toLowerCase())) {
          results.push(fullPath)
        }

        if (entry.isDirectory()) {
          await search(fullPath)
        }
      } catch (error) {
        // Skip invalid paths during search
      }
    }
  }

  await search(rootPath)
  return results
}

// file editing and diffing utilities
function normalizeLineEndings(text: string): string {
  return text.replace(/\r\n/g, '\n')
}

function createUnifiedDiff(originalContent: string, newContent: string, filepath: string = 'file'): string {
  // Ensure consistent line endings for diff
  const normalizedOriginal = normalizeLineEndings(originalContent)
  const normalizedNew = normalizeLineEndings(newContent)

  return createTwoFilesPatch(filepath, filepath, normalizedOriginal, normalizedNew, 'original', 'modified')
}

async function applyFileEdits(
  filePath: string,
  edits: Array<{ oldText: string; newText: string }>,
  dryRun = false
): Promise<string> {
  // Read file content and normalize line endings
  const content = normalizeLineEndings(await fs.readFile(filePath, 'utf-8'))

  // Apply edits sequentially
  let modifiedContent = content
  for (const edit of edits) {
    const normalizedOld = normalizeLineEndings(edit.oldText)
    const normalizedNew = normalizeLineEndings(edit.newText)

    // If exact match exists, use it
    if (modifiedContent.includes(normalizedOld)) {
      modifiedContent = modifiedContent.replace(normalizedOld, normalizedNew)
      continue
    }

    // Otherwise, try line-by-line matching with flexibility for whitespace
    const oldLines = normalizedOld.split('\n')
    const contentLines = modifiedContent.split('\n')
    let matchFound = false

    for (let i = 0; i <= contentLines.length - oldLines.length; i++) {
      const potentialMatch = contentLines.slice(i, i + oldLines.length)

      // Compare lines with normalized whitespace
      const isMatch = oldLines.every((oldLine, j) => {
        const contentLine = potentialMatch[j]
        return oldLine.trim() === contentLine.trim()
      })

      if (isMatch) {
        // Preserve original indentation of first line
        const originalIndent = contentLines[i].match(/^\s*/)?.[0] || ''
        const newLines = normalizedNew.split('\n').map((line, j) => {
          if (j === 0) return originalIndent + line.trimStart()
          // For subsequent lines, try to preserve relative indentation
          const oldIndent = oldLines[j]?.match(/^\s*/)?.[0] || ''
          const newIndent = line.match(/^\s*/)?.[0] || ''
          if (oldIndent && newIndent) {
            const relativeIndent = newIndent.length - oldIndent.length
            return originalIndent + ' '.repeat(Math.max(0, relativeIndent)) + line.trimStart()
          }
          return line
        })

        contentLines.splice(i, oldLines.length, ...newLines)
        modifiedContent = contentLines.join('\n')
        matchFound = true
        break
      }
    }

    if (!matchFound) {
      throw new Error(`Could not find exact match for edit:\n${edit.oldText}`)
    }
  }

  // Create unified diff
  const diff = createUnifiedDiff(content, modifiedContent, filePath)

  // Format diff with appropriate number of backticks
  let numBackticks = 3
  while (diff.includes('`'.repeat(numBackticks))) {
    numBackticks++
  }
  const formattedDiff = `${'`'.repeat(numBackticks)}diff\n${diff}${'`'.repeat(numBackticks)}\n\n`

  if (!dryRun) {
    await fs.writeFile(filePath, modifiedContent, 'utf-8')
  }

  return formattedDiff
}

class FileSystemServer {
  public server: Server
  private allowedDirectories: string[]
  constructor(allowedDirs: string[]) {
    if (!Array.isArray(allowedDirs) || allowedDirs.length === 0) {
      throw new Error('No allowed directories provided, please specify at least one directory in args')
    }

    this.allowedDirectories = allowedDirs.map((dir) => normalizePath(path.resolve(expandHome(dir))))

    // Validate that all directories exist and are accessible
    this.validateDirs().catch((error) => {
      logger.error('Error validating allowed directories:', error)
      throw new Error(`Error validating allowed directories: ${error}`)
    })

    this.server = new Server(
      {
        name: 'secure-filesystem-server',
        version: '0.2.0'
      },
      {
        capabilities: {
          tools: {}
        }
      }
    )
    this.initialize()
  }

  async validateDirs() {
    // Validate that all directories exist and are accessible
    await Promise.all(
      this.allowedDirectories.map(async (dir) => {
        try {
          const stats = await fs.stat(expandHome(dir))
          if (!stats.isDirectory()) {
            logger.error(`Error: ${dir} is not a directory`)
            throw new Error(`Error: ${dir} is not a directory`)
          }
        } catch (error: any) {
          logger.error(`Error accessing directory ${dir}:`, error)
          throw new Error(`Error accessing directory ${dir}:`, error)
        }
      })
    )
  }

  initialize() {
    // Tool handlers
    this.server.setRequestHandler(ListToolsRequestSchema, async () => {
      return {
        tools: [
          {
            name: 'read_file',
            description:
              'Read the complete contents of a file from the file system. ' +
              'Handles various text encodings and provides detailed error messages ' +
              'if the file cannot be read. Use this tool when you need to examine ' +
              'the contents of a single file. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(ReadFileArgsSchema)
          },
          {
            name: 'read_multiple_files',
            description:
              'Read the contents of multiple files simultaneously. This is more ' +
              'efficient than reading files one by one when you need to analyze ' +
              "or compare multiple files. Each file's content is returned with its " +
              "path as a reference. Failed reads for individual files won't stop " +
              'the entire operation. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(ReadMultipleFilesArgsSchema)
          },
          {
            name: 'write_file',
            description:
              'Create a new file or completely overwrite an existing file with new content. ' +
              'Use with caution as it will overwrite existing files without warning. ' +
              'Handles text content with proper encoding. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(WriteFileArgsSchema)
          },
          {
            name: 'edit_file',
            description:
              'Make line-based edits to a text file. Each edit replaces exact line sequences ' +
              'with new content. Returns a git-style diff showing the changes made. ' +
              'Only works within allowed directories.',
            inputSchema: z.toJSONSchema(EditFileArgsSchema)
          },
          {
            name: 'create_directory',
            description:
              'Create a new directory or ensure a directory exists. Can create multiple ' +
              'nested directories in one operation. If the directory already exists, ' +
              'this operation will succeed silently. Perfect for setting up directory ' +
              'structures for projects or ensuring required paths exist. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(CreateDirectoryArgsSchema)
          },
          {
            name: 'list_directory',
            description:
              'Get a detailed listing of all files and directories in a specified path. ' +
              'Results clearly distinguish between files and directories with [FILE] and [DIR] ' +
              'prefixes. This tool is essential for understanding directory structure and ' +
              'finding specific files within a directory. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(ListDirectoryArgsSchema)
          },
          {
            name: 'directory_tree',
            description:
              'Get a recursive tree view of files and directories as a JSON structure. ' +
              "Each entry includes 'name', 'type' (file/directory), and 'children' for directories. " +
              'Files have no children array, while directories always have a children array (which may be empty). ' +
              'The output is formatted with 2-space indentation for readability. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(DirectoryTreeArgsSchema)
          },
          {
            name: 'move_file',
            description:
              'Move or rename files and directories. Can move files between directories ' +
              'and rename them in a single operation. If the destination exists, the ' +
              'operation will fail. Works across different directories and can be used ' +
              'for simple renaming within the same directory. Both source and destination must be within allowed directories.',
            inputSchema: z.toJSONSchema(MoveFileArgsSchema)
          },
          {
            name: 'search_files',
            description:
              'Recursively search for files and directories matching a pattern. ' +
              'Searches through all subdirectories from the starting path. The search ' +
              'is case-insensitive and matches partial names. Returns full paths to all ' +
              "matching items. Great for finding files when you don't know their exact location. " +
              'Only searches within allowed directories.',
            inputSchema: z.toJSONSchema(SearchFilesArgsSchema)
          },
          {
            name: 'get_file_info',
            description:
              'Retrieve detailed metadata about a file or directory. Returns comprehensive ' +
              'information including size, creation time, last modified time, permissions, ' +
              'and type. This tool is perfect for understanding file characteristics ' +
              'without reading the actual content. Only works within allowed directories.',
            inputSchema: z.toJSONSchema(GetFileInfoArgsSchema)
          },
          {
            name: 'list_allowed_directories',
            description:
              'Returns the list of directories that this server is allowed to access. ' +
              'Use this to understand which directories are available before trying to access files.',
            inputSchema: {
              type: 'object',
              properties: {},
              required: []
            }
          }
        ]
      }
    })

    this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
      try {
        const { name, arguments: args } = request.params

        switch (name) {
          case 'read_file': {
            const parsed = ReadFileArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for read_file: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            const content = await fs.readFile(validPath, 'utf-8')
            return {
              content: [{ type: 'text', text: content }]
            }
          }

          case 'read_multiple_files': {
            const parsed = ReadMultipleFilesArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for read_multiple_files: ${parsed.error}`)
            }
            const results = await Promise.all(
              parsed.data.paths.map(async (filePath: string) => {
                try {
                  const validPath = await validatePath(this.allowedDirectories, filePath)
                  const content = await fs.readFile(validPath, 'utf-8')
                  return `${filePath}:\n${content}\n`
                } catch (error) {
                  const errorMessage = error instanceof Error ? error.message : String(error)
                  return `${filePath}: Error - ${errorMessage}`
                }
              })
            )
            return {
              content: [{ type: 'text', text: results.join('\n---\n') }]
            }
          }

          case 'write_file': {
            const parsed = WriteFileArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for write_file: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            await fs.writeFile(validPath, parsed.data.content, 'utf-8')
            return {
              content: [{ type: 'text', text: `Successfully wrote to ${parsed.data.path}` }]
            }
          }

          case 'edit_file': {
            const parsed = EditFileArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for edit_file: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            const result = await applyFileEdits(validPath, parsed.data.edits, parsed.data.dryRun)
            return {
              content: [{ type: 'text', text: result }]
            }
          }

          case 'create_directory': {
            const parsed = CreateDirectoryArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for create_directory: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            await fs.mkdir(validPath, { recursive: true })
            return {
              content: [{ type: 'text', text: `Successfully created directory ${parsed.data.path}` }]
            }
          }

          case 'list_directory': {
            const parsed = ListDirectoryArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for list_directory: ${parsed.error}`)
            }
            const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
            const entries = await fs.readdir(validPath, { withFileTypes: true })
            const formatted = entries
              .map((entry) => `${entry.isDirectory() ? '[DIR]' : '[FILE]'} ${entry.name}`)
              .join('\n')
            return {
              content: [{ type: 'text', text: formatted }]
            }
          }

          case 'directory_tree': {
            const parsed = DirectoryTreeArgsSchema.safeParse(args)
            if (!parsed.success) {
              throw new Error(`Invalid arguments for directory_tree: ${parsed.error}`)
            }

            interface TreeEntry {
              name: string
              type: 'file' | 'directory'
              children?: TreeEntry[]
            }

            async function buildTree(allowedDirectories: string[], currentPath: string): Promise<TreeEntry[]> {
              const validPath = await validatePath(allowedDirectories, currentPath)
              const entries = await fs.readdir(validPath, { withFileTypes: true })
|
|
||||||
const result: TreeEntry[] = []
|
|
||||||
|
|
||||||
for (const entry of entries) {
|
|
||||||
const entryData: TreeEntry = {
|
|
||||||
name: entry.name,
|
|
||||||
type: entry.isDirectory() ? 'directory' : 'file'
|
|
||||||
}
|
|
||||||
|
|
||||||
if (entry.isDirectory()) {
|
|
||||||
const subPath = path.join(currentPath, entry.name)
|
|
||||||
entryData.children = await buildTree(allowedDirectories, subPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
result.push(entryData)
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
const treeData = await buildTree(this.allowedDirectories, parsed.data.path)
|
|
||||||
return {
|
|
||||||
content: [
|
|
||||||
{
|
|
||||||
type: 'text',
|
|
||||||
text: JSON.stringify(treeData, null, 2)
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case 'move_file': {
|
|
||||||
const parsed = MoveFileArgsSchema.safeParse(args)
|
|
||||||
if (!parsed.success) {
|
|
||||||
throw new Error(`Invalid arguments for move_file: ${parsed.error}`)
|
|
||||||
}
|
|
||||||
const validSourcePath = await validatePath(this.allowedDirectories, parsed.data.source)
|
|
||||||
const validDestPath = await validatePath(this.allowedDirectories, parsed.data.destination)
|
|
||||||
await fs.rename(validSourcePath, validDestPath)
|
|
||||||
return {
|
|
||||||
content: [
|
|
||||||
{ type: 'text', text: `Successfully moved ${parsed.data.source} to ${parsed.data.destination}` }
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case 'search_files': {
|
|
||||||
const parsed = SearchFilesArgsSchema.safeParse(args)
|
|
||||||
if (!parsed.success) {
|
|
||||||
throw new Error(`Invalid arguments for search_files: ${parsed.error}`)
|
|
||||||
}
|
|
||||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
|
||||||
const results = await searchFiles(
|
|
||||||
this.allowedDirectories,
|
|
||||||
validPath,
|
|
||||||
parsed.data.pattern,
|
|
||||||
parsed.data.excludePatterns
|
|
||||||
)
|
|
||||||
return {
|
|
||||||
content: [{ type: 'text', text: results.length > 0 ? results.join('\n') : 'No matches found' }]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case 'get_file_info': {
|
|
||||||
const parsed = GetFileInfoArgsSchema.safeParse(args)
|
|
||||||
if (!parsed.success) {
|
|
||||||
throw new Error(`Invalid arguments for get_file_info: ${parsed.error}`)
|
|
||||||
}
|
|
||||||
const validPath = await validatePath(this.allowedDirectories, parsed.data.path)
|
|
||||||
const info = await getFileStats(validPath)
|
|
||||||
return {
|
|
||||||
content: [
|
|
||||||
{
|
|
||||||
type: 'text',
|
|
||||||
text: Object.entries(info)
|
|
||||||
.map(([key, value]) => `${key}: ${value}`)
|
|
||||||
.join('\n')
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case 'list_allowed_directories': {
|
|
||||||
return {
|
|
||||||
content: [
|
|
||||||
{
|
|
||||||
type: 'text',
|
|
||||||
text: `Allowed directories:\n${this.allowedDirectories.join('\n')}`
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
default:
|
|
||||||
throw new Error(`Unknown tool: ${name}`)
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
const errorMessage = error instanceof Error ? error.message : String(error)
|
|
||||||
return {
|
|
||||||
content: [{ type: 'text', text: `Error: ${errorMessage}` }],
|
|
||||||
isError: true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
export default FileSystemServer
|
|
||||||
2
src/main/mcpServers/filesystem/index.ts
Normal file
@@ -0,0 +1,2 @@
// Re-export FileSystemServer to maintain existing import pattern
export { default, FileSystemServer } from './server'
118
src/main/mcpServers/filesystem/server.ts
Normal file
@@ -0,0 +1,118 @@
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js'
import { app } from 'electron'
import fs from 'fs/promises'
import path from 'path'

import {
  deleteToolDefinition,
  editToolDefinition,
  globToolDefinition,
  grepToolDefinition,
  handleDeleteTool,
  handleEditTool,
  handleGlobTool,
  handleGrepTool,
  handleLsTool,
  handleReadTool,
  handleWriteTool,
  lsToolDefinition,
  readToolDefinition,
  writeToolDefinition
} from './tools'
import { logger } from './types'

export class FileSystemServer {
  public server: Server
  private baseDir: string

  constructor(baseDir?: string) {
    if (baseDir && path.isAbsolute(baseDir)) {
      this.baseDir = baseDir
      logger.info(`Using provided baseDir for filesystem MCP: ${baseDir}`)
    } else {
      const userData = app.getPath('userData')
      this.baseDir = path.join(userData, 'Data', 'Workspace')
      logger.info(`Using default workspace for filesystem MCP baseDir: ${this.baseDir}`)
    }

    this.server = new Server(
      {
        name: 'filesystem-server',
        version: '2.0.0'
      },
      {
        capabilities: {
          tools: {}
        }
      }
    )

    this.initialize()
  }

  async initialize() {
    try {
      await fs.mkdir(this.baseDir, { recursive: true })
    } catch (error) {
      logger.error('Failed to create filesystem MCP baseDir', { error, baseDir: this.baseDir })
    }

    // Register tool list handler
    this.server.setRequestHandler(ListToolsRequestSchema, async () => {
      return {
        tools: [
          globToolDefinition,
          lsToolDefinition,
          grepToolDefinition,
          readToolDefinition,
          editToolDefinition,
          writeToolDefinition,
          deleteToolDefinition
        ]
      }
    })

    // Register tool call handler
    this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
      try {
        const { name, arguments: args } = request.params

        switch (name) {
          case 'glob':
            return await handleGlobTool(args, this.baseDir)

          case 'ls':
            return await handleLsTool(args, this.baseDir)

          case 'grep':
            return await handleGrepTool(args, this.baseDir)

          case 'read':
            return await handleReadTool(args, this.baseDir)

          case 'edit':
            return await handleEditTool(args, this.baseDir)

          case 'write':
            return await handleWriteTool(args, this.baseDir)

          case 'delete':
            return await handleDeleteTool(args, this.baseDir)

          default:
            throw new Error(`Unknown tool: ${name}`)
        }
      } catch (error) {
        const errorMessage = error instanceof Error ? error.message : String(error)
        logger.error(`Tool execution error for ${request.params.name}:`, { error })
        return {
          content: [{ type: 'text', text: `Error: ${errorMessage}` }],
          isError: true
        }
      }
    })
  }
}

export default FileSystemServer
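A minimal usage sketch (not part of this PR): the class above only registers handlers, so the snippet below assumes the standard MCP SDK stdio transport and an illustrative workspace path; how Cherry Studio actually wires the server to a transport lives elsewhere in the codebase.

// Hedged sketch: connect the refactored FileSystemServer to a stdio transport (assumed setup).
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'
import FileSystemServer from './server'

async function startFilesystemMcp() {
  const fsServer = new FileSystemServer('/absolute/path/to/workspace') // illustrative path
  const transport = new StdioServerTransport()
  await fsServer.server.connect(transport)
}

startFilesystemMcp()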
93
src/main/mcpServers/filesystem/tools/delete.ts
Normal file
@@ -0,0 +1,93 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { logger, validatePath } from '../types'

// Schema definition
export const DeleteToolSchema = z.object({
  path: z.string().describe('The path to the file or directory to delete'),
  recursive: z.boolean().optional().describe('For directories, whether to delete recursively (default: false)')
})

// Tool definition with detailed description
export const deleteToolDefinition = {
  name: 'delete',
  description: `Deletes a file or directory from the filesystem.

CAUTION: This operation cannot be undone!

- For files: simply provide the path
- For empty directories: provide the path
- For non-empty directories: set recursive=true
- The path must be an absolute path, not a relative path
- Always verify the path before deleting to avoid data loss`,
  inputSchema: z.toJSONSchema(DeleteToolSchema)
}

// Handler implementation
export async function handleDeleteTool(args: unknown, baseDir: string) {
  const parsed = DeleteToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for delete: ${parsed.error}`)
  }

  const targetPath = parsed.data.path
  const validPath = await validatePath(targetPath, baseDir)
  const recursive = parsed.data.recursive || false

  // Check if path exists and get stats
  let stats
  try {
    stats = await fs.stat(validPath)
  } catch (error: any) {
    if (error.code === 'ENOENT') {
      throw new Error(`Path not found: ${targetPath}`)
    }
    throw error
  }

  const isDirectory = stats.isDirectory()
  const relativePath = path.relative(baseDir, validPath)

  // Perform deletion
  try {
    if (isDirectory) {
      if (recursive) {
        // Delete directory recursively
        await fs.rm(validPath, { recursive: true, force: true })
      } else {
        // Try to delete empty directory
        await fs.rmdir(validPath)
      }
    } else {
      // Delete file
      await fs.unlink(validPath)
    }
  } catch (error: any) {
    if (error.code === 'ENOTEMPTY') {
      throw new Error(`Directory not empty: ${targetPath}. Use recursive=true to delete non-empty directories.`)
    }
    throw new Error(`Failed to delete: ${error.message}`)
  }

  // Log the operation
  logger.info('Path deleted', {
    path: validPath,
    type: isDirectory ? 'directory' : 'file',
    recursive: isDirectory ? recursive : undefined
  })

  // Format output
  const itemType = isDirectory ? 'Directory' : 'File'
  const recursiveNote = isDirectory && recursive ? ' (recursive)' : ''

  return {
    content: [
      {
        type: 'text',
        text: `${itemType} deleted${recursiveNote}: ${relativePath}`
      }
    ]
  }
}
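For reference, a hedged sketch of calling the handler above directly rather than through MCP; the workspace and target paths are illustrative only.

import { handleDeleteTool } from './delete'

async function deleteDemo() {
  // recursive=true is required for non-empty directories (illustrative paths).
  const result = await handleDeleteTool({ path: '/workspace/tmp/cache', recursive: true }, '/workspace')
  console.log(result.content[0].text) // e.g. "Directory deleted (recursive): tmp/cache"
}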
130
src/main/mcpServers/filesystem/tools/edit.ts
Normal file
@@ -0,0 +1,130 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { logger, replaceWithFuzzyMatch, validatePath } from '../types'

// Schema definition
export const EditToolSchema = z.object({
  file_path: z.string().describe('The path to the file to modify'),
  old_string: z.string().describe('The text to replace'),
  new_string: z.string().describe('The text to replace it with'),
  replace_all: z.boolean().optional().default(false).describe('Replace all occurrences of old_string (default false)')
})

// Tool definition with detailed description
export const editToolDefinition = {
  name: 'edit',
  description: `Performs exact string replacements in files.

- You must use the 'read' tool at least once before editing
- The file_path must be an absolute path, not a relative path
- Preserve exact indentation from read output (after the line number prefix)
- Never include line number prefixes in old_string or new_string
- ALWAYS prefer editing existing files over creating new ones
- The edit will FAIL if old_string is not found in the file
- The edit will FAIL if old_string appears multiple times (provide more context or use replace_all)
- The edit will FAIL if old_string equals new_string
- Use replace_all to rename variables or replace all occurrences`,
  inputSchema: z.toJSONSchema(EditToolSchema)
}

// Handler implementation
export async function handleEditTool(args: unknown, baseDir: string) {
  const parsed = EditToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for edit: ${parsed.error}`)
  }

  const { file_path: filePath, old_string: oldString, new_string: newString, replace_all: replaceAll } = parsed.data

  // Validate path
  const validPath = await validatePath(filePath, baseDir)

  // Check if file exists
  try {
    const stats = await fs.stat(validPath)
    if (!stats.isFile()) {
      throw new Error(`Path is not a file: ${filePath}`)
    }
  } catch (error: any) {
    if (error.code === 'ENOENT') {
      // If old_string is empty, this is a create new file operation
      if (oldString === '') {
        // Create parent directory if needed
        const parentDir = path.dirname(validPath)
        await fs.mkdir(parentDir, { recursive: true })

        // Write the new content
        await fs.writeFile(validPath, newString, 'utf-8')

        logger.info('File created', { path: validPath })

        const relativePath = path.relative(baseDir, validPath)
        return {
          content: [
            {
              type: 'text',
              text: `Created new file: ${relativePath}\nLines: ${newString.split('\n').length}`
            }
          ]
        }
      }
      throw new Error(`File not found: ${filePath}`)
    }
    throw error
  }

  // Read current content
  const content = await fs.readFile(validPath, 'utf-8')

  // Handle special case: old_string is empty (create file with content)
  if (oldString === '') {
    await fs.writeFile(validPath, newString, 'utf-8')

    logger.info('File overwritten', { path: validPath })

    const relativePath = path.relative(baseDir, validPath)
    return {
      content: [
        {
          type: 'text',
          text: `Overwrote file: ${relativePath}\nLines: ${newString.split('\n').length}`
        }
      ]
    }
  }

  // Perform the replacement with fuzzy matching
  const newContent = replaceWithFuzzyMatch(content, oldString, newString, replaceAll)

  // Write the modified content
  await fs.writeFile(validPath, newContent, 'utf-8')

  logger.info('File edited', {
    path: validPath,
    replaceAll
  })

  // Generate a simple diff summary
  const oldLines = content.split('\n').length
  const newLines = newContent.split('\n').length
  const lineDiff = newLines - oldLines

  const relativePath = path.relative(baseDir, validPath)
  let diffSummary = `Edited: ${relativePath}`
  if (lineDiff > 0) {
    diffSummary += `\n+${lineDiff} lines`
  } else if (lineDiff < 0) {
    diffSummary += `\n${lineDiff} lines`
  }

  return {
    content: [
      {
        type: 'text',
        text: diffSummary
      }
    ]
  }
}
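A hedged sketch of the two edit modes the description above distinguishes, invoked directly; file paths and strings are illustrative.

import { handleEditTool } from './edit'

async function editDemo() {
  // Single occurrence: fails if old_string is ambiguous or missing in the file.
  await handleEditTool(
    { file_path: '/workspace/src/app.ts', old_string: 'const retries = 3', new_string: 'const retries = 5' },
    '/workspace'
  )

  // replace_all: replace every occurrence, e.g. for a rename.
  await handleEditTool(
    { file_path: '/workspace/src/app.ts', old_string: 'oldName', new_string: 'newName', replace_all: true },
    '/workspace'
  )
}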
149
src/main/mcpServers/filesystem/tools/glob.ts
Normal file
@@ -0,0 +1,149 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import type { FileInfo } from '../types'
import { logger, MAX_FILES_LIMIT, runRipgrep, validatePath } from '../types'

// Schema definition
export const GlobToolSchema = z.object({
  pattern: z.string().describe('The glob pattern to match files against'),
  path: z
    .string()
    .optional()
    .describe('The directory to search in (must be absolute path). Defaults to the base directory')
})

// Tool definition with detailed description
export const globToolDefinition = {
  name: 'glob',
  description: `Fast file pattern matching tool that works with any codebase size.

- Supports glob patterns like "**/*.js" or "src/**/*.ts"
- Returns matching absolute file paths sorted by modification time (newest first)
- Use this when you need to find files by name patterns
- Patterns without "/" (e.g., "*.txt") match files at ANY depth in the directory tree
- Patterns with "/" (e.g., "src/*.ts") match relative to the search path
- Pattern syntax: * (any chars), ** (any path), {a,b} (alternatives), ? (single char)
- Results are limited to 100 files
- The path parameter must be an absolute path if specified
- If path is not specified, defaults to the base directory
- IMPORTANT: Omit the path field for the default directory (don't use "undefined" or "null")`,
  inputSchema: z.toJSONSchema(GlobToolSchema)
}

// Handler implementation
export async function handleGlobTool(args: unknown, baseDir: string) {
  const parsed = GlobToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for glob: ${parsed.error}`)
  }

  const searchPath = parsed.data.path || baseDir
  const validPath = await validatePath(searchPath, baseDir)

  // Verify the search directory exists
  try {
    const stats = await fs.stat(validPath)
    if (!stats.isDirectory()) {
      throw new Error(`Path is not a directory: ${validPath}`)
    }
  } catch (error: unknown) {
    if (error && typeof error === 'object' && 'code' in error && error.code === 'ENOENT') {
      throw new Error(`Directory not found: ${validPath}`)
    }
    throw error
  }

  // Validate pattern
  const pattern = parsed.data.pattern.trim()
  if (!pattern) {
    throw new Error('Pattern cannot be empty')
  }

  const files: FileInfo[] = []
  let truncated = false

  // Build ripgrep arguments for file listing using --glob=pattern format
  const rgArgs: string[] = [
    '--files',
    '--follow',
    '--hidden',
    `--glob=${pattern}`,
    '--glob=!.git/*',
    '--glob=!node_modules/*',
    '--glob=!dist/*',
    '--glob=!build/*',
    '--glob=!__pycache__/*',
    validPath
  ]

  // Use ripgrep for file listing
  logger.debug('Running ripgrep with args', { rgArgs })
  const rgResult = await runRipgrep(rgArgs)
  logger.debug('Ripgrep result', {
    ok: rgResult.ok,
    exitCode: rgResult.exitCode,
    stdoutLength: rgResult.stdout.length,
    stdoutPreview: rgResult.stdout.slice(0, 500)
  })

  // Process results if we have stdout content
  // Exit code 2 can indicate partial errors (e.g., permission denied on some dirs) but still have valid results
  if (rgResult.ok && rgResult.stdout.length > 0) {
    const lines = rgResult.stdout.split('\n').filter(Boolean)
    logger.debug('Parsed lines from ripgrep', { lineCount: lines.length, lines })

    for (const line of lines) {
      if (files.length >= MAX_FILES_LIMIT) {
        truncated = true
        break
      }

      const filePath = line.trim()
      if (!filePath) continue

      const absolutePath = path.isAbsolute(filePath) ? filePath : path.resolve(validPath, filePath)

      try {
        const stats = await fs.stat(absolutePath)
        files.push({
          path: absolutePath,
          type: 'file', // ripgrep --files only returns files
          size: stats.size,
          modified: stats.mtime
        })
      } catch (error) {
        logger.debug('Failed to stat file from ripgrep output, skipping', { file: absolutePath, error })
      }
    }
  }

  // Sort by modification time (newest first)
  files.sort((a, b) => {
    const aTime = a.modified ? a.modified.getTime() : 0
    const bTime = b.modified ? b.modified.getTime() : 0
    return bTime - aTime
  })

  // Format output - always use absolute paths
  const output: string[] = []
  if (files.length === 0) {
    output.push(`No files found matching pattern "${parsed.data.pattern}" in ${validPath}`)
  } else {
    output.push(...files.map((f) => f.path))
    if (truncated) {
      output.push('')
      output.push(`(Results truncated to ${MAX_FILES_LIMIT} files. Consider using a more specific pattern.)`)
    }
  }

  return {
    content: [
      {
        type: 'text',
        text: output.join('\n')
      }
    ]
  }
}
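A hedged sketch of the glob handler in use; the pattern and workspace path are illustrative.

import { handleGlobTool } from './glob'

async function globDemo() {
  // Omit `path` to search the base directory; "**/*.test.ts" matches at any depth.
  const result = await handleGlobTool({ pattern: '**/*.test.ts' }, '/workspace')
  console.log(result.content[0].text) // newest-first absolute paths, capped at 100
}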
266
src/main/mcpServers/filesystem/tools/grep.ts
Normal file
@@ -0,0 +1,266 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import type { GrepMatch } from '../types'
import { isBinaryFile, MAX_GREP_MATCHES, MAX_LINE_LENGTH, runRipgrep, validatePath } from '../types'

// Schema definition
export const GrepToolSchema = z.object({
  pattern: z.string().describe('The regex pattern to search for in file contents'),
  path: z
    .string()
    .optional()
    .describe('The directory to search in (must be absolute path). Defaults to the base directory'),
  include: z.string().optional().describe('File pattern to include in the search (e.g. "*.js", "*.{ts,tsx}")')
})

// Tool definition with detailed description
export const grepToolDefinition = {
  name: 'grep',
  description: `Fast content search tool that works with any codebase size.

- Searches file contents using regular expressions
- Supports full regex syntax (e.g., "log.*Error", "function\\s+\\w+")
- Filter files by pattern with include (e.g., "*.js", "*.{ts,tsx}")
- Returns absolute file paths and line numbers with matching content
- Results are limited to 100 matches
- Binary files are automatically skipped
- Common directories (node_modules, .git, dist) are excluded
- The path parameter must be an absolute path if specified
- If path is not specified, defaults to the base directory`,
  inputSchema: z.toJSONSchema(GrepToolSchema)
}

// Handler implementation
export async function handleGrepTool(args: unknown, baseDir: string) {
  const parsed = GrepToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for grep: ${parsed.error}`)
  }

  const data = parsed.data

  if (!data.pattern) {
    throw new Error('Pattern is required for grep')
  }

  const searchPath = data.path || baseDir
  const validPath = await validatePath(searchPath, baseDir)

  const matches: GrepMatch[] = []
  let truncated = false
  let regex: RegExp

  // Build ripgrep arguments
  const rgArgs: string[] = [
    '--no-heading',
    '--line-number',
    '--color',
    'never',
    '--ignore-case',
    '--glob',
    '!.git/**',
    '--glob',
    '!node_modules/**',
    '--glob',
    '!dist/**',
    '--glob',
    '!build/**',
    '--glob',
    '!__pycache__/**'
  ]

  if (data.include) {
    for (const pat of data.include
      .split(',')
      .map((p) => p.trim())
      .filter(Boolean)) {
      rgArgs.push('--glob', pat)
    }
  }

  rgArgs.push(data.pattern)
  rgArgs.push(validPath)

  try {
    regex = new RegExp(data.pattern, 'gi')
  } catch (error) {
    throw new Error(`Invalid regex pattern: ${data.pattern}`)
  }

  async function searchFile(filePath: string): Promise<void> {
    if (matches.length >= MAX_GREP_MATCHES) {
      truncated = true
      return
    }

    try {
      // Skip binary files
      if (await isBinaryFile(filePath)) {
        return
      }

      const content = await fs.readFile(filePath, 'utf-8')
      const lines = content.split('\n')

      lines.forEach((line, index) => {
        if (matches.length >= MAX_GREP_MATCHES) {
          truncated = true
          return
        }

        if (regex.test(line)) {
          // Truncate long lines
          const truncatedLine = line.length > MAX_LINE_LENGTH ? line.substring(0, MAX_LINE_LENGTH) + '...' : line

          matches.push({
            file: filePath,
            line: index + 1,
            content: truncatedLine.trim()
          })
        }
      })
    } catch (error) {
      // Skip files we can't read
    }
  }

  async function searchDirectory(dir: string): Promise<void> {
    if (matches.length >= MAX_GREP_MATCHES) {
      truncated = true
      return
    }

    try {
      const entries = await fs.readdir(dir, { withFileTypes: true })

      for (const entry of entries) {
        if (matches.length >= MAX_GREP_MATCHES) {
          truncated = true
          break
        }

        const fullPath = path.join(dir, entry.name)

        // Skip common ignore patterns
        if (entry.name.startsWith('.') && entry.name !== '.env.example') {
          continue
        }
        if (['node_modules', 'dist', 'build', '__pycache__', '.git'].includes(entry.name)) {
          continue
        }

        if (entry.isFile()) {
          // Check if file matches include pattern
          if (data.include) {
            const includePatterns = data.include.split(',').map((p) => p.trim())
            const fileName = path.basename(fullPath)
            const matchesInclude = includePatterns.some((pattern) => {
              // Simple glob pattern matching
              const regexPattern = pattern
                .replace(/\*/g, '.*')
                .replace(/\?/g, '.')
                .replace(/\{([^}]+)\}/g, (_, group) => `(${group.split(',').join('|')})`)
              return new RegExp(`^${regexPattern}$`).test(fileName)
            })
            if (!matchesInclude) {
              continue
            }
          }

          await searchFile(fullPath)
        } else if (entry.isDirectory()) {
          await searchDirectory(fullPath)
        }
      }
    } catch (error) {
      // Skip directories we can't read
    }
  }

  // Perform the search
  let usedRipgrep = false
  try {
    const rgResult = await runRipgrep(rgArgs)
    if (rgResult.ok && rgResult.exitCode !== null && rgResult.exitCode !== 2) {
      usedRipgrep = true
      const lines = rgResult.stdout.split('\n').filter(Boolean)
      for (const line of lines) {
        if (matches.length >= MAX_GREP_MATCHES) {
          truncated = true
          break
        }

        const firstColon = line.indexOf(':')
        const secondColon = line.indexOf(':', firstColon + 1)
        if (firstColon === -1 || secondColon === -1) continue

        const filePart = line.slice(0, firstColon)
        const linePart = line.slice(firstColon + 1, secondColon)
        const contentPart = line.slice(secondColon + 1)
        const lineNum = Number.parseInt(linePart, 10)
        if (!Number.isFinite(lineNum)) continue

        const absoluteFilePath = path.isAbsolute(filePart) ? filePart : path.resolve(baseDir, filePart)
        const truncatedLine =
          contentPart.length > MAX_LINE_LENGTH ? contentPart.substring(0, MAX_LINE_LENGTH) + '...' : contentPart

        matches.push({
          file: absoluteFilePath,
          line: lineNum,
          content: truncatedLine.trim()
        })
      }
    }
  } catch {
    usedRipgrep = false
  }

  if (!usedRipgrep) {
    const stats = await fs.stat(validPath)
    if (stats.isFile()) {
      await searchFile(validPath)
    } else {
      await searchDirectory(validPath)
    }
  }

  // Format output
  const output: string[] = []

  if (matches.length === 0) {
    output.push('No matches found')
  } else {
    // Group matches by file
    const fileGroups = new Map<string, GrepMatch[]>()
    matches.forEach((match) => {
      if (!fileGroups.has(match.file)) {
        fileGroups.set(match.file, [])
      }
      fileGroups.get(match.file)!.push(match)
    })

    // Format grouped matches - always use absolute paths
    fileGroups.forEach((fileMatches, filePath) => {
      output.push(`\n${filePath}:`)
      fileMatches.forEach((match) => {
        output.push(`  ${match.line}: ${match.content}`)
      })
    })

    if (truncated) {
      output.push('')
      output.push(`(Results truncated to ${MAX_GREP_MATCHES} matches. Consider using a more specific pattern or path.)`)
    }
  }

  return {
    content: [
      {
        type: 'text',
        text: output.join('\n')
      }
    ]
  }
}
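A hedged sketch of the grep handler; the pattern, include filter, and paths are illustrative.

import { handleGrepTool } from './grep'

async function grepDemo() {
  const result = await handleGrepTool(
    { pattern: 'TODO\\s*:', include: '*.{ts,tsx}', path: '/workspace/src' },
    '/workspace'
  )
  console.log(result.content[0].text) // matches grouped by file, with line numbers
}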
8
src/main/mcpServers/filesystem/tools/index.ts
Normal file
@@ -0,0 +1,8 @@
// Export all tool definitions and handlers
export { deleteToolDefinition, handleDeleteTool } from './delete'
export { editToolDefinition, handleEditTool } from './edit'
export { globToolDefinition, handleGlobTool } from './glob'
export { grepToolDefinition, handleGrepTool } from './grep'
export { handleLsTool, lsToolDefinition } from './ls'
export { handleReadTool, readToolDefinition } from './read'
export { handleWriteTool, writeToolDefinition } from './write'
150
src/main/mcpServers/filesystem/tools/ls.ts
Normal file
@@ -0,0 +1,150 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { MAX_FILES_LIMIT, validatePath } from '../types'

// Schema definition
export const LsToolSchema = z.object({
  path: z.string().optional().describe('The directory to list (must be absolute path). Defaults to the base directory'),
  recursive: z.boolean().optional().describe('Whether to list directories recursively (default: false)')
})

// Tool definition with detailed description
export const lsToolDefinition = {
  name: 'ls',
  description: `Lists files and directories in a specified path.

- Returns a tree-like structure with icons (📁 directories, 📄 files)
- Shows the absolute directory path in the header
- Entries are sorted alphabetically with directories first
- Can list recursively with recursive=true (up to 5 levels deep)
- Common directories (node_modules, dist, .git) are excluded
- Hidden files (starting with .) are excluded except .env.example
- Results are limited to 100 entries
- The path parameter must be an absolute path if specified
- If path is not specified, defaults to the base directory`,
  inputSchema: z.toJSONSchema(LsToolSchema)
}

// Handler implementation
export async function handleLsTool(args: unknown, baseDir: string) {
  const parsed = LsToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for ls: ${parsed.error}`)
  }

  const targetPath = parsed.data.path || baseDir
  const validPath = await validatePath(targetPath, baseDir)
  const recursive = parsed.data.recursive || false

  interface TreeNode {
    name: string
    type: 'file' | 'directory'
    children?: TreeNode[]
  }

  let fileCount = 0
  let truncated = false

  async function buildTree(dirPath: string, depth: number = 0): Promise<TreeNode[]> {
    if (fileCount >= MAX_FILES_LIMIT) {
      truncated = true
      return []
    }

    try {
      const entries = await fs.readdir(dirPath, { withFileTypes: true })
      const nodes: TreeNode[] = []

      // Sort entries: directories first, then files, alphabetically
      entries.sort((a, b) => {
        if (a.isDirectory() && !b.isDirectory()) return -1
        if (!a.isDirectory() && b.isDirectory()) return 1
        return a.name.localeCompare(b.name)
      })

      for (const entry of entries) {
        if (fileCount >= MAX_FILES_LIMIT) {
          truncated = true
          break
        }

        // Skip hidden files and common ignore patterns
        if (entry.name.startsWith('.') && entry.name !== '.env.example') {
          continue
        }
        if (['node_modules', 'dist', 'build', '__pycache__'].includes(entry.name)) {
          continue
        }

        fileCount++
        const node: TreeNode = {
          name: entry.name,
          type: entry.isDirectory() ? 'directory' : 'file'
        }

        if (entry.isDirectory() && recursive && depth < 5) {
          // Limit depth to prevent infinite recursion
          const childPath = path.join(dirPath, entry.name)
          node.children = await buildTree(childPath, depth + 1)
        }

        nodes.push(node)
      }

      return nodes
    } catch (error) {
      return []
    }
  }

  // Build the tree
  const tree = await buildTree(validPath)

  // Format as text output
  function formatTree(nodes: TreeNode[], prefix: string = ''): string[] {
    const lines: string[] = []

    nodes.forEach((node, index) => {
      const isLastNode = index === nodes.length - 1
      const connector = isLastNode ? '└── ' : '├── '
      const icon = node.type === 'directory' ? '📁 ' : '📄 '

      lines.push(prefix + connector + icon + node.name)

      if (node.children && node.children.length > 0) {
        const childPrefix = prefix + (isLastNode ? '    ' : '│   ')
        lines.push(...formatTree(node.children, childPrefix))
      }
    })

    return lines
  }

  // Generate output
  const output: string[] = []
  output.push(`Directory: ${validPath}`)
  output.push('')

  if (tree.length === 0) {
    output.push('(empty directory)')
  } else {
    const treeLines = formatTree(tree, '')
    output.push(...treeLines)

    if (truncated) {
      output.push('')
      output.push(`(Results truncated to ${MAX_FILES_LIMIT} files. Consider listing a more specific directory.)`)
    }
  }

  return {
    content: [
      {
        type: 'text',
        text: output.join('\n')
      }
    ]
  }
}
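A hedged sketch of the ls handler; the listed directory is illustrative.

import { handleLsTool } from './ls'

async function lsDemo() {
  // recursive=true walks up to 5 levels and returns at most 100 entries.
  const result = await handleLsTool({ path: '/workspace/src', recursive: true }, '/workspace')
  console.log(result.content[0].text)
}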
101
src/main/mcpServers/filesystem/tools/read.ts
Normal file
@@ -0,0 +1,101 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { DEFAULT_READ_LIMIT, isBinaryFile, MAX_LINE_LENGTH, validatePath } from '../types'

// Schema definition
export const ReadToolSchema = z.object({
  file_path: z.string().describe('The path to the file to read'),
  offset: z.number().optional().describe('The line number to start reading from (1-based)'),
  limit: z.number().optional().describe('The number of lines to read (defaults to 2000)')
})

// Tool definition with detailed description
export const readToolDefinition = {
  name: 'read',
  description: `Reads a file from the local filesystem.

- Assumes this tool can read all files on the machine
- The file_path parameter must be an absolute path, not a relative path
- By default, reads up to 2000 lines starting from the beginning
- You can optionally specify a line offset and limit for long files
- Any lines longer than 2000 characters will be truncated
- Results are returned with line numbers starting at 1
- Binary files are detected and rejected with an error
- Empty files return a warning`,
  inputSchema: z.toJSONSchema(ReadToolSchema)
}

// Handler implementation
export async function handleReadTool(args: unknown, baseDir: string) {
  const parsed = ReadToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for read: ${parsed.error}`)
  }

  const filePath = parsed.data.file_path
  const validPath = await validatePath(filePath, baseDir)

  // Check if file exists
  try {
    const stats = await fs.stat(validPath)
    if (!stats.isFile()) {
      throw new Error(`Path is not a file: ${filePath}`)
    }
  } catch (error: any) {
    if (error.code === 'ENOENT') {
      throw new Error(`File not found: ${filePath}`)
    }
    throw error
  }

  // Check if file is binary
  if (await isBinaryFile(validPath)) {
    throw new Error(`Cannot read binary file: ${filePath}`)
  }

  // Read file content
  const content = await fs.readFile(validPath, 'utf-8')
  const lines = content.split('\n')

  // Apply offset and limit
  const offset = (parsed.data.offset || 1) - 1 // Convert to 0-based
  const limit = parsed.data.limit || DEFAULT_READ_LIMIT

  if (offset < 0 || offset >= lines.length) {
    throw new Error(`Invalid offset: ${offset + 1}. File has ${lines.length} lines.`)
  }

  const selectedLines = lines.slice(offset, offset + limit)

  // Format output with line numbers and truncate long lines
  const output: string[] = []
  const relativePath = path.relative(baseDir, validPath)

  output.push(`File: ${relativePath}`)
  if (offset > 0 || limit < lines.length) {
    output.push(`Lines ${offset + 1} to ${Math.min(offset + limit, lines.length)} of ${lines.length}`)
  }
  output.push('')

  selectedLines.forEach((line, index) => {
    const lineNumber = offset + index + 1
    const truncatedLine = line.length > MAX_LINE_LENGTH ? line.substring(0, MAX_LINE_LENGTH) + '...' : line
    output.push(`${lineNumber.toString().padStart(6)}\t${truncatedLine}`)
  })

  if (offset + limit < lines.length) {
    output.push('')
    output.push(`(${lines.length - (offset + limit)} more lines not shown)`)
  }

  return {
    content: [
      {
        type: 'text',
        text: output.join('\n')
      }
    ]
  }
}
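A hedged sketch of paginated reads with the handler above; the offset, limit, and path are illustrative.

import { handleReadTool } from './read'

async function readDemo() {
  // Read lines 201-400 of a long file (offset is 1-based; limit defaults to 2000).
  const result = await handleReadTool({ file_path: '/workspace/yarn.lock', offset: 201, limit: 200 }, '/workspace')
  console.log(result.content[0].text)
}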
83
src/main/mcpServers/filesystem/tools/write.ts
Normal file
@@ -0,0 +1,83 @@
import fs from 'fs/promises'
import path from 'path'
import * as z from 'zod'

import { logger, validatePath } from '../types'

// Schema definition
export const WriteToolSchema = z.object({
  file_path: z.string().describe('The path to the file to write'),
  content: z.string().describe('The content to write to the file')
})

// Tool definition with detailed description
export const writeToolDefinition = {
  name: 'write',
  description: `Writes a file to the local filesystem.

- This tool will overwrite the existing file if one exists at the path
- You MUST use the read tool first to understand what you're overwriting
- ALWAYS prefer using the 'edit' tool for existing files
- NEVER proactively create documentation files unless explicitly requested
- Parent directories will be created automatically if they don't exist
- The file_path must be an absolute path, not a relative path`,
  inputSchema: z.toJSONSchema(WriteToolSchema)
}

// Handler implementation
export async function handleWriteTool(args: unknown, baseDir: string) {
  const parsed = WriteToolSchema.safeParse(args)
  if (!parsed.success) {
    throw new Error(`Invalid arguments for write: ${parsed.error}`)
  }

  const filePath = parsed.data.file_path
  const validPath = await validatePath(filePath, baseDir)

  // Create parent directory if it doesn't exist
  const parentDir = path.dirname(validPath)
  try {
    await fs.mkdir(parentDir, { recursive: true })
  } catch (error: any) {
    if (error.code !== 'EEXIST') {
      throw new Error(`Failed to create parent directory: ${error.message}`)
    }
  }

  // Check if file exists (for logging)
  let isOverwrite = false
  try {
    await fs.stat(validPath)
    isOverwrite = true
  } catch {
    // File doesn't exist, that's fine
  }

  // Write the file
  try {
    await fs.writeFile(validPath, parsed.data.content, 'utf-8')
  } catch (error: any) {
    throw new Error(`Failed to write file: ${error.message}`)
  }

  // Log the operation
  logger.info('File written', {
    path: validPath,
    overwrite: isOverwrite,
    size: parsed.data.content.length
  })

  // Format output
  const relativePath = path.relative(baseDir, validPath)
  const action = isOverwrite ? 'Updated' : 'Created'
  const lines = parsed.data.content.split('\n').length

  return {
    content: [
      {
        type: 'text',
        text: `${action} file: ${relativePath}\n` + `Size: ${parsed.data.content.length} bytes\n` + `Lines: ${lines}`
      }
    ]
  }
}
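A hedged sketch of the write handler; the target path and content are illustrative.

import { handleWriteTool } from './write'

async function writeDemo() {
  // Parent directories are created automatically; an existing file would be overwritten.
  const result = await handleWriteTool(
    { file_path: '/workspace/notes/todo.md', content: '# TODO\n- review PR\n' },
    '/workspace'
  )
  console.log(result.content[0].text) // e.g. "Created file: notes/todo.md" plus size and line count
}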
627
src/main/mcpServers/filesystem/types.ts
Normal file
@@ -0,0 +1,627 @@
import { loggerService } from '@logger'
import { isMac, isWin } from '@main/constant'
import { spawn } from 'child_process'
import fs from 'fs/promises'
import os from 'os'
import path from 'path'

export const logger = loggerService.withContext('MCP:FileSystemServer')

// Constants
export const MAX_LINE_LENGTH = 2000
export const DEFAULT_READ_LIMIT = 2000
export const MAX_FILES_LIMIT = 100
export const MAX_GREP_MATCHES = 100

// Common types
export interface FileInfo {
  path: string
  type: 'file' | 'directory'
  size?: number
  modified?: Date
}

export interface GrepMatch {
  file: string
  line: number
  content: string
}

// Utility functions for path handling
export function normalizePath(p: string): string {
  return path.normalize(p)
}

export function expandHome(filepath: string): string {
  if (filepath.startsWith('~/') || filepath === '~') {
    return path.join(os.homedir(), filepath.slice(1))
  }
  return filepath
}

// Security validation
export async function validatePath(requestedPath: string, baseDir?: string): Promise<string> {
  const expandedPath = expandHome(requestedPath)
  const root = baseDir ?? process.cwd()
  const absolute = path.isAbsolute(expandedPath) ? path.resolve(expandedPath) : path.resolve(root, expandedPath)

  // Handle symlinks by checking their real path
  try {
    const realPath = await fs.realpath(absolute)
    return normalizePath(realPath)
  } catch (error) {
    // For new files that don't exist yet, verify parent directory
    const parentDir = path.dirname(absolute)
    try {
      const realParentPath = await fs.realpath(parentDir)
      normalizePath(realParentPath)
      return normalizePath(absolute)
    } catch {
      return normalizePath(absolute)
    }
  }
}
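A hedged sketch of how validatePath behaves for the two cases the code above distinguishes (existing paths vs. files that do not exist yet); the paths are illustrative.

import { validatePath } from './types'

async function validatePathDemo() {
  // Relative input is resolved against baseDir; '~' expands to the home directory,
  // and existing paths are resolved through symlinks via fs.realpath.
  const existing = await validatePath('src/index.ts', '/workspace')
  // A not-yet-created file still resolves to a normalized absolute path.
  const pending = await validatePath('/workspace/new/file.txt', '/workspace')
  console.log(existing, pending)
}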
// ============================================================================
// Edit Tool Utilities - Fuzzy matching replacers from opencode
// ============================================================================

export type Replacer = (content: string, find: string) => Generator<string, void, unknown>

// Similarity thresholds for block anchor fallback matching
const SINGLE_CANDIDATE_SIMILARITY_THRESHOLD = 0.0
const MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD = 0.3

/**
 * Levenshtein distance algorithm implementation
 */
function levenshtein(a: string, b: string): number {
  if (a === '' || b === '') {
    return Math.max(a.length, b.length)
  }
  const matrix = Array.from({ length: a.length + 1 }, (_, i) =>
    Array.from({ length: b.length + 1 }, (_, j) => (i === 0 ? j : j === 0 ? i : 0))
  )

  for (let i = 1; i <= a.length; i++) {
    for (let j = 1; j <= b.length; j++) {
      const cost = a[i - 1] === b[j - 1] ? 0 : 1
      matrix[i][j] = Math.min(matrix[i - 1][j] + 1, matrix[i][j - 1] + 1, matrix[i - 1][j - 1] + cost)
    }
  }
  return matrix[a.length][b.length]
}

export const SimpleReplacer: Replacer = function* (_content, find) {
  yield find
}

export const LineTrimmedReplacer: Replacer = function* (content, find) {
  const originalLines = content.split('\n')
  const searchLines = find.split('\n')

  if (searchLines[searchLines.length - 1] === '') {
    searchLines.pop()
  }

  for (let i = 0; i <= originalLines.length - searchLines.length; i++) {
    let matches = true

    for (let j = 0; j < searchLines.length; j++) {
      const originalTrimmed = originalLines[i + j].trim()
      const searchTrimmed = searchLines[j].trim()

      if (originalTrimmed !== searchTrimmed) {
        matches = false
        break
      }
    }

    if (matches) {
      let matchStartIndex = 0
      for (let k = 0; k < i; k++) {
        matchStartIndex += originalLines[k].length + 1
      }

      let matchEndIndex = matchStartIndex
      for (let k = 0; k < searchLines.length; k++) {
        matchEndIndex += originalLines[i + k].length
        if (k < searchLines.length - 1) {
          matchEndIndex += 1
        }
      }

      yield content.substring(matchStartIndex, matchEndIndex)
    }
  }
}

export const BlockAnchorReplacer: Replacer = function* (content, find) {
  const originalLines = content.split('\n')
  const searchLines = find.split('\n')

  if (searchLines.length < 3) {
    return
  }

  if (searchLines[searchLines.length - 1] === '') {
    searchLines.pop()
  }

  const firstLineSearch = searchLines[0].trim()
  const lastLineSearch = searchLines[searchLines.length - 1].trim()
  const searchBlockSize = searchLines.length

  const candidates: Array<{ startLine: number; endLine: number }> = []
  for (let i = 0; i < originalLines.length; i++) {
    if (originalLines[i].trim() !== firstLineSearch) {
      continue
    }

    for (let j = i + 2; j < originalLines.length; j++) {
      if (originalLines[j].trim() === lastLineSearch) {
        candidates.push({ startLine: i, endLine: j })
        break
      }
    }
  }

  if (candidates.length === 0) {
    return
  }

  if (candidates.length === 1) {
    const { startLine, endLine } = candidates[0]
    const actualBlockSize = endLine - startLine + 1

    let similarity = 0
    const linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2)

    if (linesToCheck > 0) {
      for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
        const originalLine = originalLines[startLine + j].trim()
        const searchLine = searchLines[j].trim()
        const maxLen = Math.max(originalLine.length, searchLine.length)
        if (maxLen === 0) {
          continue
        }
        const distance = levenshtein(originalLine, searchLine)
        similarity += (1 - distance / maxLen) / linesToCheck

        if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
          break
        }
      }
    } else {
      similarity = 1.0
    }

    if (similarity >= SINGLE_CANDIDATE_SIMILARITY_THRESHOLD) {
      let matchStartIndex = 0
      for (let k = 0; k < startLine; k++) {
        matchStartIndex += originalLines[k].length + 1
      }
      let matchEndIndex = matchStartIndex
      for (let k = startLine; k <= endLine; k++) {
        matchEndIndex += originalLines[k].length
        if (k < endLine) {
          matchEndIndex += 1
        }
      }
      yield content.substring(matchStartIndex, matchEndIndex)
    }
    return
  }

  let bestMatch: { startLine: number; endLine: number } | null = null
  let maxSimilarity = -1

  for (const candidate of candidates) {
    const { startLine, endLine } = candidate
    const actualBlockSize = endLine - startLine + 1

    let similarity = 0
    const linesToCheck = Math.min(searchBlockSize - 2, actualBlockSize - 2)

    if (linesToCheck > 0) {
      for (let j = 1; j < searchBlockSize - 1 && j < actualBlockSize - 1; j++) {
        const originalLine = originalLines[startLine + j].trim()
        const searchLine = searchLines[j].trim()
        const maxLen = Math.max(originalLine.length, searchLine.length)
        if (maxLen === 0) {
          continue
        }
        const distance = levenshtein(originalLine, searchLine)
        similarity += 1 - distance / maxLen
      }
      similarity /= linesToCheck
    } else {
      similarity = 1.0
    }

    if (similarity > maxSimilarity) {
      maxSimilarity = similarity
      bestMatch = candidate
    }
  }

  if (maxSimilarity >= MULTIPLE_CANDIDATES_SIMILARITY_THRESHOLD && bestMatch) {
    const { startLine, endLine } = bestMatch
    let matchStartIndex = 0
    for (let k = 0; k < startLine; k++) {
      matchStartIndex += originalLines[k].length + 1
    }
    let matchEndIndex = matchStartIndex
    for (let k = startLine; k <= endLine; k++) {
      matchEndIndex += originalLines[k].length
      if (k < endLine) {
        matchEndIndex += 1
      }
    }
    yield content.substring(matchStartIndex, matchEndIndex)
  }
}

export const WhitespaceNormalizedReplacer: Replacer = function* (content, find) {
  const normalizeWhitespace = (text: string) => text.replace(/\s+/g, ' ').trim()
  const normalizedFind = normalizeWhitespace(find)

  const lines = content.split('\n')
  for (let i = 0; i < lines.length; i++) {
    const line = lines[i]
    if (normalizeWhitespace(line) === normalizedFind) {
      yield line
    } else {
      const normalizedLine = normalizeWhitespace(line)
      if (normalizedLine.includes(normalizedFind)) {
        const words = find.trim().split(/\s+/)
        if (words.length > 0) {
          const pattern = words.map((word) => word.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')).join('\\s+')
|
||||||
|
try {
|
||||||
|
const regex = new RegExp(pattern)
|
||||||
|
const match = line.match(regex)
|
||||||
|
if (match) {
|
||||||
|
yield match[0]
|
||||||
|
}
|
||||||
|
} catch {
|
||||||
|
// Invalid regex pattern, skip
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const findLines = find.split('\n')
|
||||||
|
if (findLines.length > 1) {
|
||||||
|
for (let i = 0; i <= lines.length - findLines.length; i++) {
|
||||||
|
const block = lines.slice(i, i + findLines.length)
|
||||||
|
if (normalizeWhitespace(block.join('\n')) === normalizedFind) {
|
||||||
|
yield block.join('\n')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export const IndentationFlexibleReplacer: Replacer = function* (content, find) {
|
||||||
|
const removeIndentation = (text: string) => {
|
||||||
|
const lines = text.split('\n')
|
||||||
|
const nonEmptyLines = lines.filter((line) => line.trim().length > 0)
|
||||||
|
if (nonEmptyLines.length === 0) return text
|
||||||
|
|
||||||
|
const minIndent = Math.min(
|
||||||
|
...nonEmptyLines.map((line) => {
|
||||||
|
const match = line.match(/^(\s*)/)
|
||||||
|
return match ? match[1].length : 0
|
||||||
|
})
|
||||||
|
)
|
||||||
|
|
||||||
|
return lines.map((line) => (line.trim().length === 0 ? line : line.slice(minIndent))).join('\n')
|
||||||
|
}
|
||||||
|
|
||||||
|
const normalizedFind = removeIndentation(find)
|
||||||
|
const contentLines = content.split('\n')
|
||||||
|
const findLines = find.split('\n')
|
||||||
|
|
||||||
|
for (let i = 0; i <= contentLines.length - findLines.length; i++) {
|
||||||
|
const block = contentLines.slice(i, i + findLines.length).join('\n')
|
||||||
|
if (removeIndentation(block) === normalizedFind) {
|
||||||
|
yield block
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export const EscapeNormalizedReplacer: Replacer = function* (content, find) {
|
||||||
|
const unescapeString = (str: string): string => {
|
||||||
|
return str.replace(/\\(n|t|r|'|"|`|\\|\n|\$)/g, (match, capturedChar) => {
|
||||||
|
switch (capturedChar) {
|
||||||
|
case 'n':
|
||||||
|
return '\n'
|
||||||
|
case 't':
|
||||||
|
return '\t'
|
||||||
|
case 'r':
|
||||||
|
return '\r'
|
||||||
|
case "'":
|
||||||
|
return "'"
|
||||||
|
case '"':
|
||||||
|
return '"'
|
||||||
|
case '`':
|
||||||
|
return '`'
|
||||||
|
case '\\':
|
||||||
|
return '\\'
|
||||||
|
case '\n':
|
||||||
|
return '\n'
|
||||||
|
case '$':
|
||||||
|
return '$'
|
||||||
|
default:
|
||||||
|
return match
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
const unescapedFind = unescapeString(find)
|
||||||
|
|
||||||
|
if (content.includes(unescapedFind)) {
|
||||||
|
yield unescapedFind
|
||||||
|
}
|
||||||
|
|
||||||
|
const lines = content.split('\n')
|
||||||
|
const findLines = unescapedFind.split('\n')
|
||||||
|
|
||||||
|
for (let i = 0; i <= lines.length - findLines.length; i++) {
|
||||||
|
const block = lines.slice(i, i + findLines.length).join('\n')
|
||||||
|
const unescapedBlock = unescapeString(block)
|
||||||
|
|
||||||
|
if (unescapedBlock === unescapedFind) {
|
||||||
|
yield block
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export const TrimmedBoundaryReplacer: Replacer = function* (content, find) {
|
||||||
|
const trimmedFind = find.trim()
|
||||||
|
|
||||||
|
if (trimmedFind === find) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if (content.includes(trimmedFind)) {
|
||||||
|
yield trimmedFind
|
||||||
|
}
|
||||||
|
|
||||||
|
const lines = content.split('\n')
|
||||||
|
const findLines = find.split('\n')
|
||||||
|
|
||||||
|
for (let i = 0; i <= lines.length - findLines.length; i++) {
|
||||||
|
const block = lines.slice(i, i + findLines.length).join('\n')
|
||||||
|
|
||||||
|
if (block.trim() === trimmedFind) {
|
||||||
|
yield block
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export const ContextAwareReplacer: Replacer = function* (content, find) {
|
||||||
|
const findLines = find.split('\n')
|
||||||
|
if (findLines.length < 3) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if (findLines[findLines.length - 1] === '') {
|
||||||
|
findLines.pop()
|
||||||
|
}
|
||||||
|
|
||||||
|
const contentLines = content.split('\n')
|
||||||
|
|
||||||
|
const firstLine = findLines[0].trim()
|
||||||
|
const lastLine = findLines[findLines.length - 1].trim()
|
||||||
|
|
||||||
|
for (let i = 0; i < contentLines.length; i++) {
|
||||||
|
if (contentLines[i].trim() !== firstLine) continue
|
||||||
|
|
||||||
|
for (let j = i + 2; j < contentLines.length; j++) {
|
||||||
|
if (contentLines[j].trim() === lastLine) {
|
||||||
|
const blockLines = contentLines.slice(i, j + 1)
|
||||||
|
const block = blockLines.join('\n')
|
||||||
|
|
||||||
|
if (blockLines.length === findLines.length) {
|
||||||
|
let matchingLines = 0
|
||||||
|
let totalNonEmptyLines = 0
|
||||||
|
|
||||||
|
for (let k = 1; k < blockLines.length - 1; k++) {
|
||||||
|
const blockLine = blockLines[k].trim()
|
||||||
|
const findLine = findLines[k].trim()
|
||||||
|
|
||||||
|
if (blockLine.length > 0 || findLine.length > 0) {
|
||||||
|
totalNonEmptyLines++
|
||||||
|
if (blockLine === findLine) {
|
||||||
|
matchingLines++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (totalNonEmptyLines === 0 || matchingLines / totalNonEmptyLines >= 0.5) {
|
||||||
|
yield block
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export const MultiOccurrenceReplacer: Replacer = function* (content, find) {
|
||||||
|
let startIndex = 0
|
||||||
|
|
||||||
|
while (true) {
|
||||||
|
const index = content.indexOf(find, startIndex)
|
||||||
|
if (index === -1) break
|
||||||
|
|
||||||
|
yield find
|
||||||
|
startIndex = index + find.length
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* All replacers in order of specificity
|
||||||
|
*/
|
||||||
|
export const ALL_REPLACERS: Replacer[] = [
|
||||||
|
SimpleReplacer,
|
||||||
|
LineTrimmedReplacer,
|
||||||
|
BlockAnchorReplacer,
|
||||||
|
WhitespaceNormalizedReplacer,
|
||||||
|
IndentationFlexibleReplacer,
|
||||||
|
EscapeNormalizedReplacer,
|
||||||
|
TrimmedBoundaryReplacer,
|
||||||
|
ContextAwareReplacer,
|
||||||
|
MultiOccurrenceReplacer
|
||||||
|
]
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Replace oldString with newString in content using fuzzy matching
|
||||||
|
*/
|
||||||
|
export function replaceWithFuzzyMatch(
|
||||||
|
content: string,
|
||||||
|
oldString: string,
|
||||||
|
newString: string,
|
||||||
|
replaceAll = false
|
||||||
|
): string {
|
||||||
|
if (oldString === newString) {
|
||||||
|
throw new Error('old_string and new_string must be different')
|
||||||
|
}
|
||||||
|
|
||||||
|
let notFound = true
|
||||||
|
|
||||||
|
for (const replacer of ALL_REPLACERS) {
|
||||||
|
for (const search of replacer(content, oldString)) {
|
||||||
|
const index = content.indexOf(search)
|
||||||
|
if (index === -1) continue
|
||||||
|
notFound = false
|
||||||
|
if (replaceAll) {
|
||||||
|
return content.replaceAll(search, newString)
|
||||||
|
}
|
||||||
|
const lastIndex = content.lastIndexOf(search)
|
||||||
|
if (index !== lastIndex) continue
|
||||||
|
return content.substring(0, index) + newString + content.substring(index + search.length)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (notFound) {
|
||||||
|
throw new Error('old_string not found in content')
|
||||||
|
}
|
||||||
|
throw new Error(
|
||||||
|
'Found multiple matches for old_string. Provide more surrounding lines in old_string to identify the correct match.'
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
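// Illustrative usage sketch (not part of the original module): how the replacer cascade
// above is expected to behave. The literal strings are invented for the example.
//
//   const content = 'function add(a, b) {\n\treturn a + b\n}\n'
//   // old_string is indented with spaces while the file uses a tab, so the exact match
//   // fails, but LineTrimmedReplacer matches the line after trimming and the unique
//   // match is swapped for new_string:
//   const next = replaceWithFuzzyMatch(content, '    return a + b', '\treturn a * b')
//   // next === 'function add(a, b) {\n\treturn a * b\n}\n'
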
// ============================================================================
// Binary File Detection
// ============================================================================

// Check if a file is likely binary
export async function isBinaryFile(filePath: string): Promise<boolean> {
  try {
    const buffer = Buffer.alloc(4096)
    const fd = await fs.open(filePath, 'r')
    const { bytesRead } = await fd.read(buffer, 0, buffer.length, 0)
    await fd.close()

    if (bytesRead === 0) return false

    const view = buffer.subarray(0, bytesRead)

    let zeroBytes = 0
    let evenZeros = 0
    let oddZeros = 0
    let nonPrintable = 0

    for (let i = 0; i < view.length; i++) {
      const b = view[i]

      if (b === 0) {
        zeroBytes++
        if (i % 2 === 0) evenZeros++
        else oddZeros++
        continue
      }

      // treat common whitespace as printable
      if (b === 9 || b === 10 || b === 13) continue

      // basic ASCII printable range
      if (b >= 32 && b <= 126) continue

      // bytes >= 128 are likely part of UTF-8 sequences; count as printable
      if (b >= 128) continue

      nonPrintable++
    }

    // If there are lots of null bytes, it's probably binary unless it looks like UTF-16 text.
    if (zeroBytes > 0) {
      const evenSlots = Math.ceil(view.length / 2)
      const oddSlots = Math.floor(view.length / 2)
      const evenZeroRatio = evenSlots > 0 ? evenZeros / evenSlots : 0
      const oddZeroRatio = oddSlots > 0 ? oddZeros / oddSlots : 0

      // UTF-16LE/BE tends to have zeros on every other byte.
      if (evenZeroRatio > 0.7 || oddZeroRatio > 0.7) return false

      if (zeroBytes / view.length > 0.05) return true
    }

    // Heuristic: too many non-printable bytes => binary.
    return nonPrintable / view.length > 0.3
  } catch {
    return false
  }
}

// ============================================================================
// Ripgrep Utilities
// ============================================================================

export interface RipgrepResult {
  ok: boolean
  stdout: string
  exitCode: number | null
}

export function getRipgrepAddonPath(): string {
  const pkgJsonPath = require.resolve('@anthropic-ai/claude-agent-sdk/package.json')
  const pkgRoot = path.dirname(pkgJsonPath)
  const platform = isMac ? 'darwin' : isWin ? 'win32' : 'linux'
  const arch = process.arch === 'arm64' ? 'arm64' : 'x64'
  return path.join(pkgRoot, 'vendor', 'ripgrep', `${arch}-${platform}`, 'ripgrep.node')
}

export async function runRipgrep(args: string[]): Promise<RipgrepResult> {
  const addonPath = getRipgrepAddonPath()
  const childScript = `const { ripgrepMain } = require(process.env.RIPGREP_ADDON_PATH); process.exit(ripgrepMain(process.argv.slice(1)));`

  return new Promise((resolve) => {
    const child = spawn(process.execPath, ['--eval', childScript, 'rg', ...args], {
      cwd: process.cwd(),
      env: {
        ...process.env,
        ELECTRON_RUN_AS_NODE: '1',
        RIPGREP_ADDON_PATH: addonPath
      },
      stdio: ['ignore', 'pipe', 'pipe']
    })

    let stdout = ''

    child.stdout?.on('data', (chunk) => {
      stdout += chunk.toString('utf-8')
    })

    child.on('error', () => {
      resolve({ ok: false, stdout: '', exitCode: null })
    })

    child.on('close', (code) => {
      resolve({ ok: true, stdout, exitCode: code })
    })
  })
}
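
A quick sketch of how these ripgrep helpers might be driven. The calling context and the
search arguments below are assumptions for illustration, not code from the repository:

// Inside some async caller: list lines mentioning "TODO" as ripgrep JSON events.
const result = await runRipgrep(['--json', '--max-count', '50', 'TODO', '.'])
if (result.ok && result.exitCode === 0) {
  // ripgrep exits 0 when matches were found; each stdout line is one JSON event.
  const matches = result.stdout
    .split('\n')
    .filter(Boolean)
    .map((line) => JSON.parse(line))
    .filter((event) => event.type === 'match')
  console.log(`ripgrep found ${matches.length} matching lines`)
}
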
@@ -32,7 +32,8 @@ export enum ConfigKeys {
   Proxy = 'proxy',
   EnableDeveloperMode = 'enableDeveloperMode',
   ClientId = 'clientId',
-  GitBashPath = 'gitBashPath'
+  GitBashPath = 'gitBashPath',
+  GitBashPathSource = 'gitBashPathSource' // 'manual' | 'auto' | null
 }
 
 export class ConfigManager {
@@ -163,7 +163,7 @@ class FileStorage {
       fs.mkdirSync(this.storageDir, { recursive: true })
     }
     if (!fs.existsSync(this.notesDir)) {
-      fs.mkdirSync(this.storageDir, { recursive: true })
+      fs.mkdirSync(this.notesDir, { recursive: true })
     }
     if (!fs.existsSync(this.tempDir)) {
       fs.mkdirSync(this.tempDir, { recursive: true })
@@ -249,6 +249,26 @@ class McpService {
     StdioClientTransport | SSEClientTransport | InMemoryTransport | StreamableHTTPClientTransport
   > => {
     // Create appropriate transport based on configuration
+
+    // Special case for nowledgeMem - uses HTTP transport instead of in-memory
+    if (isBuiltinMCPServer(server) && server.name === BuiltinMCPServerNames.nowledgeMem) {
+      const nowledgeMemUrl = 'http://127.0.0.1:14242/mcp'
+      const options: StreamableHTTPClientTransportOptions = {
+        fetch: async (url, init) => {
+          return net.fetch(typeof url === 'string' ? url : url.toString(), init)
+        },
+        requestInit: {
+          headers: {
+            ...defaultAppHeaders(),
+            APP: 'Cherry Studio'
+          }
+        },
+        authProvider
+      }
+      getServerLogger(server).debug(`Using StreamableHTTPClientTransport for ${server.name}`)
+      return new StreamableHTTPClientTransport(new URL(nowledgeMemUrl), options)
+    }
+
     if (isBuiltinMCPServer(server) && server.name !== BuiltinMCPServerNames.mcpAutoInstall) {
       getServerLogger(server).debug(`Using in-memory transport`)
       const [clientTransport, serverTransport] = InMemoryTransport.createLinkedPair()
@@ -15,8 +15,8 @@ import { query } from '@anthropic-ai/claude-agent-sdk'
 import { loggerService } from '@logger'
 import { config as apiConfigService } from '@main/apiServer/config'
 import { validateModelId } from '@main/apiServer/utils'
-import { ConfigKeys, configManager } from '@main/services/ConfigManager'
-import { validateGitBashPath } from '@main/utils/process'
+import { isWin } from '@main/constant'
+import { autoDiscoverGitBash } from '@main/utils/process'
 import getLoginShellEnvironment from '@main/utils/shell-env'
 import { app } from 'electron'
 
@@ -109,7 +109,8 @@ class ClaudeCodeService implements AgentServiceInterface {
       Object.entries(loginShellEnv).filter(([key]) => !key.toLowerCase().endsWith('_proxy'))
     ) as Record<string, string>
 
-    const customGitBashPath = validateGitBashPath(configManager.get(ConfigKeys.GitBashPath) as string | undefined)
+    // Auto-discover Git Bash path on Windows (already logs internally)
+    const customGitBashPath = isWin ? autoDiscoverGitBash() : null
 
     const env = {
       ...loginShellEnvWithoutProxies,
@@ -128,8 +128,8 @@ export class CallBackServer {
     })
 
     return new Promise<http.Server>((resolve, reject) => {
-      server.listen(port, () => {
-        logger.info(`OAuth callback server listening on port ${port}`)
+      server.listen(port, '127.0.0.1', () => {
+        logger.info(`OAuth callback server listening on 127.0.0.1:${port}`)
         resolve(server)
       })
 
@@ -1,9 +1,21 @@
+import { configManager } from '@main/services/ConfigManager'
 import { execFileSync } from 'child_process'
 import fs from 'fs'
 import path from 'path'
 import { beforeEach, describe, expect, it, vi } from 'vitest'
 
-import { findExecutable, findGitBash, validateGitBashPath } from '../process'
+import { autoDiscoverGitBash, findExecutable, findGitBash, validateGitBashPath } from '../process'
 
+// Mock configManager
+vi.mock('@main/services/ConfigManager', () => ({
+  ConfigKeys: {
+    GitBashPath: 'gitBashPath'
+  },
+  configManager: {
+    get: vi.fn(),
+    set: vi.fn()
+  }
+}))
+
 // Mock dependencies
 vi.mock('child_process')
@@ -695,4 +707,284 @@ describe.skipIf(process.platform !== 'win32')('process utilities', () => {
       })
     })
   })
+
+  describe('autoDiscoverGitBash', () => {
+    const originalEnvVar = process.env.CLAUDE_CODE_GIT_BASH_PATH
+
+    beforeEach(() => {
+      vi.mocked(configManager.get).mockReset()
+      vi.mocked(configManager.set).mockReset()
+      delete process.env.CLAUDE_CODE_GIT_BASH_PATH
+    })
+
+    afterEach(() => {
+      // Restore original environment variable
+      if (originalEnvVar !== undefined) {
+        process.env.CLAUDE_CODE_GIT_BASH_PATH = originalEnvVar
+      } else {
+        delete process.env.CLAUDE_CODE_GIT_BASH_PATH
+      }
+    })
+
+    /**
+     * Helper to mock fs.existsSync with a set of valid paths
+     */
+    const mockExistingPaths = (...validPaths: string[]) => {
+      vi.mocked(fs.existsSync).mockImplementation((p) => validPaths.includes(p as string))
+    }
+
+    describe('with no existing config path', () => {
+      it('should discover and persist Git Bash path when not configured', () => {
+        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(undefined)
+        process.env.ProgramFiles = 'C:\\Program Files'
+        mockExistingPaths(gitPath, bashPath)
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBe(bashPath)
+        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
+      })
+
+      it('should return null and not persist when Git Bash is not found', () => {
+        vi.mocked(configManager.get).mockReturnValue(undefined)
+        vi.mocked(fs.existsSync).mockReturnValue(false)
+        vi.mocked(execFileSync).mockImplementation(() => {
+          throw new Error('Not found')
+        })
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBeNull()
+        expect(configManager.set).not.toHaveBeenCalled()
+      })
+    })
+
+    describe('environment variable precedence', () => {
+      it('should use env var over valid config path', () => {
+        const envPath = 'C:\\EnvGit\\bin\\bash.exe'
+        const configPath = 'C:\\ConfigGit\\bin\\bash.exe'
+
+        process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
+        vi.mocked(configManager.get).mockReturnValue(configPath)
+        mockExistingPaths(envPath, configPath)
+
+        const result = autoDiscoverGitBash()
+
+        // Env var should take precedence
+        expect(result).toBe(envPath)
+        // Should not persist env var path (it's a runtime override)
+        expect(configManager.set).not.toHaveBeenCalled()
+      })
+
+      it('should fall back to config path when env var is invalid', () => {
+        const envPath = 'C:\\Invalid\\bash.exe'
+        const configPath = 'C:\\ConfigGit\\bin\\bash.exe'
+
+        process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
+        vi.mocked(configManager.get).mockReturnValue(configPath)
+        // Env path is invalid (doesn't exist), only config path exists
+        mockExistingPaths(configPath)
+
+        const result = autoDiscoverGitBash()
+
+        // Should fall back to config path
+        expect(result).toBe(configPath)
+        expect(configManager.set).not.toHaveBeenCalled()
+      })
+
+      it('should fall back to auto-discovery when both env var and config are invalid', () => {
+        const envPath = 'C:\\InvalidEnv\\bash.exe'
+        const configPath = 'C:\\InvalidConfig\\bash.exe'
+        const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
+
+        process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
+        process.env.ProgramFiles = 'C:\\Program Files'
+        vi.mocked(configManager.get).mockReturnValue(configPath)
+        // Both env and config paths are invalid, only standard Git exists
+        mockExistingPaths(gitPath, discoveredPath)
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBe(discoveredPath)
+        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', discoveredPath)
+      })
+    })
+
+    describe('with valid existing config path', () => {
+      it('should validate and return existing path without re-discovering', () => {
+        const existingPath = 'C:\\CustomGit\\bin\\bash.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(existingPath)
+        mockExistingPaths(existingPath)
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBe(existingPath)
+        // Should not call findGitBash or persist again
+        expect(configManager.set).not.toHaveBeenCalled()
+        // Should not call execFileSync (which findGitBash would use for discovery)
+        expect(execFileSync).not.toHaveBeenCalled()
+      })
+
+      it('should not override existing valid config with auto-discovery', () => {
+        const existingPath = 'C:\\CustomGit\\bin\\bash.exe'
+        const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(existingPath)
+        mockExistingPaths(existingPath, discoveredPath)
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBe(existingPath)
+        expect(configManager.set).not.toHaveBeenCalled()
+      })
+    })
+
+    describe('with invalid existing config path', () => {
+      it('should attempt auto-discovery when existing path does not exist', () => {
+        const existingPath = 'C:\\NonExistent\\bin\\bash.exe'
+        const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(existingPath)
+        process.env.ProgramFiles = 'C:\\Program Files'
+        // Invalid path doesn't exist, but Git is installed at standard location
+        mockExistingPaths(gitPath, discoveredPath)
+
+        const result = autoDiscoverGitBash()
+
+        // Should discover and return the new path
+        expect(result).toBe(discoveredPath)
+        // Should persist the discovered path (overwrites invalid)
+        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', discoveredPath)
+      })
+
+      it('should attempt auto-discovery when existing path is not bash.exe', () => {
+        const existingPath = 'C:\\CustomGit\\bin\\git.exe'
+        const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(existingPath)
+        process.env.ProgramFiles = 'C:\\Program Files'
+        // Invalid path exists but is not bash.exe (validation will fail)
+        // Git is installed at standard location
+        mockExistingPaths(existingPath, gitPath, discoveredPath)
+
+        const result = autoDiscoverGitBash()
+
+        // Should discover and return the new path
+        expect(result).toBe(discoveredPath)
+        // Should persist the discovered path (overwrites invalid)
+        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', discoveredPath)
+      })
+
+      it('should return null when existing path is invalid and discovery fails', () => {
+        const existingPath = 'C:\\NonExistent\\bin\\bash.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(existingPath)
+        vi.mocked(fs.existsSync).mockReturnValue(false)
+        vi.mocked(execFileSync).mockImplementation(() => {
+          throw new Error('Not found')
+        })
+
+        const result = autoDiscoverGitBash()
+
+        // Both validation and discovery failed
+        expect(result).toBeNull()
+        // Should not persist when discovery fails
+        expect(configManager.set).not.toHaveBeenCalled()
+      })
+    })
+
+    describe('config persistence verification', () => {
+      it('should persist discovered path with correct config key', () => {
+        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(undefined)
+        process.env.ProgramFiles = 'C:\\Program Files'
+        mockExistingPaths(gitPath, bashPath)
+
+        autoDiscoverGitBash()
+
+        // Verify the exact call to configManager.set
+        expect(configManager.set).toHaveBeenCalledTimes(1)
+        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
+      })
+
+      it('should persist on each discovery when config remains undefined', () => {
+        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(undefined)
+        process.env.ProgramFiles = 'C:\\Program Files'
+        mockExistingPaths(gitPath, bashPath)
+
+        autoDiscoverGitBash()
+        autoDiscoverGitBash()
+
+        // Each call discovers and persists since config remains undefined (mocked)
+        expect(configManager.set).toHaveBeenCalledTimes(2)
+      })
+    })
+
+    describe('real-world scenarios', () => {
+      it('should discover and persist standard Git for Windows installation', () => {
+        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
+        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(undefined)
+        process.env.ProgramFiles = 'C:\\Program Files'
+        mockExistingPaths(gitPath, bashPath)
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBe(bashPath)
+        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
+      })
+
+      it('should discover portable Git via where.exe and persist', () => {
+        const gitPath = 'D:\\PortableApps\\Git\\bin\\git.exe'
+        const bashPath = 'D:\\PortableApps\\Git\\bin\\bash.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(undefined)
+
+        vi.mocked(fs.existsSync).mockImplementation((p) => {
+          const pathStr = p?.toString() || ''
+          // Common git paths don't exist
+          if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
+          if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
+          // Portable bash path exists
+          if (pathStr === bashPath) return true
+          return false
+        })
+
+        vi.mocked(execFileSync).mockReturnValue(gitPath)
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBe(bashPath)
+        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
+      })
+
+      it('should respect user-configured path over auto-discovery', () => {
+        const userConfiguredPath = 'D:\\MyGit\\bin\\bash.exe'
+        const systemPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(userConfiguredPath)
+        mockExistingPaths(userConfiguredPath, systemPath)
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBe(userConfiguredPath)
+        expect(configManager.set).not.toHaveBeenCalled()
+        // Verify findGitBash was not called for discovery
+        expect(execFileSync).not.toHaveBeenCalled()
+      })
+    })
+  })
 })
@@ -1,4 +1,5 @@
 import { loggerService } from '@logger'
+import type { GitBashPathInfo, GitBashPathSource } from '@shared/config/constant'
 import { HOME_CHERRY_DIR } from '@shared/config/constant'
 import { execFileSync, spawn } from 'child_process'
 import fs from 'fs'
@@ -6,6 +7,7 @@ import os from 'os'
 import path from 'path'
 
 import { isWin } from '../constant'
+import { ConfigKeys, configManager } from '../services/ConfigManager'
 import { getResourcePath } from '.'
 
 const logger = loggerService.withContext('Utils:Process')
@@ -59,7 +61,7 @@ export async function getBinaryPath(name?: string): Promise<string> {
 
 export async function isBinaryExists(name: string): Promise<boolean> {
   const cmd = await getBinaryPath(name)
-  return await fs.existsSync(cmd)
+  return fs.existsSync(cmd)
 }
 
 /**
@@ -225,3 +227,77 @@ export function validateGitBashPath(customPath?: string | null): string | null {
   logger.debug('Validated custom Git Bash path', { path: resolved })
   return resolved
 }
+
+/**
+ * Auto-discover and persist Git Bash path if not already configured
+ * Only called when Git Bash is actually needed
+ *
+ * Precedence order:
+ * 1. CLAUDE_CODE_GIT_BASH_PATH environment variable (highest - runtime override)
+ * 2. Configured path from settings (manual or auto)
+ * 3. Auto-discovery via findGitBash (only if no valid config exists)
+ */
+export function autoDiscoverGitBash(): string | null {
+  if (!isWin) {
+    return null
+  }
+
+  // 1. Check environment variable override first (highest priority)
+  const envOverride = process.env.CLAUDE_CODE_GIT_BASH_PATH
+  if (envOverride) {
+    const validated = validateGitBashPath(envOverride)
+    if (validated) {
+      logger.debug('Using CLAUDE_CODE_GIT_BASH_PATH override', { path: validated })
+      return validated
+    }
+    logger.warn('CLAUDE_CODE_GIT_BASH_PATH provided but path is invalid', { path: envOverride })
+  }
+
+  // 2. Check if a path is already configured
+  const existingPath = configManager.get<string | undefined>(ConfigKeys.GitBashPath)
+  const existingSource = configManager.get<GitBashPathSource | undefined>(ConfigKeys.GitBashPathSource)
+
+  if (existingPath) {
+    const validated = validateGitBashPath(existingPath)
+    if (validated) {
+      return validated
+    }
+    // Existing path is invalid, try to auto-discover
+    logger.warn('Existing Git Bash path is invalid, attempting auto-discovery', {
+      path: existingPath,
+      source: existingSource
+    })
+  }
+
+  // 3. Try to find Git Bash via auto-discovery
+  const discoveredPath = findGitBash()
+  if (discoveredPath) {
+    // Persist the discovered path with 'auto' source
+    configManager.set(ConfigKeys.GitBashPath, discoveredPath)
+    configManager.set(ConfigKeys.GitBashPathSource, 'auto')
+    logger.info('Auto-discovered Git Bash path', { path: discoveredPath })
+  }
+
+  return discoveredPath
+}
+
+/**
+ * Get Git Bash path info including source
+ * If no path is configured, triggers auto-discovery first
+ */
+export function getGitBashPathInfo(): GitBashPathInfo {
+  if (!isWin) {
+    return { path: null, source: null }
+  }
+
+  let path = configManager.get<string | null>(ConfigKeys.GitBashPath) ?? null
+  let source = configManager.get<GitBashPathSource | null>(ConfigKeys.GitBashPathSource) ?? null
+
+  // If no path configured, trigger auto-discovery (handles upgrade from old versions)
+  if (!path) {
+    path = autoDiscoverGitBash()
+    source = path ? 'auto' : null
+  }
+
+  return { path, source }
+}
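
A minimal sketch of how the precedence documented above plays out for a caller. The caller
shown here is hypothetical; only autoDiscoverGitBash and getGitBashPathInfo come from the
change above:

// Hypothetical main-process caller on Windows:
const { path: gitBashPath, source } = getGitBashPathInfo()
if (gitBashPath) {
  // source is 'manual' when the user set the path, 'auto' when discovery persisted it.
  logger.info(`Using Git Bash at ${gitBashPath} (source: ${source ?? 'unknown'})`)
} else {
  logger.warn('Git Bash not found; bash-dependent features stay disabled')
}
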
@@ -2,7 +2,7 @@ import type { PermissionUpdate } from '@anthropic-ai/claude-agent-sdk'
 import { electronAPI } from '@electron-toolkit/preload'
 import type { SpanEntity, TokenUsage } from '@mcp-trace/trace-core'
 import type { SpanContext } from '@opentelemetry/api'
-import type { TerminalConfig, UpgradeChannel } from '@shared/config/constant'
+import type { GitBashPathInfo, TerminalConfig, UpgradeChannel } from '@shared/config/constant'
 import type { LogLevel, LogSourceWithContext } from '@shared/config/logger'
 import type { FileChangeEvent, WebviewKeyEvent } from '@shared/config/types'
 import type { MCPServerLogEntry } from '@shared/config/types'
@@ -126,6 +126,7 @@ const api = {
     getCpuName: () => ipcRenderer.invoke(IpcChannel.System_GetCpuName),
     checkGitBash: (): Promise<boolean> => ipcRenderer.invoke(IpcChannel.System_CheckGitBash),
     getGitBashPath: (): Promise<string | null> => ipcRenderer.invoke(IpcChannel.System_GetGitBashPath),
+    getGitBashPathInfo: (): Promise<GitBashPathInfo> => ipcRenderer.invoke(IpcChannel.System_GetGitBashPathInfo),
     setGitBashPath: (newPath: string | null): Promise<boolean> =>
       ipcRenderer.invoke(IpcChannel.System_SetGitBashPath, newPath)
   },
@@ -10,7 +10,7 @@ import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
 import {
   findTokenLimit,
   GEMINI_FLASH_MODEL_REGEX,
-  getThinkModelType,
+  getModelSupportedReasoningEffortOptions,
   isDeepSeekHybridInferenceModel,
   isDoubaoThinkingAutoModel,
   isGPT5SeriesModel,
@@ -33,7 +33,6 @@ import {
   isSupportedThinkingTokenQwenModel,
   isSupportedThinkingTokenZhipuModel,
   isVisionModel,
-  MODEL_SUPPORTED_REASONING_EFFORT,
   ZHIPU_RESULT_TOKENS
 } from '@renderer/config/models'
 import { mapLanguageToQwenMTModel } from '@renderer/config/translate'
@@ -143,6 +142,10 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
       return { thinking: { type: reasoningEffort ? 'enabled' : 'disabled' } }
     }
 
+    if (reasoningEffort === 'default') {
+      return {}
+    }
+
     if (!reasoningEffort) {
       // DeepSeek hybrid inference models, v3.1 and maybe more in the future
       // Different providers expose different ways to control thinking; normalize that here
@@ -304,16 +307,15 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
     // Grok models/Perplexity models/OpenAI models
     if (isSupportedReasoningEffortModel(model)) {
       // Check whether the model supports the selected option
-      const modelType = getThinkModelType(model)
-      const supportedOptions = MODEL_SUPPORTED_REASONING_EFFORT[modelType]
-      if (supportedOptions.includes(reasoningEffort)) {
+      const supportedOptions = getModelSupportedReasoningEffortOptions(model)?.filter((option) => option !== 'default')
+      if (supportedOptions?.includes(reasoningEffort)) {
         return {
           reasoning_effort: reasoningEffort
         }
       } else {
         // If the selected option is not supported, fall back to the first supported value
         return {
-          reasoning_effort: supportedOptions[0]
+          reasoning_effort: supportedOptions?.[0]
         }
       }
     }
@@ -69,7 +69,7 @@ export abstract class OpenAIBaseClient<
     const sdk = await this.getSdkInstance()
     const response = (await sdk.request({
       method: 'post',
-      path: '/images/generations',
+      path: '/v1/images/generations',
       signal,
       body: {
         model,
@@ -88,7 +88,11 @@
   }
 
   override async getEmbeddingDimensions(model: Model): Promise<number> {
-    const sdk = await this.getSdkInstance()
+    let sdk: OpenAI = await this.getSdkInstance()
+    if (isOllamaProvider(this.provider)) {
+      const embedBaseUrl = `${this.provider.apiHost.replace(/(\/(api|v1))\/?$/, '')}/v1`
+      sdk = sdk.withOptions({ baseURL: embedBaseUrl })
+    }
 
     const data = await sdk.embeddings.create({
       model: model.id,
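
The apiHost rewrite added above normalises an Ollama host before calling the OpenAI-compatible
/v1/embeddings route. A small sketch of the mapping it is aiming for (the hosts are illustrative,
not taken from the repository):

const toEmbedBase = (apiHost: string) => `${apiHost.replace(/(\/(api|v1))\/?$/, '')}/v1`
toEmbedBase('http://localhost:11434/api') // 'http://localhost:11434/v1'
toEmbedBase('http://localhost:11434/v1/') // 'http://localhost:11434/v1'
toEmbedBase('http://localhost:11434')     // 'http://localhost:11434/v1'
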
@@ -2,7 +2,6 @@ import { loggerService } from '@logger'
 import { ApiClientFactory } from '@renderer/aiCore/legacy/clients/ApiClientFactory'
 import type { BaseApiClient } from '@renderer/aiCore/legacy/clients/BaseApiClient'
 import { isDedicatedImageGenerationModel, isFunctionCallingModel } from '@renderer/config/models'
-import { getProviderByModel } from '@renderer/services/AssistantService'
 import { withSpanResult } from '@renderer/services/SpanManagerService'
 import type { StartSpanParams } from '@renderer/trace/types/ModelSpanEntity'
 import type { GenerateImageParams, Model, Provider } from '@renderer/types'
@@ -160,9 +159,6 @@ export default class AiProvider {
   public async getEmbeddingDimensions(model: Model): Promise<number> {
     try {
       // Use the SDK instance to test embedding capabilities
-      if (this.apiClient instanceof OpenAIResponseAPIClient && getProviderByModel(model).type === 'azure-openai') {
-        this.apiClient = this.apiClient.getClient(model) as BaseApiClient
-      }
       const dimensions = await this.apiClient.getEmbeddingDimensions(model)
       return dimensions
     } catch (error) {
@@ -7,7 +7,6 @@ import type { Chunk } from '@renderer/types/chunk'
 import { isOllamaProvider, isSupportEnableThinkingProvider } from '@renderer/utils/provider'
 import type { LanguageModelMiddleware } from 'ai'
 import { extractReasoningMiddleware, simulateStreamingMiddleware } from 'ai'
-import { isEmpty } from 'lodash'
 
 import { getAiSdkProviderId } from '../provider/factory'
 import { isOpenRouterGeminiGenerateImageModel } from '../utils/image'
@@ -16,7 +15,6 @@ import { openrouterGenerateImageMiddleware } from './openrouterGenerateImageMiddleware'
 import { openrouterReasoningMiddleware } from './openrouterReasoningMiddleware'
 import { qwenThinkingMiddleware } from './qwenThinkingMiddleware'
 import { skipGeminiThoughtSignatureMiddleware } from './skipGeminiThoughtSignatureMiddleware'
-import { toolChoiceMiddleware } from './toolChoiceMiddleware'
 
 const logger = loggerService.withContext('AiSdkMiddlewareBuilder')
 
@@ -136,15 +134,6 @@
 export function buildAiSdkMiddlewares(config: AiSdkMiddlewareConfig): LanguageModelMiddleware[] {
   const builder = new AiSdkMiddlewareBuilder()
 
-  // 0. Knowledge-base force-call middleware (must come first so the first round is forced to call the knowledge base)
-  if (!isEmpty(config.assistant?.knowledge_bases?.map((base) => base.id)) && config.knowledgeRecognition !== 'on') {
-    builder.add({
-      name: 'force-knowledge-first',
-      middleware: toolChoiceMiddleware('builtin_knowledge_search')
-    })
-    logger.debug('Added toolChoice middleware to force knowledge base search on first round')
-  }
-
   // 1. Add provider-specific middlewares
   if (config.provider) {
     addProviderSpecificMiddlewares(builder, config)
@@ -31,7 +31,7 @@ import { webSearchToolWithPreExtractedKeywords } from '../tools/WebSearchTool'
 
 const logger = loggerService.withContext('SearchOrchestrationPlugin')
 
-const getMessageContent = (message: ModelMessage) => {
+export const getMessageContent = (message: ModelMessage) => {
   if (typeof message.content === 'string') return message.content
   return message.content.reduce((acc, part) => {
     if (part.type === 'text') {
@@ -266,14 +266,14 @@ export const searchOrchestrationPlugin = (assistant: Assistant, topicId: string)
       // Decide which kinds of search are needed
       const knowledgeBaseIds = assistant.knowledge_bases?.map((base) => base.id)
       const hasKnowledgeBase = !isEmpty(knowledgeBaseIds)
-      const knowledgeRecognition = assistant.knowledgeRecognition || 'on'
+      const knowledgeRecognition = assistant.knowledgeRecognition || 'off'
       const globalMemoryEnabled = selectGlobalMemoryEnabled(store.getState())
       const shouldWebSearch = !!assistant.webSearchProviderId
       const shouldKnowledgeSearch = hasKnowledgeBase && knowledgeRecognition === 'on'
       const shouldMemorySearch = globalMemoryEnabled && assistant.enableMemory
 
       // Run intent analysis
-      if (shouldWebSearch || hasKnowledgeBase) {
+      if (shouldWebSearch || shouldKnowledgeSearch) {
         const analysisResult = await analyzeSearchIntent(lastUserMessage, assistant, {
           shouldWebSearch,
           shouldKnowledgeSearch,
@@ -330,25 +330,10 @@ export const searchOrchestrationPlugin = (assistant: Assistant, topicId: string)
       // 📚 Knowledge base search tool configuration
       const knowledgeBaseIds = assistant.knowledge_bases?.map((base) => base.id)
       const hasKnowledgeBase = !isEmpty(knowledgeBaseIds)
-      const knowledgeRecognition = assistant.knowledgeRecognition || 'on'
+      const knowledgeRecognition = assistant.knowledgeRecognition || 'off'
+      const shouldKnowledgeSearch = hasKnowledgeBase && knowledgeRecognition === 'on'
 
-      if (hasKnowledgeBase) {
-        if (knowledgeRecognition === 'off') {
-          // off mode: add the knowledge search tool directly, using the user message as the search keywords
-          const userMessage = userMessages[context.requestId]
-          const fallbackKeywords = {
-            question: [getMessageContent(userMessage) || 'search'],
-            rewrite: getMessageContent(userMessage) || 'search'
-          }
-          // logger.info('📚 Adding knowledge search tool (force mode)')
-          params.tools['builtin_knowledge_search'] = knowledgeSearchTool(
-            assistant,
-            fallbackKeywords,
-            getMessageContent(userMessage),
-            topicId
-          )
-          // params.toolChoice = { type: 'tool', toolName: 'builtin_knowledge_search' }
-        } else {
+      if (shouldKnowledgeSearch) {
         // on mode: decide whether to add the tool based on the intent analysis result
         const needsKnowledgeSearch =
           analysisResult?.knowledge &&
@@ -366,7 +351,6 @@ export const searchOrchestrationPlugin = (assistant: Assistant, topicId: string)
           )
         }
       }
-      }
 
       // 🧠 Memory search tool configuration
       const globalMemoryEnabled = selectGlobalMemoryEnabled(store.getState())
@@ -109,6 +109,20 @@ const createImageBlock = (
   ...overrides
 })
 
+const createThinkingBlock = (
+  messageId: string,
+  overrides: Partial<Omit<ThinkingMessageBlock, 'type' | 'messageId'>> = {}
+): ThinkingMessageBlock => ({
+  id: overrides.id ?? `thinking-block-${++blockCounter}`,
+  messageId,
+  type: MessageBlockType.THINKING,
+  createdAt: overrides.createdAt ?? new Date(2024, 0, 1, 0, 0, blockCounter).toISOString(),
+  status: overrides.status ?? MessageBlockStatus.SUCCESS,
+  content: overrides.content ?? 'Let me think...',
+  thinking_millsec: overrides.thinking_millsec ?? 1000,
+  ...overrides
+})
+
 describe('messageConverter', () => {
   beforeEach(() => {
     convertFileBlockToFilePartMock.mockReset()
@@ -229,6 +243,23 @@ describe('messageConverter', () => {
         }
       ])
     })
+
+    it('includes reasoning parts for assistant messages with thinking blocks', async () => {
+      const model = createModel()
+      const message = createMessage('assistant')
+      message.__mockContent = 'Here is my answer'
+      message.__mockThinkingBlocks = [createThinkingBlock(message.id, { content: 'Let me think...' })]
+
+      const result = await convertMessageToSdkParam(message, false, model)
+
+      expect(result).toEqual({
+        role: 'assistant',
+        content: [
+          { type: 'text', text: 'Here is my answer' },
+          { type: 'reasoning', text: 'Let me think...' }
+        ]
+      })
+    })
   })
 
   describe('convertMessagesToSdkMessages', () => {
@@ -18,7 +18,7 @@ vi.mock('@renderer/services/AssistantService', () => ({
     toolUseMode: assistant.settings?.toolUseMode ?? 'prompt',
     defaultModel: assistant.defaultModel,
     customParameters: assistant.settings?.customParameters ?? [],
-    reasoning_effort: assistant.settings?.reasoning_effort,
+    reasoning_effort: assistant.settings?.reasoning_effort ?? 'default',
    reasoning_effort_cache: assistant.settings?.reasoning_effort_cache,
     qwenThinkMode: assistant.settings?.qwenThinkMode
   })
@@ -3,6 +3,7 @@
  * Convert Cherry Studio message format into AI SDK message format
  */

+import type { ReasoningPart } from '@ai-sdk/provider-utils'
 import { loggerService } from '@logger'
 import { isImageEnhancementModel, isVisionModel } from '@renderer/config/models'
 import type { Message, Model } from '@renderer/types'
@@ -163,13 +164,13 @@ async function convertMessageToAssistantModelMessage(
   thinkingBlocks: ThinkingMessageBlock[],
   model?: Model
 ): Promise<AssistantModelMessage> {
-  const parts: Array<TextPart | FilePart> = []
+  const parts: Array<TextPart | ReasoningPart | FilePart> = []
   if (content) {
     parts.push({ type: 'text', text: content })
   }

   for (const thinkingBlock of thinkingBlocks) {
-    parts.push({ type: 'text', text: thinkingBlock.content })
+    parts.push({ type: 'reasoning', text: thinkingBlock.content })
   }

   for (const fileBlock of fileBlocks) {

@@ -28,13 +28,14 @@ import { getAnthropicThinkingBudget } from '../utils/reasoning'
  * - Disabled for models that do not support temperature.
  * - Disabled for Claude 4.5 reasoning models when TopP is enabled and temperature is disabled.
  * Otherwise, returns the temperature value if the assistant has temperature enabled.

  */
 export function getTemperature(assistant: Assistant, model: Model): number | undefined {
   if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
     return undefined
   }

-  if (!isSupportTemperatureModel(model)) {
+  if (!isSupportTemperatureModel(model, assistant)) {
     return undefined
   }

@@ -46,6 +47,10 @@ export function getTemperature(assistant: Assistant, model: Model): number | und
     return undefined
   }

+  return getTemperatureValue(assistant, model)
+}
+
+function getTemperatureValue(assistant: Assistant, model: Model): number | undefined {
   const assistantSettings = getAssistantSettings(assistant)
   let temperature = assistantSettings?.temperature
   if (temperature && isMaxTemperatureOneModel(model)) {
@@ -68,13 +73,17 @@ export function getTopP(assistant: Assistant, model: Model): number | undefined
   if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
     return undefined
   }
-  if (!isSupportTopPModel(model)) {
+  if (!isSupportTopPModel(model, assistant)) {
     return undefined
   }
   if (isTemperatureTopPMutuallyExclusiveModel(model) && assistant.settings?.enableTemperature) {
     return undefined
   }

+  return getTopPValue(assistant)
+}
+
+function getTopPValue(assistant: Assistant): number | undefined {
   const assistantSettings = getAssistantSettings(assistant)
   // FIXME: assistant.settings.enableTopP should be always a boolean value.
   const enableTopP = assistantSettings.enableTopP ?? DEFAULT_ASSISTANT_SETTINGS.enableTopP

@@ -79,7 +79,7 @@ vi.mock('@renderer/services/AssistantService', () => ({
 import { getProviderByModel } from '@renderer/services/AssistantService'
 import type { Model, Provider } from '@renderer/types'
 import { formatApiHost } from '@renderer/utils/api'
-import { isCherryAIProvider, isPerplexityProvider } from '@renderer/utils/provider'
+import { isAzureOpenAIProvider, isCherryAIProvider, isPerplexityProvider } from '@renderer/utils/provider'

 import { COPILOT_DEFAULT_HEADERS, COPILOT_EDITOR_VERSION, isCopilotResponsesModel } from '../constants'
 import { getActualProvider, providerToAiSdkConfig } from '../providerConfig'
@@ -133,6 +133,17 @@ const createPerplexityProvider = (): Provider => ({
   isSystem: false
 })

+const createAzureProvider = (apiVersion: string): Provider => ({
+  id: 'azure-openai',
+  type: 'azure-openai',
+  name: 'Azure OpenAI',
+  apiKey: 'test-key',
+  apiHost: 'https://example.openai.azure.com/openai',
+  apiVersion,
+  models: [],
+  isSystem: true
+})
+
 describe('Copilot responses routing', () => {
   beforeEach(() => {
     ;(globalThis as any).window = {
@@ -504,3 +515,46 @@ describe('Stream options includeUsage configuration', () => {
     expect(config.providerId).toBe('github-copilot-openai-compatible')
   })
 })
+
+describe('Azure OpenAI traditional API routing', () => {
+  beforeEach(() => {
+    ;(globalThis as any).window = {
+      ...(globalThis as any).window,
+      keyv: createWindowKeyv()
+    }
+    mockGetState.mockReturnValue({
+      settings: {
+        openAI: {
+          streamOptions: {
+            includeUsage: undefined
+          }
+        }
+      }
+    })
+
+    vi.mocked(isAzureOpenAIProvider).mockImplementation((provider) => provider.type === 'azure-openai')
+  })
+
+  it('uses deployment-based URLs when apiVersion is a date version', () => {
+    const provider = createAzureProvider('2024-02-15-preview')
+    const config = providerToAiSdkConfig(provider, createModel('gpt-4o', 'GPT-4o', provider.id))
+
+    expect(config.providerId).toBe('azure')
+    expect(config.options.apiVersion).toBe('2024-02-15-preview')
+    expect(config.options.useDeploymentBasedUrls).toBe(true)
+  })
+
+  it('does not force deployment-based URLs for apiVersion v1/preview', () => {
+    const v1Provider = createAzureProvider('v1')
+    const v1Config = providerToAiSdkConfig(v1Provider, createModel('gpt-4o', 'GPT-4o', v1Provider.id))
+    expect(v1Config.providerId).toBe('azure-responses')
+    expect(v1Config.options.apiVersion).toBe('v1')
+    expect(v1Config.options.useDeploymentBasedUrls).toBeUndefined()
+
+    const previewProvider = createAzureProvider('preview')
+    const previewConfig = providerToAiSdkConfig(previewProvider, createModel('gpt-4o', 'GPT-4o', previewProvider.id))
+    expect(previewConfig.providerId).toBe('azure-responses')
+    expect(previewConfig.options.apiVersion).toBe('preview')
+    expect(previewConfig.options.useDeploymentBasedUrls).toBeUndefined()
+  })
+})

@@ -32,6 +32,7 @@ import {
   isSupportStreamOptionsProvider,
   isVertexProvider
 } from '@renderer/utils/provider'
+import { defaultAppHeaders } from '@shared/utils'
 import { cloneDeep, isEmpty } from 'lodash'

 import type { AiSdkConfig } from '../types'
@@ -197,18 +198,13 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A
     extraOptions.mode = 'chat'
   }

-  // Add extra headers
-  if (actualProvider.extra_headers) {
-    extraOptions.headers = actualProvider.extra_headers
-    // copy from openaiBaseClient/openaiResponseApiClient
-    if (aiSdkProviderId === 'openai') {
   extraOptions.headers = {
-    ...extraOptions.headers,
-    'HTTP-Referer': 'https://cherry-ai.com',
-    'X-Title': 'Cherry Studio',
-    'X-Api-Key': baseConfig.apiKey
-  }
+    ...defaultAppHeaders(),
+    ...actualProvider.extra_headers
   }
+
+  if (aiSdkProviderId === 'openai') {
+    extraOptions.headers['X-Api-Key'] = baseConfig.apiKey
   }
   // azure
   // https://learn.microsoft.com/en-us/azure/ai-foundry/openai/latest
@@ -218,6 +214,15 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A
   } else if (aiSdkProviderId === 'azure') {
     extraOptions.mode = 'chat'
   }
+  if (isAzureOpenAIProvider(actualProvider)) {
+    const apiVersion = actualProvider.apiVersion?.trim()
+    if (apiVersion) {
+      extraOptions.apiVersion = apiVersion
+      if (!['preview', 'v1'].includes(apiVersion)) {
+        extraOptions.useDeploymentBasedUrls = true
+      }
+    }
+  }

   // bedrock
   if (aiSdkProviderId === 'bedrock') {
@@ -254,7 +259,7 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A
     // CherryIN API Host
     const cherryinProvider = getProviderById(SystemProviderIds.cherryin)
     if (cherryinProvider) {
-      extraOptions.anthropicBaseURL = cherryinProvider.anthropicApiHost
+      extraOptions.anthropicBaseURL = cherryinProvider.anthropicApiHost + '/v1'
       extraOptions.geminiBaseURL = cherryinProvider.apiHost + '/v1beta/models'
     }
   }

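The Azure branch above amounts to a small routing rule: a concrete, date-style `apiVersion` forces deployment-based URLs, while `v1` and `preview` keep the newer responses-style routing (this is exactly what the Azure test file earlier in this diff exercises). A minimal sketch of that rule follows; the standalone helper `resolveAzureRouting` is illustrative and not part of the codebase.

```ts
// Illustrative sketch of the apiVersion handling added above; the helper name is hypothetical.
type AzureRouting = { apiVersion?: string; useDeploymentBasedUrls?: boolean }

function resolveAzureRouting(rawApiVersion: string | undefined): AzureRouting {
  const apiVersion = rawApiVersion?.trim()
  if (!apiVersion) {
    return {}
  }
  // 'preview' and 'v1' stay on the default (responses-style) URL scheme;
  // anything else (e.g. '2024-02-15-preview') switches to deployment-based URLs.
  return {
    apiVersion,
    useDeploymentBasedUrls: !['preview', 'v1'].includes(apiVersion) ? true : undefined
  }
}

// resolveAzureRouting('2024-02-15-preview') -> { apiVersion: '2024-02-15-preview', useDeploymentBasedUrls: true }
// resolveAzureRouting('v1')                 -> { apiVersion: 'v1', useDeploymentBasedUrls: undefined }
```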
@@ -11,6 +11,7 @@ import { beforeEach, describe, expect, it, vi } from 'vitest'

 import {
   getAnthropicReasoningParams,
+  getAnthropicThinkingBudget,
   getBedrockReasoningParams,
   getCustomParameters,
   getGeminiReasoningParams,
@@ -89,7 +90,8 @@ vi.mock('@renderer/config/models', async (importOriginal) => {
     isQwenAlwaysThinkModel: vi.fn(() => false),
     isSupportedThinkingTokenHunyuanModel: vi.fn(() => false),
     isSupportedThinkingTokenModel: vi.fn(() => false),
-    isGPT51SeriesModel: vi.fn(() => false)
+    isGPT51SeriesModel: vi.fn(() => false),
+    findTokenLimit: vi.fn(actual.findTokenLimit)
   }
 })

@@ -596,7 +598,7 @@ describe('reasoning utils', () => {
     expect(result).toEqual({})
   })

-  it('should return disabled thinking when no reasoning effort', async () => {
+  it('should return disabled thinking when reasoning effort is none', async () => {
     const { isReasoningModel, isSupportedThinkingTokenClaudeModel } = await import('@renderer/config/models')

     vi.mocked(isReasoningModel).mockReturnValue(true)
@@ -611,7 +613,9 @@ describe('reasoning utils', () => {
     const assistant: Assistant = {
       id: 'test',
       name: 'Test',
-      settings: {}
+      settings: {
+        reasoning_effort: 'none'
+      }
     } as Assistant

     const result = getAnthropicReasoningParams(assistant, model)
@@ -647,7 +651,7 @@ describe('reasoning utils', () => {
     expect(result).toEqual({
       thinking: {
         type: 'enabled',
-        budgetTokens: 2048
+        budgetTokens: 4096
       }
     })
   })
@@ -675,7 +679,7 @@ describe('reasoning utils', () => {
     expect(result).toEqual({})
   })

-  it('should disable thinking for Flash models without reasoning effort', async () => {
+  it('should disable thinking for Flash models when reasoning effort is none', async () => {
     const { isReasoningModel, isSupportedThinkingTokenGeminiModel } = await import('@renderer/config/models')

     vi.mocked(isReasoningModel).mockReturnValue(true)
@@ -690,7 +694,9 @@ describe('reasoning utils', () => {
     const assistant: Assistant = {
       id: 'test',
       name: 'Test',
-      settings: {}
+      settings: {
+        reasoning_effort: 'none'
+      }
     } as Assistant

     const result = getGeminiReasoningParams(assistant, model)
@@ -725,7 +731,7 @@ describe('reasoning utils', () => {
     const result = getGeminiReasoningParams(assistant, model)
     expect(result).toEqual({
       thinkingConfig: {
-        thinkingBudget: 16448,
+        thinkingBudget: expect.any(Number),
         includeThoughts: true
       }
     })
@@ -889,7 +895,7 @@ describe('reasoning utils', () => {
     expect(result).toEqual({
       reasoningConfig: {
         type: 'enabled',
-        budgetTokens: 2048
+        budgetTokens: 4096
       }
     })
   })
@@ -990,4 +996,89 @@ describe('reasoning utils', () => {
     })
   })
   })
+
+  describe('getAnthropicThinkingBudget', () => {
+    it('should return undefined when reasoningEffort is undefined', async () => {
+      const result = getAnthropicThinkingBudget(4096, undefined, 'claude-3-7-sonnet')
+      expect(result).toBeUndefined()
+    })
+
+    it('should return undefined when reasoningEffort is none', async () => {
+      const result = getAnthropicThinkingBudget(4096, 'none', 'claude-3-7-sonnet')
+      expect(result).toBeUndefined()
+    })
+
+    it('should return undefined when tokenLimit is not found', async () => {
+      const { findTokenLimit } = await import('@renderer/config/models')
+      vi.mocked(findTokenLimit).mockReturnValue(undefined)
+
+      const result = getAnthropicThinkingBudget(4096, 'medium', 'unknown-model')
+      expect(result).toBeUndefined()
+    })
+
+    it('should calculate budget correctly when maxTokens is provided', async () => {
+      const { findTokenLimit } = await import('@renderer/config/models')
+      vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })

+      const result = getAnthropicThinkingBudget(4096, 'medium', 'claude-3-7-sonnet')
+      // EFFORT_RATIO['medium'] = 0.5
+      // budget = Math.floor((32768 - 1024) * 0.5 + 1024)
+      // = Math.floor(31744 * 0.5 + 1024) = Math.floor(15872 + 1024) = 16896
+      // budgetTokens = Math.min(16896, 4096) = 4096
+      // result = Math.max(1024, 4096) = 4096
+      expect(result).toBe(4096)
+    })
+
+    it('should use tokenLimit.max when maxTokens is undefined', async () => {
+      const { findTokenLimit } = await import('@renderer/config/models')
+      vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
+
+      const result = getAnthropicThinkingBudget(undefined, 'medium', 'claude-3-7-sonnet')
+      // When maxTokens is undefined, budget is not constrained by maxTokens
+      // EFFORT_RATIO['medium'] = 0.5
+      // budget = Math.floor((32768 - 1024) * 0.5 + 1024)
+      // = Math.floor(31744 * 0.5 + 1024) = Math.floor(15872 + 1024) = 16896
+      // result = Math.max(1024, 16896) = 16896
+      expect(result).toBe(16896)
+    })
+
+    it('should enforce minimum budget of 1024', async () => {
+      const { findTokenLimit } = await import('@renderer/config/models')
+      vi.mocked(findTokenLimit).mockReturnValue({ min: 100, max: 1000 })
+
+      const result = getAnthropicThinkingBudget(500, 'low', 'claude-3-7-sonnet')
+      // EFFORT_RATIO['low'] = 0.05
+      // budget = Math.floor((1000 - 100) * 0.05 + 100)
+      // = Math.floor(900 * 0.05 + 100) = Math.floor(45 + 100) = 145
+      // budgetTokens = Math.min(145, 500) = 145
+      // result = Math.max(1024, 145) = 1024
+      expect(result).toBe(1024)
+    })
+
+    it('should respect effort ratio for high reasoning effort', async () => {
+      const { findTokenLimit } = await import('@renderer/config/models')
+      vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
+
+      const result = getAnthropicThinkingBudget(8192, 'high', 'claude-3-7-sonnet')
+      // EFFORT_RATIO['high'] = 0.8
+      // budget = Math.floor((32768 - 1024) * 0.8 + 1024)
+      // = Math.floor(31744 * 0.8 + 1024) = Math.floor(25395.2 + 1024) = 26419
+      // budgetTokens = Math.min(26419, 8192) = 8192
+      // result = Math.max(1024, 8192) = 8192
+      expect(result).toBe(8192)
+    })
+
+    it('should use full token limit when maxTokens is undefined and reasoning effort is high', async () => {
+      const { findTokenLimit } = await import('@renderer/config/models')
+      vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
+
+      const result = getAnthropicThinkingBudget(undefined, 'high', 'claude-3-7-sonnet')
+      // When maxTokens is undefined, budget is not constrained by maxTokens
+      // EFFORT_RATIO['high'] = 0.8
+      // budget = Math.floor((32768 - 1024) * 0.8 + 1024)
+      // = Math.floor(31744 * 0.8 + 1024) = Math.floor(25395.2 + 1024) = 26419
+      // result = Math.max(1024, 26419) = 26419
+      expect(result).toBe(26419)
+    })
+  })
 })

@@ -259,7 +259,7 @@ describe('websearch utils', () => {

     expect(result).toEqual({
       xai: {
-        maxSearchResults: 50,
+        maxSearchResults: 30,
         returnCitations: true,
         sources: [{ type: 'web', excludedWebsites: [] }, { type: 'news' }, { type: 'x' }],
         mode: 'on'

@@ -11,6 +11,7 @@ import {
   isGeminiModel,
   isGrokModel,
   isOpenAIModel,
+  isOpenAIOpenWeightModel,
   isQwenMTModel,
   isSupportFlexServiceTierModel,
   isSupportVerbosityModel
@@ -244,7 +245,7 @@ export function buildProviderOptions(
       providerSpecificOptions = buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier)
       break
     case SystemProviderIds.ollama:
-      providerSpecificOptions = buildOllamaProviderOptions(assistant, capabilities)
+      providerSpecificOptions = buildOllamaProviderOptions(assistant, model, capabilities)
       break
     case SystemProviderIds.gateway:
       providerSpecificOptions = buildAIGatewayOptions(assistant, model, capabilities, serviceTier, textVerbosity)
@@ -564,6 +565,7 @@ function buildBedrockProviderOptions(

 function buildOllamaProviderOptions(
   assistant: Assistant,
+  model: Model,
   capabilities: {
     enableReasoning: boolean
     enableWebSearch: boolean
@@ -574,8 +576,13 @@ function buildOllamaProviderOptions(
   const providerOptions: OllamaCompletionProviderOptions = {}
   const reasoningEffort = assistant.settings?.reasoning_effort
   if (enableReasoning) {
+    if (isOpenAIOpenWeightModel(model)) {
+      // @ts-ignore upstream type error
+      providerOptions.think = reasoningEffort as any
+    } else {
       providerOptions.think = !['none', undefined].includes(reasoningEffort)
     }
+  }
   return {
     ollama: providerOptions
   }

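The Ollama change above splits the `think` option into two cases: OpenAI open-weight models receive the reasoning-effort string as-is (hence the upstream `@ts-ignore`), while every other model keeps the plain boolean toggle. A minimal sketch of that decision, assuming only what the hunk shows; the standalone helper is hypothetical.

```ts
// Sketch of the `think` resolution added in the hunk above; the helper name is an assumption.
function resolveOllamaThink(
  isOpenAIOpenWeight: boolean,
  reasoningEffort: string | undefined
): boolean | string | undefined {
  if (isOpenAIOpenWeight) {
    // Open-weight OpenAI models take the reasoning-effort string directly.
    return reasoningEffort
  }
  // Other models only get an on/off flag: anything except 'none'/undefined turns thinking on.
  return !['none', undefined].includes(reasoningEffort)
}
```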
@@ -8,16 +8,17 @@ import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
 import {
   findTokenLimit,
   GEMINI_FLASH_MODEL_REGEX,
-  getThinkModelType,
+  getModelSupportedReasoningEffortOptions,
   isDeepSeekHybridInferenceModel,
+  isDoubaoSeed18Model,
   isDoubaoSeedAfter251015,
   isDoubaoThinkingAutoModel,
   isGemini3ThinkingTokenModel,
-  isGPT5SeriesModel,
   isGPT51SeriesModel,
   isGrok4FastReasoningModel,
   isOpenAIDeepResearchModel,
   isOpenAIModel,
+  isOpenAIReasoningModel,
   isQwenAlwaysThinkModel,
   isQwenReasoningModel,
   isReasoningModel,
@@ -28,14 +29,14 @@ import {
   isSupportedThinkingTokenDoubaoModel,
   isSupportedThinkingTokenGeminiModel,
   isSupportedThinkingTokenHunyuanModel,
+  isSupportedThinkingTokenMiMoModel,
   isSupportedThinkingTokenModel,
   isSupportedThinkingTokenQwenModel,
-  isSupportedThinkingTokenZhipuModel,
-  MODEL_SUPPORTED_REASONING_EFFORT
+  isSupportedThinkingTokenZhipuModel
 } from '@renderer/config/models'
 import { getStoreSetting } from '@renderer/hooks/useSettings'
 import { getAssistantSettings, getProviderByModel } from '@renderer/services/AssistantService'
-import type { Assistant, Model } from '@renderer/types'
+import type { Assistant, Model, ReasoningEffortOption } from '@renderer/types'
 import { EFFORT_RATIO, isSystemProvider, SystemProviderIds } from '@renderer/types'
 import type { OpenAIReasoningSummary } from '@renderer/types/aiCoreTypes'
 import type { ReasoningEffortOptionalParams } from '@renderer/types/sdk'
@@ -65,7 +66,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
   // reasoningEffort is not set, no extra reasoning setting
   // Generally, for every model which supports reasoning control, the reasoning effort won't be undefined.
   // It's for some reasoning models that don't support reasoning control, such as deepseek reasoner.
-  if (!reasoningEffort) {
+  if (!reasoningEffort || reasoningEffort === 'default') {
     return {}
   }

@@ -134,8 +135,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
   // https://creator.poe.com/docs/external-applications/openai-compatible-api#additional-considerations
   // Poe provider - supports custom bot parameters via extra_body
   if (provider.id === SystemProviderIds.poe) {
-    // GPT-5 series models use reasoning_effort parameter in extra_body
-    if (isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) {
+    if (isOpenAIReasoningModel(model)) {
       return {
         extra_body: {
           reasoning_effort: reasoningEffort === 'auto' ? 'medium' : reasoningEffort
@@ -331,16 +331,15 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
   // Grok models/Perplexity models/OpenAI models, use reasoning_effort
   if (isSupportedReasoningEffortModel(model)) {
     // Check whether the model supports the selected option
-    const modelType = getThinkModelType(model)
-    const supportedOptions = MODEL_SUPPORTED_REASONING_EFFORT[modelType]
-    if (supportedOptions.includes(reasoningEffort)) {
+    const supportedOptions = getModelSupportedReasoningEffortOptions(model)?.filter((option) => option !== 'default')
+    if (supportedOptions?.includes(reasoningEffort)) {
       return {
         reasoningEffort
       }
     } else {
       // If the option is not supported, fall back to the first supported value
       return {
-        reasoningEffort: supportedOptions[0]
+        reasoningEffort: supportedOptions?.[0]
       }
     }
   }
@@ -392,7 +391,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin

   // Use thinking, doubao, zhipu, etc.
   if (isSupportedThinkingTokenDoubaoModel(model)) {
-    if (isDoubaoSeedAfter251015(model)) {
+    if (isDoubaoSeedAfter251015(model) || isDoubaoSeed18Model(model)) {
       return { reasoningEffort }
     }
     if (reasoningEffort === 'high') {
@@ -411,6 +410,12 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
     return { thinking: { type: 'enabled' } }
   }

+  if (isSupportedThinkingTokenMiMoModel(model)) {
+    return {
+      thinking: { type: 'enabled' }
+    }
+  }
+
   // Default case: no special thinking settings
   return {}
 }
@@ -430,7 +435,7 @@ export function getOpenAIReasoningParams(

   let reasoningEffort = assistant?.settings?.reasoning_effort

-  if (!reasoningEffort) {
+  if (!reasoningEffort || reasoningEffort === 'default') {
     return {}
   }

@@ -482,16 +487,14 @@ export function getAnthropicThinkingBudget(
     return undefined
   }

-  const budgetTokens = Math.max(
-    1024,
-    Math.floor(
-      Math.min(
-        (tokenLimit.max - tokenLimit.min) * effortRatio + tokenLimit.min,
-        (maxTokens || DEFAULT_MAX_TOKENS) * effortRatio
-      )
-    )
-  )
-  return budgetTokens
+  const budget = Math.floor((tokenLimit.max - tokenLimit.min) * effortRatio + tokenLimit.min)
+
+  let budgetTokens = budget
+  if (maxTokens !== undefined) {
+    budgetTokens = Math.min(budget, maxTokens)
+  }
+
+  return Math.max(1024, budgetTokens)
 }

 /**
@@ -508,7 +511,11 @@ export function getAnthropicReasoningParams(

   const reasoningEffort = assistant?.settings?.reasoning_effort

-  if (reasoningEffort === undefined || reasoningEffort === 'none') {
+  if (!reasoningEffort || reasoningEffort === 'default') {
+    return {}
+  }
+
+  if (reasoningEffort === 'none') {
     return {
       thinking: {
         type: 'disabled'
@@ -532,20 +539,25 @@
     return {}
   }

-// type GoogleThinkingLevel = NonNullable<GoogleGenerativeAIProviderOptions['thinkingConfig']>['thinkingLevel']
+type GoogleThinkingLevel = NonNullable<GoogleGenerativeAIProviderOptions['thinkingConfig']>['thinkingLevel']

-// function mapToGeminiThinkingLevel(reasoningEffort: ReasoningEffortOption): GoogelThinkingLevel {
-//   switch (reasoningEffort) {
-//     case 'low':
-//       return 'low'
-//     case 'medium':
-//       return 'medium'
-//     case 'high':
-//       return 'high'
-//     default:
-//       return 'medium'
-//   }
-// }
+function mapToGeminiThinkingLevel(reasoningEffort: ReasoningEffortOption): GoogleThinkingLevel {
+  switch (reasoningEffort) {
+    case 'default':
+      return undefined
+    case 'minimal':
+      return 'minimal'
+    case 'low':
+      return 'low'
+    case 'medium':
+      return 'medium'
+    case 'high':
+      return 'high'
+    default:
+      logger.warn('Unknown thinking level for Gemini. Fallback to medium instead.', { reasoningEffort })
+      return 'medium'
+  }
+}

 /**
  * Get Gemini reasoning parameters
@@ -563,6 +575,10 @@ export function getGeminiReasoningParams(

   const reasoningEffort = assistant?.settings?.reasoning_effort

+  if (!reasoningEffort || reasoningEffort === 'default') {
+    return {}
+  }
+
   // Gemini reasoning parameters
   if (isSupportedThinkingTokenGeminiModel(model)) {
     if (reasoningEffort === undefined || reasoningEffort === 'none') {
@@ -574,15 +590,15 @@
     }
   }

-  // TODO: many relay/proxy providers do not support this yet
   // https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#new_api_features_in_gemini_3
-  // if (isGemini3ThinkingTokenModel(model)) {
-  //   return {
-  //     thinkingConfig: {
-  //       thinkingLevel: mapToGeminiThinkingLevel(reasoningEffort)
-  //     }
-  //   }
-  // }
+  if (isGemini3ThinkingTokenModel(model)) {
+    return {
+      thinkingConfig: {
+        includeThoughts: true,
+        thinkingLevel: mapToGeminiThinkingLevel(reasoningEffort)
+      }
+    }
+  }

   const effortRatio = EFFORT_RATIO[reasoningEffort]

@@ -623,10 +639,6 @@ export function getXAIReasoningParams(assistant: Assistant, model: Model): Pick<

   const { reasoning_effort: reasoningEffort } = getAssistantSettings(assistant)

-  if (!reasoningEffort || reasoningEffort === 'none') {
-    return {}
-  }
-
   switch (reasoningEffort) {
     case 'auto':
     case 'minimal':
@@ -635,6 +647,12 @@
     case 'low':
     case 'high':
       return { reasoningEffort }
+    case 'xhigh':
+      return { reasoningEffort: 'high' }
+    case 'default':
+    case 'none':
+    default:
+      return {}
   }
 }

@@ -651,7 +669,7 @@ export function getBedrockReasoningParams(

   const reasoningEffort = assistant?.settings?.reasoning_effort

-  if (reasoningEffort === undefined) {
+  if (reasoningEffort === undefined || reasoningEffort === 'default') {
     return {}
   }

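The refactored `getAnthropicThinkingBudget` above, together with the worked numbers in the new tests earlier in this diff, boils down to a clamp: scale the model's thinking-token range by the effort ratio, cap the result at `maxTokens` when one is given, and never go below 1024. A condensed sketch of that arithmetic; the `EFFORT_RATIO` values shown are the ones the test comments assume.

```ts
// Condensed sketch of the budget arithmetic shown in the hunk and tests above.
const EFFORT_RATIO: Record<string, number> = { low: 0.05, medium: 0.5, high: 0.8 } // per the test comments

function thinkingBudget(
  tokenLimit: { min: number; max: number },
  effort: 'low' | 'medium' | 'high',
  maxTokens?: number
): number {
  const budget = Math.floor((tokenLimit.max - tokenLimit.min) * EFFORT_RATIO[effort] + tokenLimit.min)
  const capped = maxTokens !== undefined ? Math.min(budget, maxTokens) : budget
  return Math.max(1024, capped)
}

// Matches the test expectations:
// thinkingBudget({ min: 1024, max: 32768 }, 'medium', 4096)      === 4096
// thinkingBudget({ min: 1024, max: 32768 }, 'medium', undefined) === 16896
// thinkingBudget({ min: 100, max: 1000 }, 'low', 500)            === 1024
```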
@@ -9,6 +9,8 @@ import type { CherryWebSearchConfig } from '@renderer/store/websearch'
 import type { Model } from '@renderer/types'
 import { mapRegexToPatterns } from '@renderer/utils/blacklistMatchPattern'
+
+const X_AI_MAX_SEARCH_RESULT = 30

 export function getWebSearchParams(model: Model): Record<string, any> {
   if (model.provider === 'hunyuan') {
     return { enable_enhancement: true, citation: true, search_info: true }
@@ -82,7 +84,7 @@ export function buildProviderBuiltinWebSearchConfig(
   const excludeDomains = mapRegexToPatterns(webSearchConfig.excludeDomains)
   return {
     xai: {
-      maxSearchResults: webSearchConfig.maxResults,
+      maxSearchResults: Math.min(webSearchConfig.maxResults, X_AI_MAX_SEARCH_RESULT),
       returnCitations: true,
       sources: [
         {

src/renderer/src/assets/images/models/mimo.svg · 17 lines · Normal file
@@ -0,0 +1,17 @@
<svg width="100" height="100" viewBox="0 0 100 100" fill="none" xmlns="http://www.w3.org/2000/svg">
<g transform="translate(10, 42) scale(1.35)">
<!-- m -->
<path d="M1.2683 15.9987C0.9317 15.998 0.6091 15.8638 0.3713 15.6256C0.1335 15.3873 0 15.0644 0 14.7278V7.165C0.0148 6.83757 0.1554 6.52848 0.3924 6.30203C0.6293 6.07559 0.9445 5.94922 1.2722 5.94922C1.6 5.94922 1.9152 6.07559 2.1521 6.30203C2.3891 6.52848 2.5296 6.83757 2.5445 7.165V14.7278C2.5442 14.895 2.5109 15.0606 2.4466 15.215C2.3822 15.3693 2.2881 15.5095 2.1696 15.6276C2.0511 15.7456 1.9105 15.8391 1.7559 15.9028C1.6012 15.9665 1.4356 15.9991 1.2683 15.9987Z" fill="currentColor"/>
<path d="M14.8841 15.9993C14.5468 15.9993 14.2232 15.8655 13.9845 15.6272C13.7457 15.389 13.6112 15.0657 13.6105 14.7284V4.67881L8.9888 9.45281C8.7538 9.69657 8.4315 9.83697 8.0929 9.84312C7.7544 9.84928 7.4272 9.72069 7.1835 9.48563C6.9397 9.25058 6.7993 8.92832 6.7931 8.58976C6.7901 8.42211 6.8201 8.25551 6.8814 8.09947C6.9428 7.94342 7.0342 7.80098 7.1506 7.68028L13.9703 0.661082C14.1463 0.478921 14.3728 0.35354 14.6207 0.301033C14.8685 0.248526 15.1264 0.271291 15.3612 0.366403C15.5961 0.461516 15.7971 0.624637 15.9385 0.834827C16.08 1.04502 16.1554 1.29268 16.1551 1.54603V14.7284C16.1551 15.0655 16.0212 15.3887 15.7828 15.6271C15.5444 15.8654 15.2212 15.9993 14.8841 15.9993Z" fill="currentColor"/>
<path d="M8.0748 9.82621C7.9058 9.82749 7.7383 9.79518 7.5818 9.73117C7.4254 9.66716 7.2833 9.57272 7.1636 9.45332L0.3571 2.4315C0.1224 2.18948 -0.0065 1.86414 -0.0014 1.52705C0.0038 1.18996 0.1427 0.868726 0.3847 0.634023C0.6267 0.399319 0.9521 0.270369 1.2892 0.27554C1.6262 0.280711 1.9475 0.419579 2.1822 0.661595L8.9887 7.66767C9.1623 7.84735 9.2792 8.07413 9.3249 8.31977C9.3706 8.56541 9.343 8.81906 9.2456 9.04914C9.1482 9.27922 8.9852 9.47557 8.7771 9.61374C8.5689 9.75191 8.3247 9.8258 8.0748 9.82621Z" fill="currentColor"/>
<!-- i -->
<path d="M20.3539 15.9997C20.0169 15.9997 19.6936 15.8658 19.4552 15.6274C19.2169 15.3891 19.083 15.0658 19.083 14.7287V1.54636C19.083 1.20928 19.2169 0.886001 19.4552 0.647648C19.6936 0.409296 20.0169 0.275391 20.3539 0.275391C20.691 0.275391 21.0143 0.409296 21.2526 0.647648C21.491 0.886001 21.6249 1.20928 21.6249 1.54636V14.7287C21.6249 14.8956 21.592 15.0609 21.5282 15.2151C21.4643 15.3693 21.3707 15.5094 21.2526 15.6274C21.1346 15.7454 20.9945 15.839 20.8403 15.9029C20.6861 15.9668 20.5208 15.9997 20.3539 15.9997Z" fill="currentColor"/>
<!-- m -->
<path d="M25.8263 15.9992C25.4893 15.9992 25.166 15.8653 24.9276 15.627C24.6893 15.3886 24.5554 15.0654 24.5554 14.7283V7.1655C24.5554 6.82842 24.6893 6.50514 24.9276 6.26679C25.166 6.02844 25.4893 5.89453 25.8263 5.89453C26.1634 5.89453 26.4867 6.02844 26.7251 6.26679C26.9634 6.50514 27.0973 6.82842 27.0973 7.1655V14.7283C27.0973 15.0654 26.9634 15.3886 26.7251 15.627C26.4867 15.8653 26.1634 15.9992 25.8263 15.9992Z" fill="currentColor"/>
<path d="M39.4394 16.0004C39.1023 16.0004 38.779 15.8664 38.5406 15.6281C38.3023 15.3897 38.1684 15.0665 38.1684 14.7294V4.67982L33.5467 9.45382C33.3117 9.69584 32.9901 9.83457 32.6523 9.83949C32.3156 9.84442 31.9894 9.71513 31.7474 9.48008C31.5054 9.24503 31.3674 8.92346 31.3623 8.58613C31.3573 8.24879 31.4863 7.92331 31.7214 7.6813L38.5284 0.662093C38.7044 0.483575 38.9304 0.361405 39.1767 0.311007C39.4233 0.260609 39.6787 0.284243 39.9114 0.378925C40.1437 0.473608 40.3427 0.635093 40.4837 0.842994C40.6247 1.05089 40.7007 1.29589 40.7027 1.54704V14.7294C40.7017 15.0649 40.5687 15.3866 40.3327 15.6246C40.0957 15.8625 39.7747 15.9976 39.4394 16.0004Z" fill="currentColor"/>
<path d="M32.6324 9.82618C32.4634 9.82746 32.2964 9.79516 32.1394 9.73115C31.9834 9.66713 31.8414 9.57269 31.7214 9.45329L24.9151 2.43147C24.7921 2.31326 24.6942 2.1715 24.6271 2.01463C24.5601 1.85777 24.5253 1.68901 24.5249 1.51842C24.5244 1.34783 24.5583 1.1789 24.6246 1.02169C24.6908 0.864476 24.788 0.722207 24.9104 0.603357C25.0327 0.484507 25.1778 0.391509 25.3369 0.329905C25.4959 0.268302 25.6658 0.239353 25.8363 0.244785C26.0068 0.250217 26.1745 0.289918 26.3293 0.361522C26.4841 0.433126 26.623 0.535168 26.7375 0.661566L33.5467 7.66764C33.7204 7.84732 33.8374 8.0741 33.8824 8.31974C33.9284 8.56538 33.9014 8.81903 33.8034 9.04911C33.7064 9.27919 33.5434 9.47554 33.3354 9.61371C33.1267 9.75189 32.8824 9.82577 32.6324 9.82618Z" fill="currentColor"/>
<!-- o -->
<path d="M50.9434 15.9814C49.5534 15.9865 48.1864 15.6287 46.9774 14.9433C45.7674 14.2579 44.7584 13.2687 44.0484 12.0735C43.3384 10.8783 42.9534 9.5185 42.9304 8.12863C42.9074 6.73875 43.2474 5.36692 43.9164 4.1488C44.0844 3.86356 44.3564 3.65487 44.6754 3.56707C44.9944 3.47927 45.3344 3.51928 45.6244 3.67859C45.9144 3.8379 46.1314 4.10397 46.2274 4.42026C46.3244 4.73656 46.2944 5.07816 46.1434 5.3725C45.5764 6.40664 45.3594 7.59693 45.5264 8.76468C45.6924 9.93243 46.2334 11.0147 47.0674 11.8489C47.9014 12.6831 48.9834 13.2244 50.1514 13.3914C51.3184 13.5584 52.5094 13.3421 53.5434 12.7751C53.8384 12.6125 54.1864 12.5738 54.5104 12.6676C54.8344 12.7614 55.1074 12.98 55.2704 13.2753C55.4324 13.5706 55.4714 13.9184 55.3774 14.2422C55.2834 14.566 55.0654 14.8393 54.7694 15.0019C53.5974 15.6455 52.2814 15.9824 50.9434 15.9814Z" fill="currentColor"/>
<path d="M56.8104 12.5052C56.5944 12.5044 56.3834 12.4484 56.1954 12.3424C55.9014 12.1795 55.6824 11.9066 55.5894 11.5833C55.4954 11.26 55.5324 10.9126 55.6944 10.6171C56.2614 9.58297 56.4784 8.39268 56.3114 7.22493C56.1454 6.05718 55.6044 4.97496 54.7704 4.14073C53.9364 3.30649 52.8544 2.76525 51.6864 2.59825C50.5194 2.43125 49.3284 2.64749 48.2944 3.21452C48.1474 3.30059 47.9854 3.3564 47.8164 3.37863C47.6484 3.40087 47.4774 3.38908 47.3134 3.34397C47.1494 3.29886 46.9964 3.22134 46.8624 3.116C46.7294 3.01066 46.6184 2.87964 46.5364 2.73069C46.4544 2.58174 46.4034 2.41788 46.3864 2.24882C46.3684 2.07975 46.3854 1.90891 46.4354 1.7464C46.4854 1.58389 46.5674 1.43301 46.6764 1.3027C46.7854 1.17238 46.9194 1.06527 47.0704 0.987704C48.5874 0.155491 50.3324 -0.162266 52.0454 0.0821474C53.7574 0.326561 55.3454 1.11995 56.5684 2.34319C57.7914 3.56642 58.5844 5.15347 58.8294 6.86604C59.0734 8.5786 58.7554 10.3242 57.9234 11.8408C57.8144 12.0411 57.6534 12.2084 57.4574 12.3253C57.2624 12.4422 57.0384 12.5043 56.8104 12.5052Z" fill="currentColor"/>
</g>
</svg>
src/renderer/src/assets/images/providers/mimo.svg · 17 lines · Normal file
@@ -0,0 +1,17 @@
<svg width="100" height="100" viewBox="0 0 100 100" fill="none" xmlns="http://www.w3.org/2000/svg">
<g transform="translate(10, 42) scale(1.35)">
<!-- m -->
<path d="M1.2683 15.9987C0.9317 15.998 0.6091 15.8638 0.3713 15.6256C0.1335 15.3873 0 15.0644 0 14.7278V7.165C0.0148 6.83757 0.1554 6.52848 0.3924 6.30203C0.6293 6.07559 0.9445 5.94922 1.2722 5.94922C1.6 5.94922 1.9152 6.07559 2.1521 6.30203C2.3891 6.52848 2.5296 6.83757 2.5445 7.165V14.7278C2.5442 14.895 2.5109 15.0606 2.4466 15.215C2.3822 15.3693 2.2881 15.5095 2.1696 15.6276C2.0511 15.7456 1.9105 15.8391 1.7559 15.9028C1.6012 15.9665 1.4356 15.9991 1.2683 15.9987Z" fill="currentColor"/>
<path d="M14.8841 15.9993C14.5468 15.9993 14.2232 15.8655 13.9845 15.6272C13.7457 15.389 13.6112 15.0657 13.6105 14.7284V4.67881L8.9888 9.45281C8.7538 9.69657 8.4315 9.83697 8.0929 9.84312C7.7544 9.84928 7.4272 9.72069 7.1835 9.48563C6.9397 9.25058 6.7993 8.92832 6.7931 8.58976C6.7901 8.42211 6.8201 8.25551 6.8814 8.09947C6.9428 7.94342 7.0342 7.80098 7.1506 7.68028L13.9703 0.661082C14.1463 0.478921 14.3728 0.35354 14.6207 0.301033C14.8685 0.248526 15.1264 0.271291 15.3612 0.366403C15.5961 0.461516 15.7971 0.624637 15.9385 0.834827C16.08 1.04502 16.1554 1.29268 16.1551 1.54603V14.7284C16.1551 15.0655 16.0212 15.3887 15.7828 15.6271C15.5444 15.8654 15.2212 15.9993 14.8841 15.9993Z" fill="currentColor"/>
<path d="M8.0748 9.82621C7.9058 9.82749 7.7383 9.79518 7.5818 9.73117C7.4254 9.66716 7.2833 9.57272 7.1636 9.45332L0.3571 2.4315C0.1224 2.18948 -0.0065 1.86414 -0.0014 1.52705C0.0038 1.18996 0.1427 0.868726 0.3847 0.634023C0.6267 0.399319 0.9521 0.270369 1.2892 0.27554C1.6262 0.280711 1.9475 0.419579 2.1822 0.661595L8.9887 7.66767C9.1623 7.84735 9.2792 8.07413 9.3249 8.31977C9.3706 8.56541 9.343 8.81906 9.2456 9.04914C9.1482 9.27922 8.9852 9.47557 8.7771 9.61374C8.5689 9.75191 8.3247 9.8258 8.0748 9.82621Z" fill="currentColor"/>
<!-- i -->
<path d="M20.3539 15.9997C20.0169 15.9997 19.6936 15.8658 19.4552 15.6274C19.2169 15.3891 19.083 15.0658 19.083 14.7287V1.54636C19.083 1.20928 19.2169 0.886001 19.4552 0.647648C19.6936 0.409296 20.0169 0.275391 20.3539 0.275391C20.691 0.275391 21.0143 0.409296 21.2526 0.647648C21.491 0.886001 21.6249 1.20928 21.6249 1.54636V14.7287C21.6249 14.8956 21.592 15.0609 21.5282 15.2151C21.4643 15.3693 21.3707 15.5094 21.2526 15.6274C21.1346 15.7454 20.9945 15.839 20.8403 15.9029C20.6861 15.9668 20.5208 15.9997 20.3539 15.9997Z" fill="currentColor"/>
<!-- m -->
<path d="M25.8263 15.9992C25.4893 15.9992 25.166 15.8653 24.9276 15.627C24.6893 15.3886 24.5554 15.0654 24.5554 14.7283V7.1655C24.5554 6.82842 24.6893 6.50514 24.9276 6.26679C25.166 6.02844 25.4893 5.89453 25.8263 5.89453C26.1634 5.89453 26.4867 6.02844 26.7251 6.26679C26.9634 6.50514 27.0973 6.82842 27.0973 7.1655V14.7283C27.0973 15.0654 26.9634 15.3886 26.7251 15.627C26.4867 15.8653 26.1634 15.9992 25.8263 15.9992Z" fill="currentColor"/>
<path d="M39.4394 16.0004C39.1023 16.0004 38.779 15.8664 38.5406 15.6281C38.3023 15.3897 38.1684 15.0665 38.1684 14.7294V4.67982L33.5467 9.45382C33.3117 9.69584 32.9901 9.83457 32.6523 9.83949C32.3156 9.84442 31.9894 9.71513 31.7474 9.48008C31.5054 9.24503 31.3674 8.92346 31.3623 8.58613C31.3573 8.24879 31.4863 7.92331 31.7214 7.6813L38.5284 0.662093C38.7044 0.483575 38.9304 0.361405 39.1767 0.311007C39.4233 0.260609 39.6787 0.284243 39.9114 0.378925C40.1437 0.473608 40.3427 0.635093 40.4837 0.842994C40.6247 1.05089 40.7007 1.29589 40.7027 1.54704V14.7294C40.7017 15.0649 40.5687 15.3866 40.3327 15.6246C40.0957 15.8625 39.7747 15.9976 39.4394 16.0004Z" fill="currentColor"/>
<path d="M32.6324 9.82618C32.4634 9.82746 32.2964 9.79516 32.1394 9.73115C31.9834 9.66713 31.8414 9.57269 31.7214 9.45329L24.9151 2.43147C24.7921 2.31326 24.6942 2.1715 24.6271 2.01463C24.5601 1.85777 24.5253 1.68901 24.5249 1.51842C24.5244 1.34783 24.5583 1.1789 24.6246 1.02169C24.6908 0.864476 24.788 0.722207 24.9104 0.603357C25.0327 0.484507 25.1778 0.391509 25.3369 0.329905C25.4959 0.268302 25.6658 0.239353 25.8363 0.244785C26.0068 0.250217 26.1745 0.289918 26.3293 0.361522C26.4841 0.433126 26.623 0.535168 26.7375 0.661566L33.5467 7.66764C33.7204 7.84732 33.8374 8.0741 33.8824 8.31974C33.9284 8.56538 33.9014 8.81903 33.8034 9.04911C33.7064 9.27919 33.5434 9.47554 33.3354 9.61371C33.1267 9.75189 32.8824 9.82577 32.6324 9.82618Z" fill="currentColor"/>
<!-- o -->
<path d="M50.9434 15.9814C49.5534 15.9865 48.1864 15.6287 46.9774 14.9433C45.7674 14.2579 44.7584 13.2687 44.0484 12.0735C43.3384 10.8783 42.9534 9.5185 42.9304 8.12863C42.9074 6.73875 43.2474 5.36692 43.9164 4.1488C44.0844 3.86356 44.3564 3.65487 44.6754 3.56707C44.9944 3.47927 45.3344 3.51928 45.6244 3.67859C45.9144 3.8379 46.1314 4.10397 46.2274 4.42026C46.3244 4.73656 46.2944 5.07816 46.1434 5.3725C45.5764 6.40664 45.3594 7.59693 45.5264 8.76468C45.6924 9.93243 46.2334 11.0147 47.0674 11.8489C47.9014 12.6831 48.9834 13.2244 50.1514 13.3914C51.3184 13.5584 52.5094 13.3421 53.5434 12.7751C53.8384 12.6125 54.1864 12.5738 54.5104 12.6676C54.8344 12.7614 55.1074 12.98 55.2704 13.2753C55.4324 13.5706 55.4714 13.9184 55.3774 14.2422C55.2834 14.566 55.0654 14.8393 54.7694 15.0019C53.5974 15.6455 52.2814 15.9824 50.9434 15.9814Z" fill="currentColor"/>
<path d="M56.8104 12.5052C56.5944 12.5044 56.3834 12.4484 56.1954 12.3424C55.9014 12.1795 55.6824 11.9066 55.5894 11.5833C55.4954 11.26 55.5324 10.9126 55.6944 10.6171C56.2614 9.58297 56.4784 8.39268 56.3114 7.22493C56.1454 6.05718 55.6044 4.97496 54.7704 4.14073C53.9364 3.30649 52.8544 2.76525 51.6864 2.59825C50.5194 2.43125 49.3284 2.64749 48.2944 3.21452C48.1474 3.30059 47.9854 3.3564 47.8164 3.37863C47.6484 3.40087 47.4774 3.38908 47.3134 3.34397C47.1494 3.29886 46.9964 3.22134 46.8624 3.116C46.7294 3.01066 46.6184 2.87964 46.5364 2.73069C46.4544 2.58174 46.4034 2.41788 46.3864 2.24882C46.3684 2.07975 46.3854 1.90891 46.4354 1.7464C46.4854 1.58389 46.5674 1.43301 46.6764 1.3027C46.7854 1.17238 46.9194 1.06527 47.0704 0.987704C48.5874 0.155491 50.3324 -0.162266 52.0454 0.0821474C53.7574 0.326561 55.3454 1.11995 56.5684 2.34319C57.7914 3.56642 58.5844 5.15347 58.8294 6.86604C59.0734 8.5786 58.7554 10.3242 57.9234 11.8408C57.8144 12.0411 57.6534 12.2084 57.4574 12.3253C57.2624 12.4422 57.0384 12.5043 56.8104 12.5052Z" fill="currentColor"/>
</g>
</svg>
src/renderer/src/components/Avatar/AssistantAvatar.tsx · 34 lines · Normal file
@@ -0,0 +1,34 @@
+import EmojiIcon from '@renderer/components/EmojiIcon'
+import { useSettings } from '@renderer/hooks/useSettings'
+import { getDefaultModel } from '@renderer/services/AssistantService'
+import type { Assistant } from '@renderer/types'
+import { getLeadingEmoji } from '@renderer/utils'
+import type { FC } from 'react'
+import { useMemo } from 'react'
+
+import ModelAvatar from './ModelAvatar'
+
+interface AssistantAvatarProps {
+  assistant: Assistant
+  size?: number
+  className?: string
+}
+
+const AssistantAvatar: FC<AssistantAvatarProps> = ({ assistant, size = 24, className }) => {
+  const { assistantIconType } = useSettings()
+  const defaultModel = getDefaultModel()
+
+  const assistantName = useMemo(() => assistant.name || '', [assistant.name])
+
+  if (assistantIconType === 'model') {
+    return <ModelAvatar model={assistant.model || defaultModel} size={size} className={className} />
+  }
+
+  if (assistantIconType === 'emoji') {
+    return <EmojiIcon emoji={assistant.emoji || getLeadingEmoji(assistantName)} size={size} className={className} />
+  }
+
+  return null
+}
+
+export default AssistantAvatar

@@ -6,6 +6,61 @@ interface ContextMenuProps {
   children: React.ReactNode
 }
 
+/**
+ * Extract text content from selection, filtering out line numbers in code viewers.
+ * Preserves all content including plain text and code blocks, only removing line numbers.
+ * This ensures right-click copy in code blocks doesn't include line numbers while preserving indentation.
+ */
+function extractSelectedText(selection: Selection): string {
+  // Validate selection
+  if (selection.rangeCount === 0 || selection.isCollapsed) {
+    return ''
+  }
+
+  const range = selection.getRangeAt(0)
+  const fragment = range.cloneContents()
+
+  // Check if the selection contains code viewer elements
+  const hasLineNumbers = fragment.querySelectorAll('.line-number').length > 0
+
+  // If no line numbers, return the original text (preserves formatting)
+  if (!hasLineNumbers) {
+    return selection.toString()
+  }
+
+  // Remove all line number elements
+  fragment.querySelectorAll('.line-number').forEach((el) => el.remove())
+
+  // Handle all content using optimized TreeWalker with precise node filtering
+  // This approach handles mixed content correctly while improving performance
+  const walker = document.createTreeWalker(fragment, NodeFilter.SHOW_TEXT | NodeFilter.SHOW_ELEMENT, null)
+
+  let result = ''
+  let node = walker.nextNode()
+
+  while (node) {
+    if (node.nodeType === Node.TEXT_NODE) {
+      // Preserve text content including whitespace
+      result += node.textContent
+    } else if (node.nodeType === Node.ELEMENT_NODE) {
+      const element = node as Element
+
+      // Add newline after block elements and code lines to preserve structure
+      if (['H1', 'H2', 'H3', 'H4', 'H5', 'H6'].includes(element.tagName)) {
+        result += '\n'
+      } else if (element.classList.contains('line')) {
+        // Add newline after code lines to preserve code structure
+        result += '\n'
+      }
+    }
+
+    node = walker.nextNode()
+  }
+
+  // Clean up excessive newlines but preserve code structure
+  return result.trim()
+}
+
 // FIXME: Why does this component name look like a generic component but is not customizable at all?
 const ContextMenu: React.FC<ContextMenuProps> = ({ children }) => {
   const { t } = useTranslation()
@@ -45,8 +100,12 @@ const ContextMenu: React.FC<ContextMenuProps> = ({ children }) => {
 
   const onOpenChange = (open: boolean) => {
     if (open) {
-      const selectedText = window.getSelection()?.toString()
-      setSelectedText(selectedText)
+      const selection = window.getSelection()
+      if (!selection || selection.rangeCount === 0 || selection.isCollapsed) {
+        setSelectedText(undefined)
+        return
+      }
+      setSelectedText(extractSelectedText(selection) || undefined)
     }
   }
 
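Note: a minimal sketch of how the extracted helper above could be exercised in a DOM test
environment (vitest + jsdom are assumed; extractSelectedText is module-private in the diff, so
the import below assumes it were exported for testing).

import { describe, expect, it } from 'vitest'

// Hypothetical export, for illustration only.
import { extractSelectedText } from './ContextMenu'

describe('extractSelectedText', () => {
  it('drops .line-number spans but keeps the code text', () => {
    // No whitespace between elements, so the walker only visits .line spans and their text.
    document.body.innerHTML =
      '<div class="code-block">' +
      '<span class="line"><span class="line-number">1</span>const a = 1</span>' +
      '<span class="line"><span class="line-number">2</span>const b = 2</span>' +
      '</div>'

    const selection = window.getSelection()!
    const range = document.createRange()
    range.selectNodeContents(document.querySelector('.code-block')!)
    selection.removeAllRanges()
    selection.addRange(range)

    // Line numbers are stripped, each .line contributes a newline, indentation is preserved.
    expect(extractSelectedText(selection)).toBe('const a = 1\nconst b = 2')
  })
})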
@@ -113,6 +113,18 @@ export function MdiLightbulbOn(props: SVGProps<SVGSVGElement>) {
   )
 }
 
+export function MdiLightbulbQuestion(props: SVGProps<SVGSVGElement>) {
+  // {/* Icon from Material Design Icons by Pictogrammers - https://github.com/Templarian/MaterialDesign/blob/master/LICENSE */}
+  return (
+    <svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" viewBox="0 0 24 24" {...props}>
+      <path
+        fill="currentColor"
+        d="M8 2C11.9 2 15 5.1 15 9C15 11.4 13.8 13.5 12 14.7V17C12 17.6 11.6 18 11 18H5C4.4 18 4 17.6 4 17V14.7C2.2 13.5 1 11.4 1 9C1 5.1 4.1 2 8 2M5 21V20H11V21C11 21.6 10.6 22 10 22H6C5.4 22 5 21.6 5 21M8 4C5.2 4 3 6.2 3 9C3 11.1 4.2 12.8 6 13.6V16H10V13.6C11.8 12.8 13 11.1 13 9C13 6.2 10.8 4 8 4M20.5 14.5V16H19V14.5H20.5M18.5 9.5H17V9C17 7.3 18.3 6 20 6S23 7.3 23 9C23 10 22.5 10.9 21.7 11.4L21.4 11.6C20.8 12 20.5 12.6 20.5 13.3V13.5H19V13.3C19 12.1 19.6 11 20.6 10.4L20.9 10.2C21.3 9.9 21.5 9.5 21.5 9C21.5 8.2 20.8 7.5 20 7.5S18.5 8.2 18.5 9V9.5Z"
+      />
+    </svg>
+  )
+}
+
 export function BingLogo(props: SVGProps<SVGSVGElement>) {
   return (
     <svg
@@ -1,5 +1,4 @@
 import { loggerService } from '@logger'
-import AiProvider from '@renderer/aiCore'
 import { RefreshIcon } from '@renderer/components/Icons'
 import { useProvider } from '@renderer/hooks/useProvider'
 import type { Model } from '@renderer/types'
@@ -8,6 +7,8 @@ import { Button, InputNumber, Space, Tooltip } from 'antd'
 import { memo, useCallback, useMemo, useState } from 'react'
 import { useTranslation } from 'react-i18next'
 
+import AiProviderNew from '../aiCore/index_new'
+
 const logger = loggerService.withContext('DimensionsInput')
 
 interface InputEmbeddingDimensionProps {
@@ -47,7 +48,7 @@ const InputEmbeddingDimension = ({
 
     setLoading(true)
     try {
-      const aiProvider = new AiProvider(provider)
+      const aiProvider = new AiProviderNew(provider)
       const dimension = await aiProvider.getEmbeddingDimensions(model)
       // for controlled input
       if (ref?.current) {
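Note: a condensed usage sketch of the provider swap above. The component logic is unchanged;
only the constructor moves from the legacy AiProvider to AiProviderNew (aiCore/index_new). The
wrapper below is illustrative, not code from the repository, and the Provider/Model type import
path is assumed.

import AiProviderNew from '../aiCore/index_new'
import type { Model, Provider } from '@renderer/types'

// Probe a model's embedding dimension, returning null instead of throwing
// (the error handling here is illustrative, not copied from the component).
async function probeEmbeddingDimensions(provider: Provider, model: Model): Promise<number | null> {
  try {
    const aiProvider = new AiProviderNew(provider)
    return await aiProvider.getEmbeddingDimensions(model)
  } catch {
    return null
  }
}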
@ -3,6 +3,7 @@ import { ErrorBoundary } from '@renderer/components/ErrorBoundary'
|
|||||||
import { HelpTooltip } from '@renderer/components/TooltipIcons'
|
import { HelpTooltip } from '@renderer/components/TooltipIcons'
|
||||||
import { TopView } from '@renderer/components/TopView'
|
import { TopView } from '@renderer/components/TopView'
|
||||||
import { permissionModeCards } from '@renderer/config/agent'
|
import { permissionModeCards } from '@renderer/config/agent'
|
||||||
|
import { isWin } from '@renderer/config/constant'
|
||||||
import { useAgents } from '@renderer/hooks/agents/useAgents'
|
import { useAgents } from '@renderer/hooks/agents/useAgents'
|
||||||
import { useUpdateAgent } from '@renderer/hooks/agents/useUpdateAgent'
|
import { useUpdateAgent } from '@renderer/hooks/agents/useUpdateAgent'
|
||||||
import SelectAgentBaseModelButton from '@renderer/pages/home/components/SelectAgentBaseModelButton'
|
import SelectAgentBaseModelButton from '@renderer/pages/home/components/SelectAgentBaseModelButton'
|
||||||
@ -16,7 +17,8 @@ import type {
|
|||||||
UpdateAgentForm
|
UpdateAgentForm
|
||||||
} from '@renderer/types'
|
} from '@renderer/types'
|
||||||
import { AgentConfigurationSchema, isAgentType } from '@renderer/types'
|
import { AgentConfigurationSchema, isAgentType } from '@renderer/types'
|
||||||
import { Alert, Button, Input, Modal, Select } from 'antd'
|
import type { GitBashPathInfo } from '@shared/config/constant'
|
||||||
|
import { Button, Input, Modal, Select } from 'antd'
|
||||||
import { AlertTriangleIcon } from 'lucide-react'
|
import { AlertTriangleIcon } from 'lucide-react'
|
||||||
import type { ChangeEvent, FormEvent } from 'react'
|
import type { ChangeEvent, FormEvent } from 'react'
|
||||||
import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
|
import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
|
||||||
@ -59,8 +61,7 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
|||||||
const isEditing = (agent?: AgentWithTools) => agent !== undefined
|
const isEditing = (agent?: AgentWithTools) => agent !== undefined
|
||||||
|
|
||||||
const [form, setForm] = useState<BaseAgentForm>(() => buildAgentForm(agent))
|
const [form, setForm] = useState<BaseAgentForm>(() => buildAgentForm(agent))
|
||||||
const [hasGitBash, setHasGitBash] = useState<boolean>(true)
|
const [gitBashPathInfo, setGitBashPathInfo] = useState<GitBashPathInfo>({ path: null, source: null })
|
||||||
const [customGitBashPath, setCustomGitBashPath] = useState<string>('')
|
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (open) {
|
if (open) {
|
||||||
@ -68,29 +69,15 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
|||||||
}
|
}
|
||||||
}, [agent, open])
|
}, [agent, open])
|
||||||
|
|
||||||
const checkGitBash = useCallback(
|
const checkGitBash = useCallback(async () => {
|
||||||
async (showToast = false) => {
|
if (!isWin) return
|
||||||
try {
|
try {
|
||||||
const [gitBashInstalled, savedPath] = await Promise.all([
|
const pathInfo = await window.api.system.getGitBashPathInfo()
|
||||||
window.api.system.checkGitBash(),
|
setGitBashPathInfo(pathInfo)
|
||||||
window.api.system.getGitBashPath().catch(() => null)
|
|
||||||
])
|
|
||||||
setCustomGitBashPath(savedPath ?? '')
|
|
||||||
setHasGitBash(gitBashInstalled)
|
|
||||||
if (showToast) {
|
|
||||||
if (gitBashInstalled) {
|
|
||||||
window.toast.success(t('agent.gitBash.success', 'Git Bash detected successfully!'))
|
|
||||||
} else {
|
|
||||||
window.toast.error(t('agent.gitBash.notFound', 'Git Bash not found. Please install it first.'))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('Failed to check Git Bash:', error as Error)
|
logger.error('Failed to check Git Bash:', error as Error)
|
||||||
setHasGitBash(true) // Default to true on error to avoid false warnings
|
|
||||||
}
|
}
|
||||||
},
|
}, [])
|
||||||
[t]
|
|
||||||
)
|
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
checkGitBash()
|
checkGitBash()
|
||||||
@ -119,24 +106,22 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
setCustomGitBashPath(pickedPath)
|
await checkGitBash()
|
||||||
await checkGitBash(true)
|
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('Failed to pick Git Bash path', error as Error)
|
logger.error('Failed to pick Git Bash path', error as Error)
|
||||||
window.toast.error(t('agent.gitBash.pick.failed', 'Failed to set Git Bash path'))
|
window.toast.error(t('agent.gitBash.pick.failed', 'Failed to set Git Bash path'))
|
||||||
}
|
}
|
||||||
}, [checkGitBash, t])
|
}, [checkGitBash, t])
|
||||||
|
|
||||||
const handleClearGitBash = useCallback(async () => {
|
const handleResetGitBash = useCallback(async () => {
|
||||||
try {
|
try {
|
||||||
|
// Clear manual setting and re-run auto-discovery
|
||||||
await window.api.system.setGitBashPath(null)
|
await window.api.system.setGitBashPath(null)
|
||||||
setCustomGitBashPath('')
|
await checkGitBash()
|
||||||
await checkGitBash(true)
|
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
logger.error('Failed to clear Git Bash path', error as Error)
|
logger.error('Failed to reset Git Bash path', error as Error)
|
||||||
window.toast.error(t('agent.gitBash.pick.failed', 'Failed to set Git Bash path'))
|
|
||||||
}
|
}
|
||||||
}, [checkGitBash, t])
|
}, [checkGitBash])
|
||||||
|
|
||||||
const onPermissionModeChange = useCallback((value: PermissionMode) => {
|
const onPermissionModeChange = useCallback((value: PermissionMode) => {
|
||||||
setForm((prev) => {
|
setForm((prev) => {
|
||||||
@ -268,6 +253,12 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (isWin && !gitBashPathInfo.path) {
|
||||||
|
window.toast.error(t('agent.gitBash.error.required', 'Git Bash path is required on Windows'))
|
||||||
|
loadingRef.current = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
if (isEditing(agent)) {
|
if (isEditing(agent)) {
|
||||||
if (!agent) {
|
if (!agent) {
|
||||||
loadingRef.current = false
|
loadingRef.current = false
|
||||||
@ -327,7 +318,8 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
|||||||
t,
|
t,
|
||||||
updateAgent,
|
updateAgent,
|
||||||
afterSubmit,
|
afterSubmit,
|
||||||
addAgent
|
addAgent,
|
||||||
|
gitBashPathInfo.path
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -346,66 +338,6 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
|||||||
footer={null}>
|
footer={null}>
|
||||||
<StyledForm onSubmit={onSubmit}>
|
<StyledForm onSubmit={onSubmit}>
|
||||||
<FormContent>
|
<FormContent>
|
||||||
{!hasGitBash && (
|
|
||||||
<Alert
|
|
||||||
message={t('agent.gitBash.error.title', 'Git Bash Required')}
|
|
||||||
description={
|
|
||||||
<div>
|
|
||||||
<div style={{ marginBottom: 8 }}>
|
|
||||||
{t(
|
|
||||||
'agent.gitBash.error.description',
|
|
||||||
'Git Bash is required to run agents on Windows. The agent cannot function without it. Please install Git for Windows from'
|
|
||||||
)}{' '}
|
|
||||||
<a
|
|
||||||
href="https://git-scm.com/download/win"
|
|
||||||
onClick={(e) => {
|
|
||||||
e.preventDefault()
|
|
||||||
window.api.openWebsite('https://git-scm.com/download/win')
|
|
||||||
}}
|
|
||||||
style={{ textDecoration: 'underline' }}>
|
|
||||||
git-scm.com
|
|
||||||
</a>
|
|
||||||
</div>
|
|
||||||
<Button size="small" onClick={() => checkGitBash(true)}>
|
|
||||||
{t('agent.gitBash.error.recheck', 'Recheck Git Bash Installation')}
|
|
||||||
</Button>
|
|
||||||
<Button size="small" style={{ marginLeft: 8 }} onClick={handlePickGitBash}>
|
|
||||||
{t('agent.gitBash.pick.button', 'Select Git Bash Path')}
|
|
||||||
</Button>
|
|
||||||
</div>
|
|
||||||
}
|
|
||||||
type="error"
|
|
||||||
showIcon
|
|
||||||
style={{ marginBottom: 16 }}
|
|
||||||
/>
|
|
||||||
)}
|
|
||||||
|
|
||||||
{hasGitBash && customGitBashPath && (
|
|
||||||
<Alert
|
|
||||||
message={t('agent.gitBash.found.title', 'Git Bash configured')}
|
|
||||||
description={
|
|
||||||
<div style={{ display: 'flex', flexDirection: 'column', gap: 8 }}>
|
|
||||||
<div>
|
|
||||||
{t('agent.gitBash.customPath', {
|
|
||||||
defaultValue: 'Using custom path: {{path}}',
|
|
||||||
path: customGitBashPath
|
|
||||||
})}
|
|
||||||
</div>
|
|
||||||
<div style={{ display: 'flex', gap: 8 }}>
|
|
||||||
<Button size="small" onClick={handlePickGitBash}>
|
|
||||||
{t('agent.gitBash.pick.button', 'Select Git Bash Path')}
|
|
||||||
</Button>
|
|
||||||
<Button size="small" onClick={handleClearGitBash}>
|
|
||||||
{t('agent.gitBash.clear.button', 'Clear custom path')}
|
|
||||||
</Button>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
}
|
|
||||||
type="success"
|
|
||||||
showIcon
|
|
||||||
style={{ marginBottom: 16 }}
|
|
||||||
/>
|
|
||||||
)}
|
|
||||||
<FormRow>
|
<FormRow>
|
||||||
<FormItem style={{ flex: 1 }}>
|
<FormItem style={{ flex: 1 }}>
|
||||||
<Label>
|
<Label>
|
||||||
@ -439,6 +371,40 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
|||||||
/>
|
/>
|
||||||
</FormItem>
|
</FormItem>
|
||||||
|
|
||||||
|
{isWin && (
|
||||||
|
<FormItem>
|
||||||
|
<div className="flex items-center gap-2">
|
||||||
|
<Label>
|
||||||
|
Git Bash <RequiredMark>*</RequiredMark>
|
||||||
|
</Label>
|
||||||
|
<HelpTooltip
|
||||||
|
title={t(
|
||||||
|
'agent.gitBash.tooltip',
|
||||||
|
'Git Bash is required to run agents on Windows. Install from git-scm.com if not available.'
|
||||||
|
)}
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
<GitBashInputWrapper>
|
||||||
|
<Input
|
||||||
|
value={gitBashPathInfo.path ?? ''}
|
||||||
|
readOnly
|
||||||
|
placeholder={t('agent.gitBash.placeholder', 'Select bash.exe path')}
|
||||||
|
/>
|
||||||
|
<Button size="small" onClick={handlePickGitBash}>
|
||||||
|
{t('common.select', 'Select')}
|
||||||
|
</Button>
|
||||||
|
{gitBashPathInfo.source === 'manual' && (
|
||||||
|
<Button size="small" onClick={handleResetGitBash}>
|
||||||
|
{t('common.reset', 'Reset')}
|
||||||
|
</Button>
|
||||||
|
)}
|
||||||
|
</GitBashInputWrapper>
|
||||||
|
{gitBashPathInfo.path && gitBashPathInfo.source === 'auto' && (
|
||||||
|
<SourceHint>{t('agent.gitBash.autoDiscoveredHint', 'Auto-discovered')}</SourceHint>
|
||||||
|
)}
|
||||||
|
</FormItem>
|
||||||
|
)}
|
||||||
|
|
||||||
<FormItem>
|
<FormItem>
|
||||||
<Label>
|
<Label>
|
||||||
{t('agent.settings.tooling.permissionMode.title', 'Permission mode')} <RequiredMark>*</RequiredMark>
|
{t('agent.settings.tooling.permissionMode.title', 'Permission mode')} <RequiredMark>*</RequiredMark>
|
||||||
@ -511,7 +477,11 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
|
|||||||
|
|
||||||
<FormFooter>
|
<FormFooter>
|
||||||
<Button onClick={onCancel}>{t('common.close')}</Button>
|
<Button onClick={onCancel}>{t('common.close')}</Button>
|
||||||
<Button type="primary" htmlType="submit" loading={loadingRef.current} disabled={!hasGitBash}>
|
<Button
|
||||||
|
type="primary"
|
||||||
|
htmlType="submit"
|
||||||
|
loading={loadingRef.current}
|
||||||
|
disabled={isWin && !gitBashPathInfo.path}>
|
||||||
{isEditing(agent) ? t('common.confirm') : t('common.add')}
|
{isEditing(agent) ? t('common.confirm') : t('common.add')}
|
||||||
</Button>
|
</Button>
|
||||||
</FormFooter>
|
</FormFooter>
|
||||||
@ -582,6 +552,21 @@ const FormItem = styled.div`
|
|||||||
gap: 8px;
|
gap: 8px;
|
||||||
`
|
`
|
||||||
|
|
||||||
|
const GitBashInputWrapper = styled.div`
|
||||||
|
display: flex;
|
||||||
|
gap: 8px;
|
||||||
|
align-items: center;
|
||||||
|
|
||||||
|
input {
|
||||||
|
flex: 1;
|
||||||
|
}
|
||||||
|
`
|
||||||
|
|
||||||
|
const SourceHint = styled.span`
|
||||||
|
font-size: 12px;
|
||||||
|
color: var(--color-text-3);
|
||||||
|
`
|
||||||
|
|
||||||
const Label = styled.label`
|
const Label = styled.label`
|
||||||
font-size: 14px;
|
font-size: 14px;
|
||||||
color: var(--color-text-1);
|
color: var(--color-text-1);
|
||||||
|
|||||||
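Note: the GitBashPathInfo type imported from '@shared/config/constant' is not shown in this
diff. Judging from the usages above ({ path: null, source: null }, source === 'manual',
source === 'auto'), its shape is presumably along these lines; this is an inference, not the
actual definition.

export interface GitBashPathInfo {
  // Absolute path to bash.exe, or null when Git Bash has not been found or configured.
  path: string | null
  // How the path was obtained: auto-discovery or a manual selection by the user.
  source: 'auto' | 'manual' | null
}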
@ -79,7 +79,7 @@ vi.mock('antd', () => {
|
|||||||
})
|
})
|
||||||
|
|
||||||
// Mock dependencies
|
// Mock dependencies
|
||||||
vi.mock('@renderer/aiCore', () => ({
|
vi.mock('@renderer/aiCore/index_new', () => ({
|
||||||
default: vi.fn().mockImplementation(() => ({
|
default: vi.fn().mockImplementation(() => ({
|
||||||
getEmbeddingDimensions: mocks.aiCore.getEmbeddingDimensions
|
getEmbeddingDimensions: mocks.aiCore.getEmbeddingDimensions
|
||||||
}))
|
}))
|
||||||
|
|||||||
@ -5,6 +5,7 @@ import { isEmbeddingModel, isRerankModel } from '../embedding'
|
|||||||
import { isOpenAIReasoningModel, isSupportedReasoningEffortOpenAIModel } from '../openai'
|
import { isOpenAIReasoningModel, isSupportedReasoningEffortOpenAIModel } from '../openai'
|
||||||
import {
|
import {
|
||||||
findTokenLimit,
|
findTokenLimit,
|
||||||
|
getModelSupportedReasoningEffortOptions,
|
||||||
getThinkModelType,
|
getThinkModelType,
|
||||||
isClaude4SeriesModel,
|
isClaude4SeriesModel,
|
||||||
isClaude45ReasoningModel,
|
isClaude45ReasoningModel,
|
||||||
@ -630,7 +631,7 @@ describe('Reasoning option configuration', () => {
|
|||||||
|
|
||||||
it('restricts GPT-5 Pro reasoning to high effort only', () => {
|
it('restricts GPT-5 Pro reasoning to high effort only', () => {
|
||||||
expect(MODEL_SUPPORTED_REASONING_EFFORT.gpt5pro).toEqual(['high'])
|
expect(MODEL_SUPPORTED_REASONING_EFFORT.gpt5pro).toEqual(['high'])
|
||||||
expect(MODEL_SUPPORTED_OPTIONS.gpt5pro).toEqual(['high'])
|
expect(MODEL_SUPPORTED_OPTIONS.gpt5pro).toEqual(['default', 'high'])
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
@ -694,15 +695,20 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
|
|||||||
})
|
})
|
||||||
|
|
||||||
describe('Gemini models', () => {
|
describe('Gemini models', () => {
|
||||||
it('should return gemini for Flash models', () => {
|
it('should return gemini2_flash for Flash models', () => {
|
||||||
expect(getThinkModelType(createModel({ id: 'gemini-2.5-flash-latest' }))).toBe('gemini')
|
expect(getThinkModelType(createModel({ id: 'gemini-2.5-flash-latest' }))).toBe('gemini2_flash')
|
||||||
expect(getThinkModelType(createModel({ id: 'gemini-flash-latest' }))).toBe('gemini')
|
})
|
||||||
expect(getThinkModelType(createModel({ id: 'gemini-flash-lite-latest' }))).toBe('gemini')
|
it('should return gemini3_flash for Gemini 3 Flash models', () => {
|
||||||
|
expect(getThinkModelType(createModel({ id: 'gemini-3-flash-preview' }))).toBe('gemini3_flash')
|
||||||
|
expect(getThinkModelType(createModel({ id: 'gemini-flash-latest' }))).toBe('gemini3_flash')
|
||||||
})
|
})
|
||||||
|
|
||||||
it('should return gemini_pro for Pro models', () => {
|
it('should return gemini2_pro for Gemini 2.5 Pro models', () => {
|
||||||
expect(getThinkModelType(createModel({ id: 'gemini-2.5-pro-latest' }))).toBe('gemini_pro')
|
expect(getThinkModelType(createModel({ id: 'gemini-2.5-pro-latest' }))).toBe('gemini2_pro')
|
||||||
expect(getThinkModelType(createModel({ id: 'gemini-pro-latest' }))).toBe('gemini_pro')
|
})
|
||||||
|
it('should return gemini3_pro for Gemini 3 Pro models', () => {
|
||||||
|
expect(getThinkModelType(createModel({ id: 'gemini-3-pro-preview' }))).toBe('gemini3_pro')
|
||||||
|
expect(getThinkModelType(createModel({ id: 'gemini-pro-latest' }))).toBe('gemini3_pro')
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
@ -732,6 +738,11 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
|
|||||||
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-6-lite-251015' }))).toBe('doubao_after_251015')
|
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-6-lite-251015' }))).toBe('doubao_after_251015')
|
||||||
})
|
})
|
||||||
|
|
||||||
|
it('should return doubao_after_251015 for Doubao-Seed-1.8 models', () => {
|
||||||
|
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251215' }))).toBe('doubao_after_251015')
|
||||||
|
expect(getThinkModelType(createModel({ id: 'doubao-seed-1.8' }))).toBe('doubao_after_251015')
|
||||||
|
})
|
||||||
|
|
||||||
it('should return doubao_no_auto for other Doubao thinking models', () => {
|
it('should return doubao_no_auto for other Doubao thinking models', () => {
|
||||||
expect(getThinkModelType(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toBe('doubao_no_auto')
|
expect(getThinkModelType(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toBe('doubao_no_auto')
|
||||||
})
|
})
|
||||||
@ -804,7 +815,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
|
|||||||
name: 'gemini-2.5-flash-latest'
|
name: 'gemini-2.5-flash-latest'
|
||||||
})
|
})
|
||||||
)
|
)
|
||||||
).toBe('gemini')
|
).toBe('gemini2_flash')
|
||||||
})
|
})
|
||||||
|
|
||||||
it('should use id result when id matches', () => {
|
it('should use id result when id matches', () => {
|
||||||
@ -829,7 +840,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
|
|||||||
|
|
||||||
it('should handle case insensitivity correctly', () => {
|
it('should handle case insensitivity correctly', () => {
|
||||||
expect(getThinkModelType(createModel({ id: 'GPT-5.1' }))).toBe('gpt5_1')
|
expect(getThinkModelType(createModel({ id: 'GPT-5.1' }))).toBe('gpt5_1')
|
||||||
expect(getThinkModelType(createModel({ id: 'Gemini-2.5-Flash-Latest' }))).toBe('gemini')
|
expect(getThinkModelType(createModel({ id: 'Gemini-2.5-Flash-Latest' }))).toBe('gemini2_flash')
|
||||||
expect(getThinkModelType(createModel({ id: 'DeepSeek-V3.1' }))).toBe('deepseek_hybrid')
|
expect(getThinkModelType(createModel({ id: 'DeepSeek-V3.1' }))).toBe('deepseek_hybrid')
|
||||||
})
|
})
|
||||||
|
|
||||||
@ -849,7 +860,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
|
|||||||
it('should handle models with version suffixes', () => {
|
it('should handle models with version suffixes', () => {
|
||||||
expect(getThinkModelType(createModel({ id: 'gpt-5-preview-2024' }))).toBe('gpt5')
|
expect(getThinkModelType(createModel({ id: 'gpt-5-preview-2024' }))).toBe('gpt5')
|
||||||
expect(getThinkModelType(createModel({ id: 'o3-mini-2024' }))).toBe('o')
|
expect(getThinkModelType(createModel({ id: 'o3-mini-2024' }))).toBe('o')
|
||||||
expect(getThinkModelType(createModel({ id: 'gemini-2.5-flash-latest-001' }))).toBe('gemini')
|
expect(getThinkModelType(createModel({ id: 'gemini-2.5-flash-latest-001' }))).toBe('gemini2_flash')
|
||||||
})
|
})
|
||||||
|
|
||||||
it('should prioritize GPT-5.1 over GPT-5 detection', () => {
|
it('should prioritize GPT-5.1 over GPT-5 detection', () => {
|
||||||
@ -862,6 +873,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
|
|||||||
// auto > after_251015 > no_auto
|
// auto > after_251015 > no_auto
|
||||||
expect(getThinkModelType(createModel({ id: 'doubao-seed-1.6' }))).toBe('doubao')
|
expect(getThinkModelType(createModel({ id: 'doubao-seed-1.6' }))).toBe('doubao')
|
||||||
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-6-251015' }))).toBe('doubao_after_251015')
|
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-6-251015' }))).toBe('doubao_after_251015')
|
||||||
|
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251215' }))).toBe('doubao_after_251015')
|
||||||
expect(getThinkModelType(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toBe('doubao_no_auto')
|
expect(getThinkModelType(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toBe('doubao_no_auto')
|
||||||
})
|
})
|
||||||
|
|
||||||
@ -948,6 +960,14 @@ describe('Gemini Models', () => {
|
|||||||
group: ''
|
group: ''
|
||||||
})
|
})
|
||||||
).toBe(true)
|
).toBe(true)
|
||||||
|
expect(
|
||||||
|
isSupportedThinkingTokenGeminiModel({
|
||||||
|
id: 'gemini-3-flash-preview',
|
||||||
|
name: '',
|
||||||
|
provider: '',
|
||||||
|
group: ''
|
||||||
|
})
|
||||||
|
).toBe(true)
|
||||||
expect(
|
expect(
|
||||||
isSupportedThinkingTokenGeminiModel({
|
isSupportedThinkingTokenGeminiModel({
|
||||||
id: 'google/gemini-3-pro-preview',
|
id: 'google/gemini-3-pro-preview',
|
||||||
@ -989,6 +1009,31 @@ describe('Gemini Models', () => {
|
|||||||
group: ''
|
group: ''
|
||||||
})
|
})
|
||||||
).toBe(true)
|
).toBe(true)
|
||||||
|
// Version with date suffixes
|
||||||
|
expect(
|
||||||
|
isSupportedThinkingTokenGeminiModel({
|
||||||
|
id: 'gemini-3-flash-preview-09-2025',
|
||||||
|
name: '',
|
||||||
|
provider: '',
|
||||||
|
group: ''
|
||||||
|
})
|
||||||
|
).toBe(true)
|
||||||
|
expect(
|
||||||
|
isSupportedThinkingTokenGeminiModel({
|
||||||
|
id: 'gemini-3-pro-preview-09-2025',
|
||||||
|
name: '',
|
||||||
|
provider: '',
|
||||||
|
group: ''
|
||||||
|
})
|
||||||
|
).toBe(true)
|
||||||
|
expect(
|
||||||
|
isSupportedThinkingTokenGeminiModel({
|
||||||
|
id: 'gemini-3-flash-exp-1234',
|
||||||
|
name: '',
|
||||||
|
provider: '',
|
||||||
|
group: ''
|
||||||
|
})
|
||||||
|
).toBe(true)
|
||||||
// Version with decimals
|
// Version with decimals
|
||||||
expect(
|
expect(
|
||||||
isSupportedThinkingTokenGeminiModel({
|
isSupportedThinkingTokenGeminiModel({
|
||||||
@ -1008,7 +1053,8 @@ describe('Gemini Models', () => {
|
|||||||
).toBe(true)
|
).toBe(true)
|
||||||
})
|
})
|
||||||
|
|
||||||
it('should return true for gemini-3 image models', () => {
|
it('should return true for gemini-3-pro-image models only', () => {
|
||||||
|
// Only gemini-3-pro-image models should return true
|
||||||
expect(
|
expect(
|
||||||
isSupportedThinkingTokenGeminiModel({
|
isSupportedThinkingTokenGeminiModel({
|
||||||
id: 'gemini-3-pro-image-preview',
|
id: 'gemini-3-pro-image-preview',
|
||||||
@ -1017,6 +1063,17 @@ describe('Gemini Models', () => {
|
|||||||
group: ''
|
group: ''
|
||||||
})
|
})
|
||||||
).toBe(true)
|
).toBe(true)
|
||||||
|
expect(
|
||||||
|
isSupportedThinkingTokenGeminiModel({
|
||||||
|
id: 'gemini-3-pro-image',
|
||||||
|
name: '',
|
||||||
|
provider: '',
|
||||||
|
group: ''
|
||||||
|
})
|
||||||
|
).toBe(true)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return false for other gemini-3 image models', () => {
|
||||||
expect(
|
expect(
|
||||||
isSupportedThinkingTokenGeminiModel({
|
isSupportedThinkingTokenGeminiModel({
|
||||||
id: 'gemini-3.0-flash-image-preview',
|
id: 'gemini-3.0-flash-image-preview',
|
||||||
@ -1079,6 +1136,22 @@ describe('Gemini Models', () => {
|
|||||||
group: ''
|
group: ''
|
||||||
})
|
})
|
||||||
).toBe(false)
|
).toBe(false)
|
||||||
|
expect(
|
||||||
|
isSupportedThinkingTokenGeminiModel({
|
||||||
|
id: 'gemini-3-flash-preview-tts',
|
||||||
|
name: '',
|
||||||
|
provider: '',
|
||||||
|
group: ''
|
||||||
|
})
|
||||||
|
).toBe(false)
|
||||||
|
expect(
|
||||||
|
isSupportedThinkingTokenGeminiModel({
|
||||||
|
id: 'gemini-3-pro-tts',
|
||||||
|
name: '',
|
||||||
|
provider: '',
|
||||||
|
group: ''
|
||||||
|
})
|
||||||
|
).toBe(false)
|
||||||
})
|
})
|
||||||
|
|
||||||
it('should return false for older gemini models', () => {
|
it('should return false for older gemini models', () => {
|
||||||
@ -1651,3 +1724,436 @@ describe('isGemini3ThinkingTokenModel', () => {
|
|||||||
).toBe(false)
|
).toBe(false)
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
|
describe('getModelSupportedReasoningEffortOptions', () => {
|
||||||
|
describe('Edge cases', () => {
|
||||||
|
it('should return undefined for undefined model', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(undefined)).toBeUndefined()
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return undefined for null model', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(null)).toBeUndefined()
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return undefined for non-reasoning models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-4o' }))).toBeUndefined()
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'claude-3-opus' }))).toBeUndefined()
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'random-model' }))).toBeUndefined()
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('OpenAI models', () => {
|
||||||
|
it('should return correct options for o-series models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3-mini' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o4' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-oss-reasoning' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return correct options for deep research models', () => {
|
||||||
|
// Note: Deep research models need to be actual OpenAI reasoning models to be detected
|
||||||
|
// 'sonar-deep-research' from Perplexity is the primary deep research model
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'medium'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return correct options for GPT-5 models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'minimal',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-preview' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'minimal',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return correct options for GPT-5 Pro models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro' }))).toEqual(['default', 'high'])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro-preview' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return correct options for GPT-5 Codex models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-codex' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-codex-mini' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return correct options for GPT-5.1 models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-preview' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-mini' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return correct options for GPT-5.1 Codex models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-codex' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-codex-mini' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('Grok models', () => {
|
||||||
|
it('should return correct options for Grok 3 mini', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'grok-3-mini' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'low',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return correct options for Grok 4 Fast', () => {
|
||||||
|
expect(
|
||||||
|
getModelSupportedReasoningEffortOptions(createModel({ id: 'grok-4-fast', provider: 'openrouter' }))
|
||||||
|
).toEqual(['default', 'none', 'auto'])
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('Gemini models', () => {
|
||||||
|
it('should return correct options for Gemini Flash models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-2.5-flash' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high',
|
||||||
|
'auto'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-flash-preview' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'minimal',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-flash-latest' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'minimal',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return correct options for Gemini Pro models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-2.5-pro' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high',
|
||||||
|
'auto'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-pro-preview' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'low',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-pro-latest' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'low',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return correct options for Gemini 3 models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-flash' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'minimal',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-pro-preview' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'low',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('Qwen models', () => {
|
||||||
|
it('should return correct options for controllable Qwen models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen-plus' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen-turbo' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen-flash' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen3-8b' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return undefined for always-thinking Qwen models', () => {
|
||||||
|
// These models always think and don't support thinking token control
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen3-thinking' }))).toBeUndefined()
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen3-vl-235b-thinking' }))).toBeUndefined()
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('Doubao models', () => {
|
||||||
|
it('should return correct options for auto-thinking Doubao models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-seed-1.6' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'auto',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-1-5-thinking-pro-m' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'auto',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return correct options for Doubao models after 251015', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-seed-1-6-251015' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'minimal',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-seed-1-6-lite-251015' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'minimal',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return correct options for other Doubao thinking models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('Other providers', () => {
|
||||||
|
it('should return correct options for Hunyuan models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'hunyuan-a13b' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'auto'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return correct options for Zhipu models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.5' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'auto'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.6' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'auto'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return correct options for Perplexity models', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'medium'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should return correct options for DeepSeek hybrid models', () => {
|
||||||
|
expect(
|
||||||
|
getModelSupportedReasoningEffortOptions(createModel({ id: 'deepseek-v3.1', provider: 'deepseek' }))
|
||||||
|
).toEqual(['default', 'none', 'auto'])
|
||||||
|
expect(
|
||||||
|
getModelSupportedReasoningEffortOptions(createModel({ id: 'deepseek-v3.2', provider: 'openrouter' }))
|
||||||
|
).toEqual(['default', 'none', 'auto'])
|
||||||
|
expect(
|
||||||
|
getModelSupportedReasoningEffortOptions(createModel({ id: 'deepseek-chat', provider: 'deepseek' }))
|
||||||
|
).toEqual(['default', 'none', 'auto'])
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('Name-based fallback', () => {
|
||||||
|
it('should fall back to name when id does not match', () => {
|
||||||
|
// Grok 4 Fast requires openrouter provider to be recognized
|
||||||
|
expect(
|
||||||
|
getModelSupportedReasoningEffortOptions(
|
||||||
|
createModel({
|
||||||
|
id: 'custom-id',
|
||||||
|
name: 'grok-4-fast',
|
||||||
|
provider: 'openrouter'
|
||||||
|
})
|
||||||
|
)
|
||||||
|
).toEqual(['default', 'none', 'auto'])
|
||||||
|
|
||||||
|
expect(
|
||||||
|
getModelSupportedReasoningEffortOptions(
|
||||||
|
createModel({
|
||||||
|
id: 'custom-id',
|
||||||
|
name: 'gpt-5.1'
|
||||||
|
})
|
||||||
|
)
|
||||||
|
).toEqual(['default', 'none', 'low', 'medium', 'high'])
|
||||||
|
|
||||||
|
// Qwen models work well for name-based fallback
|
||||||
|
expect(
|
||||||
|
getModelSupportedReasoningEffortOptions(
|
||||||
|
createModel({
|
||||||
|
id: 'custom-id',
|
||||||
|
name: 'qwen-plus'
|
||||||
|
})
|
||||||
|
)
|
||||||
|
).toEqual(['default', 'none', 'low', 'medium', 'high'])
|
||||||
|
})
|
||||||
|
|
||||||
|
it('should use id result when id matches', () => {
|
||||||
|
expect(
|
||||||
|
getModelSupportedReasoningEffortOptions(
|
||||||
|
createModel({
|
||||||
|
id: 'gpt-5.1',
|
||||||
|
name: 'Different Name'
|
||||||
|
})
|
||||||
|
)
|
||||||
|
).toEqual(['default', 'none', 'low', 'medium', 'high'])
|
||||||
|
|
||||||
|
expect(
|
||||||
|
getModelSupportedReasoningEffortOptions(
|
||||||
|
createModel({
|
||||||
|
id: 'o3-mini',
|
||||||
|
name: 'Some other name'
|
||||||
|
})
|
||||||
|
)
|
||||||
|
).toEqual(['default', 'low', 'medium', 'high'])
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('Case sensitivity', () => {
|
||||||
|
it('should handle case insensitive model IDs', () => {
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'GPT-5.1' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'O3-MINI' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high'
|
||||||
|
])
|
||||||
|
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'Gemini-2.5-Flash-Latest' }))).toEqual([
|
||||||
|
'default',
|
||||||
|
'none',
|
||||||
|
'low',
|
||||||
|
'medium',
|
||||||
|
'high',
|
||||||
|
'auto'
|
||||||
|
])
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('Integration with MODEL_SUPPORTED_OPTIONS', () => {
|
||||||
|
it('should return values that match MODEL_SUPPORTED_OPTIONS configuration', () => {
|
||||||
|
// Verify that returned values match the configuration
|
||||||
|
const model = createModel({ id: 'o3' })
|
||||||
|
const result = getModelSupportedReasoningEffortOptions(model)
|
||||||
|
expect(result).toEqual(MODEL_SUPPORTED_OPTIONS.o)
|
||||||
|
|
||||||
|
const gpt5Model = createModel({ id: 'gpt-5' })
|
||||||
|
const gpt5Result = getModelSupportedReasoningEffortOptions(gpt5Model)
|
||||||
|
expect(gpt5Result).toEqual(MODEL_SUPPORTED_OPTIONS.gpt5)
|
||||||
|
|
||||||
|
const geminiModel = createModel({ id: 'gemini-2.5-flash-latest' })
|
||||||
|
const geminiResult = getModelSupportedReasoningEffortOptions(geminiModel)
|
||||||
|
expect(geminiResult).toEqual(MODEL_SUPPORTED_OPTIONS.gemini2_flash)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|||||||
@ -20,6 +20,8 @@ import {
|
|||||||
getModelSupportedVerbosity,
|
getModelSupportedVerbosity,
|
||||||
groupQwenModels,
|
groupQwenModels,
|
||||||
isAnthropicModel,
|
isAnthropicModel,
|
||||||
|
isGemini3FlashModel,
|
||||||
|
isGemini3ProModel,
|
||||||
isGeminiModel,
|
isGeminiModel,
|
||||||
isGemmaModel,
|
isGemmaModel,
|
||||||
isGenerateImageModels,
|
isGenerateImageModels,
|
||||||
@ -432,6 +434,101 @@ describe('model utils', () => {
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
|
describe('isGemini3FlashModel', () => {
|
||||||
|
it('detects gemini-3-flash model', () => {
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-flash' }))).toBe(true)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('detects gemini-3-flash-preview model', () => {
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-flash-preview' }))).toBe(true)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('detects gemini-3-flash with version suffixes', () => {
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-flash-latest' }))).toBe(true)
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-flash-preview-09-2025' }))).toBe(true)
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-flash-exp-1234' }))).toBe(true)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('detects gemini-flash-latest alias', () => {
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'gemini-flash-latest' }))).toBe(true)
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'Gemini-Flash-Latest' }))).toBe(true)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('detects gemini-3-flash with uppercase', () => {
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'Gemini-3-Flash' }))).toBe(true)
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'GEMINI-3-FLASH-PREVIEW' }))).toBe(true)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('excludes gemini-3-flash-image models', () => {
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-flash-image-preview' }))).toBe(false)
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-flash-image' }))).toBe(false)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('returns false for non-flash gemini-3 models', () => {
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-pro' }))).toBe(false)
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-pro-preview' }))).toBe(false)
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-pro-image-preview' }))).toBe(false)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('returns false for other gemini models', () => {
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'gemini-2-flash' }))).toBe(false)
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'gemini-2-flash-preview' }))).toBe(false)
|
||||||
|
expect(isGemini3FlashModel(createModel({ id: 'gemini-2.5-flash-preview-09-2025' }))).toBe(false)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('returns false for null/undefined models', () => {
|
||||||
|
expect(isGemini3FlashModel(null)).toBe(false)
|
||||||
|
expect(isGemini3FlashModel(undefined)).toBe(false)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
describe('isGemini3ProModel', () => {
|
||||||
|
it('detects gemini-3-pro model', () => {
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro' }))).toBe(true)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('detects gemini-3-pro-preview model', () => {
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro-preview' }))).toBe(true)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('detects gemini-3-pro with version suffixes', () => {
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro-latest' }))).toBe(true)
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro-preview-09-2025' }))).toBe(true)
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro-exp-1234' }))).toBe(true)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('detects gemini-pro-latest alias', () => {
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'gemini-pro-latest' }))).toBe(true)
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'Gemini-Pro-Latest' }))).toBe(true)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('detects gemini-3-pro with uppercase', () => {
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'Gemini-3-Pro' }))).toBe(true)
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'GEMINI-3-PRO-PREVIEW' }))).toBe(true)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('excludes gemini-3-pro-image models', () => {
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro-image-preview' }))).toBe(false)
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro-image' }))).toBe(false)
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro-image-latest' }))).toBe(false)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('returns false for non-pro gemini-3 models', () => {
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'gemini-3-flash' }))).toBe(false)
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'gemini-3-flash-preview' }))).toBe(false)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('returns false for other gemini models', () => {
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'gemini-2-pro' }))).toBe(false)
|
||||||
|
expect(isGemini3ProModel(createModel({ id: 'gemini-2.5-pro-preview-09-2025' }))).toBe(false)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('returns false for null/undefined models', () => {
|
||||||
|
expect(isGemini3ProModel(null)).toBe(false)
|
||||||
|
expect(isGemini3ProModel(undefined)).toBe(false)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
describe('isZhipuModel', () => {
|
describe('isZhipuModel', () => {
|
||||||
it('detects Zhipu models by provider', () => {
|
it('detects Zhipu models by provider', () => {
|
||||||
expect(isZhipuModel(createModel({ provider: 'zhipu' }))).toBe(true)
|
expect(isZhipuModel(createModel({ provider: 'zhipu' }))).toBe(true)
|
||||||
|
|||||||
@@ -362,7 +362,7 @@ export const SYSTEM_MODELS: Record<SystemProviderId | 'defaultModel', Model[]> =
     {
       id: 'gemini-3-pro-image-preview',
       provider: 'gemini',
-      name: 'Gemini 3 Pro Image Privew',
+      name: 'Gemini 3 Pro Image Preview',
       group: 'Gemini 3'
     },
     {
@@ -746,6 +746,12 @@ export const SYSTEM_MODELS: Record<SystemProviderId | 'defaultModel', Model[]> =
     }
   ],
   doubao: [
+    {
+      id: 'doubao-seed-1-8-251215',
+      provider: 'doubao',
+      name: 'Doubao-Seed-1.8',
+      group: 'Doubao-Seed-1.8'
+    },
     {
       id: 'doubao-1-5-vision-pro-32k-250115',
       provider: 'doubao',
@@ -1785,5 +1791,13 @@ export const SYSTEM_MODELS: Record<SystemProviderId | 'defaultModel', Model[]> =
       provider: 'cerebras',
       group: 'qwen'
     }
+  ],
+  mimo: [
+    {
+      id: 'mimo-v2-flash',
+      name: 'Mimo V2 Flash',
+      provider: 'mimo',
+      group: 'Mimo'
+    }
   ]
 }
@@ -103,6 +103,7 @@ import MicrosoftModelLogo from '@renderer/assets/images/models/microsoft.png'
 import MicrosoftModelLogoDark from '@renderer/assets/images/models/microsoft_dark.png'
 import MidjourneyModelLogo from '@renderer/assets/images/models/midjourney.png'
 import MidjourneyModelLogoDark from '@renderer/assets/images/models/midjourney_dark.png'
+import MiMoModelLogo from '@renderer/assets/images/models/mimo.svg'
 import {
   default as MinicpmModelLogo,
   default as MinicpmModelLogoDark
@@ -193,7 +194,7 @@ export function getModelLogoById(modelId: string): string | undefined {
     'gpt-5.1': GPT51ModelLogo,
     'gpt-5': GPT5ModelLogo,
     gpts: isLight ? ChatGPT4ModelLogo : ChatGPT4ModelLogoDark,
-    'gpt-oss(?:-[\\w-]+)': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
+    'gpt-oss(?::|-[\\w-]+)': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
     'text-moderation': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
     'babbage-': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
     '(sora-|sora_)': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
@@ -301,7 +302,8 @@ export function getModelLogoById(modelId: string): string | undefined {
     bytedance: BytedanceModelLogo,
     ling: LingModelLogo,
     ring: LingModelLogo,
-    '(V_1|V_1_TURBO|V_2|V_2A|V_2_TURBO|DESCRIBE|UPSCALE)': IdeogramModelLogo
+    '(V_1|V_1_TURBO|V_2|V_2A|V_2_TURBO|DESCRIBE|UPSCALE)': IdeogramModelLogo,
+    mimo: MiMoModelLogo
   } as const satisfies Record<string, string>
 
   for (const key in logoMap) {
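Note: the body of the `for (const key in logoMap)` loop is outside this hunk. The keys are
evidently treated as case-insensitive regular expressions matched against the model id, which
is why the gpt-oss entry is written as a regex alternation. A minimal sketch of that kind of
lookup, not the repository implementation:

function matchModelLogo(modelId: string, logoMap: Record<string, string>): string | undefined {
  for (const key in logoMap) {
    // Keys such as 'gpt-oss(?::|-[\\w-]+)' or '(sora-|sora_)' are regex sources.
    if (new RegExp(key, 'i').test(modelId)) {
      return logoMap[key]
    }
  }
  return undefined
}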
@@ -35,6 +35,16 @@ export const isGPT5ProModel = (model: Model) => {
   return modelId.includes('gpt-5-pro')
 }
 
+export const isGPT52ProModel = (model: Model) => {
+  const modelId = getLowerBaseModelName(model.id)
+  return modelId.includes('gpt-5.2-pro')
+}
+
+export const isGPT51CodexMaxModel = (model: Model) => {
+  const modelId = getLowerBaseModelName(model.id)
+  return modelId.includes('gpt-5.1-codex-max')
+}
+
 export const isOpenAIOpenWeightModel = (model: Model) => {
   const modelId = getLowerBaseModelName(model.id)
   return modelId.includes('gpt-oss')
@@ -42,7 +52,7 @@ export const isOpenAIOpenWeightModel = (model: Model) => {
 
 export const isGPT5SeriesModel = (model: Model) => {
   const modelId = getLowerBaseModelName(model.id)
-  return modelId.includes('gpt-5') && !modelId.includes('gpt-5.1')
+  return modelId.includes('gpt-5') && !modelId.includes('gpt-5.1') && !modelId.includes('gpt-5.2')
 }
 
 export const isGPT5SeriesReasoningModel = (model: Model) => {
@@ -55,9 +65,16 @@ export const isGPT51SeriesModel = (model: Model) => {
   return modelId.includes('gpt-5.1')
 }
 
+export const isGPT52SeriesModel = (model: Model) => {
+  const modelId = getLowerBaseModelName(model.id)
+  return modelId.includes('gpt-5.2')
+}
+
 export function isSupportVerbosityModel(model: Model): boolean {
   const modelId = getLowerBaseModelName(model.id)
-  return (isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) && !modelId.includes('chat')
+  return (
+    (isGPT5SeriesModel(model) || isGPT51SeriesModel(model) || isGPT52SeriesModel(model)) && !modelId.includes('chat')
+  )
 }
 
 export function isOpenAIChatCompletionOnlyModel(model: Model): boolean {
@@ -86,7 +103,7 @@ export function isSupportedReasoningEffortOpenAIModel(model: Model): boolean {
     modelId.includes('o3') ||
     modelId.includes('o4') ||
     modelId.includes('gpt-oss') ||
-    ((isGPT5SeriesModel(model) || isGPT51SeriesModel(model)) && !modelId.includes('chat'))
+    ((isGPT5SeriesModel(model) || isGPT51SeriesModel(model) || isGPT52SeriesModel(model)) && !modelId.includes('chat'))
   )
 }
 
@@ -1,6 +1,7 @@
 import type {
   Model,
   ReasoningEffortConfig,
+  ReasoningEffortOption,
   SystemProviderId,
   ThinkingModelType,
   ThinkingOptionConfig
@@ -11,12 +12,15 @@ import { isEmbeddingModel, isRerankModel } from './embedding'
 import {
   isGPT5ProModel,
   isGPT5SeriesModel,
+  isGPT51CodexMaxModel,
   isGPT51SeriesModel,
+  isGPT52ProModel,
+  isGPT52SeriesModel,
   isOpenAIDeepResearchModel,
   isOpenAIReasoningModel,
   isSupportedReasoningEffortOpenAIModel
 } from './openai'
-import { GEMINI_FLASH_MODEL_REGEX, isGemini3ThinkingTokenModel } from './utils'
+import { GEMINI_FLASH_MODEL_REGEX, isGemini3FlashModel, isGemini3ProModel } from './utils'
 import { isTextToImageModel } from './vision'
 
 // Reasoning models
@@ -25,7 +29,7 @@ export const REASONING_REGEX =
 
 // 模型类型到支持的reasoning_effort的映射表
 // TODO: refactor this. too many identical options
-export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
+export const MODEL_SUPPORTED_REASONING_EFFORT = {
   default: ['low', 'medium', 'high'] as const,
   o: ['low', 'medium', 'high'] as const,
   openai_deep_research: ['medium'] as const,
@@ -33,47 +37,57 @@
   gpt5_codex: ['low', 'medium', 'high'] as const,
   gpt5_1: ['none', 'low', 'medium', 'high'] as const,
   gpt5_1_codex: ['none', 'medium', 'high'] as const,
+  gpt5_1_codex_max: ['none', 'medium', 'high', 'xhigh'] as const,
+  gpt5_2: ['none', 'low', 'medium', 'high', 'xhigh'] as const,
   gpt5pro: ['high'] as const,
+  gpt52pro: ['medium', 'high', 'xhigh'] as const,
   grok: ['low', 'high'] as const,
   grok4_fast: ['auto'] as const,
-  gemini: ['low', 'medium', 'high', 'auto'] as const,
-  gemini3: ['low', 'medium', 'high'] as const,
-  gemini_pro: ['low', 'medium', 'high', 'auto'] as const,
+  gemini2_flash: ['low', 'medium', 'high', 'auto'] as const,
+  gemini2_pro: ['low', 'medium', 'high', 'auto'] as const,
+  gemini3_flash: ['minimal', 'low', 'medium', 'high'] as const,
+  gemini3_pro: ['low', 'high'] as const,
   qwen: ['low', 'medium', 'high'] as const,
   qwen_thinking: ['low', 'medium', 'high'] as const,
   doubao: ['auto', 'high'] as const,
   doubao_no_auto: ['high'] as const,
   doubao_after_251015: ['minimal', 'low', 'medium', 'high'] as const,
   hunyuan: ['auto'] as const,
+  mimo: ['auto'] as const,
   zhipu: ['auto'] as const,
   perplexity: ['low', 'medium', 'high'] as const,
   deepseek_hybrid: ['auto'] as const
-} as const
+} as const satisfies ReasoningEffortConfig
 
 // 模型类型到支持选项的映射表
 export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
-  default: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const,
-  o: MODEL_SUPPORTED_REASONING_EFFORT.o,
-  openai_deep_research: MODEL_SUPPORTED_REASONING_EFFORT.openai_deep_research,
-  gpt5: [...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const,
-  gpt5pro: MODEL_SUPPORTED_REASONING_EFFORT.gpt5pro,
-  gpt5_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_codex,
-  gpt5_1: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1,
+  default: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const,
+  o: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.o] as const,
+  openai_deep_research: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.openai_deep_research] as const,
+  gpt5: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const,
+  gpt5pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5pro] as const,
+  gpt5_codex: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_codex] as const,
gpt5_1: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1,
|
gpt5_1: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1] as const,
|
||||||
gpt5_1_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex,
|
gpt5_1_codex: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex] as const,
|
||||||
grok: MODEL_SUPPORTED_REASONING_EFFORT.grok,
|
gpt5_2: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_2] as const,
|
||||||
grok4_fast: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
|
gpt5_1_codex_max: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex_max] as const,
|
||||||
gemini: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
|
gpt52pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt52pro] as const,
|
||||||
gemini_pro: MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro,
|
grok: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.grok] as const,
|
||||||
gemini3: MODEL_SUPPORTED_REASONING_EFFORT.gemini3,
|
grok4_fast: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
|
||||||
qwen: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const,
|
gemini2_flash: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini2_flash] as const,
|
||||||
qwen_thinking: MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking,
|
gemini2_pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini2_pro] as const,
|
||||||
doubao: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const,
|
gemini3_flash: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini3_flash] as const,
|
||||||
doubao_no_auto: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_no_auto] as const,
|
gemini3_pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini3_pro] as const,
|
||||||
doubao_after_251015: MODEL_SUPPORTED_REASONING_EFFORT.doubao_after_251015,
|
qwen: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const,
|
||||||
hunyuan: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const,
|
qwen_thinking: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking] as const,
|
||||||
zhipu: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const,
|
doubao: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const,
|
||||||
perplexity: MODEL_SUPPORTED_REASONING_EFFORT.perplexity,
|
doubao_no_auto: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_no_auto] as const,
|
||||||
deepseek_hybrid: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.deepseek_hybrid] as const
|
doubao_after_251015: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_after_251015] as const,
|
||||||
|
mimo: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.mimo] as const,
|
||||||
|
hunyuan: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const,
|
||||||
|
zhipu: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const,
|
||||||
|
perplexity: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.perplexity] as const,
|
||||||
|
deepseek_hybrid: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.deepseek_hybrid] as const
|
||||||
} as const
|
} as const
|
||||||
|
|
||||||
const withModelIdAndNameAsId = <T>(model: Model, fn: (model: Model) => T): { idResult: T; nameResult: T } => {
|
const withModelIdAndNameAsId = <T>(model: Model, fn: (model: Model) => T): { idResult: T; nameResult: T } => {
|
||||||
@ -84,18 +98,26 @@ const withModelIdAndNameAsId = <T>(model: Model, fn: (model: Model) => T): { idR
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: add ut
|
||||||
const _getThinkModelType = (model: Model): ThinkingModelType => {
|
const _getThinkModelType = (model: Model): ThinkingModelType => {
|
||||||
let thinkingModelType: ThinkingModelType = 'default'
|
let thinkingModelType: ThinkingModelType = 'default'
|
||||||
const modelId = getLowerBaseModelName(model.id)
|
const modelId = getLowerBaseModelName(model.id)
|
||||||
if (isOpenAIDeepResearchModel(model)) {
|
if (isOpenAIDeepResearchModel(model)) {
|
||||||
return 'openai_deep_research'
|
return 'openai_deep_research'
|
||||||
}
|
} else if (isGPT51SeriesModel(model)) {
|
||||||
if (isGPT51SeriesModel(model)) {
|
|
||||||
if (modelId.includes('codex')) {
|
if (modelId.includes('codex')) {
|
||||||
thinkingModelType = 'gpt5_1_codex'
|
thinkingModelType = 'gpt5_1_codex'
|
||||||
|
if (isGPT51CodexMaxModel(model)) {
|
||||||
|
thinkingModelType = 'gpt5_1_codex_max'
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
thinkingModelType = 'gpt5_1'
|
thinkingModelType = 'gpt5_1'
|
||||||
}
|
}
|
||||||
|
} else if (isGPT52SeriesModel(model)) {
|
||||||
|
thinkingModelType = 'gpt5_2'
|
||||||
|
if (isGPT52ProModel(model)) {
|
||||||
|
thinkingModelType = 'gpt52pro'
|
||||||
|
}
|
||||||
} else if (isGPT5SeriesModel(model)) {
|
} else if (isGPT5SeriesModel(model)) {
|
||||||
if (modelId.includes('codex')) {
|
if (modelId.includes('codex')) {
|
||||||
thinkingModelType = 'gpt5_codex'
|
thinkingModelType = 'gpt5_codex'
|
||||||
@ -110,16 +132,18 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
|
|||||||
} else if (isGrok4FastReasoningModel(model)) {
|
} else if (isGrok4FastReasoningModel(model)) {
|
||||||
thinkingModelType = 'grok4_fast'
|
thinkingModelType = 'grok4_fast'
|
||||||
} else if (isSupportedThinkingTokenGeminiModel(model)) {
|
} else if (isSupportedThinkingTokenGeminiModel(model)) {
|
||||||
if (GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
|
if (isGemini3FlashModel(model)) {
|
||||||
thinkingModelType = 'gemini'
|
thinkingModelType = 'gemini3_flash'
|
||||||
|
} else if (isGemini3ProModel(model)) {
|
||||||
|
thinkingModelType = 'gemini3_pro'
|
||||||
|
} else if (GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
|
||||||
|
thinkingModelType = 'gemini2_flash'
|
||||||
} else {
|
} else {
|
||||||
thinkingModelType = 'gemini_pro'
|
thinkingModelType = 'gemini2_pro'
|
||||||
}
|
}
|
||||||
if (isGemini3ThinkingTokenModel(model)) {
|
} else if (isSupportedReasoningEffortGrokModel(model)) {
|
||||||
thinkingModelType = 'gemini3'
|
thinkingModelType = 'grok'
|
||||||
}
|
} else if (isSupportedThinkingTokenQwenModel(model)) {
|
||||||
} else if (isSupportedReasoningEffortGrokModel(model)) thinkingModelType = 'grok'
|
|
||||||
else if (isSupportedThinkingTokenQwenModel(model)) {
|
|
||||||
if (isQwenAlwaysThinkModel(model)) {
|
if (isQwenAlwaysThinkModel(model)) {
|
||||||
thinkingModelType = 'qwen_thinking'
|
thinkingModelType = 'qwen_thinking'
|
||||||
}
|
}
|
||||||
@ -127,15 +151,22 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
|
|||||||
} else if (isSupportedThinkingTokenDoubaoModel(model)) {
|
} else if (isSupportedThinkingTokenDoubaoModel(model)) {
|
||||||
if (isDoubaoThinkingAutoModel(model)) {
|
if (isDoubaoThinkingAutoModel(model)) {
|
||||||
thinkingModelType = 'doubao'
|
thinkingModelType = 'doubao'
|
||||||
} else if (isDoubaoSeedAfter251015(model)) {
|
} else if (isDoubaoSeedAfter251015(model) || isDoubaoSeed18Model(model)) {
|
||||||
thinkingModelType = 'doubao_after_251015'
|
thinkingModelType = 'doubao_after_251015'
|
||||||
} else {
|
} else {
|
||||||
thinkingModelType = 'doubao_no_auto'
|
thinkingModelType = 'doubao_no_auto'
|
||||||
}
|
}
|
||||||
} else if (isSupportedThinkingTokenHunyuanModel(model)) thinkingModelType = 'hunyuan'
|
} else if (isSupportedThinkingTokenHunyuanModel(model)) {
|
||||||
else if (isSupportedReasoningEffortPerplexityModel(model)) thinkingModelType = 'perplexity'
|
thinkingModelType = 'hunyuan'
|
||||||
else if (isSupportedThinkingTokenZhipuModel(model)) thinkingModelType = 'zhipu'
|
} else if (isSupportedReasoningEffortPerplexityModel(model)) {
|
||||||
else if (isDeepSeekHybridInferenceModel(model)) thinkingModelType = 'deepseek_hybrid'
|
thinkingModelType = 'perplexity'
|
||||||
|
} else if (isSupportedThinkingTokenZhipuModel(model)) {
|
||||||
|
thinkingModelType = 'zhipu'
|
||||||
|
} else if (isDeepSeekHybridInferenceModel(model)) {
|
||||||
|
thinkingModelType = 'deepseek_hybrid'
|
||||||
|
} else if (isSupportedThinkingTokenMiMoModel(model)) {
|
||||||
|
thinkingModelType = 'mimo'
|
||||||
|
}
|
||||||
return thinkingModelType
|
return thinkingModelType
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -148,6 +179,72 @@ export const getThinkModelType = (model: Model): ThinkingModelType => {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const _getModelSupportedReasoningEffortOptions = (model: Model): ReasoningEffortOption[] | undefined => {
|
||||||
|
if (!isSupportedReasoningEffortModel(model) && !isSupportedThinkingTokenModel(model)) {
|
||||||
|
return undefined
|
||||||
|
}
|
||||||
|
// use private function to avoid redundant function calling
|
||||||
|
const thinkingType = _getThinkModelType(model)
|
||||||
|
return MODEL_SUPPORTED_OPTIONS[thinkingType]
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Gets the supported reasoning effort options for a given model.
|
||||||
|
*
|
||||||
|
* This function determines which reasoning effort levels a model supports based on its type.
|
||||||
|
* It works with models that support either `reasoning_effort` parameter (like OpenAI o-series)
|
||||||
|
* or thinking token control (like Claude, Gemini, Qwen, etc.).
|
||||||
|
*
|
||||||
|
* The function implements a fallback mechanism: it first checks the model's `id`, and if that
|
||||||
|
* doesn't match any known patterns, it falls back to checking the model's `name`.
|
||||||
|
*
|
||||||
|
* @param model - The model to check for reasoning effort support. Can be undefined or null.
|
||||||
|
* @returns An array of supported reasoning effort options, or undefined if:
|
||||||
|
* - The model is null/undefined
|
||||||
|
* - The model doesn't support reasoning effort or thinking tokens
|
||||||
|
*
|
||||||
|
* All reasoning models support the 'default' option (always the first element),
|
||||||
|
* which represents no additional configuration for thinking behavior.
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* // OpenAI o-series models support default, low, medium, high
|
||||||
|
* getModelSupportedReasoningEffortOptions({ id: 'o3-mini', ... })
|
||||||
|
* // Returns: ['default', 'low', 'medium', 'high']
|
||||||
|
* // 'default' = no additional configuration for thinking behavior
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* // GPT-5.1 models support default, none, low, medium, high
|
||||||
|
* getModelSupportedReasoningEffortOptions({ id: 'gpt-5.1', ... })
|
||||||
|
* // Returns: ['default', 'none', 'low', 'medium', 'high']
|
||||||
|
* // 'default' = no additional configuration
|
||||||
|
* // 'none' = explicitly disable reasoning
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* // Gemini Flash models support default, none, low, medium, high, auto
|
||||||
|
* getModelSupportedReasoningEffortOptions({ id: 'gemini-2.5-flash-latest', ... })
|
||||||
|
* // Returns: ['default', 'none', 'low', 'medium', 'high', 'auto']
|
||||||
|
* // 'default' = no additional configuration
|
||||||
|
* // 'auto' = let the model automatically decide
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* // Non-reasoning models return undefined
|
||||||
|
* getModelSupportedReasoningEffortOptions({ id: 'gpt-4o', ... })
|
||||||
|
* // Returns: undefined
|
||||||
|
*
|
||||||
|
* @example
|
||||||
|
* // Name fallback when id doesn't match
|
||||||
|
* getModelSupportedReasoningEffortOptions({ id: 'custom-id', name: 'gpt-5.1', ... })
|
||||||
|
* // Returns: ['default', 'none', 'low', 'medium', 'high']
|
||||||
|
*/
|
||||||
|
export const getModelSupportedReasoningEffortOptions = (
|
||||||
|
model: Model | undefined | null
|
||||||
|
): ReasoningEffortOption[] | undefined => {
|
||||||
|
if (!model) return undefined
|
||||||
|
|
||||||
|
const { idResult, nameResult } = withModelIdAndNameAsId(model, _getModelSupportedReasoningEffortOptions)
|
||||||
|
return idResult ?? nameResult
|
||||||
|
}
|
||||||
|
|
||||||
function _isSupportedThinkingTokenModel(model: Model): boolean {
|
function _isSupportedThinkingTokenModel(model: Model): boolean {
|
||||||
// Specifically for DeepSeek V3.1. White list for now
|
// Specifically for DeepSeek V3.1. White list for now
|
||||||
if (isDeepSeekHybridInferenceModel(model)) {
|
if (isDeepSeekHybridInferenceModel(model)) {
|
||||||
@ -178,17 +275,20 @@ function _isSupportedThinkingTokenModel(model: Model): boolean {
|
|||||||
isSupportedThinkingTokenClaudeModel(model) ||
|
isSupportedThinkingTokenClaudeModel(model) ||
|
||||||
isSupportedThinkingTokenDoubaoModel(model) ||
|
isSupportedThinkingTokenDoubaoModel(model) ||
|
||||||
isSupportedThinkingTokenHunyuanModel(model) ||
|
isSupportedThinkingTokenHunyuanModel(model) ||
|
||||||
isSupportedThinkingTokenZhipuModel(model)
|
isSupportedThinkingTokenZhipuModel(model) ||
|
||||||
|
isSupportedThinkingTokenMiMoModel(model)
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
/** 用于判断是否支持控制思考,但不一定以reasoning_effort的方式 */
|
/** 用于判断是否支持控制思考,但不一定以reasoning_effort的方式 */
|
||||||
|
// TODO: rename it
|
||||||
export function isSupportedThinkingTokenModel(model?: Model): boolean {
|
export function isSupportedThinkingTokenModel(model?: Model): boolean {
|
||||||
if (!model) return false
|
if (!model) return false
|
||||||
const { idResult, nameResult } = withModelIdAndNameAsId(model, _isSupportedThinkingTokenModel)
|
const { idResult, nameResult } = withModelIdAndNameAsId(model, _isSupportedThinkingTokenModel)
|
||||||
return idResult || nameResult
|
return idResult || nameResult
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: it should be merged in isSupportedThinkingTokenModel
|
||||||
export function isSupportedReasoningEffortModel(model?: Model): boolean {
|
export function isSupportedReasoningEffortModel(model?: Model): boolean {
|
||||||
if (!model) {
|
if (!model) {
|
||||||
return false
|
return false
|
||||||
@ -370,7 +470,7 @@ export function isQwenAlwaysThinkModel(model?: Model): boolean {
|
|||||||
|
|
||||||
// Doubao 支持思考模式的模型正则
|
// Doubao 支持思考模式的模型正则
|
||||||
export const DOUBAO_THINKING_MODEL_REGEX =
|
export const DOUBAO_THINKING_MODEL_REGEX =
|
||||||
/doubao-(?:1[.-]5-thinking-vision-pro|1[.-]5-thinking-pro-m|seed-1[.-]6(?:-flash)?(?!-(?:thinking)(?:-|$)))(?:-[\w-]+)*/i
|
/doubao-(?:1[.-]5-thinking-vision-pro|1[.-]5-thinking-pro-m|seed-1[.-][68](?:-flash)?(?!-(?:thinking)(?:-|$))|seed-code(?:-preview)?(?:-\d+)?)(?:-[\w-]+)*/i
|
||||||
|
|
||||||
// 支持 auto 的 Doubao 模型 doubao-seed-1.6-xxx doubao-seed-1-6-xxx doubao-1-5-thinking-pro-m-xxx
|
// 支持 auto 的 Doubao 模型 doubao-seed-1.6-xxx doubao-seed-1-6-xxx doubao-1-5-thinking-pro-m-xxx
|
||||||
// Auto thinking is no longer supported after version 251015, see https://console.volcengine.com/ark/region:ark+cn-beijing/model/detail?Id=doubao-seed-1-6
|
// Auto thinking is no longer supported after version 251015, see https://console.volcengine.com/ark/region:ark+cn-beijing/model/detail?Id=doubao-seed-1-6
|
||||||
@ -388,6 +488,11 @@ export function isDoubaoSeedAfter251015(model: Model): boolean {
|
|||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export function isDoubaoSeed18Model(model: Model): boolean {
|
||||||
|
const pattern = /doubao-seed-1[.-]8(?:-[\w-]+)?/i
|
||||||
|
return pattern.test(model.id) || pattern.test(model.name)
|
||||||
|
}
|
||||||
|
|
||||||
export function isSupportedThinkingTokenDoubaoModel(model?: Model): boolean {
|
export function isSupportedThinkingTokenDoubaoModel(model?: Model): boolean {
|
||||||
if (!model) {
|
if (!model) {
|
||||||
return false
|
return false
|
||||||
@ -469,6 +574,11 @@ export const isSupportedThinkingTokenZhipuModel = (model: Model): boolean => {
|
|||||||
return ['glm-4.5', 'glm-4.6'].some((id) => modelId.includes(id))
|
return ['glm-4.5', 'glm-4.6'].some((id) => modelId.includes(id))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export const isSupportedThinkingTokenMiMoModel = (model: Model): boolean => {
|
||||||
|
const modelId = getLowerBaseModelName(model.id, '/')
|
||||||
|
return ['mimo-v2-flash'].some((id) => modelId.includes(id))
|
||||||
|
}
|
||||||
|
|
||||||
export const isDeepSeekHybridInferenceModel = (model: Model) => {
|
export const isDeepSeekHybridInferenceModel = (model: Model) => {
|
||||||
const { idResult, nameResult } = withModelIdAndNameAsId(model, (model) => {
|
const { idResult, nameResult } = withModelIdAndNameAsId(model, (model) => {
|
||||||
const modelId = getLowerBaseModelName(model.id)
|
const modelId = getLowerBaseModelName(model.id)
|
||||||
@ -507,6 +617,8 @@ export const isZhipuReasoningModel = (model?: Model): boolean => {
|
|||||||
return isSupportedThinkingTokenZhipuModel(model) || modelId.includes('glm-z1')
|
return isSupportedThinkingTokenZhipuModel(model) || modelId.includes('glm-z1')
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export const isMiMoReasoningModel = isSupportedThinkingTokenMiMoModel
|
||||||
|
|
||||||
export const isStepReasoningModel = (model?: Model): boolean => {
|
export const isStepReasoningModel = (model?: Model): boolean => {
|
||||||
if (!model) {
|
if (!model) {
|
||||||
return false
|
return false
|
||||||
@ -557,6 +669,7 @@ export function isReasoningModel(model?: Model): boolean {
|
|||||||
isDeepSeekHybridInferenceModel(model) ||
|
isDeepSeekHybridInferenceModel(model) ||
|
||||||
isLingReasoningModel(model) ||
|
isLingReasoningModel(model) ||
|
||||||
isMiniMaxReasoningModel(model) ||
|
isMiniMaxReasoningModel(model) ||
|
||||||
|
isMiMoReasoningModel(model) ||
|
||||||
modelId.includes('magistral') ||
|
modelId.includes('magistral') ||
|
||||||
modelId.includes('pangu-pro-moe') ||
|
modelId.includes('pangu-pro-moe') ||
|
||||||
modelId.includes('seed-oss') ||
|
modelId.includes('seed-oss') ||
|
||||||
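The new `getModelSupportedReasoningEffortOptions` helper resolves a model to its thinking type and then to the option list above, with `'default'` always first. A possible consumer, sketched for orientation only — the import path and UI wiring are assumptions, not part of this change:

```typescript
// Sketch only: assumes the helper is exported from the reasoning config module
// (the path below is a guess based on the repo layout).
import { getModelSupportedReasoningEffortOptions } from '@renderer/config/models/reasoning'
import type { Model } from '@renderer/types'

// Build the values for a reasoning-effort selector; non-reasoning models get an empty list.
export const effortSelectorValues = (model: Model | undefined | null): string[] =>
  [...(getModelSupportedReasoningEffortOptions(model) ?? [])]

// For a gpt-5.2 id this yields ['default', 'none', 'low', 'medium', 'high', 'xhigh'],
// where 'default' means "leave the model's thinking behaviour unconfigured".
```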
@@ -25,11 +25,13 @@ export const FUNCTION_CALLING_MODELS = [
   'learnlm(?:-[\\w-]+)?',
   'gemini(?:-[\\w-]+)?', // 提前排除了gemini的嵌入模型
   'grok-3(?:-[\\w-]+)?',
-  'doubao-seed-1[.-]6(?:-[\\w-]+)?',
+  'doubao-seed-1[.-][68](?:-[\\w-]+)?',
+  'doubao-seed-code(?:-[\\w-]+)?',
   'kimi-k2(?:-[\\w-]+)?',
   'ling-\\w+(?:-[\\w-]+)?',
   'ring-\\w+(?:-[\\w-]+)?',
-  'minimax-m2'
+  'minimax-m2',
+  'mimo-v2-flash'
 ] as const
 
 const FUNCTION_CALLING_EXCLUDED_MODELS = [
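The widened Doubao entry now covers both the 1.6 and 1.8 seed generations. A quick standalone check of the same pattern (illustrative only — the list entries above are strings, presumably compiled into regular expressions elsewhere in the matcher):

```typescript
// Same character class as the updated list entry: [68] accepts 1.6/1-6 and 1.8/1-8 ids.
const doubaoSeed = /doubao-seed-1[.-][68](?:-[\w-]+)?/i

console.log(doubaoSeed.test('doubao-seed-1-6-flash-250615')) // true (matched before this change)
console.log(doubaoSeed.test('doubao-seed-1.8'))              // true (newly covered)
console.log(doubaoSeed.test('doubao-seed-1-7'))              // false
```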
@@ -1,5 +1,6 @@
 import type OpenAI from '@cherrystudio/openai'
 import { isEmbeddingModel, isRerankModel } from '@renderer/config/models/embedding'
+import type { Assistant } from '@renderer/types'
 import { type Model, SystemProviderIds } from '@renderer/types'
 import type { OpenAIVerbosity, ValidOpenAIVerbosity } from '@renderer/types/aiCoreTypes'
 import { getLowerBaseModelName } from '@renderer/utils'
@@ -8,6 +9,7 @@ import {
   isGPT5ProModel,
   isGPT5SeriesModel,
   isGPT51SeriesModel,
+  isGPT52SeriesModel,
   isOpenAIChatCompletionOnlyModel,
   isOpenAIOpenWeightModel,
   isOpenAIReasoningModel,
@@ -48,13 +50,16 @@ export function isSupportedModel(model: OpenAI.Models.Model): boolean {
  * @param model - The model to check
  * @returns true if the model supports temperature parameter
  */
-export function isSupportTemperatureModel(model: Model | undefined | null): boolean {
+export function isSupportTemperatureModel(model: Model | undefined | null, assistant?: Assistant): boolean {
   if (!model) {
     return false
   }
 
   // OpenAI reasoning models (except open weight) don't support temperature
   if (isOpenAIReasoningModel(model) && !isOpenAIOpenWeightModel(model)) {
+    if (isGPT52SeriesModel(model) && assistant?.settings?.reasoning_effort === 'none') {
+      return true
+    }
     return false
   }
 
@@ -76,13 +81,16 @@ export function isSupportTemperatureModel(model: Model | undefined | null): bool
  * @param model - The model to check
  * @returns true if the model supports top_p parameter
  */
-export function isSupportTopPModel(model: Model | undefined | null): boolean {
+export function isSupportTopPModel(model: Model | undefined | null, assistant?: Assistant): boolean {
   if (!model) {
     return false
   }
 
   // OpenAI reasoning models (except open weight) don't support top_p
   if (isOpenAIReasoningModel(model) && !isOpenAIOpenWeightModel(model)) {
+    if (isGPT52SeriesModel(model) && assistant?.settings?.reasoning_effort === 'none') {
+      return true
+    }
     return false
   }
 
@@ -259,3 +267,43 @@ export const isGemini3ThinkingTokenModel = (model: Model) => {
   const modelId = getLowerBaseModelName(model.id)
   return isGemini3Model(model) && !modelId.includes('image')
 }
+
+/**
+ * Check if the model is a Gemini 3 Flash model
+ * Matches: gemini-3-flash, gemini-3-flash-preview, gemini-3-flash-preview-09-2025, gemini-flash-latest (alias)
+ * Excludes: gemini-3-flash-image-preview
+ * @param model - The model to check
+ * @returns true if the model is a Gemini 3 Flash model
+ */
+export const isGemini3FlashModel = (model: Model | undefined | null): boolean => {
+  if (!model) {
+    return false
+  }
+  const modelId = getLowerBaseModelName(model.id)
+  // Check for gemini-flash-latest alias (currently points to gemini-3-flash, may change in future)
+  if (modelId === 'gemini-flash-latest') {
+    return true
+  }
+  // Check for gemini-3-flash with optional suffixes, excluding image variants
+  return /gemini-3-flash(?!-image)(?:-[\w-]+)*$/i.test(modelId)
+}
+
+/**
+ * Check if the model is a Gemini 3 Pro model
+ * Matches: gemini-3-pro, gemini-3-pro-preview, gemini-3-pro-preview-09-2025, gemini-pro-latest (alias)
+ * Excludes: gemini-3-pro-image-preview
+ * @param model - The model to check
+ * @returns true if the model is a Gemini 3 Pro model
+ */
+export const isGemini3ProModel = (model: Model | undefined | null): boolean => {
+  if (!model) {
+    return false
+  }
+  const modelId = getLowerBaseModelName(model.id)
+  // Check for gemini-pro-latest alias (currently points to gemini-3-pro, may change in future)
+  if (modelId === 'gemini-pro-latest') {
+    return true
+  }
+  // Check for gemini-3-pro with optional suffixes, excluding image variants
+  return /gemini-3-pro(?!-image)(?:-[\w-]+)*$/i.test(modelId)
+}
|||||||
@ -45,7 +45,8 @@ const visionAllowedModels = [
|
|||||||
'deepseek-vl(?:[\\w-]+)?',
|
'deepseek-vl(?:[\\w-]+)?',
|
||||||
'kimi-latest',
|
'kimi-latest',
|
||||||
'gemma-3(?:-[\\w-]+)',
|
'gemma-3(?:-[\\w-]+)',
|
||||||
'doubao-seed-1[.-]6(?:-[\\w-]+)?',
|
'doubao-seed-1[.-][68](?:-[\\w-]+)?',
|
||||||
|
'doubao-seed-code(?:-[\\w-]+)?',
|
||||||
'kimi-thinking-preview',
|
'kimi-thinking-preview',
|
||||||
`gemma3(?:[-:\\w]+)?`,
|
`gemma3(?:[-:\\w]+)?`,
|
||||||
'kimi-vl-a3b-thinking(?:-[\\w-]+)?',
|
'kimi-vl-a3b-thinking(?:-[\\w-]+)?',
|
||||||
|
|||||||
@@ -31,6 +31,7 @@ import JinaProviderLogo from '@renderer/assets/images/providers/jina.png'
 import LanyunProviderLogo from '@renderer/assets/images/providers/lanyun.png'
 import LMStudioProviderLogo from '@renderer/assets/images/providers/lmstudio.png'
 import LongCatProviderLogo from '@renderer/assets/images/providers/longcat.png'
+import MiMoProviderLogo from '@renderer/assets/images/providers/mimo.svg'
 import MinimaxProviderLogo from '@renderer/assets/images/providers/minimax.png'
 import MistralProviderLogo from '@renderer/assets/images/providers/mistral.png'
 import ModelScopeProviderLogo from '@renderer/assets/images/providers/modelscope.png'
@@ -695,6 +696,17 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
     models: SYSTEM_MODELS.cerebras,
     isSystem: true,
     enabled: false
+  },
+  mimo: {
+    id: 'mimo',
+    name: 'Xiaomi MiMo',
+    type: 'openai',
+    apiKey: '',
+    apiHost: 'https://api.xiaomimimo.com',
+    anthropicApiHost: 'https://api.xiaomimimo.com/anthropic',
+    models: SYSTEM_MODELS.mimo,
+    isSystem: true,
+    enabled: false
   }
 } as const
 
@@ -763,7 +775,8 @@ export const PROVIDER_LOGO_MAP: AtLeast<SystemProviderId, string> = {
   huggingface: HuggingfaceProviderLogo,
   sophnet: SophnetProviderLogo,
   gateway: AIGatewayProviderLogo,
-  cerebras: CerebrasProviderLogo
+  cerebras: CerebrasProviderLogo,
+  mimo: MiMoProviderLogo
 } as const
 
 export function getProviderLogo(providerId: string) {
@@ -1434,5 +1447,16 @@ export const PROVIDER_URLS: Record<SystemProviderId, ProviderUrls> = {
       docs: 'https://inference-docs.cerebras.ai/introduction',
       models: 'https://inference-docs.cerebras.ai/models/overview'
     }
+  },
+  mimo: {
+    api: {
+      url: 'https://api.xiaomimimo.com'
+    },
+    websites: {
+      official: 'https://platform.xiaomimimo.com/',
+      apiKey: 'https://platform.xiaomimimo.com/#/console/usage',
+      docs: 'https://platform.xiaomimimo.com/#/docs/welcome',
+      models: 'https://platform.xiaomimimo.com/'
+    }
   }
 }
@@ -31,6 +31,11 @@ export const WEB_SEARCH_PROVIDER_CONFIG: Record<WebSearchProviderId, WebSearchPr
       apiKey: 'https://dashboard.exa.ai/api-keys'
     }
   },
+  'exa-mcp': {
+    websites: {
+      official: 'https://exa.ai'
+    }
+  },
   bocha: {
     websites: {
       official: 'https://bochaai.com',
@@ -80,6 +85,11 @@ export const WEB_SEARCH_PROVIDERS: WebSearchProvider[] = [
     apiHost: 'https://api.exa.ai',
     apiKey: ''
   },
+  {
+    id: 'exa-mcp',
+    name: 'ExaMCP',
+    apiHost: 'https://mcp.exa.ai/mcp'
+  },
   {
     id: 'bocha',
     name: 'Bocha',
@@ -5,7 +5,7 @@
  */
 
 import { loggerService } from '@logger'
-import type { AgentType, BuiltinMCPServerName, BuiltinOcrProviderId, ThinkingOption } from '@renderer/types'
+import type { AgentType, BuiltinMCPServerName, BuiltinOcrProviderId } from '@renderer/types'
 import { BuiltinMCPServerNames } from '@renderer/types'
 
 import i18n from './index'
@@ -88,7 +88,8 @@ const providerKeyMap = {
   huggingface: 'provider.huggingface',
   sophnet: 'provider.sophnet',
   gateway: 'provider.ai-gateway',
-  cerebras: 'provider.cerebras'
+  cerebras: 'provider.cerebras',
+  mimo: 'provider.mimo'
 } as const
 
 /**
@@ -310,19 +311,6 @@ export const getHttpMessageLabel = (key: string): string => {
   return getLabel(httpMessageKeyMap, key)
 }
 
-const reasoningEffortOptionsKeyMap: Record<ThinkingOption, string> = {
-  none: 'assistants.settings.reasoning_effort.off',
-  minimal: 'assistants.settings.reasoning_effort.minimal',
-  high: 'assistants.settings.reasoning_effort.high',
-  low: 'assistants.settings.reasoning_effort.low',
-  medium: 'assistants.settings.reasoning_effort.medium',
-  auto: 'assistants.settings.reasoning_effort.default'
-} as const
-
-export const getReasoningEffortOptionsLabel = (key: string): string => {
-  return getLabel(reasoningEffortOptionsKeyMap, key)
-}
-
 const fileFieldKeyMap = {
   created_at: 'files.created_at',
   size: 'files.size',
@@ -342,7 +330,9 @@ const builtInMcpDescriptionKeyMap: Record<BuiltinMCPServerName, string> = {
   [BuiltinMCPServerNames.filesystem]: 'settings.mcp.builtinServersDescriptions.filesystem',
   [BuiltinMCPServerNames.difyKnowledge]: 'settings.mcp.builtinServersDescriptions.dify_knowledge',
   [BuiltinMCPServerNames.python]: 'settings.mcp.builtinServersDescriptions.python',
-  [BuiltinMCPServerNames.didiMCP]: 'settings.mcp.builtinServersDescriptions.didi_mcp'
+  [BuiltinMCPServerNames.didiMCP]: 'settings.mcp.builtinServersDescriptions.didi_mcp',
+  [BuiltinMCPServerNames.browser]: 'settings.mcp.builtinServersDescriptions.browser',
+  [BuiltinMCPServerNames.nowledgeMem]: 'settings.mcp.builtinServersDescriptions.nowledge_mem'
 } as const
 
 export const getBuiltInMcpServerDescriptionLabel = (key: string): string => {
@@ -32,6 +32,7 @@
     },
     "gitBash": {
      "autoDetected": "Using auto-detected Git Bash",
+      "autoDiscoveredHint": "Auto-discovered",
      "clear": {
        "button": "Clear custom path"
      },
@@ -39,6 +40,7 @@
      "error": {
        "description": "Git Bash is required to run agents on Windows. The agent cannot function without it. Please install Git for Windows from",
        "recheck": "Recheck Git Bash Installation",
+        "required": "Git Bash path is required on Windows",
        "title": "Git Bash Required"
      },
      "found": {
@@ -51,7 +53,9 @@
        "invalidPath": "Selected file is not a valid Git Bash executable (bash.exe).",
        "title": "Select Git Bash executable"
      },
-      "success": "Git Bash detected successfully!"
+      "placeholder": "Select bash.exe path",
+      "success": "Git Bash detected successfully!",
+      "tooltip": "Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
    },
    "input": {
      "placeholder": "Enter your message here, send with {{key}} - @ select path, / select command"
@@ -472,6 +476,7 @@
      "button": "Import",
      "error": {
        "fetch_failed": "Failed to fetch from URL",
+        "file_required": "Please select a file first",
        "invalid_format": "Invalid assistant format: missing required fields",
        "url_required": "Please enter a URL"
      },
@@ -486,11 +491,14 @@
    },
    "manage": {
      "batch_delete": {
-        "button": "Batch Delete",
+        "button": "Delete",
        "confirm": "Are you sure you want to delete the selected {{count}} assistants?"
      },
+      "batch_export": {
+        "button": "Export"
+      },
      "mode": {
-        "delete": "Delete",
+        "manage": "Manage",
        "sort": "Sort"
      },
      "title": "Manage Assistants"
@@ -540,13 +548,23 @@
      "more": "Assistant Settings",
      "prompt": "Prompt Settings",
      "reasoning_effort": {
+        "auto": "Auto",
+        "auto_description": "Flexibly determine reasoning effort",
        "default": "Default",
+        "default_description": "Depend on the model's default behavior, without any configuration.",
        "high": "High",
+        "high_description": "High level reasoning",
        "label": "Reasoning effort",
        "low": "Low",
+        "low_description": "Low level reasoning",
        "medium": "Medium",
+        "medium_description": "Medium level reasoning",
        "minimal": "Minimal",
-        "off": "Off"
+        "minimal_description": "Minimal reasoning",
+        "off": "Off",
+        "off_description": "Disable reasoning",
+        "xhigh": "Extra High",
+        "xhigh_description": "Extra high level reasoning"
      },
      "regular_phrases": {
        "add": "Add Phrase",
@@ -1026,6 +1044,29 @@
      "yuque": "Export to Yuque"
    },
    "list": "Topic List",
+    "manage": {
+      "clear_selection": "Clear Selection",
+      "delete": {
+        "confirm": {
+          "content": "Are you sure you want to delete {{count}} selected topic(s)? This action cannot be undone.",
+          "title": "Delete Topics"
+        },
+        "success": "Deleted {{count}} topic(s)"
+      },
+      "deselect_all": "Deselect All",
+      "error": {
+        "at_least_one": "At least one topic must be kept"
+      },
+      "move": {
+        "button": "Move",
+        "placeholder": "Select target assistant",
+        "success": "Moved {{count}} topic(s)"
+      },
+      "pinned": "Pinned Topics",
+      "selected_count": "{{count}} selected",
+      "title": "Manage Topics",
+      "unpinned": "Unpinned Topics"
+    },
    "move_to": "Move to",
    "new": "New Topic",
    "pin": "Pin Topic",
@@ -1036,6 +1077,10 @@
      "label": "Topic Prompts",
      "tips": "Topic Prompts: Additional supplementary prompts provided for the current topic"
    },
+    "search": {
+      "placeholder": "Search topics...",
+      "title": "Search"
+    },
    "title": "Topics",
    "unpin": "Unpin Topic"
  },
@@ -1220,11 +1265,13 @@
      }
    },
    "stop": "Stop",
+    "subscribe": "Subscribe",
    "success": "Success",
    "swap": "Swap",
    "topics": "Topics",
    "unknown": "Unknown",
    "unnamed": "Unnamed",
+    "unsubscribe": "Unsubscribe",
    "update_success": "Update successfully",
    "upload_files": "Upload file",
    "warning": "Warning",
@@ -1719,7 +1766,7 @@
      "import": {
        "error": "Import failed"
      },
-      "imported": "Imported successfully"
+      "imported": "Successfully imported {{count}} assistant(s)"
    },
    "api": {
      "check": {
@@ -2151,6 +2198,7 @@
    "collapse": "Collapse",
    "content_placeholder": "Please enter the note content...",
    "copyContent": "Copy Content",
+    "crossPlatformRestoreWarning": "Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
    "delete": "delete",
    "delete_confirm": "Are you sure you want to delete this {{type}}?",
    "delete_folder_confirm": "Are you sure you want to delete the folder \"{{name}}\" and all of its contents?",
@@ -2595,6 +2643,7 @@
    "lanyun": "LANYUN",
    "lmstudio": "LM Studio",
    "longcat": "LongCat AI",
+    "mimo": "Xiaomi MiMo",
    "minimax": "MiniMax",
    "mistral": "Mistral",
    "modelscope": "ModelScope",
@@ -3883,6 +3932,7 @@
    "builtinServers": "Builtin Servers",
    "builtinServersDescriptions": {
      "brave_search": "An MCP server implementation integrating the Brave Search API, providing both web and local search functionalities. Requires configuring the BRAVE_API_KEY environment variable",
+      "browser": "Control a headless Electron window via Chrome DevTools Protocol. Tools: open URL, execute single-line JS, reset session.",
      "didi_mcp": "DiDi MCP server providing ride-hailing services including map search, price estimation, order management, and driver tracking. Only available in Mainland China. Requires configuring the DIDI_API_KEY environment variable",
      "dify_knowledge": "Dify's MCP server implementation provides a simple API to interact with Dify. Requires configuring the Dify Key",
      "fetch": "MCP server for retrieving URL web content",
@@ -3890,6 +3940,7 @@
      "mcp_auto_install": "Automatically install MCP service (beta)",
      "memory": "Persistent memory implementation based on a local knowledge graph. This enables the model to remember user-related information across different conversations. Requires configuring the MEMORY_FILE_PATH environment variable.",
      "no": "No description",
+      "nowledge_mem": "Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
      "python": "Execute Python code in a secure sandbox environment. Run Python with Pyodide, supporting most standard libraries and scientific computing packages",
      "sequentialthinking": "A MCP server implementation that provides tools for dynamic and reflective problem solving through structured thinking processes"
    },
@@ -32,6 +32,7 @@
    },
    "gitBash": {
      "autoDetected": "使用自动检测的 Git Bash",
+      "autoDiscoveredHint": "自动发现",
      "clear": {
        "button": "清除自定义路径"
      },
@@ -39,6 +40,7 @@
      "error": {
        "description": "在 Windows 上运行智能体需要 Git Bash。没有它智能体无法运行。请从以下地址安装 Git for Windows",
        "recheck": "重新检测 Git Bash 安装",
+        "required": "在 Windows 上需要配置 Git Bash 路径",
        "title": "需要 Git Bash"
      },
      "found": {
@@ -51,7 +53,9 @@
        "invalidPath": "选择的文件不是有效的 Git Bash 可执行文件(bash.exe)。",
        "title": "选择 Git Bash 可执行文件"
      },
-      "success": "成功检测到 Git Bash!"
+      "placeholder": "选择 bash.exe 路径",
+      "success": "成功检测到 Git Bash!",
+      "tooltip": "在 Windows 上运行智能体需要 Git Bash。如果未安装,请从 git-scm.com 下载安装。"
    },
    "input": {
      "placeholder": "在这里输入消息,按 {{key}} 发送 - @ 选择路径, / 选择命令"
@@ -472,6 +476,7 @@
      "button": "导入",
      "error": {
        "fetch_failed": "从 URL 获取数据失败",
+        "file_required": "请先选择文件",
        "invalid_format": "无效的助手格式:缺少必填字段",
        "url_required": "请输入 URL"
      },
@@ -486,11 +491,14 @@
    },
    "manage": {
      "batch_delete": {
-        "button": "批量删除",
+        "button": "删除",
        "confirm": "确定要删除选中的 {{count}} 个助手吗?"
      },
+      "batch_export": {
+        "button": "导出"
+      },
      "mode": {
-        "delete": "删除",
+        "manage": "管理",
        "sort": "排序"
      },
      "title": "管理助手"
@@ -540,13 +548,23 @@
      "more": "助手设置",
      "prompt": "提示词设置",
      "reasoning_effort": {
+        "auto": "自动",
+        "auto_description": "灵活决定推理力度",
        "default": "默认",
+        "default_description": "依赖模型默认行为,不作任何配置",
        "high": "沉思",
+        "high_description": "高强度推理",
        "label": "思维链长度",
        "low": "浮想",
+        "low_description": "低强度推理",
        "medium": "斟酌",
+        "medium_description": "中强度推理",
        "minimal": "微念",
-        "off": "关闭"
+        "minimal_description": "最小程度的推理",
+        "off": "关闭",
+        "off_description": "禁用推理",
+        "xhigh": "穷究",
+        "xhigh_description": "超高强度推理"
      },
      "regular_phrases": {
        "add": "添加短语",
@@ -1026,6 +1044,29 @@
      "yuque": "导出到语雀"
    },
    "list": "话题列表",
+    "manage": {
+      "clear_selection": "取消选择",
+      "delete": {
+        "confirm": {
+          "content": "确定要删除选中的 {{count}} 个话题吗?此操作不可撤销。",
+          "title": "删除话题"
+        },
+        "success": "已删除 {{count}} 个话题"
+      },
+      "deselect_all": "取消全选",
+      "error": {
+        "at_least_one": "至少需要保留一个话题"
+      },
+      "move": {
+        "button": "移动",
+        "placeholder": "选择目标助手",
+        "success": "已移动 {{count}} 个话题"
+      },
+      "pinned": "已固定的话题",
+      "selected_count": "已选择 {{count}} 个",
+      "title": "管理话题",
+      "unpinned": "未固定的话题"
+    },
    "move_to": "移动到",
    "new": "开始新对话",
    "pin": "固定话题",
@@ -1036,6 +1077,10 @@
      "label": "话题提示词",
      "tips": "话题提示词:针对当前话题提供额外的补充提示词"
    },
+    "search": {
+      "placeholder": "搜索话题...",
+      "title": "搜索"
+    },
    "title": "话题",
    "unpin": "取消固定"
  },
@@ -1220,11 +1265,13 @@
      }
    },
    "stop": "停止",
+    "subscribe": "订阅",
    "success": "成功",
    "swap": "交换",
    "topics": "话题",
    "unknown": "未知",
    "unnamed": "未命名",
+    "unsubscribe": "退订",
    "update_success": "更新成功",
    "upload_files": "上传文件",
    "warning": "警告",
@@ -1719,7 +1766,7 @@
      "import": {
        "error": "导入失败"
      },
-      "imported": "导入成功"
+      "imported": "成功导入 {{count}} 个助手"
    },
    "api": {
      "check": {
@@ -2151,6 +2198,7 @@
    "collapse": "收起",
    "content_placeholder": "请输入笔记内容...",
    "copyContent": "复制内容",
+    "crossPlatformRestoreWarning": "检测到从其他设备恢复配置,但笔记目录为空。请将笔记文件复制到: {{path}}",
    "delete": "删除",
    "delete_confirm": "确定要删除这个{{type}}吗?",
    "delete_folder_confirm": "确定要删除文件夹 \"{{name}}\" 及其所有内容吗?",
@@ -2595,6 +2643,7 @@
    "lanyun": "蓝耘科技",
    "lmstudio": "LM Studio",
    "longcat": "龙猫",
+    "mimo": "Xiaomi MiMo",
    "minimax": "MiniMax",
    "mistral": "Mistral",
    "modelscope": "ModelScope 魔搭",
@@ -3883,6 +3932,7 @@
    "builtinServers": "内置服务器",
    "builtinServersDescriptions": {
      "brave_search": "一个集成了Brave 搜索 API 的 MCP 服务器实现,提供网页与本地搜索双重功能。需要配置 BRAVE_API_KEY 环境变量",
+      "browser": "通过 Chrome DevTools 协议控制隐藏的 Electron 窗口,支持打开 URL、执行单行 JS、重置会话",
      "didi_mcp": "一个集成了滴滴 MCP 服务器实现,提供网约车服务包括地图搜索、价格预估、订单管理和司机跟踪。仅支持中国大陆地区。需要配置 DIDI_API_KEY 环境变量",
      "dify_knowledge": "Dify 的 MCP 服务器实现,提供了一个简单的 API 来与 Dify 进行交互。需要配置 Dify Key",
      "fetch": "用于获取 URL 网页内容的 MCP 服务器",
@@ -3890,6 +3940,7 @@
      "mcp_auto_install": "自动安装 MCP 服务(测试版)",
      "memory": "基于本地知识图谱的持久性记忆基础实现。这使得模型能够在不同对话间记住用户的相关信息。需要配置 MEMORY_FILE_PATH 环境变量。",
      "no": "无描述",
+      "nowledge_mem": "需要本地运行 Nowledge Mem 应用。将 AI 对话、工具、笔记、智能体和文件保存在本地计算机的私有记忆中。请从 https://mem.nowledge.co/ 下载",
      "python": "在安全的沙盒环境中执行 Python 代码。使用 Pyodide 运行 Python,支持大多数标准库和科学计算包",
      "sequentialthinking": "一个 MCP 服务器实现,提供了通过结构化思维过程进行动态和反思性问题解决的工具"
    },
File diff suppressed because it is too large
@@ -32,6 +32,7 @@
  },
  "gitBash": {
  "autoDetected": "Automatisch ermitteltes Git Bash wird verwendet",
+ "autoDiscoveredHint": "[to be translated]:Auto-discovered",
  "clear": {
  "button": "Benutzerdefinierten Pfad löschen"
  },
@@ -39,6 +40,7 @@
  "error": {
  "description": "Git Bash ist erforderlich, um Agents unter Windows auszuführen. Der Agent kann ohne es nicht funktionieren. Bitte installieren Sie Git für Windows von",
  "recheck": "Überprüfe die Git Bash-Installation erneut",
+ "required": "[to be translated]:Git Bash path is required on Windows",
  "title": "Git Bash erforderlich"
  },
  "found": {
@@ -51,7 +53,9 @@
  "invalidPath": "Die ausgewählte Datei ist keine gültige Git Bash ausführbare Datei (bash.exe).",
  "title": "Git Bash ausführbare Datei auswählen"
  },
- "success": "Git Bash erfolgreich erkannt!"
+ "placeholder": "[to be translated]:Select bash.exe path",
+ "success": "Git Bash erfolgreich erkannt!",
+ "tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
  },
  "input": {
  "placeholder": "Gib hier deine Nachricht ein, senden mit {{key}} – @ Pfad auswählen, / Befehl auswählen"
@@ -472,6 +476,7 @@
  "button": "Importieren",
  "error": {
  "fetch_failed": "Daten von URL abrufen fehlgeschlagen",
+ "file_required": "Bitte wählen Sie zuerst eine Datei aus",
  "invalid_format": "Ungültiges Assistentenformat: Pflichtfelder fehlen",
  "url_required": "Bitte geben Sie eine URL ein"
  },
@@ -489,8 +494,11 @@
  "button": "Stapel löschen",
  "confirm": "Sind Sie sicher, dass Sie die ausgewählten {{count}} Assistenten löschen möchten?"
  },
+ "batch_export": {
+ "button": "Exportieren"
+ },
  "mode": {
- "delete": "Löschen",
+ "manage": "Verwalten",
  "sort": "Sortieren"
  },
  "title": "Assistenten verwalten"
@@ -540,13 +548,23 @@
  "more": "Assistenteneinstellungen",
  "prompt": "Prompt-Einstellungen",
  "reasoning_effort": {
+ "auto": "Auto",
+ "auto_description": "Denkaufwand flexibel bestimmen",
  "default": "Standard",
+ "default_description": "Vom Standardverhalten des Modells abhängen, ohne Konfiguration.",
  "high": "Tiefes Nachdenken",
+ "high_description": "Ganzheitliches Denken",
  "label": "Gedankenkettenlänge",
  "low": "Spontan",
+ "low_description": "Geringfügige Argumentation",
  "medium": "Überlegt",
+ "medium_description": "Denken auf mittlerem Niveau",
  "minimal": "Minimal",
- "off": "Aus"
+ "minimal_description": "Minimales Denken",
+ "off": "Aus",
+ "off_description": "Denken deaktivieren",
+ "xhigh": "Extra hoch",
+ "xhigh_description": "Extra hohes Denkvermögen"
  },
  "regular_phrases": {
  "add": "Phrase hinzufügen",
@@ -1026,6 +1044,29 @@
  "yuque": "Nach Yuque exportieren"
  },
  "list": "Themenliste",
+ "manage": {
+ "clear_selection": "Auswahl aufheben",
+ "delete": {
+ "confirm": {
+ "content": "Sind Sie sicher, dass Sie {{count}} ausgewähltes Thema bzw. ausgewählte Themen löschen möchten? Diese Aktion kann nicht rückgängig gemacht werden.",
+ "title": "Themen löschen"
+ },
+ "success": "{{count}} Thema/Themen gelöscht"
+ },
+ "deselect_all": "Alle abwählen",
+ "error": {
+ "at_least_one": "Mindestens ein Thema muss beibehalten werden"
+ },
+ "move": {
+ "button": "Bewegen",
+ "placeholder": "Ziel auswählen",
+ "success": "{{count}} Thema(s) verschoben"
+ },
+ "pinned": "Angepinnte Themen",
+ "selected_count": "{{count}} ausgewählt",
+ "title": "Themen verwalten",
+ "unpinned": "Losgelöste Themen"
+ },
  "move_to": "Verschieben nach",
  "new": "Neues Gespräch starten",
  "pin": "Thema anheften",
@@ -1036,6 +1077,10 @@
  "label": "Themen-Prompt",
  "tips": "Themen-Prompt: Bietet zusätzliche ergänzende Prompts für das aktuelle Thema"
  },
+ "search": {
+ "placeholder": "Themen durchsuchen...",
+ "title": "Suche"
+ },
  "title": "Thema",
  "unpin": "Anheften aufheben"
  },
@@ -1220,11 +1265,13 @@
  }
  },
  "stop": "Stoppen",
+ "subscribe": "Abonnieren",
  "success": "Erfolgreich",
  "swap": "Tauschen",
  "topics": "Themen",
  "unknown": "Unbekannt",
  "unnamed": "Unbenannt",
+ "unsubscribe": "Abmelden",
  "update_success": "Erfolgreich aktualisiert",
  "upload_files": "Dateien hochladen",
  "warning": "Warnung",
@@ -2151,6 +2198,7 @@
  "collapse": "Einklappen",
  "content_placeholder": "Bitte Notizinhalt eingeben...",
  "copyContent": "Inhalt kopieren",
+ "crossPlatformRestoreWarning": "[to be translated]:Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
  "delete": "Löschen",
  "delete_confirm": "Möchten Sie diesen {{type}} wirklich löschen?",
  "delete_folder_confirm": "Möchten Sie Ordner \"{{name}}\" und alle seine Inhalte wirklich löschen?",
@@ -2595,6 +2643,7 @@
  "lanyun": "Lanyun Technologie",
  "lmstudio": "LM Studio",
  "longcat": "Meißner Riesenhamster",
+ "mimo": "[to be translated]:Xiaomi MiMo",
  "minimax": "MiniMax",
  "mistral": "Mistral",
  "modelscope": "ModelScope",
@@ -3883,6 +3932,7 @@
  "builtinServers": "Integrierter Server",
  "builtinServersDescriptions": {
  "brave_search": "MCP-Server-Implementierung mit Brave-Search-API, die sowohl Web- als auch lokale Suchfunktionen bietet. BRAVE_API_KEY-Umgebungsvariable muss konfiguriert werden",
+ "browser": "Steuert ein headless Electron-Fenster über das Chrome DevTools Protocol. Tools: URL öffnen, einzeiligen JS ausführen, Sitzung zurücksetzen.",
  "didi_mcp": "An integrated Didi MCP server implementation that provides ride-hailing services including map search, price estimation, order management, and driver tracking. Only available in mainland China. Requires the DIDI_API_KEY environment variable to be configured.",
  "dify_knowledge": "MCP-Server-Implementierung von Dify, die einen einfachen API-Zugriff auf Dify bietet. Dify Key muss konfiguriert werden",
  "fetch": "MCP-Server zum Abrufen von Webseiteninhalten",
@@ -3890,6 +3940,7 @@
  "mcp_auto_install": "MCP-Service automatisch installieren (Beta-Version)",
  "memory": "MCP-Server mit persistenter Erinnerungsbasis auf lokalem Wissensgraphen, der Informationen über verschiedene Dialoge hinweg speichert. MEMORY_FILE_PATH-Umgebungsvariable muss konfiguriert werden",
  "no": "Keine Beschreibung",
+ "nowledge_mem": "Erfordert lokal laufende Nowledge Mem App. Speichert KI-Chats, Tools, Notizen, Agenten und Dateien in einem privaten Speicher auf Ihrem Computer. Download unter https://mem.nowledge.co/",
  "python": "Python-Code in einem sicheren Sandbox-Umgebung ausführen. Verwendung von Pyodide für Python, Unterstützung für die meisten Standardbibliotheken und wissenschaftliche Pakete",
  "sequentialthinking": "MCP-Server-Implementierung mit strukturiertem Denkprozess, der dynamische und reflektierende Problemlösungen ermöglicht"
  },

@@ -31,27 +31,31 @@
  }
  },
  "gitBash": {
- "autoDetected": "[to be translated]:Using auto-detected Git Bash",
+ "autoDetected": "Χρησιμοποιείται αυτόματα εντοπισμένο Git Bash",
+ "autoDiscoveredHint": "[to be translated]:Auto-discovered",
  "clear": {
- "button": "[to be translated]:Clear custom path"
+ "button": "Διαγραφή προσαρμοσμένης διαδρομής"
  },
- "customPath": "[to be translated]:Using custom path: {{path}}",
+ "customPath": "Χρησιμοποιείται προσαρμοσμένη διαδρομή: {{path}}",
  "error": {
  "description": "Το Git Bash απαιτείται για την εκτέλεση πρακτόρων στα Windows. Ο πράκτορας δεν μπορεί να λειτουργήσει χωρίς αυτό. Παρακαλούμε εγκαταστήστε το Git για Windows από",
  "recheck": "Επανέλεγχος Εγκατάστασης του Git Bash",
+ "required": "[to be translated]:Git Bash path is required on Windows",
  "title": "Απαιτείται Git Bash"
  },
  "found": {
- "title": "[to be translated]:Git Bash configured"
+ "title": "Το Git Bash διαμορφώθηκε"
  },
  "notFound": "Το Git Bash δεν βρέθηκε. Παρακαλώ εγκαταστήστε το πρώτα.",
  "pick": {
- "button": "[to be translated]:Select Git Bash Path",
+ "button": "Επιλογή διαδρομής Git Bash",
- "failed": "[to be translated]:Failed to set Git Bash path",
+ "failed": "Αποτυχία ορισμού διαδρομής Git Bash",
- "invalidPath": "[to be translated]:Selected file is not a valid Git Bash executable (bash.exe).",
+ "invalidPath": "Το επιλεγμένο αρχείο δεν είναι έγκυρο εκτελέσιμο Git Bash (bash.exe).",
- "title": "[to be translated]:Select Git Bash executable"
+ "title": "Επιλογή εκτελέσιμου Git Bash"
  },
- "success": "Το Git Bash εντοπίστηκε με επιτυχία!"
+ "placeholder": "[to be translated]:Select bash.exe path",
+ "success": "Το Git Bash εντοπίστηκε με επιτυχία!",
+ "tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
  },
  "input": {
  "placeholder": "Εισάγετε το μήνυμά σας εδώ, στείλτε με {{key}} - @ επιλέξτε διαδρομή, / επιλέξτε εντολή"
@@ -472,6 +476,7 @@
  "button": "Εισαγωγή",
  "error": {
  "fetch_failed": "Αποτυχία λήψης δεδομένων από το URL",
+ "file_required": "Παρακαλώ επιλέξτε πρώτα ένα αρχείο",
  "invalid_format": "Μη έγκυρη μορφή βοηθού: λείπουν υποχρεωτικά πεδία",
  "url_required": "Παρακαλώ εισάγετε ένα URL"
  },
@@ -489,8 +494,11 @@
  "button": "Μαζική Διαγραφή",
  "confirm": "Είστε βέβαιοι ότι θέλετε να διαγράψετε τους επιλεγμένους {{count}} βοηθούς;"
  },
+ "batch_export": {
+ "button": "Εξαγωγή"
+ },
  "mode": {
- "delete": "Διαγραφή",
+ "manage": "Διαχειριστείτε",
  "sort": "Ταξινόμηση"
  },
  "title": "Διαχείριση βοηθών"
@@ -540,13 +548,23 @@
  "more": "Ρυθμίσεις Βοηθού",
  "prompt": "Ρυθμίσεις προκαλύμματος",
  "reasoning_effort": {
+ "auto": "Αυτοκίνητο",
+ "auto_description": "Ευέλικτος καθορισμός της προσπάθειας συλλογισμού",
  "default": "Προεπιλογή",
+ "default_description": "Εξαρτηθείτε από την προεπιλεγμένη συμπεριφορά του μοντέλου, χωρίς καμία διαμόρφωση.",
  "high": "Μεγάλο",
+ "high_description": "Υψηλού επιπέδου συλλογισμός",
  "label": "Μήκος λογισμικού αλυσίδας",
  "low": "Μικρό",
+ "low_description": "Χαμηλού επιπέδου συλλογιστική",
  "medium": "Μεσαίο",
+ "medium_description": "Αιτιολόγηση μεσαίου επιπέδου",
  "minimal": "ελάχιστος",
- "off": "Απενεργοποίηση"
+ "minimal_description": "Ελάχιστος συλλογισμός",
+ "off": "Απενεργοποίηση",
+ "off_description": "Απενεργοποίηση λογικής",
+ "xhigh": "Εξαιρετικά Υψηλή",
+ "xhigh_description": "Εξαιρετικά υψηλού επιπέδου συλλογισμός"
  },
  "regular_phrases": {
  "add": "Προσθήκη φράσης",
@@ -1026,6 +1044,29 @@
  "yuque": "Εξαγωγή στο Yuque"
  },
  "list": "Λίστα θεμάτων",
+ "manage": {
+ "clear_selection": "Καθαρισμός Επιλογής",
+ "delete": {
+ "confirm": {
+ "content": "Είσαι βέβαιος ότι θέλεις να διαγράψεις {{count}} επιλεγμένο(α) θέμα(τα); Αυτή η ενέργεια δεν μπορεί να αναιρεθεί.",
+ "title": "Διαγραφή Θεμάτων"
+ },
+ "success": "Διαγράφηκαν {{count}} θέμα(τα)"
+ },
+ "deselect_all": "Αποεπιλογή όλων",
+ "error": {
+ "at_least_one": "Τουλάχιστον ένα θέμα πρέπει να διατηρηθεί"
+ },
+ "move": {
+ "button": "Μετακίνηση",
+ "placeholder": "Επιλέξτε στόχο",
+ "success": "Μετακινήθηκαν {{count}} θέματα"
+ },
+ "pinned": "Καρφιτσωμένα Θέματα",
+ "selected_count": "{{count}} επιλεγμένα",
+ "title": "Διαχείριση Θεμάτων",
+ "unpinned": "Ξεκαρφωμένα Θέματα"
+ },
  "move_to": "Μετακίνηση στο",
  "new": "Ξεκινήστε νέα συζήτηση",
  "pin": "Σταθερά θέματα",
@@ -1036,6 +1077,10 @@
  "label": "Προσδοκώμενα όρια",
  "tips": "Προσδοκώμενα όρια: προσθέτει επιπλέον επιστημονικές προσθήκες για το παρόν θέμα"
  },
+ "search": {
+ "placeholder": "Αναζήτηση θεμάτων...",
+ "title": "Αναζήτηση"
+ },
  "title": "Θέματα",
  "unpin": "Ξεκαρφίτσωμα"
  },
@@ -1220,11 +1265,13 @@
  }
  },
  "stop": "σταματήστε",
+ "subscribe": "Εγγραφείτε",
  "success": "Επιτυχία",
  "swap": "Εναλλαγή",
  "topics": "Θέματα",
  "unknown": "Άγνωστο",
  "unnamed": "Χωρίς όνομα",
+ "unsubscribe": "Απεγγραφή",
  "update_success": "Επιτυχής ενημέρωση",
  "upload_files": "Ανέβασμα αρχείου",
  "warning": "Προσοχή",
@@ -2151,6 +2198,7 @@
  "collapse": "σύμπτυξη",
  "content_placeholder": "Παρακαλώ εισαγάγετε το περιεχόμενο των σημειώσεων...",
  "copyContent": "αντιγραφή περιεχομένου",
+ "crossPlatformRestoreWarning": "[to be translated]:Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
  "delete": "διαγραφή",
  "delete_confirm": "Είστε βέβαιοι ότι θέλετε να διαγράψετε αυτό το {{type}};",
  "delete_folder_confirm": "Θέλετε να διαγράψετε τον φάκελο «{{name}}» και όλο το περιεχόμενό του;",
@@ -2595,6 +2643,7 @@
  "lanyun": "Λανιούν Τεχνολογία",
  "lmstudio": "LM Studio",
  "longcat": "Τσίρο",
+ "mimo": "[to be translated]:Xiaomi MiMo",
  "minimax": "MiniMax",
  "mistral": "Mistral",
  "modelscope": "ModelScope Magpie",
@@ -3883,6 +3932,7 @@
  "builtinServers": "Ενσωματωμένοι Διακομιστές",
  "builtinServersDescriptions": {
  "brave_search": "μια εφαρμογή διακομιστή MCP που ενσωματώνει το Brave Search API, παρέχοντας δυνατότητες αναζήτησης στον ιστό και τοπικής αναζήτησης. Απαιτείται η ρύθμιση της μεταβλητής περιβάλλοντος BRAVE_API_KEY",
+ "browser": "Ελέγχει ένα headless παράθυρο Electron μέσω του Chrome DevTools Protocol. Εργαλεία: άνοιγμα URL, εκτέλεση JS μίας γραμμής, επαναφορά συνεδρίας.",
  "didi_mcp": "Διακομιστής DiDi MCP που παρέχει υπηρεσίες μεταφοράς συμπεριλαμβανομένης της αναζήτησης χαρτών, εκτίμησης τιμών, διαχείρισης παραγγελιών και παρακολούθησης οδηγών. Διαθέσιμο μόνο στην ηπειρωτική Κίνα. Απαιτεί διαμόρφωση της μεταβλητής περιβάλλοντος DIDI_API_KEY",
  "dify_knowledge": "Η υλοποίηση του Dify για τον διακομιστή MCP, παρέχει μια απλή API για να αλληλεπιδρά με το Dify. Απαιτείται η ρύθμιση του κλειδιού Dify",
  "fetch": "Εξυπηρετητής MCP για λήψη περιεχομένου ιστοσελίδας URL",
@@ -3890,6 +3940,7 @@
  "mcp_auto_install": "Αυτόματη εγκατάσταση υπηρεσίας MCP (προβολή)",
  "memory": "Βασική υλοποίηση μόνιμης μνήμης με βάση τοπικό γράφημα γνώσης. Αυτό επιτρέπει στο μοντέλο να θυμάται πληροφορίες σχετικές με τον χρήστη ανάμεσα σε διαφορετικές συνομιλίες. Απαιτείται η ρύθμιση της μεταβλητής περιβάλλοντος MEMORY_FILE_PATH.",
  "no": "Χωρίς περιγραφή",
+ "nowledge_mem": "[to be translated]:Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
  "python": "Εκτελέστε κώδικα Python σε ένα ασφαλές περιβάλλον sandbox. Χρησιμοποιήστε το Pyodide για να εκτελέσετε Python, υποστηρίζοντας την πλειονότητα των βιβλιοθηκών της τυπικής βιβλιοθήκης και των πακέτων επιστημονικού υπολογισμού",
  "sequentialthinking": "ένας εξυπηρετητής MCP που υλοποιείται, παρέχοντας εργαλεία για δυναμική και αναστοχαστική επίλυση προβλημάτων μέσω δομημένων διαδικασιών σκέψης"
  },

@@ -32,6 +32,7 @@
  },
  "gitBash": {
  "autoDetected": "Usando Git Bash detectado automáticamente",
+ "autoDiscoveredHint": "[to be translated]:Auto-discovered",
  "clear": {
  "button": "Borrar ruta personalizada"
  },
@@ -39,6 +40,7 @@
  "error": {
  "description": "Se requiere Git Bash para ejecutar agentes en Windows. El agente no puede funcionar sin él. Instale Git para Windows desde",
  "recheck": "Volver a verificar la instalación de Git Bash",
+ "required": "[to be translated]:Git Bash path is required on Windows",
  "title": "Git Bash Requerido"
  },
  "found": {
@@ -51,7 +53,9 @@
  "invalidPath": "El archivo seleccionado no es un ejecutable válido de Git Bash (bash.exe).",
  "title": "Seleccionar ejecutable de Git Bash"
  },
- "success": "¡Git Bash detectado con éxito!"
+ "placeholder": "[to be translated]:Select bash.exe path",
+ "success": "¡Git Bash detectado con éxito!",
+ "tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
  },
  "input": {
  "placeholder": "Introduce tu mensaje aquí, envía con {{key}} - @ seleccionar ruta, / seleccionar comando"
@@ -472,6 +476,7 @@
  "button": "Importar",
  "error": {
  "fetch_failed": "Error al obtener datos desde la URL",
+ "file_required": "Por favor, selecciona primero un archivo",
  "invalid_format": "Formato de asistente inválido: faltan campos obligatorios",
  "url_required": "Por favor introduce una URL"
  },
@@ -489,8 +494,11 @@
  "button": "Eliminación por lotes",
  "confirm": "¿Estás seguro de que quieres eliminar los {{count}} asistentes seleccionados?"
  },
+ "batch_export": {
+ "button": "Exportar"
+ },
  "mode": {
- "delete": "Eliminar",
+ "manage": "Gestionar",
  "sort": "Ordenar"
  },
  "title": "Gestionar asistentes"
@@ -540,13 +548,23 @@
  "more": "Configuración del Asistente",
  "prompt": "Configuración de Palabras Clave",
  "reasoning_effort": {
+ "auto": "Automóvil",
+ "auto_description": "Determinar flexiblemente el esfuerzo de razonamiento",
  "default": "Por defecto",
+ "default_description": "Depender del comportamiento predeterminado del modelo, sin ninguna configuración.",
  "high": "Largo",
+ "high_description": "Razonamiento de alto nivel",
  "label": "Longitud de Cadena de Razonamiento",
  "low": "Corto",
+ "low_description": "Razonamiento de bajo nivel",
  "medium": "Medio",
+ "medium_description": "Razonamiento de nivel medio",
  "minimal": "minimal",
- "off": "Apagado"
+ "minimal_description": "Razonamiento mínimo",
+ "off": "Apagado",
+ "off_description": "Deshabilitar razonamiento",
+ "xhigh": "Extra Alta",
+ "xhigh_description": "Razonamiento de extra alto nivel"
  },
  "regular_phrases": {
  "add": "Agregar frase",
@@ -1026,6 +1044,29 @@
  "yuque": "Exportar a Yuque"
  },
  "list": "Lista de temas",
+ "manage": {
+ "clear_selection": "Borrar selección",
+ "delete": {
+ "confirm": {
+ "content": "¿Estás seguro de que quieres eliminar {{count}} tema(s) seleccionado(s)? Esta acción no se puede deshacer.",
+ "title": "Eliminar Temas"
+ },
+ "success": "Eliminado(s) {{count}} tema(s)"
+ },
+ "deselect_all": "Deseleccionar todo",
+ "error": {
+ "at_least_one": "Al menos se debe mantener un tema."
+ },
+ "move": {
+ "button": "Mover",
+ "placeholder": "Seleccionar asistente de destino",
+ "success": "Movido(s) {{count}} tema(s)"
+ },
+ "pinned": "Temas Fijados",
+ "selected_count": "{{count}} seleccionado",
+ "title": "Administrar Temas",
+ "unpinned": "Temas no fijados"
+ },
  "move_to": "Mover a",
  "new": "Iniciar nueva conversación",
  "pin": "Fijar tema",
@@ -1036,6 +1077,10 @@
  "label": "Palabras clave del tema",
  "tips": "Palabras clave del tema: proporcionar indicaciones adicionales para el tema actual"
  },
+ "search": {
+ "placeholder": "Buscar temas...",
+ "title": "Buscar"
+ },
  "title": "Tema",
  "unpin": "Quitar fijación"
  },
@@ -1220,11 +1265,13 @@
  }
  },
  "stop": "Detener",
+ "subscribe": "Suscribirse",
  "success": "Éxito",
  "swap": "Intercambiar",
  "topics": "Temas",
  "unknown": "Desconocido",
  "unnamed": "Sin nombre",
+ "unsubscribe": "Cancelar suscripción",
  "update_success": "Actualización exitosa",
  "upload_files": "Subir archivo",
  "warning": "Advertencia",
@@ -2151,6 +2198,7 @@
  "collapse": "ocultar",
  "content_placeholder": "Introduzca el contenido de la nota...",
  "copyContent": "copiar contenido",
+ "crossPlatformRestoreWarning": "[to be translated]:Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
  "delete": "eliminar",
  "delete_confirm": "¿Estás seguro de que deseas eliminar este {{type}}?",
  "delete_folder_confirm": "¿Está seguro de que desea eliminar la carpeta \"{{name}}\" y todo su contenido?",
@@ -2595,6 +2643,7 @@
  "lanyun": "Tecnología Lanyun",
  "lmstudio": "Estudio LM",
  "longcat": "Totoro",
+ "mimo": "[to be translated]:Xiaomi MiMo",
  "minimax": "Minimax",
  "mistral": "Mistral",
  "modelscope": "ModelScope Módulo",
@@ -3883,6 +3932,7 @@
  "builtinServers": "Servidores integrados",
  "builtinServersDescriptions": {
  "brave_search": "Una implementación de servidor MCP que integra la API de búsqueda de Brave, proporcionando funciones de búsqueda web y búsqueda local. Requiere configurar la variable de entorno BRAVE_API_KEY",
+ "browser": "Controla una ventana Electron headless mediante Chrome DevTools Protocol. Herramientas: abrir URL, ejecutar JS de una línea, reiniciar sesión.",
  "didi_mcp": "Servidor DiDi MCP que proporciona servicios de transporte incluyendo búsqueda de mapas, estimación de precios, gestión de pedidos y seguimiento de conductores. Disponible solo en China Continental. Requiere configurar la variable de entorno DIDI_API_KEY",
  "dify_knowledge": "Implementación del servidor MCP de Dify, que proporciona una API sencilla para interactuar con Dify. Se requiere configurar la clave de Dify.",
  "fetch": "Servidor MCP para obtener el contenido de la página web de una URL",
@@ -3890,6 +3940,7 @@
  "mcp_auto_install": "Instalación automática del servicio MCP (versión beta)",
  "memory": "Implementación básica de memoria persistente basada en un grafo de conocimiento local. Esto permite que el modelo recuerde información relevante del usuario entre diferentes conversaciones. Es necesario configurar la variable de entorno MEMORY_FILE_PATH.",
  "no": "sin descripción",
+ "nowledge_mem": "[to be translated]:Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
  "python": "Ejecuta código Python en un entorno sandbox seguro. Usa Pyodide para ejecutar Python, compatible con la mayoría de las bibliotecas estándar y paquetes de cálculo científico.",
  "sequentialthinking": "Una implementación de servidor MCP que proporciona herramientas para la resolución dinámica y reflexiva de problemas mediante un proceso de pensamiento estructurado"
  },

@@ -32,6 +32,7 @@
  },
  "gitBash": {
  "autoDetected": "Utilisation de Git Bash détecté automatiquement",
+ "autoDiscoveredHint": "[to be translated]:Auto-discovered",
  "clear": {
  "button": "Effacer le chemin personnalisé"
  },
@@ -39,6 +40,7 @@
  "error": {
  "description": "Git Bash est requis pour exécuter des agents sur Windows. L'agent ne peut pas fonctionner sans. Veuillez installer Git pour Windows depuis",
  "recheck": "Revérifier l'installation de Git Bash",
+ "required": "[to be translated]:Git Bash path is required on Windows",
  "title": "Git Bash requis"
  },
  "found": {
@@ -51,7 +53,9 @@
  "invalidPath": "Le fichier sélectionné n'est pas un exécutable Git Bash valide (bash.exe).",
  "title": "Sélectionner l'exécutable Git Bash"
  },
- "success": "Git Bash détecté avec succès !"
+ "placeholder": "[to be translated]:Select bash.exe path",
+ "success": "Git Bash détecté avec succès !",
+ "tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
  },
  "input": {
  "placeholder": "Entrez votre message ici, envoyez avec {{key}} - @ sélectionner le chemin, / sélectionner la commande"
@@ -472,6 +476,7 @@
  "button": "Importer",
  "error": {
  "fetch_failed": "Échec de la récupération des données depuis l'URL",
+ "file_required": "Veuillez d'abord sélectionner un fichier",
  "invalid_format": "Format d'assistant invalide : champs obligatoires manquants",
  "url_required": "Veuillez saisir une URL"
  },
@@ -489,8 +494,11 @@
  "button": "Suppression par lot",
  "confirm": "Êtes-vous sûr de vouloir supprimer les {{count}} assistants sélectionnés ?"
  },
+ "batch_export": {
+ "button": "Exporter"
+ },
  "mode": {
- "delete": "Supprimer",
+ "manage": "Gérer",
  "sort": "Trier"
  },
  "title": "Gérer les assistants"
@@ -540,13 +548,23 @@
  "more": "Paramètres de l'assistant",
  "prompt": "Paramètres de l'invite",
  "reasoning_effort": {
+ "auto": "Auto",
+ "auto_description": "Déterminer de manière flexible l'effort de raisonnement",
  "default": "Par défaut",
+ "default_description": "Dépendre du comportement par défaut du modèle, sans aucune configuration.",
  "high": "Long",
+ "high_description": "Raisonnement de haut niveau",
  "label": "Longueur de la chaîne de raisonnement",
  "low": "Court",
+ "low_description": "Raisonnement de bas niveau",
  "medium": "Moyen",
+ "medium_description": "Raisonnement de niveau moyen",
  "minimal": "minimal",
- "off": "Off"
+ "minimal_description": "Réflexion minimale",
+ "off": "Off",
+ "off_description": "Désactiver le raisonnement",
+ "xhigh": "Très élevée",
+ "xhigh_description": "Raisonnement de très haut niveau"
  },
  "regular_phrases": {
  "add": "Добавить фразу",
@@ -1026,6 +1044,29 @@
  "yuque": "Exporter vers Yuque"
  },
  "list": "Liste des sujets",
+ "manage": {
+ "clear_selection": "Effacer la sélection",
+ "delete": {
+ "confirm": {
+ "content": "Êtes-vous sûr de vouloir supprimer {{count}} sujet(s) sélectionné(s) ? Cette action est irréversible.",
+ "title": "Supprimer des sujets"
+ },
+ "success": "Supprimé {{count}} sujet(s)"
+ },
+ "deselect_all": "Tout désélectionner",
+ "error": {
+ "at_least_one": "Au moins un sujet doit être conservé"
+ },
+ "move": {
+ "button": "Déplacer",
+ "placeholder": "Sélectionner la cible",
+ "success": "Déplacé {{count}} sujet(s)"
+ },
+ "pinned": "Sujets épinglés",
+ "selected_count": "{{count}} sélectionné",
+ "title": "Gérer les sujets",
+ "unpinned": "Sujets non épinglés"
+ },
  "move_to": "Déplacer vers",
  "new": "Commencer une nouvelle conversation",
  "pin": "Fixer le sujet",
@@ -1036,6 +1077,10 @@
  "label": "Indicateurs de sujet",
  "tips": "Indicateurs de sujet : fournir des indications supplémentaires pour le sujet actuel"
  },
+ "search": {
+ "placeholder": "Rechercher des sujets...",
+ "title": "Rechercher"
+ },
  "title": "Sujet",
  "unpin": "Annuler le fixage"
  },
@@ -1220,11 +1265,13 @@
  }
  },
  "stop": "Arrêter",
+ "subscribe": "S'abonner",
  "success": "Succès",
  "swap": "Échanger",
  "topics": "Sujets",
  "unknown": "Inconnu",
  "unnamed": "Sans nom",
+ "unsubscribe": "Se désabonner",
  "update_success": "Mise à jour réussie",
  "upload_files": "Uploader des fichiers",
  "warning": "Avertissement",
@@ -2151,6 +2198,7 @@
  "collapse": "réduire",
  "content_placeholder": "Veuillez saisir le contenu de la note...",
  "copyContent": "contenu copié",
+ "crossPlatformRestoreWarning": "[to be translated]:Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
  "delete": "supprimer",
  "delete_confirm": "Êtes-vous sûr de vouloir supprimer ce {{type}} ?",
  "delete_folder_confirm": "Êtes-vous sûr de vouloir supprimer le dossier \"{{name}}\" et tout son contenu ?",
@@ -2595,6 +2643,7 @@
  "lanyun": "Technologie Lan Yun",
  "lmstudio": "Studio LM",
  "longcat": "Mon voisin Totoro",
+ "mimo": "[to be translated]:Xiaomi MiMo",
  "minimax": "MiniMax",
  "mistral": "Mistral",
  "modelscope": "ModelScope MoDa",
@@ -3883,6 +3932,7 @@
  "builtinServers": "Serveurs intégrés",
  "builtinServersDescriptions": {
  "brave_search": "Une implémentation de serveur MCP intégrant l'API de recherche Brave, offrant des fonctionnalités de recherche web et locale. Nécessite la configuration de la variable d'environnement BRAVE_API_KEY",
+ "browser": "Contrôle une fenêtre Electron headless via Chrome DevTools Protocol. Outils : ouvrir une URL, exécuter du JS en une ligne, réinitialiser la session.",
  "didi_mcp": "Serveur DiDi MCP fournissant des services de transport incluant la recherche de cartes, l'estimation des prix, la gestion des commandes et le suivi des conducteurs. Disponible uniquement en Chine continentale. Nécessite la configuration de la variable d'environnement DIDI_API_KEY",
  "dify_knowledge": "Implémentation du serveur MCP de Dify, fournissant une API simple pour interagir avec Dify. Nécessite la configuration de la clé Dify",
  "fetch": "serveur MCP utilisé pour récupérer le contenu des pages web URL",
@@ -3890,6 +3940,7 @@
  "mcp_auto_install": "Installation automatique du service MCP (version bêta)",
  "memory": "Implémentation de base de mémoire persistante basée sur un graphe de connaissances local. Cela permet au modèle de se souvenir des informations relatives à l'utilisateur entre différentes conversations. Nécessite la configuration de la variable d'environnement MEMORY_FILE_PATH.",
  "no": "sans description",
+ "nowledge_mem": "[to be translated]:Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
  "python": "Exécutez du code Python dans un environnement bac à sable sécurisé. Utilisez Pyodide pour exécuter Python, prenant en charge la plupart des bibliothèques standard et des packages de calcul scientifique.",
  "sequentialthinking": "Un serveur MCP qui fournit des outils permettant une résolution dynamique et réflexive des problèmes à travers un processus de pensée structuré"
  },

@ -31,27 +31,31 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"gitBash": {
|
"gitBash": {
|
||||||
"autoDetected": "[to be translated]:Using auto-detected Git Bash",
|
"autoDetected": "自動検出されたGit Bashを使用中",
|
||||||
|
"autoDiscoveredHint": "[to be translated]:Auto-discovered",
|
||||||
"clear": {
|
"clear": {
|
||||||
"button": "[to be translated]:Clear custom path"
|
"button": "カスタムパスをクリア"
|
||||||
},
|
},
|
||||||
"customPath": "[to be translated]:Using custom path: {{path}}",
|
"customPath": "カスタムパスを使用中: {{path}}",
|
||||||
"error": {
|
"error": {
|
||||||
"description": "Windowsでエージェントを実行するにはGit Bashが必要です。これがないとエージェントは動作しません。以下からGit for Windowsをインストールしてください。",
|
"description": "Windowsでエージェントを実行するにはGit Bashが必要です。これがないとエージェントは動作しません。以下からGit for Windowsをインストールしてください。",
|
||||||
"recheck": "Git Bashのインストールを再確認してください",
|
"recheck": "Git Bashのインストールを再確認してください",
|
||||||
|
"required": "[to be translated]:Git Bash path is required on Windows",
|
||||||
"title": "Git Bashが必要です"
|
"title": "Git Bashが必要です"
|
||||||
},
|
},
|
||||||
"found": {
|
"found": {
|
||||||
"title": "[to be translated]:Git Bash configured"
|
"title": "Git Bashが設定されました"
|
||||||
},
|
},
|
||||||
"notFound": "Git Bash が見つかりません。先にインストールしてください。",
|
"notFound": "Git Bash が見つかりません。先にインストールしてください。",
|
||||||
"pick": {
|
"pick": {
|
||||||
"button": "[to be translated]:Select Git Bash Path",
|
"button": "Git Bashパスを選択",
|
||||||
"failed": "[to be translated]:Failed to set Git Bash path",
|
"failed": "Git Bashパスの設定に失敗しました",
|
||||||
"invalidPath": "[to be translated]:Selected file is not a valid Git Bash executable (bash.exe).",
|
"invalidPath": "選択されたファイルは有効なGit Bash実行ファイル(bash.exe)ではありません。",
|
||||||
"title": "[to be translated]:Select Git Bash executable"
|
"title": "Git Bash実行ファイルを選択"
|
||||||
},
|
},
|
||||||
"success": "Git Bashが正常に検出されました!"
|
"placeholder": "[to be translated]:Select bash.exe path",
|
||||||
|
"success": "Git Bashが正常に検出されました!",
|
||||||
|
"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
|
||||||
},
|
},
|
||||||
"input": {
|
"input": {
|
||||||
"placeholder": "メッセージをここに入力し、{{key}}で送信 - @でパスを選択、/でコマンドを選択"
|
"placeholder": "メッセージをここに入力し、{{key}}で送信 - @でパスを選択、/でコマンドを選択"
|
||||||
@ -472,6 +476,7 @@
|
|||||||
"button": "インポート",
|
"button": "インポート",
|
||||||
"error": {
|
"error": {
|
||||||
"fetch_failed": "URLからのデータ取得に失敗しました",
|
"fetch_failed": "URLからのデータ取得に失敗しました",
|
||||||
|
"file_required": "まずファイルを選択してください",
|
||||||
"invalid_format": "無効なアシスタント形式:必須フィールドが不足しています",
|
"invalid_format": "無効なアシスタント形式:必須フィールドが不足しています",
|
||||||
"url_required": "URLを入力してください"
|
"url_required": "URLを入力してください"
|
||||||
},
|
},
|
||||||
@ -489,8 +494,11 @@
|
|||||||
"button": "バッチ削除",
|
"button": "バッチ削除",
|
||||||
"confirm": "選択した{{count}}件のアシスタントを削除してもよろしいですか?"
|
"confirm": "選択した{{count}}件のアシスタントを削除してもよろしいですか?"
|
||||||
},
|
},
|
||||||
|
"batch_export": {
|
||||||
|
"button": "エクスポート"
|
||||||
|
},
|
||||||
"mode": {
|
"mode": {
|
||||||
"delete": "削除",
|
"manage": "管理",
|
||||||
"sort": "並べ替え"
|
"sort": "並べ替え"
|
||||||
},
|
},
|
||||||
"title": "アシスタントを管理"
|
"title": "アシスタントを管理"
|
||||||
@ -540,13 +548,23 @@
|
|||||||
"more": "アシスタント設定",
|
"more": "アシスタント設定",
|
||||||
"prompt": "プロンプト設定",
|
"prompt": "プロンプト設定",
|
||||||
"reasoning_effort": {
|
"reasoning_effort": {
|
||||||
|
"auto": "自動",
|
||||||
|
"auto_description": "推論にかける労力を柔軟に調整する",
|
||||||
"default": "デフォルト",
|
"default": "デフォルト",
|
||||||
|
"default_description": "設定なしで、モデルの既定の動作に依存する。",
|
||||||
"high": "最大限の思考",
|
"high": "最大限の思考",
|
||||||
|
"high_description": "高度な推論",
|
||||||
"label": "思考連鎖の長さ",
|
"label": "思考連鎖の長さ",
|
||||||
"low": "少しの思考",
|
"low": "少しの思考",
|
||||||
|
"low_description": "低レベル推論",
|
||||||
"medium": "普通の思考",
|
"medium": "普通の思考",
|
||||||
|
"medium_description": "中レベル推論",
|
||||||
"minimal": "最小限の思考",
|
"minimal": "最小限の思考",
|
||||||
"off": "オフ"
|
"minimal_description": "最小限の推論",
|
||||||
|
"off": "オフ",
|
||||||
|
"off_description": "推論を無効にする",
|
||||||
|
"xhigh": "超高",
|
||||||
|
"xhigh_description": "超高度な推論"
|
||||||
},
|
},
|
||||||
"regular_phrases": {
|
"regular_phrases": {
|
||||||
"add": "プロンプトを追加",
|
"add": "プロンプトを追加",
|
||||||
@ -1026,6 +1044,29 @@
|
|||||||
"yuque": "語雀にエクスポート"
|
"yuque": "語雀にエクスポート"
|
||||||
},
|
},
|
||||||
"list": "トピックリスト",
|
"list": "トピックリスト",
|
||||||
|
"manage": {
|
||||||
|
"clear_selection": "選択をクリア",
|
||||||
|
"delete": {
|
||||||
|
"confirm": {
|
||||||
|
"content": "{{count}}件の選択したトピックを削除してもよろしいですか?この操作は元に戻せません。",
|
||||||
|
"title": "トピックを削除"
|
||||||
|
},
|
||||||
|
"success": "{{count}}件のトピックを削除しました"
|
||||||
|
},
|
||||||
|
"deselect_all": "すべての選択を解除",
|
||||||
|
"error": {
|
||||||
|
"at_least_one": "少なくとも1つのトピックは保持されなければなりません"
|
||||||
|
},
|
||||||
|
"move": {
|
||||||
|
"button": "移動",
|
||||||
|
"placeholder": "対象を選択",
|
||||||
|
"success": "{{count}}件のトピックを移動しました"
|
||||||
|
},
|
||||||
|
"pinned": "ピン留めされたトピック",
|
||||||
|
"selected_count": "{{count}} 選択済み",
|
||||||
|
"title": "トピックを管理",
|
||||||
|
"unpinned": "ピン留めされていないトピック"
|
||||||
|
},
|
||||||
"move_to": "移動先",
|
"move_to": "移動先",
|
||||||
"new": "新しいトピック",
|
"new": "新しいトピック",
|
||||||
"pin": "トピックを固定",
|
"pin": "トピックを固定",
|
||||||
@ -1036,6 +1077,10 @@
|
|||||||
"label": "トピック提示語",
|
"label": "トピック提示語",
|
||||||
"tips": "トピック提示語:現在のトピックに対して追加の補足提示語を提供"
|
"tips": "トピック提示語:現在のトピックに対して追加の補足提示語を提供"
|
||||||
},
|
},
|
||||||
|
"search": {
|
||||||
|
"placeholder": "トピックを検索...",
|
||||||
|
"title": "検索"
|
||||||
|
},
|
||||||
"title": "トピック",
|
"title": "トピック",
|
||||||
"unpin": "固定解除"
|
"unpin": "固定解除"
|
||||||
},
|
},
|
||||||
@ -1220,11 +1265,13 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"stop": "停止",
|
"stop": "停止",
|
||||||
|
"subscribe": "購読",
|
||||||
"success": "成功",
|
"success": "成功",
|
||||||
"swap": "交換",
|
"swap": "交換",
|
||||||
"topics": "トピック",
|
"topics": "トピック",
|
||||||
"unknown": "Unknown",
|
"unknown": "Unknown",
|
||||||
"unnamed": "無題",
|
"unnamed": "無題",
|
||||||
|
"unsubscribe": "配信停止",
|
||||||
"update_success": "更新成功",
|
"update_success": "更新成功",
|
||||||
"upload_files": "ファイルをアップロードする",
|
"upload_files": "ファイルをアップロードする",
|
||||||
"warning": "警告",
|
"warning": "警告",
|
||||||
@ -2151,6 +2198,7 @@
|
|||||||
"collapse": "閉じる",
|
"collapse": "閉じる",
|
||||||
"content_placeholder": "メモの内容を入力してください...",
|
"content_placeholder": "メモの内容を入力してください...",
|
||||||
"copyContent": "コンテンツをコピーします",
|
"copyContent": "コンテンツをコピーします",
|
||||||
|
"crossPlatformRestoreWarning": "[to be translated]:Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
|
||||||
"delete": "削除",
|
"delete": "削除",
|
||||||
"delete_confirm": "この{{type}}を本当に削除しますか?",
|
"delete_confirm": "この{{type}}を本当に削除しますか?",
|
||||||
"delete_folder_confirm": "「{{name}}」フォルダーとそのすべての内容を削除してもよろしいですか?",
|
"delete_folder_confirm": "「{{name}}」フォルダーとそのすべての内容を削除してもよろしいですか?",
|
||||||
@ -2595,6 +2643,7 @@
|
|||||||
"lanyun": "LANYUN",
|
"lanyun": "LANYUN",
|
||||||
"lmstudio": "LM Studio",
|
"lmstudio": "LM Studio",
|
||||||
"longcat": "トトロ",
|
"longcat": "トトロ",
|
||||||
|
"mimo": "[to be translated]:Xiaomi MiMo",
|
||||||
"minimax": "MiniMax",
|
"minimax": "MiniMax",
|
||||||
"mistral": "Mistral",
|
"mistral": "Mistral",
|
||||||
"modelscope": "ModelScope",
|
"modelscope": "ModelScope",
|
||||||
@ -3883,6 +3932,7 @@
|
|||||||
"builtinServers": "組み込みサーバー",
|
"builtinServers": "組み込みサーバー",
|
||||||
"builtinServersDescriptions": {
|
"builtinServersDescriptions": {
|
||||||
"brave_search": "Brave検索APIを統合したMCPサーバーの実装で、ウェブ検索とローカル検索の両機能を提供します。BRAVE_API_KEY環境変数の設定が必要です",
|
"brave_search": "Brave検索APIを統合したMCPサーバーの実装で、ウェブ検索とローカル検索の両機能を提供します。BRAVE_API_KEY環境変数の設定が必要です",
|
||||||
|
"browser": "Chrome DevTools Protocolを介してheadless Electronウィンドウを制御します。ツール:URLを開く、単一行JSを実行、セッションをリセット。",
|
||||||
"didi_mcp": "DiDi MCPサーバーは、地図検索、料金見積もり、注文管理、ドライバー追跡を含むライドシェアサービスを提供します。中国本土でのみ利用可能です。DIDI_API_KEY環境変数の設定が必要です",
|
"didi_mcp": "DiDi MCPサーバーは、地図検索、料金見積もり、注文管理、ドライバー追跡を含むライドシェアサービスを提供します。中国本土でのみ利用可能です。DIDI_API_KEY環境変数の設定が必要です",
|
||||||
"dify_knowledge": "DifyのMCPサーバー実装は、Difyと対話するためのシンプルなAPIを提供します。Dify Keyの設定が必要です。",
|
"dify_knowledge": "DifyのMCPサーバー実装は、Difyと対話するためのシンプルなAPIを提供します。Dify Keyの設定が必要です。",
|
||||||
"fetch": "URLのウェブページコンテンツを取得するためのMCPサーバー",
|
"fetch": "URLのウェブページコンテンツを取得するためのMCPサーバー",
|
||||||
@ -3890,6 +3940,7 @@
|
|||||||
"mcp_auto_install": "MCPサービスの自動インストール(ベータ版)",
|
"mcp_auto_install": "MCPサービスの自動インストール(ベータ版)",
|
||||||
"memory": "ローカルのナレッジグラフに基づく永続的なメモリの基本的な実装です。これにより、モデルは異なる会話間でユーザーの関連情報を記憶できるようになります。MEMORY_FILE_PATH 環境変数の設定が必要です。",
|
"memory": "ローカルのナレッジグラフに基づく永続的なメモリの基本的な実装です。これにより、モデルは異なる会話間でユーザーの関連情報を記憶できるようになります。MEMORY_FILE_PATH 環境変数の設定が必要です。",
|
||||||
"no": "説明なし",
|
"no": "説明なし",
|
||||||
|
"nowledge_mem": "Nowledge Mem アプリをローカルで実行する必要があります。AI チャット、ツール、ノート、エージェント、ファイルをコンピューター上のプライベートメモリに保存します。https://mem.nowledge.co/ からダウンロードしてください",
|
||||||
"python": "安全なサンドボックス環境でPythonコードを実行します。Pyodideを使用してPythonを実行し、ほとんどの標準ライブラリと科学計算パッケージをサポートしています。",
|
"python": "安全なサンドボックス環境でPythonコードを実行します。Pyodideを使用してPythonを実行し、ほとんどの標準ライブラリと科学計算パッケージをサポートしています。",
|
||||||
"sequentialthinking": "構造化された思考プロセスを通じて動的かつ反省的な問題解決を行うためのツールを提供するMCPサーバーの実装"
|
"sequentialthinking": "構造化された思考プロセスを通じて動的かつ反省的な問題解決を行うためのツールを提供するMCPサーバーの実装"
|
||||||
},
|
},
|
||||||

@@ -32,6 +32,7 @@
 },
 "gitBash": {
 "autoDetected": "Usando Git Bash detectado automaticamente",
+"autoDiscoveredHint": "[to be translated]:Auto-discovered",
 "clear": {
 "button": "Limpar caminho personalizado"
 },
@@ -39,6 +40,7 @@
 "error": {
 "description": "O Git Bash é necessário para executar agentes no Windows. O agente não pode funcionar sem ele. Por favor, instale o Git para Windows a partir de",
 "recheck": "Reverificar a Instalação do Git Bash",
+"required": "[to be translated]:Git Bash path is required on Windows",
 "title": "Git Bash Necessário"
 },
 "found": {
@@ -51,7 +53,9 @@
 "invalidPath": "O arquivo selecionado não é um executável válido do Git Bash (bash.exe).",
 "title": "Selecionar executável do Git Bash"
 },
-"success": "Git Bash detectado com sucesso!"
+"placeholder": "[to be translated]:Select bash.exe path",
+"success": "Git Bash detectado com sucesso!",
+"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
 },
 "input": {
 "placeholder": "Digite sua mensagem aqui, envie com {{key}} - @ selecionar caminho, / selecionar comando"
@@ -472,6 +476,7 @@
 "button": "Importar",
 "error": {
 "fetch_failed": "Falha ao obter dados do URL",
+"file_required": "Por favor, selecione um arquivo primeiro",
 "invalid_format": "Formato de assistente inválido: campos obrigatórios em falta",
 "url_required": "Por favor insere um URL"
 },
@@ -489,8 +494,11 @@
 "button": "Exclusão em Lote",
 "confirm": "Tem certeza de que deseja excluir os {{count}} assistentes selecionados?"
 },
+"batch_export": {
+"button": "Exportar"
+},
 "mode": {
-"delete": "Excluir",
+"manage": "Gerenciar",
 "sort": "Ordenar"
 },
 "title": "Gerir assistentes"
@@ -540,13 +548,23 @@
 "more": "Configurações do Assistente",
 "prompt": "Configurações de Prompt",
 "reasoning_effort": {
+"auto": "Automóvel",
+"auto_description": "Determinar flexivelmente o esforço de raciocínio",
 "default": "Padrão",
+"default_description": "Depender do comportamento padrão do modelo, sem qualquer configuração.",
 "high": "Longo",
+"high_description": "Raciocínio de alto nível",
 "label": "Comprimento da Cadeia de Raciocínio",
 "low": "Curto",
+"low_description": "Raciocínio de baixo nível",
 "medium": "Médio",
+"medium_description": "Raciocínio de nível médio",
 "minimal": "mínimo",
-"off": "Desligado"
+"minimal_description": "Raciocínio mínimo",
+"off": "Desligado",
+"off_description": "Desabilitar raciocínio",
+"xhigh": "Extra Alta",
+"xhigh_description": "Raciocínio de altíssimo nível"
 },
 "regular_phrases": {
 "add": "Adicionar Frase",
@@ -1026,6 +1044,29 @@
 "yuque": "Exportar para Yuque"
 },
 "list": "Lista de tópicos",
+"manage": {
+"clear_selection": "Limpar Seleção",
+"delete": {
+"confirm": {
+"content": "Tem certeza de que deseja excluir {{count}} tópico(s) selecionado(s)? Esta ação não pode ser desfeita.",
+"title": "Excluir Tópicos"
+},
+"success": "Excluído(s) {{count}} tópico(s)"
+},
+"deselect_all": "Desmarcar Todos",
+"error": {
+"at_least_one": "Pelo menos um tópico deve ser mantido"
+},
+"move": {
+"button": "Mover",
+"placeholder": "Selecionar assistente de destino",
+"success": "Movido(s) {{count}} tópico(s)"
+},
+"pinned": "Tópicos Fixados",
+"selected_count": "{{count}} selecionado",
+"title": "Gerenciar Tópicos",
+"unpinned": "Tópicos Desafixados"
+},
 "move_to": "Mover para",
 "new": "Começar nova conversa",
 "pin": "Fixar tópico",
@@ -1036,6 +1077,10 @@
 "label": "Prompt do tópico",
 "tips": "Prompt do tópico: fornecer prompts adicionais para o tópico atual"
 },
+"search": {
+"placeholder": "Pesquisar tópicos...",
+"title": "Pesquisar"
+},
 "title": "Tópicos",
 "unpin": "Desfixar"
 },
@@ -1220,11 +1265,13 @@
 }
 },
 "stop": "Parar",
+"subscribe": "Subscrever",
 "success": "Sucesso",
 "swap": "Trocar",
 "topics": "Tópicos",
 "unknown": "Desconhecido",
 "unnamed": "Sem nome",
+"unsubscribe": "Cancelar inscrição",
 "update_success": "Atualização bem-sucedida",
 "upload_files": "Carregar arquivo",
 "warning": "Aviso",
@@ -2151,6 +2198,7 @@
 "collapse": "[minimizar]",
 "content_placeholder": "Introduza o conteúdo da nota...",
 "copyContent": "copiar conteúdo",
+"crossPlatformRestoreWarning": "[to be translated]:Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
 "delete": "eliminar",
 "delete_confirm": "Tem a certeza de que deseja eliminar este {{type}}?",
 "delete_folder_confirm": "Tem a certeza de que deseja eliminar a pasta \"{{name}}\" e todos os seus conteúdos?",
@@ -2595,6 +2643,7 @@
 "lanyun": "Lanyun Tecnologia",
 "lmstudio": "Estúdio LM",
 "longcat": "Totoro",
+"mimo": "[to be translated]:Xiaomi MiMo",
 "minimax": "Minimax",
 "mistral": "Mistral",
 "modelscope": "ModelScope MôDá",
@@ -3883,6 +3932,7 @@
 "builtinServers": "Servidores integrados",
 "builtinServersDescriptions": {
 "brave_search": "uma implementação de servidor MCP integrada com a API de pesquisa Brave, fornecendo funcionalidades de pesquisa web e local. Requer a configuração da variável de ambiente BRAVE_API_KEY",
+"browser": "Controla uma janela Electron headless via Chrome DevTools Protocol. Ferramentas: abrir URL, executar JS de linha única, reiniciar sessão.",
 "didi_mcp": "Servidor DiDi MCP que fornece serviços de transporte incluindo pesquisa de mapas, estimativa de preços, gestão de pedidos e rastreamento de motoristas. Disponível apenas na China Continental. Requer configuração da variável de ambiente DIDI_API_KEY",
 "dify_knowledge": "Implementação do servidor MCP do Dify, que fornece uma API simples para interagir com o Dify. Requer a configuração da chave Dify",
 "fetch": "servidor MCP para obter o conteúdo da página web do URL",
@@ -3890,6 +3940,7 @@
 "mcp_auto_install": "Instalação automática do serviço MCP (beta)",
 "memory": "Implementação base de memória persistente baseada em grafos de conhecimento locais. Isso permite que o modelo lembre informações relevantes do utilizador entre diferentes conversas. É necessário configurar a variável de ambiente MEMORY_FILE_PATH.",
 "no": "sem descrição",
+"nowledge_mem": "Requer a aplicação Nowledge Mem em execução localmente. Mantém conversas de IA, ferramentas, notas, agentes e ficheiros numa memória privada no seu computador. Transfira de https://mem.nowledge.co/",
 "python": "Executar código Python num ambiente sandbox seguro. Utilizar Pyodide para executar Python, suportando a maioria das bibliotecas padrão e pacotes de computação científica",
 "sequentialthinking": "Uma implementação de servidor MCP que fornece ferramentas para resolução dinâmica e reflexiva de problemas através de um processo de pensamento estruturado"
 },

@@ -32,6 +32,7 @@
 },
 "gitBash": {
 "autoDetected": "Используется автоматически обнаруженный Git Bash",
+"autoDiscoveredHint": "[to be translated]:Auto-discovered",
 "clear": {
 "button": "Очистить пользовательский путь"
 },
@@ -39,6 +40,7 @@
 "error": {
 "description": "Для запуска агентов в Windows требуется Git Bash. Без него агент не может работать. Пожалуйста, установите Git для Windows с",
 "recheck": "Повторная проверка установки Git Bash",
+"required": "[to be translated]:Git Bash path is required on Windows",
 "title": "Требуется Git Bash"
 },
 "found": {
@@ -51,7 +53,9 @@
 "invalidPath": "Выбранный файл не является допустимым исполняемым файлом Git Bash (bash.exe).",
 "title": "Выберите исполняемый файл Git Bash"
 },
-"success": "Git Bash успешно обнаружен!"
+"placeholder": "[to be translated]:Select bash.exe path",
+"success": "Git Bash успешно обнаружен!",
+"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
 },
 "input": {
 "placeholder": "Введите ваше сообщение здесь, отправьте с помощью {{key}} — @ выбрать путь, / выбрать команду"
@@ -472,6 +476,7 @@
 "button": "Импортировать",
 "error": {
 "fetch_failed": "Ошибка получения данных с URL",
+"file_required": "Сначала выберите файл",
 "invalid_format": "Неверный формат помощника: отсутствуют обязательные поля",
 "url_required": "Пожалуйста, введите URL"
 },
@@ -489,8 +494,11 @@
 "button": "Массовое удаление",
 "confirm": "Вы уверены, что хотите удалить выбранных {{count}} ассистентов?"
 },
+"batch_export": {
+"button": "Экспорт"
+},
 "mode": {
-"delete": "Удалить",
+"manage": "Управлять",
 "sort": "Сортировать"
 },
 "title": "Управление помощниками"
@@ -540,13 +548,23 @@
 "more": "Настройки ассистента",
 "prompt": "Настройки промптов",
 "reasoning_effort": {
+"auto": "Авто",
+"auto_description": "Гибко определяйте усилие на рассуждение",
 "default": "По умолчанию",
+"default_description": "Полагаться на поведение модели по умолчанию, без какой-либо конфигурации.",
 "high": "Стараюсь думать",
+"high_description": "Высокоуровневое рассуждение",
 "label": "Настройки размышлений",
 "low": "Меньше думать",
+"low_description": "Низкоуровневое рассуждение",
 "medium": "Среднее",
+"medium_description": "Средний уровень рассуждения",
 "minimal": "минимальный",
-"off": "Выключить"
+"minimal_description": "Минимальное рассуждение",
+"off": "Выключить",
+"off_description": "Отключить рассуждение",
+"xhigh": "Сверхвысокое",
+"xhigh_description": "Высочайший уровень рассуждений"
 },
 "regular_phrases": {
 "add": "Добавить подсказку",
@@ -1026,6 +1044,29 @@
 "yuque": "Экспорт в Yuque"
 },
 "list": "Список топиков",
+"manage": {
+"clear_selection": "Очистить выбор",
+"delete": {
+"confirm": {
+"content": "Вы уверены, что хотите удалить выбранные темы ({{count}})? Это действие нельзя отменить.",
+"title": "Удалить темы"
+},
+"success": "Удалено {{count}} тем(ы)"
+},
+"deselect_all": "Снять выделение со всех",
+"error": {
+"at_least_one": "Должна быть сохранена хотя бы одна тема"
+},
+"move": {
+"button": "Переместить",
+"placeholder": "Выберите цель",
+"success": "Перемещено {{count}} тем(ы)"
+},
+"pinned": "Закреплённые темы",
+"selected_count": "{{count}} выбрано",
+"title": "Управление темами",
+"unpinned": "Откреплённые темы"
+},
 "move_to": "Переместить в",
 "new": "Новый топик",
 "pin": "Закрепленные темы",
@@ -1036,6 +1077,10 @@
 "label": "Тематические подсказки",
 "tips": "Тематические подсказки: Дополнительные подсказки, предоставленные для текущей темы"
 },
+"search": {
+"placeholder": "Искать темы...",
+"title": "Поиск"
+},
 "title": "Топики",
 "unpin": "Открепленные темы"
 },
@@ -1220,11 +1265,13 @@
 }
 },
 "stop": "остановить",
+"subscribe": "Подписаться",
 "success": "Успешно",
 "swap": "Поменять местами",
 "topics": "Топики",
 "unknown": "Неизвестно",
 "unnamed": "Без имени",
+"unsubscribe": "Отписаться",
 "update_success": "Обновление выполнено успешно",
 "upload_files": "Загрузить файл",
 "warning": "Предупреждение",
@@ -2151,6 +2198,7 @@
 "collapse": "Свернуть",
 "content_placeholder": "Введите содержимое заметки...",
 "copyContent": "Копировать контент",
+"crossPlatformRestoreWarning": "[to be translated]:Cross-platform configuration restored, but notes directory is empty. Please copy your note files to: {{path}}",
 "delete": "удалить",
 "delete_confirm": "Вы уверены, что хотите удалить этот объект {{type}}?",
 "delete_folder_confirm": "Вы уверены, что хотите удалить папку \"{{name}}\" со всем ее содержимым?",
@@ -2595,6 +2643,7 @@
 "lanyun": "LANYUN",
 "lmstudio": "LM Studio",
 "longcat": "Тоторо",
+"mimo": "[to be translated]:Xiaomi MiMo",
 "minimax": "MiniMax",
 "mistral": "Mistral",
 "modelscope": "ModelScope",
@@ -3883,6 +3932,7 @@
 "builtinServers": "Встроенные серверы",
 "builtinServersDescriptions": {
 "brave_search": "реализация сервера MCP с интеграцией API поиска Brave, обеспечивающая функции веб-поиска и локального поиска. Требуется настройка переменной среды BRAVE_API_KEY",
+"browser": "Управление headless-окном Electron через Chrome DevTools Protocol. Инструменты: открытие URL, выполнение однострочного JS, сброс сессии.",
 "didi_mcp": "Сервер DiDi MCP, предоставляющий услуги такси, включая поиск на карте, оценку стоимости, управление заказами и отслеживание водителей. Доступен только в материковом Китае. Требует настройки переменной окружения DIDI_API_KEY",
 "dify_knowledge": "Реализация сервера MCP Dify, предоставляющая простой API для взаимодействия с Dify. Требуется настройка ключа Dify",
 "fetch": "MCP-сервер для получения содержимого веб-страниц по URL",
@@ -3890,6 +3940,7 @@
 "mcp_auto_install": "Автоматическая установка службы MCP (бета-версия)",
 "memory": "реализация постоянной памяти на основе локального графа знаний. Это позволяет модели запоминать информацию о пользователе между различными диалогами. Требуется настроить переменную среды MEMORY_FILE_PATH.",
 "no": "без описания",
+"nowledge_mem": "Требуется запущенное локально приложение Nowledge Mem. Хранит чаты ИИ, инструменты, заметки, агентов и файлы в приватной памяти на вашем компьютере. Скачать можно на https://mem.nowledge.co/",
 "python": "Выполняйте код Python в безопасной песочнице. Запускайте Python с помощью Pyodide, поддерживается большинство стандартных библиотек и пакетов для научных вычислений",
 "sequentialthinking": "MCP серверная реализация, предоставляющая инструменты для динамического и рефлексивного решения проблем посредством структурированного мыслительного процесса"
 },
Some files were not shown because too many files have changed in this diff.
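Several of the strings added above carry the "[to be translated]:" prefix, marking values that were synced into a locale file but not yet translated. As a rough illustration only (the file path, script name, and helper function below are assumptions, not code from this repository; only the prefix itself comes from the diff), a small script could list the keys that still carry that placeholder:

// find-untranslated.ts — illustrative sketch, not part of the repository.
// Walks a locale JSON object and prints dotted key paths whose values still
// start with the "[to be translated]:" prefix seen in the hunks above.
import { readFileSync } from 'node:fs'

const PLACEHOLDER_PREFIX = '[to be translated]:' // prefix as it appears in the diff

type LocaleNode = string | { [key: string]: LocaleNode }

function collectUntranslated(node: LocaleNode, path: string[] = [], out: string[] = []): string[] {
  if (typeof node === 'string') {
    // Leaf value: record its key path if it is still a placeholder.
    if (node.startsWith(PLACEHOLDER_PREFIX)) out.push(path.join('.'))
    return out
  }
  // Nested object: recurse into every child key.
  for (const [key, child] of Object.entries(node)) {
    collectUntranslated(child, [...path, key], out)
  }
  return out
}

// Usage: pass a locale file path on the command line (the default name here is hypothetical).
const locale = JSON.parse(readFileSync(process.argv[2] ?? 'ru-ru.json', 'utf8')) as LocaleNode
for (const key of collectUntranslated(locale)) {
  console.log(key)
}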