Mirror of https://github.com/CherryHQ/cherry-studio.git (synced 2025-12-19 14:41:24 +08:00)
* fix: update Ollama provider options for Qwen model support

  Pass the model to buildOllamaProviderOptions and enable the 'think' option only for supported Qwen models. This improves reasoning capability handling for Ollama providers.

* fix: empty array

* feat: ollama oss

---------

Co-authored-by: suyao <sy20010504@gmail.com>
146 lines · 9.8 KiB · Diff · Vendored
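At a glance, the change does three things: it widens the 'think' provider option from a plain boolean to boolean | 'low' | 'medium' | 'high' (the string levels targeting gpt-oss models), it moves temperature and top_p into Ollama's options object, and it fixes empty user content to serialize as '' instead of []. A rough usage sketch of the widened option, assuming the Vercel AI SDK's generateText and an ollama provider factory exported by this vendored package (the import path below is an assumption):

import { generateText } from 'ai';
import { ollama } from 'ollama-ai-provider'; // assumed entry point for this vendored build

// Boolean form: toggles thinking on reasoning models such as DeepSeek R1 or Qwen 3.
await generateText({
  model: ollama('qwen3:8b'),
  prompt: 'Why is the sky blue?',
  providerOptions: { ollama: { think: true } }
});

// Effort form: gpt-oss models additionally accept 'low' | 'medium' | 'high'.
await generateText({
  model: ollama('gpt-oss'),
  prompt: 'Why is the sky blue?',
  providerOptions: { ollama: { think: 'high' } }
});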
diff --git a/dist/index.d.ts b/dist/index.d.ts
index 8dd9b498050dbecd8dd6b901acf1aa8ca38a49af..ed644349c9d38fe2a66b2fb44214f7c18eb97f89 100644
--- a/dist/index.d.ts
+++ b/dist/index.d.ts
@@ -4,7 +4,7 @@ import { z } from 'zod/v4';
 
 type OllamaChatModelId = "athene-v2" | "athene-v2:72b" | "aya-expanse" | "aya-expanse:8b" | "aya-expanse:32b" | "codegemma" | "codegemma:2b" | "codegemma:7b" | "codellama" | "codellama:7b" | "codellama:13b" | "codellama:34b" | "codellama:70b" | "codellama:code" | "codellama:python" | "command-r" | "command-r:35b" | "command-r-plus" | "command-r-plus:104b" | "command-r7b" | "command-r7b:7b" | "deepseek-r1" | "deepseek-r1:1.5b" | "deepseek-r1:7b" | "deepseek-r1:8b" | "deepseek-r1:14b" | "deepseek-r1:32b" | "deepseek-r1:70b" | "deepseek-r1:671b" | "deepseek-coder-v2" | "deepseek-coder-v2:16b" | "deepseek-coder-v2:236b" | "deepseek-v3" | "deepseek-v3:671b" | "devstral" | "devstral:24b" | "dolphin3" | "dolphin3:8b" | "exaone3.5" | "exaone3.5:2.4b" | "exaone3.5:7.8b" | "exaone3.5:32b" | "falcon2" | "falcon2:11b" | "falcon3" | "falcon3:1b" | "falcon3:3b" | "falcon3:7b" | "falcon3:10b" | "firefunction-v2" | "firefunction-v2:70b" | "gemma" | "gemma:2b" | "gemma:7b" | "gemma2" | "gemma2:2b" | "gemma2:9b" | "gemma2:27b" | "gemma3" | "gemma3:1b" | "gemma3:4b" | "gemma3:12b" | "gemma3:27b" | "granite3-dense" | "granite3-dense:2b" | "granite3-dense:8b" | "granite3-guardian" | "granite3-guardian:2b" | "granite3-guardian:8b" | "granite3-moe" | "granite3-moe:1b" | "granite3-moe:3b" | "granite3.1-dense" | "granite3.1-dense:2b" | "granite3.1-dense:8b" | "granite3.1-moe" | "granite3.1-moe:1b" | "granite3.1-moe:3b" | "llama2" | "llama2:7b" | "llama2:13b" | "llama2:70b" | "llama3" | "llama3:8b" | "llama3:70b" | "llama3-chatqa" | "llama3-chatqa:8b" | "llama3-chatqa:70b" | "llama3-gradient" | "llama3-gradient:8b" | "llama3-gradient:70b" | "llama3.1" | "llama3.1:8b" | "llama3.1:70b" | "llama3.1:405b" | "llama3.2" | "llama3.2:1b" | "llama3.2:3b" | "llama3.2-vision" | "llama3.2-vision:11b" | "llama3.2-vision:90b" | "llama3.3" | "llama3.3:70b" | "llama4" | "llama4:16x17b" | "llama4:128x17b" | "llama-guard3" | "llama-guard3:1b" | "llama-guard3:8b" | "llava" | "llava:7b" | "llava:13b" | "llava:34b" | "llava-llama3" | "llava-llama3:8b" | "llava-phi3" | "llava-phi3:3.8b" | "marco-o1" | "marco-o1:7b" | "mistral" | "mistral:7b" | "mistral-large" | "mistral-large:123b" | "mistral-nemo" | "mistral-nemo:12b" | "mistral-small" | "mistral-small:22b" | "mixtral" | "mixtral:8x7b" | "mixtral:8x22b" | "moondream" | "moondream:1.8b" | "openhermes" | "openhermes:v2.5" | "nemotron" | "nemotron:70b" | "nemotron-mini" | "nemotron-mini:4b" | "olmo" | "olmo:7b" | "olmo:13b" | "opencoder" | "opencoder:1.5b" | "opencoder:8b" | "phi3" | "phi3:3.8b" | "phi3:14b" | "phi3.5" | "phi3.5:3.8b" | "phi4" | "phi4:14b" | "qwen" | "qwen:7b" | "qwen:14b" | "qwen:32b" | "qwen:72b" | "qwen:110b" | "qwen2" | "qwen2:0.5b" | "qwen2:1.5b" | "qwen2:7b" | "qwen2:72b" | "qwen2.5" | "qwen2.5:0.5b" | "qwen2.5:1.5b" | "qwen2.5:3b" | "qwen2.5:7b" | "qwen2.5:14b" | "qwen2.5:32b" | "qwen2.5:72b" | "qwen2.5-coder" | "qwen2.5-coder:0.5b" | "qwen2.5-coder:1.5b" | "qwen2.5-coder:3b" | "qwen2.5-coder:7b" | "qwen2.5-coder:14b" | "qwen2.5-coder:32b" | "qwen3" | "qwen3:0.6b" | "qwen3:1.7b" | "qwen3:4b" | "qwen3:8b" | "qwen3:14b" | "qwen3:30b" | "qwen3:32b" | "qwen3:235b" | "qwq" | "qwq:32b" | "sailor2" | "sailor2:1b" | "sailor2:8b" | "sailor2:20b" | "shieldgemma" | "shieldgemma:2b" | "shieldgemma:9b" | "shieldgemma:27b" | "smallthinker" | "smallthinker:3b" | "smollm" | "smollm:135m" | "smollm:360m" | "smollm:1.7b" | "tinyllama" | "tinyllama:1.1b" | "tulu3" | "tulu3:8b" | "tulu3:70b" | (string & {});
 declare const ollamaProviderOptions: z.ZodObject<{
-    think: z.ZodOptional<z.ZodBoolean>;
+    think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodEnum<['low', 'medium', 'high']>]>>;
     options: z.ZodOptional<z.ZodObject<{
         num_ctx: z.ZodOptional<z.ZodNumber>;
         repeat_last_n: z.ZodOptional<z.ZodNumber>;
@@ -27,9 +27,11 @@ interface OllamaCompletionSettings {
      * the model's thinking from the model's output. When disabled, the model will not think
      * and directly output the content.
      *
+     * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking.
+     *
      * Only supported by certain models like DeepSeek R1 and Qwen 3.
      */
-    think?: boolean;
+    think?: boolean | 'low' | 'medium' | 'high';
     /**
      * Echo back the prompt in addition to the completion.
      */
@@ -146,7 +148,7 @@ declare const ollamaEmbeddingProviderOptions: z.ZodObject<{
 type OllamaEmbeddingProviderOptions = z.infer<typeof ollamaEmbeddingProviderOptions>;
 
 declare const ollamaCompletionProviderOptions: z.ZodObject<{
-    think: z.ZodOptional<z.ZodBoolean>;
+    think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodEnum<['low', 'medium', 'high']>]>>;
     user: z.ZodOptional<z.ZodString>;
     suffix: z.ZodOptional<z.ZodString>;
     echo: z.ZodOptional<z.ZodBoolean>;
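A minimal standalone sketch of the widened schema above (the zod/v4 import path is taken from the diff; the variable name is illustrative):

import { z } from 'zod/v4';

// A boolean toggle or a gpt-oss effort level; absent means the provider default.
const think = z.union([z.boolean(), z.enum(['low', 'medium', 'high'])]).optional();

think.parse(true);      // ok: plain on/off toggle
think.parse('medium');  // ok: effort level for gpt-oss models
think.parse(undefined); // ok: the field is optional
// think.parse('max');  // would throw: neither a boolean nor in the enum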
diff --git a/dist/index.js b/dist/index.js
index 35b5142ce8476ce2549ed7c2ec48e7d8c46c90d9..2ef64dc9a4c2be043e6af608241a6a8309a5a69f 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -158,7 +158,7 @@ function getResponseMetadata({
 
 // src/completion/ollama-completion-language-model.ts
 var ollamaCompletionProviderOptions = import_v42.z.object({
-  think: import_v42.z.boolean().optional(),
+  think: import_v42.z.union([import_v42.z.boolean(), import_v42.z.enum(['low', 'medium', 'high'])]).optional(),
   user: import_v42.z.string().optional(),
   suffix: import_v42.z.string().optional(),
   echo: import_v42.z.boolean().optional()
@@ -662,7 +662,7 @@ function convertToOllamaChatMessages({
         const images = content.filter((part) => part.type === "file" && part.mediaType.startsWith("image/")).map((part) => part.data);
         messages.push({
           role: "user",
-          content: userText.length > 0 ? userText : [],
+          content: userText.length > 0 ? userText : '',
           images: images.length > 0 ? images : void 0
         });
         break;
@@ -813,9 +813,11 @@ var ollamaProviderOptions = import_v44.z.object({
    * the model's thinking from the model's output. When disabled, the model will not think
    * and directly output the content.
    *
+   * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking.
+   *
    * Only supported by certain models like DeepSeek R1 and Qwen 3.
    */
-  think: import_v44.z.boolean().optional(),
+  think: import_v44.z.union([import_v44.z.boolean(), import_v44.z.enum(['low', 'medium', 'high'])]).optional(),
   options: import_v44.z.object({
     num_ctx: import_v44.z.number().optional(),
     repeat_last_n: import_v44.z.number().optional(),
@@ -929,14 +931,16 @@ var OllamaRequestBuilder = class {
         prompt,
         systemMessageMode: "system"
       }),
-      temperature,
-      top_p: topP,
       max_output_tokens: maxOutputTokens,
       ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
         format: responseFormat.schema != null ? responseFormat.schema : "json"
       },
       think: (_a = ollamaOptions == null ? void 0 : ollamaOptions.think) != null ? _a : false,
-      options: (_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : void 0
+      options: {
+        ...temperature !== void 0 && { temperature },
+        ...topP !== void 0 && { top_p: topP },
+        ...((_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : {})
+      }
     };
   }
 };
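The request-builder change above stops sending temperature and top_p as top-level request fields and folds them into Ollama's options map, where caller-supplied options win on key collisions because they spread last. A minimal sketch of that merge order (standalone TypeScript; the function name is hypothetical):

function buildOptions(
  temperature: number | undefined,
  topP: number | undefined,
  userOptions?: Record<string, unknown>
): Record<string, unknown> {
  return {
    // Include the sampling knobs only when they were actually set...
    ...(temperature !== undefined && { temperature }),
    ...(topP !== undefined && { top_p: topP }),
    // ...then spread explicit provider options last so they take precedence.
    ...(userOptions ?? {})
  };
}

// buildOptions(0.7, undefined, { temperature: 0.2, num_ctx: 8192 })
// -> { temperature: 0.2, num_ctx: 8192 }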
diff --git a/dist/index.mjs b/dist/index.mjs
index e2a634a78d80ac9542f2cc4f96cf2291094b10cf..67b23efce3c1cf4f026693d3ff9246988a3ef26e 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -144,7 +144,7 @@ function getResponseMetadata({
 
 // src/completion/ollama-completion-language-model.ts
 var ollamaCompletionProviderOptions = z2.object({
-  think: z2.boolean().optional(),
+  think: z2.union([z2.boolean(), z2.enum(['low', 'medium', 'high'])]).optional(),
   user: z2.string().optional(),
   suffix: z2.string().optional(),
   echo: z2.boolean().optional()
@@ -662,7 +662,7 @@ function convertToOllamaChatMessages({
         const images = content.filter((part) => part.type === "file" && part.mediaType.startsWith("image/")).map((part) => part.data);
         messages.push({
           role: "user",
-          content: userText.length > 0 ? userText : [],
+          content: userText.length > 0 ? userText : '',
           images: images.length > 0 ? images : void 0
         });
         break;
@@ -815,9 +815,11 @@ var ollamaProviderOptions = z4.object({
    * the model's thinking from the model's output. When disabled, the model will not think
    * and directly output the content.
    *
+   * For gpt-oss models, you can also use 'low', 'medium', or 'high' to control the depth of thinking.
+   *
    * Only supported by certain models like DeepSeek R1 and Qwen 3.
    */
-  think: z4.boolean().optional(),
+  think: z4.union([z4.boolean(), z4.enum(['low', 'medium', 'high'])]).optional(),
   options: z4.object({
     num_ctx: z4.number().optional(),
     repeat_last_n: z4.number().optional(),
@@ -931,14 +933,16 @@ var OllamaRequestBuilder = class {
         prompt,
         systemMessageMode: "system"
       }),
-      temperature,
-      top_p: topP,
       max_output_tokens: maxOutputTokens,
       ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
         format: responseFormat.schema != null ? responseFormat.schema : "json"
       },
       think: (_a = ollamaOptions == null ? void 0 : ollamaOptions.think) != null ? _a : false,
-      options: (_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : void 0
+      options: {
+        ...temperature !== void 0 && { temperature },
+        ...topP !== void 0 && { top_p: topP },
+        ...((_b = ollamaOptions == null ? void 0 : ollamaOptions.options) != null ? _b : {})
+      }
     };
   }
 };
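The content fix (identical in both builds) matters because Ollama's /api/chat expects each message's content field to be a string, so an empty text turn should serialize as '' rather than []. A hedged sketch of the message shape involved (field names follow the Ollama chat API; the helper function is hypothetical):

type OllamaChatMessage = {
  role: 'system' | 'user' | 'assistant';
  content: string;   // Ollama expects a string here, hence '' rather than []
  images?: string[]; // base64-encoded image payloads, omitted when empty
};

function toUserMessage(userText: string, images: string[]): OllamaChatMessage {
  return {
    role: 'user',
    content: userText.length > 0 ? userText : '', // was [] before this fix
    images: images.length > 0 ? images : undefined
  };
}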