Mirror of https://github.com/CherryHQ/cherry-studio.git (synced 2025-12-29 05:51:26 +08:00)
fix(ollama): improve reasoningEffort handling in providerOptions (#12089)
* fix(ollama): improve reasoningEffort handling in providerOptions
* fix(ollama): update reasoning effort handling and add support for gpt-oss models
* fix(ollama): update think option to support 'low', 'medium', and 'high' values
* fix(ollama): update comment to clarify accepted reasoning effort values for gpt-oss models
parent: b78df05f28
commit: 5ff173fcc7
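In practice, the widened think option lets a caller forward a gpt-oss reasoning effort straight through the AI SDK. A minimal sketch, not code from this repo: it assumes the AI SDK v5 streamText API, that ollama-ai-provider-v2 exports createOllama, that the provider reads its options under the ollama key, and a placeholder baseURL for a local Ollama host.

import { streamText } from 'ai'
import { createOllama } from 'ollama-ai-provider-v2'

// Assumed local setup; baseURL is a placeholder for a default Ollama host.
const ollama = createOllama({ baseURL: 'http://localhost:11434/api' })

const result = streamText({
  model: ollama('gpt-oss:20b'),
  prompt: 'Summarize the change in this commit.',
  providerOptions: {
    // With the widened schema, think accepts a boolean or 'low' | 'medium' | 'high'.
    // Before this fix, the string form failed the provider's zod validation.
    ollama: { think: 'high' }
  }
})

for await (const chunk of result.textStream) {
  process.stdout.write(chunk)
}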
.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch
@@ -7,7 +7,7 @@ index 8dd9b498050dbecd8dd6b901acf1aa8ca38a49af..ed644349c9d38fe2a66b2fb44214f7c1
  type OllamaChatModelId = "athene-v2" | "athene-v2:72b" | "aya-expanse" | "aya-expanse:8b" | "aya-expanse:32b" | "codegemma" | "codegemma:2b" | "codegemma:7b" | "codellama" | "codellama:7b" | "codellama:13b" | "codellama:34b" | "codellama:70b" | "codellama:code" | "codellama:python" | "command-r" | "command-r:35b" | "command-r-plus" | "command-r-plus:104b" | "command-r7b" | "command-r7b:7b" | "deepseek-r1" | "deepseek-r1:1.5b" | "deepseek-r1:7b" | "deepseek-r1:8b" | "deepseek-r1:14b" | "deepseek-r1:32b" | "deepseek-r1:70b" | "deepseek-r1:671b" | "deepseek-coder-v2" | "deepseek-coder-v2:16b" | "deepseek-coder-v2:236b" | "deepseek-v3" | "deepseek-v3:671b" | "devstral" | "devstral:24b" | "dolphin3" | "dolphin3:8b" | "exaone3.5" | "exaone3.5:2.4b" | "exaone3.5:7.8b" | "exaone3.5:32b" | "falcon2" | "falcon2:11b" | "falcon3" | "falcon3:1b" | "falcon3:3b" | "falcon3:7b" | "falcon3:10b" | "firefunction-v2" | "firefunction-v2:70b" | "gemma" | "gemma:2b" | "gemma:7b" | "gemma2" | "gemma2:2b" | "gemma2:9b" | "gemma2:27b" | "gemma3" | "gemma3:1b" | "gemma3:4b" | "gemma3:12b" | "gemma3:27b" | "granite3-dense" | "granite3-dense:2b" | "granite3-dense:8b" | "granite3-guardian" | "granite3-guardian:2b" | "granite3-guardian:8b" | "granite3-moe" | "granite3-moe:1b" | "granite3-moe:3b" | "granite3.1-dense" | "granite3.1-dense:2b" | "granite3.1-dense:8b" | "granite3.1-moe" | "granite3.1-moe:1b" | "granite3.1-moe:3b" | "llama2" | "llama2:7b" | "llama2:13b" | "llama2:70b" | "llama3" | "llama3:8b" | "llama3:70b" | "llama3-chatqa" | "llama3-chatqa:8b" | "llama3-chatqa:70b" | "llama3-gradient" | "llama3-gradient:8b" | "llama3-gradient:70b" | "llama3.1" | "llama3.1:8b" | "llama3.1:70b" | "llama3.1:405b" | "llama3.2" | "llama3.2:1b" | "llama3.2:3b" | "llama3.2-vision" | "llama3.2-vision:11b" | "llama3.2-vision:90b" | "llama3.3" | "llama3.3:70b" | "llama4" | "llama4:16x17b" | "llama4:128x17b" | "llama-guard3" | "llama-guard3:1b" | "llama-guard3:8b" | "llava" | "llava:7b" | "llava:13b" | "llava:34b" | "llava-llama3" | "llava-llama3:8b" | "llava-phi3" | "llava-phi3:3.8b" | "marco-o1" | "marco-o1:7b" | "mistral" | "mistral:7b" | "mistral-large" | "mistral-large:123b" | "mistral-nemo" | "mistral-nemo:12b" | "mistral-small" | "mistral-small:22b" | "mixtral" | "mixtral:8x7b" | "mixtral:8x22b" | "moondream" | "moondream:1.8b" | "openhermes" | "openhermes:v2.5" | "nemotron" | "nemotron:70b" | "nemotron-mini" | "nemotron-mini:4b" | "olmo" | "olmo:7b" | "olmo:13b" | "opencoder" | "opencoder:1.5b" | "opencoder:8b" | "phi3" | "phi3:3.8b" | "phi3:14b" | "phi3.5" | "phi3.5:3.8b" | "phi4" | "phi4:14b" | "qwen" | "qwen:7b" | "qwen:14b" | "qwen:32b" | "qwen:72b" | "qwen:110b" | "qwen2" | "qwen2:0.5b" | "qwen2:1.5b" | "qwen2:7b" | "qwen2:72b" | "qwen2.5" | "qwen2.5:0.5b" | "qwen2.5:1.5b" | "qwen2.5:3b" | "qwen2.5:7b" | "qwen2.5:14b" | "qwen2.5:32b" | "qwen2.5:72b" | "qwen2.5-coder" | "qwen2.5-coder:0.5b" | "qwen2.5-coder:1.5b" | "qwen2.5-coder:3b" | "qwen2.5-coder:7b" | "qwen2.5-coder:14b" | "qwen2.5-coder:32b" | "qwen3" | "qwen3:0.6b" | "qwen3:1.7b" | "qwen3:4b" | "qwen3:8b" | "qwen3:14b" | "qwen3:30b" | "qwen3:32b" | "qwen3:235b" | "qwq" | "qwq:32b" | "sailor2" | "sailor2:1b" | "sailor2:8b" | "sailor2:20b" | "shieldgemma" | "shieldgemma:2b" | "shieldgemma:9b" | "shieldgemma:27b" | "smallthinker" | "smallthinker:3b" | "smollm" | "smollm:135m" | "smollm:360m" | "smollm:1.7b" | "tinyllama" | "tinyllama:1.1b" | "tulu3" | "tulu3:8b" | "tulu3:70b" | (string & {});
  declare const ollamaProviderOptions: z.ZodObject<{
 -    think: z.ZodOptional<z.ZodBoolean>;
-+    think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodEnum<['low', 'medium', 'high']>]>>;
++    think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodLiteral<"low">, z.ZodLiteral<"medium">, z.ZodLiteral<"high">]>>;
      options: z.ZodOptional<z.ZodObject<{
          num_ctx: z.ZodOptional<z.ZodNumber>;
          repeat_last_n: z.ZodOptional<z.ZodNumber>;
@@ -29,7 +29,7 @@ index 8dd9b498050dbecd8dd6b901acf1aa8ca38a49af..ed644349c9d38fe2a66b2fb44214f7c1

  declare const ollamaCompletionProviderOptions: z.ZodObject<{
 -    think: z.ZodOptional<z.ZodBoolean>;
-+    think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodEnum<['low', 'medium', 'high']>]>>;
++    think: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodLiteral<"low">, z.ZodLiteral<"medium">, z.ZodLiteral<"high">]>>;
      user: z.ZodOptional<z.ZodString>;
      suffix: z.ZodOptional<z.ZodString>;
      echo: z.ZodOptional<z.ZodBoolean>;
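Because this commit edits a stored yarn patch, each hunk above carries two diff prefixes: the outer column is the commit's change, the inner one belongs to the patch itself. The substantive change is the spelling of the think schema: the previous patch revision used z.enum, the new one a union of z.literal calls. The commit message does not say why; at runtime both spellings accept exactly the same values, as this standalone zod sketch (not code from the repo) shows:

import { z } from 'zod'

// Two spellings of the same runtime validation for think.
const thinkAsEnum = z.union([z.boolean(), z.enum(['low', 'medium', 'high'])]).optional()
const thinkAsLiterals = z
  .union([z.boolean(), z.literal('low'), z.literal('medium'), z.literal('high')])
  .optional()

console.log(thinkAsEnum.safeParse('medium').success)     // true
console.log(thinkAsLiterals.safeParse('medium').success) // true
console.log(thinkAsLiterals.safeParse(true).success)     // true
console.log(thinkAsLiterals.safeParse('auto').success)   // false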
@@ -42,7 +42,7 @@ index 35b5142ce8476ce2549ed7c2ec48e7d8c46c90d9..2ef64dc9a4c2be043e6af608241a6a83
  // src/completion/ollama-completion-language-model.ts
  var ollamaCompletionProviderOptions = import_v42.z.object({
 -  think: import_v42.z.boolean().optional(),
-+  think: import_v42.z.union([import_v42.z.boolean(), import_v42.z.enum(['low', 'medium', 'high'])]).optional(),
++  think: import_v42.z.union([import_v42.z.boolean(), import_v42.z.literal('low'), import_v42.z.literal('medium'), import_v42.z.literal('high')]).optional(),
    user: import_v42.z.string().optional(),
    suffix: import_v42.z.string().optional(),
    echo: import_v42.z.boolean().optional()
@@ -64,7 +64,7 @@ index 35b5142ce8476ce2549ed7c2ec48e7d8c46c90d9..2ef64dc9a4c2be043e6af608241a6a83
    * Only supported by certain models like DeepSeek R1 and Qwen 3.
    */
 -  think: import_v44.z.boolean().optional(),
-+  think: import_v44.z.union([import_v44.z.boolean(), import_v44.z.enum(['low', 'medium', 'high'])]).optional(),
++  think: import_v44.z.union([import_v44.z.boolean(), import_v44.z.literal('low'), import_v44.z.literal('medium'), import_v44.z.literal('high')]).optional(),
    options: import_v44.z.object({
      num_ctx: import_v44.z.number().optional(),
      repeat_last_n: import_v44.z.number().optional(),
@@ -97,7 +97,7 @@ index e2a634a78d80ac9542f2cc4f96cf2291094b10cf..67b23efce3c1cf4f026693d3ff924698
  // src/completion/ollama-completion-language-model.ts
  var ollamaCompletionProviderOptions = z2.object({
 -  think: z2.boolean().optional(),
-+  think: z2.union([z2.boolean(), z2.enum(['low', 'medium', 'high'])]).optional(),
++  think: z2.union([z2.boolean(), z2.literal('low'), z2.literal('medium'), z2.literal('high')]).optional(),
    user: z2.string().optional(),
    suffix: z2.string().optional(),
    echo: z2.boolean().optional()
@@ -119,7 +119,7 @@ index e2a634a78d80ac9542f2cc4f96cf2291094b10cf..67b23efce3c1cf4f026693d3ff924698
    * Only supported by certain models like DeepSeek R1 and Qwen 3.
    */
 -  think: z4.boolean().optional(),
-+  think: z4.union([z4.boolean(), z4.enum(['low', 'medium', 'high'])]).optional(),
++  think: z4.union([z4.boolean(), z4.literal('low'), z4.literal('medium'), z4.literal('high')]).optional(),
    options: z4.object({
      num_ctx: z4.number().optional(),
      repeat_last_n: z4.number().optional(),
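The four hunks above apply the same change to the package's compiled CJS and ESM bundles. To see the behavior the patch fixes, here is a standalone before/after sketch of the schema (plain zod, consistent with the zod ^4 peer dependency pinned in the lockfile below; not code from the repo):

import { z } from 'zod'

// Before: the patched provider only allowed a boolean think.
const before = z.object({ think: z.boolean().optional() })

// After: boolean, or one of the gpt-oss reasoning effort levels.
const after = z.object({
  think: z
    .union([z.boolean(), z.literal('low'), z.literal('medium'), z.literal('high')])
    .optional()
})

console.log(before.safeParse({ think: 'high' }).success) // false: effort strings were rejected
console.log(after.safeParse({ think: 'high' }).success)  // true
console.log(after.safeParse({ think: true }).success)    // true: the boolean form still works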
@@ -580,8 +580,10 @@ function buildOllamaProviderOptions(
   const reasoningEffort = assistant.settings?.reasoning_effort
   if (enableReasoning) {
     if (isOpenAIOpenWeightModel(model)) {
-      // @ts-ignore upstream type error
-      providerOptions.think = reasoningEffort as any
+      // For gpt-oss models, Ollama accepts: 'low' | 'medium' | 'high'
+      if (reasoningEffort === 'low' || reasoningEffort === 'medium' || reasoningEffort === 'high') {
+        providerOptions.think = reasoningEffort
+      }
     } else {
       providerOptions.think = !['none', undefined].includes(reasoningEffort)
     }
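A condensed restatement of the new mapping as a self-contained sketch. The mapThink helper and its arguments are invented stand-ins for illustration; the real code branches on isOpenAIOpenWeightModel(model) and reads assistant.settings?.reasoning_effort, as shown above.

type ThinkValue = boolean | 'low' | 'medium' | 'high' | undefined

function mapThink(isGptOss: boolean, reasoningEffort?: string): ThinkValue {
  if (isGptOss) {
    // gpt-oss models: forward the effort only when it is one Ollama accepts;
    // anything else ('none', 'auto', undefined, ...) leaves think unset,
    // instead of blindly casting it through as the old code did.
    if (reasoningEffort === 'low' || reasoningEffort === 'medium' || reasoningEffort === 'high') {
      return reasoningEffort
    }
    return undefined
  }
  // All other reasoning-capable models: think stays a plain on/off flag.
  return reasoningEffort !== undefined && reasoningEffort !== 'none'
}

console.log(mapThink(true, 'high'))    // 'high'
console.log(mapThink(true, 'none'))    // undefined
console.log(mapThink(false, 'medium')) // true
console.log(mapThink(false, 'none'))   // false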
@@ -680,7 +680,12 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
     expect(getThinkModelType(createModel({ id: 'o3' }))).toBe('o')
     expect(getThinkModelType(createModel({ id: 'o3-mini' }))).toBe('o')
     expect(getThinkModelType(createModel({ id: 'o4' }))).toBe('o')
-    expect(getThinkModelType(createModel({ id: 'gpt-oss-reasoning' }))).toBe('o')
   })

+  it('should return gpt_oss for gpt-oss models', () => {
+    expect(getThinkModelType(createModel({ id: 'gpt-oss' }))).toBe('gpt_oss')
+    expect(getThinkModelType(createModel({ id: 'gpt-oss:20b' }))).toBe('gpt_oss')
+    expect(getThinkModelType(createModel({ id: 'gpt-oss-reasoning' }))).toBe('gpt_oss')
+  })
+
 })
@@ -1763,6 +1768,21 @@ describe('getModelSupportedReasoningEffortOptions', () => {
       'medium',
       'high'
     ])
   })

+  it('should return correct options for gpt-oss models', () => {
+    expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-oss' }))).toEqual([
+      'default',
+      'low',
+      'medium',
+      'high'
+    ])
+    expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-oss:20b' }))).toEqual([
+      'default',
+      'low',
+      'medium',
+      'high'
+    ])
+    expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-oss-reasoning' }))).toEqual([
+      'default',
+      'low',
+      'medium',
+      'high'
+    ])
+  })
@@ -17,6 +17,7 @@ import {
   isGPT52ProModel,
   isGPT52SeriesModel,
   isOpenAIDeepResearchModel,
+  isOpenAIOpenWeightModel,
   isOpenAIReasoningModel,
   isSupportedReasoningEffortOpenAIModel
 } from './openai'
@@ -41,6 +42,7 @@ export const MODEL_SUPPORTED_REASONING_EFFORT = {
   gpt5_2: ['none', 'low', 'medium', 'high', 'xhigh'] as const,
   gpt5pro: ['high'] as const,
   gpt52pro: ['medium', 'high', 'xhigh'] as const,
+  gpt_oss: ['low', 'medium', 'high'] as const,
   grok: ['low', 'high'] as const,
   grok4_fast: ['auto'] as const,
   gemini2_flash: ['low', 'medium', 'high', 'auto'] as const,
@@ -72,6 +74,7 @@ export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
   gpt5_2: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_2] as const,
   gpt5_1_codex_max: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex_max] as const,
   gpt52pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt52pro] as const,
+  gpt_oss: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt_oss] as const,
   grok: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.grok] as const,
   grok4_fast: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
   gemini2_flash: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini2_flash] as const,
@@ -127,6 +130,8 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
         thinkingModelType = 'gpt5pro'
       }
     }
+  } else if (isOpenAIOpenWeightModel(model)) {
+    thinkingModelType = 'gpt_oss'
   } else if (isSupportedReasoningEffortOpenAIModel(model)) {
     thinkingModelType = 'o'
   } else if (isGrok4FastReasoningModel(model)) {
@@ -92,6 +92,7 @@ const ThinkModelTypes = [
   'gpt5_2',
   'gpt5pro',
   'gpt52pro',
+  'gpt_oss',
   'grok',
   'grok4_fast',
   'gemini2_flash',
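Putting the two tables together: the tests above expect ['default', 'low', 'medium', 'high'] for gpt-oss models, which is exactly MODEL_SUPPORTED_OPTIONS.gpt_oss prepending 'default' to the raw effort list. A trimmed-down sketch of that relationship; the lookup path from a model through getThinkModelType to these tables is inferred from the tests, not shown in this diff:

// Trimmed stand-ins for the two tables touched above.
const MODEL_SUPPORTED_REASONING_EFFORT = {
  gpt_oss: ['low', 'medium', 'high']
} as const

const MODEL_SUPPORTED_OPTIONS = {
  gpt_oss: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt_oss]
} as const

// Matches the expectation in the test hunk:
// ['default', 'low', 'medium', 'high']
console.log(MODEL_SUPPORTED_OPTIONS.gpt_oss)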
@@ -20209,13 +20209,13 @@ __metadata:

 "ollama-ai-provider-v2@patch:ollama-ai-provider-v2@npm%3A1.5.5#~/.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch":
   version: 1.5.5
-  resolution: "ollama-ai-provider-v2@patch:ollama-ai-provider-v2@npm%3A1.5.5#~/.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch::version=1.5.5&hash=16c016"
+  resolution: "ollama-ai-provider-v2@patch:ollama-ai-provider-v2@npm%3A1.5.5#~/.yarn/patches/ollama-ai-provider-v2-npm-1.5.5-8bef249af9.patch::version=1.5.5&hash=0aef28"
   dependencies:
     "@ai-sdk/provider": "npm:^2.0.0"
     "@ai-sdk/provider-utils": "npm:^3.0.17"
   peerDependencies:
     zod: ^4.0.16
-  checksum: 10c0/aa6bd3415d08f7bbd1a3051f45b1cd3a8fa8bb01413e98de45e8888f64e6b12bca6e340453a3e82e4193ca5354397f524c6c0f7b3e9996d70f53c81374c69180
+  checksum: 10c0/32ca1f543ee791ac96061a5f6d8899c00644eeb774b3b951ca1e3e3810b60753acacf8229b2c1ba099b25a01732c54e51e0df44d11f4d90ae201f483d41aa149
   languageName: node
   linkType: hard