diff --git a/src/renderer/src/config/models.ts b/src/renderer/src/config/models.ts
index 5fe9b44525..be66317d88 100644
--- a/src/renderer/src/config/models.ts
+++ b/src/renderer/src/config/models.ts
@@ -2386,6 +2386,18 @@ export function isSupportedModel(model: OpenAI.Models.Model): boolean {
   return !NOT_SUPPORTED_REGEX.test(model.id)
 }
 
+export function isNotSupportTemperatureAndTopP(model: Model): boolean {
+  if (!model) {
+    return true
+  }
+
+  if (isOpenAIReasoningModel(model) || isOpenAIWebSearch(model)) {
+    return true
+  }
+
+  return false
+}
+
 export function isWebSearchModel(model: Model): boolean {
   if (!model) {
     return false
diff --git a/src/renderer/src/providers/AiProvider/AnthropicProvider.ts b/src/renderer/src/providers/AiProvider/AnthropicProvider.ts
index 79e7207e72..51f2eaff13 100644
--- a/src/renderer/src/providers/AiProvider/AnthropicProvider.ts
+++ b/src/renderer/src/providers/AiProvider/AnthropicProvider.ts
@@ -13,7 +13,7 @@ import {
   WebSearchToolResultError
 } from '@anthropic-ai/sdk/resources'
 import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
-import { isReasoningModel, isWebSearchModel } from '@renderer/config/models'
+import { isClaudeReasoningModel, isReasoningModel, isWebSearchModel } from '@renderer/config/models'
 import { getStoreSetting } from '@renderer/hooks/useSettings'
 import i18n from '@renderer/i18n'
 import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/AssistantService'
@@ -152,24 +152,18 @@ export default class AnthropicProvider extends BaseProvider {
     } as WebSearchTool20250305
   }
 
-  /**
-   * Get the temperature
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The temperature
-   */
-  private getTemperature(assistant: Assistant, model: Model) {
-    return isReasoningModel(model) ? undefined : assistant?.settings?.temperature
+  override getTemperature(assistant: Assistant, model: Model): number | undefined {
+    if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
+      return undefined
+    }
+    return assistant.settings?.temperature
   }
 
-  /**
-   * Get the top P
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The top P
-   */
-  private getTopP(assistant: Assistant, model: Model) {
-    return isReasoningModel(model) ? undefined : assistant?.settings?.topP
+  override getTopP(assistant: Assistant, model: Model): number | undefined {
+    if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
+      return undefined
+    }
+    return assistant.settings?.topP
   }
 
   /**
diff --git a/src/renderer/src/providers/AiProvider/BaseProvider.ts b/src/renderer/src/providers/AiProvider/BaseProvider.ts
index 5773550022..48c1e34839 100644
--- a/src/renderer/src/providers/AiProvider/BaseProvider.ts
+++ b/src/renderer/src/providers/AiProvider/BaseProvider.ts
@@ -1,5 +1,5 @@
 import Logger from '@renderer/config/logger'
-import { isFunctionCallingModel } from '@renderer/config/models'
+import { isFunctionCallingModel, isNotSupportTemperatureAndTopP } from '@renderer/config/models'
 import { REFERENCE_PROMPT } from '@renderer/config/prompts'
 import { getLMStudioKeepAliveTime } from '@renderer/hooks/useLMStudio'
 import type {
@@ -103,6 +103,14 @@ export default abstract class BaseProvider {
     return this.provider.id === 'lmstudio' ? getLMStudioKeepAliveTime() : undefined
   }
 
+  public getTemperature(assistant: Assistant, model: Model): number | undefined {
+    return isNotSupportTemperatureAndTopP(model) ? undefined : assistant.settings?.temperature
+  }
+
+  public getTopP(assistant: Assistant, model: Model): number | undefined {
+    return isNotSupportTemperatureAndTopP(model) ? undefined : assistant.settings?.topP
+  }
+
   public async fakeCompletions({ onChunk }: CompletionsParams) {
     for (let i = 0; i < 100; i++) {
       await delay(0.01)
diff --git a/src/renderer/src/providers/AiProvider/GeminiProvider.ts b/src/renderer/src/providers/AiProvider/GeminiProvider.ts
index b329116a7b..03656645a2 100644
--- a/src/renderer/src/providers/AiProvider/GeminiProvider.ts
+++ b/src/renderer/src/providers/AiProvider/GeminiProvider.ts
@@ -379,8 +379,8 @@ export default class GeminiProvider extends BaseProvider {
       safetySettings: this.getSafetySettings(),
       // generate image don't need system instruction
       systemInstruction: isGemmaModel(model) ? undefined : systemInstruction,
-      temperature: assistant?.settings?.temperature,
-      topP: assistant?.settings?.topP,
+      temperature: this.getTemperature(assistant, model),
+      topP: this.getTopP(assistant, model),
       maxOutputTokens: maxTokens,
       tools: tools,
       ...this.getBudgetToken(assistant, model),
diff --git a/src/renderer/src/providers/AiProvider/OpenAIProvider.ts b/src/renderer/src/providers/AiProvider/OpenAIProvider.ts
index 81a9fa9caf..ab48778158 100644
--- a/src/renderer/src/providers/AiProvider/OpenAIProvider.ts
+++ b/src/renderer/src/providers/AiProvider/OpenAIProvider.ts
@@ -1,9 +1,9 @@
 import {
   findTokenLimit,
   getOpenAIWebSearchParams,
+  isClaudeReasoningModel,
   isHunyuanSearchModel,
   isOpenAIReasoningModel,
-  isOpenAIWebSearch,
   isReasoningModel,
   isSupportedModel,
   isSupportedReasoningEffortGrokModel,
@@ -192,14 +192,18 @@ export default class OpenAIProvider extends BaseOpenAIProvider {
     } as ChatCompletionMessageParam
   }
 
-  /**
-   * Get the temperature for the assistant
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The temperature
-   */
-  override getTemperature(assistant: Assistant, model: Model) {
-    return isReasoningModel(model) || isOpenAIWebSearch(model) ? undefined : assistant?.settings?.temperature
+  override getTemperature(assistant: Assistant, model: Model): number | undefined {
+    if (isOpenAIReasoningModel(model) || (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model))) {
+      return undefined
+    }
+    return assistant.settings?.temperature
+  }
+
+  override getTopP(assistant: Assistant, model: Model): number | undefined {
+    if (isOpenAIReasoningModel(model) || (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model))) {
+      return undefined
+    }
+    return assistant.settings?.topP
   }
 
   /**
@@ -229,20 +233,6 @@ export default class OpenAIProvider extends BaseOpenAIProvider {
     return {}
   }
 
-  /**
-   * Get the top P for the assistant
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The top P
-   */
-  override getTopP(assistant: Assistant, model: Model) {
-    if (isReasoningModel(model) || isOpenAIWebSearch(model)) {
-      return undefined
-    }
-
-    return assistant?.settings?.topP
-  }
-
   /**
    * Get the reasoning effort for the assistant
    * @param assistant - The assistant
diff --git a/src/renderer/src/providers/AiProvider/OpenAIResponseProvider.ts b/src/renderer/src/providers/AiProvider/OpenAIResponseProvider.ts
index 6cac0d0ab5..4d9a6f57bf 100644
--- a/src/renderer/src/providers/AiProvider/OpenAIResponseProvider.ts
+++ b/src/renderer/src/providers/AiProvider/OpenAIResponseProvider.ts
@@ -191,26 +191,6 @@ export abstract class BaseOpenAIProvider extends BaseProvider {
     return 5 * 1000 * 60
   }
 
-  /**
-   * Get the temperature for the assistant
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The temperature
-   */
-  protected getTemperature(assistant: Assistant, model: Model) {
-    return isOpenAIReasoningModel(model) || isOpenAILLMModel(model) ? undefined : assistant?.settings?.temperature
-  }
-
-  /**
-   * Get the top P for the assistant
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The top P
-   */
-  protected getTopP(assistant: Assistant, model: Model) {
-    return isOpenAIReasoningModel(model) || isOpenAILLMModel(model) ? undefined : assistant?.settings?.topP
-  }
-
   private getResponseReasoningEffort(assistant: Assistant, model: Model) {
     if (!isSupportedReasoningEffortOpenAIModel(model)) {
       return {}
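For review context, the net shape of the change: `BaseProvider` now owns a shared default for `getTemperature`/`getTopP`, gated by the new `isNotSupportTemperatureAndTopP` guard, and `AnthropicProvider`/`OpenAIProvider` override it with provider-specific rules (sampling params are dropped only when `reasoning_effort` is set on a Claude reasoning model, or unconditionally for OpenAI reasoning models). A minimal sketch of the resulting dispatch, using trimmed-down stand-ins for the app's `Assistant`/`Model` types and hypothetical id checks in place of the real guards exported from `@renderer/config/models`:

```ts
// Reviewer sketch, not code from this PR: simplified types and placeholder guards.
interface Model { id: string }
interface Assistant { settings?: { temperature?: number; topP?: number; reasoning_effort?: string } }

// Hypothetical id checks standing in for the real exported guards.
const isNotSupportTemperatureAndTopP = (model: Model): boolean => /^o\d/.test(model.id)
const isClaudeReasoningModel = (model: Model): boolean => model.id.startsWith('claude-3-7')

abstract class BaseProvider {
  // Shared default: omit the parameter entirely when the model rejects it.
  getTemperature(assistant: Assistant, model: Model): number | undefined {
    return isNotSupportTemperatureAndTopP(model) ? undefined : assistant.settings?.temperature
  }
  getTopP(assistant: Assistant, model: Model): number | undefined {
    return isNotSupportTemperatureAndTopP(model) ? undefined : assistant.settings?.topP
  }
}

class AnthropicProvider extends BaseProvider {
  // Override: drop temperature only while extended thinking is enabled on a Claude reasoning model.
  override getTemperature(assistant: Assistant, model: Model): number | undefined {
    if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
      return undefined
    }
    return assistant.settings?.temperature
  }
}

const assistant: Assistant = { settings: { temperature: 0.7, reasoning_effort: 'high' } }
const anthropic = new AnthropicProvider()
console.log(anthropic.getTemperature(assistant, { id: 'claude-3-7-sonnet' })) // undefined (thinking on)
console.log(anthropic.getTemperature(assistant, { id: 'claude-3-5-haiku' }))  // 0.7 (passes through)
```

Note that `GeminiProvider` needs no override of its own: switching its call sites to `this.getTemperature(...)`/`this.getTopP(...)` is enough to pick up the base-class guard.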