Mirror of https://github.com/CherryHQ/cherry-studio.git, synced 2025-12-27 12:51:26 +08:00
fix: inaccurate temperature param (#5973)

* fix: inaccurate temperature param
* fix: enhance model support check for reasoning and web search models
This commit is contained in:
parent c3f3fe5b79
commit 8f7c5eed75
@@ -2386,6 +2386,18 @@ export function isSupportedModel(model: OpenAI.Models.Model): boolean {
   return !NOT_SUPPORTED_REGEX.test(model.id)
 }
 
+export function isNotSupportTemperatureAndTopP(model: Model): boolean {
+  if (!model) {
+    return true
+  }
+
+  if (isOpenAIReasoningModel(model) || isOpenAIWebSearch(model)) {
+    return true
+  }
+
+  return false
+}
+
 export function isWebSearchModel(model: Model): boolean {
   if (!model) {
     return false
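The helper above gives the renderer one predicate for "this model ignores temperature/top_p". A minimal sketch of the intended behavior, assuming isOpenAIReasoningModel matches o-series ids; the model literals below are illustrative, not taken from the repo:

// Hypothetical models — the ids and field shapes here are assumptions for illustration.
const o1: Model = { id: 'o1-preview', provider: 'openai', name: 'o1-preview', group: 'o1' }
const gpt4o: Model = { id: 'gpt-4o', provider: 'openai', name: 'GPT-4o', group: 'gpt-4o' }

isNotSupportTemperatureAndTopP(o1)    // true  — sampling params get stripped
isNotSupportTemperatureAndTopP(gpt4o) // false — assistant settings pass through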
@@ -13,7 +13,7 @@ import {
   WebSearchToolResultError
 } from '@anthropic-ai/sdk/resources'
 import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
-import { isReasoningModel, isWebSearchModel } from '@renderer/config/models'
+import { isClaudeReasoningModel, isReasoningModel, isWebSearchModel } from '@renderer/config/models'
 import { getStoreSetting } from '@renderer/hooks/useSettings'
 import i18n from '@renderer/i18n'
 import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/AssistantService'
@@ -152,24 +152,18 @@ export default class AnthropicProvider extends BaseProvider {
     } as WebSearchTool20250305
   }
 
-  /**
-   * Get the temperature
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The temperature
-   */
-  private getTemperature(assistant: Assistant, model: Model) {
-    return isReasoningModel(model) ? undefined : assistant?.settings?.temperature
+  override getTemperature(assistant: Assistant, model: Model): number | undefined {
+    if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
+      return undefined
+    }
+    return assistant.settings?.temperature
   }
 
-  /**
-   * Get the top P
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The top P
-   */
-  private getTopP(assistant: Assistant, model: Model) {
-    return isReasoningModel(model) ? undefined : assistant?.settings?.topP
+  override getTopP(assistant: Assistant, model: Model): number | undefined {
+    if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
+      return undefined
+    }
+    return assistant.settings?.topP
   }
 
   /**
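The Anthropic override now keys off reasoning_effort rather than the model family alone: Anthropic's API rejects temperature/top_p modifications while extended thinking is enabled, but the same model with thinking off can still accept them. A hedged sketch — claudeThinkingModel stands in for any model isClaudeReasoningModel accepts, and the casts are for brevity:

const withThinking = { settings: { reasoning_effort: 'high', temperature: 0.7 } } as Assistant
const withoutThinking = { settings: { temperature: 0.7 } } as Assistant

provider.getTemperature(withThinking, claudeThinkingModel)    // undefined — thinking is on
provider.getTemperature(withoutThinking, claudeThinkingModel) // 0.7 — plain request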
@@ -1,5 +1,5 @@
 import Logger from '@renderer/config/logger'
-import { isFunctionCallingModel } from '@renderer/config/models'
+import { isFunctionCallingModel, isNotSupportTemperatureAndTopP } from '@renderer/config/models'
 import { REFERENCE_PROMPT } from '@renderer/config/prompts'
 import { getLMStudioKeepAliveTime } from '@renderer/hooks/useLMStudio'
 import type {
@@ -103,6 +103,14 @@ export default abstract class BaseProvider {
     return this.provider.id === 'lmstudio' ? getLMStudioKeepAliveTime() : undefined
   }
 
+  public getTemperature(assistant: Assistant, model: Model): number | undefined {
+    return isNotSupportTemperatureAndTopP(model) ? undefined : assistant.settings?.temperature
+  }
+
+  public getTopP(assistant: Assistant, model: Model): number | undefined {
+    return isNotSupportTemperatureAndTopP(model) ? undefined : assistant.settings?.topP
+  }
+
   public async fakeCompletions({ onChunk }: CompletionsParams) {
     for (let i = 0; i < 100; i++) {
       await delay(0.01)
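Hoisting the gate into BaseProvider means every provider inherits one shared rule and overrides it only when its API differs, as AnthropicProvider and OpenAIProvider do in this commit. A sketch of a subclass leaning on the default — ExampleProvider is hypothetical and its other abstract members are elided:

class ExampleProvider extends BaseProvider {
  // ...required abstract members omitted for brevity...
  private buildSamplingParams(assistant: Assistant, model: Model) {
    return {
      temperature: this.getTemperature(assistant, model), // undefined for unsupported models
      top_p: this.getTopP(assistant, model)
    }
  }
}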
@@ -379,8 +379,8 @@ export default class GeminiProvider extends BaseProvider {
       safetySettings: this.getSafetySettings(),
       // generate image don't need system instruction
       systemInstruction: isGemmaModel(model) ? undefined : systemInstruction,
-      temperature: assistant?.settings?.temperature,
-      topP: assistant?.settings?.topP,
+      temperature: this.getTemperature(assistant, model),
+      topP: this.getTopP(assistant, model),
       maxOutputTokens: maxTokens,
       tools: tools,
       ...this.getBudgetToken(assistant, model),
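The Gemini call site simply defers to the shared helpers; when they return undefined, the key disappears on serialization and the API falls back to its own default, since JSON.stringify drops undefined-valued properties:

JSON.stringify({ temperature: undefined, topP: 0.9, maxOutputTokens: 1024 })
// -> '{"topP":0.9,"maxOutputTokens":1024}'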
@@ -1,9 +1,9 @@
 import {
   findTokenLimit,
   getOpenAIWebSearchParams,
+  isClaudeReasoningModel,
   isHunyuanSearchModel,
   isOpenAIReasoningModel,
   isOpenAIWebSearch,
-  isReasoningModel,
   isSupportedModel,
   isSupportedReasoningEffortGrokModel,
@@ -192,14 +192,18 @@ export default class OpenAIProvider extends BaseOpenAIProvider {
     } as ChatCompletionMessageParam
   }
 
-  /**
-   * Get the temperature for the assistant
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The temperature
-   */
-  override getTemperature(assistant: Assistant, model: Model) {
-    return isReasoningModel(model) || isOpenAIWebSearch(model) ? undefined : assistant?.settings?.temperature
+  override getTemperature(assistant: Assistant, model: Model): number | undefined {
+    if (isOpenAIReasoningModel(model) || (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model))) {
+      return undefined
+    }
+    return assistant.settings?.temperature
   }
 
+  override getTopP(assistant: Assistant, model: Model): number | undefined {
+    if (isOpenAIReasoningModel(model) || (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model))) {
+      return undefined
+    }
+    return assistant.settings?.topP
+  }
+
   /**
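This override is the substance of the "inaccurate temperature param" fix: the old ternary stripped temperature from every isReasoningModel, so any reasoning-capable model routed through the OpenAI provider (a DeepSeek-R1 deployment, say, or a Claude model behind an OpenAI-compatible gateway) lost its temperature even with thinking disabled. Now only genuine OpenAI reasoning models, or Claude reasoning models with reasoning_effort set, are exempted. A sketch with placeholder models — claudeThinkingModel and o3MiniModel are illustrative names, not identifiers from the repo:

const noThinking = { settings: { temperature: 0.6 } } as Assistant

provider.getTemperature(noThinking, claudeThinkingModel) // 0.6 — kept, where the old code returned undefined
provider.getTemperature(noThinking, o3MiniModel)         // undefined — o-series still drops it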
@@ -229,20 +233,6 @@ export default class OpenAIProvider extends BaseOpenAIProvider {
     return {}
   }
 
-  /**
-   * Get the top P for the assistant
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The top P
-   */
-  override getTopP(assistant: Assistant, model: Model) {
-    if (isReasoningModel(model) || isOpenAIWebSearch(model)) {
-      return undefined
-    }
-
-    return assistant?.settings?.topP
-  }
-
   /**
    * Get the reasoning effort for the assistant
    * @param assistant - The assistant
@@ -191,26 +191,6 @@ export abstract class BaseOpenAIProvider extends BaseProvider {
     return 5 * 1000 * 60
   }
 
-  /**
-   * Get the temperature for the assistant
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The temperature
-   */
-  protected getTemperature(assistant: Assistant, model: Model) {
-    return isOpenAIReasoningModel(model) || isOpenAILLMModel(model) ? undefined : assistant?.settings?.temperature
-  }
-
-  /**
-   * Get the top P for the assistant
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The top P
-   */
-  protected getTopP(assistant: Assistant, model: Model) {
-    return isOpenAIReasoningModel(model) || isOpenAILLMModel(model) ? undefined : assistant?.settings?.topP
-  }
-
   private getResponseReasoningEffort(assistant: Assistant, model: Model) {
     if (!isSupportedReasoningEffortOpenAIModel(model)) {
       return {}