Mirror of https://github.com/CherryHQ/cherry-studio.git (synced 2026-01-06 05:09:09 +08:00)
fix: inaccurate temperature param (#5973)
* fix: inaccurate temperature param
* fix: enhance model support check for reasoning and web search models
This commit is contained in:
parent 1bc6571d7d
commit 85f2f454c4
@@ -2386,6 +2386,18 @@ export function isSupportedModel(model: OpenAI.Models.Model): boolean {
   return !NOT_SUPPORTED_REGEX.test(model.id)
 }
 
+export function isNotSupportTemperatureAndTopP(model: Model): boolean {
+  if (!model) {
+    return true
+  }
+
+  if (isOpenAIReasoningModel(model) || isOpenAIWebSearch(model)) {
+    return true
+  }
+
+  return false
+}
+
 export function isWebSearchModel(model: Model): boolean {
   if (!model) {
     return false
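The new helper is the single gate for "does this model accept sampling params at all". A minimal sketch of the intended behavior, with hypothetical model instances (the ids below are illustrative and not part of this diff; the real Model type lives in @renderer/types):

// Sketch only: Model reduced to the field the predicates inspect.
// OpenAI reasoning models and OpenAI web-search models reject temperature/top_p,
// so the helper answers true ("not supported") and callers send undefined instead.
const reasoning = { id: 'o1-mini' } as unknown as Model // hypothetical instance
const chat = { id: 'gpt-4o' } as unknown as Model       // hypothetical instance

isNotSupportTemperatureAndTopP(reasoning) // true  -> omit temperature/topP
isNotSupportTemperatureAndTopP(chat)      // false -> pass assistant settings through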
@@ -13,7 +13,7 @@ import {
   WebSearchToolResultError
 } from '@anthropic-ai/sdk/resources'
 import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
-import { isReasoningModel, isWebSearchModel } from '@renderer/config/models'
+import { isClaudeReasoningModel, isReasoningModel, isWebSearchModel } from '@renderer/config/models'
 import { getStoreSetting } from '@renderer/hooks/useSettings'
 import i18n from '@renderer/i18n'
 import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/AssistantService'
@@ -152,24 +152,18 @@ export default class AnthropicProvider extends BaseProvider {
     } as WebSearchTool20250305
   }
 
-  /**
-   * Get the temperature
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The temperature
-   */
-  private getTemperature(assistant: Assistant, model: Model) {
-    return isReasoningModel(model) ? undefined : assistant?.settings?.temperature
+  override getTemperature(assistant: Assistant, model: Model): number | undefined {
+    if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
+      return undefined
+    }
+    return assistant.settings?.temperature
   }
 
-  /**
-   * Get the top P
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The top P
-   */
-  private getTopP(assistant: Assistant, model: Model) {
-    return isReasoningModel(model) ? undefined : assistant?.settings?.topP
+  override getTopP(assistant: Assistant, model: Model): number | undefined {
+    if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
+      return undefined
+    }
+    return assistant.settings?.topP
   }
 
   /**
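The rewritten Anthropic getters drop the sampling params only when extended thinking is actually requested (reasoning_effort set on a Claude reasoning model); Anthropic's API disallows temperature/top_p adjustments while thinking is enabled, so the overrides return undefined rather than a value the API would reject. A hedged sketch of the resulting request body (the model id and budget are placeholders, not from this diff):

// Sketch only: which fields the overrides gate when reasoning_effort is set.
const body: Record<string, unknown> = {
  model: 'claude-3-7-sonnet-20250219', // placeholder Claude reasoning model id
  max_tokens: 4096,
  thinking: { type: 'enabled', budget_tokens: 4096 },
  messages: [{ role: 'user', content: 'hello' }]
  // temperature / top_p intentionally absent: both overrides return undefined,
  // so the serialized request never carries a value Anthropic would refuse.
}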
@@ -1,5 +1,5 @@
 import Logger from '@renderer/config/logger'
-import { isFunctionCallingModel } from '@renderer/config/models'
+import { isFunctionCallingModel, isNotSupportTemperatureAndTopP } from '@renderer/config/models'
 import { REFERENCE_PROMPT } from '@renderer/config/prompts'
 import { getLMStudioKeepAliveTime } from '@renderer/hooks/useLMStudio'
 import type {
@@ -103,6 +103,14 @@ export default abstract class BaseProvider {
     return this.provider.id === 'lmstudio' ? getLMStudioKeepAliveTime() : undefined
   }
 
+  public getTemperature(assistant: Assistant, model: Model): number | undefined {
+    return isNotSupportTemperatureAndTopP(model) ? undefined : assistant.settings?.temperature
+  }
+
+  public getTopP(assistant: Assistant, model: Model): number | undefined {
+    return isNotSupportTemperatureAndTopP(model) ? undefined : assistant.settings?.topP
+  }
+
   public async fakeCompletions({ onChunk }: CompletionsParams) {
     for (let i = 0; i < 100; i++) {
       await delay(0.01)
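With these defaults on BaseProvider, a provider with no special rules no longer needs its own getters: it calls the shared ones and relies on undefined being dropped during JSON serialization, so the field is simply omitted from the request. A sketch of the generic call site (parameter names are illustrative):

// Sketch: inside a subclass's completions() implementation.
const sampling = {
  temperature: this.getTemperature(assistant, model), // undefined -> field omitted
  top_p: this.getTopP(assistant, model)               // undefined -> field omitted
}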
@@ -379,8 +379,8 @@ export default class GeminiProvider extends BaseProvider {
       safetySettings: this.getSafetySettings(),
       // generate image don't need system instruction
       systemInstruction: isGemmaModel(model) ? undefined : systemInstruction,
-      temperature: assistant?.settings?.temperature,
-      topP: assistant?.settings?.topP,
+      temperature: this.getTemperature(assistant, model),
+      topP: this.getTopP(assistant, model),
       maxOutputTokens: maxTokens,
       tools: tools,
       ...this.getBudgetToken(assistant, model),
@@ -1,9 +1,9 @@
 import {
   findTokenLimit,
   getOpenAIWebSearchParams,
+  isClaudeReasoningModel,
   isHunyuanSearchModel,
   isOpenAIReasoningModel,
-  isOpenAIWebSearch,
   isReasoningModel,
   isSupportedModel,
   isSupportedReasoningEffortGrokModel,
@@ -192,14 +192,18 @@ export default class OpenAIProvider extends BaseOpenAIProvider {
     } as ChatCompletionMessageParam
   }
 
-  /**
-   * Get the temperature for the assistant
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The temperature
-   */
-  override getTemperature(assistant: Assistant, model: Model) {
-    return isReasoningModel(model) || isOpenAIWebSearch(model) ? undefined : assistant?.settings?.temperature
+  override getTemperature(assistant: Assistant, model: Model): number | undefined {
+    if (isOpenAIReasoningModel(model) || (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model))) {
+      return undefined
+    }
+    return assistant.settings?.temperature
+  }
+
+  override getTopP(assistant: Assistant, model: Model): number | undefined {
+    if (isOpenAIReasoningModel(model) || (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model))) {
+      return undefined
+    }
+    return assistant.settings?.topP
   }
 
   /**
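For OpenAI the gate is unconditional on the reasoning (o-series) models, which error when temperature or top_p is supplied; the Claude branch presumably covers Claude reasoning models served through OpenAI-compatible endpoints. A sketch of the expected branch outcomes (the provider instance and all ids are hypothetical):

// Sketch: exercising the override on a hypothetical OpenAIProvider instance.
const a = { settings: { temperature: 0.7, topP: 0.9, reasoning_effort: 'high' } } as unknown as Assistant
const m = (id: string) => ({ id }) as unknown as Model

provider.getTemperature(a, m('o3-mini'))           // undefined: OpenAI reasoning model
provider.getTemperature(a, m('claude-3-7-sonnet')) // undefined: Claude model + reasoning_effort set
provider.getTemperature(a, m('gpt-4o'))            // 0.7: settings pass through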
@@ -229,20 +233,6 @@ export default class OpenAIProvider extends BaseOpenAIProvider {
     return {}
   }
 
-  /**
-   * Get the top P for the assistant
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The top P
-   */
-  override getTopP(assistant: Assistant, model: Model) {
-    if (isReasoningModel(model) || isOpenAIWebSearch(model)) {
-      return undefined
-    }
-
-    return assistant?.settings?.topP
-  }
-
   /**
    * Get the reasoning effort for the assistant
    * @param assistant - The assistant
@@ -191,26 +191,6 @@ export abstract class BaseOpenAIProvider extends BaseProvider {
     return 5 * 1000 * 60
   }
 
-  /**
-   * Get the temperature for the assistant
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The temperature
-   */
-  protected getTemperature(assistant: Assistant, model: Model) {
-    return isOpenAIReasoningModel(model) || isOpenAILLMModel(model) ? undefined : assistant?.settings?.temperature
-  }
-
-  /**
-   * Get the top P for the assistant
-   * @param assistant - The assistant
-   * @param model - The model
-   * @returns The top P
-   */
-  protected getTopP(assistant: Assistant, model: Model) {
-    return isOpenAIReasoningModel(model) || isOpenAILLMModel(model) ? undefined : assistant?.settings?.topP
-  }
-
   private getResponseReasoningEffort(assistant: Assistant, model: Model) {
     if (!isSupportedReasoningEffortOpenAIModel(model)) {
       return {}