Mirror of https://github.com/CherryHQ/cherry-studio.git
Synced 2025-12-19 06:30:10 +08:00
fix: use function as default tool use mode (#11338)
* refactor(assistant): change the default tool use mode to 'function' and use the shared default settings. Simplifies the reset logic by using the DEFAULT_ASSISTANT_SETTINGS object instead of hardcoded values.
* fix(ApiService): safely fall back to prompt tool use for unsupported models. Checks for function calling support before using function tool use mode, preventing errors with models that do not support it.
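Net effect of the two changes, as a standalone TypeScript sketch derived from the diffs below (effectiveToolUse and its parameter names are illustrative, not repository code):

```ts
type ToolUseMode = 'function' | 'prompt'

// Sketch of the effective behavior after this commit; names are illustrative.
function effectiveToolUse(saved: ToolUseMode | undefined, modelSupportsFunctionCalling: boolean): ToolUseMode {
  const mode = saved ?? 'function' // the default changed from 'prompt' to 'function'
  // Function mode silently degrades to prompt-based tool use on models
  // without native function calling support.
  return mode === 'function' && !modelSupportsFunctionCalling ? 'prompt' : mode
}

console.assert(effectiveToolUse('prompt', true) === 'prompt') // explicit choice is preserved
console.assert(effectiveToolUse(undefined, false) === 'prompt') // safe fallback
console.assert(effectiveToolUse(undefined, true) === 'function') // new default
```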
Parent: 096c36caf8
Commit: 34723934f4
AssistantModelSettings.tsx
@@ -9,6 +9,7 @@ import { DEFAULT_CONTEXTCOUNT, DEFAULT_TEMPERATURE, MAX_CONTEXT_COUNT } from '@r
 import { isEmbeddingModel, isRerankModel } from '@renderer/config/models'
 import { useTimer } from '@renderer/hooks/useTimer'
 import { SettingRow } from '@renderer/pages/settings'
+import { DEFAULT_ASSISTANT_SETTINGS } from '@renderer/services/AssistantService'
 import type { Assistant, AssistantSettingCustomParameters, AssistantSettings, Model } from '@renderer/types'
 import { modalConfirm } from '@renderer/utils'
 import { Button, Col, Divider, Input, InputNumber, Row, Select, Slider, Switch, Tooltip } from 'antd'
@@ -31,7 +32,9 @@ const AssistantModelSettings: FC<Props> = ({ assistant, updateAssistant, updateA
   const [enableMaxTokens, setEnableMaxTokens] = useState(assistant?.settings?.enableMaxTokens ?? false)
   const [maxTokens, setMaxTokens] = useState(assistant?.settings?.maxTokens ?? 0)
   const [streamOutput, setStreamOutput] = useState(assistant?.settings?.streamOutput)
-  const [toolUseMode, setToolUseMode] = useState(assistant?.settings?.toolUseMode ?? 'prompt')
+  const [toolUseMode, setToolUseMode] = useState<AssistantSettings['toolUseMode']>(
+    assistant?.settings?.toolUseMode ?? 'function'
+  )
   const [defaultModel, setDefaultModel] = useState(assistant?.defaultModel)
   const [topP, setTopP] = useState(assistant?.settings?.topP ?? 1)
   const [enableTopP, setEnableTopP] = useState(assistant?.settings?.enableTopP ?? false)
@@ -158,28 +161,17 @@ const AssistantModelSettings: FC<Props> = ({ assistant, updateAssistant, updateA
   }
 
   const onReset = () => {
-    setTemperature(DEFAULT_TEMPERATURE)
-    setEnableTemperature(true)
-    setContextCount(DEFAULT_CONTEXTCOUNT)
-    setEnableMaxTokens(false)
-    setMaxTokens(0)
-    setStreamOutput(true)
-    setTopP(1)
-    setEnableTopP(false)
-    setCustomParameters([])
-    setToolUseMode('prompt')
-    updateAssistantSettings({
-      temperature: DEFAULT_TEMPERATURE,
-      enableTemperature: true,
-      contextCount: DEFAULT_CONTEXTCOUNT,
-      enableMaxTokens: false,
-      maxTokens: 0,
-      streamOutput: true,
-      topP: 1,
-      enableTopP: false,
-      customParameters: [],
-      toolUseMode: 'prompt'
-    })
+    setTemperature(DEFAULT_ASSISTANT_SETTINGS.temperature)
+    setEnableTemperature(DEFAULT_ASSISTANT_SETTINGS.enableTemperature ?? true)
+    setContextCount(DEFAULT_ASSISTANT_SETTINGS.contextCount)
+    setEnableMaxTokens(DEFAULT_ASSISTANT_SETTINGS.enableMaxTokens ?? false)
+    setMaxTokens(DEFAULT_ASSISTANT_SETTINGS.maxTokens ?? 0)
+    setStreamOutput(DEFAULT_ASSISTANT_SETTINGS.streamOutput)
+    setTopP(DEFAULT_ASSISTANT_SETTINGS.topP)
+    setEnableTopP(DEFAULT_ASSISTANT_SETTINGS.enableTopP ?? false)
+    setCustomParameters(DEFAULT_ASSISTANT_SETTINGS.customParameters ?? [])
+    setToolUseMode(DEFAULT_ASSISTANT_SETTINGS.toolUseMode)
+    updateAssistantSettings(DEFAULT_ASSISTANT_SETTINGS)
   }
   const modelFilter = (model: Model) => !isEmbeddingModel(model) && !isRerankModel(model)
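The reset handler now derives every value from DEFAULT_ASSISTANT_SETTINGS, so a new default only needs to be added in one place. A minimal standalone sketch of the pattern, with the field list abridged and applyToStore as a hypothetical stand-in for updateAssistantSettings:

```ts
// Abridged sketch of the reset-from-defaults pattern; not the project's code.
interface AssistantSettings {
  temperature: number
  streamOutput: boolean
  toolUseMode: 'function' | 'prompt'
}

const DEFAULT_ASSISTANT_SETTINGS: AssistantSettings = {
  temperature: 1, // illustrative; the real value comes from DEFAULT_TEMPERATURE
  streamOutput: true,
  toolUseMode: 'function'
}

function onReset(
  setters: { [K in keyof AssistantSettings]: (value: AssistantSettings[K]) => void },
  applyToStore: (settings: AssistantSettings) => void
) {
  // Mirror each default into local UI state, then persist the whole object,
  // so the two can never drift apart.
  setters.temperature(DEFAULT_ASSISTANT_SETTINGS.temperature)
  setters.streamOutput(DEFAULT_ASSISTANT_SETTINGS.streamOutput)
  setters.toolUseMode(DEFAULT_ASSISTANT_SETTINGS.toolUseMode)
  applyToStore({ ...DEFAULT_ASSISTANT_SETTINGS })
}
```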
ApiService.ts
@@ -6,7 +6,7 @@ import AiProvider from '@renderer/aiCore'
 import type { CompletionsParams } from '@renderer/aiCore/legacy/middleware/schemas'
 import type { AiSdkMiddlewareConfig } from '@renderer/aiCore/middleware/AiSdkMiddlewareBuilder'
 import { buildStreamTextParams } from '@renderer/aiCore/prepareParams'
-import { isDedicatedImageGenerationModel, isEmbeddingModel } from '@renderer/config/models'
+import { isDedicatedImageGenerationModel, isEmbeddingModel, isFunctionCallingModel } from '@renderer/config/models'
 import { getStoreSetting } from '@renderer/hooks/useSettings'
 import i18n from '@renderer/i18n'
 import store from '@renderer/store'
@@ -18,6 +18,7 @@ import type { Message } from '@renderer/types/newMessage'
 import type { SdkModel } from '@renderer/types/sdk'
 import { removeSpecialCharactersForTopicName, uuid } from '@renderer/utils'
 import { abortCompletion, readyToAbort } from '@renderer/utils/abortController'
+import { isToolUseModeFunction } from '@renderer/utils/assistant'
 import { isAbortError } from '@renderer/utils/error'
 import { purifyMarkdownImages } from '@renderer/utils/markdown'
 import { isPromptToolUse, isSupportedToolUse } from '@renderer/utils/mcp-tools'
@@ -126,12 +127,16 @@ export async function fetchChatCompletion({
     requestOptions: options
   })
 
+  // Safely fallback to prompt tool use when function calling is not supported by model.
+  const usePromptToolUse =
+    isPromptToolUse(assistant) || (isToolUseModeFunction(assistant) && !isFunctionCallingModel(assistant.model))
+
   const middlewareConfig: AiSdkMiddlewareConfig = {
     streamOutput: assistant.settings?.streamOutput ?? true,
     onChunk: onChunkReceived,
     model: assistant.model,
     enableReasoning: capabilities.enableReasoning,
-    isPromptToolUse: isPromptToolUse(assistant),
+    isPromptToolUse: usePromptToolUse,
     isSupportedToolUse: isSupportedToolUse(assistant),
     isImageGenerationEndpoint: isDedicatedImageGenerationModel(assistant.model || getDefaultModel()),
     webSearchPluginConfig: webSearchPluginConfig,
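isToolUseModeFunction is imported from @renderer/utils/assistant, but its body is not part of this diff; given how the guard uses it, a check of roughly this shape would fit (hypothetical sketch, not the repository's code):

```ts
import type { Assistant } from '@renderer/types'

// Hypothetical sketch; the real helper lives in @renderer/utils/assistant.
export const isToolUseModeFunction = (assistant: Assistant): boolean =>
  assistant.settings?.toolUseMode === 'function'
```

With that reading, usePromptToolUse is true either when the user explicitly chose prompt mode, or when function mode was requested on a model that isFunctionCallingModel rejects.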
AssistantService.ts
@@ -36,9 +36,10 @@ export const DEFAULT_ASSISTANT_SETTINGS: AssistantSettings = {
   streamOutput: true,
   topP: 1,
   enableTopP: false,
-  toolUseMode: 'prompt',
+  // It would gracefully fallback to prompt if not supported by model.
+  toolUseMode: 'function',
   customParameters: []
-}
+} as const
 
 export function getDefaultAssistant(): Assistant {
   return {
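One note on the `} as const` change: because the declaration also carries an explicit AssistantSettings annotation, the annotation determines the exported type, so the assertion mostly documents immutability intent rather than changing what callers see. A minimal standalone illustration (Settings is an assumed stand-in type):

```ts
interface Settings {
  toolUseMode: 'function' | 'prompt'
}

// `as const` alone narrows the literal and marks its properties readonly:
const narrowed = { toolUseMode: 'function' } as const
// typeof narrowed is { readonly toolUseMode: 'function' }

// With an explicit annotation, the declared binding keeps the annotated type:
const annotated: Settings = { toolUseMode: 'function' } as const
// typeof annotated is Settings, so toolUseMode is widened and writable again
```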
@@ -176,7 +177,7 @@ export const getAssistantSettings = (assistant: Assistant): AssistantSettings =>
   enableMaxTokens: assistant?.settings?.enableMaxTokens ?? false,
   maxTokens: getAssistantMaxTokens(),
   streamOutput: assistant?.settings?.streamOutput ?? true,
-  toolUseMode: assistant?.settings?.toolUseMode ?? 'prompt',
+  toolUseMode: assistant?.settings?.toolUseMode ?? 'function',
   defaultModel: assistant?.defaultModel ?? undefined,
   reasoning_effort: assistant?.settings?.reasoning_effort ?? undefined,
   customParameters: assistant?.settings?.customParameters ?? []
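Because ?? only kicks in for null or undefined, assistants that were explicitly saved with 'prompt' keep that choice; only assistants with no stored toolUseMode pick up the new 'function' default. A quick standalone check:

```ts
const resolveToolUseMode = (saved?: 'function' | 'prompt') => saved ?? 'function'

console.assert(resolveToolUseMode('prompt') === 'prompt') // explicit choice preserved
console.assert(resolveToolUseMode(undefined) === 'function') // unset picks up the new default
```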