refactor: add tool use mode translations and refactor settings

* Introduced new translations for "Tool Use Mode" and its options ("Function" and "Prompt") in English, Japanese, Russian, Simplified Chinese, and Traditional Chinese.
* Refactored the settings UI and providers to replace the boolean `enableToolUse` setting with a new `toolUseMode` option (`'function'` or `'prompt'`), updating related logic and UI elements accordingly.
* Added a store migration that maps the old `enableToolUse` flag onto `toolUseMode`, keeping backward compatibility with previous settings (sketched after the file summary below).
kangfenmao 2025-05-15 21:23:26 +08:00
parent a074ce285b
commit dcb1d22b33
16 changed files with 87 additions and 39 deletions
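
Before the per-file diffs, the heart of the change in one self-contained sketch: the old boolean becomes a `'function' | 'prompt'` union. The standalone `migrateToolUseSetting` helper and `LegacySettings` shape below are illustrative only, not the committed code; migration '101' in the diff applies the same rewrite in place to each stored assistant, and assistants whose flag was never set simply fall back to the `'prompt'` default in `getAssistantSettings`.

type ToolUseMode = 'function' | 'prompt'

// Simplified stand-in for the persisted assistant settings shape.
interface LegacySettings {
  enableToolUse?: boolean
  toolUseMode?: ToolUseMode
}

// Rewrite the deprecated boolean as the new mode and drop the old key.
// Like migration '101' below, only a truthy flag is rewritten; a false or
// unset flag is left alone and is treated as 'prompt' downstream.
function migrateToolUseSetting(settings: LegacySettings): LegacySettings {
  if (settings.enableToolUse) {
    settings.toolUseMode = 'function'
    delete settings.enableToolUse
  }
  return settings
}

console.log(migrateToolUseSetting({ enableToolUse: true }))  // { toolUseMode: 'function' }
console.log(migrateToolUseSetting({ enableToolUse: false })) // unchanged; read as 'prompt'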

View File

@@ -88,6 +88,9 @@
 "settings.knowledge_base.recognition": "Use Knowledge Base",
 "settings.knowledge_base.recognition.off": "Force Search",
 "settings.knowledge_base.recognition.on": "Intent Recognition",
+"settings.tool_use_mode": "Tool Use Mode",
+"settings.tool_use_mode.function": "Function",
+"settings.tool_use_mode.prompt": "Prompt",
 "settings.regular_phrases": {
 "title": "Regular Phrase",
 "add": "Add Phrase",

View File

@@ -98,7 +98,10 @@
 "settings.knowledge_base.recognition.tip": "アシスタントは大規模言語モデルの意図認識能力を使用して、ナレッジベースを参照する必要があるかどうかを判断します。この機能はモデルの能力に依存します",
 "settings.knowledge_base.recognition": "ナレッジベースの呼び出し",
 "settings.knowledge_base.recognition.off": "強制検索",
-"settings.knowledge_base.recognition.on": "意図認識"
+"settings.knowledge_base.recognition.on": "意図認識",
+"settings.tool_use_mode": "ツール呼び出し方式",
+"settings.tool_use_mode.function": "関数",
+"settings.tool_use_mode.prompt": "プロンプト"
 },
 "auth": {
 "error": "APIキーの自動取得に失敗しました。手動で取得してください",

View File

@@ -88,6 +88,9 @@
 "settings.knowledge_base.recognition": "Использование базы знаний",
 "settings.knowledge_base.recognition.off": "Принудительный поиск",
 "settings.knowledge_base.recognition.on": "Распознавание намерений",
+"settings.tool_use_mode": "Режим использования инструментов",
+"settings.tool_use_mode.function": "Функция",
+"settings.tool_use_mode.prompt": "Подсказка",
 "settings.regular_phrases": {
 "title": "Регулярные подсказки",
 "add": "Добавить подсказку",

View File

@@ -78,6 +78,9 @@
 "settings.knowledge_base.recognition": "调用知识库",
 "settings.knowledge_base.recognition.off": "强制检索",
 "settings.knowledge_base.recognition.on": "意图识别",
+"settings.tool_use_mode": "工具调用方式",
+"settings.tool_use_mode.function": "函数",
+"settings.tool_use_mode.prompt": "提示词",
 "settings.model": "模型设置",
 "settings.preset_messages": "预设消息",
 "settings.prompt": "提示词设置",

View File

@@ -98,7 +98,10 @@
 "settings.knowledge_base.recognition.tip": "智慧代理人將調用大語言模型的意圖識別能力,判斷是否需要調用知識庫進行回答,該功能將依賴模型的能力",
 "settings.knowledge_base.recognition": "調用知識庫",
 "settings.knowledge_base.recognition.off": "強制檢索",
-"settings.knowledge_base.recognition.on": "意圖識別"
+"settings.knowledge_base.recognition.on": "意圖識別",
+"settings.tool_use_mode": "工具調用方式",
+"settings.tool_use_mode.function": "函數",
+"settings.tool_use_mode.prompt": "提示詞"
 },
 "auth": {
 "error": "自動取得金鑰失敗,請手動取得",

View File

@@ -212,7 +212,8 @@ const MessageTools: FC<Props> = ({ blocks }) => {
 }
 const CollapseContainer = styled(Collapse)`
-margin-bottom: 15px;
+margin-top: 10px;
+margin-bottom: 12px;
 border-radius: 8px;
 overflow: hidden;

View File

@@ -71,7 +71,6 @@ const SettingsTab: FC<Props> = (props) => {
 const [maxTokens, setMaxTokens] = useState(assistant?.settings?.maxTokens ?? 0)
 const [fontSizeValue, setFontSizeValue] = useState(fontSize)
 const [streamOutput, setStreamOutput] = useState(assistant?.settings?.streamOutput ?? true)
-const [enableToolUse, setEnableToolUse] = useState(assistant?.settings?.enableToolUse ?? false)
 const { t } = useTranslation()
 const dispatch = useAppDispatch()
@@ -153,6 +152,9 @@ const SettingsTab: FC<Props> = (props) => {
 setStreamOutput(assistant?.settings?.streamOutput ?? true)
 }, [assistant])
+const assistantContextCount = assistant?.settings?.contextCount || 20
+const maxContextCount = assistantContextCount > 20 ? assistantContextCount : 20
 return (
 <Container className="settings-tab">
 <SettingGroup style={{ marginTop: 10 }}>
@@ -199,7 +201,7 @@ const SettingsTab: FC<Props> = (props) => {
 <Col span={24}>
 <Slider
 min={0}
-max={20}
+max={maxContextCount}
 onChange={setContextCount}
 onChangeComplete={onContextCountChange}
 value={typeof contextCount === 'number' ? contextCount : 0}
@@ -219,18 +221,6 @@ const SettingsTab: FC<Props> = (props) => {
 />
 </SettingRow>
 <SettingDivider />
-<SettingRow>
-<SettingRowTitleSmall>{t('models.enable_tool_use')}</SettingRowTitleSmall>
-<Switch
-size="small"
-checked={enableToolUse}
-onChange={(checked) => {
-setEnableToolUse(checked)
-updateAssistantSettings({ enableToolUse: checked })
-}}
-/>
-</SettingRow>
-<SettingDivider />
 <Row align="middle" justify="space-between" style={{ marginBottom: 10 }}>
 <HStack alignItems="center">
 <Label>{t('chat.settings.max_tokens')}</Label>

View File

@@ -24,7 +24,7 @@ const AssistantModelSettings: FC<Props> = ({ assistant, updateAssistant, updateA
 const [enableMaxTokens, setEnableMaxTokens] = useState(assistant?.settings?.enableMaxTokens ?? false)
 const [maxTokens, setMaxTokens] = useState(assistant?.settings?.maxTokens ?? 0)
 const [streamOutput, setStreamOutput] = useState(assistant?.settings?.streamOutput ?? true)
-const [enableToolUse, setEnableToolUse] = useState(assistant?.settings?.enableToolUse ?? false)
+const [toolUseMode, setToolUseMode] = useState(assistant?.settings?.toolUseMode ?? 'prompt')
 const [defaultModel, setDefaultModel] = useState(assistant?.defaultModel)
 const [topP, setTopP] = useState(assistant?.settings?.topP ?? 1)
 const [customParameters, setCustomParameters] = useState<AssistantSettingCustomParameters[]>(
@@ -150,6 +150,7 @@ const AssistantModelSettings: FC<Props> = ({ assistant, updateAssistant, updateA
 setStreamOutput(true)
 setTopP(1)
 setCustomParameters([])
+setToolUseMode('prompt')
 updateAssistantSettings({
 temperature: DEFAULT_TEMPERATURE,
 contextCount: DEFAULT_CONTEXTCOUNT,
@@ -157,7 +158,8 @@ const AssistantModelSettings: FC<Props> = ({ assistant, updateAssistant, updateA
 maxTokens: 0,
 streamOutput: true,
 topP: 1,
-customParameters: []
+customParameters: [],
+toolUseMode: 'prompt'
 })
 }
@@ -379,14 +381,17 @@ const AssistantModelSettings: FC<Props> = ({ assistant, updateAssistant, updateA
 </SettingRow>
 <Divider style={{ margin: '10px 0' }} />
 <SettingRow style={{ minHeight: 30 }}>
-<Label>{t('models.enable_tool_use')}</Label>
-<Switch
-checked={enableToolUse}
-onChange={(checked) => {
-setEnableToolUse(checked)
-updateAssistantSettings({ enableToolUse: checked })
-}}
-/>
+<Label>{t('assistants.settings.tool_use_mode')}</Label>
+<Select
+value={toolUseMode}
+style={{ width: 110 }}
+onChange={(value) => {
+setToolUseMode(value)
+updateAssistantSettings({ toolUseMode: value })
+}}>
+<Select.Option value="prompt">{t('assistants.settings.tool_use_mode.prompt')}</Select.Option>
+<Select.Option value="function">{t('assistants.settings.tool_use_mode.function')}</Select.Option>
+</Select>
 </SettingRow>
 <Divider style={{ margin: '10px 0' }} />
 <SettingRow style={{ minHeight: 30 }}>

View File

@@ -43,6 +43,7 @@ import type { Message } from '@renderer/types/newMessage'
 import { removeSpecialCharactersForTopicName } from '@renderer/utils'
 import {
 anthropicToolUseToMcpTool,
+isEnabledToolUse,
 mcpToolCallResponseToAnthropicMessage,
 mcpToolsToAnthropicTools,
 parseAndCallTools
@@ -207,7 +208,7 @@
 public async completions({ messages, assistant, mcpTools, onChunk, onFilterMessages }: CompletionsParams) {
 const defaultModel = getDefaultModel()
 const model = assistant.model || defaultModel
-const { contextCount, maxTokens, streamOutput, enableToolUse } = getAssistantSettings(assistant)
+const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)
 const userMessagesParams: MessageParam[] = []
@@ -229,7 +230,7 @@
 const { tools } = this.setupToolsConfig<ToolUnion>({
 model,
 mcpTools,
-enableToolUse
+enableToolUse: isEnabledToolUse(assistant)
 })
 if (this.useSystemPromptForTools && mcpTools && mcpTools.length) {

View File

@@ -54,6 +54,7 @@ import type { Message, Response } from '@renderer/types/newMessage'
 import { removeSpecialCharactersForTopicName } from '@renderer/utils'
 import {
 geminiFunctionCallToMcpTool,
+isEnabledToolUse,
 mcpToolCallResponseToGeminiMessage,
 mcpToolsToGeminiTools,
 parseAndCallTools
@@ -340,7 +341,7 @@
 await this.generateImageByChat({ messages, assistant, onChunk })
 return
 }
-const { contextCount, maxTokens, streamOutput, enableToolUse } = getAssistantSettings(assistant)
+const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)
 const userMessages = filterUserRoleStartMessages(
 filterEmptyMessages(filterContextMessages(takeRight(messages, contextCount + 2)))
@@ -360,7 +361,7 @@
 const { tools } = this.setupToolsConfig<Tool>({
 mcpTools,
 model,
-enableToolUse
+enableToolUse: isEnabledToolUse(assistant)
 })
 if (this.useSystemPromptForTools) {

View File

@@ -53,6 +53,7 @@ import {
 convertLinksToZhipu
 } from '@renderer/utils/linkConverter'
 import {
+isEnabledToolUse,
 mcpToolCallResponseToOpenAICompatibleMessage,
 mcpToolsToOpenAIChatTools,
 openAIToolsToMcpTool,
@@ -351,7 +352,7 @@ export default class OpenAIProvider extends BaseOpenAIProvider {
 const defaultModel = getDefaultModel()
 const model = assistant.model || defaultModel
-const { contextCount, maxTokens, streamOutput, enableToolUse } = getAssistantSettings(assistant)
+const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)
 const isEnabledBultinWebSearch = assistant.enableWebSearch
 messages = addImageFileToContents(messages)
 const enableReasoning =
@@ -365,7 +366,11 @@
 content: `Formatting re-enabled${systemMessage ? '\n' + systemMessage.content : ''}`
 }
 }
-const { tools } = this.setupToolsConfig<ChatCompletionTool>({ mcpTools, model, enableToolUse })
+const { tools } = this.setupToolsConfig<ChatCompletionTool>({
+mcpTools,
+model,
+enableToolUse: isEnabledToolUse(assistant)
+})
 if (this.useSystemPromptForTools) {
 systemMessage.content = buildSystemPrompt(systemMessage.content || '', mcpTools)

View File

@@ -37,6 +37,7 @@ import { removeSpecialCharactersForTopicName } from '@renderer/utils'
 import { addImageFileToContents } from '@renderer/utils/formats'
 import { convertLinks } from '@renderer/utils/linkConverter'
 import {
+isEnabledToolUse,
 mcpToolCallResponseToOpenAIMessage,
 mcpToolsToOpenAIResponseTools,
 openAIToolsToMcpTool,
@@ -289,7 +290,7 @@ export abstract class BaseOpenAIProvider extends BaseProvider {
 }
 const defaultModel = getDefaultModel()
 const model = assistant.model || defaultModel
-const { contextCount, maxTokens, streamOutput, enableToolUse } = getAssistantSettings(assistant)
+const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)
 const isEnabledBuiltinWebSearch = assistant.enableWebSearch
 let tools: OpenAI.Responses.Tool[] = []
@@ -318,7 +319,7 @@
 const { tools: extraTools } = this.setupToolsConfig<OpenAI.Responses.Tool>({
 mcpTools,
 model,
-enableToolUse
+enableToolUse: isEnabledToolUse(assistant)
 })
 tools = tools.concat(extraTools)

View File

@@ -108,7 +108,7 @@ export const getAssistantSettings = (assistant: Assistant): AssistantSettings =>
 enableMaxTokens: assistant?.settings?.enableMaxTokens ?? false,
 maxTokens: getAssistantMaxTokens(),
 streamOutput: assistant?.settings?.streamOutput ?? true,
-enableToolUse: assistant?.settings?.enableToolUse ?? false,
+toolUseMode: assistant?.settings?.toolUseMode ?? 'prompt',
 hideMessages: assistant?.settings?.hideMessages ?? false,
 defaultModel: assistant?.defaultModel ?? undefined,
 customParameters: assistant?.settings?.customParameters ?? []

View File

@@ -1319,6 +1319,17 @@ const migrateConfig = {
 },
 '101': (state: RootState) => {
 try {
+state.assistants.assistants.forEach((assistant) => {
+if (assistant.settings) {
+// @ts-ignore eslint-disable-next-line
+if (assistant.settings.enableToolUse) {
+// @ts-ignore eslint-disable-next-line
+assistant.settings.toolUseMode = assistant.settings.enableToolUse ? 'function' : 'prompt'
+// @ts-ignore eslint-disable-next-line
+delete assistant.settings.enableToolUse
+}
+}
+})
 if (state.shortcuts) {
 state.shortcuts.shortcuts.push({
 key: 'exit_fullscreen',

View File

@@ -56,12 +56,12 @@ export type AssistantSettings = {
 maxTokens: number | undefined
 enableMaxTokens: boolean
 streamOutput: boolean
-enableToolUse: boolean
 hideMessages: boolean
 defaultModel?: Model
 customParameters?: AssistantSettingCustomParameters[]
 reasoning_effort?: ReasoningEffortOptions
 qwenThinkMode?: boolean
+toolUseMode?: 'function' | 'prompt'
 }
 export type Agent = Omit<Assistant, 'model'> & {

View File

@@ -7,10 +7,18 @@ import {
 } from '@anthropic-ai/sdk/resources'
 import { Content, FunctionCall, Part, Tool, Type as GeminiSchemaType } from '@google/genai'
 import Logger from '@renderer/config/logger'
-import { isVisionModel } from '@renderer/config/models'
+import { isFunctionCallingModel, isVisionModel } from '@renderer/config/models'
 import store from '@renderer/store'
 import { addMCPServer } from '@renderer/store/mcp'
-import { MCPCallToolResponse, MCPServer, MCPTool, MCPToolResponse, Model, ToolUseResponse } from '@renderer/types'
+import {
+Assistant,
+MCPCallToolResponse,
+MCPServer,
+MCPTool,
+MCPToolResponse,
+Model,
+ToolUseResponse
+} from '@renderer/types'
 import type { MCPToolCompleteChunk, MCPToolInProgressChunk } from '@renderer/types/chunk'
 import { ChunkType } from '@renderer/types/chunk'
 import { isArray, isObject, pull, transform } from 'lodash'
@@ -824,3 +832,13 @@ export function mcpToolCallResponseToGeminiMessage(
 return message
 }
+export function isEnabledToolUse(assistant: Assistant) {
+if (assistant.model) {
+if (isFunctionCallingModel(assistant.model)) {
+return assistant.settings?.toolUseMode === 'function'
+}
+}
+return false
+}