diff --git a/src/renderer/src/i18n/locales/en-us.json b/src/renderer/src/i18n/locales/en-us.json
index 5e5b8d7658..12dcef76ce 100644
--- a/src/renderer/src/i18n/locales/en-us.json
+++ b/src/renderer/src/i18n/locales/en-us.json
@@ -88,6 +88,9 @@
     "settings.knowledge_base.recognition": "Use Knowledge Base",
     "settings.knowledge_base.recognition.off": "Force Search",
     "settings.knowledge_base.recognition.on": "Intent Recognition",
+    "settings.tool_use_mode": "Tool Use Mode",
+    "settings.tool_use_mode.function": "Function",
+    "settings.tool_use_mode.prompt": "Prompt",
     "settings.regular_phrases": {
       "title": "Regular Phrase",
       "add": "Add Phrase",
diff --git a/src/renderer/src/i18n/locales/ja-jp.json b/src/renderer/src/i18n/locales/ja-jp.json
index fbcfca41e7..4c743a4e34 100644
--- a/src/renderer/src/i18n/locales/ja-jp.json
+++ b/src/renderer/src/i18n/locales/ja-jp.json
@@ -98,7 +98,10 @@
     "settings.knowledge_base.recognition.tip": "アシスタントは大規模言語モデルの意図認識能力を使用して、ナレッジベースを参照する必要があるかどうかを判断します。この機能はモデルの能力に依存します",
     "settings.knowledge_base.recognition": "ナレッジベースの呼び出し",
     "settings.knowledge_base.recognition.off": "強制検索",
-    "settings.knowledge_base.recognition.on": "意図認識"
+    "settings.knowledge_base.recognition.on": "意図認識",
+    "settings.tool_use_mode": "ツール呼び出し方式",
+    "settings.tool_use_mode.function": "関数",
+    "settings.tool_use_mode.prompt": "プロンプト"
   },
   "auth": {
     "error": "APIキーの自動取得に失敗しました。手動で取得してください",
diff --git a/src/renderer/src/i18n/locales/ru-ru.json b/src/renderer/src/i18n/locales/ru-ru.json
index 47306614b9..30332b88a3 100644
--- a/src/renderer/src/i18n/locales/ru-ru.json
+++ b/src/renderer/src/i18n/locales/ru-ru.json
@@ -88,6 +88,9 @@
     "settings.knowledge_base.recognition": "Использование базы знаний",
     "settings.knowledge_base.recognition.off": "Принудительный поиск",
     "settings.knowledge_base.recognition.on": "Распознавание намерений",
+    "settings.tool_use_mode": "Режим использования инструментов",
+    "settings.tool_use_mode.function": "Функция",
+    "settings.tool_use_mode.prompt": "Подсказка",
     "settings.regular_phrases": {
       "title": "Регулярные подсказки",
       "add": "Добавить подсказку",
diff --git a/src/renderer/src/i18n/locales/zh-cn.json b/src/renderer/src/i18n/locales/zh-cn.json
index 39e7655523..df40aacdbe 100644
--- a/src/renderer/src/i18n/locales/zh-cn.json
+++ b/src/renderer/src/i18n/locales/zh-cn.json
@@ -78,6 +78,9 @@
     "settings.knowledge_base.recognition": "调用知识库",
     "settings.knowledge_base.recognition.off": "强制检索",
     "settings.knowledge_base.recognition.on": "意图识别",
+    "settings.tool_use_mode": "工具调用方式",
+    "settings.tool_use_mode.function": "函数",
+    "settings.tool_use_mode.prompt": "提示词",
     "settings.model": "模型设置",
     "settings.preset_messages": "预设消息",
     "settings.prompt": "提示词设置",
diff --git a/src/renderer/src/i18n/locales/zh-tw.json b/src/renderer/src/i18n/locales/zh-tw.json
index 3eb7efd583..de3c0a9593 100644
--- a/src/renderer/src/i18n/locales/zh-tw.json
+++ b/src/renderer/src/i18n/locales/zh-tw.json
@@ -98,7 +98,10 @@
     "settings.knowledge_base.recognition.tip": "智慧代理人將調用大語言模型的意圖識別能力,判斷是否需要調用知識庫進行回答,該功能將依賴模型的能力",
     "settings.knowledge_base.recognition": "調用知識庫",
     "settings.knowledge_base.recognition.off": "強制檢索",
-    "settings.knowledge_base.recognition.on": "意圖識別"
+    "settings.knowledge_base.recognition.on": "意圖識別",
+    "settings.tool_use_mode": "工具調用方式",
+    "settings.tool_use_mode.function": "函數",
+    "settings.tool_use_mode.prompt": "提示詞"
   },
   "auth": {
     "error": "自動取得金鑰失敗,請手動取得",
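The three keys added to each locale back a two-option selector. As orientation, here is a minimal sketch of a consumer, assuming react-i18next and antd as used elsewhere in this codebase; the component name and props are invented for illustration, the real control lives in AssistantModelSettings.tsx further down, and the keys' enclosing namespace prefix (not visible in these hunks) is omitted:

```tsx
import { Select } from 'antd'
import { FC } from 'react'
import { useTranslation } from 'react-i18next'

type ToolUseMode = 'function' | 'prompt'

// Hypothetical consumer of the locale keys added above.
const ToolUseModeSelect: FC<{ value: ToolUseMode; onChange: (v: ToolUseMode) => void }> = ({ value, onChange }) => {
  const { t } = useTranslation()
  return (
    <Select
      value={value}
      onChange={onChange}
      options={[
        { value: 'function', label: t('settings.tool_use_mode.function') },
        { value: 'prompt', label: t('settings.tool_use_mode.prompt') }
      ]}
    />
  )
}

export default ToolUseModeSelect
```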
diff --git a/src/renderer/src/pages/home/Messages/MessageTools.tsx b/src/renderer/src/pages/home/Messages/MessageTools.tsx
index 433f1af051..b381fc171d 100644
--- a/src/renderer/src/pages/home/Messages/MessageTools.tsx
+++ b/src/renderer/src/pages/home/Messages/MessageTools.tsx
@@ -212,7 +212,8 @@ const MessageTools: FC = ({ blocks }) => {
 }

 const CollapseContainer = styled(Collapse)`
-  margin-bottom: 15px;
+  margin-top: 10px;
+  margin-bottom: 12px;
   border-radius: 8px;
   overflow: hidden;
diff --git a/src/renderer/src/pages/home/Tabs/SettingsTab.tsx b/src/renderer/src/pages/home/Tabs/SettingsTab.tsx
index b78135cf1c..06bd5e335b 100644
--- a/src/renderer/src/pages/home/Tabs/SettingsTab.tsx
+++ b/src/renderer/src/pages/home/Tabs/SettingsTab.tsx
@@ -71,7 +71,6 @@ const SettingsTab: FC = (props) => {
   const [maxTokens, setMaxTokens] = useState(assistant?.settings?.maxTokens ?? 0)
   const [fontSizeValue, setFontSizeValue] = useState(fontSize)
   const [streamOutput, setStreamOutput] = useState(assistant?.settings?.streamOutput ?? true)
-  const [enableToolUse, setEnableToolUse] = useState(assistant?.settings?.enableToolUse ?? false)

   const { t } = useTranslation()
   const dispatch = useAppDispatch()
@@ -153,6 +152,9 @@ const SettingsTab: FC = (props) => {
     setStreamOutput(assistant?.settings?.streamOutput ?? true)
   }, [assistant])

+  const assistantContextCount = assistant?.settings?.contextCount || 20
+  const maxContextCount = assistantContextCount > 20 ? assistantContextCount : 20
+
   return (
@@ -199,7 +201,7 @@ const SettingsTab: FC = (props) => {
           />
-
-          {t('models.enable_tool_use')}
-          {
-            setEnableToolUse(checked)
-            updateAssistantSettings({ enableToolUse: checked })
-          }}
-          />
-
-
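The two constants introduced above derive the context-count slider's upper bound. The ternary is `Math.max` spelled out; a sketch of the equivalent, with the rationale as comments (note that `|| 20`, rather than `?? 20`, also treats a stored 0 as unset):

```ts
import type { Assistant } from '@renderer/types'

// Equivalent to the two inserted lines: the slider's max stays at 20 until
// the assistant stores a larger contextCount, at which point the max grows
// to match so the saved value is not visually clamped by the control.
const sliderMax = (assistant?: Assistant) =>
  Math.max(assistant?.settings?.contextCount || 20, 20)

sliderMax(undefined)                                                 // 20
sliderMax({ settings: { contextCount: 50 } } as unknown as Assistant) // 50
```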
diff --git a/src/renderer/src/pages/settings/AssistantSettings/AssistantModelSettings.tsx b/src/renderer/src/pages/settings/AssistantSettings/AssistantModelSettings.tsx
index 63021e6083..1a22848ce5 100644
--- a/src/renderer/src/pages/settings/AssistantSettings/AssistantModelSettings.tsx
+++ b/src/renderer/src/pages/settings/AssistantSettings/AssistantModelSettings.tsx
@@ -24,7 +24,7 @@ const AssistantModelSettings: FC = ({ assistant, updateAssistant, updateA
   const [enableMaxTokens, setEnableMaxTokens] = useState(assistant?.settings?.enableMaxTokens ?? false)
   const [maxTokens, setMaxTokens] = useState(assistant?.settings?.maxTokens ?? 0)
   const [streamOutput, setStreamOutput] = useState(assistant?.settings?.streamOutput ?? true)
-  const [enableToolUse, setEnableToolUse] = useState(assistant?.settings?.enableToolUse ?? false)
+  const [toolUseMode, setToolUseMode] = useState(assistant?.settings?.toolUseMode ?? 'prompt')
   const [defaultModel, setDefaultModel] = useState(assistant?.defaultModel)
   const [topP, setTopP] = useState(assistant?.settings?.topP ?? 1)
   const [customParameters, setCustomParameters] = useState(
@@ -150,6 +150,7 @@ const AssistantModelSettings: FC = ({ assistant, updateAssistant, updateA
     setStreamOutput(true)
     setTopP(1)
     setCustomParameters([])
+    setToolUseMode('prompt')
     updateAssistantSettings({
       temperature: DEFAULT_TEMPERATURE,
       contextCount: DEFAULT_CONTEXTCOUNT,
@@ -157,7 +158,8 @@ const AssistantModelSettings: FC = ({ assistant, updateAssistant, updateA
       maxTokens: 0,
       streamOutput: true,
       topP: 1,
-      customParameters: []
+      customParameters: [],
+      toolUseMode: 'prompt'
     })
   }
@@ -379,14 +381,17 @@ const AssistantModelSettings: FC = ({ assistant, updateAssistant, updateA
-
-          {
-            setEnableToolUse(checked)
-            updateAssistantSettings({ enableToolUse: checked })
-          }}
-          />
+
+
diff --git a/src/renderer/src/providers/AiProvider/AnthropicProvider.ts b/src/renderer/src/providers/AiProvider/AnthropicProvider.ts
index 51f2eaff13..159b5f4292 100644
--- a/src/renderer/src/providers/AiProvider/AnthropicProvider.ts
+++ b/src/renderer/src/providers/AiProvider/AnthropicProvider.ts
@@ -43,6 +43,7 @@ import type { Message } from '@renderer/types/newMessage'
 import { removeSpecialCharactersForTopicName } from '@renderer/utils'
 import {
   anthropicToolUseToMcpTool,
+  isEnabledToolUse,
   mcpToolCallResponseToAnthropicMessage,
   mcpToolsToAnthropicTools,
   parseAndCallTools
@@ -207,7 +208,7 @@ export default class AnthropicProvider extends BaseProvider {
   public async completions({ messages, assistant, mcpTools, onChunk, onFilterMessages }: CompletionsParams) {
     const defaultModel = getDefaultModel()
     const model = assistant.model || defaultModel
-    const { contextCount, maxTokens, streamOutput, enableToolUse } = getAssistantSettings(assistant)
+    const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)

     const userMessagesParams: MessageParam[] = []
@@ -229,7 +230,7 @@ export default class AnthropicProvider extends BaseProvider {
     const { tools } = this.setupToolsConfig({
       model,
       mcpTools,
-      enableToolUse
+      enableToolUse: isEnabledToolUse(assistant)
     })

     if (this.useSystemPromptForTools && mcpTools && mcpTools.length) {
diff --git a/src/renderer/src/providers/AiProvider/GeminiProvider.ts b/src/renderer/src/providers/AiProvider/GeminiProvider.ts
index 900bed0e94..46ea431f6f 100644
--- a/src/renderer/src/providers/AiProvider/GeminiProvider.ts
+++ b/src/renderer/src/providers/AiProvider/GeminiProvider.ts
@@ -54,6 +54,7 @@ import type { Message, Response } from '@renderer/types/newMessage'
 import { removeSpecialCharactersForTopicName } from '@renderer/utils'
 import {
   geminiFunctionCallToMcpTool,
+  isEnabledToolUse,
   mcpToolCallResponseToGeminiMessage,
   mcpToolsToGeminiTools,
   parseAndCallTools
@@ -340,7 +341,7 @@ export default class GeminiProvider extends BaseProvider {
       await this.generateImageByChat({ messages, assistant, onChunk })
       return
     }
-    const { contextCount, maxTokens, streamOutput, enableToolUse } = getAssistantSettings(assistant)
+    const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)

     const userMessages = filterUserRoleStartMessages(
       filterEmptyMessages(filterContextMessages(takeRight(messages, contextCount + 2)))
     )
@@ -360,7 +361,7 @@ export default class GeminiProvider extends BaseProvider {
     const { tools } = this.setupToolsConfig({
       mcpTools,
       model,
-      enableToolUse
+      enableToolUse: isEnabledToolUse(assistant)
     })

     if (this.useSystemPromptForTools) {
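Both providers above, and the two OpenAI providers below, make the same substitution: instead of trusting a persisted boolean, they resolve tool-use mode per request via `isEnabledToolUse(assistant)`. A condensed sketch of the resulting branch; only `isEnabledToolUse` and `buildSystemPrompt(prompt, tools)` appear in this diff, the other names are declared stand-ins for the real provider plumbing:

```ts
import type { Assistant, MCPTool } from '@renderer/types'
import { isEnabledToolUse } from '@renderer/utils/mcp-tools'

// Stand-ins for the real provider plumbing; signatures simplified.
declare function mcpToolsToProviderTools(tools: MCPTool[]): unknown[]
declare function buildSystemPrompt(prompt: string, tools?: MCPTool[]): string

function prepareToolUse(assistant: Assistant, mcpTools: MCPTool[], systemPrompt: string) {
  if (isEnabledToolUse(assistant)) {
    // 'function' mode on a function-calling model: advertise the tools
    // natively in the request payload and leave the prompt untouched.
    return { tools: mcpToolsToProviderTools(mcpTools), systemPrompt }
  }
  // 'prompt' mode, or a model without native function calling: send no
  // native tools, describe them in the system prompt instead, and parse
  // tool-use blocks back out of the completion text afterwards.
  return { tools: [] as unknown[], systemPrompt: buildSystemPrompt(systemPrompt, mcpTools) }
}
```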
diff --git a/src/renderer/src/providers/AiProvider/OpenAIProvider.ts b/src/renderer/src/providers/AiProvider/OpenAIProvider.ts
index 62418ccdb0..f9f78cebe4 100644
--- a/src/renderer/src/providers/AiProvider/OpenAIProvider.ts
+++ b/src/renderer/src/providers/AiProvider/OpenAIProvider.ts
@@ -53,6 +53,7 @@ import { convertLinksToZhipu } from '@renderer/utils/linkConverter'
 import {
+  isEnabledToolUse,
   mcpToolCallResponseToOpenAICompatibleMessage,
   mcpToolsToOpenAIChatTools,
   openAIToolsToMcpTool,
@@ -351,7 +352,7 @@ export default class OpenAIProvider extends BaseOpenAIProvider {
     const defaultModel = getDefaultModel()
     const model = assistant.model || defaultModel
-    const { contextCount, maxTokens, streamOutput, enableToolUse } = getAssistantSettings(assistant)
+    const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)
     const isEnabledBultinWebSearch = assistant.enableWebSearch
     messages = addImageFileToContents(messages)
     const enableReasoning =
@@ -365,7 +366,11 @@ export default class OpenAIProvider extends BaseOpenAIProvider {
         content: `Formatting re-enabled${systemMessage ? '\n' + systemMessage.content : ''}`
       }
     }
-    const { tools } = this.setupToolsConfig({ mcpTools, model, enableToolUse })
+    const { tools } = this.setupToolsConfig({
+      mcpTools,
+      model,
+      enableToolUse: isEnabledToolUse(assistant)
+    })

     if (this.useSystemPromptForTools) {
       systemMessage.content = buildSystemPrompt(systemMessage.content || '', mcpTools)
diff --git a/src/renderer/src/providers/AiProvider/OpenAIResponseProvider.ts b/src/renderer/src/providers/AiProvider/OpenAIResponseProvider.ts
index 4d9a6f57bf..baacc96c44 100644
--- a/src/renderer/src/providers/AiProvider/OpenAIResponseProvider.ts
+++ b/src/renderer/src/providers/AiProvider/OpenAIResponseProvider.ts
@@ -37,6 +37,7 @@ import { removeSpecialCharactersForTopicName } from '@renderer/utils'
 import { addImageFileToContents } from '@renderer/utils/formats'
 import { convertLinks } from '@renderer/utils/linkConverter'
 import {
+  isEnabledToolUse,
   mcpToolCallResponseToOpenAIMessage,
   mcpToolsToOpenAIResponseTools,
   openAIToolsToMcpTool,
@@ -289,7 +290,7 @@ export abstract class BaseOpenAIProvider extends BaseProvider {
     }
     const defaultModel = getDefaultModel()
     const model = assistant.model || defaultModel
-    const { contextCount, maxTokens, streamOutput, enableToolUse } = getAssistantSettings(assistant)
+    const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)
     const isEnabledBuiltinWebSearch = assistant.enableWebSearch

     let tools: OpenAI.Responses.Tool[] = []
@@ -318,7 +319,7 @@ export abstract class BaseOpenAIProvider extends BaseProvider {
     const { tools: extraTools } = this.setupToolsConfig({
       mcpTools,
       model,
-      enableToolUse
+      enableToolUse: isEnabledToolUse(assistant)
     })

     tools = tools.concat(extraTools)
diff --git a/src/renderer/src/services/AssistantService.ts b/src/renderer/src/services/AssistantService.ts
index 41e899abd9..6ef0a4474f 100644
--- a/src/renderer/src/services/AssistantService.ts
+++ b/src/renderer/src/services/AssistantService.ts
@@ -108,7 +108,7 @@ export const getAssistantSettings = (assistant: Assistant): AssistantSettings =>
     enableMaxTokens: assistant?.settings?.enableMaxTokens ?? false,
     maxTokens: getAssistantMaxTokens(),
     streamOutput: assistant?.settings?.streamOutput ?? true,
-    enableToolUse: assistant?.settings?.enableToolUse ?? false,
+    toolUseMode: assistant?.settings?.toolUseMode ?? 'prompt',
     hideMessages: assistant?.settings?.hideMessages ?? false,
     defaultModel: assistant?.defaultModel ?? undefined,
     customParameters: assistant?.settings?.customParameters ?? []
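Given the `?? 'prompt'` fallback in `getAssistantSettings`, an assistant saved before this change, or one with no settings at all, resolves to prompt-based tool use; `'function'` only ever comes from an explicit choice. Illustratively (both fixture objects are made up):

```ts
import { getAssistantSettings } from '@renderer/services/AssistantService'
import type { Assistant } from '@renderer/types'

// An assistant saved before this change carries no toolUseMode field.
const legacy = { id: 'a1', settings: {} } as unknown as Assistant
console.log(getAssistantSettings(legacy).toolUseMode) // 'prompt'

// An assistant whose user picked "Function" in the new selector.
const native = { id: 'a2', settings: { toolUseMode: 'function' } } as unknown as Assistant
console.log(getAssistantSettings(native).toolUseMode) // 'function'
```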
diff --git a/src/renderer/src/store/migrate.ts b/src/renderer/src/store/migrate.ts
index 9ed708248b..029b05125c 100644
--- a/src/renderer/src/store/migrate.ts
+++ b/src/renderer/src/store/migrate.ts
@@ -1319,6 +1319,17 @@ const migrateConfig = {
   },
   '101': (state: RootState) => {
     try {
+      state.assistants.assistants.forEach((assistant) => {
+        if (assistant.settings) {
+          // @ts-ignore eslint-disable-next-line
+          if (assistant.settings.enableToolUse !== undefined) {
+            // @ts-ignore eslint-disable-next-line
+            assistant.settings.toolUseMode = assistant.settings.enableToolUse ? 'function' : 'prompt'
+            // @ts-ignore eslint-disable-next-line
+            delete assistant.settings.enableToolUse
+          }
+        }
+      })
       if (state.shortcuts) {
         state.shortcuts.shortcuts.push({
           key: 'exit_fullscreen',
diff --git a/src/renderer/src/types/index.ts b/src/renderer/src/types/index.ts
index e66e629043..ac10c11b3d 100644
--- a/src/renderer/src/types/index.ts
+++ b/src/renderer/src/types/index.ts
@@ -56,12 +56,12 @@ export type AssistantSettings = {
   maxTokens: number | undefined
   enableMaxTokens: boolean
   streamOutput: boolean
-  enableToolUse: boolean
   hideMessages: boolean
   defaultModel?: Model
   customParameters?: AssistantSettingCustomParameters[]
   reasoning_effort?: ReasoningEffortOptions
   qwenThinkMode?: boolean
+  toolUseMode?: 'function' | 'prompt'
 }

 export type Agent = Omit & {
diff --git a/src/renderer/src/utils/mcp-tools.ts b/src/renderer/src/utils/mcp-tools.ts
index e59b9ff1e5..4c446ffa78 100644
--- a/src/renderer/src/utils/mcp-tools.ts
+++ b/src/renderer/src/utils/mcp-tools.ts
@@ -7,10 +7,18 @@ import {
 } from '@anthropic-ai/sdk/resources'
 import { Content, FunctionCall, Part, Tool, Type as GeminiSchemaType } from '@google/genai'
 import Logger from '@renderer/config/logger'
-import { isVisionModel } from '@renderer/config/models'
+import { isFunctionCallingModel, isVisionModel } from '@renderer/config/models'
 import store from '@renderer/store'
 import { addMCPServer } from '@renderer/store/mcp'
-import { MCPCallToolResponse, MCPServer, MCPTool, MCPToolResponse, Model, ToolUseResponse } from '@renderer/types'
+import {
+  Assistant,
+  MCPCallToolResponse,
+  MCPServer,
+  MCPTool,
+  MCPToolResponse,
+  Model,
+  ToolUseResponse
+} from '@renderer/types'
 import type { MCPToolCompleteChunk, MCPToolInProgressChunk } from '@renderer/types/chunk'
 import { ChunkType } from '@renderer/types/chunk'
 import { isArray, isObject, pull, transform } from 'lodash'
@@ -824,3 +832,13 @@ export function mcpToolCallResponseToGeminiMessage(
   return message
 }
+
+export function isEnabledToolUse(assistant: Assistant) {
+  if (assistant.model) {
+    if (isFunctionCallingModel(assistant.model)) {
+      return assistant.settings?.toolUseMode === 'function'
+    }
+  }
+
+  return false
+}
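The new helper gates native function calling on both the user's chosen mode and the model's capability. A sketch of the resulting truth table; the model fixtures and their `isFunctionCallingModel` results are assumed for illustration:

```ts
import type { Assistant, Model } from '@renderer/types'
import { isEnabledToolUse } from '@renderer/utils/mcp-tools'

// Fixtures: assume isFunctionCallingModel(fnModel) is true and
// isFunctionCallingModel(plainModel) is false. Both objects are made up.
const fnModel = { id: 'fn-capable' } as unknown as Model
const plainModel = { id: 'text-only' } as unknown as Model

const make = (model: Model | undefined, toolUseMode: 'function' | 'prompt') =>
  ({ model, settings: { toolUseMode } }) as unknown as Assistant

console.log(isEnabledToolUse(make(fnModel, 'function')))    // true  -> native tool calls
console.log(isEnabledToolUse(make(fnModel, 'prompt')))      // false -> tools go into the system prompt
console.log(isEnabledToolUse(make(plainModel, 'function'))) // false -> capability overrides the preference
console.log(isEnabledToolUse(make(undefined, 'function')))  // false -> no model selected yet
```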