diff --git a/src/renderer/src/aiCore/transformParameters.ts b/src/renderer/src/aiCore/transformParameters.ts
index d5fdace254..f3976bbd69 100644
--- a/src/renderer/src/aiCore/transformParameters.ts
+++ b/src/renderer/src/aiCore/transformParameters.ts
@@ -6,6 +6,7 @@
 import { loggerService } from '@logger'
 import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant'
 import {
+  isClaudeReasoningModel,
   isGenerateImageModel,
   isNotSupportTemperatureAndTopP,
   isOpenRouterBuiltInWebSearchModel,
@@ -44,15 +45,29 @@ const logger = loggerService.withContext('transformParameters')
 /**
  * 获取温度参数
  */
-export function getTemperature(assistant: Assistant, model: Model): number | undefined {
-  return isNotSupportTemperatureAndTopP(model) ? undefined : assistant.settings?.temperature
+function getTemperature(assistant: Assistant, model: Model): number | undefined {
+  if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
+    return undefined
+  }
+  if (isNotSupportTemperatureAndTopP(model)) {
+    return undefined
+  }
+  const assistantSettings = getAssistantSettings(assistant)
+  return assistantSettings?.enableTemperature ? assistantSettings?.temperature : undefined
 }
 
 /**
  * 获取 TopP 参数
  */
-export function getTopP(assistant: Assistant, model: Model): number | undefined {
-  return isNotSupportTemperatureAndTopP(model) ? undefined : assistant.settings?.topP
+function getTopP(assistant: Assistant, model: Model): number | undefined {
+  if (assistant.settings?.reasoning_effort && isClaudeReasoningModel(model)) {
+    return undefined
+  }
+  if (isNotSupportTemperatureAndTopP(model)) {
+    return undefined
+  }
+  const assistantSettings = getAssistantSettings(assistant)
+  return assistantSettings?.enableTopP ? assistantSettings?.topP : undefined
 }
 
 /**
diff --git a/src/renderer/src/aiCore/utils/options.ts b/src/renderer/src/aiCore/utils/options.ts
index 765078316f..acca70df74 100644
--- a/src/renderer/src/aiCore/utils/options.ts
+++ b/src/renderer/src/aiCore/utils/options.ts
@@ -77,7 +77,7 @@ export function buildProviderOptions(
     case 'openai':
     case 'azure':
       providerSpecificOptions = {
-        ...buildOpenAIProviderOptions(assistant, model, capabilities),
+        ...buildOpenAIProviderOptions(assistant, model, capabilities, actualProvider),
         serviceTier: serviceTierSetting
       }
       break
@@ -126,7 +126,8 @@ function buildOpenAIProviderOptions(
     enableReasoning: boolean
     enableWebSearch: boolean
     enableGenerateImage: boolean
-  }
+  },
+  actualProvider: Provider
 ): Record<string, any> {
   const { enableReasoning } = capabilities
   let providerOptions: Record<string, any> = {}
@@ -139,6 +140,11 @@
     }
   }
 
+  if (actualProvider.id === 'azure') {
+    providerOptions.apiVersion = actualProvider.apiVersion
+    providerOptions.useDeploymentBasedUrls = true
+  }
+
   return providerOptions
 }
 
diff --git a/src/renderer/src/windows/mini/home/HomeWindow.tsx b/src/renderer/src/windows/mini/home/HomeWindow.tsx
index d079655928..28313e2a83 100644
--- a/src/renderer/src/windows/mini/home/HomeWindow.tsx
+++ b/src/renderer/src/windows/mini/home/HomeWindow.tsx
@@ -263,9 +263,9 @@ const HomeWindow: FC<{ draggable?: boolean }> = ({ draggable = true }) => {
     }
     newAssistant.settings.streamOutput = true
     // 显式关闭这些功能
-    // newAssistant.webSearchProviderId = undefined
+    newAssistant.webSearchProviderId = undefined
     newAssistant.mcpServers = undefined
-    // newAssistant.knowledge_bases = undefined
+    newAssistant.knowledge_bases = undefined
 
     const llmMessages = await ConversationService.prepareMessagesForModel(messagesForContext, newAssistant)
     await fetchChatCompletion({
diff --git a/src/renderer/src/windows/selection/action/components/ActionUtils.ts b/src/renderer/src/windows/selection/action/components/ActionUtils.ts
index 408a502803..6e3b901155 100644
--- a/src/renderer/src/windows/selection/action/components/ActionUtils.ts
+++ b/src/renderer/src/windows/selection/action/components/ActionUtils.ts
@@ -63,7 +63,7 @@ export const processMessages = async (
   // 显式关闭这些功能
   newAssistant.webSearchProviderId = undefined
   newAssistant.mcpServers = undefined
-  // newAssistant.knowledge_bases = undefined
+  newAssistant.knowledge_bases = undefined
 
   const llmMessages = await ConversationService.prepareMessagesForModel([userMessage], newAssistant)
   await fetchChatCompletion({