From fb8baa5c094b61d7e9046a3b57302ac4eb5505d0 Mon Sep 17 00:00:00 2001
From: teaim <20551753+teaim@users.noreply.github.com>
Date: Fri, 7 Feb 2025 23:33:02 +0800
Subject: [PATCH] fix: o3-mini markdown formatting #997

---
 src/renderer/src/providers/OpenAIProvider.ts | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/src/renderer/src/providers/OpenAIProvider.ts b/src/renderer/src/providers/OpenAIProvider.ts
index 0299ea370b..bbf942603c 100644
--- a/src/renderer/src/providers/OpenAIProvider.ts
+++ b/src/renderer/src/providers/OpenAIProvider.ts
@@ -160,7 +160,14 @@ export default class OpenAIProvider extends BaseProvider {
     const model = assistant.model || defaultModel
     const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)
 
-    const systemMessage = assistant.prompt ? { role: 'system', content: assistant.prompt } : undefined
+    let systemMessage = assistant.prompt ? { role: 'system', content: assistant.prompt } : undefined
+    if (['o1', 'o1-2024-12-17'].includes(model.id) || model.id.startsWith('o3')) {
+      systemMessage = {
+        role: 'developer',
+        content: `Formatting re-enabled${systemMessage ? "\n" + systemMessage.content : ""}`
+      };
+    }
+
     const userMessages: ChatCompletionMessageParam[] = []
 
     const _messages = filterContextMessages(takeRight(messages, contextCount + 1))
@@ -192,7 +199,7 @@ export default class OpenAIProvider extends BaseProvider {
     // @ts-ignore key is not typed
     const stream = await this.sdk.chat.completions.create({
       model: model.id,
-      messages: [isOpenAIo1 ? undefined : systemMessage, ...userMessages].filter(
+      messages: [systemMessage, ...userMessages].filter(
         Boolean
       ) as ChatCompletionMessageParam[],
      temperature: this.getTemperature(assistant, model),