diff --git a/src/renderer/src/providers/AiProvider/OpenAIProvider.ts b/src/renderer/src/providers/AiProvider/OpenAIProvider.ts
index f9f78cebe4..597f85f5fc 100644
--- a/src/renderer/src/providers/AiProvider/OpenAIProvider.ts
+++ b/src/renderer/src/providers/AiProvider/OpenAIProvider.ts
@@ -718,9 +718,17 @@ export default class OpenAIProvider extends BaseOpenAIProvider {
           const usage = chunk.usage
           const originalFinishDelta = chunk.delta
           const originalFinishRawChunk = chunk.chunk
-          if (!isEmpty(finishReason)) {
-            onChunk({ type: ChunkType.TEXT_COMPLETE, text: content })
+          if (content) {
+            onChunk({ type: ChunkType.TEXT_COMPLETE, text: content })
+          }
+          if (thinkingContent) {
+            onChunk({
+              type: ChunkType.THINKING_COMPLETE,
+              text: thinkingContent,
+              thinking_millsec: new Date().getTime() - time_first_token_millsec
+            })
+          }
 
           if (usage) {
             finalUsage.completion_tokens += usage.completion_tokens || 0
             finalUsage.prompt_tokens += usage.prompt_tokens || 0
@@ -812,7 +820,6 @@ export default class OpenAIProvider extends BaseOpenAIProvider {
           if (toolResults.length) {
             await processToolResults(toolResults, idx)
           }
-
           onChunk({
             type: ChunkType.BLOCK_COMPLETE,
             response: {
diff --git a/src/renderer/src/store/thunk/messageThunk.ts b/src/renderer/src/store/thunk/messageThunk.ts
index fd3a88d5b9..c2c51b25e4 100644
--- a/src/renderer/src/store/thunk/messageThunk.ts
+++ b/src/renderer/src/store/thunk/messageThunk.ts
@@ -622,6 +622,14 @@ const fetchAndProcessAssistantResponseImpl = async (
       const contextForUsage = userMsgIndex !== -1 ? orderedMsgs.slice(0, userMsgIndex + 1) : []
       const finalContextWithAssistant = [...contextForUsage, finalAssistantMsg]
 
+      if (lastBlockId) {
+        const changes: Partial<MessageBlock> = {
+          status: MessageBlockStatus.SUCCESS
+        }
+        dispatch(updateOneBlock({ id: lastBlockId, changes }))
+        saveUpdatedBlockToDB(lastBlockId, assistantMsgId, topicId, getState)
+      }
+
       // 更新topic的name
       autoRenameTopic(assistant, topicId)