refactor(MainTextBlock): enhance content processing by ignoring tool_use tags (#5483)

This commit is contained in:
SuYao 2025-04-29 16:16:34 +08:00 committed by GitHub
parent 08de8eac86
commit 1ac9ac09c9
2 changed files with 10 additions and 7 deletions

View File

@ -31,6 +31,8 @@ interface Props {
role: Message['role'] role: Message['role']
} }
const toolUseRegex = /<tool_use>([\s\S]*?)<\/tool_use>/g
const MainTextBlock: React.FC<Props> = ({ block, citationBlockId, role, mentions = [] }) => { const MainTextBlock: React.FC<Props> = ({ block, citationBlockId, role, mentions = [] }) => {
// Use the passed citationBlockId directly in the selector // Use the passed citationBlockId directly in the selector
const { renderInputMessageAsMarkdown } = useSettings() const { renderInputMessageAsMarkdown } = useSettings()
@ -67,6 +69,10 @@ const MainTextBlock: React.FC<Props> = ({ block, citationBlockId, role, mentions
return content return content
}, [block.content, block.citationReferences, citationBlockId, formattedCitations]) }, [block.content, block.citationReferences, citationBlockId, formattedCitations])
const ignoreToolUse = useMemo(() => {
return processedContent.replace(toolUseRegex, '')
}, [processedContent])
return ( return (
<> <>
{/* Render mentions associated with the message */} {/* Render mentions associated with the message */}
@ -80,7 +86,7 @@ const MainTextBlock: React.FC<Props> = ({ block, citationBlockId, role, mentions
{role === 'user' && !renderInputMessageAsMarkdown ? ( {role === 'user' && !renderInputMessageAsMarkdown ? (
<p style={{ marginBottom: 5, whiteSpace: 'pre-wrap' }}>{block.content}</p> <p style={{ marginBottom: 5, whiteSpace: 'pre-wrap' }}>{block.content}</p>
) : ( ) : (
<Markdown block={{ ...block, content: processedContent }} /> <Markdown block={{ ...block, content: ignoreToolUse }} />
)} )}
</> </>
) )

View File

@ -507,6 +507,7 @@ export default class OpenAIProvider extends BaseProvider {
// let isThinkingInContent: ThoughtProcessor | undefined = undefined // let isThinkingInContent: ThoughtProcessor | undefined = undefined
// const processThinkingChunk = this.handleThinkingTags() // const processThinkingChunk = this.handleThinkingTags()
let isFirstChunk = true let isFirstChunk = true
let isFirstThinkingChunk = true
for await (const chunk of stream) { for await (const chunk of stream) {
if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) { if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) {
break break
@ -521,12 +522,7 @@ export default class OpenAIProvider extends BaseProvider {
const reasoningContent = delta?.reasoning_content || delta?.reasoning const reasoningContent = delta?.reasoning_content || delta?.reasoning
const currentTime = new Date().getTime() // Get current time for each chunk const currentTime = new Date().getTime() // Get current time for each chunk
if ( if (time_first_token_millsec === 0 && isFirstThinkingChunk && reasoningContent) {
time_first_token_millsec === 0 &&
isEmpty(reasoningContent) &&
isEmpty(delta?.content) &&
isEmpty(finishReason)
) {
// 记录第一个token的时间 // 记录第一个token的时间
time_first_token_millsec = currentTime time_first_token_millsec = currentTime
// 记录第一个token的时间差 // 记录第一个token的时间差
@ -542,6 +538,7 @@ export default class OpenAIProvider extends BaseProvider {
fractionalSecondDigits: 3 fractionalSecondDigits: 3
})}` })}`
) )
isFirstThinkingChunk = false
} }
if (reasoningContent) { if (reasoningContent) {
thinkingContent += reasoningContent thinkingContent += reasoningContent