refactor(MainTextBlock): enhance content processing by ignoring tool_use tags (#5483)

This commit is contained in:
SuYao 2025-04-29 16:16:34 +08:00 committed by GitHub
parent 08de8eac86
commit 1ac9ac09c9
2 changed files with 10 additions and 7 deletions

View File

@ -31,6 +31,8 @@ interface Props {
role: Message['role']
}
// Matches <tool_use>…</tool_use> segments, non-greedily and across newlines
// ([\s\S] also matches \n); the 'g' flag lets replace() strip every occurrence.
const toolUseRegex = /<tool_use>([\s\S]*?)<\/tool_use>/g
const MainTextBlock: React.FC<Props> = ({ block, citationBlockId, role, mentions = [] }) => {
// Use the passed citationBlockId directly in the selector
const { renderInputMessageAsMarkdown } = useSettings()
@ -67,6 +69,10 @@ const MainTextBlock: React.FC<Props> = ({ block, citationBlockId, role, mentions
return content
}, [block.content, block.citationReferences, citationBlockId, formattedCitations])
// Content with all <tool_use>…</tool_use> segments stripped, so raw tool
// invocations are not rendered as markdown; recomputed only when
// processedContent changes.
const ignoreToolUse = useMemo(() => {
return processedContent.replace(toolUseRegex, '')
}, [processedContent])
return (
<>
{/* Render mentions associated with the message */}
@ -80,7 +86,7 @@ const MainTextBlock: React.FC<Props> = ({ block, citationBlockId, role, mentions
{role === 'user' && !renderInputMessageAsMarkdown ? (
<p style={{ marginBottom: 5, whiteSpace: 'pre-wrap' }}>{block.content}</p>
) : (
<Markdown block={{ ...block, content: processedContent }} />
<Markdown block={{ ...block, content: ignoreToolUse }} />
)}
</>
)

View File

@ -507,6 +507,7 @@ export default class OpenAIProvider extends BaseProvider {
// let isThinkingInContent: ThoughtProcessor | undefined = undefined
// const processThinkingChunk = this.handleThinkingTags()
let isFirstChunk = true
// True until the first chunk carrying reasoning_content arrives; guards the
// first-token timing below so it is recorded exactly once.
let isFirstThinkingChunk = true
for await (const chunk of stream) {
if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) {
break
@ -521,12 +522,7 @@ export default class OpenAIProvider extends BaseProvider {
const reasoningContent = delta?.reasoning_content || delta?.reasoning
const currentTime = new Date().getTime() // Get current time for each chunk
if (
time_first_token_millsec === 0 &&
isEmpty(reasoningContent) &&
isEmpty(delta?.content) &&
isEmpty(finishReason)
) {
if (time_first_token_millsec === 0 && isFirstThinkingChunk && reasoningContent) {
// Record the timestamp of the first token
time_first_token_millsec = currentTime
// Record the time elapsed until the first token
@ -542,6 +538,7 @@ export default class OpenAIProvider extends BaseProvider {
fractionalSecondDigits: 3
})}`
)
isFirstThinkingChunk = false
}
if (reasoningContent) {
thinkingContent += reasoningContent