Mirror of https://github.com/CherryHQ/cherry-studio.git (synced 2025-12-19 14:41:24 +08:00)
Merge f9024eb07a into 8ab375161d
This commit is contained in: commit 71f3cec372
@@ -339,12 +339,14 @@ export class AiSdkToChunkAdapter {
           reasoning_content: final.reasoningContent || ''
         }

+        // Pass finishReason in BLOCK_COMPLETE for message-level tracking
         this.onChunk({
           type: ChunkType.BLOCK_COMPLETE,
           response: {
             ...baseResponse,
             usage: { ...usage },
-            metrics: metrics ? { ...metrics } : undefined
+            metrics: metrics ? { ...metrics } : undefined,
+            finishReason: chunk.finishReason
           }
         })
         this.onChunk({
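Downstream consumers can now read the finish reason off the completed block chunk. A minimal sketch, assuming the chunk union exposes `response` on the BLOCK_COMPLETE variant as constructed above (the handler itself is illustrative, not part of this commit):

import { ChunkType, type Chunk } from '@renderer/types/chunk'

// Illustrative handler: pick up the finish reason that the adapter now
// forwards on BLOCK_COMPLETE so message-level state can record it.
const onChunk = (chunk: Chunk) => {
  if (chunk.type === ChunkType.BLOCK_COMPLETE) {
    const finishReason = chunk.response?.finishReason // e.g. 'stop', 'length', 'content-filter'
    if (finishReason && finishReason !== 'stop' && finishReason !== 'tool-calls') {
      // surface a warning, which is what FinishReasonWarning does further down in this diff
    }
  }
}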
English locale:

@@ -1805,6 +1805,9 @@
     },
     "citation": "{{count}} citations",
     "citations": "References",
+    "continue_generation": {
+      "prompt": "[CONTINUE EXACTLY FROM CUTOFF POINT]\n\nYour previous response was cut off mid-generation. Continue IMMEDIATELY from where you stopped - do NOT repeat, summarize, or restart. Your next word should be the exact continuation.\n\nYour response ended with: \"{{truncatedContent}}\"\n\nContinue now (first word must follow directly from the above):"
+    },
     "copied": "Copied!",
     "copy": {
       "failed": "Copy failed",
@@ -2016,6 +2019,14 @@
       }
     },
     "warning": {
+      "finish_reason": {
+        "content-filter": "Content was blocked by safety filter",
+        "continue": "Continue generating",
+        "error": "An error occurred during generation",
+        "length": "Maximum output length limit reached",
+        "other": "Generation terminated",
+        "unknown": "Generation terminated for unknown reason"
+      },
       "rate": {
         "limit": "Too many requests. Please wait {{seconds}} seconds before trying again."
       }
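These keys are consumed at runtime roughly as follows; the two `t` call shapes match the component and thunk changes later in this diff (surrounding variables come from that code, shown here only as a sketch):

// Given an i18next translate function `t` (sketch; `finishReason` and
// `truncatedContent` come from the surrounding component/thunk code).
const warningText = t(`message.warning.finish_reason.${finishReason}`)
const continuationPrompt = t('message.continue_generation.prompt', {
  truncatedContent: truncatedContent.slice(-150)
})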
Simplified Chinese locale:

@@ -1805,6 +1805,9 @@
     },
     "citation": "{{count}} 个引用内容",
     "citations": "引用内容",
+    "continue_generation": {
+      "prompt": "[从截断处精确继续]\n\n你之前的回复在生成过程中被截断了。请立即从停止的地方继续——不要重复、总结或重新开始。你的下一个字必须是精确的接续。\n\n你的回复结尾是:\"{{truncatedContent}}\"\n\n现在继续(第一个字必须直接接上面的内容):"
+    },
     "copied": "已复制",
     "copy": {
       "failed": "复制失败",
@@ -2016,6 +2019,14 @@
       }
     },
     "warning": {
+      "finish_reason": {
+        "content-filter": "内容被安全过滤器拦截",
+        "continue": "继续生成",
+        "error": "生成过程中发生错误",
+        "length": "已达到最大输出长度限制",
+        "other": "生成已终止",
+        "unknown": "生成因未知原因终止"
+      },
       "rate": {
         "limit": "发送过于频繁,请等待 {{seconds}} 秒后再尝试"
       }
Traditional Chinese locale:

@@ -1805,6 +1805,9 @@
     },
     "citation": "{{count}} 個引用內容",
     "citations": "引用內容",
+    "continue_generation": {
+      "prompt": "[to be translated]:Please continue your previous response exactly from where you left off. Do not repeat any content that was already generated. Continue directly from:\n\n{{truncatedContent}}"
+    },
     "copied": "已複製!",
     "copy": {
       "failed": "複製失敗",
@@ -2016,6 +2019,14 @@
       }
     },
     "warning": {
+      "finish_reason": {
+        "content-filter": "[to be translated]:Content was blocked by safety filter",
+        "continue": "[to be translated]:Continue generating",
+        "error": "[to be translated]:An error occurred during generation",
+        "length": "[to be translated]:Maximum output length limit reached",
+        "other": "[to be translated]:Generation terminated",
+        "unknown": "[to be translated]:Generation terminated for unknown reason"
+      },
       "rate": {
         "limit": "傳送過於頻繁,請在 {{seconds}} 秒後再嘗試"
       }
German locale:

@@ -1805,6 +1805,9 @@
     },
     "citation": "{{count}} Zitate",
     "citations": "Zitate",
+    "continue_generation": {
+      "prompt": "[to be translated]:Please continue your previous response exactly from where you left off. Do not repeat any content that was already generated. Continue directly from:\n\n{{truncatedContent}}"
+    },
     "copied": "Kopiert",
     "copy": {
       "failed": "Kopieren fehlgeschlagen",
@@ -2016,6 +2019,14 @@
       }
     },
     "warning": {
+      "finish_reason": {
+        "content-filter": "[to be translated]:Content was blocked by safety filter",
+        "continue": "[to be translated]:Continue generating",
+        "error": "[to be translated]:An error occurred during generation",
+        "length": "[to be translated]:Maximum output length limit reached",
+        "other": "[to be translated]:Generation terminated",
+        "unknown": "[to be translated]:Generation terminated for unknown reason"
+      },
       "rate": {
         "limit": "Zu viele Anfragen. Bitte warten Sie {{seconds}} Sekunden, bevor Sie es erneut versuchen"
       }
Greek locale:

@@ -1805,6 +1805,9 @@
     },
     "citation": "{{count}} αναφορές",
     "citations": "Περιεχόμενα αναφοράς",
+    "continue_generation": {
+      "prompt": "[to be translated]:Please continue your previous response exactly from where you left off. Do not repeat any content that was already generated. Continue directly from:\n\n{{truncatedContent}}"
+    },
     "copied": "Αντιγράφηκε",
     "copy": {
       "failed": "Η αντιγραφή απέτυχε",
@@ -2016,6 +2019,14 @@
       }
     },
     "warning": {
+      "finish_reason": {
+        "content-filter": "[to be translated]:Content was blocked by safety filter",
+        "continue": "[to be translated]:Continue generating",
+        "error": "[to be translated]:An error occurred during generation",
+        "length": "[to be translated]:Maximum output length limit reached",
+        "other": "[to be translated]:Generation terminated",
+        "unknown": "[to be translated]:Generation terminated for unknown reason"
+      },
       "rate": {
         "limit": "Υπερβολική συχνότητα στείλατε παρακαλώ περιμένετε {{seconds}} δευτερόλεπτα και προσπαθήστε ξανά"
       }
Spanish locale:

@@ -1805,6 +1805,9 @@
     },
     "citation": "{{count}} contenido citado",
     "citations": "Citas",
+    "continue_generation": {
+      "prompt": "[to be translated]:Please continue your previous response exactly from where you left off. Do not repeat any content that was already generated. Continue directly from:\n\n{{truncatedContent}}"
+    },
     "copied": "Copiado",
     "copy": {
       "failed": "Copia fallida",
@@ -2016,6 +2019,14 @@
       }
     },
     "warning": {
+      "finish_reason": {
+        "content-filter": "[to be translated]:Content was blocked by safety filter",
+        "continue": "[to be translated]:Continue generating",
+        "error": "[to be translated]:An error occurred during generation",
+        "length": "[to be translated]:Maximum output length limit reached",
+        "other": "[to be translated]:Generation terminated",
+        "unknown": "[to be translated]:Generation terminated for unknown reason"
+      },
       "rate": {
         "limit": "Envío demasiado frecuente, espere {{seconds}} segundos antes de intentarlo de nuevo"
       }
French locale:

@@ -1805,6 +1805,9 @@
     },
     "citation": "{{count}} éléments cités",
     "citations": "Citations",
+    "continue_generation": {
+      "prompt": "[to be translated]:Please continue your previous response exactly from where you left off. Do not repeat any content that was already generated. Continue directly from:\n\n{{truncatedContent}}"
+    },
     "copied": "Copié",
     "copy": {
       "failed": "La copie a échoué",
@@ -2016,6 +2019,14 @@
       }
     },
     "warning": {
+      "finish_reason": {
+        "content-filter": "[to be translated]:Content was blocked by safety filter",
+        "continue": "[to be translated]:Continue generating",
+        "error": "[to be translated]:An error occurred during generation",
+        "length": "[to be translated]:Maximum output length limit reached",
+        "other": "[to be translated]:Generation terminated",
+        "unknown": "[to be translated]:Generation terminated for unknown reason"
+      },
       "rate": {
         "limit": "Vous envoyez trop souvent, veuillez attendre {{seconds}} secondes avant de réessayer"
       }
Japanese locale:

@@ -1805,6 +1805,9 @@
     },
     "citation": "{{count}}個の引用内容",
     "citations": "引用内容",
+    "continue_generation": {
+      "prompt": "[to be translated]:Please continue your previous response exactly from where you left off. Do not repeat any content that was already generated. Continue directly from:\n\n{{truncatedContent}}"
+    },
     "copied": "コピーしました!",
     "copy": {
       "failed": "コピーに失敗しました",
@@ -2016,6 +2019,14 @@
       }
     },
     "warning": {
+      "finish_reason": {
+        "content-filter": "[to be translated]:Content was blocked by safety filter",
+        "continue": "[to be translated]:Continue generating",
+        "error": "[to be translated]:An error occurred during generation",
+        "length": "[to be translated]:Maximum output length limit reached",
+        "other": "[to be translated]:Generation terminated",
+        "unknown": "[to be translated]:Generation terminated for unknown reason"
+      },
       "rate": {
         "limit": "送信が頻繁すぎます。{{seconds}} 秒待ってから再試行してください。"
       }
Portuguese locale:

@@ -1805,6 +1805,9 @@
     },
     "citation": "{{count}} conteúdo(s) citado(s)",
     "citations": "Citações",
+    "continue_generation": {
+      "prompt": "[to be translated]:Please continue your previous response exactly from where you left off. Do not repeat any content that was already generated. Continue directly from:\n\n{{truncatedContent}}"
+    },
     "copied": "Copiado",
     "copy": {
       "failed": "Cópia falhou",
@@ -2016,6 +2019,14 @@
       }
     },
     "warning": {
+      "finish_reason": {
+        "content-filter": "[to be translated]:Content was blocked by safety filter",
+        "continue": "[to be translated]:Continue generating",
+        "error": "[to be translated]:An error occurred during generation",
+        "length": "[to be translated]:Maximum output length limit reached",
+        "other": "[to be translated]:Generation terminated",
+        "unknown": "[to be translated]:Generation terminated for unknown reason"
+      },
       "rate": {
         "limit": "Envio muito frequente, aguarde {{seconds}} segundos antes de tentar novamente"
       }
Russian locale:

@@ -1805,6 +1805,9 @@
     },
     "citation": "{{count}} цитат",
     "citations": "Содержание цитат",
+    "continue_generation": {
+      "prompt": "[to be translated]:Please continue your previous response exactly from where you left off. Do not repeat any content that was already generated. Continue directly from:\n\n{{truncatedContent}}"
+    },
     "copied": "Скопировано!",
     "copy": {
       "failed": "Не удалось скопировать",
@@ -2016,6 +2019,14 @@
       }
     },
     "warning": {
+      "finish_reason": {
+        "content-filter": "[to be translated]:Content was blocked by safety filter",
+        "continue": "[to be translated]:Continue generating",
+        "error": "[to be translated]:An error occurred during generation",
+        "length": "[to be translated]:Maximum output length limit reached",
+        "other": "[to be translated]:Generation terminated",
+        "unknown": "[to be translated]:Generation terminated for unknown reason"
+      },
       "rate": {
         "limit": "Отправка слишком частая, пожалуйста, подождите {{seconds}} секунд, прежде чем попробовать снова."
       }
src/renderer/src/pages/home/Messages/FinishReasonWarning.tsx (new file, 64 lines)
@@ -0,0 +1,64 @@
+import type { FinishReason } from 'ai'
+import { Alert as AntdAlert, Button } from 'antd'
+import { Play } from 'lucide-react'
+import React from 'react'
+import { useTranslation } from 'react-i18next'
+import styled from 'styled-components'
+
+interface Props {
+  finishReason: FinishReason
+  onContinue?: () => void
+  onDismiss?: () => void
+}
+
+/**
+ * Displays a warning banner when message generation was truncated or filtered
+ * Only shows for non-normal finish reasons (not 'stop' or 'tool-calls')
+ */
+const FinishReasonWarning: React.FC<Props> = ({ finishReason, onContinue, onDismiss }) => {
+  const { t } = useTranslation()
+
+  // Don't show warning for normal finish reasons
+  if (finishReason === 'stop' || finishReason === 'tool-calls') {
+    return null
+  }
+
+  const getWarningMessage = () => {
+    const i18nKey = `message.warning.finish_reason.${finishReason}`
+    return t(i18nKey)
+  }
+
+  // Only show continue button for 'length' reason (max tokens reached)
+  const showContinueButton = finishReason === 'length' && onContinue
+
+  return (
+    <Alert
+      message={getWarningMessage()}
+      type="warning"
+      showIcon
+      closable={!!onDismiss}
+      onClose={onDismiss}
+      action={
+        showContinueButton && (
+          <Button
+            size="small"
+            type="text"
+            icon={<Play size={14} />}
+            onClick={onContinue}
+            style={{ display: 'flex', alignItems: 'center', gap: 4 }}>
+            {t('message.warning.finish_reason.continue')}
+          </Button>
+        )
+      }
+    />
+  )
+}
+
+const Alert = styled(AntdAlert)`
+  margin: 0.5rem 0 !important;
+  padding: 8px 12px;
+  font-size: 12px;
+  align-items: center;
+`
+
+export default React.memo(FinishReasonWarning)
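For reference, the AI SDK's FinishReason union covers 'stop', 'length', 'content-filter', 'tool-calls', 'error', 'other' and 'unknown', which lines up with the locale keys added above plus the two reasons the component ignores. A minimal render sketch (the handlers here are placeholders, not code from this commit):

// Hypothetical usage: warn that the reply hit the max-token limit and offer
// to continue; a 'stop' or 'tool-calls' finish reason would render nothing.
<FinishReasonWarning
  finishReason="length"
  onContinue={() => console.log('continue generation requested')}
  onDismiss={() => console.log('warning dismissed')}
/>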
@@ -13,7 +13,7 @@ import { getMessageModelId } from '@renderer/services/MessagesService'
 import { getModelUniqId } from '@renderer/services/ModelService'
 import { estimateMessageUsage } from '@renderer/services/TokenService'
 import type { Assistant, Topic } from '@renderer/types'
-import type { Message, MessageBlock } from '@renderer/types/newMessage'
+import type { Message as MessageType, MessageBlock } from '@renderer/types/newMessage'
 import { classNames, cn } from '@renderer/utils'
 import { scrollIntoView } from '@renderer/utils/dom'
 import { isMessageProcessing } from '@renderer/utils/messageUtils/is'
@@ -31,7 +31,7 @@ import MessageMenubar from './MessageMenubar'
 import MessageOutline from './MessageOutline'

 interface Props {
-  message: Message
+  message: MessageType
   topic: Topic
   assistant?: Assistant
   index?: number
@@ -40,7 +40,7 @@ interface Props {
   style?: React.CSSProperties
   isGrouped?: boolean
   isStreaming?: boolean
-  onSetMessages?: Dispatch<SetStateAction<Message[]>>
+  onSetMessages?: Dispatch<SetStateAction<MessageType[]>>
   onUpdateUseful?: (msgId: string) => void
   isGroupContextMessage?: boolean
 }
@@ -118,6 +118,26 @@ const MessageItem: FC<Props> = ({
     stopEditing()
   }, [stopEditing])

+  // Handle continue generation when max tokens reached
+  const handleContinueGeneration = useCallback(
+    async (msg: MessageType) => {
+      if (!assistant) return
+      // Clear the finishReason first, then trigger continue generation
+      await editMessage(msg.id, { finishReason: undefined })
+      // Emit event to trigger continue generation
+      EventEmitter.emit(EVENT_NAMES.CONTINUE_GENERATION, { message: msg, assistant, topic })
+    },
+    [assistant, editMessage, topic]
+  )
+
+  // Handle dismiss warning (just clear finishReason)
+  const handleDismissWarning = useCallback(
+    async (msg: MessageType) => {
+      await editMessage(msg.id, { finishReason: undefined })
+    },
+    [editMessage]
+  )
+
   const isLastMessage = index === 0 || !!isGrouped
   const isAssistantMessage = message.role === 'assistant'
   const isProcessing = isMessageProcessing(message)
@@ -225,7 +245,11 @@ const MessageItem: FC<Props> = ({
             overflowY: 'visible'
           }}>
          <MessageErrorBoundary>
-            <MessageContent message={message} />
+            <MessageContent
+              message={message}
+              onContinueGeneration={handleContinueGeneration}
+              onDismissWarning={handleDismissWarning}
+            />
           </MessageErrorBoundary>
         </MessageContentContainer>
         {showMenubar && (
@@ -6,11 +6,29 @@ import React from 'react'
 import styled from 'styled-components'

 import MessageBlockRenderer from './Blocks'
+import FinishReasonWarning from './FinishReasonWarning'

 interface Props {
   message: Message
+  onContinueGeneration?: (message: Message) => void
+  onDismissWarning?: (message: Message) => void
 }

-const MessageContent: React.FC<Props> = ({ message }) => {
+const MessageContent: React.FC<Props> = ({ message, onContinueGeneration, onDismissWarning }) => {
+  // Check if we should show finish reason warning
+  const showFinishReasonWarning =
+    message.role === 'assistant' &&
+    message.finishReason &&
+    !['stop', 'tool-calls', 'error'].includes(message.finishReason)
+
+  const handleContinue = () => {
+    onContinueGeneration?.(message)
+  }
+
+  const handleDismiss = () => {
+    onDismissWarning?.(message)
+  }
+
   return (
     <>
       {!isEmpty(message.mentions) && (
@@ -21,6 +39,13 @@ const MessageContent: React.FC<Props> = ({ message }) => {
         </Flex>
       )}
       <MessageBlockRenderer blocks={message.blocks} message={message} />
+      {showFinishReasonWarning && (
+        <FinishReasonWarning
+          finishReason={message.finishReason!}
+          onContinue={onContinueGeneration ? handleContinue : undefined}
+          onDismiss={onDismissWarning ? handleDismiss : undefined}
+        />
+      )}
     </>
   )
 }
@@ -18,7 +18,11 @@ import { estimateHistoryTokens } from '@renderer/services/TokenService'
 import store, { useAppDispatch } from '@renderer/store'
 import { messageBlocksSelectors, updateOneBlock } from '@renderer/store/messageBlock'
 import { newMessagesActions } from '@renderer/store/newMessage'
-import { saveMessageAndBlocksToDB, updateMessageAndBlocksThunk } from '@renderer/store/thunk/messageThunk'
+import {
+  continueGenerationThunk,
+  saveMessageAndBlocksToDB,
+  updateMessageAndBlocksThunk
+} from '@renderer/store/thunk/messageThunk'
 import type { Assistant, Topic } from '@renderer/types'
 import type { MessageBlock } from '@renderer/types/newMessage'
 import { type Message, MessageBlockType } from '@renderer/types/newMessage'
@@ -233,6 +237,18 @@ const Messages: React.FC<MessagesProps> = ({ assistant, topic, setActiveTopic, o
           window.toast.error(t('code_block.edit.save.failed.label'))
          }
        }
+      ),
+      EventEmitter.on(
+        EVENT_NAMES.CONTINUE_GENERATION,
+        async (data: { message: Message; assistant: Assistant; topic: Topic }) => {
+          const { message, assistant: msgAssistant, topic: msgTopic } = data
+          // Only handle if it's for the current topic
+          if (msgTopic.id !== topic.id) {
+            return
+          }
+          await dispatch(continueGenerationThunk(topic.id, message, msgAssistant))
+          scrollToBottom()
+        }
       )
     ]

@@ -25,5 +25,6 @@ export const EVENT_NAMES = {
   RESEND_MESSAGE: 'RESEND_MESSAGE',
   SHOW_MODEL_SELECTOR: 'SHOW_MODEL_SELECTOR',
   EDIT_CODE_BLOCK: 'EDIT_CODE_BLOCK',
-  CHANGE_TOPIC: 'CHANGE_TOPIC'
+  CHANGE_TOPIC: 'CHANGE_TOPIC',
+  CONTINUE_GENERATION: 'CONTINUE_GENERATION'
 }
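Taken together with the MessageItem and Messages changes above, the continue flow is a simple event round trip over this new constant. A condensed sketch, with names taken from this diff and the surrounding glue code omitted:

// Producer side (MessageItem): clear the stored finishReason, then request a continuation.
await editMessage(message.id, { finishReason: undefined })
EventEmitter.emit(EVENT_NAMES.CONTINUE_GENERATION, { message, assistant, topic })

// Consumer side (Messages): only the list owning the current topic reacts,
// and it hands the work off to the Redux thunk.
EventEmitter.on(EVENT_NAMES.CONTINUE_GENERATION, async ({ message, assistant, topic: msgTopic }) => {
  if (msgTopic.id !== topic.id) return
  await dispatch(continueGenerationThunk(topic.id, message, assistant))
})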
@@ -214,7 +214,12 @@ export const createBaseCallbacks = (deps: BaseCallbacksDependencies) => {
         }
       }

-      const messageUpdates = { status, metrics: response?.metrics, usage: response?.usage }
+      const messageUpdates = {
+        status,
+        metrics: response?.metrics,
+        usage: response?.usage,
+        finishReason: response?.finishReason
+      }
       dispatch(
         newMessagesActions.updateMessage({
           topicId,
@@ -13,9 +13,15 @@ import store from '@renderer/store'
 import { updateTopicUpdatedAt } from '@renderer/store/assistants'
 import { type ApiServerConfig, type Assistant, type FileMetadata, type Model, type Topic } from '@renderer/types'
 import type { AgentSessionEntity, GetAgentSessionResponse } from '@renderer/types/agent'
+import type { Chunk } from '@renderer/types/chunk'
 import { ChunkType } from '@renderer/types/chunk'
 import type { FileMessageBlock, ImageMessageBlock, Message, MessageBlock } from '@renderer/types/newMessage'
-import { AssistantMessageStatus, MessageBlockStatus, MessageBlockType } from '@renderer/types/newMessage'
+import {
+  AssistantMessageStatus,
+  MessageBlockStatus,
+  MessageBlockType,
+  UserMessageStatus
+} from '@renderer/types/newMessage'
 import { uuid } from '@renderer/utils'
 import { addAbortController } from '@renderer/utils/abortController'
 import {
@@ -25,6 +31,7 @@ import {
 } from '@renderer/utils/agentSession'
 import {
   createAssistantMessage,
+  createMainTextBlock,
   createTranslationBlock,
   resetAssistantMessage
 } from '@renderer/utils/messageUtils/create'
@@ -1496,6 +1503,230 @@ export const appendAssistantResponseThunk =
   }
 }

+/**
+ * Thunk to continue generation from where an assistant message was truncated.
+ * Appends new content to the original truncated message instead of creating new messages.
+ * This avoids issues with APIs that don't support consecutive assistant messages.
+ * @param topicId - The topic ID.
+ * @param truncatedAssistantMessage - The assistant message that was truncated (finishReason: 'length').
+ * @param assistant - The assistant configuration.
+ */
+export const continueGenerationThunk =
+  (topicId: Topic['id'], truncatedAssistantMessage: Message, assistant: Assistant) =>
+  async (dispatch: AppDispatch, getState: () => RootState) => {
+    try {
+      const state = getState()
+
+      // Verify the truncated message exists
+      if (!state.messages.entities[truncatedAssistantMessage.id]) {
+        logger.error(`[continueGenerationThunk] Truncated message ${truncatedAssistantMessage.id} not found.`)
+        return
+      }
+
+      // Get the content of the truncated message to include in the continuation prompt
+      const truncatedContent = getMainTextContent(truncatedAssistantMessage)
+
+      // Create a continuation prompt that asks the AI to continue strictly from where it left off
+      // Use only the last 150 chars to minimize repetition - just enough for context
+      const continuationPrompt = t('message.continue_generation.prompt', {
+        truncatedContent: truncatedContent.slice(-150)
+      })
+
+      // Update the truncated message status to PROCESSING to indicate continuation
+      const messageUpdates = {
+        status: AssistantMessageStatus.PROCESSING,
+        updatedAt: new Date().toISOString()
+      }
+      dispatch(
+        newMessagesActions.updateMessage({
+          topicId,
+          messageId: truncatedAssistantMessage.id,
+          updates: messageUpdates
+        })
+      )
+      dispatch(updateTopicUpdatedAt({ topicId }))
+
+      // Queue the generation with continuation context
+      const queue = getTopicQueue(topicId)
+      const assistantConfig = {
+        ...assistant,
+        model: truncatedAssistantMessage.model || assistant.model
+      }
+      queue.add(async () => {
+        await fetchAndProcessContinuationImpl(
+          dispatch,
+          getState,
+          topicId,
+          assistantConfig,
+          truncatedAssistantMessage,
+          continuationPrompt
+        )
+      })
+    } catch (error) {
+      logger.error(`[continueGenerationThunk] Error continuing generation:`, error as Error)
+    } finally {
+      finishTopicLoading(topicId)
+    }
+  }
+
+/**
+ * Implementation for continuing generation on a truncated message.
+ * Similar to fetchAndProcessAssistantResponseImpl but:
+ * 1. Finds the existing main text block to append content to
+ * 2. Uses a continuation prompt to ask the AI to continue
+ * 3. Wraps the chunk processor to prepend existing content to new text
+ */
+const fetchAndProcessContinuationImpl = async (
+  dispatch: AppDispatch,
+  getState: () => RootState,
+  topicId: string,
+  origAssistant: Assistant,
+  truncatedMessage: Message,
+  continuationPrompt: string
+) => {
+  const topic = origAssistant.topics.find((t) => t.id === topicId)
+  const assistant = topic?.prompt
+    ? { ...origAssistant, prompt: `${origAssistant.prompt}\n${topic.prompt}` }
+    : origAssistant
+  const assistantMsgId = truncatedMessage.id
+  let callbacks: StreamProcessorCallbacks = {}
+
+  // Create a virtual user message with the continuation prompt
+  // We need to temporarily add the block to store so getMainTextContent can read it
+  const virtualUserMessageId = uuid()
+  const virtualTextBlock = createMainTextBlock(virtualUserMessageId, continuationPrompt, {
+    status: MessageBlockStatus.SUCCESS
+  })
+
+  try {
+    dispatch(newMessagesActions.setTopicLoading({ topicId, loading: true }))
+
+    // Find the existing main text block content to prepend
+    const state = getState()
+    const existingMainTextBlockId = truncatedMessage.blocks.find((blockId) => {
+      const block = state.messageBlocks.entities[blockId]
+      return block?.type === MessageBlockType.MAIN_TEXT
+    })
+    const existingContent = existingMainTextBlockId
+      ? (state.messageBlocks.entities[existingMainTextBlockId] as any)?.content || ''
+      : ''
+
+    // Create BlockManager instance
+    const blockManager = new BlockManager({
+      dispatch,
+      getState,
+      saveUpdatedBlockToDB,
+      saveUpdatesToDB,
+      assistantMsgId,
+      topicId,
+      throttledBlockUpdate,
+      cancelThrottledBlockUpdate
+    })
+
+    const allMessagesForTopic = selectMessagesForTopic(getState(), topicId)
+
+    // Find the original user message that triggered this assistant response
+    const userMessageId = truncatedMessage.askId
+    const userMessageIndex = allMessagesForTopic.findIndex((m) => m?.id === userMessageId)
+
+    let messagesForContext: Message[] = []
+    if (userMessageIndex === -1) {
+      logger.error(
+        `[fetchAndProcessContinuationImpl] Triggering user message ${userMessageId} not found. Falling back.`
+      )
+      const assistantMessageIndex = allMessagesForTopic.findIndex((m) => m?.id === assistantMsgId)
+      messagesForContext = (
+        assistantMessageIndex !== -1 ? allMessagesForTopic.slice(0, assistantMessageIndex) : allMessagesForTopic
+      ).filter((m) => m && !m.status?.includes('ing'))
+    } else {
+      // Include messages up to the user message
+      const contextSlice = allMessagesForTopic.slice(0, userMessageIndex + 1)
+      messagesForContext = contextSlice.filter((m) => m && !m.status?.includes('ing'))
+    }
+
+    // Add the truncated assistant message content to context
+    const truncatedAssistantForContext: Message = {
+      ...truncatedMessage,
+      status: AssistantMessageStatus.SUCCESS // Treat as completed for context
+    }
+
+    const virtualContinueMessage: Message = {
+      id: virtualUserMessageId,
+      role: 'user',
+      topicId,
+      assistantId: assistant.id,
+      createdAt: new Date().toISOString(),
+      status: UserMessageStatus.SUCCESS,
+      blocks: [virtualTextBlock.id],
+      model: assistant.model,
+      modelId: assistant.model?.id
+    }
+
+    // Temporarily add the block to store (will be removed in finally block)
+    dispatch(upsertOneBlock(virtualTextBlock))
+
+    // Build the final context: original context + truncated assistant + virtual user message
+    messagesForContext = [...messagesForContext, truncatedAssistantForContext, virtualContinueMessage]
+
+    // Create standard callbacks (no modification needed)
+    callbacks = createCallbacks({
+      blockManager,
+      dispatch,
+      getState,
+      topicId,
+      assistantMsgId,
+      saveUpdatesToDB,
+      assistant
+    })
+    const baseStreamProcessor = createStreamProcessor(callbacks)
+
+    // Wrap the stream processor to prepend existing content to text chunks
+    const wrappedStreamProcessor = (chunk: Chunk) => {
+      if (chunk.type === ChunkType.TEXT_DELTA || chunk.type === ChunkType.TEXT_COMPLETE) {
+        // Prepend existing content to the new text
+        return baseStreamProcessor({
+          ...chunk,
+          text: existingContent + chunk.text
+        })
+      }
+      return baseStreamProcessor(chunk)
+    }
+
+    const abortController = new AbortController()
+    addAbortController(userMessageId!, () => abortController.abort())
+
+    await transformMessagesAndFetch(
+      {
+        messages: messagesForContext,
+        assistant,
+        topicId,
+        options: {
+          signal: abortController.signal,
+          timeout: 30000,
+          headers: defaultAppHeaders()
+        }
+      },
+      wrappedStreamProcessor
+    )
+  } catch (error: any) {
+    logger.error('Error in fetchAndProcessContinuationImpl:', error)
+    endSpan({
+      topicId,
+      error: error,
+      modelName: assistant.model?.name
+    })
+    try {
+      callbacks.onError?.(error)
+    } catch (callbackError) {
+      logger.error('Error in onError callback:', callbackError as Error)
+    }
+  } finally {
+    // Always clean up the temporary virtual block
+    dispatch(removeManyBlocks([virtualTextBlock.id]))
+    dispatch(newMessagesActions.setTopicLoading({ topicId, loading: false }))
+  }
+}
+
 /**
  * Clones messages from a source topic up to a specified index into a *pre-existing* new topic.
  * Generates new unique IDs for all cloned messages and blocks.
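The key design choice above is the shape of the request context: the history up to the triggering user turn, then the cut-off assistant reply marked as completed, then a synthetic user turn carrying the continuation prompt, so the provider never sees two consecutive assistant messages. A sketch of that shape, with the message objects abbreviated and the placeholder names (priorTurnsUpToTriggeringUserMessage, continuationPromptBlock) purely illustrative:

// Sketch only: abbreviated message objects, illustrative names.
const messagesForContext = [
  ...priorTurnsUpToTriggeringUserMessage,                                        // existing history
  { ...truncatedAssistantMessage, status: AssistantMessageStatus.SUCCESS },      // cut-off reply, treated as complete
  { role: 'user', blocks: [continuationPromptBlock.id], status: UserMessageStatus.SUCCESS } // synthetic continue turn
]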
@@ -1,5 +1,5 @@
 import type { CompletionUsage } from '@cherrystudio/openai/resources'
-import type { ProviderMetadata } from 'ai'
+import type { FinishReason, ProviderMetadata } from 'ai'

 import type {
   Assistant,
@@ -221,6 +221,10 @@ export type Message = {
   // raw data
   // TODO: add this providerMetadata to MessageBlock to save raw provider data for each block
   providerMetadata?: ProviderMetadata
+
+  // Finish reason from AI SDK (e.g., 'stop', 'length', 'content-filter', etc.)
+  // Used to show warnings when generation was truncated or filtered
+  finishReason?: FinishReason
 }

 export interface Response {
@@ -232,6 +236,7 @@ export interface Response {
   mcpToolResponse?: MCPToolResponse[]
   generateImage?: GenerateImageResponse
   error?: ResponseError
+  finishReason?: FinishReason
 }

 // FIXME: Weak type safety. It may be a specific class instance which inherits Error in runtime.
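With finishReason persisted on Message, any view can decide whether a reply ended normally. A minimal predicate in the spirit of the MessageContent check above (the helper name is illustrative, not part of this commit):

import type { Message } from '@renderer/types/newMessage'

// Illustrative helper: true when an assistant reply ended for a reason worth
// surfacing to the user ('length', 'content-filter', 'other', 'unknown', ...).
const wasTruncatedOrFiltered = (message: Message): boolean =>
  message.role === 'assistant' &&
  !!message.finishReason &&
  !['stop', 'tool-calls', 'error'].includes(message.finishReason)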