Mirror of https://github.com/CherryHQ/cherry-studio.git, synced 2026-01-01 17:59:09 +08:00
hotfix: enhance OpenAI stream handling and error management (#6541)
fix: enhance OpenAI stream handling and error management

- Updated the `openAIChunkToTextDelta` function to include error handling with a try-catch block, improving robustness during stream processing.
- Refined the `readableStreamAsyncIterable` function to ensure proper handling of stream completion and errors, including a `return()` method for cleanup.
- Adjusted type definitions for better clarity and consistency in the handling of async iterables.
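For orientation, a minimal sketch (not part of the diff; the import path, the `sketch` wrapper, and the stream contents are made up for illustration) of why the new `return()` method matters: breaking out of a `for await` loop invokes the iterator's `return()`, which in the patched polyfill cancels the reader and releases the underlying response stream.

import { readableStreamAsyncIterable } from './stream' // path assumed for illustration

async function sketch() {
  // A toy ReadableStream standing in for an OpenAI response body.
  const stream = new ReadableStream<string>({
    start(controller) {
      controller.enqueue('hello')
      controller.enqueue('[DONE]')
      controller.enqueue('never consumed')
    }
  })

  for await (const chunk of readableStreamAsyncIterable<string>(stream)) {
    if (chunk === '[DONE]') break // early exit -> iterator.return() -> reader.cancel()
  }
}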
Parent: 8efa7d25f8
Commit: bc5cc4bf02
@@ -619,32 +619,36 @@ export default class OpenAIProvider extends BaseOpenAIProvider {
     }

     const reasoningTag = getAppropriateTag(model)

     async function* openAIChunkToTextDelta(stream: any): AsyncGenerator<OpenAIStreamChunk> {
-      for await (const chunk of stream) {
-        if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) {
-          break
-        }
-
-        if (chunk.choices && chunk.choices.length > 0) {
-          const delta = chunk.choices[0]?.delta
-          if (
-            (delta?.reasoning_content && delta?.reasoning_content !== '\n') ||
-            (delta?.reasoning && delta?.reasoning !== '\n')
-          ) {
-            yield { type: 'reasoning', textDelta: delta.reasoning_content || delta.reasoning }
-          }
-          if (delta?.content) {
-            yield { type: 'text-delta', textDelta: delta.content }
-          }
-          if (delta?.tool_calls && delta?.tool_calls.length > 0) {
-            yield { type: 'tool-calls', delta: delta }
-          }
-
-          const finishReason = chunk?.choices[0]?.finish_reason
-          if (!isEmpty(finishReason)) {
-            yield { type: 'finish', finishReason, usage: chunk.usage, delta, chunk }
-            break
-          }
-        }
-      }
+      try {
+        for await (const chunk of stream) {
+          if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) {
+            break
+          }
+
+          if (chunk.choices && chunk.choices.length > 0) {
+            const delta = chunk.choices[0]?.delta
+            if (
+              (delta?.reasoning_content && delta?.reasoning_content !== '\n') ||
+              (delta?.reasoning && delta?.reasoning !== '\n')
+            ) {
+              yield { type: 'reasoning', textDelta: delta.reasoning_content || delta.reasoning }
+            }
+            if (delta?.content) {
+              yield { type: 'text-delta', textDelta: delta.content }
+            }
+            if (delta?.tool_calls && delta?.tool_calls.length > 0) {
+              yield { type: 'tool-calls', delta: delta }
+            }
+
+            const finishReason = chunk?.choices[0]?.finish_reason
+            if (!isEmpty(finishReason)) {
+              yield { type: 'finish', finishReason, usage: chunk.usage, delta, chunk }
+              break
+            }
+          }
+        }
+      } catch (error) {
+        console.error('[openAIChunkToTextDelta] error', error)
+        throw error
+      }
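Note that the added catch block logs and rethrows, so a mid-stream failure still rejects the consuming `for await` loop instead of being swallowed. A standalone sketch of that propagation (the `withLogging` wrapper and the failing source are hypothetical):

async function* withLogging(source: AsyncIterable<string>): AsyncGenerator<string> {
  try {
    for await (const item of source) yield item
  } catch (error) {
    console.error('[withLogging] error', error) // breadcrumb, as in the hotfix
    throw error // the consumer still observes the rejection
  }
}

async function demo() {
  const failing = (async function* () {
    yield 'ok'
    throw new Error('boom')
  })()
  try {
    for await (const item of withLogging(failing)) console.log(item)
  } catch (e) {
    console.log('caught at consumer:', (e as Error).message) // prints "boom"
  }
}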
@@ -661,7 +665,7 @@ export default class OpenAIProvider extends BaseOpenAIProvider {
       })

       // 3. Consume processedStream and dispatch onChunk
-      for await (const chunk of readableStreamAsyncIterable(processedStream)) {
+      for await (const chunk of readableStreamAsyncIterable<OpenAIStreamChunk>(processedStream)) {
         const delta = chunk.type === 'finish' ? chunk.delta : chunk
         const rawChunk = chunk.type === 'finish' ? chunk.chunk : chunk
         switch (chunk.type) {
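The explicit `<OpenAIStreamChunk>` type argument is what makes the `chunk.type === 'finish'` checks type-safe: the chunk type acts as a discriminated union. A sketch of the idea (the union shape below is inferred from the yields in this diff, not copied from the repo's actual definition):

type OpenAIStreamChunkSketch =
  | { type: 'reasoning' | 'text-delta'; textDelta: string }
  | { type: 'tool-calls'; delta: any }
  | { type: 'finish'; finishReason: string; usage?: any; delta: any; chunk: any }

function pickDelta(chunk: OpenAIStreamChunkSketch) {
  // TypeScript narrows on the `type` discriminant, so `delta` is only
  // accessed where it is known to exist.
  return chunk.type === 'finish' ? chunk.delta : chunk
}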
@@ -1,12 +1,32 @@
-export function readableStreamAsyncIterable<T>(stream: ReadableStream<T>): AsyncIterable<T> {
+/**
+ * Most browsers don't yet have async iterable support for ReadableStream,
+ * and Node has a very different way of reading bytes from its "ReadableStream".
+ *
+ * This polyfill was pulled from https://github.com/MattiasBuelens/web-streams-polyfill/pull/122#issuecomment-1627354490
+ */
+export function readableStreamAsyncIterable<T>(stream: any): AsyncIterableIterator<T> {
+  if (stream[Symbol.asyncIterator]) return stream
+
   const reader = stream.getReader()
   return {
-    [Symbol.asyncIterator](): AsyncIterator<T> {
-      return {
-        async next(): Promise<IteratorResult<T>> {
-          return reader.read() as Promise<IteratorResult<T>>
-        }
-      }
-    }
-  }
-}
+    async next() {
+      try {
+        const result = await reader.read()
+        if (result?.done) reader.releaseLock() // release lock when stream becomes closed
+        return result
+      } catch (e) {
+        reader.releaseLock() // release lock when stream becomes errored
+        throw e
+      }
+    },
+    async return() {
+      const cancelPromise = reader.cancel()
+      reader.releaseLock()
+      await cancelPromise
+      return { done: true, value: undefined }
+    },
+    [Symbol.asyncIterator]() {
+      return this
+    }
+  }
+}
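A quick self-contained usage sketch of the patched helper, showing that early termination now propagates cancellation to the source (everything below is illustrative and built only on standard Web Streams APIs):

async function demo() {
  let cancelled = false
  const stream = new ReadableStream<number>({
    start(controller) {
      controller.enqueue(1)
      controller.enqueue(2)
      controller.enqueue(3)
    },
    cancel() {
      cancelled = true // invoked via reader.cancel() from the iterator's return()
    }
  })

  for await (const n of readableStreamAsyncIterable<number>(stream)) {
    if (n === 2) break // `break` calls return(), which cancels and releases the reader
  }

  console.log(cancelled) // true: the source was told to stop producing
}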