Mirror of https://github.com/CherryHQ/cherry-studio.git

commit 5f3ef42826 (parent 2782744a85)

fix: move start_time_millsec initialization to onChunk for accurate timing
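Across all four providers the change is the same: `start_time_millsec` used to be initialized at the top of the stream-processing helper, and in most of these providers that helper receives an already-created stream, so time-to-first-token missed the request round-trip. The commit moves the initialization to right after the `LLM_RESPONSE_CREATED` chunk is emitted, immediately before the request is dispatched. A minimal sketch of the resulting pattern, with a hypothetical `createStream` and simplified types (only `onChunk`, `ChunkType.LLM_RESPONSE_CREATED`, `start_time_millsec`, and `time_first_token_millsec` come from the diff):

// Sketch of the timing pattern this commit adopts; createStream is a
// hypothetical stand-in for the provider SDK calls.
enum ChunkType {
  LLM_RESPONSE_CREATED = 'LLM_RESPONSE_CREATED'
}

type Chunk = { type: ChunkType }

async function runCompletion(
  onChunk: (chunk: Chunk) => void,
  createStream: () => Promise<AsyncIterable<string>>
): Promise<void> {
  onChunk({ type: ChunkType.LLM_RESPONSE_CREATED })
  // Clock starts here, before the request is dispatched, so the
  // first-token measurement includes the network round-trip.
  const start_time_millsec = new Date().getTime()
  let time_first_token_millsec = 0

  const stream = await createStream()
  for await (const token of stream) {
    if (time_first_token_millsec === 0) {
      // Time to first token, measured from request dispatch.
      time_first_token_millsec = new Date().getTime() - start_time_millsec
    }
    console.log(token, { time_first_token_millsec })
  }
}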
@@ -290,7 +290,6 @@ export default class AnthropicProvider extends BaseProvider {
     const processStream = async (body: MessageCreateParamsNonStreaming, idx: number) => {
       let time_first_token_millsec = 0
-      const start_time_millsec = new Date().getTime()

       if (!streamOutput) {
         const message = await this.sdk.messages.create({ ...body, stream: false })
@@ -484,6 +483,7 @@ export default class AnthropicProvider extends BaseProvider {
         })
       }
       onChunk({ type: ChunkType.LLM_RESPONSE_CREATED })
+      const start_time_millsec = new Date().getTime()
       await processStream(body, 0).finally(cleanup)
     }
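One consequence of the Anthropic change, assuming `processStream` still reads `start_time_millsec` for its metrics: the constant now lives in the scope that calls the helper, so the helper captures it by closure rather than owning it. A hypothetical reduction of that call-site shape:

// processStream closes over start_time_millsec from the enclosing scope; the
// reference is legal because the helper only runs after the const initializes.
async function complete(onChunk: (chunk: { type: string }) => void): Promise<void> {
  const processStream = async (idx: number): Promise<void> => {
    const elapsed = new Date().getTime() - start_time_millsec
    console.log(`stream ${idx} began processing ${elapsed}ms after dispatch`)
  }

  onChunk({ type: 'LLM_RESPONSE_CREATED' })
  const start_time_millsec = new Date().getTime()
  await processStream(0)
}

complete((chunk) => console.log(chunk.type))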
@@ -500,7 +500,6 @@ export default class GeminiProvider extends BaseProvider {
       let functionCalls: FunctionCall[] = []
       let time_first_token_millsec = 0
-      const start_time_millsec = new Date().getTime()

       if (stream instanceof GenerateContentResponse) {
         let content = ''
@@ -647,6 +646,7 @@ export default class GeminiProvider extends BaseProvider {
       }

       onChunk({ type: ChunkType.LLM_RESPONSE_CREATED })
+      const start_time_millsec = new Date().getTime()
       const userMessagesStream = await chat.sendMessageStream({
         message: messageContents as PartUnion,
         config: {
@@ -518,7 +518,6 @@ export default class OpenAICompatibleProvider extends BaseOpenAiProvider {
     const processStream = async (stream: any, idx: number) => {
       const toolCalls: ChatCompletionMessageToolCall[] = []
       let time_first_token_millsec = 0
-      const start_time_millsec = new Date().getTime()

       // Handle non-streaming case (already returns early, no change needed here)
       if (!isSupportStreamOutput()) {
@@ -831,6 +830,7 @@ export default class OpenAICompatibleProvider extends BaseOpenAiProvider {
     reqMessages = processReqMessages(model, reqMessages)
     // Wait for the API to return the stream
     onChunk({ type: ChunkType.LLM_RESPONSE_CREATED })
+    const start_time_millsec = new Date().getTime()
     const stream = await this.sdk.chat.completions
       // @ts-ignore key is not typed
       .create(
@@ -567,7 +567,6 @@ export abstract class BaseOpenAiProvider extends BaseProvider {
     ) => {
       const toolCalls: OpenAI.Responses.ResponseFunctionToolCall[] = []
       let time_first_token_millsec = 0
-      const start_time_millsec = new Date().getTime()

       if (!streamOutput) {
         const nonStream = stream as OpenAI.Responses.Response
@@ -785,6 +784,7 @@ export abstract class BaseOpenAiProvider extends BaseProvider {
       }

       onChunk({ type: ChunkType.LLM_RESPONSE_CREATED })
+      const start_time_millsec = new Date().getTime()
       const stream = await this.sdk.responses.create(
         {
           model: model.id,
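For intuition on why the placement matters, a small self-contained comparison (all names hypothetical) of the two clock placements against a simulated slow request:

// Simulates a request that takes 300ms to return a stream whose first
// token arrives 100ms later, and compares the two clock placements.
const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms))

async function main(): Promise<void> {
  const dispatch_time = new Date().getTime() // new placement: before the request
  await sleep(300) // time spent waiting for the API to return the stream
  const stream_ready_time = new Date().getTime() // old placement: inside the stream handler
  await sleep(100) // time until the first token arrives
  const first_token_time = new Date().getTime()

  // The old measurement omits the request round-trip (~100ms);
  // the new one includes it (~400ms).
  console.log('old:', first_token_time - stream_ready_time, 'ms')
  console.log('new:', first_token_time - dispatch_time, 'ms')
}

main()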