From 1ab9ea295d1d6101699451835a12bba63e9fcb71 Mon Sep 17 00:00:00 2001
From: suyao
Date: Mon, 12 May 2025 18:14:19 +0800
Subject: [PATCH] fix: move start_time_millsec initialization to onChunk for
 accurate timing

---
 src/renderer/src/providers/AiProvider/AnthropicProvider.ts | 2 +-
 src/renderer/src/providers/AiProvider/GeminiProvider.ts    | 2 +-
 .../src/providers/AiProvider/OpenAICompatibleProvider.ts   | 2 +-
 src/renderer/src/providers/AiProvider/OpenAIProvider.ts    | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/renderer/src/providers/AiProvider/AnthropicProvider.ts b/src/renderer/src/providers/AiProvider/AnthropicProvider.ts
index 3f2929bdd0..2bfd4b6fb5 100644
--- a/src/renderer/src/providers/AiProvider/AnthropicProvider.ts
+++ b/src/renderer/src/providers/AiProvider/AnthropicProvider.ts
@@ -290,7 +290,6 @@ export default class AnthropicProvider extends BaseProvider {
 
     const processStream = async (body: MessageCreateParamsNonStreaming, idx: number) => {
       let time_first_token_millsec = 0
-      const start_time_millsec = new Date().getTime()
 
       if (!streamOutput) {
         const message = await this.sdk.messages.create({ ...body, stream: false })
@@ -484,6 +483,7 @@ export default class AnthropicProvider extends BaseProvider {
       })
     }
     onChunk({ type: ChunkType.LLM_RESPONSE_CREATED })
+    const start_time_millsec = new Date().getTime()
 
     await processStream(body, 0).finally(cleanup)
   }

diff --git a/src/renderer/src/providers/AiProvider/GeminiProvider.ts b/src/renderer/src/providers/AiProvider/GeminiProvider.ts
index 0a49ebe573..32c964fda8 100644
--- a/src/renderer/src/providers/AiProvider/GeminiProvider.ts
+++ b/src/renderer/src/providers/AiProvider/GeminiProvider.ts
@@ -500,7 +500,6 @@ export default class GeminiProvider extends BaseProvider {
 
       let functionCalls: FunctionCall[] = []
       let time_first_token_millsec = 0
-      const start_time_millsec = new Date().getTime()
 
       if (stream instanceof GenerateContentResponse) {
         let content = ''
@@ -647,6 +646,7 @@ export default class GeminiProvider extends BaseProvider {
     }
 
     onChunk({ type: ChunkType.LLM_RESPONSE_CREATED })
+    const start_time_millsec = new Date().getTime()
     const userMessagesStream = await chat.sendMessageStream({
       message: messageContents as PartUnion,
       config: {

diff --git a/src/renderer/src/providers/AiProvider/OpenAICompatibleProvider.ts b/src/renderer/src/providers/AiProvider/OpenAICompatibleProvider.ts
index 54df45ed98..c793f5ae2f 100644
--- a/src/renderer/src/providers/AiProvider/OpenAICompatibleProvider.ts
+++ b/src/renderer/src/providers/AiProvider/OpenAICompatibleProvider.ts
@@ -518,7 +518,6 @@ export default class OpenAICompatibleProvider extends BaseOpenAiProvider {
     const processStream = async (stream: any, idx: number) => {
       const toolCalls: ChatCompletionMessageToolCall[] = []
       let time_first_token_millsec = 0
-      const start_time_millsec = new Date().getTime()
 
       // Handle non-streaming case (already returns early, no change needed here)
       if (!isSupportStreamOutput()) {
@@ -831,6 +830,7 @@ export default class OpenAICompatibleProvider extends BaseOpenAiProvider {
     reqMessages = processReqMessages(model, reqMessages)
     // Wait for the API to return the stream
     onChunk({ type: ChunkType.LLM_RESPONSE_CREATED })
+    const start_time_millsec = new Date().getTime()
     const stream = await this.sdk.chat.completions
       // @ts-ignore key is not typed
       .create(

diff --git a/src/renderer/src/providers/AiProvider/OpenAIProvider.ts b/src/renderer/src/providers/AiProvider/OpenAIProvider.ts
index 154b1a7357..01b06d5d35 100644
--- a/src/renderer/src/providers/AiProvider/OpenAIProvider.ts
+++ b/src/renderer/src/providers/AiProvider/OpenAIProvider.ts
@@ -567,7 +567,6 @@ export abstract class BaseOpenAiProvider extends BaseProvider {
     ) => {
       const toolCalls: OpenAI.Responses.ResponseFunctionToolCall[] = []
       let time_first_token_millsec = 0
-      const start_time_millsec = new Date().getTime()
 
       if (!streamOutput) {
         const nonStream = stream as OpenAI.Responses.Response
@@ -785,6 +784,7 @@ export abstract class BaseOpenAiProvider extends BaseProvider {
     }
 
     onChunk({ type: ChunkType.LLM_RESPONSE_CREATED })
+    const start_time_millsec = new Date().getTime()
     const stream = await this.sdk.responses.create(
       {
         model: model.id,
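
Why the placement matters: in three of the four providers, processStream
receives an already-created stream, so the old start_time_millsec was
captured only after the SDK request had resolved, and
time_first_token_millsec excluded request dispatch, connection setup, and
server-side queueing. Capturing the timestamp immediately after emitting
LLM_RESPONSE_CREATED, right before the SDK request is issued, makes
first-token latency cover the full round trip and gives all four providers
the same reference point. A minimal TypeScript sketch of the pattern
follows; fakeStreamRequest, the simplified Chunk type, and the completions
wrapper are hypothetical stand-ins for the providers' SDK calls, not code
from this repository.

type Chunk = { type: 'LLM_RESPONSE_CREATED' } | { type: 'TEXT_DELTA'; text: string }

// Hypothetical stand-in for an SDK call such as
// this.sdk.chat.completions.create(); the initial delay models request
// dispatch plus server queueing before the first token arrives.
async function* fakeStreamRequest(): AsyncGenerator<string> {
  await new Promise((resolve) => setTimeout(resolve, 120))
  yield 'Hello'
  await new Promise((resolve) => setTimeout(resolve, 30))
  yield ', world'
}

async function completions(onChunk: (chunk: Chunk) => void): Promise<void> {
  onChunk({ type: 'LLM_RESPONSE_CREATED' })
  // Start the clock before the request is dispatched, mirroring the
  // placement this patch adopts.
  const start_time_millsec = new Date().getTime()
  let time_first_token_millsec = 0

  for await (const text of fakeStreamRequest()) {
    if (time_first_token_millsec === 0) {
      // First-token latency now includes the 120 ms dispatch delay.
      time_first_token_millsec = new Date().getTime() - start_time_millsec
      console.log(`first token after ${time_first_token_millsec} ms`)
    }
    onChunk({ type: 'TEXT_DELTA', text })
  }
}

completions((chunk) => console.log('chunk:', chunk.type))

Had the clock started inside the stream consumer instead, as it did before
this patch, the simulated 120 ms dispatch delay would be silently missing
from time_first_token_millsec.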