diff --git a/src/renderer/src/providers/AiProvider/OpenAIProvider.ts b/src/renderer/src/providers/AiProvider/OpenAIProvider.ts
index a7bed185d5..545c92ed3e 100644
--- a/src/renderer/src/providers/AiProvider/OpenAIProvider.ts
+++ b/src/renderer/src/providers/AiProvider/OpenAIProvider.ts
@@ -1139,8 +1139,6 @@ export default class OpenAIProvider extends BaseOpenAIProvider {
     const body = {
       model: model.id,
       messages: [{ role: 'user', content: 'hi' }],
-      max_completion_tokens: 1, // openAI
-      max_tokens: 1, // deprecated by OpenAI, but most OpenAI-compatible providers still use this parameter
       enable_thinking: false, // qwen3
       stream
     }
diff --git a/src/renderer/src/providers/AiProvider/OpenAIResponseProvider.ts b/src/renderer/src/providers/AiProvider/OpenAIResponseProvider.ts
index 9e163c39c6..af66a835bc 100644
--- a/src/renderer/src/providers/AiProvider/OpenAIResponseProvider.ts
+++ b/src/renderer/src/providers/AiProvider/OpenAIResponseProvider.ts
@@ -907,24 +907,18 @@ export abstract class BaseOpenAIProvider extends BaseProvider {
       const response = await this.sdk.responses.create({
         model: model.id,
         input: [{ role: 'user', content: 'hi' }],
-        max_output_tokens: 1,
         stream: true
       })
-      let hasContent = false
       for await (const chunk of response) {
         if (chunk.type === 'response.output_text.delta') {
-          hasContent = true
+          return { valid: true, error: null }
         }
       }
-      if (hasContent) {
-        return { valid: true, error: null }
-      }
       throw new Error('Empty streaming response')
     } else {
       const response = await this.sdk.responses.create({
         model: model.id,
         input: [{ role: 'user', content: 'hi' }],
-        max_output_tokens: 1,
         stream: false
       })
       if (!response.output_text) {