refactor: remove deprecated max token settings from OpenAIProvider and OpenAIResponseProvider

kangfenmao 2025-05-16 14:36:40 +08:00
parent 119125038d
commit 29b917772c
2 changed files with 1 addition and 9 deletions


@@ -1139,8 +1139,6 @@ export default class OpenAIProvider extends BaseOpenAIProvider {
const body = {
  model: model.id,
  messages: [{ role: 'user', content: 'hi' }],
  max_completion_tokens: 1, // openAI
  max_tokens: 1, // deprecated by OpenAI, but most OpenAI-compatible providers still use this field
  enable_thinking: false, // qwen3
  stream
}
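For reference, here is a minimal sketch (an editor's illustration, not part of the commit) of the connectivity-check body once the deprecated token caps named in the commit title are dropped; the hunk shrinking from 8 to 6 lines is consistent with removing exactly max_completion_tokens and max_tokens:

// Sketch only: probe body after this commit, assuming the two max token lines above are the removals.
const body = {
  model: model.id,
  messages: [{ role: 'user', content: 'hi' }],
  enable_thinking: false, // qwen3
  stream
}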


@@ -907,24 +907,18 @@ export abstract class BaseOpenAIProvider extends BaseProvider {
  const response = await this.sdk.responses.create({
    model: model.id,
    input: [{ role: 'user', content: 'hi' }],
    max_output_tokens: 1,
    stream: true
  })
  let hasContent = false
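  // Any 'response.output_text.delta' chunk is treated as proof that the endpoint can stream text back.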
  for await (const chunk of response) {
    if (chunk.type === 'response.output_text.delta') {
      hasContent = true
      return { valid: true, error: null }
    }
  }
  if (hasContent) {
    return { valid: true, error: null }
  }
  throw new Error('Empty streaming response')
} else {
  const response = await this.sdk.responses.create({
    model: model.id,
    input: [{ role: 'user', content: 'hi' }],
    max_output_tokens: 1,
    stream: false
  })
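  // Non-streaming probe: validity hinges on a non-empty output_text in the response.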
  if (!response.output_text) {