Mirror of https://github.com/CherryHQ/cherry-studio.git (synced 2026-01-09 14:59:27 +08:00)
feat: support streaming for model health check (#5546)
This commit is contained in:
parent 28ec990100
commit aaf396f83a
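What changes: every provider's `check(model)` gains an optional `stream` flag (default `false`), and `checkModel` first runs the cheap non-streaming probe, then retries over the streaming interface if that probe fails, so endpoints that only answer streaming requests can still pass the health check. A minimal sketch of that fallback flow, with `Model` pared down to the one field the probes use (the real type lives in the app's types module):

```ts
// Sketch of the non-streaming-first, streaming-fallback flow this commit
// wires into checkModel. Types are reduced stand-ins, not the app's own.
type Model = { id: string }
type CheckResult = { valid: boolean; error: Error | null }

interface Checkable {
  check(model: Model, stream: boolean): Promise<CheckResult>
}

async function checkWithFallback(ai: Checkable, model: Model): Promise<CheckResult> {
  // Cheap path first: a single non-streaming completion.
  const nonStreaming = await ai.check(model, false)
  if (nonStreaming.valid && !nonStreaming.error) {
    return nonStreaming
  }
  // Some gateways only accept streaming requests; retry before failing.
  return ai.check(model, true)
}
```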
@@ -531,25 +531,50 @@ export default class AnthropicProvider extends BaseProvider {
   /**
    * Check if the model is valid
    * @param model - The model
+   * @param stream - Whether to use streaming interface
    * @returns The validity of the model
    */
-  public async check(model: Model): Promise<{ valid: boolean; error: Error | null }> {
+  public async check(model: Model, stream: boolean = false): Promise<{ valid: boolean; error: Error | null }> {
     if (!model) {
       return { valid: false, error: new Error('No model found') }
     }

     const body = {
       model: model.id,
-      messages: [{ role: 'user', content: 'hi' }],
+      messages: [{ role: 'user' as const, content: 'hi' }],
       max_tokens: 100,
-      stream: false
+      stream
     }

     try {
-      const message = await this.sdk.messages.create(body as MessageCreateParamsNonStreaming)
-      return {
-        valid: message.content.length > 0,
-        error: null
+      if (!stream) {
+        const message = await this.sdk.messages.create(body as MessageCreateParamsNonStreaming)
+        return {
+          valid: message.content.length > 0,
+          error: null
+        }
+      } else {
+        return await new Promise((resolve, reject) => {
+          let hasContent = false
+          this.sdk.messages
+            .stream(body)
+            .on('text', (text) => {
+              if (!hasContent && text) {
+                hasContent = true
+                resolve({ valid: true, error: null })
+              }
+            })
+            .on('finalMessage', (message) => {
+              if (!hasContent && message.content && message.content.length > 0) {
+                hasContent = true
+                resolve({ valid: true, error: null })
+              }
+              if (!hasContent) {
+                reject(new Error('Empty streaming response'))
+              }
+            })
+            .on('error', (error) => reject(error))
+        })
       }
     } catch (error: any) {
       return {
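The Anthropic branch adapts the SDK's event-emitter stream to the Promise the caller expects: resolve on the first `text` event that carries content, fall back to `finalMessage` for models that emit no incremental text, and reject on `error` or an empty final message. The same adapter pattern in isolation; `TextStream` is a stand-in for the SDK's MessageStream, reduced to the three events the check uses:

```ts
// Settle a Promise from a one-shot event-emitter stream (a sketch, not the
// Anthropic SDK's actual MessageStream type).
interface TextStream {
  on(event: 'text', cb: (text: string) => void): TextStream
  on(event: 'finalMessage', cb: (message: { content: unknown[] }) => void): TextStream
  on(event: 'error', cb: (error: Error) => void): TextStream
}

function firstContent(stream: TextStream): Promise<{ valid: boolean; error: Error | null }> {
  return new Promise((resolve, reject) => {
    let settled = false
    stream
      .on('text', (text) => {
        if (!settled && text) {
          settled = true
          // The first token already proves the model responds.
          resolve({ valid: true, error: null })
        }
      })
      .on('finalMessage', (message) => {
        if (!settled && message.content.length > 0) {
          settled = true
          resolve({ valid: true, error: null })
        } else if (!settled) {
          reject(new Error('Empty streaming response'))
        }
      })
      .on('error', (error) => reject(error))
  })
}
```

Note that resolving early does not abort the underlying stream; the remaining tokens (capped by `max_tokens: 100`) are still delivered and discarded.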
@@ -43,7 +43,7 @@ export default abstract class BaseProvider {
   abstract summaryForSearch(messages: Message[], assistant: Assistant): Promise<string | null>
   abstract suggestions(messages: Message[], assistant: Assistant): Promise<Suggestion[]>
   abstract generateText({ prompt, content }: { prompt: string; content: string }): Promise<string>
-  abstract check(model: Model): Promise<{ valid: boolean; error: Error | null }>
+  abstract check(model: Model, stream: boolean): Promise<{ valid: boolean; error: Error | null }>
   abstract models(): Promise<OpenAI.Models.Model[]>
   abstract generateImage(params: GenerateImageParams): Promise<string[]>
   abstract generateImageByChat({ messages, assistant, onChunk, onFilterMessages }: CompletionsParams): Promise<void>
@@ -740,25 +740,47 @@ export default class GeminiProvider extends BaseProvider {
   /**
    * Check if the model is valid
    * @param model - The model
+   * @param stream - Whether to use streaming interface
    * @returns The validity of the model
    */
-  public async check(model: Model): Promise<{ valid: boolean; error: Error | null }> {
+  public async check(model: Model, stream: boolean = false): Promise<{ valid: boolean; error: Error | null }> {
     if (!model) {
       return { valid: false, error: new Error('No model found') }
     }

     try {
-      const result = await this.sdk.models.generateContent({
-        model: model.id,
-        contents: [{ role: 'user', parts: [{ text: 'hi' }] }],
-        config: {
-          maxOutputTokens: 100
+      if (!stream) {
+        const result = await this.sdk.models.generateContent({
+          model: model.id,
+          contents: [{ role: 'user', parts: [{ text: 'hi' }] }],
+          config: {
+            maxOutputTokens: 100
+          }
+        })
+        if (isEmpty(result.text)) {
+          throw new Error('Empty response')
+        }
+      } else {
+        const response = await this.sdk.models.generateContentStream({
+          model: model.id,
+          contents: [{ role: 'user', parts: [{ text: 'hi' }] }],
+          config: {
+            maxOutputTokens: 100
+          }
+        })
+        // Wait for the entire streaming response to finish
+        let hasContent = false
+        for await (const chunk of response) {
+          if (chunk.text && chunk.text.length > 0) {
+            hasContent = true
+            break
+          }
+        }
+        if (!hasContent) {
+          throw new Error('Empty streaming response')
         }
-      })
-      return {
-        valid: !isEmpty(result.text),
-        error: null
       }
+      return { valid: true, error: null }
     } catch (error: any) {
       return {
         valid: false,
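The Gemini streaming probe leans on `generateContentStream` returning an async iterable, so the first non-empty chunk can short-circuit the scan with `break`. The same idea as a free-standing helper; the chunk shape is a simplifying assumption, not the SDK's full response type:

```ts
// Scan an async iterable of chunks and stop at the first one carrying text.
async function streamHasText(stream: AsyncIterable<{ text?: string }>): Promise<boolean> {
  for await (const chunk of stream) {
    if (chunk.text && chunk.text.length > 0) {
      return true // exit early; no need to drain the whole stream
    }
  }
  return false
}
```

Breaking out of a `for await` loop invokes the iterator's `return()`, which lets a well-behaved SDK stream release its underlying connection.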
@@ -962,26 +962,41 @@ export default class OpenAIProvider extends BaseProvider {
   /**
    * Check if the model is valid
    * @param model - The model
+   * @param stream - Whether to use streaming interface
    * @returns The validity of the model
    */
-  public async check(model: Model): Promise<{ valid: boolean; error: Error | null }> {
+  public async check(model: Model, stream: boolean = false): Promise<{ valid: boolean; error: Error | null }> {
     if (!model) {
       return { valid: false, error: new Error('No model found') }
     }
     const body = {
       model: model.id,
       messages: [{ role: 'user', content: 'hi' }],
-      stream: false
+      stream
     }

     try {
       await this.checkIsCopilot()
       console.debug('[checkModel] body', model.id, body)
-      const response = await this.sdk.chat.completions.create(body as ChatCompletionCreateParamsNonStreaming)
-      return {
-        valid: Boolean(response?.choices[0].message),
-        error: null
+      if (!stream) {
+        const response = await this.sdk.chat.completions.create(body as ChatCompletionCreateParamsNonStreaming)
+        if (!response?.choices[0].message) {
+          throw new Error('Empty response')
+        }
+        return { valid: true, error: null }
+      } else {
+        const response: any = await this.sdk.chat.completions.create(body as any)
+        // Wait for the entire streaming response to finish
+        let hasContent = false
+        for await (const chunk of response) {
+          if (chunk.choices?.[0]?.delta?.content) {
+            hasContent = true
+          }
+        }
+        if (hasContent) {
+          return { valid: true, error: null }
+        }
+        throw new Error('Empty streaming response')
       }
     } catch (error: any) {
       return {
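The OpenAI branch has to handle two response shapes: the non-streaming call returns a complete `choices[0].message`, while streaming chunks expose incremental text under `choices[0].delta.content` (absent on role-only and finish chunks). Unlike the Gemini loop, this one drains the entire stream before deciding. Pared-down shapes of the two styles, reduced to the fields the check touches (assumptions for illustration, not the full Chat Completions types):

```ts
// Simplified OpenAI response shapes used by the two probe styles.
interface NonStreamingResponse {
  choices: Array<{ message?: { content: string | null } }>
}
interface StreamChunk {
  choices?: Array<{ delta?: { content?: string | null } }>
}

function nonStreamingHasMessage(response: NonStreamingResponse): boolean {
  return Boolean(response.choices[0]?.message)
}

async function streamHasDelta(stream: AsyncIterable<StreamChunk>): Promise<boolean> {
  let hasContent = false
  for await (const chunk of stream) {
    // delta.content is empty or missing on role-only and finish chunks
    if (chunk.choices?.[0]?.delta?.content) {
      hasContent = true
    }
  }
  return hasContent
}
```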
@@ -59,8 +59,8 @@ export default class AiProvider {
     return this.sdk.generateText({ prompt, content })
   }

-  public async check(model: Model): Promise<{ valid: boolean; error: Error | null }> {
-    return this.sdk.check(model)
+  public async check(model: Model, stream: boolean = false): Promise<{ valid: boolean; error: Error | null }> {
+    return this.sdk.check(model, stream)
   }

   public async models(): Promise<OpenAI.Models.Model[]> {
@@ -82,7 +82,14 @@ export async function checkModel(provider: Provider, model: Model) {
   return performModelCheck(
     provider,
     model,
-    (ai, model) => ai.check(model),
+    async (ai, model) => {
+      const result = await ai.check(model, false)
+      if (result.valid && !result.error) {
+        return result
+      }
+      // Try streaming check
+      return ai.check(model, true)
+    },
     ({ valid, error }) => ({ valid, error: error || null })
   )
 }
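Call sites are unchanged: `checkModel(provider, model)` still resolves to `{ valid, error }`, with the streaming retry hidden inside the check callback passed to `performModelCheck`. A hypothetical usage sketch, assuming a `provider` and `model` are already in scope:

```ts
// Hypothetical call site for the updated checkModel.
const { valid, error } = await checkModel(provider, model)
if (valid) {
  console.log(`model ${model.id} is reachable`)
} else {
  console.error('health check failed:', error?.message)
}
```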