mirror of
https://github.com/CherryHQ/cherry-studio.git
synced 2026-01-01 17:59:09 +08:00
feat: enhance provider configuration and error handling for AI SDK integration
This commit is contained in:
parent
d367040fd4
commit
9d34098a53
26
packages/shared/provider/constant.ts
Normal file
26
packages/shared/provider/constant.ts
Normal file
@@ -0,0 +1,26 @@
|
||||
import { getLowerBaseModelName } from '@shared/utils/naming'
|
||||
|
||||
import type { MinimalModel } from './types'
|
||||
|
||||
export const COPILOT_EDITOR_VERSION = 'vscode/1.104.1'
|
||||
export const COPILOT_PLUGIN_VERSION = 'copilot-chat/0.26.7'
|
||||
export const COPILOT_INTEGRATION_ID = 'vscode-chat'
|
||||
export const COPILOT_USER_AGENT = 'GitHubCopilotChat/0.26.7'
|
||||
|
||||
export const COPILOT_DEFAULT_HEADERS = {
|
||||
'Copilot-Integration-Id': COPILOT_INTEGRATION_ID,
|
||||
'User-Agent': COPILOT_USER_AGENT,
|
||||
'Editor-Version': COPILOT_EDITOR_VERSION,
|
||||
'Editor-Plugin-Version': COPILOT_PLUGIN_VERSION,
|
||||
'editor-version': COPILOT_EDITOR_VERSION,
|
||||
'editor-plugin-version': COPILOT_PLUGIN_VERSION,
|
||||
'copilot-vision-request': 'true'
|
||||
} as const
|
||||
|
||||
// Models that require the OpenAI Responses endpoint when routed through GitHub Copilot (#10560)
|
||||
const COPILOT_RESPONSES_MODEL_IDS = ['gpt-5-codex']
|
||||
|
||||
export function isCopilotResponsesModel<M extends MinimalModel>(model: M): boolean {
|
||||
const normalizedId = getLowerBaseModelName(model.id)
|
||||
return COPILOT_RESPONSES_MODEL_IDS.some((target) => normalizedId === target)
|
||||
}
|
||||
@ -127,7 +127,7 @@ export function providerToAiSdkConfig(
|
||||
if (provider.id === SystemProviderIds.copilot) {
|
||||
const defaultHeaders = context.getCopilotDefaultHeaders?.() ?? {}
|
||||
const storedHeaders = context.getCopilotStoredHeaders?.() ?? {}
|
||||
const options = ProviderConfigFactory.fromProvider('github-copilot-openai-compatible', baseConfig, {
|
||||
const copilotExtraOptions: Record<string, unknown> = {
|
||||
headers: {
|
||||
...defaultHeaders,
|
||||
...storedHeaders,
|
||||
@ -135,7 +135,15 @@ export function providerToAiSdkConfig(
|
||||
},
|
||||
name: provider.id,
|
||||
includeUsage: true
|
||||
})
|
||||
}
|
||||
if (context.fetch) {
|
||||
copilotExtraOptions.fetch = context.fetch
|
||||
}
|
||||
const options = ProviderConfigFactory.fromProvider(
|
||||
'github-copilot-openai-compatible',
|
||||
baseConfig,
|
||||
copilotExtraOptions
|
||||
)
|
||||
|
||||
return {
|
||||
providerId: 'github-copilot-openai-compatible',
|
||||
|
||||
@ -4,7 +4,7 @@ import { loggerService } from '@logger'
|
||||
import anthropicService from '@main/services/AnthropicService'
|
||||
import { buildClaudeCodeSystemMessage, getSdkClient } from '@shared/anthropic'
|
||||
import type { Provider } from '@types'
|
||||
import { APICallError } from 'ai'
|
||||
import { APICallError, RetryError } from 'ai'
|
||||
import { net } from 'electron'
|
||||
import type { Response } from 'express'
|
||||
|
||||
@ -267,6 +267,41 @@ export class MessagesService {
|
||||
500: 'internal_server_error'
|
||||
}
|
||||
|
||||
// Handle AI SDK RetryError - extract the last error for better error messages
|
||||
if (RetryError.isInstance(error)) {
|
||||
const lastError = error.lastError
|
||||
// If the last error is an APICallError, extract its details
|
||||
if (APICallError.isInstance(lastError)) {
|
||||
statusCode = lastError.statusCode || 502
|
||||
errorMessage = lastError.message
|
||||
return {
|
||||
statusCode,
|
||||
errorResponse: {
|
||||
type: 'error',
|
||||
error: {
|
||||
type: errorMap[statusCode] || 'api_error',
|
||||
message: `${error.reason}: ${errorMessage}`,
|
||||
requestId: lastError.name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Fallback for other retry errors
|
||||
errorMessage = error.message
|
||||
statusCode = 502
|
||||
return {
|
||||
statusCode,
|
||||
errorResponse: {
|
||||
type: 'error',
|
||||
error: {
|
||||
type: 'api_error',
|
||||
message: errorMessage,
|
||||
requestId: error.name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (APICallError.isInstance(error)) {
|
||||
statusCode = error.statusCode
|
||||
errorMessage = error.message
|
||||
|
||||
@ -9,6 +9,8 @@ import type {
|
||||
import { type AiPlugin, createExecutor } from '@cherrystudio/ai-core'
|
||||
import { createProvider as createProviderCore } from '@cherrystudio/ai-core/provider'
|
||||
import { loggerService } from '@logger'
|
||||
import anthropicService from '@main/services/AnthropicService'
|
||||
import copilotService from '@main/services/CopilotService'
|
||||
import { reduxService } from '@main/services/ReduxService'
|
||||
import { AiSdkToAnthropicSSE, formatSSEDone, formatSSEEvent } from '@shared/adapters'
|
||||
import { isGemini3ModelId } from '@shared/middleware'
|
||||
@ -21,6 +23,7 @@ import {
|
||||
providerToAiSdkConfig as sharedProviderToAiSdkConfig,
|
||||
resolveActualProvider
|
||||
} from '@shared/provider'
|
||||
import { COPILOT_DEFAULT_HEADERS } from '@shared/provider/constant'
|
||||
import { defaultAppHeaders } from '@shared/utils'
|
||||
import type { Provider } from '@types'
|
||||
import type { ImagePart, JSONValue, ModelMessage, Provider as AiSdkProvider, TextPart, Tool } from 'ai'
|
||||
@ -284,6 +287,68 @@ async function createAiSdkProvider(config: AiSdkConfig): Promise<AiSdkProvider>
|
||||
return provider
|
||||
}
|
||||
|
||||
/**
|
||||
* Prepare special provider configuration for providers that need dynamic tokens
|
||||
* Similar to renderer's prepareSpecialProviderConfig
|
||||
*/
|
||||
async function prepareSpecialProviderConfig(provider: Provider, config: AiSdkConfig): Promise<AiSdkConfig> {
|
||||
switch (provider.id) {
|
||||
case 'copilot': {
|
||||
const storedHeaders =
|
||||
((await reduxService.select('state.copilot.defaultHeaders')) as Record<string, string> | null) ?? {}
|
||||
const headers: Record<string, string> = {
|
||||
...COPILOT_DEFAULT_HEADERS,
|
||||
...storedHeaders
|
||||
}
|
||||
|
||||
try {
|
||||
const { token } = await copilotService.getToken(null as any, headers)
|
||||
config.options.apiKey = token
|
||||
const existingHeaders = (config.options.headers as Record<string, string> | undefined) ?? {}
|
||||
config.options.headers = {
|
||||
...headers,
|
||||
...existingHeaders
|
||||
}
|
||||
logger.debug('Copilot token retrieved successfully')
|
||||
} catch (error) {
|
||||
logger.error('Failed to get Copilot token', error as Error)
|
||||
throw new Error('Failed to get Copilot token. Please re-authorize Copilot.')
|
||||
}
|
||||
break
|
||||
}
|
||||
case 'anthropic': {
|
||||
if (provider.authType === 'oauth') {
|
||||
try {
|
||||
const oauthToken = await anthropicService.getValidAccessToken()
|
||||
if (!oauthToken) {
|
||||
throw new Error('Anthropic OAuth token not available. Please re-authorize.')
|
||||
}
|
||||
config.options = {
|
||||
...config.options,
|
||||
headers: {
|
||||
...(config.options.headers ? config.options.headers : {}),
|
||||
'Content-Type': 'application/json',
|
||||
'anthropic-version': '2023-06-01',
|
||||
'anthropic-beta': 'oauth-2025-04-20',
|
||||
Authorization: `Bearer ${oauthToken}`
|
||||
},
|
||||
baseURL: 'https://api.anthropic.com/v1',
|
||||
apiKey: ''
|
||||
}
|
||||
logger.debug('Anthropic OAuth token retrieved successfully')
|
||||
} catch (error) {
|
||||
logger.error('Failed to get Anthropic OAuth token', error as Error)
|
||||
throw new Error('Failed to get Anthropic OAuth token. Please re-authorize.')
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
// Note: cherryai requires request-level signing which is not easily supported here
|
||||
// It would need custom fetch implementation similar to renderer
|
||||
}
|
||||
return config
|
||||
}
|
||||
|
||||
/**
|
||||
* Core stream execution function - single source of truth for AI SDK calls
|
||||
*/
|
||||
@ -291,7 +356,10 @@ async function executeStream(config: ExecuteStreamConfig): Promise<AiSdkToAnthro
|
||||
const { provider, modelId, params, middlewares = [], plugins = [], onEvent } = config
|
||||
|
||||
// Convert provider config to AI SDK config
|
||||
const sdkConfig = providerToAiSdkConfig(provider, modelId)
|
||||
let sdkConfig = providerToAiSdkConfig(provider, modelId)
|
||||
|
||||
// Prepare special provider config (Copilot, Anthropic OAuth, etc.)
|
||||
sdkConfig = await prepareSpecialProviderConfig(provider, sdkConfig)
|
||||
|
||||
logger.debug('Created AI SDK config', {
|
||||
providerId: sdkConfig.providerId,
|
||||
|
||||
@ -1,25 +1 @@
|
||||
import type { Model } from '@renderer/types'
|
||||
|
||||
export const COPILOT_EDITOR_VERSION = 'vscode/1.104.1'
|
||||
export const COPILOT_PLUGIN_VERSION = 'copilot-chat/0.26.7'
|
||||
export const COPILOT_INTEGRATION_ID = 'vscode-chat'
|
||||
export const COPILOT_USER_AGENT = 'GitHubCopilotChat/0.26.7'
|
||||
|
||||
export const COPILOT_DEFAULT_HEADERS = {
|
||||
'Copilot-Integration-Id': COPILOT_INTEGRATION_ID,
|
||||
'User-Agent': COPILOT_USER_AGENT,
|
||||
'Editor-Version': COPILOT_EDITOR_VERSION,
|
||||
'Editor-Plugin-Version': COPILOT_PLUGIN_VERSION,
|
||||
'editor-version': COPILOT_EDITOR_VERSION,
|
||||
'editor-plugin-version': COPILOT_PLUGIN_VERSION,
|
||||
'copilot-vision-request': 'true'
|
||||
} as const
|
||||
|
||||
// Models that require the OpenAI Responses endpoint when routed through GitHub Copilot (#10560)
|
||||
const COPILOT_RESPONSES_MODEL_IDS = ['gpt-5-codex']
|
||||
|
||||
export function isCopilotResponsesModel(model: Model): boolean {
|
||||
const normalizedId = model.id?.trim().toLowerCase()
|
||||
const normalizedName = model.name?.trim().toLowerCase()
|
||||
return COPILOT_RESPONSES_MODEL_IDS.some((target) => normalizedId === target || normalizedName === target)
|
||||
}
|
||||
// Renderer-side shim: the Copilot constants moved to the shared package;
// re-export them here so existing renderer import paths keep working.
export { COPILOT_DEFAULT_HEADERS, isCopilotResponsesModel } from '@shared/provider/constant'
|
||||
|
||||
Loading…
Reference in New Issue
Block a user