diff --git a/electron.vite.config.ts b/electron.vite.config.ts
index 761ecfbf15..da471c9fc9 100644
--- a/electron.vite.config.ts
+++ b/electron.vite.config.ts
@@ -27,7 +27,8 @@ export default defineConfig({
         '@mcp-trace/trace-core': resolve('packages/mcp-trace/trace-core'),
         '@mcp-trace/trace-node': resolve('packages/mcp-trace/trace-node'),
         '@cherrystudio/ai-core/provider': resolve('packages/aiCore/src/core/providers'),
-        '@cherrystudio/ai-core': resolve('packages/aiCore/src')
+        '@cherrystudio/ai-core': resolve('packages/aiCore/src'),
+        '@cherrystudio/ai-sdk-provider': resolve('packages/ai-sdk-provider/src')
       }
     },
     build: {
diff --git a/src/main/apiServer/services/unified-messages.ts b/src/main/apiServer/services/unified-messages.ts
index 5aadfbf534..ddb6d59b37 100644
--- a/src/main/apiServer/services/unified-messages.ts
+++ b/src/main/apiServer/services/unified-messages.ts
@@ -186,7 +186,7 @@ IANA media type.
       }
       return {
         type: 'content',
-        value: []
+        value: values
       }
     }
   }
@@ -313,17 +313,24 @@ function convertAnthropicToAiMessages(params: MessageCreateParams): ModelMessage
       }
 
       // Build the message based on role
+      // Only push user/assistant message if there's actual content (avoid empty messages)
       if (msg.role === 'user') {
-        messages.push({
-          role: 'user',
-          content: [...textParts, ...imageParts]
-        })
+        const userContent = [...textParts, ...imageParts]
+        if (userContent.length > 0) {
+          messages.push({
+            role: 'user',
+            content: userContent
+          })
+        }
       } else {
         // Assistant messages contain tool calls, not tool results
-        messages.push({
-          role: 'assistant',
-          content: [...reasoningParts, ...textParts, ...toolCallParts]
-        })
+        const assistantContent = [...reasoningParts, ...textParts, ...toolCallParts]
+        if (assistantContent.length > 0) {
+          messages.push({
+            role: 'assistant',
+            content: assistantContent
+          })
+        }
       }
     }
   }
diff --git a/src/main/apiServer/utils/index.ts b/src/main/apiServer/utils/index.ts
index fde1ff3475..17d3f9f088 100644
--- a/src/main/apiServer/utils/index.ts
+++ b/src/main/apiServer/utils/index.ts
@@ -28,10 +28,9 @@ export async function getAvailableProviders(): Promise<Provider[]> {
       return []
     }
 
-    // Support OpenAI and Anthropic type providers for API server
-    const supportedProviders = providers.filter(
-      (p: Provider) => p.enabled && (p.type === 'openai' || p.type === 'anthropic')
-    )
+    // Support all provider types that AI SDK can handle
+    // The unified-messages service uses AI SDK which supports many providers
+    const supportedProviders = providers.filter((p: Provider) => p.enabled)
 
     // Cache the filtered results
     CacheService.set(PROVIDERS_CACHE_KEY, supportedProviders, PROVIDERS_CACHE_TTL)
@@ -160,7 +159,7 @@ export async function validateModelId(model: string): Promise<{
       valid: false,
       error: {
         type: 'provider_not_found',
-        message: `Provider '${providerId}' not found, not enabled, or not supported. Only OpenAI providers are currently supported.`,
+        message: `Provider '${providerId}' not found or not enabled.`,
         code: 'provider_not_found'
       }
     }
@@ -262,14 +261,8 @@ export function validateProvider(provider: Provider): boolean {
       return false
     }
 
-    // Support OpenAI and Anthropic type providers
-    if (provider.type !== 'openai' && provider.type !== 'anthropic') {
-      logger.debug('Provider type not supported', {
-        providerId: provider.id,
-        providerType: provider.type
-      })
-      return false
-    }
+    // AI SDK supports many provider types, no longer need to filter by type
+    // The unified-messages service handles all supported types
 
     return true
   } catch (error: any) {
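
Below is a minimal, self-contained sketch of the empty-message guard this patch adds to `convertAnthropicToAiMessages`. The types and helper (`Part`, the local `ModelMessage`, `pushIfNonEmpty`) are illustrative stand-ins, not the AI SDK's actual types or the patch code itself:

```ts
// Illustrative stand-in types; the real code uses the AI SDK's ModelMessage.
type Part = { type: string; [key: string]: unknown }
type ModelMessage = { role: 'user' | 'assistant'; content: Part[] }

// Mirrors the patched logic: only push a message when it carries actual content,
// so a turn whose blocks were all consumed elsewhere (e.g. tool results) no
// longer produces an empty user/assistant message.
function pushIfNonEmpty(messages: ModelMessage[], role: ModelMessage['role'], content: Part[]): void {
  if (content.length > 0) {
    messages.push({ role, content })
  }
}

// Usage: the first call is skipped, the second is pushed.
const messages: ModelMessage[] = []
pushIfNonEmpty(messages, 'user', [])
pushIfNonEmpty(messages, 'assistant', [{ type: 'text', text: 'hi' }])
console.log(messages.length) // 1
```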