refactor(aiCore): update ModernAiProvider constructor and clean up unused code

- Modified the constructor of ModernAiProvider to accept an optional provider parameter, enhancing flexibility in provider selection.
- Removed deprecated and unused functions related to search keyword extraction and search summary fetching, streamlining the codebase.
- Updated import statements and adjusted related logic to reflect the removal of obsolete functions, improving maintainability.
This commit is contained in:
MyPrototypeWhat 2025-08-14 18:06:11 +08:00
parent 3630133efd
commit 376020b23c
3 changed files with 163 additions and 568 deletions

View File

@ -30,8 +30,8 @@ export default class ModernAiProvider {
private config: ReturnType<typeof providerToAiSdkConfig>
private actualProvider: Provider
constructor(model: Model) {
this.actualProvider = getActualProvider(model)
constructor(model: Model, provider?: Provider) {
this.actualProvider = provider || getActualProvider(model)
this.legacyProvider = new LegacyAiProvider(this.actualProvider)
// 只保存配置不预先创建executor

View File

@ -26,18 +26,11 @@ import {
isVisionModel,
isWebSearchModel
} from '@renderer/config/models'
import {
SEARCH_SUMMARY_PROMPT,
SEARCH_SUMMARY_PROMPT_KNOWLEDGE_ONLY,
SEARCH_SUMMARY_PROMPT_WEB_ONLY
} from '@renderer/config/prompts'
import { getAssistantSettings, getDefaultModel, getProviderByModel } from '@renderer/services/AssistantService'
import { getDefaultAssistant } from '@renderer/services/AssistantService'
import { getAssistantSettings, getDefaultModel } from '@renderer/services/AssistantService'
import type { Assistant, MCPTool, Message, Model, Provider } from '@renderer/types'
import { FileTypes } from '@renderer/types'
import { FileMessageBlock, ImageMessageBlock, ThinkingMessageBlock } from '@renderer/types/newMessage'
// import { getWebSearchTools } from './utils/websearch'
import { extractInfoFromXML, ExtractResults } from '@renderer/utils/extract'
import {
findFileBlocks,
findImageBlocks,
@ -45,10 +38,7 @@ import {
getMainTextContent
} from '@renderer/utils/messageUtils/find'
import { defaultTimeout } from '@shared/config/constant'
import { isEmpty } from 'lodash'
import AiProvider from './legacy/index'
import { CompletionsParams } from './legacy/middleware/schemas'
// import { webSearchTool } from './tools/WebSearchTool'
// import { jsonSchemaToZod } from 'json-schema-to-zod'
import { setupToolsConfig } from './utils/mcp'
@ -328,99 +318,101 @@ export async function buildGenerateTextParams(
/**
 * Extract search keywords / questions from the latest user message so that
 * downstream web search and knowledge-base search can run on them.
 *
 * Builds a temporary "summary" assistant with an extraction prompt chosen from
 * the enabled search kinds, runs a one-shot completion, and parses the XML
 * reply. Falls back to using the raw message text as the query when the
 * completion is unavailable or extraction throws.
 *
 * @param lastUserMessage message to extract queries from; falsy yields `undefined`
 * @param assistant source of the model used for the extraction call
 * @param options which search kinds are enabled, plus the previous answer for context
 * @returns extraction results filtered to the enabled kinds, or `undefined` when no extraction is needed
 * @deprecated
 */
export async function extractSearchKeywords(
  lastUserMessage: Message,
  assistant: Assistant,
  options: {
    shouldWebSearch?: boolean
    shouldKnowledgeSearch?: boolean
    lastAnswer?: Message
  } = {}
): Promise<ExtractResults | undefined> {
  const { shouldWebSearch = false, shouldKnowledgeSearch = false, lastAnswer } = options

  if (!lastUserMessage) return undefined
  // Nothing to extract when neither search kind is enabled.
  if (!shouldWebSearch && !shouldKnowledgeSearch) return undefined

  // Pick the prompt matching the enabled search kinds.
  let extractionPrompt: string
  if (shouldWebSearch && !shouldKnowledgeSearch) {
    extractionPrompt = SEARCH_SUMMARY_PROMPT_WEB_ONLY
  } else if (!shouldWebSearch && shouldKnowledgeSearch) {
    extractionPrompt = SEARCH_SUMMARY_PROMPT_KNOWLEDGE_ONLY
  } else {
    extractionPrompt = SEARCH_SUMMARY_PROMPT
  }

  // Throw-away assistant used only for this extraction call.
  const summaryAssistant = getDefaultAssistant()
  summaryAssistant.model = assistant.model || getDefaultModel()
  summaryAssistant.prompt = extractionPrompt

  // Fallback: use the raw message text (or 'search') as the query for each enabled kind.
  const getFallbackResult = (): ExtractResults => {
    const fallbackContent = getMainTextContent(lastUserMessage)
    return {
      websearch: shouldWebSearch ? { question: [fallbackContent || 'search'] } : undefined,
      knowledge: shouldKnowledgeSearch
        ? {
            question: [fallbackContent || 'search'],
            rewrite: fallbackContent || 'search'
          }
        : undefined
    }
  }

  try {
    const result = await fetchSearchSummary({
      messages: lastAnswer ? [lastAnswer, lastUserMessage] : [lastUserMessage],
      assistant: summaryAssistant
    })
    if (!result) return getFallbackResult()

    const extracted = extractInfoFromXML(result.getText())
    // Keep only the parts the caller actually asked for.
    return {
      websearch: shouldWebSearch ? extracted?.websearch : undefined,
      knowledge: shouldKnowledgeSearch ? extracted?.knowledge : undefined
    }
  } catch (e: any) {
    console.error('extract error', e)
    return getFallbackResult()
  }
}
// function getFallbackResult(): ExtractResults {
// const fallbackContent = getMainTextContent(lastUserMessage)
// return {
// websearch: shouldWebSearch ? { question: [fallbackContent || 'search'] } : undefined,
// knowledge: shouldKnowledgeSearch
// ? {
// question: [fallbackContent || 'search'],
// rewrite: fallbackContent || 'search'
// }
// : undefined
// }
// }
// }
/**
 * Run a one-shot, non-streaming completion with `callType: 'search'` to obtain
 * a search summary for the given messages.
 *
 * @param messages conversation messages forwarded to the provider
 * @param assistant supplies the model (falls back to the default model) and settings
 * @returns the completion result, or `null` when the resolved provider has no usable API key
 * @deprecated
 */
async function fetchSearchSummary({ messages, assistant }: { messages: Message[]; assistant: Assistant }) {
  const model = assistant.model || getDefaultModel()
  const provider = getProviderByModel(model)

  if (!hasApiKey(provider)) return null

  const params: CompletionsParams = {
    callType: 'search',
    messages,
    assistant,
    streamOutput: false
  }
  return await new AiProvider(provider).completions(params)
}
/**
 * Whether the provider is usable for completions: local/keyless providers
 * (ollama, lmstudio) and vertexai always pass; everything else requires a
 * non-empty `apiKey`.
 */
function hasApiKey(provider: Provider) {
  if (!provider) return false
  const isKeyless = provider.id === 'ollama' || provider.id === 'lmstudio' || provider.type === 'vertexai'
  return isKeyless || !isEmpty(provider.apiKey)
}

View File

@ -48,303 +48,6 @@ import {
const logger = loggerService.withContext('ApiService')
// TODO考虑拆开
// async function fetchExternalTool(
// lastUserMessage: Message,
// assistant: Assistant,
// onChunkReceived: (chunk: Chunk) => void,
// lastAnswer?: Message
// ): Promise<ExternalToolResult> {
// // 可能会有重复?
// const knowledgeBaseIds = assistant.knowledge_bases?.map((base) => base.id)
// const hasKnowledgeBase = !isEmpty(knowledgeBaseIds)
// const knowledgeRecognition = assistant.knowledgeRecognition || 'on'
// const webSearchProvider = WebSearchService.getWebSearchProvider(assistant.webSearchProviderId)
// // 使用外部搜索工具
// const shouldWebSearch = !!assistant.webSearchProviderId && webSearchProvider !== null
// const shouldKnowledgeSearch = hasKnowledgeBase
// const globalMemoryEnabled = selectGlobalMemoryEnabled(store.getState())
// const shouldSearchMemory = globalMemoryEnabled && assistant.enableMemory
// // 在工具链开始时发送进度通知
// const willUseTools = shouldWebSearch || shouldKnowledgeSearch
// if (willUseTools) {
// onChunkReceived({ type: ChunkType.EXTERNEL_TOOL_IN_PROGRESS })
// }
// // --- Keyword/Question Extraction Function ---
// const extract = async (): Promise<ExtractResults | undefined> => {
// if (!lastUserMessage) return undefined
// // 根据配置决定是否需要提取
// const needWebExtract = shouldWebSearch
// const needKnowledgeExtract = hasKnowledgeBase && knowledgeRecognition === 'on'
// if (!needWebExtract && !needKnowledgeExtract) return undefined
// let prompt: string
// if (needWebExtract && !needKnowledgeExtract) {
// prompt = SEARCH_SUMMARY_PROMPT_WEB_ONLY
// } else if (!needWebExtract && needKnowledgeExtract) {
// prompt = SEARCH_SUMMARY_PROMPT_KNOWLEDGE_ONLY
// } else {
// prompt = SEARCH_SUMMARY_PROMPT
// }
// const summaryAssistant = getDefaultAssistant()
// summaryAssistant.model = assistant.model || getDefaultModel()
// summaryAssistant.prompt = prompt
// try {
// const result = await fetchSearchSummary({
// messages: lastAnswer ? [lastAnswer, lastUserMessage] : [lastUserMessage],
// assistant: summaryAssistant
// })
// if (!result) return getFallbackResult()
// const extracted = extractInfoFromXML(result.getText())
// // 根据需求过滤结果
// return {
// websearch: needWebExtract ? extracted?.websearch : undefined,
// knowledge: needKnowledgeExtract ? extracted?.knowledge : undefined
// }
// } catch (e: any) {
// console.error('extract error', e)
// if (isAbortError(e)) throw e
// return getFallbackResult()
// }
// }
// const getFallbackResult = (): ExtractResults => {
// const fallbackContent = getMainTextContent(lastUserMessage)
// return {
// websearch: shouldWebSearch ? { question: [fallbackContent || 'search'] } : undefined,
// knowledge: shouldKnowledgeSearch
// ? {
// question: [fallbackContent || 'search'],
// rewrite: fallbackContent
// }
// : undefined
// }
// }
// // --- Web Search Function ---
// const searchTheWeb = async (extractResults: ExtractResults | undefined): Promise<WebSearchResponse | undefined> => {
// if (!shouldWebSearch) return
// // Add check for extractResults existence early
// if (!extractResults?.websearch) {
// console.warn('searchTheWeb called without valid extractResults.websearch')
// return
// }
// if (extractResults.websearch.question[0] === 'not_needed') return
// // Add check for assistant.model before using it
// if (!assistant.model) {
// console.warn('searchTheWeb called without assistant.model')
// return undefined
// }
// try {
// // Use the consolidated processWebsearch function
// WebSearchService.createAbortSignal(lastUserMessage.id)
// const webSearchResponse = await WebSearchService.processWebsearch(
// webSearchProvider!,
// extractResults,
// lastUserMessage.id
// )
// return {
// results: webSearchResponse,
// source: WebSearchSource.WEBSEARCH
// }
// } catch (error) {
// if (isAbortError(error)) throw error
// console.error('Web search failed:', error)
// return
// }
// }
// const searchMemory = async (): Promise<MemoryItem[] | undefined> => {
// if (!shouldSearchMemory) return []
// try {
// const memoryConfig = selectMemoryConfig(store.getState())
// const content = getMainTextContent(lastUserMessage)
// if (!content) {
// console.warn('searchMemory called without valid content in lastUserMessage')
// return []
// }
// if (memoryConfig.llmApiClient && memoryConfig.embedderApiClient) {
// const currentUserId = selectCurrentUserId(store.getState())
// // Search for relevant memories
// const processorConfig = MemoryProcessor.getProcessorConfig(memoryConfig, assistant.id, currentUserId)
// console.log('Searching for relevant memories with content:', content)
// const memoryProcessor = new MemoryProcessor()
// const relevantMemories = await memoryProcessor.searchRelevantMemories(
// content,
// processorConfig,
// 5 // Limit to top 5 most relevant memories
// )
// if (relevantMemories?.length > 0) {
// console.log('Found relevant memories:', relevantMemories)
// return relevantMemories
// }
// return []
// } else {
// console.warn('Memory is enabled but embedding or LLM model is not configured')
// return []
// }
// } catch (error) {
// console.error('Error processing memory search:', error)
// // Continue with conversation even if memory processing fails
// return []
// }
// }
// // --- Knowledge Base Search Function ---
// const searchKnowledgeBase = async (
// extractResults: ExtractResults | undefined
// ): Promise<KnowledgeReference[] | undefined> => {
// if (!hasKnowledgeBase) return
// // 知识库搜索条件
// let searchCriteria: { question: string[]; rewrite: string }
// if (knowledgeRecognition === 'off') {
// const directContent = getMainTextContent(lastUserMessage)
// searchCriteria = { question: [directContent || 'search'], rewrite: directContent }
// } else {
// // auto mode
// if (!extractResults?.knowledge) {
// console.warn('searchKnowledgeBase: No valid search criteria in auto mode')
// return
// }
// searchCriteria = extractResults.knowledge
// }
// if (searchCriteria.question[0] === 'not_needed') return
// try {
// const tempExtractResults: ExtractResults = {
// websearch: undefined,
// knowledge: searchCriteria
// }
// // Attempt to get knowledgeBaseIds from the main text block
// // NOTE: This assumes knowledgeBaseIds are ONLY on the main text block
// // NOTE: processKnowledgeSearch needs to handle undefined ids gracefully
// // const mainTextBlock = mainTextBlocks
// // ?.map((blockId) => store.getState().messageBlocks.entities[blockId])
// // .find((block) => block?.type === MessageBlockType.MAIN_TEXT) as MainTextMessageBlock | undefined
// return await processKnowledgeSearch(tempExtractResults, knowledgeBaseIds)
// } catch (error) {
// console.error('Knowledge base search failed:', error)
// return
// }
// }
// // --- Execute Extraction and Searches ---
// let extractResults: ExtractResults | undefined
// try {
// // 根据配置决定是否需要提取
// if (shouldWebSearch || hasKnowledgeBase) {
// extractResults = await extract()
// Logger.log('[fetchExternalTool] Extraction results:', extractResults)
// }
// let webSearchResponseFromSearch: WebSearchResponse | undefined
// let knowledgeReferencesFromSearch: KnowledgeReference[] | undefined
// let memorySearchReferences: MemoryItem[] | undefined
// // 并行执行搜索
// if (shouldWebSearch || shouldKnowledgeSearch || shouldSearchMemory) {
// ;[webSearchResponseFromSearch, knowledgeReferencesFromSearch, memorySearchReferences] = await Promise.all([
// searchTheWeb(extractResults),
// searchKnowledgeBase(extractResults),
// searchMemory()
// ])
// }
// // 存储搜索结果
// if (lastUserMessage) {
// if (webSearchResponseFromSearch) {
// window.keyv.set(`web-search-${lastUserMessage.id}`, webSearchResponseFromSearch)
// }
// if (knowledgeReferencesFromSearch) {
// window.keyv.set(`knowledge-search-${lastUserMessage.id}`, knowledgeReferencesFromSearch)
// }
// if (memorySearchReferences) {
// window.keyv.set(`memory-search-${lastUserMessage.id}`, memorySearchReferences)
// }
// }
// // 发送工具执行完成通知
// const wasAnyToolEnabled = shouldWebSearch || shouldKnowledgeSearch || shouldSearchMemory
// if (wasAnyToolEnabled) {
// onChunkReceived({
// type: ChunkType.EXTERNEL_TOOL_COMPLETE,
// external_tool: {
// webSearch: webSearchResponseFromSearch,
// knowledge: knowledgeReferencesFromSearch,
// memories: memorySearchReferences
// }
// })
// }
// // Get MCP tools (Fix duplicate declaration)
// let mcpTools: MCPTool[] = [] // Initialize as empty array
// const allMcpServers = store.getState().mcp.servers || []
// const activedMcpServers = allMcpServers.filter((s) => s.isActive)
// const assistantMcpServers = assistant.mcpServers || []
// const enabledMCPs = activedMcpServers.filter((server) => assistantMcpServers.some((s) => s.id === server.id))
// if (enabledMCPs && enabledMCPs.length > 0) {
// try {
// const toolPromises = enabledMCPs.map<Promise<MCPTool[]>>(async (mcpServer) => {
// try {
// const tools = await window.api.mcp.listTools(mcpServer)
// return tools.filter((tool: any) => !mcpServer.disabledTools?.includes(tool.name))
// } catch (error) {
// console.error(`Error fetching tools from MCP server ${mcpServer.name}:`, error)
// return []
// }
// })
// const results = await Promise.allSettled(toolPromises)
// mcpTools = results
// .filter((result): result is PromiseFulfilledResult<MCPTool[]> => result.status === 'fulfilled')
// .map((result) => result.value)
// .flat()
// } catch (toolError) {
// console.error('Error fetching MCP tools:', toolError)
// }
// }
// return { mcpTools }
// } catch (error) {
// if (isAbortError(error)) throw error
// console.error('Tool execution failed:', error)
// // 发送错误状态
// const wasAnyToolEnabled = shouldWebSearch || shouldKnowledgeSearch || shouldSearchMemory
// if (wasAnyToolEnabled) {
// onChunkReceived({
// type: ChunkType.EXTERNEL_TOOL_COMPLETE,
// external_tool: {
// webSearch: undefined,
// knowledge: undefined
// }
// })
// }
// return { mcpTools: [] }
// }
// }
export async function fetchMcpTools(assistant: Assistant) {
// Get MCP tools (Fix duplicate declaration)
let mcpTools: MCPTool[] = [] // Initialize as empty array
@ -506,124 +209,8 @@ export async function fetchChatCompletion({
callType: 'chat'
})
}
// await AI.completions(
// {
// callType: 'chat',
// messages: _messages,
// assistant,
// onChunk: onChunkReceived,
// mcpTools: mcpTools,
// maxTokens,
// streamOutput: assistant.settings?.streamOutput || false,
// enableReasoning,
// enableWebSearch,
// enableGenerateImage
// },
// {
// streamOutput: assistant.settings?.streamOutput || false
// }
// )
// Post-conversation memory processing
// TODO: 需要转换messages
// const globalMemoryEnabled = selectGlobalMemoryEnabled(store.getState())
// if (globalMemoryEnabled && assistant.enableMemory) {
// await processConversationMemory(messages, assistant)
// }
}
/**
* Process conversation for memory extraction and storage
*/
// async function processConversationMemory(messages: Message[], assistant: Assistant) {
// try {
// const memoryConfig = selectMemoryConfig(store.getState())
// // Use assistant's model as fallback for memory processing if not configured
// const llmModel =
// getModel(memoryConfig.llmApiClient?.model, memoryConfig.llmApiClient?.provider) ||
// assistant.model ||
// getDefaultModel()
// const embedderModel =
// getModel(memoryConfig.embedderApiClient?.model, memoryConfig.embedderApiClient?.provider) ||
// getFirstEmbeddingModel()
// if (!embedderModel) {
// console.warn(
// 'Memory processing skipped: no embedding model available. Please configure an embedding model in memory settings.'
// )
// return
// }
// if (!llmModel) {
// console.warn('Memory processing skipped: LLM model not available')
// return
// }
// // Convert messages to the format expected by memory processor
// const conversationMessages = messages
// .filter((msg) => msg.role === 'user' || msg.role === 'assistant')
// .map((msg) => ({
// role: msg.role as 'user' | 'assistant',
// content: getMainTextContent(msg) || ''
// }))
// .filter((msg) => msg.content.trim().length > 0)
// // if (conversationMessages.length < 2) {
// // Need at least a user message and assistant response
// // return
// // }
// const currentUserId = selectCurrentUserId(store.getState())
// // Create updated memory config with resolved models
// const updatedMemoryConfig = {
// ...memoryConfig,
// llmApiClient: {
// model: llmModel.id,
// provider: llmModel.provider,
// apiKey: getProviderByModel(llmModel).apiKey,
// baseURL: new AiProvider(getProviderByModel(llmModel)).getBaseURL(),
// apiVersion: getProviderByModel(llmModel).apiVersion
// },
// embedderApiClient: {
// model: embedderModel.id,
// provider: embedderModel.provider,
// apiKey: getProviderByModel(embedderModel).apiKey,
// baseURL: new AiProvider(getProviderByModel(embedderModel)).getBaseURL(),
// apiVersion: getProviderByModel(embedderModel).apiVersion
// }
// }
// const lastUserMessage = findLast(messages, (m) => m.role === 'user')
// const processorConfig = MemoryProcessor.getProcessorConfig(
// updatedMemoryConfig,
// assistant.id,
// currentUserId,
// lastUserMessage?.id
// )
// // Process the conversation in the background (don't await to avoid blocking UI)
// const memoryProcessor = new MemoryProcessor()
// memoryProcessor
// .processConversation(conversationMessages, processorConfig)
// .then((result) => {
// console.log('Memory processing completed:', result)
// if (result.facts.length > 0) {
// console.log('Extracted facts from conversation:', result.facts)
// console.log('Memory operations performed:', result.operations)
// } else {
// console.log('No facts extracted from conversation')
// }
// })
// .catch((error) => {
// console.error('Background memory processing failed:', error)
// })
// } catch (error) {
// console.error('Error in post-conversation memory processing:', error)
// }
// }
interface FetchTranslateProps {
content: string
assistant: TranslateAssistant
@ -656,19 +243,42 @@ export async function fetchTranslate({ content, assistant, onResponse }: FetchTr
assistant.settings?.reasoning_effort !== undefined) ||
(isReasoningModel(model) && (!isSupportedThinkingTokenModel(model) || !isSupportedReasoningEffortModel(model)))
const params: CompletionsParams = {
callType: 'translate',
messages: content,
assistant: { ...assistant, model },
streamOutput: stream,
enableReasoning,
onResponse
const params: StreamTextParams = {
system: assistant.prompt,
prompt: content
}
const AI = new AiProvider(provider)
const AI = new AiProviderNew(model)
const middlewareConfig: AiSdkMiddlewareConfig = {
streamOutput: stream,
enableReasoning,
isPromptToolUse: false,
isSupportedToolUse: false,
isImageGenerationEndpoint: false,
enableWebSearch: false,
enableGenerateImage: false,
onChunk: onResponse
? (chunk) => {
if (chunk.type === ChunkType.TEXT_DELTA) {
onResponse(chunk.text, false)
} else if (chunk.type === ChunkType.TEXT_COMPLETE) {
onResponse(chunk.text, true)
}
}
: undefined
}
try {
return (await AI.completions(params)).getText() || ''
return (
(
await AI.completions(model.id, params, {
...middlewareConfig,
assistant,
callType: 'translate'
})
).getText() || ''
)
} catch (error: any) {
return ''
}
@ -735,25 +345,12 @@ export async function fetchMessagesSummary({ messages, assistant }: { messages:
prompt,
model
}
// const params: CompletionsParams = {
// callType: 'summary',
// messages: conversation,
// assistant: { ...summaryAssistant, prompt, model },
// maxTokens: 1000,
// streamOutput: false,
// topicId,
// enableReasoning: false
// }
const llmMessages = {
system: prompt,
prompt: conversation
}
// const llmMessages = await ConversationService.prepareMessagesForModel(messages, summaryAssistant)
// 使用 transformParameters 模块构建参数
// const { params: aiSdkParams, modelId } = await buildStreamTextParams(llmMessages, summaryAssistant, provider)
const middlewareConfig: AiSdkMiddlewareConfig = {
streamOutput: false,
enableReasoning: false,
@ -764,7 +361,6 @@ export async function fetchMessagesSummary({ messages, assistant }: { messages:
enableGenerateImage: false,
mcpTools: []
}
console.log('fetchMessagesSummary', '开始总结')
try {
const { getText } = await AI.completionsForTrace(model.id, llmMessages, {
...middlewareConfig,
@ -772,7 +368,6 @@ export async function fetchMessagesSummary({ messages, assistant }: { messages:
topicId,
callType: 'summary'
})
console.log('fetchMessagesSummary', '总结完成', getText())
const text = getText()
return removeSpecialCharactersForTopicName(text) || null
} catch (error: any) {
@ -780,29 +375,6 @@ export async function fetchMessagesSummary({ messages, assistant }: { messages:
}
}
/**
 * Run a traced, non-streaming 'search' completion over the given messages.
 *
 * @param messages conversation messages; the first message carrying a `topicId` provides the trace topic
 * @param assistant supplies the model (falls back to the default model) and settings
 * @returns the traced completion result, or `null` when the resolved provider has no usable API key
 */
export async function fetchSearchSummary({ messages, assistant }: { messages: Message[]; assistant: Assistant }) {
  const model = assistant.model || getDefaultModel()
  const provider = getProviderByModel(model)

  if (!hasApiKey(provider)) return null

  // Attach the call to the first topic found on the messages, if any.
  const topicId = messages?.find((message) => message.topicId)?.topicId || undefined

  const params: CompletionsParams = {
    callType: 'search',
    messages,
    assistant,
    streamOutput: false,
    topicId
  }
  return await new AiProvider(provider).completionsForTrace(params)
}
export async function fetchGenerate({
prompt,
content,
@ -821,21 +393,41 @@ export async function fetchGenerate({
return ''
}
const AI = new AiProvider(provider)
const AI = new AiProviderNew(model)
const assistant = getDefaultAssistant()
assistant.model = model
assistant.prompt = prompt
const params: CompletionsParams = {
callType: 'generate',
messages: content,
assistant,
streamOutput: false
// const params: CompletionsParams = {
// callType: 'generate',
// messages: content,
// assistant,
// streamOutput: false
// }
const middlewareConfig: AiSdkMiddlewareConfig = {
streamOutput: assistant.settings?.streamOutput ?? true,
enableReasoning: false,
isPromptToolUse: false,
isSupportedToolUse: false,
isImageGenerationEndpoint: false,
enableWebSearch: false,
enableGenerateImage: false
}
try {
const result = await AI.completions(params)
const result = await AI.completions(
model.id,
{
prompt: content
},
{
...middlewareConfig,
assistant,
callType: 'generate'
}
)
return result.getText() || ''
} catch (error: any) {
return ''
@ -865,7 +457,7 @@ function hasApiKey(provider: Provider) {
// }
export async function fetchModels(provider: Provider): Promise<SdkModel[]> {
const AI = new AiProvider(provider)
const AI = new AiProviderNew({} as Model, provider)
try {
return await AI.models()
@ -904,7 +496,7 @@ export function checkApiProvider(provider: Provider): void {
export async function checkApi(provider: Provider, model: Model): Promise<void> {
checkApiProvider(provider)
const ai = new AiProvider(provider)
const ai = new AiProviderNew(model)
const assistant = getDefaultAssistant()
assistant.model = model
@ -912,22 +504,33 @@ export async function checkApi(provider: Provider, model: Model): Promise<void>
if (isEmbeddingModel(model)) {
await ai.getEmbeddingDimensions(model)
} else {
const params: CompletionsParams = {
callType: 'check',
messages: 'hi',
assistant,
streamOutput: true,
const params: StreamTextParams = {
system: assistant.prompt,
prompt: 'hi'
}
const middlewareConfig: AiSdkMiddlewareConfig = {
streamOutput: false,
enableReasoning: false,
shouldThrow: true
isSupportedToolUse: false,
isImageGenerationEndpoint: false,
enableWebSearch: false,
enableGenerateImage: false,
isPromptToolUse: false
}
// Try streaming check first
const result = await ai.completions(params)
const result = await ai.completions(model.id, params, {
...middlewareConfig,
assistant,
callType: 'check'
})
if (!result.getText()) {
throw new Error('No response received')
}
}
} catch (error: any) {
// 失败回退legacy
const legacyAi = new AiProvider(provider)
if (error.message.includes('stream')) {
const params: CompletionsParams = {
callType: 'check',
@ -936,7 +539,7 @@ export async function checkApi(provider: Provider, model: Model): Promise<void>
streamOutput: false,
shouldThrow: true
}
const result = await ai.completions(params)
const result = await legacyAi.completions(params)
if (!result.getText()) {
throw new Error('No response received')
}