diff --git a/OpenAIProvider.ts b/OpenAIProvider.ts new file mode 100644 index 0000000000..7cea3ddecc --- /dev/null +++ b/OpenAIProvider.ts @@ -0,0 +1,909 @@ +import { DEFAULT_MAX_TOKENS } from '@renderer/config/constant' +import { + getOpenAIWebSearchParams, + isGrokReasoningModel, + isHunyuanSearchModel, + isOpenAIoSeries, + isOpenAIWebSearch, + isReasoningModel, + isSupportedModel, + isVisionModel, + isZhipuModel +} from '@renderer/config/models' +import { getStoreSetting } from '@renderer/hooks/useSettings' +import i18n from '@renderer/i18n' +import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/AssistantService' +import { EVENT_NAMES } from '@renderer/services/EventService' +import { + filterContextMessages, + filterEmptyMessages, + filterUserRoleStartMessages +} from '@renderer/services/MessagesService' +import store from '@renderer/store' +import { + Assistant, + FileTypes, + GenerateImageParams, + MCPToolResponse, + Model, + Provider, + Suggestion +} from '@renderer/types' +import { Message } from '@renderer/types/newMessageTypes' +import { removeSpecialCharactersForTopicName } from '@renderer/utils' +import { addImageFileToContents } from '@renderer/utils/formats' +import { findFileBlocks, findImageBlocks, getMessageContent } from '@renderer/utils/messageUtils/find' +import { mcpToolCallResponseToOpenAIMessage, parseAndCallTools } from '@renderer/utils/mcp-tools' +import { buildSystemPrompt } from '@renderer/utils/prompt' +import { takeRight } from 'lodash' +import OpenAI, { AzureOpenAI } from 'openai' +import { + ChatCompletionContentPart, + ChatCompletionCreateParamsNonStreaming, + ChatCompletionMessageParam +} from 'openai/resources' + +import { CompletionsParams } from '.' +import BaseProvider from './BaseProvider' + +type ReasoningEffort = 'high' | 'medium' | 'low' + +export default class OpenAIProvider extends BaseProvider { + private sdk: OpenAI + + constructor(provider: Provider) { + super(provider) + + if (provider.id === 'azure-openai' || provider.type === 'azure-openai') { + this.sdk = new AzureOpenAI({ + dangerouslyAllowBrowser: true, + apiKey: this.apiKey, + apiVersion: provider.apiVersion, + endpoint: provider.apiHost + }) + return + } + + this.sdk = new OpenAI({ + dangerouslyAllowBrowser: true, + apiKey: this.apiKey, + baseURL: this.getBaseURL(), + defaultHeaders: { + ...this.defaultHeaders(), + ...(this.provider.id === 'copilot' ? 
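// Copilot requests carry one extra header: the editor-version value below identifies the client as a VS Code build; all other providers get only the default headers.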
{ 'editor-version': 'vscode/1.97.2' } : {}) + } + }) + } + + /** + * Check if the provider does not support files + * @returns True if the provider does not support files, false otherwise + */ + private get isNotSupportFiles() { + if (this.provider?.isNotSupportArrayContent) { + return true + } + + const providers = ['deepseek', 'baichuan', 'minimax', 'xirang'] + + return providers.includes(this.provider.id) + } + + /** + * Extract the file content from the message + * @param message - The message + * @returns The file content + */ + private async extractFileContent(message: Message) { + const fileBlocks = findFileBlocks(message) + if (fileBlocks.length > 0) { + const textFileBlocks = fileBlocks.filter( + (fb) => fb.file && [FileTypes.TEXT, FileTypes.DOCUMENT].includes(fb.file.type) + ) + + if (textFileBlocks.length > 0) { + let text = '' + const divider = '\n\n---\n\n' + + for (const fileBlock of textFileBlocks) { + const file = fileBlock.file + const fileContent = (await window.api.file.read(file.id + file.ext)).trim() + const fileNameRow = 'file: ' + file.origin_name + '\n\n' + text = text + fileNameRow + fileContent + divider + } + + return text + } + } + + return '' + } + + /** + * Get the message parameter + * @param message - The message + * @param model - The model + * @returns The message parameter + */ + private async getMessageParam( + message: Message, + model: Model + ): Promise { + const isVision = isVisionModel(model) + const content = await this.getMessageContent(message) + const fileBlocks = findFileBlocks(message) + const imageBlocks = findImageBlocks(message) + + if (fileBlocks.length === 0 && imageBlocks.length === 0) { + return { + role: message.role === 'system' ? 'user' : message.role, + content + } + } + + // If the model does not support files, extract the file content + if (this.isNotSupportFiles) { + const fileContent = await this.extractFileContent(message) + + return { + role: message.role === 'system' ? 'user' : message.role, + content: content + '\n\n---\n\n' + fileContent + } + } + + // If the model supports files, add the file content to the message + const parts: ChatCompletionContentPart[] = [] + + if (content) { + parts.push({ type: 'text', text: content }) + } + + for (const imageBlock of imageBlocks) { + if (isVision) { + if (imageBlock.file) { + const image = await window.api.file.base64Image(imageBlock.file.id + imageBlock.file.ext) + parts.push({ type: 'image_url', image_url: { url: image.data } }) + } else if (imageBlock.url && imageBlock.url.startsWith('data:')) { + parts.push({ type: 'image_url', image_url: { url: imageBlock.url } }) + } + } + } + + for (const fileBlock of fileBlocks) { + const file = fileBlock.file + if (!file) continue + + if ([FileTypes.TEXT, FileTypes.DOCUMENT].includes(file.type)) { + const fileContent = await (await window.api.file.read(file.id + file.ext)).trim() + parts.push({ + type: 'text', + text: file.origin_name + '\n' + fileContent + }) + } + } + + return { + role: message.role === 'system' ? 'user' : message.role, + content: parts + } as ChatCompletionMessageParam + } + + /** + * Get the temperature for the assistant + * @param assistant - The assistant + * @param model - The model + * @returns The temperature + */ + private getTemperature(assistant: Assistant, model: Model) { + return isReasoningModel(model) || isOpenAIWebSearch(model) ? 
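// Reasoning models and OpenAI web-search models get no explicit temperature (undefined); everything else uses the assistant's configured value.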
undefined : assistant?.settings?.temperature + } + + /** + * Get the provider specific parameters for the assistant + * @param assistant - The assistant + * @param model - The model + * @returns The provider specific parameters + */ + private getProviderSpecificParameters(assistant: Assistant, model: Model) { + const { maxTokens } = getAssistantSettings(assistant) + + if (this.provider.id === 'openrouter') { + if (model.id.includes('deepseek-r1')) { + return { + include_reasoning: true + } + } + } + + if (this.isOpenAIReasoning(model)) { + return { + max_tokens: undefined, + max_completion_tokens: maxTokens + } + } + + return {} + } + + /** + * Get the top P for the assistant + * @param assistant - The assistant + * @param model - The model + * @returns The top P + */ + private getTopP(assistant: Assistant, model: Model) { + if (isReasoningModel(model) || isOpenAIWebSearch(model)) return undefined + + return assistant?.settings?.topP + } + + /** + * Get the reasoning effort for the assistant + * @param assistant - The assistant + * @param model - The model + * @returns The reasoning effort + */ + private getReasoningEffort(assistant: Assistant, model: Model) { + if (this.provider.id === 'groq') { + return {} + } + + if (isReasoningModel(model)) { + if (model.provider === 'openrouter') { + return { + reasoning: { + effort: assistant?.settings?.reasoning_effort + } + } + } + + if (isGrokReasoningModel(model)) { + return { + reasoning_effort: assistant?.settings?.reasoning_effort + } + } + + if (isOpenAIoSeries(model)) { + return { + reasoning_effort: assistant?.settings?.reasoning_effort + } + } + + if (model.id.includes('claude-3.7-sonnet') || model.id.includes('claude-3-7-sonnet')) { + const effortRatios: Record = { + high: 0.8, + medium: 0.5, + low: 0.2 + } + + const effort = assistant?.settings?.reasoning_effort as ReasoningEffort + const effortRatio = effortRatios[effort] + + if (!effortRatio) { + return {} + } + + const maxTokens = assistant?.settings?.maxTokens || DEFAULT_MAX_TOKENS + const budgetTokens = Math.trunc(Math.max(Math.min(maxTokens * effortRatio, 32000), 1024)) + + return { + thinking: { + type: 'enabled', + budget_tokens: budgetTokens + } + } + } + + return {} + } + + return {} + } + + /** + * Check if the model is an OpenAI reasoning model + * @param model - The model + * @returns True if the model is an OpenAI reasoning model, false otherwise + */ + private isOpenAIReasoning(model: Model) { + return model.id.startsWith('o1') || model.id.startsWith('o3') + } + + /** + * Generate completions for the assistant + * @param messages - The messages + * @param assistant - The assistant + * @param mcpTools - The MCP tools + * @param onChunk - The onChunk callback + * @param onFilterMessages - The onFilterMessages callback + * @returns The completions + */ + async completions({ messages, assistant, mcpTools, onChunk, onFilterMessages }: CompletionsParams): Promise { + const defaultModel = getDefaultModel() + const model = assistant.model || defaultModel + const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant) + messages = addImageFileToContents(messages) + let systemMessage = { role: 'system', content: assistant.prompt || '' } + if (isOpenAIoSeries(model)) { + systemMessage = { + role: 'developer', + content: `Formatting re-enabled${systemMessage ? 
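// systemMessage is always defined at this point, so the assistant's original prompt is appended after the "Formatting re-enabled" directive.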
'\n' + systemMessage.content : ''}` + } + } + if (mcpTools && mcpTools.length > 0) { + systemMessage.content = buildSystemPrompt(systemMessage.content || '', mcpTools) + } + + const userMessages: ChatCompletionMessageParam[] = [] + const _messages = filterUserRoleStartMessages( + filterEmptyMessages(filterContextMessages(takeRight(messages, contextCount + 1))) + ) + + onFilterMessages(_messages) + + for (const message of _messages) { + userMessages.push(await this.getMessageParam(message, model)) + } + + const isOpenAIReasoning = this.isOpenAIReasoning(model) + + const isSupportStreamOutput = () => { + if (isOpenAIReasoning) { + return false + } + return streamOutput + } + + let hasReasoningContent = false + let lastChunk = '' + const isReasoningJustDone = ( + delta: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta & { + reasoning_content?: string + reasoning?: string + thinking?: string + } + ) => { + if (!delta?.content) return false + + // 检查当前chunk和上一个chunk的组合是否形成###Response标记 + const combinedChunks = lastChunk + delta.content + lastChunk = delta.content + + // 检测思考结束 + if (combinedChunks.includes('###Response') || delta.content === '') { + return true + } + + // 如果有reasoning_content或reasoning,说明是在思考中 + if (delta?.reasoning_content || delta?.reasoning || delta?.thinking) { + hasReasoningContent = true + } + + // 如果之前有reasoning_content或reasoning,现在有普通content,说明思考结束 + if (hasReasoningContent && delta.content) { + return true + } + + return false + } + + let time_first_token_millsec = 0 + let time_first_content_millsec = 0 + const start_time_millsec = new Date().getTime() + const lastUserMessage = _messages.findLast((m) => m.role === 'user') + const { abortController, cleanup, signalPromise } = this.createAbortController(lastUserMessage?.id, true) + const { signal } = abortController + await this.checkIsCopilot() + + const reqMessages: ChatCompletionMessageParam[] = [systemMessage, ...userMessages].filter( + Boolean + ) as ChatCompletionMessageParam[] + + const toolResponses: MCPToolResponse[] = [] + let firstChunk = true + + const processToolUses = async (content: string, idx: number) => { + const toolResults = await parseAndCallTools( + content, + toolResponses, + onChunk, + idx, + mcpToolCallResponseToOpenAIMessage, + mcpTools, + isVisionModel(model) + ) + + if (toolResults.length > 0) { + reqMessages.push({ + role: 'assistant', + content: content + } as ChatCompletionMessageParam) + toolResults.forEach((ts) => reqMessages.push(ts as ChatCompletionMessageParam)) + + const newStream = await this.sdk.chat.completions + // @ts-ignore key is not typed + .create( + { + model: model.id, + messages: reqMessages, + temperature: this.getTemperature(assistant, model), + top_p: this.getTopP(assistant, model), + max_tokens: maxTokens, + keep_alive: this.keepAliveTime, + stream: isSupportStreamOutput(), + // tools: tools, + ...getOpenAIWebSearchParams(assistant, model), + ...this.getReasoningEffort(assistant, model), + ...this.getProviderSpecificParameters(assistant, model), + ...this.getCustomParameters(assistant) + }, + { + signal + } + ) + await processStream(newStream, idx + 1) + } + } + + const processStream = async (stream: any, idx: number) => { + if (!isSupportStreamOutput()) { + const time_completion_millsec = new Date().getTime() - start_time_millsec + return onChunk({ + text: stream.choices[0].message?.content || '', + usage: stream.usage, + metrics: { + completion_tokens: stream.usage?.completion_tokens, + time_completion_millsec, + time_first_token_millsec: 0 + } + }) + } + + 
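// Streaming path: accumulate the assistant text chunk by chunk, track first-token and thinking latency,
// surface provider-specific web-search results (Zhipu, Hunyuan) and citations, then hand the final
// content to processToolUses so any MCP tool calls embedded in it can be executed.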
let content = '' + for await (const chunk of stream) { + if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) { + break + } + + const delta = chunk.choices[0]?.delta + if (delta?.content) { + content += delta.content + } + + if (delta?.reasoning_content || delta?.reasoning) { + hasReasoningContent = true + } + + if (time_first_token_millsec == 0) { + time_first_token_millsec = new Date().getTime() - start_time_millsec + } + + if (time_first_content_millsec == 0 && isReasoningJustDone(delta)) { + time_first_content_millsec = new Date().getTime() + } + + const time_completion_millsec = new Date().getTime() - start_time_millsec + const time_thinking_millsec = time_first_content_millsec ? time_first_content_millsec - start_time_millsec : 0 + + // Extract citations from the raw response if available + const citations = (chunk as OpenAI.Chat.Completions.ChatCompletionChunk & { citations?: string[] })?.citations + + const finishReason = chunk.choices[0]?.finish_reason + + let webSearch: any[] | undefined = undefined + if (assistant.enableWebSearch && isZhipuModel(model) && finishReason === 'stop') { + webSearch = chunk?.web_search + } + if (firstChunk && assistant.enableWebSearch && isHunyuanSearchModel(model)) { + webSearch = chunk?.search_info?.search_results + firstChunk = true + } + onChunk({ + text: delta?.content || '', + reasoning_content: delta?.reasoning_content || delta?.reasoning || '', + usage: chunk.usage, + metrics: { + completion_tokens: chunk.usage?.completion_tokens, + time_completion_millsec, + time_first_token_millsec, + time_thinking_millsec + }, + webSearch, + annotations: delta?.annotations, + citations, + mcpToolResponse: toolResponses + }) + } + + await processToolUses(content, idx) + } + + const stream = await this.sdk.chat.completions + // @ts-ignore key is not typed + .create( + { + model: model.id, + messages: reqMessages, + temperature: this.getTemperature(assistant, model), + top_p: this.getTopP(assistant, model), + max_tokens: maxTokens, + keep_alive: this.keepAliveTime, + stream: isSupportStreamOutput(), + // tools: tools, + ...getOpenAIWebSearchParams(assistant, model), + ...this.getReasoningEffort(assistant, model), + ...this.getProviderSpecificParameters(assistant, model), + ...this.getCustomParameters(assistant) + }, + { + signal + } + ) + + await processStream(stream, 0).finally(cleanup) + // 捕获signal的错误 + await signalPromise?.promise?.catch((error) => { + throw error + }) + } + + /** + * Translate a message + * @param message - The message + * @param assistant - The assistant + * @param onResponse - The onResponse callback + * @returns The translated message + */ + async translate(message: Message, assistant: Assistant, onResponse?: (text: string) => void) { + const defaultModel = getDefaultModel() + const model = assistant.model || defaultModel + const content = await this.getMessageContent(message) + const messagesForApi = content + ? 
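// When the message has content, send the assistant prompt as the system message and the content as the user message; otherwise fall back to sending the prompt alone as a user message.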
[ + { role: 'system', content: assistant.prompt }, + { role: 'user', content } + ] + : [{ role: 'user', content: assistant.prompt }] + + const isOpenAIReasoning = this.isOpenAIReasoning(model) + + const isSupportedStreamOutput = () => { + if (!onResponse) { + return false + } + if (isOpenAIReasoning) { + return false + } + return true + } + + const stream = isSupportedStreamOutput() + + await this.checkIsCopilot() + + // @ts-ignore key is not typed + const response = await this.sdk.chat.completions.create({ + model: model.id, + messages: messagesForApi as ChatCompletionMessageParam[], + stream, + keep_alive: this.keepAliveTime, + temperature: assistant?.settings?.temperature + }) + + if (!stream) { + return response.choices[0].message?.content || '' + } + + let text = '' + let isThinking = false + const isReasoning = isReasoningModel(model) + + for await (const chunk of response) { + const deltaContent = chunk.choices[0]?.delta?.content || '' + + if (isReasoning) { + if (deltaContent.includes('')) { + isThinking = true + } + + if (!isThinking) { + text += deltaContent + onResponse?.(text) + } + + if (deltaContent.includes('')) { + isThinking = false + } + } else { + text += deltaContent + onResponse?.(text) + } + } + + return text + } + + /** + * Summarize a message + * @param messages - The messages + * @param assistant - The assistant + * @returns The summary + */ + public async summaries(messages: Message[], assistant: Assistant): Promise { + const model = getTopNamingModel() || assistant.model || getDefaultModel() + + const userMessages = takeRight(messages, 5) + .filter((message) => !message.isPreset) + .map((message) => ({ + role: message.role, + content: getMessageContent(message) + })) + + const userMessageContent = userMessages.reduce((prev, curr) => { + const content = curr.role === 'user' ? `User: ${curr.content}` : `Assistant: ${curr.content}` + return prev + (prev ? 
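// Fold the recent messages into a single transcript, one "User:"/"Assistant:" line per message.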
'\n' : '') + content + }, '') + + const systemMessage = { + role: 'system', + content: getStoreSetting('topicNamingPrompt') || i18n.t('prompts.title') + } + + const userMessage = { + role: 'user', + content: userMessageContent + } + + await this.checkIsCopilot() + + // @ts-ignore key is not typed + const response = await this.sdk.chat.completions.create({ + model: model.id, + messages: [systemMessage, userMessage] as ChatCompletionMessageParam[], + stream: false, + keep_alive: this.keepAliveTime, + max_tokens: 1000 + }) + + // 针对思考类模型的返回,总结仅截取之后的内容 + let content = response.choices[0].message?.content || '' + content = content.replace(/^(.*?)<\/think>/s, '') + + return removeSpecialCharactersForTopicName(content.substring(0, 50)) + } + + /** + * Summarize a message for search + * @param messages - The messages + * @param assistant - The assistant + * @returns The summary + */ + public async summaryForSearch(messages: Message[], assistant: Assistant): Promise { + const model = assistant.model || getDefaultModel() + + const systemMessage = { + role: 'system', + content: assistant.prompt + } + + const messageContents = messages.map((m) => getMessageContent(m)) + const userMessageContent = messageContents.join('\n') + + const userMessage = { + role: 'user', + content: userMessageContent + } + // @ts-ignore key is not typed + const response = await this.sdk.chat.completions.create( + { + model: model.id, + messages: [systemMessage, userMessage] as ChatCompletionMessageParam[], + stream: false, + keep_alive: this.keepAliveTime, + max_tokens: 1000 + }, + { + timeout: 20 * 1000 + } + ) + + // 针对思考类模型的返回,总结仅截取之后的内容 + let content = response.choices[0].message?.content || '' + content = content.replace(/^(.*?)<\/think>/s, '') + + return content + } + + /** + * Generate text + * @param prompt - The prompt + * @param content - The content + * @returns The generated text + */ + public async generateText({ prompt, content }: { prompt: string; content: string }): Promise { + const model = getDefaultModel() + + await this.checkIsCopilot() + + const response = await this.sdk.chat.completions.create({ + model: model.id, + stream: false, + messages: [ + { role: 'system', content: prompt }, + { role: 'user', content } + ] + }) + + return response.choices[0].message?.content || '' + } + + /** + * Generate suggestions + * @param messages - The messages + * @param assistant - The assistant + * @returns The suggestions + */ + async suggestions(messages: Message[], assistant: Assistant): Promise { + const model = assistant.model + + if (!model) { + return [] + } + + await this.checkIsCopilot() + + const userMessagesForApi = messages + .filter((m) => m.role === 'user') + .map((m) => ({ + role: m.role, + content: getMessageContent(m) + })) + + const response: any = await this.sdk.request({ + method: 'post', + path: '/advice_questions', + body: { + messages: userMessagesForApi, + model: model.id, + max_tokens: 0, + temperature: 0, + n: 0 + } + }) + + return response?.questions?.filter(Boolean)?.map((q: any) => ({ content: q })) || [] + } + + /** + * Check if the model is valid + * @param model - The model + * @returns The validity of the model + */ + public async check(model: Model): Promise<{ valid: boolean; error: Error | null }> { + if (!model) { + return { valid: false, error: new Error('No model found') } + } + const body = { + model: model.id, + messages: [{ role: 'user', content: 'hi' }], + stream: false + } + + try { + await this.checkIsCopilot() + const response = await this.sdk.chat.completions.create(body as 
ChatCompletionCreateParamsNonStreaming) + + return { + valid: Boolean(response?.choices[0].message), + error: null + } + } catch (error: any) { + return { + valid: false, + error + } + } + } + + /** + * Get the models + * @returns The models + */ + public async models(): Promise { + try { + await this.checkIsCopilot() + + const response = await this.sdk.models.list() + + if (this.provider.id === 'github') { + // @ts-ignore key is not typed + return response.body + .map((model) => ({ + id: model.name, + description: model.summary, + object: 'model', + owned_by: model.publisher + })) + .filter(isSupportedModel) + } + + if (this.provider.id === 'together') { + // @ts-ignore key is not typed + return response?.body + .map((model: any) => ({ + id: model.id, + description: model.display_name, + object: 'model', + owned_by: model.organization + })) + .filter(isSupportedModel) + } + + const models = response?.data || [] + + return models.filter(isSupportedModel) + } catch (error) { + return [] + } + } + + /** + * Generate an image + * @param params - The parameters + * @returns The generated image + */ + public async generateImage({ + model, + prompt, + negativePrompt, + imageSize, + batchSize, + seed, + numInferenceSteps, + guidanceScale, + signal, + promptEnhancement + }: GenerateImageParams): Promise { + const response = (await this.sdk.request({ + method: 'post', + path: '/images/generations', + signal, + body: { + model, + prompt, + negative_prompt: negativePrompt, + image_size: imageSize, + batch_size: batchSize, + seed: seed ? parseInt(seed) : undefined, + num_inference_steps: numInferenceSteps, + guidance_scale: guidanceScale, + prompt_enhancement: promptEnhancement + } + })) as { data: Array<{ url: string }> } + + return response.data.map((item) => item.url) + } + + /** + * Get the embedding dimensions + * @param model - The model + * @returns The embedding dimensions + */ + public async getEmbeddingDimensions(model: Model): Promise { + await this.checkIsCopilot() + + const data = await this.sdk.embeddings.create({ + model: model.id, + input: model?.provider === 'baidu-cloud' ? ['hi'] : 'hi' + }) + return data.data[0].embedding.length + } + + public async checkIsCopilot() { + if (this.provider.id !== 'copilot') return + const defaultHeaders = store.getState().copilot.defaultHeaders + // copilot每次请求前需要重新获取token,因为token中附带时间戳 + const { token } = await window.api.copilot.getToken(defaultHeaders) + this.sdk.apiKey = token + } +} diff --git a/electron.vite.config.1745374096634.mjs b/electron.vite.config.1745374096634.mjs new file mode 100644 index 0000000000..07a99a2c41 --- /dev/null +++ b/electron.vite.config.1745374096634.mjs @@ -0,0 +1,98 @@ +// electron.vite.config.ts +import react from "@vitejs/plugin-react"; +import { defineConfig, externalizeDepsPlugin } from "electron-vite"; +import { resolve } from "path"; +import { visualizer } from "rollup-plugin-visualizer"; +var visualizerPlugin = (type) => { + return process.env[`VISUALIZER_${type.toUpperCase()}`] ? 
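// The bundle visualizer is opt-in per build target, e.g. VISUALIZER_MAIN=1 or VISUALIZER_RENDERER=1 in the environment.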
[visualizer({ open: true })] : []; +}; +var electron_vite_config_default = defineConfig({ + main: { + plugins: [ + externalizeDepsPlugin({ + exclude: [ + "@cherrystudio/embedjs", + "@cherrystudio/embedjs-openai", + "@cherrystudio/embedjs-loader-web", + "@cherrystudio/embedjs-loader-markdown", + "@cherrystudio/embedjs-loader-msoffice", + "@cherrystudio/embedjs-loader-xml", + "@cherrystudio/embedjs-loader-pdf", + "@cherrystudio/embedjs-loader-sitemap", + "@cherrystudio/embedjs-libsql", + "@cherrystudio/embedjs-loader-image", + "p-queue", + "webdav" + ] + }), + ...visualizerPlugin("main") + ], + resolve: { + alias: { + "@main": resolve("src/main"), + "@types": resolve("src/renderer/src/types"), + "@shared": resolve("packages/shared") + } + }, + build: { + rollupOptions: { + external: ["@libsql/client"] + } + } + }, + preload: { + plugins: [externalizeDepsPlugin()], + resolve: { + alias: { + "@shared": resolve("packages/shared") + } + } + }, + renderer: { + plugins: [ + react({ + babel: { + plugins: [ + [ + "styled-components", + { + displayName: true, + // 开发环境下启用组件名称 + fileName: false, + // 不在类名中包含文件名 + pure: true, + // 优化性能 + ssr: false + // 不需要服务端渲染 + } + ] + ] + } + }), + ...visualizerPlugin("renderer") + ], + resolve: { + alias: { + "@renderer": resolve("src/renderer/src"), + "@shared": resolve("packages/shared") + } + }, + optimizeDeps: { + exclude: [] + }, + build: { + rollupOptions: { + input: { + index: resolve("src/renderer/index.html") + } + }, + // 复制ASR服务器文件 + assetsInlineLimit: 0, + // 确保复制assets目录下的所有文件 + copyPublicDir: true + } + } +}); +export { + electron_vite_config_default as default +}; diff --git a/package.json b/package.json index 479a955aad..542cf4df5a 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "CherryStudio", - "version": "1.2.5-bate", + "version": "1.2.6-bate", "private": true, "description": "A powerful AI assistant for producer.", "main": "./out/main/index.js", @@ -44,7 +44,12 @@ "typecheck:node": "tsc --noEmit -p tsconfig.node.json --composite false", "typecheck:web": "tsc --noEmit -p tsconfig.web.json --composite false", "check:i18n": "node scripts/check-i18n.js", - "test": "npx -y tsx --test src/**/*.test.ts", + "test": "yarn test:renderer", + "test:coverage": "yarn test:renderer:coverage", + "test:node": "npx -y tsx --test src/**/*.test.ts", + "test:renderer": "vitest", + "test:renderer:ui": "vitest --ui", + "test:renderer:coverage": "vitest run --coverage", "format": "prettier --write .", "lint": "eslint . 
--ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix", "postinstall": "electron-builder install-app-deps", @@ -186,6 +191,8 @@ "@types/react-window": "^1", "@types/tinycolor2": "^1", "@vitejs/plugin-react": "^4.3.4", + "@vitest/coverage-v8": "^3.1.1", + "@vitest/ui": "^3.1.1", "analytics": "^0.8.16", "antd": "^5.22.5", "applescript": "^1.0.0", @@ -250,7 +257,8 @@ "tokenx": "^0.4.1", "typescript": "^5.6.2", "uuid": "^10.0.0", - "vite": "^5.0.12" + "vite": "^5.0.12", + "vitest": "^3.1.1" }, "resolutions": { "pdf-parse@npm:1.1.1": "patch:pdf-parse@npm%3A1.1.1#~/.yarn/patches/pdf-parse-npm-1.1.1-04a6109b2a.patch", diff --git a/src/main/mcpServers/factory.ts b/src/main/mcpServers/factory.ts index 560e24731f..9bda831153 100644 --- a/src/main/mcpServers/factory.ts +++ b/src/main/mcpServers/factory.ts @@ -7,6 +7,7 @@ import FileSystemServer from './filesystem' import MemoryServer from './memory' import ThinkingServer from './sequentialthinking' import SimpleRememberServer from './simpleremember' +import TimeToolsServer from './timetools' import { WorkspaceFileToolServer } from './workspacefile' export async function createInMemoryMCPServer( @@ -62,6 +63,17 @@ export async function createInMemoryMCPServer( return new WorkspaceFileToolServer(workspacePath).server } + case '@cherry/timetools': { + Logger.info('[MCP] Creating TimeToolsServer instance') + try { + const server = new TimeToolsServer().server + Logger.info('[MCP] TimeToolsServer instance created successfully') + return server + } catch (error) { + Logger.error('[MCP] Error creating TimeToolsServer instance:', error) + throw error + } + } default: throw new Error(`Unknown in-memory MCP server: ${name}`) } diff --git a/src/main/mcpServers/timetools.ts b/src/main/mcpServers/timetools.ts new file mode 100644 index 0000000000..ff88e771bf --- /dev/null +++ b/src/main/mcpServers/timetools.ts @@ -0,0 +1,208 @@ +// src/main/mcpServers/timetools.ts +import { Server } from '@modelcontextprotocol/sdk/server/index.js' +import { + CallToolRequestSchema, + ErrorCode, + ListToolsRequestSchema, + McpError +} from '@modelcontextprotocol/sdk/types.js' +import Logger from 'electron-log' + +// 定义时间工具 +const GET_CURRENT_TIME_TOOL = { + name: 'get_current_time', + description: '获取当前系统时间,返回格式化的日期和时间信息', + inputSchema: { + type: 'object', + title: 'GetCurrentTimeInput', + description: '获取当前时间的输入参数', + properties: { + format: { + type: 'string', + description: '时间格式,可选值:full(完整格式)、date(仅日期)、time(仅时间)、iso(ISO格式),默认为full', + enum: ['full', 'date', 'time', 'iso'] + }, + timezone: { + type: 'string', + description: '时区,例如:Asia/Shanghai,默认为系统本地时区' + } + } + } +} + +// 时间工具服务器类 +class TimeToolsServer { + public server: Server + + constructor() { + Logger.info('[TimeTools] Creating server') + + // 初始化服务器 + this.server = new Server( + { + name: 'time-tools-server', + version: '1.0.0' + }, + { + capabilities: { + tools: { + // 按照MCP规范声明工具能力 + listChanged: true + } + } + } + ) + + Logger.info('[TimeTools] Server initialized with tools capability') + this.setupRequestHandlers() + Logger.info('[TimeTools] Server initialization complete') + } + + // 设置请求处理程序 + setupRequestHandlers() { + // 列出工具 + this.server.setRequestHandler(ListToolsRequestSchema, async () => { + Logger.info('[TimeTools] Listing tools request received') + return { + tools: [GET_CURRENT_TIME_TOOL] + } + }) + + // 处理工具调用 + this.server.setRequestHandler(CallToolRequestSchema, async (request) => { + const { name, arguments: args } = request.params + + Logger.info(`[TimeTools] Tool call received: ${name}`, args) + + 
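// Illustrative call shape handled below (only get_current_time is registered), e.g.
//   { name: 'get_current_time', arguments: { format: 'iso', timezone: 'Asia/Shanghai' } }
// The handler replies with a JSON text block containing currentTime, timestamp, timezone and format.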
try { + if (name === 'get_current_time') { + return this.handleGetCurrentTime(args) + } + + Logger.error(`[TimeTools] Unknown tool: ${name}`) + throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${name}`) + } catch (error) { + Logger.error(`[TimeTools] Error handling tool call ${name}:`, error) + return { + content: [ + { + type: 'text', + text: error instanceof Error ? error.message : String(error) + } + ], + isError: true + } + } + }) + } + + // 处理获取当前时间的工具调用 + private handleGetCurrentTime(args: any) { + Logger.info('[TimeTools] Handling get_current_time', args) + + const format = args?.format || 'full' + const timezone = args?.timezone || undefined + + const now = new Date() + let formattedTime = '' + + try { + // 根据请求的格式返回时间 + switch (format) { + case 'date': + formattedTime = this.formatDate(now, timezone) + break + case 'time': + formattedTime = this.formatTime(now, timezone) + break + case 'iso': + formattedTime = now.toISOString() + break + case 'full': + default: + formattedTime = this.formatFull(now, timezone) + break + } + + // 构建完整的响应对象 + const response = { + currentTime: formattedTime, + timestamp: now.getTime(), + timezone: timezone || Intl.DateTimeFormat().resolvedOptions().timeZone, + format: format + } + + Logger.info('[TimeTools] Current time response:', response) + + return { + content: [ + { + type: 'text', + text: JSON.stringify(response, null, 2) + } + ], + isError: false + } + } catch (error) { + Logger.error('[TimeTools] Error formatting time:', error) + throw new McpError( + ErrorCode.InternalError, + `Error formatting time: ${error instanceof Error ? error.message : String(error)}` + ) + } + } + + // 格式化完整日期和时间 + private formatFull(date: Date, timezone?: string): string { + const options: Intl.DateTimeFormatOptions = { + year: 'numeric', + month: 'long', + day: 'numeric', + weekday: 'long', + hour: 'numeric', + minute: 'numeric', + second: 'numeric', + timeZoneName: 'short' + } + + if (timezone) { + options.timeZone = timezone + } + + return new Intl.DateTimeFormat('zh-CN', options).format(date) + } + + // 仅格式化日期 + private formatDate(date: Date, timezone?: string): string { + const options: Intl.DateTimeFormatOptions = { + year: 'numeric', + month: 'long', + day: 'numeric', + weekday: 'long' + } + + if (timezone) { + options.timeZone = timezone + } + + return new Intl.DateTimeFormat('zh-CN', options).format(date) + } + + // 仅格式化时间 + private formatTime(date: Date, timezone?: string): string { + const options: Intl.DateTimeFormatOptions = { + hour: 'numeric', + minute: 'numeric', + second: 'numeric', + timeZoneName: 'short' + } + + if (timezone) { + options.timeZone = timezone + } + + return new Intl.DateTimeFormat('zh-CN', options).format(date) + } +} + +export default TimeToolsServer diff --git a/src/main/services/AxiosProxy.ts b/src/main/services/AxiosProxy.ts index bdac92bd7c..3472cc79fb 100644 --- a/src/main/services/AxiosProxy.ts +++ b/src/main/services/AxiosProxy.ts @@ -1,25 +1,28 @@ import { AxiosInstance, default as axios_ } from 'axios' +import { ProxyAgent } from 'proxy-agent' import { proxyManager } from './ProxyManager' class AxiosProxy { - private cacheAxios: AxiosInstance | undefined - private proxyURL: string | undefined + private cacheAxios: AxiosInstance | null = null + private proxyAgent: ProxyAgent | null = null get axios(): AxiosInstance { - const currentProxyURL = proxyManager.getProxyUrl() - if (this.proxyURL !== currentProxyURL) { - this.proxyURL = currentProxyURL - const agent = proxyManager.getProxyAgent() + // 获取当前代理代理 + const 
currentProxyAgent = proxyManager.getProxyAgent() + + // 如果代理发生变化或尚未初始化,则重新创建 axios 实例 + if (this.cacheAxios === null || (currentProxyAgent !== null && this.proxyAgent !== currentProxyAgent)) { + this.proxyAgent = currentProxyAgent + + // 创建带有代理配置的 axios 实例 this.cacheAxios = axios_.create({ proxy: false, - ...(agent && { httpAgent: agent, httpsAgent: agent }) + httpAgent: currentProxyAgent || undefined, + httpsAgent: currentProxyAgent || undefined }) } - if (this.cacheAxios === undefined) { - this.cacheAxios = axios_.create({ proxy: false }) - } return this.cacheAxios } } diff --git a/src/renderer/src/App.tsx b/src/renderer/src/App.tsx index 539b3e8a38..111bb2fd28 100644 --- a/src/renderer/src/App.tsx +++ b/src/renderer/src/App.tsx @@ -2,29 +2,19 @@ import '@renderer/databases' import store, { persistor } from '@renderer/store' import { Provider } from 'react-redux' -import { HashRouter, Route, Routes } from 'react-router-dom' import { PersistGate } from 'redux-persist/integration/react' -import Sidebar from './components/app/Sidebar' import DeepClaudeProvider from './components/DeepClaudeProvider' import MemoryProvider from './components/MemoryProvider' import PDFSettingsInitializer from './components/PDFSettingsInitializer' +import WebSearchInitializer from './components/WebSearchInitializer' import WorkspaceInitializer from './components/WorkspaceInitializer' import TopViewContainer from './components/TopView' import AntdProvider from './context/AntdProvider' import StyleSheetManager from './context/StyleSheetManager' import { SyntaxHighlighterProvider } from './context/SyntaxHighlighterProvider' import { ThemeProvider } from './context/ThemeProvider' -import NavigationHandler from './handler/NavigationHandler' -import AgentsPage from './pages/agents/AgentsPage' -import AppsPage from './pages/apps/AppsPage' -import FilesPage from './pages/files/FilesPage' -import HomePage from './pages/home/HomePage' -import KnowledgePage from './pages/knowledge/KnowledgePage' -import PaintingsPage from './pages/paintings/PaintingsPage' -import SettingsPage from './pages/settings/SettingsPage' -import TranslatePage from './pages/translate/TranslatePage' -import WorkspacePage from './pages/workspace' +import RouterComponent from './router/RouterConfig' function App(): React.ReactElement { return ( @@ -37,23 +27,10 @@ function App(): React.ReactElement { + - - - - - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - } /> - - + diff --git a/src/renderer/src/components/MemoryProvider.tsx b/src/renderer/src/components/MemoryProvider.tsx index 54b3e6103d..75c4a89d92 100644 --- a/src/renderer/src/components/MemoryProvider.tsx +++ b/src/renderer/src/components/MemoryProvider.tsx @@ -1,6 +1,7 @@ import { useMemoryService } from '@renderer/services/MemoryService' import { useAppDispatch, useAppSelector } from '@renderer/store' import store from '@renderer/store' +import { createSelector } from '@reduxjs/toolkit' import { clearShortMemories, loadLongTermMemoryData, @@ -43,14 +44,28 @@ const MemoryProvider: FC = ({ children }) => { const analyzeModel = useAppSelector((state) => state.memory?.analyzeModel || null) const shortMemoryActive = useAppSelector((state) => state.memory?.shortMemoryActive || false) - // 获取当前对话 - const currentTopic = useAppSelector((state) => state.messages?.currentTopic?.id) - const messages = useAppSelector((state) => { - if (!currentTopic || !state.messages?.messagesByTopic) { - return [] + // 创建记忆化选择器 + const selectCurrentTopicId = createSelector( + [(state) => 
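// Memoized with createSelector so useAppSelector gets stable results; the inline selectors this replaces could return a fresh array reference on every store update.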
state.messages?.currentTopic?.id], + (topicId) => topicId + ) + + const selectMessagesForTopic = createSelector( + [ + (state) => state.messages?.messagesByTopic, + (_state, topicId) => topicId + ], + (messagesByTopic, topicId) => { + if (!topicId || !messagesByTopic) { + return [] + } + return messagesByTopic[topicId] || [] } - return state.messages.messagesByTopic[currentTopic] || [] - }) + ) + + // 获取当前对话 + const currentTopic = useAppSelector(selectCurrentTopicId) + const messages = useAppSelector((state) => selectMessagesForTopic(state, currentTopic)) // 存储上一次的话题ID const previousTopicRef = useRef(null) diff --git a/src/renderer/src/components/MinApp/WebviewContainer.tsx b/src/renderer/src/components/MinApp/WebviewContainer.tsx index 3d9eadc279..4b879a1f9f 100644 --- a/src/renderer/src/components/MinApp/WebviewContainer.tsx +++ b/src/renderer/src/components/MinApp/WebviewContainer.tsx @@ -60,6 +60,9 @@ const WebviewContainer = memo( // eslint-disable-next-line react-hooks/exhaustive-deps }, [appid, url]) + //remove the tag of CherryStudio and Electron + const userAgent = navigator.userAgent.replace(/CherryStudio\/\S+\s/, '').replace(/Electron\/\S+\s/, '') + return ( ) } diff --git a/src/renderer/src/components/ObsidianExportDialog.tsx b/src/renderer/src/components/ObsidianExportDialog.tsx index 857d6be120..739b51003a 100644 --- a/src/renderer/src/components/ObsidianExportDialog.tsx +++ b/src/renderer/src/components/ObsidianExportDialog.tsx @@ -131,6 +131,8 @@ const ObsidianExportDialog: React.FC = ({ folder: '' }) + // 是否手动编辑过标题 + const [hasTitleBeenManuallyEdited, setHasTitleBeenManuallyEdited] = useState(false) const [vaults, setVaults] = useState>([]) const [files, setFiles] = useState([]) const [fileTreeData, setFileTreeData] = useState([]) @@ -255,6 +257,12 @@ const ObsidianExportDialog: React.FC = ({ setState((prevState) => ({ ...prevState, [key]: value })) } + // 处理title输入变化 + const handleTitleInputChange = (newTitle: string) => { + handleChange('title', newTitle) + setHasTitleBeenManuallyEdited(true) + } + const handleVaultChange = (value: string) => { setSelectedVault(value) // 文件夹会通过useEffect自动获取 @@ -278,11 +286,17 @@ const ObsidianExportDialog: React.FC = ({ const fileName = selectedFile.name const titleWithoutExt = fileName.endsWith('.md') ? 
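// Drop the trailing ".md" so the note title matches the selected file's base name.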
fileName.substring(0, fileName.length - 3) : fileName handleChange('title', titleWithoutExt) + // 重置手动编辑标记,因为这是非用户设置的title + setHasTitleBeenManuallyEdited(false) handleChange('processingMethod', '1') } else { // 如果是文件夹,自动设置标题为话题名并设置处理方式为3(新建) handleChange('processingMethod', '3') - handleChange('title', title) + // 仅当用户未手动编辑过 title 时,才将其重置为 props.title + if (!hasTitleBeenManuallyEdited) { + // title 是 props.title + handleChange('title', title) + } } } } @@ -309,7 +323,7 @@ const ObsidianExportDialog: React.FC = ({ handleChange('title', e.target.value)} + onChange={(e) => handleTitleInputChange(e.target.value)} placeholder={i18n.t('chat.topics.export.obsidian_title_placeholder')} /> diff --git a/src/renderer/src/components/WebSearchInitializer.tsx b/src/renderer/src/components/WebSearchInitializer.tsx new file mode 100644 index 0000000000..1d9ee19f13 --- /dev/null +++ b/src/renderer/src/components/WebSearchInitializer.tsx @@ -0,0 +1,20 @@ +import { useEffect } from 'react' +import WebSearchService from '@renderer/services/WebSearchService' + +/** + * 初始化WebSearch服务的组件 + * 确保DeepSearch供应商被添加到列表中 + */ +const WebSearchInitializer = () => { + useEffect(() => { + // 触发WebSearchService的初始化 + // 这将确保DeepSearch供应商被添加到列表中 + WebSearchService.getWebSearchProvider() + console.log('[WebSearchInitializer] 初始化WebSearch服务') + }, []) + + // 这个组件不渲染任何内容 + return null +} + +export default WebSearchInitializer diff --git a/src/renderer/src/components/WorkspaceFileViewer/index.tsx b/src/renderer/src/components/WorkspaceFileViewer/index.tsx index 980fc8a3e1..8e16f4d2a9 100644 --- a/src/renderer/src/components/WorkspaceFileViewer/index.tsx +++ b/src/renderer/src/components/WorkspaceFileViewer/index.tsx @@ -25,7 +25,6 @@ import { v4 as uuidv4 } from 'uuid' import { useTheme } from '../../hooks/useTheme' const { Title } = Typography -const { TabPane } = Tabs // --- Styled Components Props Interfaces --- @@ -308,48 +307,56 @@ const WorkspaceFileViewer: React.FC = ({ - - {/* 代码 Tab: 使用 SyntaxHighlighter 或 TextArea */} - - - {isEditing ? ( - setEditedContent(e.target.value)} - isDark={!useInternalLightTheme} - ref={textAreaRef} - spellCheck={false} - /> - ) : ( - - {content} - - )} - - - - {/* 原始内容 Tab: 只使用 RawScrollContainer 显示纯文本 */} - - - {content} {/* 直接渲染文本内容,没有 SyntaxHighlighter */} - - - + + {isEditing ? 
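// items-based Tabs (antd v5 style): edit mode renders a plain textarea, read mode renders the highlighted file content; the raw tab below shows the untouched text.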
( + setEditedContent(e.target.value)} + isDark={!useInternalLightTheme} + ref={textAreaRef} + spellCheck={false} + /> + ) : ( + + {content} + + )} + + ) + }, + { + key: 'raw', + label: t('workspace.raw'), + children: ( + + {content} {/* 直接渲染文本内容,没有 SyntaxHighlighter */} + + ) + } + ]} + /> diff --git a/src/renderer/src/config/models.ts b/src/renderer/src/config/models.ts index 8bbaab5239..8aefe3cb94 100644 --- a/src/renderer/src/config/models.ts +++ b/src/renderer/src/config/models.ts @@ -1673,34 +1673,28 @@ export const SYSTEM_MODELS: Record = { ], openrouter: [ { - id: 'google/gemma-2-9b-it:free', + id: 'google/gemini-2.5-flash-preview', provider: 'openrouter', - name: 'Google: Gemma 2 9B', - group: 'Gemma' + name: 'Google: Gemini 2.5 Flash Preview', + group: 'google' }, { - id: 'microsoft/phi-3-mini-128k-instruct:free', + id: 'qwen/qwen-2.5-7b-instruct:free', provider: 'openrouter', - name: 'Phi-3 Mini 128K Instruct', - group: 'Phi' + name: 'Qwen: Qwen-2.5-7B Instruct', + group: 'qwen' }, { - id: 'microsoft/phi-3-medium-128k-instruct:free', + id: 'deepseek/deepseek-chat', provider: 'openrouter', - name: 'Phi-3 Medium 128K Instruct', - group: 'Phi' - }, - { - id: 'meta-llama/llama-3-8b-instruct:free', - provider: 'openrouter', - name: 'Meta: Llama 3 8B Instruct', - group: 'Llama3' + name: 'DeepSeek: V3', + group: 'deepseek' }, { id: 'mistralai/mistral-7b-instruct:free', provider: 'openrouter', name: 'Mistral: Mistral 7B Instruct', - group: 'Mistral' + group: 'mistralai' } ], groq: [ diff --git a/src/renderer/src/config/prompts.ts b/src/renderer/src/config/prompts.ts index fc303ba41a..1bd72cf78d 100644 --- a/src/renderer/src/config/prompts.ts +++ b/src/renderer/src/config/prompts.ts @@ -117,6 +117,11 @@ export const SEARCH_SUMMARY_PROMPT = ` If the user asks some question from some URL or wants you to summarize a PDF or a webpage (via URL) you need to return the links inside the \`links\` XML block and the question inside the \`question\` XML block. If the user wants to you to summarize the webpage or the PDF you need to return \`summarize\` inside the \`question\` XML block in place of a question and the link to summarize in the \`links\` XML block. You must always return the rephrased question inside the \`question\` XML block, if there are no links in the follow-up question then don't insert a \`links\` XML block in your response. + 4. Websearch: Always return the rephrased question inside the 'question' XML block. If there are no links in the follow-up question, do not insert a 'links' XML block in your response. + 5. Knowledge: Always return the rephrased question inside the 'question' XML block. + 6. Always wrap the rephrased question in the appropriate XML blocks to specify the tool(s) for retrieving information: use for queries requiring real-time or external information, for queries that can be answered from a pre-existing knowledge base, or both if the question could be applicable to either tool. Ensure that the rephrased question is always contained within a block inside these wrappers. + 7. 
*use {tools} to rephrase the question* + There are several examples attached for your reference inside the below \`examples\` XML block diff --git a/src/renderer/src/i18n/locales/en-us.json b/src/renderer/src/i18n/locales/en-us.json index cb9f36514f..65b19ffd4e 100644 --- a/src/renderer/src/i18n/locales/en-us.json +++ b/src/renderer/src/i18n/locales/en-us.json @@ -1881,6 +1881,7 @@ }, "error.failed": "Translation failed", "error.not_configured": "Translation model is not configured", + "success": "Translation successful", "history": { "clear": "Clear History", "clear_description": "Clear history will delete all translation history, continue?", diff --git a/src/renderer/src/i18n/locales/zh-cn.json b/src/renderer/src/i18n/locales/zh-cn.json index d4d429dbd1..c7581908f4 100644 --- a/src/renderer/src/i18n/locales/zh-cn.json +++ b/src/renderer/src/i18n/locales/zh-cn.json @@ -1988,6 +1988,7 @@ }, "error.failed": "翻译失败", "error.not_configured": "翻译模型未配置", + "success": "翻译成功", "history": { "clear": "清空历史", "clear_description": "清空历史将删除所有翻译历史记录,是否继续?", diff --git a/src/renderer/src/pages/home/Inputbar/AttachmentPreview.tsx b/src/renderer/src/pages/home/Inputbar/AttachmentPreview.tsx index 343e4f6a18..a6e2886cb1 100644 --- a/src/renderer/src/pages/home/Inputbar/AttachmentPreview.tsx +++ b/src/renderer/src/pages/home/Inputbar/AttachmentPreview.tsx @@ -26,12 +26,21 @@ interface Props { setFiles: (files: FileType[]) => void } +const MAX_FILENAME_DISPLAY_LENGTH = 20 +function truncateFileName(name: string, maxLength: number = MAX_FILENAME_DISPLAY_LENGTH) { + if (name.length <= maxLength) return name + return name.slice(0, maxLength - 3) + '...' +} + const FileNameRender: FC<{ file: FileType }> = ({ file }) => { const [visible, setVisible] = useState(false) const isImage = (ext: string) => { return ['.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp'].includes(ext) } + const fullName = FileManager.formatFileName(file) + const displayName = truncateFileName(fullName) + return ( = ({ file }) => { }} /> )} + {fullName} {formatFileSize(file.size)} }> @@ -66,8 +76,9 @@ const FileNameRender: FC<{ file: FileType }> = ({ file }) => { if (path) { window.api.file.openPath(path) } - }}> - {FileManager.formatFileName(file)} + }} + title={fullName}> + {displayName} ) @@ -157,4 +168,8 @@ const FileName = styled.span` } ` +const FileNameSpan = styled.span` + word-break: break-all; +` + export default AttachmentPreview diff --git a/src/renderer/src/pages/home/Markdown/Markdown.tsx b/src/renderer/src/pages/home/Markdown/Markdown.tsx index 261a06a31d..9ee1515628 100644 --- a/src/renderer/src/pages/home/Markdown/Markdown.tsx +++ b/src/renderer/src/pages/home/Markdown/Markdown.tsx @@ -1,6 +1,7 @@ import 'katex/dist/katex.min.css' import 'katex/dist/contrib/copy-tex' import 'katex/dist/contrib/mhchem' +import '@renderer/styles/translation.css' import MarkdownShadowDOMRenderer from '@renderer/components/MarkdownShadowDOMRenderer' import { useSettings } from '@renderer/hooks/useSettings' @@ -38,6 +39,11 @@ const Markdown: FC = ({ message }) => { const { renderInputMessageAsMarkdown, mathEngine } = useSettings() const messageContent = useMemo(() => { + // 检查消息内容是否为空或未定义 + if (message.content === undefined) { + return '' + } + const empty = isEmpty(message.content) const paused = message.status === 'paused' const content = empty && paused ? 
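// If generation was paused before any text arrived, show the localized "paused" placeholder; otherwise render the (possibly Gemini-grounded) message content.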
t('message.chat.completion.paused') : withGeminiGrounding(message) @@ -82,6 +88,19 @@ const Markdown: FC = ({ message }) => { {props.children} ) + }, + // 自定义处理translated标签 + translated: (props: any) => { + // 将translated标签渲染为可点击的span + return ( + window.toggleTranslation(e as unknown as MouseEvent)} + data-original={props.original} + data-language={props.language}> + {props.children} + + ) } // Removed custom div renderer for tool markers } as Partial // Keep Components type here @@ -89,7 +108,7 @@ const Markdown: FC = ({ message }) => { }, []) // Removed message.metadata dependency as it's no longer used here if (message.role === 'user' && !renderInputMessageAsMarkdown) { - return

{messageContent}

+ return

{messageContent}

} if (processedMessageContent.includes('