Merge branch 'main' into 1600822305-patch-2

# Conflicts:
#	package.json
#	yarn.lock
kangfenmao 2025-04-19 20:17:09 +08:00
commit f76076a0f9
114 changed files with 41948 additions and 136781 deletions

View File

@ -18,7 +18,9 @@ body:
options:
- label: 我理解 Issue 是用于反馈和解决问题的,而非吐槽评论区,将尽可能提供更多信息帮助问题解决。
required: true
- label: 我已经查看了置顶 Issue 并搜索了现有的 [开放Issue](https://github.com/CherryHQ/cherry-studio/issues)和[已关闭Issue](https://github.com/CherryHQ/cherry-studio/issues?q=is%3Aissue%20state%3Aclosed%20),没有找到类似的问题。
- label: 我的问题不是 [常见问题](https://github.com/CherryHQ/cherry-studio/issues/3881) 中的内容。
required: true
- label: 我已经查看了 **置顶 Issue** 并搜索了现有的 [开放Issue](https://github.com/CherryHQ/cherry-studio/issues)和[已关闭Issue](https://github.com/CherryHQ/cherry-studio/issues?q=is%3Aissue%20state%3Aclosed%20),没有找到类似的问题。
required: true
- label: 我填写了简短且清晰明确的标题,以便开发者在翻阅 Issue 列表时能快速确定大致问题。而不是“一个建议”、“卡住了”等。
required: true
@ -48,8 +50,8 @@ body:
id: description
attributes:
label: 错误描述
description: 描述问题时请尽可能详细
placeholder: 告诉我们发生了什么...
description: 描述问题时请尽可能详细。请尽可能提供截图或屏幕录制,以帮助我们更好地理解问题。
placeholder: 告诉我们发生了什么...(记得附上截图/录屏,如果适用)
validations:
required: true
@ -57,12 +59,14 @@ body:
id: reproduction
attributes:
label: 重现步骤
description: 提供详细的重现步骤,以便于我们可以准确地重现问题
description: 提供详细的重现步骤,以便于我们的开发人员可以准确地重现问题。请尽可能为每个步骤提供截图或屏幕录制。
placeholder: |
1. 转到 '...'
2. 点击 '....'
3. 向下滚动到 '....'
4. 看到错误
记得尽可能为每个步骤附上截图/录屏!
validations:
required: true

View File

@ -18,7 +18,9 @@ body:
options:
- label: I understand that issues are for feedback and problem solving, not for complaining in the comment section, and will provide as much information as possible to help solve the problem.
required: true
- label: I've looked at pinned issues and searched for existing [Open Issues](https://github.com/CherryHQ/cherry-studio/issues), [Closed Issues](https://github.com/CherryHQ/cherry-studio/issues?q=is%3Aissue%20state%3Aclosed), and [Discussions](https://github.com/CherryHQ/cherry-studio/discussions), no similar issue or discussion was found.
- label: My issue is not listed in the [FAQ](https://github.com/CherryHQ/cherry-studio/issues/3881).
required: true
- label: I've looked at **pinned issues** and searched for existing [Open Issues](https://github.com/CherryHQ/cherry-studio/issues), [Closed Issues](https://github.com/CherryHQ/cherry-studio/issues?q=is%3Aissue%20state%3Aclosed), and [Discussions](https://github.com/CherryHQ/cherry-studio/discussions), no similar issue or discussion was found.
required: true
- label: I've filled in short, clear headings so that developers can quickly identify a rough idea of what to expect when flipping through the list of issues. And not "a suggestion", "stuck", etc.
required: true

.github/workflows/issue-management.yml (vendored, new file, 39 lines)
View File

@ -0,0 +1,39 @@
name: "Stale Issue Management"
on:
schedule:
- cron: "0 0 * * *"
workflow_dispatch:
env:
daysBeforeStale: 30 # Number of days of inactivity before marking as stale
daysBeforeClose: 30 # Number of days to wait after marking as stale before closing
jobs:
stale:
if: github.repository_owner == 'CherryHQ'
runs-on: ubuntu-latest
permissions:
actions: write # Workaround for https://github.com/actions/stale/issues/1090
issues: write
# Completely disable stalling for PRs
pull-requests: none
contents: none
steps:
- name: Close inactive issues
uses: actions/stale@v9
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-stale: ${{ env.daysBeforeStale }}
days-before-close: ${{ env.daysBeforeClose }}
stale-issue-label: "inactive"
stale-issue-message: |
This issue has been inactive for a prolonged period and will be closed automatically in ${{ env.daysBeforeClose }} days.
该问题已长时间处于闲置状态,${{ env.daysBeforeClose }} 天后将自动关闭。
exempt-issue-labels: "pending, Dev Team, enhancement"
days-before-pr-stale: -1 # Completely disable stalling for PRs
days-before-pr-close: -1 # Completely disable closing for PRs
# Temporary to reduce the huge issues number
operations-per-run: 100
debug-only: false

View File

@ -95,9 +95,6 @@ jobs:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
RENDERER_VITE_AIHUBMIX_SECRET: ${{ vars.RENDERER_VITE_AIHUBMIX_SECRET }}
- name: Replace spaces in filenames
run: node scripts/replace-spaces.js
- name: Release
uses: ncipollo/release-action@v1
with:

File diff suppressed because one or more lines are too long

View File

@ -13,7 +13,7 @@
Cherry Studio is a desktop client that supports multiple LLM providers, available on Windows, Mac and Linux.
👏 Join [Telegram Group](https://t.me/CherryStudioAI)[Discord](https://discord.gg/wez8HtpxqQ) | [QQ Group(472019156)](https://qm.qq.com/q/CbZiBWwCXu)
👏 Join [Telegram Group](https://t.me/CherryStudioAI)[Discord](https://discord.gg/wez8HtpxqQ) | [QQ Group(575014769)](https://qm.qq.com/q/lo0D4qVZKi)
❤️ Like Cherry Studio? Give it a star 🌟 or [Sponsor](docs/sponsor.md) to support the development!
@ -86,9 +86,10 @@ https://docs.cherry-ai.com
- Theme Gallery: https://cherrycss.com
- Aero Theme: https://github.com/hakadao/CherryStudio-Aero
- PaperMaterial Theme: https://github.com/rainoffallingstar/CherryStudio-PaperMaterial
- PaperMaterial Theme: https://github.com/rainoffallingstar/CherryStudio-PaperMaterial
- Claude dynamic-style: https://github.com/bjl101501/CherryStudio-Claudestyle-dynamic
- Maple Neon Theme: https://github.com/BoningtonChen/CherryStudio_themes
Welcome PR for more themes
# 🖥️ Develop

View File

@ -14,7 +14,7 @@
Cherry Studio は、複数の LLM プロバイダーをサポートするデスクトップクライアントで、Windows、Mac、Linux で利用可能です。
👏 [Telegram](https://t.me/CherryStudioAI)[Discord](https://discord.gg/wez8HtpxqQ) | [QQグループ(472019156)](https://qm.qq.com/q/CbZiBWwCXu)
👏 [Telegram](https://t.me/CherryStudioAI)[Discord](https://discord.gg/wez8HtpxqQ) | [QQグループ(575014769)](https://qm.qq.com/q/lo0D4qVZKi)
❤️ Cherry Studio をお気に入りにしましたか?小さな星をつけてください 🌟 または [スポンサー](sponsor.md) をして開発をサポートしてください!❤️
@ -85,10 +85,11 @@ https://docs.cherry-ai.com
# 🌈 テーマ
テーマギャラリー: https://cherrycss.com
Aero テーマ: https://github.com/hakadao/CherryStudio-Aero
PaperMaterial テーマ: https://github.com/rainoffallingstar/CherryStudio-PaperMaterial
Claude テーマ: https://github.com/bjl101501/CherryStudio-Claudestyle-dynamic
- テーマギャラリー: https://cherrycss.com
- Aero テーマ: https://github.com/hakadao/CherryStudio-Aero
- PaperMaterial テーマ: https://github.com/rainoffallingstar/CherryStudio-PaperMaterial
- Claude テーマ: https://github.com/bjl101501/CherryStudio-Claudestyle-dynamic
- メープルネオンテーマ: https://github.com/BoningtonChen/CherryStudio_themes
より多くのテーマのPRを歓迎します

View File

@ -14,7 +14,7 @@
Cherry Studio 是一款支持多个大语言模型LLM服务商的桌面客户端兼容 Windows、Mac 和 Linux 系统。
👏 欢迎加入 [Telegram 群组](https://t.me/CherryStudioAI)[Discord](https://discord.gg/wez8HtpxqQ) | [QQ群(472019156)](https://qm.qq.com/q/CbZiBWwCXu)
👏 欢迎加入 [Telegram 群组](https://t.me/CherryStudioAI)[Discord](https://discord.gg/wez8HtpxqQ) | [QQ群(575014769)](https://qm.qq.com/q/lo0D4qVZKi)
❤️ 喜欢 Cherry Studio? 点亮小星星 🌟 或 [赞助开发者](sponsor.md)! ❤️
@ -85,10 +85,11 @@ https://docs.cherry-ai.com
# 🌈 主题
主题库https://cherrycss.com
Aero 主题https://github.com/hakadao/CherryStudio-Aero
PaperMaterial 主题: https://github.com/rainoffallingstar/CherryStudio-PaperMaterial
仿Claude 主题: https://github.com/bjl101501/CherryStudio-Claudestyle-dynamic
- 主题库https://cherrycss.com
- Aero 主题https://github.com/hakadao/CherryStudio-Aero
- PaperMaterial 主题: https://github.com/rainoffallingstar/CherryStudio-PaperMaterial
- 仿Claude 主题: https://github.com/bjl101501/CherryStudio-Claudestyle-dynamic
- 霓虹枫叶字体主题: https://github.com/BoningtonChen/CherryStudio_themes
欢迎 PR 更多主题

View File

@ -52,13 +52,7 @@ win:
artifactName: ${productName}-${version}-${arch}-setup.${ext}
target:
- target: nsis
arch:
- x64
- arm64
- target: portable
arch:
- x64
- arm64
nsis:
artifactName: ${productName}-${version}-${arch}-setup.${ext}
shortcutName: ${productName}
@ -67,6 +61,7 @@ nsis:
allowToChangeInstallationDirectory: true
oneClick: false
include: build/nsis-installer.nsh
buildUniversalInstaller: false
portable:
artifactName: ${productName}-${version}-${arch}-portable.${ext}
mac:
@ -80,20 +75,11 @@ mac:
- NSDownloadsFolderUsageDescription: Application requests access to the user's Downloads folder.
target:
- target: dmg
arch:
- arm64
- x64
- target: zip
arch:
- arm64
- x64
linux:
artifactName: ${productName}-${version}-${arch}.${ext}
target:
- target: AppImage
arch:
- arm64
- x64
maintainer: electronjs.org
category: Utility
publish:
@ -103,6 +89,7 @@ electronDownload:
mirror: https://npmmirror.com/mirrors/electron/
afterPack: scripts/after-pack.js
afterSign: scripts/notarize.js
artifactBuildCompleted: scripts/artifact-build-completed.js
releaseInfo:
releaseNotes: |
全新图标风格

View File

@ -1,4 +1,4 @@
import react from '@vitejs/plugin-react'
import viteReact from '@vitejs/plugin-react'
import { defineConfig, externalizeDepsPlugin } from 'electron-vite'
import { resolve } from 'path'
import { visualizer } from 'rollup-plugin-visualizer'
@ -6,7 +6,7 @@ import { visualizer } from 'rollup-plugin-visualizer'
const visualizerPlugin = (type: 'renderer' | 'main') => {
return process.env[`VISUALIZER_${type.toUpperCase()}`] ? [visualizer({ open: true })] : []
}
// const viteReact = await import('@vitejs/plugin-react')
export default defineConfig({
main: {
plugins: [
@ -51,7 +51,7 @@ export default defineConfig({
},
renderer: {
plugins: [
react({
viteReact({
babel: {
plugins: [
[

View File

@ -1,6 +1,6 @@
{
"name": "CherryStudio",
"version": "1.2.4",
"version": "1.2.5",
"private": true,
"description": "A powerful AI assistant for producer.",
"main": "./out/main/index.js",
@ -23,13 +23,13 @@
"build": "npm run typecheck && electron-vite build",
"build:check": "yarn test && yarn typecheck && yarn check:i18n",
"build:unpack": "dotenv npm run build && electron-builder --dir",
"build:win": "dotenv npm run build && electron-builder --win && node scripts/after-build.js",
"build:win": "dotenv npm run build && electron-builder --win --x64 --arm64",
"build:win:x64": "dotenv npm run build && electron-builder --win --x64",
"build:win:arm64": "dotenv npm run build && electron-builder --win --arm64",
"build:mac": "dotenv electron-vite build && electron-builder --mac",
"build:mac": "dotenv electron-vite build && electron-builder --mac --arm64 --x64",
"build:mac:arm64": "dotenv electron-vite build && electron-builder --mac --arm64",
"build:mac:x64": "dotenv electron-vite build && electron-builder --mac --x64",
"build:linux": "dotenv electron-vite build && electron-builder --linux",
"build:linux": "dotenv electron-vite build && electron-builder --linux --x64 --arm64",
"build:linux:arm64": "dotenv electron-vite build && electron-builder --linux --arm64",
"build:linux:x64": "dotenv electron-vite build && electron-builder --linux --x64",
"build:npm": "node scripts/build-npm.js",
@ -64,7 +64,6 @@
"@cherrystudio/embedjs-openai": "^0.1.28",
"@electron-toolkit/utils": "^3.0.0",
"@electron/notarize": "^2.5.0",
"@google/generative-ai": "^0.24.0",
"@langchain/community": "^0.3.36",
"@mozilla/readability": "^0.6.0",
"@notionhq/client": "^2.2.15",
@ -74,6 +73,7 @@
"@xyflow/react": "^12.4.4",
"adm-zip": "^0.5.16",
"async-mutex": "^0.5.0",
"bufferutil": "^4.0.9",
"color": "^5.0.0",
"diff": "^7.0.0",
"docx": "^9.0.2",
@ -83,11 +83,10 @@
"electron-updater": "^6.3.9",
"electron-window-state": "^5.0.3",
"epub": "patch:epub@npm%3A1.3.0#~/.yarn/patches/epub-npm-1.3.0-8325494ffe.patch",
"fast-xml-parser": "^5.0.9",
"fast-xml-parser": "^5.2.0",
"fetch-socks": "^1.3.2",
"fs-extra": "^11.2.0",
"got-scraping": "^4.1.1",
"js-yaml": "^4.1.0",
"jsdom": "^26.0.0",
"markdown-it": "^14.1.0",
"node-edge-tts": "^1.2.8",
@ -98,6 +97,7 @@
"turndown-plugin-gfm": "^1.0.2",
"undici": "^7.4.0",
"webdav": "^5.8.0",
"ws": "^8.18.1",
"zipread": "^1.3.3"
},
"devDependencies": {
@ -114,7 +114,7 @@
"@emotion/is-prop-valid": "^1.3.1",
"@eslint-react/eslint-plugin": "^1.36.1",
"@eslint/js": "^9.22.0",
"@google/genai": "^0.4.0",
"@google/genai": "patch:@google/genai@npm%3A0.8.0#~/.yarn/patches/@google-genai-npm-0.8.0-450d0d9a7d.patch",
"@hello-pangea/dnd": "^16.6.0",
"@kangfenmao/keyv-storage": "^0.1.0",
"@modelcontextprotocol/sdk": "^1.9.0",
@ -135,7 +135,8 @@
"@types/react-dom": "^19.0.4",
"@types/react-infinite-scroll-component": "^5.0.0",
"@types/tinycolor2": "^1",
"@vitejs/plugin-react": "^4.2.1",
"@types/ws": "^8",
"@vitejs/plugin-react": "4.3.4",
"analytics": "^0.8.16",
"antd": "^5.22.5",
"applescript": "^1.0.0",
@ -185,6 +186,7 @@
"rehype-katex": "^7.0.1",
"rehype-mathjax": "^7.0.0",
"rehype-raw": "^7.0.0",
"rehype-sanitize": "^6.0.0",
"remark-cjk-friendly": "^1.1.0",
"remark-gfm": "^4.0.0",
"remark-math": "^6.0.0",
@ -198,7 +200,7 @@
"tokenx": "^0.4.1",
"typescript": "^5.6.2",
"uuid": "^10.0.0",
"vite": "^5.0.12"
"vite": "6.2.6"
},
"resolutions": {
"pdf-parse@npm:1.1.1": "patch:pdf-parse@npm%3A1.1.1#~/.yarn/patches/pdf-parse-npm-1.1.1-04a6109b2a.patch",

View File

@ -12,6 +12,8 @@ export enum IpcChannel {
App_SetTrayOnClose = 'app:set-tray-on-close',
App_RestartTray = 'app:restart-tray',
App_SetTheme = 'app:set-theme',
App_SetCustomCss = 'app:set-custom-css',
App_SetAutoUpdate = 'app:set-auto-update',
App_IsBinaryExist = 'app:is-binary-exist',
App_GetBinaryPath = 'app:get-binary-path',
@ -139,6 +141,7 @@ export enum IpcChannel {
// system
System_GetDeviceType = 'system:getDeviceType',
System_GetHostname = 'system:getHostname',
// events
SelectionAction = 'selection-action',

View File

@ -1,72 +0,0 @@
const fs = require('fs')
const path = require('path')
const yaml = require('js-yaml')
async function renameFilesWithSpaces() {
const distPath = path.join('dist')
const files = fs.readdirSync(distPath, { withFileTypes: true })
// Only process files in the root of dist directory, not subdirectories
files.forEach((file) => {
if (file.isFile() && file.name.includes(' ')) {
const oldPath = path.join(distPath, file.name)
const newName = file.name.replace(/ /g, '-')
const newPath = path.join(distPath, newName)
fs.renameSync(oldPath, newPath)
console.log(`Renamed: ${file.name} -> ${newName}`)
}
})
}
async function afterBuild() {
console.log('[After build] hook started...')
try {
// Read the latest.yml file
const latestYmlPath = path.join('dist', 'latest.yml')
const yamlContent = fs.readFileSync(latestYmlPath, 'utf8')
const data = yaml.load(yamlContent)
// Remove the first element from files array
if (data.files && data.files.length > 1) {
const file = data.files.shift()
// Remove Cherry Studio-1.2.3-setup.exe
fs.rmSync(path.join('dist', file.url))
fs.rmSync(path.join('dist', file.url + '.blockmap'))
// Remove Cherry Studio-1.2.3-portable.exe
fs.rmSync(path.join('dist', file.url.replace('-setup', '-portable')))
// Update path and sha512 with the new first element's data
if (data.files[0]) {
data.path = data.files[0].url
data.sha512 = data.files[0].sha512
}
}
// Write back the modified YAML with specific dump options
const newYamlContent = yaml.dump(data, {
lineWidth: -1, // Prevent line wrapping
quotingType: '"', // Use double quotes when needed
forceQuotes: false, // Only quote when necessary
noCompatMode: true, // Use new style options
styles: {
'!!str': 'plain' // Force plain style for strings
}
})
fs.writeFileSync(latestYmlPath, newYamlContent, 'utf8')
// Rename files with spaces
await renameFilesWithSpaces()
console.log('Successfully cleaned up latest.yml data')
} catch (error) {
console.error('Error processing latest.yml:', error)
throw error
}
}
afterBuild()

View File

@ -1,8 +1,10 @@
const { Arch } = require('electron-builder')
const { default: removeLocales } = require('./remove-locales')
const fs = require('fs')
const path = require('path')
exports.default = async function (context) {
await removeLocales(context)
const platform = context.packager.platform.name
const arch = context.arch

View File

@ -0,0 +1,23 @@
const fs = require('fs')
exports.default = function (buildResult) {
try {
console.log('[artifact build completed] rename artifact file...')
if (!buildResult.file.includes(' ')) {
return
}
let oldFilePath = buildResult.file
if (oldFilePath.includes('-portable') && !oldFilePath.includes('-x64') && !oldFilePath.includes('-arm64')) {
console.log('[artifact build completed] delete portable file:', oldFilePath)
fs.unlinkSync(oldFilePath)
return
}
const newfilePath = oldFilePath.replace(/ /g, '-')
fs.renameSync(oldFilePath, newfilePath)
buildResult.file = newfilePath
console.log(`[artifact build completed] rename file ${oldFilePath} to ${newfilePath} `)
} catch (error) {
console.error('Error renaming file:', error)
}
}
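The hook above only renames artifacts whose paths contain spaces and drops the arch-less portable build. A minimal sketch of that decision logic, with illustrative file names that are assumptions rather than real build output:

```ts
// Sketch only, not part of this commit: the action the hook takes per artifact path.
function expectedAction(file: string): string {
  if (!file.includes(' ')) return 'keep as-is'
  if (file.includes('-portable') && !file.includes('-x64') && !file.includes('-arm64')) {
    return 'delete' // the arch-less portable build is removed; arch-specific ones survive
  }
  return `rename to ${file.replace(/ /g, '-')}`
}

console.log(expectedAction('Cherry Studio-1.2.5-x64-setup.exe'))
// -> rename to Cherry-Studio-1.2.5-x64-setup.exe
console.log(expectedAction('Cherry Studio-1.2.5-portable.exe'))
// -> delete
```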

scripts/remove-locales.js (new file, 58 lines)
View File

@ -0,0 +1,58 @@
const fs = require('fs')
const path = require('path')
exports.default = async function (context) {
const platform = context.packager.platform.name
// 根据平台确定 locales 目录位置
let resourceDirs = []
if (platform === 'mac') {
// macOS 的语言文件位置
resourceDirs = [
path.join(context.appOutDir, 'Cherry Studio.app', 'Contents', 'Resources'),
path.join(
context.appOutDir,
'Cherry Studio.app',
'Contents',
'Frameworks',
'Electron Framework.framework',
'Resources'
)
]
} else {
// Windows 和 Linux 的语言文件位置
resourceDirs = [path.join(context.appOutDir, 'locales')]
}
// 处理每个资源目录
for (const resourceDir of resourceDirs) {
if (!fs.existsSync(resourceDir)) {
console.log(`Resource directory not found: ${resourceDir}, skipping...`)
continue
}
// 读取所有文件和目录
const items = fs.readdirSync(resourceDir)
// 遍历并删除不需要的语言文件
for (const item of items) {
if (platform === 'mac') {
// 在 macOS 上检查 .lproj 目录
if (item.endsWith('.lproj') && !item.match(/^(en|zh|ru)/)) {
const dirPath = path.join(resourceDir, item)
fs.rmSync(dirPath, { recursive: true, force: true })
console.log(`Removed locale directory: ${item} from ${resourceDir}`)
}
} else {
// 其他平台处理 .pak 文件
if (!item.match(/^(en|zh|ru)/)) {
const filePath = path.join(resourceDir, item)
fs.unlinkSync(filePath)
console.log(`Removed locale file: ${item} from ${resourceDir}`)
}
}
}
}
console.log('Locale cleanup completed!')
}

View File

@ -1,58 +0,0 @@
// replaceSpaces.js
const fs = require('fs')
const path = require('path')
const directory = 'dist'
// 处理文件名中的空格
function replaceFileNames() {
fs.readdir(directory, (err, files) => {
if (err) throw err
files.forEach((file) => {
const oldPath = path.join(directory, file)
const newPath = path.join(directory, file.replace(/ /g, '-'))
fs.stat(oldPath, (err, stats) => {
if (err) throw err
if (stats.isFile() && oldPath !== newPath) {
fs.rename(oldPath, newPath, (err) => {
if (err) throw err
console.log(`Renamed: ${oldPath} -> ${newPath}`)
})
}
})
})
})
}
function replaceYmlContent() {
fs.readdir(directory, (err, files) => {
if (err) throw err
files.forEach((file) => {
if (path.extname(file).toLowerCase() === '.yml') {
const filePath = path.join(directory, file)
fs.readFile(filePath, 'utf8', (err, data) => {
if (err) throw err
// 替换内容
const newContent = data.replace(/Cherry Studio-/g, 'Cherry-Studio-')
// 写回文件
fs.writeFile(filePath, newContent, 'utf8', (err) => {
if (err) throw err
console.log(`Updated content in: ${filePath}`)
})
})
}
})
})
}
// 执行两个操作
replaceFileNames()
replaceYmlContent()

View File

@ -61,6 +61,10 @@ if (!app.requestSingleInstanceLock()) {
ipcMain.handle(IpcChannel.System_GetDeviceType, () => {
return process.platform === 'darwin' ? 'mac' : process.platform === 'win32' ? 'windows' : 'linux'
})
ipcMain.handle(IpcChannel.System_GetHostname, () => {
return require('os').hostname()
})
})
registerProtocolClient(app)

View File

@ -0,0 +1,14 @@
interface CreateOAuthUrlArgs {
app: string;
}
declare function createOAuthUrl({ app }: CreateOAuthUrlArgs): Promise<string>;
declare function _dont_use_in_prod_createOAuthUrl({ app, }: CreateOAuthUrlArgs): Promise<string>;
interface DecryptSecretArgs {
app: string;
s: string;
}
declare function decryptSecret({ app, s }: DecryptSecretArgs): Promise<string>;
declare function _dont_use_in_prod_decryptSecret({ app, s, }: DecryptSecretArgs): Promise<string>;
export { type CreateOAuthUrlArgs, type DecryptSecretArgs, _dont_use_in_prod_createOAuthUrl, _dont_use_in_prod_decryptSecret, createOAuthUrl, decryptSecret };

View File

@ -1,8 +0,0 @@
declare function decrypt(app: string, s: string): string
interface Secret {
app: string
}
declare function createOAuthUrl(secret: Secret): string
export { type Secret, createOAuthUrl, decrypt }

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

View File

@ -102,6 +102,11 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
configManager.setTrayOnClose(isActive)
})
// auto update
ipcMain.handle(IpcChannel.App_SetAutoUpdate, (_, isActive: boolean) => {
configManager.setAutoUpdate(isActive)
})
ipcMain.handle(IpcChannel.App_RestartTray, () => TrayService.getInstance().restartTray())
ipcMain.handle(IpcChannel.Config_Set, (_, key: string, value: any) => {
@ -132,6 +137,22 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
mainWindow.setTitleBarOverlay(theme === 'dark' ? titleBarOverlayDark : titleBarOverlayLight)
})
// custom css
ipcMain.handle(IpcChannel.App_SetCustomCss, (event, css: string) => {
if (css === configManager.getCustomCss()) return
configManager.setCustomCss(css)
// Broadcast to all windows including the mini window
const senderWindowId = event.sender.id
const windows = BrowserWindow.getAllWindows()
// 向其他窗口广播主题变化
windows.forEach((win) => {
if (win.webContents.id !== senderWindowId) {
win.webContents.send('custom-css:update', css)
}
})
})
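The consumer of this 'custom-css:update' broadcast is not part of this excerpt. A hedged sketch of what a renderer-side subscriber could look like, assuming the ipcRenderer bridge exposed by @electron-toolkit/preload; the element id and listener shape are illustrative assumptions, not code from this commit:

```ts
// Hypothetical renderer-side listener (not in this commit): keep a <style> element
// in sync with CSS broadcast from the main process.
const styleEl = document.createElement('style')
styleEl.id = 'user-custom-css' // assumed id, purely illustrative
document.head.appendChild(styleEl)

window.electron.ipcRenderer.on('custom-css:update', (_event, css: string) => {
  styleEl.textContent = css
})
```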
// clear cache
ipcMain.handle(IpcChannel.App_ClearCache, async () => {
const sessions = [session.defaultSession, session.fromPartition('persist:webview')]

View File

@ -1,6 +1,6 @@
import { ExtractChunkData } from '@cherrystudio/embedjs-interfaces'
import AxiosProxy from '@main/services/AxiosProxy'
import { KnowledgeBaseParams } from '@types'
import axios from 'axios'
import BaseReranker from './BaseReranker'
@ -20,7 +20,7 @@ export default class JinaReranker extends BaseReranker {
}
try {
const { data } = await axios.post(url, requestBody, { headers: this.defaultHeaders() })
const { data } = await AxiosProxy.axios.post(url, requestBody, { headers: this.defaultHeaders() })
const rerankResults = data.results
return this.getRerankResult(searchResults, rerankResults)

View File

@ -1,6 +1,6 @@
import type { ExtractChunkData } from '@cherrystudio/embedjs-interfaces'
import axiosProxy from '@main/services/AxiosProxy'
import { KnowledgeBaseParams } from '@types'
import axios from 'axios'
import BaseReranker from './BaseReranker'
@ -22,7 +22,7 @@ export default class SiliconFlowReranker extends BaseReranker {
}
try {
const { data } = await axios.post(url, requestBody, { headers: this.defaultHeaders() })
const { data } = await axiosProxy.axios.post(url, requestBody, { headers: this.defaultHeaders() })
const rerankResults = data.results
return this.getRerankResult(searchResults, rerankResults)

View File

@ -1,6 +1,6 @@
import { ExtractChunkData } from '@cherrystudio/embedjs-interfaces'
import axiosProxy from '@main/services/AxiosProxy'
import { KnowledgeBaseParams } from '@types'
import axios from 'axios'
import BaseReranker from './BaseReranker'
@ -22,7 +22,7 @@ export default class VoyageReranker extends BaseReranker {
}
try {
const { data } = await axios.post(url, requestBody, {
const { data } = await axiosProxy.axios.post(url, requestBody, {
headers: {
...this.defaultHeaders()
}

View File

@ -5,6 +5,7 @@ import logger from 'electron-log'
import { AppUpdater as _AppUpdater, autoUpdater } from 'electron-updater'
import icon from '../../../build/icon.png?asset'
import { configManager } from './ConfigManager'
export default class AppUpdater {
autoUpdater: _AppUpdater = autoUpdater
@ -15,7 +16,8 @@ export default class AppUpdater {
autoUpdater.logger = logger
autoUpdater.forceDevUpdateConfig = !app.isPackaged
autoUpdater.autoDownload = true
autoUpdater.autoDownload = configManager.getAutoUpdate()
autoUpdater.autoInstallOnAppQuit = configManager.getAutoUpdate()
// 检测下载错误
autoUpdater.on('error', (error) => {

View File

@ -0,0 +1,27 @@
import { AxiosInstance, default as axios_ } from 'axios'
import { proxyManager } from './ProxyManager'
class AxiosProxy {
private cacheAxios: AxiosInstance | undefined
private proxyURL: string | undefined
get axios(): AxiosInstance {
const currentProxyURL = proxyManager.getProxyUrl()
if (this.proxyURL !== currentProxyURL) {
this.proxyURL = currentProxyURL
const agent = proxyManager.getProxyAgent()
this.cacheAxios = axios_.create({
proxy: false,
...(agent && { httpAgent: agent, httpsAgent: agent })
})
}
if (this.cacheAxios === undefined) {
this.cacheAxios = axios_.create({ proxy: false })
}
return this.cacheAxios
}
}
export default new AxiosProxy()

View File

@ -14,7 +14,8 @@ enum ConfigKeys {
ZoomFactor = 'ZoomFactor',
Shortcuts = 'shortcuts',
ClickTrayToShowQuickAssistant = 'clickTrayToShowQuickAssistant',
EnableQuickAssistant = 'enableQuickAssistant'
EnableQuickAssistant = 'enableQuickAssistant',
AutoUpdate = 'autoUpdate'
}
export class ConfigManager {
@ -42,6 +43,14 @@ export class ConfigManager {
this.set(ConfigKeys.Theme, theme)
}
getCustomCss(): string {
return this.store.get('customCss', '') as string
}
setCustomCss(css: string) {
this.store.set('customCss', css)
}
getLaunchToTray(): boolean {
return !!this.get(ConfigKeys.LaunchToTray, false)
}
@ -128,6 +137,14 @@ export class ConfigManager {
this.set(ConfigKeys.EnableQuickAssistant, value)
}
getAutoUpdate(): boolean {
return this.get<boolean>(ConfigKeys.AutoUpdate, true)
}
setAutoUpdate(value: boolean) {
this.set(ConfigKeys.AutoUpdate, value)
}
set(key: string, value: unknown) {
this.store.set(key, value)
}
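Combined with the new App_SetAutoUpdate channel and the AppUpdater change above, a settings control in the renderer could persist this flag as sketched below. Only window.api.setAutoUpdate comes from this commit; the handler itself is an assumption:

```ts
// Hypothetical settings handler (not in this commit). AppUpdater reads
// configManager.getAutoUpdate() when it is constructed, so a changed value
// takes effect on the next launch rather than immediately.
function onAutoUpdateToggled(enabled: boolean): void {
  // Preload bridge added in this commit -> IpcChannel.App_SetAutoUpdate -> ConfigManager
  window.api.setAutoUpdate(enabled)
}
```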

View File

@ -1,8 +1,10 @@
import axios, { AxiosRequestConfig } from 'axios'
import { AxiosRequestConfig } from 'axios'
import { app, safeStorage } from 'electron'
import fs from 'fs/promises'
import path from 'path'
import aoxisProxy from './AxiosProxy'
// 配置常量,集中管理
const CONFIG = {
GITHUB_CLIENT_ID: 'Iv1.b507a08c87ecfe98',
@ -93,7 +95,7 @@ class CopilotService {
}
}
const response = await axios.get(CONFIG.API_URLS.GITHUB_USER, config)
const response = await aoxisProxy.axios.get(CONFIG.API_URLS.GITHUB_USER, config)
return {
login: response.data.login,
avatar: response.data.avatar_url
@ -114,7 +116,7 @@ class CopilotService {
try {
this.updateHeaders(headers)
const response = await axios.post<AuthResponse>(
const response = await aoxisProxy.axios.post<AuthResponse>(
CONFIG.API_URLS.GITHUB_DEVICE_CODE,
{
client_id: CONFIG.GITHUB_CLIENT_ID,
@ -146,7 +148,7 @@ class CopilotService {
await this.delay(currentDelay)
try {
const response = await axios.post<TokenResponse>(
const response = await aoxisProxy.axios.post<TokenResponse>(
CONFIG.API_URLS.GITHUB_ACCESS_TOKEN,
{
client_id: CONFIG.GITHUB_CLIENT_ID,
@ -208,7 +210,7 @@ class CopilotService {
}
}
const response = await axios.get<CopilotTokenResponse>(CONFIG.API_URLS.COPILOT_TOKEN, config)
const response = await aoxisProxy.axios.get<CopilotTokenResponse>(CONFIG.API_URLS.COPILOT_TOKEN, config)
return response.data
} catch (error) {

View File

@ -1,4 +1,4 @@
import { FileMetadataResponse, FileState, GoogleAIFileManager } from '@google/generative-ai/server'
import { File, FileState, GoogleGenAI, Pager } from '@google/genai'
import { FileType } from '@types'
import fs from 'fs'
@ -8,11 +8,15 @@ export class GeminiService {
private static readonly FILE_LIST_CACHE_KEY = 'gemini_file_list'
private static readonly CACHE_DURATION = 3000
static async uploadFile(_: Electron.IpcMainInvokeEvent, file: FileType, apiKey: string) {
const fileManager = new GoogleAIFileManager(apiKey)
const uploadResult = await fileManager.uploadFile(file.path, {
mimeType: 'application/pdf',
displayName: file.origin_name
static async uploadFile(_: Electron.IpcMainInvokeEvent, file: FileType, apiKey: string): Promise<File> {
const sdk = new GoogleGenAI({ vertexai: false, apiKey })
const uploadResult = await sdk.files.upload({
file: file.path,
config: {
mimeType: 'application/pdf',
name: file.id,
displayName: file.origin_name
}
})
return uploadResult
}
@ -24,40 +28,42 @@ export class GeminiService {
}
}
static async retrieveFile(
_: Electron.IpcMainInvokeEvent,
file: FileType,
apiKey: string
): Promise<FileMetadataResponse | undefined> {
const fileManager = new GoogleAIFileManager(apiKey)
static async retrieveFile(_: Electron.IpcMainInvokeEvent, file: FileType, apiKey: string): Promise<File | undefined> {
const sdk = new GoogleGenAI({ vertexai: false, apiKey })
const cachedResponse = CacheService.get<any>(GeminiService.FILE_LIST_CACHE_KEY)
if (cachedResponse) {
return GeminiService.processResponse(cachedResponse, file)
}
const response = await fileManager.listFiles()
const response = await sdk.files.list()
CacheService.set(GeminiService.FILE_LIST_CACHE_KEY, response, GeminiService.CACHE_DURATION)
return GeminiService.processResponse(response, file)
}
private static processResponse(response: any, file: FileType) {
if (response.files) {
return response.files
.filter((file) => file.state === FileState.ACTIVE)
.find((i) => i.displayName === file.origin_name && Number(i.sizeBytes) === file.size)
private static async processResponse(response: Pager<File>, file: FileType) {
for await (const f of response) {
if (f.state === FileState.ACTIVE) {
if (f.displayName === file.origin_name && Number(f.sizeBytes) === file.size) {
return f
}
}
}
return undefined
}
static async listFiles(_: Electron.IpcMainInvokeEvent, apiKey: string) {
const fileManager = new GoogleAIFileManager(apiKey)
return await fileManager.listFiles()
static async listFiles(_: Electron.IpcMainInvokeEvent, apiKey: string): Promise<File[]> {
const sdk = new GoogleGenAI({ vertexai: false, apiKey })
const files: File[] = []
for await (const f of await sdk.files.list()) {
files.push(f)
}
return files
}
static async deleteFile(_: Electron.IpcMainInvokeEvent, apiKey: string, fileId: string) {
const fileManager = new GoogleAIFileManager(apiKey)
await fileManager.deleteFile(fileId)
static async deleteFile(_: Electron.IpcMainInvokeEvent, fileId: string, apiKey: string) {
const sdk = new GoogleGenAI({ vertexai: false, apiKey })
await sdk.files.delete({ name: fileId })
}
}
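From the renderer these methods are reached through the window.api.gemini bridge updated later in this commit; note that deleteFile now takes the file id before the API key. The snippet below is a usage sketch, not repository code:

```ts
// Hypothetical caller (not in this commit) exercising the migrated @google/genai flow:
// upload a PDF, look it up again through the cached file list, then delete it by name.
import type { FileType } from '@renderer/types'

async function geminiFileRoundTrip(file: FileType, apiKey: string): Promise<void> {
  const uploaded = await window.api.gemini.uploadFile(file, apiKey)
  console.log('uploaded as', uploaded.name)

  const found = await window.api.gemini.retrieveFile(file, apiKey)
  if (found?.name) {
    // Argument order changed in this commit: fileId first, then apiKey.
    await window.api.gemini.deleteFile(found.name, apiKey)
  }
}
```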

View File

@ -1,3 +1,4 @@
import crypto from 'node:crypto'
import fs from 'node:fs'
import os from 'node:os'
import path from 'node:path'
@ -22,9 +23,12 @@ import {
} from '@types'
import { app } from 'electron'
import Logger from 'electron-log'
import { EventEmitter } from 'events'
import { memoize } from 'lodash'
import { CacheService } from './CacheService'
import { CallBackServer } from './mcp/oauth/callback'
import { McpOAuthClientProvider } from './mcp/oauth/provider'
import { StreamableHTTPClientTransport, type StreamableHTTPClientTransportOptions } from './MCPStreamableHttpClient'
// Generic type for caching wrapped functions
@ -117,9 +121,17 @@ class McpService {
const args = [...(server.args || [])]
let transport: StdioClientTransport | SSEClientTransport | InMemoryTransport | StreamableHTTPClientTransport
// let transport: StdioClientTransport | SSEClientTransport | InMemoryTransport | StreamableHTTPClientTransport
const authProvider = new McpOAuthClientProvider({
serverUrlHash: crypto
.createHash('md5')
.update(server.baseUrl || '')
.digest('hex')
})
try {
const initTransport = async (): Promise<
StdioClientTransport | SSEClientTransport | InMemoryTransport | StreamableHTTPClientTransport
> => {
// Create appropriate transport based on configuration
if (server.type === 'inMemory') {
Logger.info(`[MCP] Using in-memory transport for server: ${server.name}`)
@ -134,29 +146,31 @@ class McpService {
throw new Error(`Failed to start in-memory server: ${error.message}`)
}
// set the client transport to the client
transport = clientTransport
return clientTransport
} else if (server.baseUrl) {
if (server.type === 'streamableHttp') {
const options: StreamableHTTPClientTransportOptions = {
requestInit: {
headers: server.headers || {}
}
},
authProvider
}
transport = new StreamableHTTPClientTransport(new URL(server.baseUrl!), options)
return new StreamableHTTPClientTransport(new URL(server.baseUrl!), options)
} else if (server.type === 'sse') {
const options: SSEClientTransportOptions = {
requestInit: {
headers: server.headers || {}
}
},
authProvider
}
transport = new SSEClientTransport(new URL(server.baseUrl!), options)
return new SSEClientTransport(new URL(server.baseUrl!), options)
} else {
throw new Error('Invalid server type')
}
} else if (server.command) {
let cmd = server.command
if (server.command === 'npx' || server.command === 'bun' || server.command === 'bunx') {
if (server.command === 'npx') {
cmd = await getBinaryPath('bun')
Logger.info(`[MCP] Using command: ${cmd}`)
@ -196,7 +210,7 @@ class McpService {
Logger.info(`[MCP] Starting server with command: ${cmd} ${args ? args.join(' ') : ''}`)
// Logger.info(`[MCP] Environment variables for server:`, server.env)
transport = new StdioClientTransport({
const stdioTransport = new StdioClientTransport({
command: cmd,
args,
env: {
@ -206,14 +220,72 @@ class McpService {
},
stderr: 'pipe'
})
transport.stderr?.on('data', (data) =>
stdioTransport.stderr?.on('data', (data) =>
Logger.info(`[MCP] Stdio stderr for server: ${server.name} `, data.toString())
)
return stdioTransport
} else {
throw new Error('Either baseUrl or command must be provided')
}
}
await client.connect(transport)
const handleAuth = async (client: Client, transport: SSEClientTransport | StreamableHTTPClientTransport) => {
Logger.info(`[MCP] Starting OAuth flow for server: ${server.name}`)
// Create an event emitter for the OAuth callback
const events = new EventEmitter()
// Create a callback server
const callbackServer = new CallBackServer({
port: authProvider.config.callbackPort,
path: authProvider.config.callbackPath || '/oauth/callback',
events
})
// Set a timeout to close the callback server
const timeoutId = setTimeout(() => {
Logger.warn(`[MCP] OAuth flow timed out for server: ${server.name}`)
callbackServer.close()
}, 300000) // 5 minutes timeout
try {
// Wait for the authorization code
const authCode = await callbackServer.waitForAuthCode()
Logger.info(`[MCP] Received auth code: ${authCode}`)
// Complete the OAuth flow
await transport.finishAuth(authCode)
Logger.info(`[MCP] OAuth flow completed for server: ${server.name}`)
const newTransport = await initTransport()
// Try to connect again
await client.connect(newTransport)
Logger.info(`[MCP] Successfully authenticated with server: ${server.name}`)
} catch (oauthError) {
Logger.error(`[MCP] OAuth authentication failed for server ${server.name}:`, oauthError)
throw new Error(
`OAuth authentication failed: ${oauthError instanceof Error ? oauthError.message : String(oauthError)}`
)
} finally {
// Clear the timeout and close the callback server
clearTimeout(timeoutId)
callbackServer.close()
}
}
try {
const transport = await initTransport()
try {
await client.connect(transport)
} catch (error: Error | any) {
if (error instanceof Error && (error.name === 'UnauthorizedError' || error.message.includes('Unauthorized'))) {
Logger.info(`[MCP] Authentication required for server: ${server.name}`)
await handleAuth(client, transport as SSEClientTransport | StreamableHTTPClientTransport)
} else {
throw error
}
}
// Store the new client in the cache
this.clients.set(serverKey, client)
@ -514,15 +586,12 @@ class McpService {
// 根据不同的 shell 构建不同的命令
if (userShell.includes('zsh')) {
shell = '/bin/zsh'
command =
'source /etc/zshenv 2>/dev/null || true; source ~/.zshenv 2>/dev/null || true; source /etc/zprofile 2>/dev/null || true; source ~/.zprofile 2>/dev/null || true; source /etc/zshrc 2>/dev/null || true; source ~/.zshrc 2>/dev/null || true; source /etc/zlogin 2>/dev/null || true; source ~/.zlogin 2>/dev/null || true; echo $PATH'
} else if (userShell.includes('bash')) {
shell = '/bin/bash'
command =
'source /etc/profile 2>/dev/null || true; source ~/.bash_profile 2>/dev/null || true; source ~/.bash_login 2>/dev/null || true; source ~/.profile 2>/dev/null || true; source ~/.bashrc 2>/dev/null || true; echo $PATH'
} else if (userShell.includes('fish')) {
shell = '/bin/fish'
command =
'source /etc/fish/config.fish 2>/dev/null || true; source ~/.config/fish/config.fish 2>/dev/null || true; source ~/.config/fish/config.local.fish 2>/dev/null || true; echo $PATH'
} else {
@ -540,15 +609,15 @@ class McpService {
})
let path = ''
child.stdout.on('data', (data) => {
child.stdout.on('data', (data: Buffer) => {
path += data.toString()
})
child.stderr.on('data', (data) => {
child.stderr.on('data', (data: Buffer) => {
console.error('Error getting PATH:', data.toString())
})
child.on('close', (code) => {
child.on('close', (code: number) => {
if (code === 0) {
const trimmedPath = path.trim()
resolve(trimmedPath)

View File

@ -5,6 +5,8 @@ import { XMLParser } from 'fast-xml-parser'
import { isNil, partial } from 'lodash'
import { type FileStat } from 'webdav'
import { createOAuthUrl, decryptSecret } from '../integration/nutstore/sso/lib/index.mjs'
interface OAuthResponse {
username: string
userid: string
@ -30,18 +32,18 @@ interface WebDAVResponse {
}
export async function getNutstoreSSOUrl() {
const { createOAuthUrl } = await import('../integration/nutstore/sso/lib')
const url = createOAuthUrl({
const url = await createOAuthUrl({
app: 'cherrystudio'
})
return url
}
export async function decryptToken(token: string) {
const { decrypt } = await import('../integration/nutstore/sso/lib')
try {
const decrypted = decrypt('cherrystudio', token)
const decrypted = await decryptSecret({
app: 'cherrystudio',
s: token
})
return JSON.parse(decrypted) as OAuthResponse
} catch (error) {
console.error('解密失败:', error)

View File

@ -243,6 +243,7 @@ export class WindowService {
private loadMainWindowContent(mainWindow: BrowserWindow) {
if (is.dev && process.env['ELECTRON_RENDERER_URL']) {
mainWindow.loadURL(process.env['ELECTRON_RENDERER_URL'])
// mainWindow.webContents.openDevTools()
} else {
mainWindow.loadFile(join(__dirname, '../renderer/index.html'))
}

View File

@ -0,0 +1,76 @@
import Logger from 'electron-log'
import EventEmitter from 'events'
import http from 'http'
import { URL } from 'url'
import { OAuthCallbackServerOptions } from './types'
export class CallBackServer {
private server: Promise<http.Server>
private events: EventEmitter
constructor(options: OAuthCallbackServerOptions) {
const { port, path, events } = options
this.events = events
this.server = this.initialize(port, path)
}
initialize(port: number, path: string): Promise<http.Server> {
const server = http.createServer((req, res) => {
// Only handle requests to the callback path
if (req.url?.startsWith(path)) {
try {
// Parse the URL to extract the authorization code
const url = new URL(req.url, `http://localhost:${port}`)
const code = url.searchParams.get('code')
if (code) {
// Emit the code event
this.events.emit('auth-code-received', code)
}
} catch (error) {
Logger.error('Error processing OAuth callback:', error)
res.writeHead(500, { 'Content-Type': 'text/plain' })
res.end('Internal Server Error')
}
} else {
// Not a callback request
res.writeHead(404, { 'Content-Type': 'text/plain' })
res.end('Not Found')
}
})
// Handle server errors
server.on('error', (error) => {
Logger.error('OAuth callback server error:', error)
})
const runningServer = new Promise<http.Server>((resolve, reject) => {
server.listen(port, () => {
Logger.info(`OAuth callback server listening on port ${port}`)
resolve(server)
})
server.on('error', (error) => {
reject(error)
})
})
return runningServer
}
get getServer(): Promise<http.Server> {
return this.server
}
async close() {
const server = await this.server
server.close()
}
async waitForAuthCode(): Promise<string> {
return new Promise((resolve) => {
this.events.once('auth-code-received', (code) => {
resolve(code)
})
})
}
}

View File

@ -0,0 +1,78 @@
import path from 'node:path'
import { getConfigDir } from '@main/utils/file'
import { OAuthClientProvider } from '@modelcontextprotocol/sdk/client/auth'
import { OAuthClientInformation, OAuthClientInformationFull, OAuthTokens } from '@modelcontextprotocol/sdk/shared/auth'
import Logger from 'electron-log'
import open from 'open'
import { JsonFileStorage } from './storage'
import { OAuthProviderOptions } from './types'
export class McpOAuthClientProvider implements OAuthClientProvider {
private storage: JsonFileStorage
public readonly config: Required<OAuthProviderOptions>
constructor(options: OAuthProviderOptions) {
const configDir = path.join(getConfigDir(), 'mcp', 'oauth')
this.config = {
serverUrlHash: options.serverUrlHash,
callbackPort: options.callbackPort || 12346,
callbackPath: options.callbackPath || '/oauth/callback',
configDir: options.configDir || configDir,
clientName: options.clientName || 'Cherry Studio',
clientUri: options.clientUri || 'https://github.com/CherryHQ/cherry-studio'
}
this.storage = new JsonFileStorage(this.config.serverUrlHash, this.config.configDir)
}
get redirectUrl(): string {
return `http://localhost:${this.config.callbackPort}${this.config.callbackPath}`
}
get clientMetadata() {
return {
redirect_uris: [this.redirectUrl],
token_endpoint_auth_method: 'none',
grant_types: ['authorization_code', 'refresh_token'],
response_types: ['code'],
client_name: this.config.clientName,
client_uri: this.config.clientUri
}
}
async clientInformation(): Promise<OAuthClientInformation | undefined> {
return this.storage.getClientInformation()
}
async saveClientInformation(info: OAuthClientInformationFull): Promise<void> {
await this.storage.saveClientInformation(info)
}
async tokens(): Promise<OAuthTokens | undefined> {
return this.storage.getTokens()
}
async saveTokens(tokens: OAuthTokens): Promise<void> {
await this.storage.saveTokens(tokens)
}
async redirectToAuthorization(authorizationUrl: URL): Promise<void> {
try {
// Open the browser to the authorization URL
await open(authorizationUrl.toString())
Logger.info('Browser opened automatically.')
} catch (error) {
Logger.error('Could not open browser automatically.')
throw error // Let caller handle the error
}
}
async saveCodeVerifier(codeVerifier: string): Promise<void> {
await this.storage.saveCodeVerifier(codeVerifier)
}
async codeVerifier(): Promise<string> {
return this.storage.getCodeVerifier()
}
}

View File

@ -0,0 +1,120 @@
import {
OAuthClientInformation,
OAuthClientInformationFull,
OAuthTokens
} from '@modelcontextprotocol/sdk/shared/auth.js'
import Logger from 'electron-log'
import fs from 'fs/promises'
import path from 'path'
import { IOAuthStorage, OAuthStorageData, OAuthStorageSchema } from './types'
export class JsonFileStorage implements IOAuthStorage {
private readonly filePath: string
private cache: OAuthStorageData | null = null
constructor(
readonly serverUrlHash: string,
configDir: string
) {
this.filePath = path.join(configDir, `${serverUrlHash}_oauth.json`)
}
private async readStorage(): Promise<OAuthStorageData> {
if (this.cache) {
return this.cache
}
try {
const data = await fs.readFile(this.filePath, 'utf-8')
const parsed = JSON.parse(data)
const validated = OAuthStorageSchema.parse(parsed)
this.cache = validated
return validated
} catch (error) {
if (error instanceof Error && 'code' in error && error.code === 'ENOENT') {
// File doesn't exist, return initial state
const initial: OAuthStorageData = { lastUpdated: Date.now() }
await this.writeStorage(initial)
return initial
}
Logger.error('Error reading OAuth storage:', error)
throw new Error(`Failed to read OAuth storage: ${error instanceof Error ? error.message : String(error)}`)
}
}
private async writeStorage(data: OAuthStorageData): Promise<void> {
try {
// Ensure directory exists
await fs.mkdir(path.dirname(this.filePath), { recursive: true })
// Update timestamp
data.lastUpdated = Date.now()
// Write file atomically
const tempPath = `${this.filePath}.tmp`
await fs.writeFile(tempPath, JSON.stringify(data, null, 2))
await fs.rename(tempPath, this.filePath)
// Update cache
this.cache = data
} catch (error) {
Logger.error('Error writing OAuth storage:', error)
throw new Error(`Failed to write OAuth storage: ${error instanceof Error ? error.message : String(error)}`)
}
}
async getClientInformation(): Promise<OAuthClientInformation | undefined> {
const data = await this.readStorage()
return data.clientInfo
}
async saveClientInformation(info: OAuthClientInformationFull): Promise<void> {
const data = await this.readStorage()
await this.writeStorage({
...data,
clientInfo: info
})
}
async getTokens(): Promise<OAuthTokens | undefined> {
const data = await this.readStorage()
return data.tokens
}
async saveTokens(tokens: OAuthTokens): Promise<void> {
const data = await this.readStorage()
await this.writeStorage({
...data,
tokens
})
}
async getCodeVerifier(): Promise<string> {
const data = await this.readStorage()
if (!data.codeVerifier) {
throw new Error('No code verifier saved for session')
}
return data.codeVerifier
}
async saveCodeVerifier(codeVerifier: string): Promise<void> {
const data = await this.readStorage()
await this.writeStorage({
...data,
codeVerifier
})
}
async clear(): Promise<void> {
try {
await fs.unlink(this.filePath)
this.cache = null
} catch (error) {
if (error instanceof Error && 'code' in error && error.code !== 'ENOENT') {
Logger.error('Error clearing OAuth storage:', error)
throw new Error(`Failed to clear OAuth storage: ${error instanceof Error ? error.message : String(error)}`)
}
}
}
}

View File

@ -0,0 +1,61 @@
import {
OAuthClientInformation,
OAuthClientInformationFull,
OAuthTokens
} from '@modelcontextprotocol/sdk/shared/auth.js'
import EventEmitter from 'events'
import { z } from 'zod'
export interface OAuthStorageData {
clientInfo?: OAuthClientInformation
tokens?: OAuthTokens
codeVerifier?: string
lastUpdated: number
}
export const OAuthStorageSchema = z.object({
clientInfo: z.any().optional(),
tokens: z.any().optional(),
codeVerifier: z.string().optional(),
lastUpdated: z.number()
})
export interface IOAuthStorage {
getClientInformation(): Promise<OAuthClientInformation | undefined>
saveClientInformation(info: OAuthClientInformationFull): Promise<void>
getTokens(): Promise<OAuthTokens | undefined>
saveTokens(tokens: OAuthTokens): Promise<void>
getCodeVerifier(): Promise<string>
saveCodeVerifier(codeVerifier: string): Promise<void>
clear(): Promise<void>
}
/**
* OAuth callback server setup options
*/
export interface OAuthCallbackServerOptions {
/** Port for the callback server */
port: number
/** Path for the callback endpoint */
path: string
/** Event emitter to signal when auth code is received */
events: EventEmitter
}
/**
* Options for creating an OAuth client provider
*/
export interface OAuthProviderOptions {
/** Server URL to connect to */
serverUrlHash: string
/** Port for the OAuth callback server */
callbackPort?: number
/** Path for the OAuth callback endpoint */
callbackPath?: string
/** Directory to store OAuth credentials */
configDir?: string
/** Client name to use for OAuth registration */
clientName?: string
/** Client URI to use for OAuth registration */
clientUri?: string
}

View File

@ -79,3 +79,7 @@ export function getFilesDir() {
export function getConfigDir() {
return path.join(os.homedir(), '.cherrystudio', 'config')
}
export function getAppConfigDir(name: string) {
return path.join(getConfigDir(), name)
}

View File

@ -1,6 +1,6 @@
import { ExtractChunkData } from '@cherrystudio/embedjs-interfaces'
import { ElectronAPI } from '@electron-toolkit/preload'
import type { FileMetadataResponse, ListFilesResponse, UploadFileResponse } from '@google/generative-ai/server'
import type { File } from '@google/genai'
import type { GetMCPPromptResponse, MCPPrompt, MCPResource, MCPServer, MCPTool } from '@renderer/types'
import { AppInfo, FileType, KnowledgeBaseParams, KnowledgeItem, LanguageVarious, WebDavConfig } from '@renderer/types'
import type { LoaderReturn } from '@shared/config/types'
@ -29,10 +29,13 @@ declare global {
setTrayOnClose: (isActive: boolean) => void
restartTray: () => void
setTheme: (theme: 'light' | 'dark') => void
setCustomCss: (css: string) => void
setAutoUpdate: (isActive: boolean) => void
reload: () => void
clearCache: () => Promise<{ success: boolean; error?: string }>
system: {
getDeviceType: () => Promise<'mac' | 'windows' | 'linux'>
getHostname: () => Promise<string>
}
zip: {
compress: (text: string) => Promise<Buffer>
@ -119,11 +122,11 @@ declare global {
resetMinimumSize: () => Promise<void>
}
gemini: {
uploadFile: (file: FileType, apiKey: string) => Promise<UploadFileResponse>
retrieveFile: (file: FileType, apiKey: string) => Promise<FileMetadataResponse | undefined>
uploadFile: (file: FileType, apiKey: string) => Promise<File>
retrieveFile: (file: FileType, apiKey: string) => Promise<File | undefined>
base64File: (file: FileType) => Promise<{ data: string; mimeType: string }>
listFiles: (apiKey: string) => Promise<ListFilesResponse>
deleteFile: (apiKey: string, fileId: string) => Promise<void>
listFiles: (apiKey: string) => Promise<File[]>
deleteFile: (fileId: string, apiKey: string) => Promise<void>
}
selectionMenu: {
action: (action: string) => Promise<void>

View File

@ -19,10 +19,13 @@ const api = {
setTrayOnClose: (isActive: boolean) => ipcRenderer.invoke(IpcChannel.App_SetTrayOnClose, isActive),
restartTray: () => ipcRenderer.invoke(IpcChannel.App_RestartTray),
setTheme: (theme: 'light' | 'dark') => ipcRenderer.invoke(IpcChannel.App_SetTheme, theme),
setCustomCss: (css: string) => ipcRenderer.invoke(IpcChannel.App_SetCustomCss, css),
setAutoUpdate: (isActive: boolean) => ipcRenderer.invoke(IpcChannel.App_SetAutoUpdate, isActive),
openWebsite: (url: string) => ipcRenderer.invoke(IpcChannel.Open_Website, url),
clearCache: () => ipcRenderer.invoke(IpcChannel.App_ClearCache),
system: {
getDeviceType: () => ipcRenderer.invoke(IpcChannel.System_GetDeviceType)
getDeviceType: () => ipcRenderer.invoke(IpcChannel.System_GetDeviceType),
getHostname: () => ipcRenderer.invoke(IpcChannel.System_GetHostname)
},
zip: {
compress: (text: string) => ipcRenderer.invoke(IpcChannel.Zip_Compress, text),

Binary file not shown (new image file, 30 KiB)

View File

@ -260,6 +260,7 @@ body,
.markdown,
.anticon,
.iconfont,
.lucide,
.message-tokens {
color: var(--chat-text-user) !important;
}

View File

@ -331,7 +331,7 @@ const MinappPopupContainer: React.FC = () => {
height={'100%'}
maskClosable={false}
closeIcon={null}
style={{ marginLeft: 'var(--sidebar-width)' }}>
style={{ marginLeft: 'var(--sidebar-width)', backgroundColor: 'var(--color-background)' }}>
{!isReady && (
<EmptyView>
<Avatar

View File

@ -75,7 +75,7 @@ const WebviewContainer = memo(
const WebviewStyle: React.CSSProperties = {
width: 'calc(100vw - var(--sidebar-width))',
height: 'calc(100vh - var(--navbar-height))',
backgroundColor: 'white',
backgroundColor: 'var(--color-background)',
display: 'inline-flex'
}

View File

@ -131,6 +131,8 @@ const ObsidianExportDialog: React.FC<ObsidianExportDialogProps> = ({
folder: ''
})
// 是否手动编辑过标题
const [hasTitleBeenManuallyEdited, setHasTitleBeenManuallyEdited] = useState(false)
const [vaults, setVaults] = useState<Array<{ path: string; name: string }>>([])
const [files, setFiles] = useState<FileInfo[]>([])
const [fileTreeData, setFileTreeData] = useState<any[]>([])
@ -255,6 +257,12 @@ const ObsidianExportDialog: React.FC<ObsidianExportDialogProps> = ({
setState((prevState) => ({ ...prevState, [key]: value }))
}
// 处理title输入变化
const handleTitleInputChange = (newTitle: string) => {
handleChange('title', newTitle)
setHasTitleBeenManuallyEdited(true)
}
const handleVaultChange = (value: string) => {
setSelectedVault(value)
// 文件夹会通过useEffect自动获取
@ -278,11 +286,17 @@ const ObsidianExportDialog: React.FC<ObsidianExportDialogProps> = ({
const fileName = selectedFile.name
const titleWithoutExt = fileName.endsWith('.md') ? fileName.substring(0, fileName.length - 3) : fileName
handleChange('title', titleWithoutExt)
// 重置手动编辑标记因为这是非用户设置的title
setHasTitleBeenManuallyEdited(false)
handleChange('processingMethod', '1')
} else {
// 如果是文件夹自动设置标题为话题名并设置处理方式为3(新建)
handleChange('processingMethod', '3')
handleChange('title', title)
// 仅当用户未手动编辑过 title 时,才将其重置为 props.title
if (!hasTitleBeenManuallyEdited) {
// title 是 props.title
handleChange('title', title)
}
}
}
}
@ -309,7 +323,7 @@ const ObsidianExportDialog: React.FC<ObsidianExportDialogProps> = ({
<Form.Item label={i18n.t('chat.topics.export.obsidian_title')}>
<Input
value={state.title}
onChange={(e) => handleChange('title', e.target.value)}
onChange={(e) => handleTitleInputChange(e.target.value)}
placeholder={i18n.t('chat.topics.export.obsidian_title_placeholder')}
/>
</Form.Item>

View File

@ -82,14 +82,19 @@ export const QuickPanelView: React.FC<Props> = ({ setInputText }) => {
return true
}
const pattern = lowerSearchText.split('').join('.*')
if (tinyPinyin.isSupported() && /[\u4e00-\u9fa5]/.test(filterText)) {
const pinyinText = tinyPinyin.convertToPinyin(filterText, '', true)
if (pinyinText.toLowerCase().includes(lowerSearchText)) {
try {
const pinyinText = tinyPinyin.convertToPinyin(filterText, '', true).toLowerCase()
const regex = new RegExp(pattern, 'ig')
return regex.test(pinyinText)
} catch (error) {
return true
}
} else {
const regex = new RegExp(pattern, 'ig')
return regex.test(filterText.toLowerCase())
}
return false
})
setIndex(newList.length > 0 ? ctx.defaultIndex || 0 : -1)
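To make the new matching rule concrete: each character of the query is joined with '.*', and for Chinese item text the resulting regex is tested against its pinyin instead of the raw label. A worked example with assumed values:

```ts
// Illustrative only (not in this commit): a query like 'wj' matching the label '文件'
// through its pinyin form.
const lowerSearchText = 'wj'
const pattern = lowerSearchText.split('').join('.*') // 'w.*j'

const pinyinText = 'wenjian' // assumed result of tinyPinyin.convertToPinyin('文件', '', true)
console.log(new RegExp(pattern, 'ig').test(pinyinText)) // true

// Non-Chinese labels are tested the same way, directly against the lowercased text.
console.log(new RegExp(pattern, 'ig').test('web search')) // false: no 'j' after the 'w'
```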
@ -206,6 +211,8 @@ export const QuickPanelView: React.FC<Props> = ({ setInputText }) => {
const textArea = document.querySelector('.inputbar textarea') as HTMLTextAreaElement
const handleInput = (e: Event) => {
if (isComposing.current) return
const target = e.target as HTMLTextAreaElement
const cursorPosition = target.selectionStart
const textBeforeCursor = target.value.slice(0, cursorPosition)
@ -225,8 +232,9 @@ export const QuickPanelView: React.FC<Props> = ({ setInputText }) => {
isComposing.current = true
}
const handleCompositionEnd = () => {
const handleCompositionEnd = (e: CompositionEvent) => {
isComposing.current = false
handleInput(e)
}
textArea.addEventListener('input', handleInput)

View File

@ -42,8 +42,9 @@ export function useWebdavBackupModal({ backupMethod }: { backupMethod?: typeof b
const showBackupModal = useCallback(async () => {
// 获取默认文件名
const deviceType = await window.api.system.getDeviceType()
const hostname = await window.api.system.getHostname()
const timestamp = dayjs().format('YYYYMMDDHHmmss')
const defaultFileName = `cherry-studio.${timestamp}.${deviceType}.zip`
const defaultFileName = `cherry-studio.${timestamp}.${hostname}.${deviceType}.zip`
setCustomFileName(defaultFileName)
setIsModalVisible(true)
}, [])
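With the hostname included, the default file name now encodes timestamp, machine, and platform. A sketch of the resulting string, using a made-up hostname and time:

```ts
// Illustrative only (not in this commit): shape of the default WebDAV backup name.
import dayjs from 'dayjs'

const deviceType = 'mac'          // window.api.system.getDeviceType()
const hostname = 'alices-macbook' // window.api.system.getHostname(), value assumed
const timestamp = dayjs('2025-04-19T20:17:09').format('YYYYMMDDHHmmss')

const defaultFileName = `cherry-studio.${timestamp}.${hostname}.${deviceType}.zip`
console.log(defaultFileName) // cherry-studio.20250419201709.alices-macbook.mac.zip
```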

View File

@ -41,6 +41,7 @@ import XiaoYiAppLogo from '@renderer/assets/images/apps/xiaoyi.webp?url'
import YouLogo from '@renderer/assets/images/apps/you.jpg?url'
import TencentYuanbaoAppLogo from '@renderer/assets/images/apps/yuanbao.webp?url'
import YuewenAppLogo from '@renderer/assets/images/apps/yuewen.png?url'
import ZaiAppLogo from '@renderer/assets/images/apps/zai.png?url'
import ZhihuAppLogo from '@renderer/assets/images/apps/zhihu.png?url'
import ClaudeAppLogo from '@renderer/assets/images/models/claude.png?url'
import HailuoModelLogo from '@renderer/assets/images/models/hailuo.png?url'
@ -392,5 +393,15 @@ export const DEFAULT_MIN_APPS: MinAppType[] = [
logo: DangbeiLogo,
url: 'https://ai.dangbei.com/',
bodered: true
},
{
id: `zai`,
name: `Z.ai`,
logo: ZaiAppLogo,
url: `https://chat.z.ai/`,
bodered: true,
style: {
padding: 10
}
}
]

View File

@ -158,10 +158,13 @@ const visionAllowedModels = [
'grok-vision-beta',
'pixtral',
'gpt-4(?:-[\\w-]+)',
'gpt-4.1(?:-[\\w-]+)?',
'gpt-4o(?:-[\\w-]+)?',
'gpt-4.5(?:-[\\w-]+)',
'chatgpt-4o(?:-[\\w-]+)?',
'o1(?:-[\\w-]+)?',
'o3(?:-[\\w-]+)?',
'o4(?:-[\\w-]+)?',
'deepseek-vl(?:[\\w-]+)?',
'kimi-latest',
'gemma-3(?:-[\\w-]+)'
@ -173,6 +176,7 @@ const visionExcludedModels = [
'gpt-4-32k',
'gpt-4-\\d+',
'o1-mini',
'o3-mini',
'o1-preview',
'AIDC-AI/Marco-o1'
]
@ -258,8 +262,9 @@ export function getModelLogo(modelId: string) {
jina: isLight ? JinaModelLogo : JinaModelLogoDark,
abab: isLight ? MinimaxModelLogo : MinimaxModelLogoDark,
minimax: isLight ? MinimaxModelLogo : MinimaxModelLogoDark,
o3: isLight ? ChatGPTo1ModelLogo : ChatGPTo1ModelLogoDark,
o1: isLight ? ChatGPTo1ModelLogo : ChatGPTo1ModelLogoDark,
o3: isLight ? ChatGPTo1ModelLogo : ChatGPTo1ModelLogoDark,
o4: isLight ? ChatGPTo1ModelLogo : ChatGPTo1ModelLogoDark,
'gpt-3': isLight ? ChatGPT35ModelLogo : ChatGPT35ModelLogoDark,
'gpt-4': isLight ? ChatGPT4ModelLogo : ChatGPT4ModelLogoDark,
gpts: isLight ? ChatGPT4ModelLogo : ChatGPT4ModelLogoDark,
@ -1072,16 +1077,22 @@ export const SYSTEM_MODELS: Record<string, Model[]> = {
],
zhipu: [
{
id: 'glm-zero-preview',
id: 'glm-z1-air',
provider: 'zhipu',
name: 'GLM-Zero-Preview',
group: 'GLM-Zero'
name: 'GLM-Z1-AIR',
group: 'GLM-Z1'
},
{
id: 'glm-4-0520',
id: 'glm-z1-airx',
provider: 'zhipu',
name: 'GLM-4-0520',
group: 'GLM-4'
name: 'GLM-Z1-AIRX',
group: 'GLM-Z1'
},
{
id: 'glm-z1-flash',
provider: 'zhipu',
name: 'GLM-Z1-FLASH',
group: 'GLM-Z1'
},
{
id: 'glm-4-long',
@ -1096,9 +1107,9 @@ export const SYSTEM_MODELS: Record<string, Model[]> = {
group: 'GLM-4'
},
{
id: 'glm-4-air',
id: 'glm-4-air-250414',
provider: 'zhipu',
name: 'GLM-4-Air',
name: 'GLM-4-Air-250414',
group: 'GLM-4'
},
{
@ -1108,9 +1119,9 @@ export const SYSTEM_MODELS: Record<string, Model[]> = {
group: 'GLM-4'
},
{
id: 'glm-4-flash',
id: 'glm-4-flash-250414',
provider: 'zhipu',
name: 'GLM-4-Flash',
name: 'GLM-4-Flash-250414',
group: 'GLM-4'
},
{
@ -1132,9 +1143,9 @@ export const SYSTEM_MODELS: Record<string, Model[]> = {
group: 'GLM-4v'
},
{
id: 'glm-4v-plus',
id: 'glm-4v-plus-0111',
provider: 'zhipu',
name: 'GLM-4V-Plus',
name: 'GLM-4V-Plus-0111',
group: 'GLM-4v'
},
{
@ -2197,8 +2208,9 @@ export function isVisionModel(model: Model): boolean {
}
export function isOpenAIoSeries(model: Model): boolean {
return ['o1', 'o1-2024-12-17'].includes(model.id) || model.id.includes('o3')
return model.id.includes('o1') || model.id.includes('o3') || model.id.includes('o4')
}
export function isOpenAIWebSearch(model: Model): boolean {
return model.id.includes('gpt-4o-search-preview') || model.id.includes('gpt-4o-mini-search-preview')
}
@ -2212,7 +2224,8 @@ export function isSupportedReasoningEffortModel(model?: Model): boolean {
model.id.includes('claude-3-7-sonnet') ||
model.id.includes('claude-3.7-sonnet') ||
isOpenAIoSeries(model) ||
isGrokReasoningModel(model)
isGrokReasoningModel(model) ||
isGemini25ReasoningModel(model)
) {
return true
}
@ -2220,6 +2233,13 @@ export function isSupportedReasoningEffortModel(model?: Model): boolean {
return false
}
export function isGrokModel(model?: Model): boolean {
if (!model) {
return false
}
return model.id.includes('grok')
}
export function isGrokReasoningModel(model?: Model): boolean {
if (!model) {
return false
@ -2232,6 +2252,18 @@ export function isGrokReasoningModel(model?: Model): boolean {
return false
}
export function isGemini25ReasoningModel(model?: Model): boolean {
if (!model) {
return false
}
if (model.id.includes('gemini-2.5')) {
return true
}
return false
}
export function isReasoningModel(model?: Model): boolean {
if (!model) {
return false
@ -2245,7 +2277,11 @@ export function isReasoningModel(model?: Model): boolean {
return true
}
if (model.id.includes('gemini-2.5-pro-exp')) {
if (isGemini25ReasoningModel(model)) {
return true
}
if (model.id.includes('glm-z1')) {
return true
}
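As a rough usage sketch of the detection helpers changed above (the Model objects here are made up; only the id field matters for these checks):

// Hypothetical models – illustrative input for the helpers defined in the hunk above
const glm = { id: 'glm-z1-air', provider: 'zhipu', name: 'GLM-Z1-AIR', group: 'GLM-Z1' } as Model
const gemini = { id: 'gemini-2.5-pro-exp', provider: 'gemini', name: 'Gemini 2.5 Pro Exp', group: 'Gemini' } as Model
const o4 = { id: 'o4-mini', provider: 'openai', name: 'o4-mini', group: 'o4' } as Model

isOpenAIoSeries(o4) // true – id contains 'o4'
isGemini25ReasoningModel(gemini) // true – id contains 'gemini-2.5'
isReasoningModel(glm) // true – id contains 'glm-z1'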

View File

@ -112,65 +112,149 @@ export const SUMMARIZE_PROMPT =
// https://github.com/ItzCrazyKns/Perplexica/blob/master/src/lib/prompts/webSearch.ts
export const SEARCH_SUMMARY_PROMPT = `
You are an AI question rephraser. You will be given a conversation and a follow-up question, you will have to rephrase the follow up question so it is a standalone question and can be used by another LLM to search the web for information to answer it.
If it is a simple writing task or a greeting (unless the greeting contains a question after it) like Hi, Hello, How are you, etc. than a question then you need to return \`not_needed\` as the response (This is because the LLM won't need to search the web for finding information on this topic).
If the user asks some question from some URL or wants you to summarize a PDF or a webpage (via URL) you need to return the links inside the \`links\` XML block and the question inside the \`question\` XML block. If the user wants to you to summarize the webpage or the PDF you need to return \`summarize\` inside the \`question\` XML block in place of a question and the link to summarize in the \`links\` XML block.
You must always return the rephrased question inside the \`question\` XML block, if there are no links in the follow-up question then don't insert a \`links\` XML block in your response.
You are an AI question rephraser. Your role is to rephrase follow-up queries from a conversation into standalone queries that can be used by another LLM to retrieve information, either through web search or from a knowledge base.
Follow these guidelines:
1. If the question is a simple writing task, greeting (e.g., Hi, Hello, How are you), or does not require searching for information (unless the greeting contains a follow-up question), return 'not_needed' in the 'question' XML block. This indicates that no search is required.
2. If the user asks a question related to a specific URL, PDF, or webpage, include the links in the 'links' XML block and the question in the 'question' XML block. If the request is to summarize content from a URL or PDF, return 'summarize' in the 'question' XML block and include the relevant links in the 'links' XML block.
3. For websearch, You need extract keywords into 'question' XML block. For knowledge, You need rewrite user query into 'rewrite' XML block with one alternative version while preserving the original intent and meaning.
4. Websearch: Always return the rephrased question inside the 'question' XML block. If there are no links in the follow-up question, do not insert a 'links' XML block in your response.
5. Knowledge: Always return the rephrased question inside the 'question' XML block.
6. Always wrap the rephrased question in the appropriate XML blocks to specify the tool(s) for retrieving information: use <websearch></websearch> for queries requiring real-time or external information, <knowledge></knowledge> for queries that can be answered from a pre-existing knowledge base, or both if the question could be applicable to either tool. Ensure that the rephrased question is always contained within a <question></question> block inside these wrappers.
7. If you are not sure whether to use knowledge or websearch, use both of them.
There are several examples attached for your reference inside the below \`examples\` XML block
There are several examples attached for your reference inside the below 'examples' XML block.
<examples>
1. Follow up question: What is the capital of France
Rephrased question:\`
<question>
Capital of france
</question>
<websearch>
<question>
Capital of France
</question>
</websearch>
<knowledge>
<rewrite>
What city serves as the capital of France?
</rewrite>
<question>
What is the capital of France
</question>
</knowledge>
\`
2. Hi, how are you?
Rephrased question\`
<question>
not_needed
</question>
2. Follow up question: Hi, how are you?
Rephrased question:\`
<websearch>
<question>
not_needed
</question>
</websearch>
<knowledge>
<question>
not_needed
</question>
</knowledge>
\`
3. Follow up question: What is Docker?
Rephrased question: \`
<question>
What is Docker
</question>
<websearch>
<question>
What is Docker
</question>
</websearch>
<knowledge>
<rewrite>
Can you explain what Docker is and its main purpose?
</rewrite>
<question>
What is Docker
</question>
</knowledge>
\`
4. Follow up question: Can you tell me what is X from https://example.com
Rephrased question: \`
<question>
Can you tell me what is X?
</question>
<links>
https://example.com
</links>
<websearch>
<question>
What is X
</question>
<links>
https://example.com
</links>
</websearch>
<knowledge>
<question>
not_needed
</question>
</knowledge>
\`
5. Follow up question: Summarize the content from https://example.com
5. Follow up question: Summarize the content from https://example1.com and https://example2.com
Rephrased question: \`
<question>
summarize
</question>
<websearch>
<question>
summarize
</question>
<links>
https://example1.com
</links>
<links>
https://example2.com
</links>
</websearch>
<knowledge>
<question>
not_needed
</question>
</knowledge>
\`
<links>
https://example.com
</links>
6. Follow up question: Based on websearch, which company had higher revenue in 2022, "Apple" or "Microsoft"?
Rephrased question: \`
<websearch>
<question>
Apple's revenue in 2022
</question>
<question>
Microsoft's revenue in 2022
</question>
</websearch>
<knowledge>
<question>
not_needed
</question>
</knowledge>
\`
7. Follow up question: Based on knowledge, what are the formulas for Scaled Dot-Product Attention and Multi-Head Attention?
Rephrased question: \`
<websearch>
<question>
not_needed
</question>
</websearch>
<knowledge>
<rewrite>
What are the mathematical formulas for Scaled Dot-Product Attention and Multi-Head Attention?
</rewrite>
<question>
What is the formula for Scaled Dot-Product Attention?
</question>
<question>
What is the formula for Multi-Head Attention?
</question>
</knowledge>
\`
</examples>
Anything below is the part of the actual conversation and you need to use conversation and the follow-up question to rephrase the follow-up question as a standalone question based on the guidelines shared above.
Anything below is part of the actual conversation. Use the conversation history and the follow-up question to rephrase the follow-up question as a standalone question based on the guidelines shared above.
<conversation>
{chat_history}
</conversation>
Follow up question: {query}
Follow up question: {question}
Rephrased question:
`
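A minimal sketch of how a caller could pull the rephrased queries back out of a reply that follows the layout above; the extractBlocks helper and the sample reply are illustrative, not the project's actual parser:

// Illustrative parsing of the <websearch>/<knowledge> wrappers defined by the prompt
const reply = `
<websearch>
<question>
Capital of France
</question>
</websearch>
<knowledge>
<question>
What is the capital of France
</question>
</knowledge>
`

function extractBlocks(text: string, tag: string): string[] {
  const re = new RegExp(`<${tag}>([\\s\\S]*?)</${tag}>`, 'g')
  const out: string[] = []
  let match: RegExpExecArray | null
  while ((match = re.exec(text)) !== null) out.push(match[1].trim())
  return out
}

const [websearchBlock] = extractBlocks(reply, 'websearch')
const webQuestions = websearchBlock ? extractBlocks(websearchBlock, 'question') : []
const needsWebSearch = webQuestions.length > 0 && webQuestions[0] !== 'not_needed'
// webQuestions -> ['Capital of France'], needsWebSearch -> true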

View File

@ -3,6 +3,7 @@ import {
AssistantIconType,
SendMessageShortcut,
setAssistantIconType,
setAutoCheckUpdate as _setAutoCheckUpdate,
setLaunchOnBoot,
setLaunchToTray,
setSendMessageShortcut as _setSendMessageShortcut,
@ -50,6 +51,11 @@ export function useSettings() {
}
},
setAutoCheckUpdate(isAutoUpdate: boolean) {
dispatch(_setAutoCheckUpdate(isAutoUpdate))
window.api.setAutoUpdate(isAutoUpdate)
},
setTheme(theme: ThemeMode) {
dispatch(setTheme(theme))
},

View File

@ -357,7 +357,7 @@
"no_api_key": "API key is not configured",
"provider_disabled": "Model provider is not enabled",
"render": {
"description": "Failed to render formula. Please check if the formula format is correct",
"description": "Failed to render message content. Please check if the message content format is correct",
"title": "Render Error"
},
"user_message_not_found": "Cannot find original user message to resend",
@ -577,7 +577,7 @@
"restore.failed": "Restore failed",
"restore.success": "Restored successfully",
"save.success.title": "Saved successfully",
"searching": "Searching the internet...",
"searching": "Searching...",
"success.joplin.export": "Successfully exported to Joplin",
"success.markdown.export.preconf": "Successfully exported the Markdown file to the preconfigured path",
"success.markdown.export.specified": "Successfully exported the Markdown file",
@ -948,7 +948,9 @@
"syncError": "Backup Error",
"syncStatus": "Backup Status",
"title": "WebDAV",
"user": "WebDAV User"
"user": "WebDAV User",
"maxBackups": "Maximum Backups",
"maxBackups.unlimited": "Unlimited"
},
"yuque": {
"check": {
@ -1058,7 +1060,7 @@
"general.display.title": "Display Settings",
"general.emoji_picker": "Emoji Picker",
"general.image_upload": "Image Upload",
"general.auto_check_update.title": "Auto update checking",
"general.auto_check_update.title": "Auto Update",
"general.reset.button": "Reset",
"general.reset.title": "Data Reset",
"general.restore.button": "Restore",
@ -1259,6 +1261,12 @@
"api_key": "API Key",
"api_key.tip": "Multiple keys separated by commas",
"api_version": "API Version",
"basic_auth": "HTTP authentication",
"basic_auth.tip": "Applicable to instances deployed remotely (see the documentation). Currently, only the Basic scheme (RFC 7617) is supported.",
"basic_auth.user_name": "Username",
"basic_auth.user_name.tip": "Left empty to disable",
"basic_auth.password": "Password",
"basic_auth.password.tip": "",
"charge": "Charge",
"check": "Check",
"check_all_keys": "Check All Keys",
@ -1380,17 +1388,19 @@
},
"title": "Web Search",
"subscribe": "Blacklist Subscription",
"subscribe_update": "Update now",
"subscribe_update": "Update",
"subscribe_add": "Add Subscription",
"subscribe_url": "Subscription feed address",
"subscribe_url": "Subscription Url",
"subscribe_name": "Alternative name",
"subscribe_name.placeholder": "Alternative name used when the downloaded subscription feed has no name.",
"subscribe_add_success": "Subscription feed added successfully!",
"subscribe_delete": "Delete subscription source",
"subscribe_delete": "Delete",
"overwrite": "Override search service",
"overwrite_tooltip": "Force use search service instead of LLM",
"apikey": "API key",
"free": "Free"
"free": "Free",
"content_limit": "Content length limit",
"content_limit_tooltip": "Limit the content length of the search results; content that exceeds the limit will be truncated."
},
"quickPhrase": {
"title": "Quick Phrases",

View File

@ -335,7 +335,7 @@
"no_api_key": "APIキーが設定されていません",
"provider_disabled": "モデルプロバイダーが有効になっていません",
"render": {
"description": "数式のレンダリングに失敗しました。数式の形式が正しいか確認してください",
"description": "メッセージの内容のレンダリングに失敗しました。メッセージの内容の形式が正しいか確認してください",
"title": "レンダリングエラー"
},
"user_message_not_found": "元のユーザーメッセージを見つけることができませんでした",
@ -554,7 +554,7 @@
"restore.failed": "復元に失敗しました",
"restore.success": "復元に成功しました",
"save.success.title": "保存に成功しました",
"searching": "インターネットで検索中...",
"searching": "検索中...",
"success.joplin.export": "Joplin へのエクスポートに成功しました",
"success.markdown.export.preconf": "Markdown ファイルを事前設定されたパスに正常にエクスポートしました",
"success.markdown.export.specified": "Markdown ファイルを正常にエクスポートしました",
@ -926,7 +926,9 @@
"syncError": "バックアップエラー",
"syncStatus": "バックアップ状態",
"title": "WebDAV",
"user": "WebDAVユーザー"
"user": "WebDAVユーザー",
"maxBackups": "最大バックアップ数",
"maxBackups.unlimited": "無制限"
},
"yuque": {
"check": {
@ -1236,6 +1238,12 @@
"api_key": "APIキー",
"api_key.tip": "複数のキーはカンマで区切ります",
"api_version": "APIバージョン",
"basic_auth": "HTTP 認証",
"basic_auth.tip": "サーバー展開によるインスタンスに適用されますドキュメントを参照。現在はBasicスキームRFC7617のみをサポートしています。",
"basic_auth.user_name": "ユーザー名",
"basic_auth.user_name.tip": "空欄で無効化",
"basic_auth.password": "パスワード",
"basic_auth.password.tip": "",
"charge": "充電",
"check": "チェック",
"check_all_keys": "すべてのキーをチェック",
@ -1357,19 +1365,21 @@
"title": "ウェブ検索",
"blacklist_tooltip": "マッチパターン: *://*.example.com/*\n正規表現: /example\\.(net|org)/",
"subscribe": "ブラックリスト購読",
"subscribe_update": "今すぐ更新",
"subscribe_update": "更新",
"subscribe_add": "サブスクリプションを追加",
"subscribe_url": "フィードのURL",
"subscribe_name": "代替名",
"subscribe_name.placeholder": "ダウンロードしたフィードに名前がない場合に使用される代替名",
"subscribe_add_success": "フィードの追加が成功しました!",
"subscribe_delete": "フィードの削除",
"subscribe_delete": "削除",
"overwrite": "サービス検索を上書き",
"overwrite_tooltip": "大規模言語モデルではなく、サービス検索を使用する",
"apikey": "API キー",
"free": "無料"
"free": "無料",
"content_limit": "内容の長さ制限",
"content_limit_tooltip": "検索結果の内容長を制限し、制限を超える内容は切り捨てられます。"
},
"general.auto_check_update.title": "自動更新チェックを有効にする",
"general.auto_check_update.title": "自動更新",
"quickPhrase": {
"title": "クイックフレーズ",
"add": "フレーズを追加",

View File

@ -338,7 +338,7 @@
"no_api_key": "Ключ API не настроен",
"provider_disabled": "Провайдер моделей не включен",
"render": {
"description": "Не удалось рендерить формулу. Пожалуйста, проверьте, правильно ли формат формулы",
"description": "Не удалось рендерить содержимое сообщения. Пожалуйста, проверьте, правильно ли формат содержимого сообщения",
"title": "Ошибка рендеринга"
},
"user_message_not_found": "Не удалось найти исходное сообщение пользователя",
@ -558,7 +558,7 @@
"restore.failed": "Восстановление не удалось",
"restore.success": "Успешно восстановлено",
"save.success.title": "Успешно сохранено",
"searching": "Поиск в Интернете...",
"searching": "Идет поиск...",
"success.joplin.export": "Успешный экспорт в Joplin",
"success.markdown.export.preconf": "Файл Markdown успешно экспортирован в предуказанный путь",
"success.markdown.export.specified": "Файл Markdown успешно экспортирован",
@ -929,7 +929,9 @@
"syncError": "Ошибка резервного копирования",
"syncStatus": "Статус резервного копирования",
"title": "WebDAV",
"user": "Пользователь WebDAV"
"user": "Пользователь WebDAV",
"maxBackups": "Максимальное количество резервных копий",
"maxBackups.unlimited": "Без ограничений"
},
"yuque": {
"check": {
@ -1239,6 +1241,12 @@
"api_key": "Ключ API",
"api_key.tip": "Несколько ключей, разделенных запятыми",
"api_version": "Версия API",
"basic_auth": "HTTP аутентификация",
"basic_auth.tip": "Применимо к экземплярам, развернутым через сервер (см. документацию). В настоящее время поддерживается только схема Basic (RFC7617).",
"basic_auth.user_name": "Имя пользователя",
"basic_auth.user_name.tip": "Оставить пустым для отключения",
"basic_auth.password": "Пароль",
"basic_auth.password.tip": "",
"charge": "Пополнить",
"check": "Проверить",
"check_all_keys": "Проверить все ключи",
@ -1358,21 +1366,23 @@
"title": "Tavily"
},
"title": "Поиск в Интернете",
"blacklist_tooltip": "Соответствующий шаблон: *://*.example.com/*\nРегулярное выражение: /example\\.(net|org)/",
"subscribe": "Черный список подписки",
"subscribe_update": "Обновить сейчас",
"subscribe_add": "Добавить подписку",
"subscribe_url": "Адрес источника подписки",
"subscribe_name": "альтернативное имя",
"subscribe_name.placeholder": "替代名称, используемый, когда загружаемый подписочный источник не имеет названия",
"subscribe_add_success": "Подписка добавлена успешно!",
"subscribe_delete": "Удалить источник подписки",
"overwrite": "Переопределить поставщика поиска",
"overwrite_tooltip": "Использовать поставщика поиска вместо LLM",
"apikey": "Ключ API",
"free": "Бесплатно"
"blacklist_tooltip": "Шаблон: *://*.example.com/*\nРегулярное выражение: /example\\.(net|org)/",
"subscribe": "Подписка на черный список",
"subscribe_update": "Обновить",
"subscribe_add": "Добавить",
"subscribe_url": "URL подписки",
"subscribe_name": "Альтернативное имя",
"subscribe_name.placeholder": "Альтернативное имя, если в подписке нет названия.",
"subscribe_add_success": "Подписка успешно добавлена!",
"subscribe_delete": "Удалить",
"overwrite": "Переопределить провайдера поиска",
"overwrite_tooltip": "Использовать провайдера поиска вместо LLM",
"apikey": "API ключ",
"free": "Бесплатно",
"content_limit": "Ограничение длины текста",
"content_limit_tooltip": "Ограничьте длину содержимого результатов поиска, контент, превышающий ограничение, будет обрезан."
},
"general.auto_check_update.title": "Включить автоматическую проверку обновлений",
"general.auto_check_update.title": "Включить автообновление",
"quickPhrase": {
"title": "Быстрые фразы",
"add": "Добавить фразу",

View File

@ -357,7 +357,7 @@
"no_api_key": "API 密钥未配置",
"provider_disabled": "模型提供商未启用",
"render": {
"description": "渲染公式失败,请检查公式格式是否正确",
"description": "消息内容渲染失败,请检查消息内容格式是否正确",
"title": "渲染错误"
},
"user_message_not_found": "无法找到原始用户消息",
@ -577,7 +577,7 @@
"restore.failed": "恢复失败",
"restore.success": "恢复成功",
"save.success.title": "保存成功",
"searching": "正在联网搜索...",
"searching": "正在搜索...",
"success.joplin.export": "成功导出到 Joplin",
"success.markdown.export.preconf": "成功导出 Markdown 文件到预先设定的路径",
"success.markdown.export.specified": "成功导出 Markdown 文件",
@ -950,7 +950,9 @@
"syncError": "备份错误",
"syncStatus": "备份状态",
"title": "WebDAV",
"user": "WebDAV 用户名"
"user": "WebDAV 用户名",
"maxBackups": "最大备份数",
"maxBackups.unlimited": "无限制"
},
"yuque": {
"check": {
@ -1058,7 +1060,7 @@
"general.display.title": "显示设置",
"general.emoji_picker": "表情选择器",
"general.image_upload": "图片上传",
"general.auto_check_update.title": "自动检测更新",
"general.auto_check_update.title": "自动更新",
"general.reset.button": "重置",
"general.reset.title": "重置数据",
"general.restore.button": "恢复",
@ -1259,6 +1261,12 @@
"api_key": "API 密钥",
"api_key.tip": "多个密钥使用逗号分隔",
"api_version": "API 版本",
"basic_auth": "HTTP 认证",
"basic_auth.tip": "适用于通过服务器部署的实例(参见文档)。目前仅支持 Basic 方案RFC7617。",
"basic_auth.user_name": "用户名",
"basic_auth.user_name.tip": "留空以禁用",
"basic_auth.password": "密码",
"basic_auth.password.tip": "",
"charge": "充值",
"check": "检查",
"check_all_keys": "检查所有密钥",
@ -1390,7 +1398,9 @@
},
"title": "网络搜索",
"apikey": "API 密钥",
"free": "免费"
"free": "免费",
"content_limit": "内容长度限制",
"content_limit_tooltip": "限制搜索结果的内容长度, 超过限制的内容将被截断"
},
"quickPhrase": {
"title": "快捷短语",

View File

@ -335,7 +335,7 @@
"no_api_key": "API 金鑰未設定",
"provider_disabled": "模型供應商未啟用",
"render": {
"description": "渲染公式失敗,請檢查公式格式是否正確",
"description": "消息內容渲染失敗,請檢查消息內容格式是否正確",
"title": "渲染錯誤"
},
"user_message_not_found": "無法找到原始用戶訊息",
@ -555,7 +555,7 @@
"restore.failed": "恢復失敗",
"restore.success": "恢復成功",
"save.success.title": "儲存成功",
"searching": "正在網路上搜尋...",
"searching": "正在搜尋...",
"success.joplin.export": "成功匯出到 Joplin",
"success.markdown.export.preconf": "成功導出 Markdown 文件到預先設定的路徑",
"success.markdown.export.specified": "成功導出 Markdown 文件",
@ -926,7 +926,9 @@
"syncError": "備份錯誤",
"syncStatus": "備份狀態",
"title": "WebDAV",
"user": "WebDAV 使用者名稱"
"user": "WebDAV 使用者名稱",
"maxBackups": "最大備份數量",
"maxBackups.unlimited": "無限制"
},
"yuque": {
"check": {
@ -1236,6 +1238,12 @@
"api_key": "API 金鑰",
"api_key.tip": "多個金鑰使用逗號分隔",
"api_version": "API 版本",
"basic_auth": "HTTP 認證",
"basic_auth.tip": "適用於透過伺服器部署的實例(請參閱文檔)。目前僅支援 Basic 方案RFC7617。",
"basic_auth.user_name": "用戶",
"basic_auth.user_name.tip": "留空以停用",
"basic_auth.password": "密碼",
"basic_auth.password.tip": "",
"charge": "儲值",
"check": "檢查",
"check_all_keys": "檢查所有金鑰",
@ -1356,20 +1364,22 @@
"check_failed": "驗證失敗",
"blacklist_tooltip": "匹配模式: *://*.example.com/*\n正则表达式: /example\\.(net|org)/",
"subscribe": "黑名單訂閱",
"subscribe_update": "立即更新",
"subscribe_update": "更新",
"subscribe_add": "添加訂閱",
"subscribe_url": "訂閱源地址",
"subscribe_name": "替代名稱",
"subscribe_name.placeholder": "當下載的訂閱源沒有名稱時所使用的替代名稱",
"subscribe_add_success": "訂閱源添加成功!",
"subscribe_delete": "刪除訂閱源",
"subscribe_delete": "刪除",
"title": "網路搜尋",
"overwrite": "覆蓋搜尋服務商",
"overwrite_tooltip": "強制使用搜尋服務商而不是大語言模型進行搜尋",
"apikey": "API 金鑰",
"free": "免費"
"free": "免費",
"content_limit": "內容長度限制",
"content_limit_tooltip": "限制搜尋結果的內容長度,超過限制的內容將被截斷。"
},
"general.auto_check_update.title": "啟用自動更新檢查",
"general.auto_check_update.title": "啟用自動更新",
"quickPhrase": {
"title": "快捷短語",
"add": "新增短語",

View File

@ -814,7 +814,9 @@
"syncError": "Σφάλμα στην αντιγραφή ασφαλείας",
"syncStatus": "Κατάσταση αντιγραφής ασφαλείας",
"title": "WebDAV",
"user": "Όνομα χρήστη WebDAV"
"user": "Όνομα χρήστη WebDAV",
"maxBackups": "Μέγιστο αριθμό αρχείων αντιγραφής ασφαλείας",
"maxBackups.unlimited": "Απεριόριστο"
},
"yuque": {
"check": {

View File

@ -814,7 +814,9 @@
"syncError": "Error de copia de seguridad",
"syncStatus": "Estado de copia de seguridad",
"title": "WebDAV",
"user": "Nombre de usuario WebDAV"
"user": "Nombre de usuario WebDAV",
"maxBackups": "Número máximo de copias de seguridad",
"maxBackups.unlimited": "Sin límite"
},
"yuque": {
"check": {

View File

@ -814,7 +814,9 @@
"syncError": "Erreur de sauvegarde",
"syncStatus": "Statut de la sauvegarde",
"title": "WebDAV",
"user": "Nom d'utilisateur WebDAV"
"user": "Nom d'utilisateur WebDAV",
"maxBackups": "Nombre maximal de sauvegardes",
"maxBackups.unlimited": "Illimité"
},
"yuque": {
"check": {

View File

@ -814,7 +814,9 @@
"syncError": "Erro de backup",
"syncStatus": "Status de backup",
"title": "WebDAV",
"user": "Nome de usuário WebDAV"
"user": "Nome de usuário WebDAV",
"maxBackups": "Número máximo de backups",
"maxBackups.unlimited": "Sem limite"
},
"yuque": {
"check": {

View File

@ -3,6 +3,7 @@ import './utils/analytics'
import KeyvStorage from '@kangfenmao/keyv-storage'
import { startAutoSync } from './services/BackupService'
import { startNutstoreAutoSync } from './services/NutstoreService'
import store from './store'
function initSpinner() {
@ -20,9 +21,13 @@ function initKeyv() {
function initAutoSync() {
setTimeout(() => {
const { webdavAutoSync } = store.getState().settings
const { nutstoreAutoSync } = store.getState().nutstore
if (webdavAutoSync) {
startAutoSync()
}
if (nutstoreAutoSync) {
startNutstoreAutoSync()
}
}, 2000)
}

View File

@ -118,7 +118,7 @@ const PopupContainer: React.FC<Props> = ({ resolve }) => {
prompt: AGENT_PROMPT,
content: promptText
})
formRef.current?.setFieldValue('prompt', generatedText)
form.setFieldsValue({ prompt: generatedText })
} catch (error) {
console.error('Error fetching data:', error)
}
@ -170,11 +170,9 @@ const PopupContainer: React.FC<Props> = ({ resolve }) => {
label={t('agents.add.prompt')}
rules={[{ required: true }]}
style={{ position: 'relative' }}>
<TextAreaContainer>
<TextArea placeholder={t('agents.add.prompt.placeholder')} spellCheck={false} rows={10} />
<TokenCount>Tokens: {tokenCount}</TokenCount>
</TextAreaContainer>
<TextArea placeholder={t('agents.add.prompt.placeholder')} spellCheck={false} rows={10} />
</Form.Item>
<TokenCount>Tokens: {tokenCount}</TokenCount>
<Button
icon={loading ? <LoadingOutlined /> : <ThunderboltOutlined />}
onClick={handleButtonClick}
@ -203,11 +201,6 @@ const PopupContainer: React.FC<Props> = ({ resolve }) => {
)
}
const TextAreaContainer = styled.div`
position: relative;
width: 100%;
`
const TokenCount = styled.div`
position: absolute;
bottom: 8px;

View File

@ -1,5 +1,5 @@
import { DeleteOutlined } from '@ant-design/icons'
import type { FileMetadataResponse } from '@google/generative-ai/server'
import type { File } from '@google/genai'
import { useProvider } from '@renderer/hooks/useProvider'
import { runAsyncFunction } from '@renderer/utils'
import { MB } from '@shared/config/constant'
@ -16,11 +16,11 @@ interface GeminiFilesProps {
const GeminiFiles: FC<GeminiFilesProps> = ({ id }) => {
const { provider } = useProvider(id)
const [files, setFiles] = useState<FileMetadataResponse[]>([])
const [files, setFiles] = useState<File[]>([])
const [loading, setLoading] = useState(false)
const fetchFiles = useCallback(async () => {
const { files } = await window.api.gemini.listFiles(provider.apiKey)
const files = await window.api.gemini.listFiles(provider.apiKey)
files && setFiles(files.filter((file) => file.state === 'ACTIVE'))
}, [provider])
@ -60,14 +60,14 @@ const GeminiFiles: FC<GeminiFilesProps> = ({ id }) => {
key={file.name}
fileInfo={{
name: file.displayName,
ext: `.${file.name.split('.').pop()}`,
extra: `${dayjs(file.createTime).format('MM-DD HH:mm')} · ${(parseInt(file.sizeBytes) / MB).toFixed(2)} MB`,
ext: `.${file.name?.split('.').pop()}`,
extra: `${dayjs(file.createTime).format('MM-DD HH:mm')} · ${(parseInt(file.sizeBytes || '0') / MB).toFixed(2)} MB`,
actions: (
<DeleteOutlined
style={{ cursor: 'pointer', color: 'var(--color-error)' }}
onClick={() => {
setFiles(files.filter((f) => f.name !== file.name))
window.api.gemini.deleteFile(provider.apiKey, file.name).catch((error) => {
window.api.gemini.deleteFile(file.name!, provider.apiKey).catch((error) => {
console.error('Failed to delete file:', error)
setFiles((prev) => [...prev, file])
})

View File

@ -26,12 +26,21 @@ interface Props {
setFiles: (files: FileType[]) => void
}
const MAX_FILENAME_DISPLAY_LENGTH = 20
function truncateFileName(name: string, maxLength: number = MAX_FILENAME_DISPLAY_LENGTH) {
if (name.length <= maxLength) return name
return name.slice(0, maxLength - 3) + '...'
}
const FileNameRender: FC<{ file: FileType }> = ({ file }) => {
const [visible, setVisible] = useState<boolean>(false)
const isImage = (ext: string) => {
return ['.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp'].includes(ext)
}
const fullName = FileManager.formatFileName(file)
const displayName = truncateFileName(fullName)
return (
<Tooltip
styles={{
@ -53,6 +62,7 @@ const FileNameRender: FC<{ file: FileType }> = ({ file }) => {
}}
/>
)}
<span style={{ wordBreak: 'break-all' }}>{fullName}</span>
{formatFileSize(file.size)}
</Flex>
}>
@ -66,8 +76,9 @@ const FileNameRender: FC<{ file: FileType }> = ({ file }) => {
if (path) {
window.api.file.openPath(path)
}
}}>
{FileManager.formatFileName(file)}
}}
title={fullName}>
{displayName}
</FileName>
</Tooltip>
)

View File

@ -62,6 +62,7 @@ const MentionModelsButton: FC<Props> = ({ ref, mentionModels, onMentionModel, To
{first(m.name)}
</Avatar>
),
filterText: (p.isSystem ? t(`provider.${p.id}`) : p.name) + m.name,
action: () => onMentionModel(m),
isSelected: mentionModels.some((selected) => getModelUniqId(selected) === getModelUniqId(m))
}))
@ -89,6 +90,7 @@ const MentionModelsButton: FC<Props> = ({ ref, mentionModels, onMentionModel, To
{first(m.name)}
</Avatar>
),
filterText: (p.isSystem ? t(`provider.${p.id}`) : p.name) + m.name,
action: () => onMentionModel(m),
isSelected: mentionModels.some((selected) => getModelUniqId(selected) === getModelUniqId(m))
}))

View File

@ -8,6 +8,7 @@ import type { Message } from '@renderer/types'
import { parseJSON } from '@renderer/utils'
import { escapeBrackets, removeSvgEmptyLines, withGeminiGrounding } from '@renderer/utils/formats'
import { findCitationInChildren } from '@renderer/utils/markdown'
import { sanitizeSchema } from '@renderer/utils/markdown'
import { isEmpty } from 'lodash'
import { type FC, useMemo } from 'react'
import { useTranslation } from 'react-i18next'
@ -16,6 +17,7 @@ import rehypeKatex from 'rehype-katex'
// @ts-ignore next-line
import rehypeMathjax from 'rehype-mathjax'
import rehypeRaw from 'rehype-raw'
import rehypeSanitize from 'rehype-sanitize'
import remarkCjkFriendly from 'remark-cjk-friendly'
import remarkGfm from 'remark-gfm'
import remarkMath from 'remark-math'
@ -24,21 +26,16 @@ import CodeBlock from './CodeBlock'
import ImagePreview from './ImagePreview'
import Link from './Link'
const ALLOWED_ELEMENTS =
/<(style|p|div|span|b|i|strong|em|ul|ol|li|table|tr|td|th|thead|tbody|h[1-6]|blockquote|pre|code|br|hr|svg|path|circle|rect|line|polyline|polygon|text|g|defs|title|desc|tspan|sub|sup)/i
interface Props {
message: Message
}
const remarkPlugins = [remarkMath, remarkGfm, remarkCjkFriendly]
const disallowedElements = ['iframe']
const Markdown: FC<Props> = ({ message }) => {
const { t } = useTranslation()
const { renderInputMessageAsMarkdown, mathEngine } = useSettings()
const rehypeMath = useMemo(() => (mathEngine === 'KaTeX' ? rehypeKatex : rehypeMathjax), [mathEngine])
const messageContent = useMemo(() => {
const empty = isEmpty(message.content)
const paused = message.status === 'paused'
@ -47,9 +44,8 @@ const Markdown: FC<Props> = ({ message }) => {
}, [message, t])
const rehypePlugins = useMemo(() => {
const hasElements = ALLOWED_ELEMENTS.test(messageContent)
return hasElements ? [rehypeRaw, rehypeMath] : [rehypeMath]
}, [messageContent, rehypeMath])
return [rehypeRaw, [rehypeSanitize, sanitizeSchema], mathEngine === 'KaTeX' ? rehypeKatex : rehypeMathjax]
}, [mathEngine])
const components = useMemo(() => {
const baseComponents = {
@ -75,7 +71,6 @@ const Markdown: FC<Props> = ({ message }) => {
remarkPlugins={remarkPlugins}
className="markdown"
components={components}
disallowedElements={disallowedElements}
remarkRehypeOptions={{
footnoteLabel: t('common.footnotes'),
footnoteLabelTagName: 'h4',

View File

@ -1,6 +1,6 @@
import { InfoCircleOutlined } from '@ant-design/icons'
import Favicon from '@renderer/components/Icons/FallbackFavicon'
import { HStack } from '@renderer/components/Layout'
import { FileSearch, Info } from 'lucide-react'
import React from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
@ -11,10 +11,12 @@ interface Citation {
title?: string
hostname?: string
showFavicon?: boolean
type?: string
}
interface CitationsListProps {
citations: Citation[]
hideTitle?: boolean
}
const CitationsList: React.FC<CitationsListProps> = ({ citations }) => {
@ -25,27 +27,69 @@ const CitationsList: React.FC<CitationsListProps> = ({ citations }) => {
return (
<CitationsContainer className="footnotes">
<CitationsTitle>
{t('message.citations')}
<InfoCircleOutlined style={{ fontSize: '14px', marginLeft: '4px', opacity: 0.6 }} />
<span>{t('message.citations')}</span>
<Info size={14} style={{ opacity: 0.6 }} />
</CitationsTitle>
{citations.map((citation) => (
<HStack key={citation.url || citation.number} style={{ alignItems: 'center', gap: 8 }}>
<span style={{ fontSize: 13, color: 'var(--color-text-2)' }}>{citation.number}.</span>
{citation.showFavicon && citation.url && (
<Favicon hostname={new URL(citation.url).hostname} alt={citation.title || citation.hostname || ''} />
{citation.type === 'websearch' ? (
<WebSearchCitation citation={citation} />
) : (
<KnowledgeCitation citation={citation} />
)}
<CitationLink href={citation.url} className="text-nowrap" target="_blank" rel="noopener noreferrer">
{citation.title ? citation.title : <span className="hostname">{citation.hostname}</span>}
</CitationLink>
</HStack>
))}
</CitationsContainer>
)
}
const handleLinkClick = (url: string, event: React.MouseEvent) => {
if (!url) return
event.preventDefault()
// Check whether the URL is a web URL
if (url.startsWith('http://') || url.startsWith('https://')) {
window.open(url, '_blank', 'noopener,noreferrer')
} else {
try {
window.api.file.openPath(url)
} catch (error) {
console.error('Failed to open local file:', error)
}
}
}
// Web search citation component
const WebSearchCitation: React.FC<{ citation: Citation }> = ({ citation }) => {
return (
<>
{citation.showFavicon && citation.url && (
<Favicon hostname={new URL(citation.url).hostname} alt={citation.title || citation.hostname || ''} />
)}
<CitationLink href={citation.url} className="text-nowrap" onClick={(e) => handleLinkClick(citation.url, e)}>
{citation.title ? citation.title : <span className="hostname">{citation.hostname}</span>}
</CitationLink>
</>
)
}
// Knowledge base citation component
const KnowledgeCitation: React.FC<{ citation: Citation }> = ({ citation }) => {
return (
<>
{citation.showFavicon && citation.url && <FileSearch width={16} />}
<CitationLink href={citation.url} className="text-nowrap" onClick={(e) => handleLinkClick(citation.url, e)}>
{citation.title}
</CitationLink>
</>
)
}
const CitationsContainer = styled.div`
background-color: rgb(242, 247, 253);
border-radius: 4px;
border-radius: 10px;
padding: 8px 12px;
margin: 12px 0;
display: flex;
@ -61,6 +105,9 @@ const CitationsTitle = styled.div`
font-weight: 500;
margin-bottom: 4px;
color: var(--color-text-1);
display: flex;
align-items: center;
gap: 6px;
`
const CitationLink = styled.a`

View File

@ -54,7 +54,7 @@ const MessageItem: FC<Props> = ({
const { showMessageDivider, messageFont, fontSize } = useSettings()
const { generating } = useRuntime()
const messageContainerRef = useRef<HTMLDivElement>(null)
// const topic = useTopic(assistant, _topic?.id)
const [contextMenuPosition, setContextMenuPosition] = useState<{ x: number; y: number } | null>(null)
const [selectedQuoteText, setSelectedQuoteText] = useState<string>('')

View File

@ -66,15 +66,26 @@ const MessageAttachments: FC<Props> = ({ message }) => {
)
}
const StyledUpload = styled(Upload)`
.ant-upload-list-item-name {
max-width: 220px;
display: inline-block;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
vertical-align: bottom;
}
`
return (
<Container style={{ marginTop: 2, marginBottom: 8 }} className="message-attachments">
<Upload
<StyledUpload
listType="text"
disabled
fileList={message.files?.map((file) => ({
uid: file.id,
url: 'file://' + FileManager.getSafePath(file),
status: 'done',
status: 'done' as const,
name: FileManager.formatFileName(file)
}))}
/>

View File

@ -100,6 +100,17 @@ const MessageContent: React.FC<Props> = ({ message: _message, model }) => {
}))
}, [message.metadata?.citations, message.metadata?.annotations, model])
// Determine whether the message has any citation content
const hasCitations = useMemo(() => {
return !!(
(formattedCitations && formattedCitations.length > 0) ||
(message?.metadata?.webSearch && message.status === 'success') ||
(message?.metadata?.webSearchInfo && message.status === 'success') ||
(message?.metadata?.groundingMetadata && message.status === 'success') ||
(message?.metadata?.knowledge && message.status === 'success')
)
}, [formattedCitations, message])
// Collect citation data
const citationsData = useMemo(() => {
const searchResults =
@ -122,6 +133,16 @@ const MessageContent: React.FC<Props> = ({ message: _message, model }) => {
})
})
// Add knowledge base results
const knowledgeResults = message.metadata?.knowledge
knowledgeResults?.forEach((result) => {
data.set(result.sourceUrl, {
url: result.sourceUrl,
title: result.id,
content: result.content
})
})
// Add citations
citationsUrls.forEach((result) => {
if (!data.has(result.url)) {
@ -136,10 +157,11 @@ const MessageContent: React.FC<Props> = ({ message: _message, model }) => {
return data
}, [
formattedCitations,
message?.metadata?.annotations,
message?.metadata?.groundingMetadata?.groundingChunks,
message?.metadata?.webSearch?.results,
message?.metadata?.webSearchInfo
message.metadata?.annotations,
message.metadata?.groundingMetadata?.groundingChunks,
message.metadata?.knowledge,
message.metadata?.webSearch?.results,
message.metadata?.webSearchInfo
])
// Process content to make citation numbers clickable
@ -149,7 +171,8 @@ const MessageContent: React.FC<Props> = ({ message: _message, model }) => {
message.metadata?.citations ||
message.metadata?.webSearch ||
message.metadata?.webSearchInfo ||
message.metadata?.annotations
message.metadata?.annotations ||
message.metadata?.knowledge
)
) {
return message.content
@ -157,20 +180,26 @@ const MessageContent: React.FC<Props> = ({ message: _message, model }) => {
let content = message.content
const searchResultsCitations = message?.metadata?.webSearch?.results?.map((result) => result.url) || []
const websearchResultsCitations = message?.metadata?.webSearch?.results?.map((result) => result.url) || []
const knowledgeResultsCitations = message?.metadata?.knowledge?.map((result) => result.sourceUrl) || []
const searchResultsCitations = [...websearchResultsCitations, ...knowledgeResultsCitations]
const citations = message?.metadata?.citations || searchResultsCitations
// Convert [n] format to superscript numbers and make them clickable
// Use <sup> tag for superscript and make it a link with citation data
if (message.metadata?.webSearch) {
if (message.metadata?.webSearch || message.metadata?.knowledge) {
content = content.replace(/\[\[(\d+)\]\]|\[(\d+)\]/g, (match, num1, num2) => {
const num = num1 || num2
const index = parseInt(num) - 1
if (index >= 0 && index < citations.length) {
const link = citations[index]
const isWebLink = link && (link.startsWith('http://') || link.startsWith('https://'))
const citationData = link ? encodeHTML(JSON.stringify(citationsData.get(link) || { url: link })) : null
return link ? `[<sup data-citation='${citationData}'>${num}</sup>](${link})` : `<sup>${num}</sup>`
return link && isWebLink
? `[<sup data-citation='${citationData}'>${num}</sup>](${link})`
: `<sup>${num}</sup>`
}
return match
})
@ -184,6 +213,7 @@ const MessageContent: React.FC<Props> = ({ message: _message, model }) => {
}, [
message.metadata?.citations,
message.metadata?.webSearch,
message.metadata?.knowledge,
message.metadata?.webSearchInfo,
message.metadata?.annotations,
message.content,
@ -242,59 +272,74 @@ const MessageContent: React.FC<Props> = ({ message: _message, model }) => {
)}
</Fragment>
)}
{message?.metadata?.groundingMetadata && message.status == 'success' && (
{hasCitations && (
<>
<CitationsList
citations={
message.metadata.groundingMetadata?.groundingChunks?.map((chunk, index) => ({
{message?.metadata?.groundingMetadata && message.status === 'success' && (
<>
<CitationsList
citations={
message.metadata.groundingMetadata?.groundingChunks?.map((chunk, index) => ({
number: index + 1,
url: chunk?.web?.uri || '',
title: chunk?.web?.title,
showFavicon: false
})) || []
}
/>
<SearchEntryPoint
dangerouslySetInnerHTML={{
__html: message.metadata.groundingMetadata?.searchEntryPoint?.renderedContent
? message.metadata.groundingMetadata.searchEntryPoint.renderedContent
.replace(/@media \(prefers-color-scheme: light\)/g, 'body[theme-mode="light"]')
.replace(/@media \(prefers-color-scheme: dark\)/g, 'body[theme-mode="dark"]')
: ''
}}
/>
</>
)}
{formattedCitations && (
<CitationsList
citations={formattedCitations.map((citation) => ({
number: citation.number,
url: citation.url,
hostname: citation.hostname,
showFavicon: isWebCitation
}))}
/>
)}
{(message?.metadata?.webSearch || message.metadata?.knowledge) && message.status === 'success' && (
<CitationsList
citations={[
...(message.metadata.webSearch?.results.map((result, index) => ({
number: index + 1,
url: result.url,
title: result.title,
showFavicon: true,
type: 'websearch'
})) || []),
...(message.metadata.knowledge?.map((result, index) => ({
number: (message.metadata?.webSearch?.results?.length || 0) + index + 1,
url: result.sourceUrl,
title: result.sourceUrl,
showFavicon: true,
type: 'knowledge'
})) || [])
]}
/>
)}
{message?.metadata?.webSearchInfo && message.status === 'success' && (
<CitationsList
citations={message.metadata.webSearchInfo.map((result, index) => ({
number: index + 1,
url: chunk?.web?.uri || '',
title: chunk?.web?.title,
showFavicon: false
})) || []
}
/>
<SearchEntryPoint
dangerouslySetInnerHTML={{
__html: message.metadata.groundingMetadata?.searchEntryPoint?.renderedContent
? message.metadata.groundingMetadata.searchEntryPoint.renderedContent
.replace(/@media \(prefers-color-scheme: light\)/g, 'body[theme-mode="light"]')
.replace(/@media \(prefers-color-scheme: dark\)/g, 'body[theme-mode="dark"]')
: ''
}}
/>
url: result.link || result.url,
title: result.title,
showFavicon: true
}))}
/>
)}
</>
)}
{formattedCitations && (
<CitationsList
citations={formattedCitations.map((citation) => ({
number: citation.number,
url: citation.url,
hostname: citation.hostname,
showFavicon: isWebCitation
}))}
/>
)}
{message?.metadata?.webSearch && message.status === 'success' && (
<CitationsList
citations={message.metadata.webSearch.results.map((result, index) => ({
number: index + 1,
url: result.url,
title: result.title,
showFavicon: true
}))}
/>
)}
{message?.metadata?.webSearchInfo && message.status === 'success' && (
<CitationsList
citations={message.metadata.webSearchInfo.map((result, index) => ({
number: index + 1,
url: result.link || result.url,
title: result.title,
showFavicon: true
}))}
/>
)}
<MessageAttachments message={message} />
</Fragment>
)
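To make the [n] rewrite above concrete, a standalone sketch of the same replacement, simplified (no encodeHTML and no citation data payload); the content string and citation URLs are made up:

// Simplified version of the superscript-citation rewrite above (illustrative)
const citations = ['https://example.com/a', 'file:///docs/knowledge.md']
const content = 'Paris is the capital of France [1]. See the local note [2].'

const processed = content.replace(/\[\[(\d+)\]\]|\[(\d+)\]/g, (match, num1, num2) => {
  const num = num1 || num2
  const index = parseInt(num) - 1
  if (index >= 0 && index < citations.length) {
    const link = citations[index]
    const isWebLink = link.startsWith('http://') || link.startsWith('https://')
    // Only web links become clickable; knowledge sources stay as plain superscripts
    return isWebLink ? `[<sup>${num}</sup>](${link})` : `<sup>${num}</sup>`
  }
  return match
})
// -> 'Paris is the capital of France [<sup>1</sup>](https://example.com/a). See the local note <sup>2</sup>.'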

View File

@ -9,15 +9,19 @@ interface Props {
interface State {
hasError: boolean
error?: Error
}
const ErrorFallback = ({ fallback }: { fallback?: React.ReactNode }) => {
const ErrorFallback = ({ fallback, error }: { fallback?: React.ReactNode; error?: Error }) => {
const { t } = useTranslation()
return (
fallback || (
<Alert message={t('error.render.title')} description={t('error.render.description')} type="error" showIcon />
)
)
// If detailed error information is available, append it to the description
const errorDescription =
process.env.NODE_ENV !== 'production' && error
? `${t('error.render.description')}: ${error.message}`
: t('error.render.description')
return fallback || <Alert message={t('error.render.title')} description={errorDescription} type="error" showIcon />
}
class MessageErrorBoundary extends React.Component<Props, State> {
@ -26,13 +30,13 @@ class MessageErrorBoundary extends React.Component<Props, State> {
this.state = { hasError: false }
}
static getDerivedStateFromError() {
return { hasError: true }
static getDerivedStateFromError(error: Error) {
return { hasError: true, error }
}
render() {
if (this.state.hasError) {
return <ErrorFallback fallback={this.props.fallback} />
return <ErrorFallback fallback={this.props.fallback} error={this.state.error} />
}
return this.props.children
}

View File

@ -8,7 +8,6 @@ import { useRuntime } from '@renderer/hooks/useRuntime'
import { useSettings } from '@renderer/hooks/useSettings'
import { useAppDispatch } from '@renderer/store'
import { setUpdateState } from '@renderer/store/runtime'
import { setAutoCheckUpdate } from '@renderer/store/settings'
import { ThemeMode } from '@renderer/types'
import { compareVersions, runAsyncFunction } from '@renderer/utils'
import { Avatar, Button, Progress, Row, Switch, Tag } from 'antd'
@ -25,7 +24,7 @@ import { SettingContainer, SettingDivider, SettingGroup, SettingRow, SettingTitl
const AboutSettings: FC = () => {
const [version, setVersion] = useState('')
const { t } = useTranslation()
const { autoCheckUpdate } = useSettings()
const { autoCheckUpdate, setAutoCheckUpdate } = useSettings()
const { theme } = useTheme()
const dispatch = useAppDispatch()
const { update } = useRuntime()
@ -150,7 +149,7 @@ const AboutSettings: FC = () => {
<SettingDivider />
<SettingRow>
<SettingRowTitle>{t('settings.general.auto_check_update.title')}</SettingRowTitle>
<Switch value={autoCheckUpdate} onChange={(v) => dispatch(setAutoCheckUpdate(v))} />
<Switch value={autoCheckUpdate} onChange={(v) => setAutoCheckUpdate(v)} />
</SettingRow>
</SettingGroup>
{hasNewVersion && update.info && (

View File

@ -173,13 +173,13 @@ const ServerName = styled.div`
const ServerDescription = styled.div`
font-size: 0.85rem;
color: ${(props) => props.theme.colors?.textSecondary || '#8c8c8c'};
color: var(--color-text-2);
margin-bottom: 3px;
`
const ServerUrl = styled.div`
font-size: 0.8rem;
color: ${(props) => props.theme.colors?.textTertiary || '#bfbfbf'};
color: var(--color-text-3);
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;

View File

@ -204,18 +204,18 @@ const DataSettings: FC = () => {
<SettingDivider />
<SettingRow>
<SettingRowTitle>{t('settings.data.app_data')}</SettingRowTitle>
<HStack alignItems="center" gap="5px">
<Typography.Text style={{ color: 'var(--color-text-3)' }}>{appInfo?.appDataPath}</Typography.Text>
<StyledIcon onClick={() => handleOpenPath(appInfo?.appDataPath)} />
</HStack>
<PathRow>
<PathText style={{ color: 'var(--color-text-3)' }}>{appInfo?.appDataPath}</PathText>
<StyledIcon onClick={() => handleOpenPath(appInfo?.appDataPath)} style={{ flexShrink: 0 }} />
</PathRow>
</SettingRow>
<SettingDivider />
<SettingRow>
<SettingRowTitle>{t('settings.data.app_logs')}</SettingRowTitle>
<HStack alignItems="center" gap="5px">
<Typography.Text style={{ color: 'var(--color-text-3)' }}>{appInfo?.logsPath}</Typography.Text>
<StyledIcon onClick={() => handleOpenPath(appInfo?.logsPath)} />
</HStack>
<PathRow>
<PathText style={{ color: 'var(--color-text-3)' }}>{appInfo?.logsPath}</PathText>
<StyledIcon onClick={() => handleOpenPath(appInfo?.logsPath)} style={{ flexShrink: 0 }} />
</PathRow>
</SettingRow>
<SettingDivider />
<SettingRow>
@ -280,4 +280,24 @@ const MenuList = styled.div`
}
`
const PathText = styled(Typography.Text)`
flex: 1;
min-width: 0;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
display: inline-block;
vertical-align: middle;
text-align: right;
margin-left: 5px;
`
const PathRow = styled(HStack)`
min-width: 0;
flex: 1;
width: 0;
align-items: center;
gap: 5px;
`
export default DataSettings

View File

@ -9,6 +9,7 @@ import { useAppDispatch, useAppSelector } from '@renderer/store'
import {
setWebdavAutoSync,
setWebdavHost as _setWebdavHost,
setWebdavMaxBackups as _setWebdavMaxBackups,
setWebdavPass as _setWebdavPass,
setWebdavPath as _setWebdavPath,
setWebdavSyncInterval as _setWebdavSyncInterval,
@ -27,7 +28,8 @@ const WebDavSettings: FC = () => {
webdavUser: webDAVUser,
webdavPass: webDAVPass,
webdavPath: webDAVPath,
webdavSyncInterval: webDAVSyncInterval
webdavSyncInterval: webDAVSyncInterval,
webdavMaxBackups: webDAVMaxBackups
} = useSettings()
const [webdavHost, setWebdavHost] = useState<string | undefined>(webDAVHost)
@ -37,6 +39,7 @@ const WebDavSettings: FC = () => {
const [backupManagerVisible, setBackupManagerVisible] = useState(false)
const [syncInterval, setSyncInterval] = useState<number>(webDAVSyncInterval)
const [maxBackups, setMaxBackups] = useState<number>(webDAVMaxBackups)
const dispatch = useAppDispatch()
const { theme } = useTheme()
@ -59,6 +62,11 @@ const WebDavSettings: FC = () => {
}
}
const onMaxBackupsChange = (value: number) => {
setMaxBackups(value)
dispatch(_setWebdavMaxBackups(value))
}
const renderSyncStatus = () => {
if (!webdavHost) return null
@ -173,6 +181,19 @@ const WebDavSettings: FC = () => {
<Select.Option value={1440}>{t('settings.data.webdav.hour_interval', { count: 24 })}</Select.Option>
</Select>
</SettingRow>
<SettingDivider />
<SettingRow>
<SettingRowTitle>{t('settings.data.webdav.maxBackups')}</SettingRowTitle>
<Select value={maxBackups} onChange={onMaxBackupsChange} disabled={!webdavHost} style={{ width: 120 }}>
<Select.Option value={0}>{t('settings.data.webdav.maxBackups.unlimited')}</Select.Option>
<Select.Option value={1}>1</Select.Option>
<Select.Option value={3}>3</Select.Option>
<Select.Option value={5}>5</Select.Option>
<Select.Option value={10}>10</Select.Option>
<Select.Option value={20}>20</Select.Option>
<Select.Option value={50}>50</Select.Option>
</Select>
</SettingRow>
{webdavSync && syncInterval > 0 && (
<>
<SettingDivider />

View File

@ -188,7 +188,10 @@ const DisplaySettings: FC = () => {
<SettingDivider />
<Input.TextArea
value={customCss}
onChange={(e) => dispatch(setCustomCss(e.target.value))}
onChange={(e) => {
dispatch(setCustomCss(e.target.value))
window.api.setCustomCss(e.target.value)
}}
placeholder={t('settings.display.custom.css.placeholder')}
style={{
minHeight: 200,

View File

@ -324,6 +324,11 @@ const McpSettings: React.FC<Props> = ({ server }) => {
}
const onToggleActive = async (active: boolean) => {
if (isFormChanged && active) {
await onSave()
return
}
await form.validateFields()
setLoadingServer(server.id)
const oldActiveState = server.isActive

View File

@ -5,6 +5,7 @@ import { Center, HStack } from '@renderer/components/Layout'
import { useMCPServers } from '@renderer/hooks/useMCPServers'
import { builtinMCPServers } from '@renderer/store/mcp'
import { MCPServer } from '@renderer/types'
import { getMcpConfigSampleFromReadme } from '@renderer/utils'
import { Button, Card, Flex, Input, Space, Spin, Tag, Typography } from 'antd'
import { npxFinder } from 'npx-scope-finder'
import { type FC, useEffect, useState } from 'react'
@ -19,6 +20,7 @@ interface SearchResult {
npmLink: string
fullName: string
type: MCPServer['type']
configSample?: MCPServer['configSample']
}
const npmScopes = ['@cherry', '@modelcontextprotocol', '@gongrzhe', '@mcpmarket']
@ -73,9 +75,13 @@ const NpxSearch: FC<{
try {
// Call npxFinder to search for packages
const packages = await npxFinder(searchScope)
// Map the packages to our desired format
const formattedResults: SearchResult[] = packages.map((pkg) => {
let configSample
if (pkg.original?.readme) {
configSample = getMcpConfigSampleFromReadme(pkg.original.readme)
}
return {
key: pkg.name,
name: pkg.name?.split('/')[1] || '',
@ -84,7 +90,8 @@ const NpxSearch: FC<{
usage: `npx ${pkg.name}`,
npmLink: pkg.links?.npm || `https://www.npmjs.com/package/${pkg.name}`,
fullName: pkg.name || '',
type: 'stdio'
type: 'stdio',
configSample
}
})
@ -199,7 +206,8 @@ const NpxSearch: FC<{
name: record.name,
description: `${record.description}\n\n${t('settings.mcp.npx_list.usage')}: ${record.usage}\n${t('settings.mcp.npx_list.npm')}: ${record.npmLink}`,
command: 'npx',
args: ['-y', record.fullName],
args: record.configSample?.args ?? ['-y', record.fullName],
env: record.configSample?.env,
isActive: false,
type: record.type
}

View File

@ -289,9 +289,9 @@ const ModelList: React.FC<ModelListProps> = ({ providerId, modelStatuses = [], s
</Typography.Text>
}
placement="top">
<span>{model.name}</span>
<NameSpan>{model.name}</NameSpan>
</Tooltip>
<ModelTagsWithLabel model={model} size={11} />
<ModelTagsWithLabel model={model} size={11} style={{ flexShrink: 0 }} />
</ListItemName>
</HStack>
<Flex gap={4} align="center">
@ -371,15 +371,20 @@ const ListItemName = styled.div`
font-size: 14px;
line-height: 1;
font-weight: 600;
span {
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
cursor: help;
font-family: 'Ubuntu';
line-height: 30px;
font-size: 14px;
}
min-width: 0;
overflow: hidden;
flex: 1;
width: 0;
`
const NameSpan = styled.span`
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
cursor: help;
font-family: 'Ubuntu';
line-height: 30px;
font-size: 14px;
`
const RemoveIcon = styled(MinusCircleOutlined)`

View File

@ -65,8 +65,8 @@ const PopupContainer: React.FC<Props> = ({ title, resolve }) => {
centered>
<Form
form={form}
labelCol={{ flex: '110px' }}
labelAlign="left"
labelCol={{ flex: '150px' }}
labelAlign="right"
colon={false}
style={{ marginTop: 25 }}
onFinish={onFinish}>

View File

@ -1,7 +1,13 @@
import { useTheme } from '@renderer/context/ThemeProvider'
import { useAppDispatch, useAppSelector } from '@renderer/store'
import { setEnhanceMode, setMaxResult, setOverwrite, setSearchWithTime } from '@renderer/store/websearch'
import { Slider, Switch, Tooltip } from 'antd'
import {
setContentLimit,
setEnhanceMode,
setMaxResult,
setOverwrite,
setSearchWithTime
} from '@renderer/store/websearch'
import { Input, Slider, Switch, Tooltip } from 'antd'
import { t } from 'i18next'
import { Info } from 'lucide-react'
import { FC } from 'react'
@ -14,6 +20,7 @@ const BasicSettings: FC = () => {
const enhanceMode = useAppSelector((state) => state.websearch.enhanceMode)
const overwrite = useAppSelector((state) => state.websearch.overwrite)
const maxResults = useAppSelector((state) => state.websearch.maxResults)
const contentLimit = useAppSelector((state) => state.websearch.contentLimit)
const dispatch = useAppDispatch()
@ -59,6 +66,26 @@ const BasicSettings: FC = () => {
onChangeComplete={(value) => dispatch(setMaxResult(value))}
/>
</SettingRow>
<SettingDivider style={{ marginTop: 15, marginBottom: 10 }} />
<SettingRow>
<SettingRowTitle>
{t('settings.websearch.content_limit')}
<Tooltip title={t('settings.websearch.content_limit_tooltip')} placement="right">
<Info size={16} color="var(--color-icon)" style={{ marginLeft: 5, cursor: 'pointer' }} />
</Tooltip>
</SettingRowTitle>
<Input
style={{ width: '100px' }}
placeholder="2000"
value={contentLimit}
onChange={(e) => {
const value = e.target.value
if (!isNaN(Number(value)) && Number(value) > 0) {
dispatch(setContentLimit(Number(value)))
}
}}
/>
</SettingRow>
</SettingGroup>
</>
)
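A rough sketch of how a content limit like this is typically applied before results reach the model; the truncate helper and result shape are assumptions, not the actual WebSearchService implementation:

// Illustrative truncation of a search result to the configured contentLimit
interface SearchResultLike {
  title: string
  url: string
  content: string
}

function truncateContent(result: SearchResultLike, contentLimit?: number): SearchResultLike {
  if (!contentLimit || result.content.length <= contentLimit) return result
  return { ...result, content: result.content.slice(0, contentLimit) }
}

const limited = truncateContent({ title: 'Example', url: 'https://example.com', content: 'x'.repeat(5000) }, 2000)
// limited.content.length -> 2000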

View File

@ -5,14 +5,14 @@ import { formatApiKeys } from '@renderer/services/ApiService'
import WebSearchService from '@renderer/services/WebSearchService'
import { WebSearchProvider } from '@renderer/types'
import { hasObjectKey } from '@renderer/utils'
import { Avatar, Button, Divider, Flex, Input } from 'antd'
import { Avatar, Button, Divider, Flex, Form, Input, Tooltip } from 'antd'
import Link from 'antd/es/typography/Link'
import { Info } from 'lucide-react'
import { FC, useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
import { SettingHelpLink, SettingHelpText, SettingHelpTextRow, SettingSubtitle, SettingTitle } from '..'
import { SettingDivider, SettingHelpLink, SettingHelpText, SettingHelpTextRow, SettingSubtitle, SettingTitle } from '..'
import ApiCheckPopup from '../ProviderSettings/ApiCheckPopup'
interface Props {
@ -25,6 +25,8 @@ const WebSearchProviderSetting: FC<Props> = ({ provider: _provider }) => {
const [apiKey, setApiKey] = useState(provider.apiKey || '')
const [apiHost, setApiHost] = useState(provider.apiHost || '')
const [apiChecking, setApiChecking] = useState(false)
const [basicAuthUsername, setBasicAuthUsername] = useState(provider.basicAuthUsername || '')
const [basicAuthPassword, setBasicAuthPassword] = useState(provider.basicAuthPassword || '')
const [apiValid, setApiValid] = useState(false)
const webSearchProviderConfig = WEB_SEARCH_PROVIDER_CONFIG[provider.id]
@ -49,6 +51,26 @@ const WebSearchProviderSetting: FC<Props> = ({ provider: _provider }) => {
}
}
const onUpdateBasicAuthUsername = () => {
const currentValue = basicAuthUsername || ''
const savedValue = provider.basicAuthUsername || ''
if (currentValue !== savedValue) {
updateProvider({ ...provider, basicAuthUsername: basicAuthUsername })
} else {
setBasicAuthUsername(provider.basicAuthUsername || '')
}
}
const onUpdateBasicAuthPassword = () => {
const currentValue = basicAuthPassword || ''
const savedValue = provider.basicAuthPassword || ''
if (currentValue !== savedValue) {
updateProvider({ ...provider, basicAuthPassword: basicAuthPassword })
} else {
setBasicAuthPassword(provider.basicAuthPassword || '')
}
}
async function checkSearch() {
if (!provider) {
window.message.error({
@ -111,7 +133,9 @@ const WebSearchProviderSetting: FC<Props> = ({ provider: _provider }) => {
useEffect(() => {
setApiKey(provider.apiKey ?? '')
setApiHost(provider.apiHost ?? '')
}, [provider.apiKey, provider.apiHost])
setBasicAuthUsername(provider.basicAuthUsername ?? '')
setBasicAuthPassword(provider.basicAuthPassword ?? '')
}, [provider.apiKey, provider.apiHost, provider.basicAuthUsername, provider.basicAuthPassword])
return (
<>
@ -176,6 +200,50 @@ const WebSearchProviderSetting: FC<Props> = ({ provider: _provider }) => {
{apiChecking ? <LoadingOutlined spin /> : apiValid ? <CheckOutlined /> : t('settings.websearch.check')}
</Button>
</Flex>
<SettingDivider style={{ marginTop: 12, marginBottom: 12 }} />
<SettingSubtitle style={{ marginTop: 5, marginBottom: 10 }}>
{t('settings.provider.basic_auth')}
<Tooltip title={t('settings.provider.basic_auth.tip')} placement="right">
<Info size={16} color="var(--color-icon)" style={{ marginLeft: 5, cursor: 'pointer' }} />
</Tooltip>
</SettingSubtitle>
<Flex>
<Form
layout="inline"
initialValues={{
username: basicAuthUsername,
password: basicAuthPassword
}}
onValuesChange={(changedValues) => {
// Update local state when form values change
if ('username' in changedValues) {
setBasicAuthUsername(changedValues.username || '')
}
if ('password' in changedValues) {
setBasicAuthPassword(changedValues.password || '')
}
}}>
<Form.Item label={t('settings.provider.basic_auth.user_name')} name="username">
<Input
placeholder={t('settings.provider.basic_auth.user_name.tip')}
onBlur={onUpdateBasicAuthUsername}
/>
</Form.Item>
<Form.Item
label={t('settings.provider.basic_auth.password')}
name="password"
rules={[{ required: !!basicAuthUsername, validateTrigger: ['onBlur', 'onChange'] }]}
help=""
hidden={!basicAuthUsername}>
<Input.Password
placeholder={t('settings.provider.basic_auth.password.tip')}
onBlur={onUpdateBasicAuthPassword}
disabled={!basicAuthUsername}
visibilityToggle={true}
/>
</Form.Item>
</Form>
</Flex>
</>
)}
</>

View File

@ -1,7 +1,6 @@
import { FOOTNOTE_PROMPT, REFERENCE_PROMPT } from '@renderer/config/prompts'
import { REFERENCE_PROMPT } from '@renderer/config/prompts'
import { getLMStudioKeepAliveTime } from '@renderer/hooks/useLMStudio'
import { getOllamaKeepAliveTime } from '@renderer/hooks/useOllama'
import { getKnowledgeBaseReferences } from '@renderer/services/KnowledgeService'
import type {
Assistant,
GenerateImageParams,
@ -15,7 +14,6 @@ import type {
import { delay, isJSON, parseJSON } from '@renderer/utils'
import { addAbortController, removeAbortController } from '@renderer/utils/abortController'
import { formatApiHost } from '@renderer/utils/api'
import { t } from 'i18next'
import { isEmpty } from 'lodash'
import type OpenAI from 'openai'
@ -98,28 +96,28 @@ export default abstract class BaseProvider {
return message.content
}
const webSearchReferences = await this.getWebSearchReferences(message)
const webSearchReferences = await this.getWebSearchReferencesFromCache(message)
const knowledgeReferences = await this.getKnowledgeBaseReferencesFromCache(message)
if (!isEmpty(webSearchReferences)) {
const referenceContent = `\`\`\`json\n${JSON.stringify(webSearchReferences, null, 2)}\n\`\`\``
// Add an offset so the IDs do not collide
const reindexedKnowledgeReferences = knowledgeReferences.map((ref) => ({
...ref,
id: ref.id + webSearchReferences.length // offset knowledge base reference IDs by the number of web search references
}))
const allReferences = [...webSearchReferences, ...reindexedKnowledgeReferences]
console.log(`Found ${allReferences.length} references for ID: ${message.id}`, allReferences)
if (!isEmpty(allReferences)) {
const referenceContent = `\`\`\`json\n${JSON.stringify(allReferences, null, 2)}\n\`\`\``
return REFERENCE_PROMPT.replace('{question}', message.content).replace('{references}', referenceContent)
}
const knowledgeReferences = await getKnowledgeBaseReferences(message)
if (!isEmpty(message.knowledgeBaseIds) && isEmpty(knowledgeReferences)) {
window.message.info({ content: t('knowledge.no_match'), key: 'knowledge-base-no-match-info' })
}
if (!isEmpty(knowledgeReferences)) {
const referenceContent = `\`\`\`json\n${JSON.stringify(knowledgeReferences, null, 2)}\n\`\`\``
return FOOTNOTE_PROMPT.replace('{question}', message.content).replace('{references}', referenceContent)
}
return message.content
}
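Illustrative sketch, not part of the diff: the re-indexing above offsets each knowledge base reference ID by the number of web search references so the merged list keeps unique IDs. A minimal standalone example of the same arithmetic, with made-up IDs:
const webSearchIds = [1, 2]
const knowledgeIds = [1, 2, 3].map((id) => id + webSearchIds.length)
console.log(knowledgeIds) // [3, 4, 5], no collision with the web search IDs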
private async getWebSearchReferences(message: Message) {
private async getWebSearchReferencesFromCache(message: Message) {
if (isEmpty(message.content)) {
return []
}
@ -140,6 +138,23 @@ export default abstract class BaseProvider {
return []
}
/**
 * Get knowledge base references for a message from the keyv cache
 */
private async getKnowledgeBaseReferencesFromCache(message: Message): Promise<KnowledgeReference[]> {
if (isEmpty(message.content)) {
return []
}
const knowledgeReferences: KnowledgeReference[] = window.keyv.get(`knowledge-search-${message.id}`)
if (!isEmpty(knowledgeReferences)) {
console.log(`Found ${knowledgeReferences.length} knowledge base references in cache for ID: ${message.id}`)
return knowledgeReferences
}
console.log(`No knowledge base references found in cache for ID: ${message.id}`)
return []
}
protected getCustomParameters(assistant: Assistant) {
return (
assistant?.settings?.customParameters?.reduce((acc, param) => {

View File

@ -1,25 +1,25 @@
import {
ContentListUnion,
createPartFromBase64,
FinishReason,
GenerateContentResponse,
GoogleGenAI
} from '@google/genai'
import {
Content,
FileDataPart,
GenerateContentStreamResult,
GoogleGenerativeAI,
File,
GenerateContentConfig,
GenerateContentResponse,
GoogleGenAI,
HarmBlockThreshold,
HarmCategory,
InlineDataPart,
Modality,
Part,
RequestOptions,
PartUnion,
SafetySetting,
TextPart,
Tool
} from '@google/generative-ai'
import { isGemmaModel, isVisionModel, isWebSearchModel } from '@renderer/config/models'
ThinkingConfig,
ToolListUnion
} from '@google/genai'
import {
isGemini25ReasoningModel,
isGemmaModel,
isGenerateImageModel,
isVisionModel,
isWebSearchModel
} from '@renderer/config/models'
import { getStoreSetting } from '@renderer/hooks/useSettings'
import i18n from '@renderer/i18n'
import { getAssistantSettings, getDefaultModel, getTopNamingModel } from '@renderer/services/AssistantService'
@ -39,22 +39,17 @@ import axios from 'axios'
import { flatten, isEmpty, takeRight } from 'lodash'
import OpenAI from 'openai'
import { ChunkCallbackData, CompletionsParams } from '.'
import { CompletionsParams } from '.'
import BaseProvider from './BaseProvider'
type ReasoningEffort = 'low' | 'medium' | 'high'
export default class GeminiProvider extends BaseProvider {
private sdk: GoogleGenerativeAI
private requestOptions: RequestOptions
private imageSdk: GoogleGenAI
private sdk: GoogleGenAI
constructor(provider: Provider) {
super(provider)
this.sdk = new GoogleGenerativeAI(this.apiKey)
/// this sdk is experimental
this.imageSdk = new GoogleGenAI({ apiKey: this.apiKey, httpOptions: { baseUrl: this.getBaseURL() } })
this.requestOptions = {
baseUrl: this.getBaseURL()
}
this.sdk = new GoogleGenAI({ vertexai: false, apiKey: this.apiKey, httpOptions: { baseUrl: this.getBaseURL() } })
}
public getBaseURL(): string {
@ -76,31 +71,31 @@ export default class GeminiProvider extends BaseProvider {
inlineData: {
data,
mimeType
}
} as InlineDataPart
} as Part['inlineData']
}
}
// Retrieve file from Gemini uploaded files
const fileMetadata = await window.api.gemini.retrieveFile(file, this.apiKey)
const fileMetadata: File | undefined = await window.api.gemini.retrieveFile(file, this.apiKey)
if (fileMetadata) {
return {
fileData: {
fileUri: fileMetadata.uri,
mimeType: fileMetadata.mimeType
}
} as FileDataPart
} as Part['fileData']
}
}
// If file is not found, upload it to Gemini
const uploadResult = await window.api.gemini.uploadFile(file, this.apiKey)
const result = await window.api.gemini.uploadFile(file, this.apiKey)
return {
fileData: {
fileUri: uploadResult.file.uri,
mimeType: uploadResult.file.mimeType
}
} as FileDataPart
fileUri: result.uri,
mimeType: result.mimeType
} as Part['fileData']
}
}
/**
@ -125,8 +120,8 @@ export default class GeminiProvider extends BaseProvider {
inlineData: {
data: base64Data,
mimeType: mimeType
}
} as InlineDataPart)
} as Part['inlineData']
})
}
}
}
@ -139,8 +134,8 @@ export default class GeminiProvider extends BaseProvider {
inlineData: {
data: base64Data.base64,
mimeType: base64Data.mime
}
} as InlineDataPart)
} as Part['inlineData']
})
}
if (file.ext === '.pdf') {
@ -152,13 +147,13 @@ export default class GeminiProvider extends BaseProvider {
const fileContent = await (await window.api.file.read(file.id + file.ext)).trim()
parts.push({
text: file.origin_name + '\n' + fileContent
} as TextPart)
})
}
}
return {
role,
parts
parts: parts
}
}
@ -196,6 +191,41 @@ export default class GeminiProvider extends BaseProvider {
]
}
/**
* Get the reasoning effort for the assistant
* @param assistant - The assistant
* @param model - The model
* @returns The reasoning effort
*/
private getReasoningEffort(assistant: Assistant, model: Model) {
if (isGemini25ReasoningModel(model)) {
const effortRatios: Record<ReasoningEffort, number> = {
high: 1,
medium: 0.5,
low: 0.2
}
const effort = assistant?.settings?.reasoning_effort as ReasoningEffort
const effortRatio = effortRatios[effort]
const maxBudgetToken = 24576 // https://ai.google.dev/gemini-api/docs/thinking
const budgetTokens = Math.max(1024, Math.trunc(maxBudgetToken * effortRatio))
if (!effortRatio) {
return {
thinkingConfig: {
thinkingBudget: 0
} as ThinkingConfig
}
}
return {
thinkingConfig: {
thinkingBudget: budgetTokens,
includeThoughts: true
} as ThinkingConfig
}
}
return {}
}
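Illustrative sketch, not part of the diff: the mapping above converts a reasoning effort into a Gemini thinking budget capped at 24576 tokens; the names below are hypothetical, only the ratios and the cap come from the code.
type Effort = 'low' | 'medium' | 'high'
const ratios: Record<Effort, number> = { high: 1, medium: 0.5, low: 0.2 }
const thinkingBudgetFor = (effort?: Effort): number => {
  const ratio = effort ? ratios[effort] : undefined
  if (!ratio) return 0 // an unknown or missing effort disables thinking
  return Math.max(1024, Math.trunc(24576 * ratio)) // low -> 4915, medium -> 12288, high -> 24576
}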
/**
* Generate completions
* @param messages - The messages
@ -204,165 +234,218 @@ export default class GeminiProvider extends BaseProvider {
* @param onChunk - The onChunk callback
* @param onFilterMessages - The onFilterMessages callback
*/
public async completions({ messages, assistant, mcpTools, onChunk, onFilterMessages }: CompletionsParams) {
if (assistant.enableGenerateImage) {
await this.generateImageExp({ messages, assistant, onFilterMessages, onChunk })
} else {
const defaultModel = getDefaultModel()
const model = assistant.model || defaultModel
const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)
public async completions({
messages,
assistant,
mcpTools,
onChunk,
onFilterMessages
}: CompletionsParams): Promise<void> {
const defaultModel = getDefaultModel()
const model = assistant.model || defaultModel
const { contextCount, maxTokens, streamOutput } = getAssistantSettings(assistant)
const userMessages = filterUserRoleStartMessages(
filterEmptyMessages(filterContextMessages(takeRight(messages, contextCount + 2)))
)
onFilterMessages(userMessages)
const userMessages = filterUserRoleStartMessages(
filterEmptyMessages(filterContextMessages(takeRight(messages, contextCount + 2)))
)
onFilterMessages(userMessages)
const userLastMessage = userMessages.pop()
const userLastMessage = userMessages.pop()
const history: Content[] = []
const history: Content[] = []
for (const message of userMessages) {
history.push(await this.getMessageContents(message))
}
for (const message of userMessages) {
history.push(await this.getMessageContents(message))
}
let systemInstruction = assistant.prompt
let systemInstruction = assistant.prompt
if (mcpTools && mcpTools.length > 0) {
systemInstruction = buildSystemPrompt(assistant.prompt || '', mcpTools)
}
if (mcpTools && mcpTools.length > 0) {
systemInstruction = buildSystemPrompt(assistant.prompt || '', mcpTools)
}
// const tools = mcpToolsToGeminiTools(mcpTools)
const tools: Tool[] = []
const toolResponses: MCPToolResponse[] = []
// const tools = mcpToolsToGeminiTools(mcpTools)
const tools: ToolListUnion = []
const toolResponses: MCPToolResponse[] = []
if (!WebSearchService.isOverwriteEnabled() && assistant.enableWebSearch && isWebSearchModel(model)) {
tools.push({
// @ts-ignore googleSearch is not a valid tool for Gemini
googleSearch: {}
})
}
if (!WebSearchService.isOverwriteEnabled() && assistant.enableWebSearch && isWebSearchModel(model)) {
tools.push({
// @ts-ignore googleSearch is not a valid tool for Gemini
googleSearch: {}
})
}
const geminiModel = this.sdk.getGenerativeModel(
{
model: model.id,
...(isGemmaModel(model) ? {} : { systemInstruction: systemInstruction }),
safetySettings: this.getSafetySettings(model.id),
tools: tools,
generationConfig: {
maxOutputTokens: maxTokens,
temperature: assistant?.settings?.temperature,
topP: assistant?.settings?.topP,
...this.getCustomParameters(assistant)
const generateContentConfig: GenerateContentConfig = {
responseModalities: isGenerateImageModel(model) ? [Modality.TEXT, Modality.IMAGE] : undefined,
responseMimeType: isGenerateImageModel(model) ? 'text/plain' : undefined,
safetySettings: this.getSafetySettings(model.id),
// image generation doesn't need a system instruction
systemInstruction: isGemmaModel(model) || isGenerateImageModel(model) ? undefined : systemInstruction,
temperature: assistant?.settings?.temperature,
topP: assistant?.settings?.topP,
maxOutputTokens: maxTokens,
tools: tools,
...this.getReasoningEffort(assistant, model),
...this.getCustomParameters(assistant)
}
const messageContents: Content = await this.getMessageContents(userLastMessage!)
const chat = this.sdk.chats.create({
model: model.id,
config: generateContentConfig,
history: history
})
if (isGemmaModel(model) && assistant.prompt) {
const isFirstMessage = history.length === 0
if (isFirstMessage && messageContents) {
const systemMessage = [
{
text:
'<start_of_turn>user\n' +
systemInstruction +
'<end_of_turn>\n' +
'<start_of_turn>user\n' +
(messageContents?.parts?.[0] as Part).text +
'<end_of_turn>'
}
},
this.requestOptions
)
const chat = geminiModel.startChat({ history })
const messageContents = await this.getMessageContents(userLastMessage!)
if (isGemmaModel(model) && assistant.prompt) {
const isFirstMessage = history.length === 0
if (isFirstMessage) {
const systemMessage = {
role: 'user',
parts: [
{
text:
'<start_of_turn>user\n' +
systemInstruction +
'<end_of_turn>\n' +
'<start_of_turn>user\n' +
messageContents.parts[0].text +
'<end_of_turn>'
}
]
}
messageContents.parts = systemMessage.parts
] as Part[]
if (messageContents && messageContents.parts) {
messageContents.parts[0] = systemMessage[0]
}
}
}
const start_time_millsec = new Date().getTime()
const { abortController, cleanup } = this.createAbortController(userLastMessage?.id)
const { signal } = abortController
const start_time_millsec = new Date().getTime()
const { cleanup, abortController } = this.createAbortController(userLastMessage?.id, true)
const signalProxy = {
_originalSignal: abortController.signal,
addEventListener: (eventName: string, listener: () => void) => {
if (eventName === 'abort') {
abortController.signal.addEventListener('abort', listener)
}
},
removeEventListener: (eventName: string, listener: () => void) => {
if (eventName === 'abort') {
abortController.signal.removeEventListener('abort', listener)
}
},
get aborted() {
return abortController.signal.aborted
}
}
if (!streamOutput) {
const response = await chat.sendMessage({
message: messageContents as PartUnion,
config: {
...generateContentConfig,
httpOptions: {
signal: signalProxy as any
}
}
})
const time_completion_millsec = new Date().getTime() - start_time_millsec
onChunk({
text: response.text,
usage: {
prompt_tokens: response.usageMetadata?.promptTokenCount || 0,
thoughts_tokens: response.usageMetadata?.thoughtsTokenCount || 0,
completion_tokens: response.usageMetadata?.candidatesTokenCount || 0,
total_tokens: response.usageMetadata?.totalTokenCount || 0
},
metrics: {
completion_tokens: response.usageMetadata?.candidatesTokenCount,
time_completion_millsec,
time_first_token_millsec: 0
},
search: response.candidates?.[0]?.groundingMetadata
})
return
}
const userMessagesStream = await chat.sendMessageStream({
message: messageContents as PartUnion,
config: {
...generateContentConfig,
httpOptions: {
signal: signalProxy as any
}
}
})
let time_first_token_millsec = 0
const processToolUses = async (content: string, idx: number) => {
const toolResults = await parseAndCallTools(
content,
toolResponses,
onChunk,
idx,
mcpToolCallResponseToGeminiMessage,
mcpTools,
isVisionModel(model)
)
if (toolResults && toolResults.length > 0) {
history.push(messageContents)
const newChat = this.sdk.chats.create({
model: model.id,
config: generateContentConfig,
history: history as Content[]
})
const newStream = await newChat.sendMessageStream({
message: flatten(toolResults.map((ts) => (ts as Content).parts)) as PartUnion,
config: {
...generateContentConfig,
httpOptions: {
signal: signalProxy as any
}
}
})
await processStream(newStream, idx + 1)
}
}
const processStream = async (stream: AsyncGenerator<GenerateContentResponse>, idx: number) => {
let content = ''
for await (const chunk of stream) {
if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) break
if (time_first_token_millsec == 0) {
time_first_token_millsec = new Date().getTime() - start_time_millsec
}
if (!streamOutput) {
const { response } = await chat.sendMessage(messageContents.parts, { signal })
const time_completion_millsec = new Date().getTime() - start_time_millsec
if (chunk.text !== undefined) {
content += chunk.text
}
await processToolUses(content, idx)
const generateImage = this.processGeminiImageResponse(chunk)
onChunk({
text: response.candidates?.[0].content.parts[0].text,
text: chunk.text !== undefined ? chunk.text : '',
usage: {
prompt_tokens: response.usageMetadata?.promptTokenCount || 0,
completion_tokens: response.usageMetadata?.candidatesTokenCount || 0,
total_tokens: response.usageMetadata?.totalTokenCount || 0
prompt_tokens: chunk.usageMetadata?.promptTokenCount || 0,
completion_tokens: chunk.usageMetadata?.candidatesTokenCount || 0,
thoughts_tokens: chunk.usageMetadata?.thoughtsTokenCount || 0,
total_tokens: chunk.usageMetadata?.totalTokenCount || 0
},
metrics: {
completion_tokens: response.usageMetadata?.candidatesTokenCount,
completion_tokens: chunk.usageMetadata?.candidatesTokenCount,
time_completion_millsec,
time_first_token_millsec: 0
time_first_token_millsec
},
search: response.candidates?.[0]?.groundingMetadata
search: chunk.candidates?.[0]?.groundingMetadata,
mcpToolResponse: toolResponses,
generateImage: generateImage
})
return
}
const userMessagesStream = await chat.sendMessageStream(messageContents.parts, { signal })
let time_first_token_millsec = 0
const processToolUses = async (content: string, idx: number) => {
const toolResults = await parseAndCallTools(
content,
toolResponses,
onChunk,
idx,
mcpToolCallResponseToGeminiMessage,
mcpTools,
isVisionModel(model)
)
if (toolResults && toolResults.length > 0) {
history.push(messageContents)
const newChat = geminiModel.startChat({ history })
const newStream = await newChat.sendMessageStream(flatten(toolResults.map((ts) => (ts as Content).parts)), {
signal
})
await processStream(newStream, idx + 1)
}
}
const processStream = async (stream: GenerateContentStreamResult, idx: number) => {
let content = ''
for await (const chunk of stream.stream) {
if (window.keyv.get(EVENT_NAMES.CHAT_COMPLETION_PAUSED)) break
if (time_first_token_millsec == 0) {
time_first_token_millsec = new Date().getTime() - start_time_millsec
}
const time_completion_millsec = new Date().getTime() - start_time_millsec
content += chunk.text()
processToolUses(content, idx)
onChunk({
text: chunk.text(),
usage: {
prompt_tokens: chunk.usageMetadata?.promptTokenCount || 0,
completion_tokens: chunk.usageMetadata?.candidatesTokenCount || 0,
total_tokens: chunk.usageMetadata?.totalTokenCount || 0
},
metrics: {
completion_tokens: chunk.usageMetadata?.candidatesTokenCount,
time_completion_millsec,
time_first_token_millsec
},
search: chunk.candidates?.[0]?.groundingMetadata,
mcpToolResponse: toolResponses
})
}
}
await processStream(userMessagesStream, 0).finally(cleanup)
}
await processStream(userMessagesStream, 0).finally(cleanup)
}
/**
@ -372,39 +455,51 @@ export default class GeminiProvider extends BaseProvider {
* @param onResponse - The onResponse callback
* @returns The translated message
*/
async translate(message: Message, assistant: Assistant, onResponse?: (text: string) => void) {
public async translate(message: Message, assistant: Assistant, onResponse?: (text: string) => void) {
const defaultModel = getDefaultModel()
const { maxTokens } = getAssistantSettings(assistant)
const model = assistant.model || defaultModel
const geminiModel = this.sdk.getGenerativeModel(
{
model: model.id,
...(isGemmaModel(model) ? {} : { systemInstruction: assistant.prompt }),
generationConfig: {
maxOutputTokens: maxTokens,
temperature: assistant?.settings?.temperature
}
},
this.requestOptions
)
const content =
isGemmaModel(model) && assistant.prompt
? `<start_of_turn>user\n${assistant.prompt}<end_of_turn>\n<start_of_turn>user\n${message.content}<end_of_turn>`
: message.content
if (!onResponse) {
const { response } = await geminiModel.generateContent(content)
return response.text()
const response = await this.sdk.models.generateContent({
model: model.id,
config: {
maxOutputTokens: maxTokens,
temperature: assistant?.settings?.temperature,
systemInstruction: isGemmaModel(model) ? undefined : assistant.prompt
},
contents: [
{
role: 'user',
parts: [{ text: content }]
}
]
})
return response.text || ''
}
const response = await geminiModel.generateContentStream(content)
const response = await this.sdk.models.generateContentStream({
model: model.id,
config: {
maxOutputTokens: maxTokens,
temperature: assistant?.settings?.temperature,
systemInstruction: isGemmaModel(model) ? undefined : assistant.prompt
},
contents: [
{
role: 'user',
parts: [{ text: content }]
}
]
})
let text = ''
for await (const chunk of response.stream) {
text += chunk.text()
for await (const chunk of response) {
text += chunk.text
onResponse(text)
}
@ -442,25 +537,24 @@ export default class GeminiProvider extends BaseProvider {
content: userMessageContent
}
const geminiModel = this.sdk.getGenerativeModel(
{
model: model.id,
...(isGemmaModel(model) ? {} : { systemInstruction: systemMessage.content }),
generationConfig: {
temperature: assistant?.settings?.temperature
}
},
this.requestOptions
)
const chat = await geminiModel.startChat()
const content = isGemmaModel(model)
? `<start_of_turn>user\n${systemMessage.content}<end_of_turn>\n<start_of_turn>user\n${userMessage.content}<end_of_turn>`
: userMessage.content
const { response } = await chat.sendMessage(content)
const response = await this.sdk.models.generateContent({
model: model.id,
config: {
systemInstruction: isGemmaModel(model) ? undefined : systemMessage.content
},
contents: [
{
role: 'user',
parts: [{ text: content }]
}
]
})
return removeSpecialCharactersForTopicName(response.text())
return removeSpecialCharactersForTopicName(response.text || '')
}
/**
@ -471,24 +565,23 @@ export default class GeminiProvider extends BaseProvider {
*/
public async generateText({ prompt, content }: { prompt: string; content: string }): Promise<string> {
const model = getDefaultModel()
const systemMessage = { role: 'system', content: prompt }
const geminiModel = this.sdk.getGenerativeModel(
{
model: model.id,
...(isGemmaModel(model) ? {} : { systemInstruction: systemMessage.content })
},
this.requestOptions
)
const chat = await geminiModel.startChat()
const messageContent = isGemmaModel(model)
const MessageContent = isGemmaModel(model)
? `<start_of_turn>user\n${prompt}<end_of_turn>\n<start_of_turn>user\n${content}<end_of_turn>`
: content
const response = await this.sdk.models.generateContent({
model: model.id,
config: {
systemInstruction: isGemmaModel(model) ? undefined : prompt
},
contents: [
{
role: 'user',
parts: [{ text: MessageContent }]
}
]
})
const { response } = await chat.sendMessage(messageContent)
return response.text()
return response.text || ''
}
/**
@ -518,24 +611,28 @@ export default class GeminiProvider extends BaseProvider {
content: messages.map((m) => m.content).join('\n')
}
const geminiModel = this.sdk.getGenerativeModel(
{
model: model.id,
systemInstruction: systemMessage.content,
generationConfig: {
temperature: assistant?.settings?.temperature
const content = isGemmaModel(model)
? `<start_of_turn>user\n${systemMessage.content}<end_of_turn>\n<start_of_turn>user\n${userMessage.content}<end_of_turn>`
: userMessage.content
const response = await this.sdk.models.generateContent({
model: model.id,
config: {
systemInstruction: isGemmaModel(model) ? undefined : systemMessage.content,
temperature: assistant?.settings?.temperature,
httpOptions: {
timeout: 20 * 1000
}
},
{
...this.requestOptions,
timeout: 20 * 1000
}
)
contents: [
{
role: 'user',
parts: [{ text: content }]
}
]
})
const chat = await geminiModel.startChat()
const { response } = await chat.sendMessage(userMessage.content)
return response.text()
return response.text || ''
}
/**
@ -546,144 +643,13 @@ export default class GeminiProvider extends BaseProvider {
return []
}
/**
 * Generate images (experimental)
 * @param messages - The messages
 * @param assistant - The assistant
 * @param onChunk - The onChunk callback
 * @param onFilterMessages - The onFilterMessages callback
 * @returns Promise<void>
 */
private async generateImageExp({ messages, assistant, onChunk, onFilterMessages }: CompletionsParams): Promise<void> {
const defaultModel = getDefaultModel()
const model = assistant.model || defaultModel
const { contextCount, streamOutput, maxTokens } = getAssistantSettings(assistant)
const userMessages = filterUserRoleStartMessages(filterContextMessages(takeRight(messages, contextCount + 2)))
onFilterMessages(userMessages)
const userLastMessage = userMessages.pop()
if (!userLastMessage) {
throw new Error('No user message found')
}
const history: Content[] = []
for (const message of userMessages) {
history.push(await this.getMessageContents(message))
}
const userLastMessageContent = await this.getMessageContents(userLastMessage)
const allContents = [...history, userLastMessageContent]
let contents: ContentListUnion = allContents.length > 0 ? (allContents as ContentListUnion) : []
contents = await this.addImageFileToContents(userLastMessage, contents)
if (!streamOutput) {
const response = await this.callGeminiGenerateContent(model.id, contents, maxTokens)
const { isValid, message } = this.isValidGeminiResponse(response)
if (!isValid) {
throw new Error(`Gemini API error: ${message}`)
}
this.processGeminiImageResponse(response, onChunk)
return
}
const response = await this.callGeminiGenerateContentStream(model.id, contents, maxTokens)
for await (const chunk of response) {
this.processGeminiImageResponse(chunk, onChunk)
}
}
/**
 * Add an image file from the message to the contents
 * @param message - The message
 * @param contents - The contents
 * @returns The contents with the image appended
 */
private async addImageFileToContents(message: Message, contents: ContentListUnion): Promise<ContentListUnion> {
if (message.files && message.files.length > 0) {
const file = message.files[0]
const fileContent = await window.api.file.base64Image(file.id + file.ext)
if (fileContent && fileContent.base64) {
const contentsArray = Array.isArray(contents) ? contents : [contents]
return [...contentsArray, createPartFromBase64(fileContent.base64, fileContent.mime)]
}
}
return contents
}
/**
 * Call the Gemini API to generate content
 * @param modelId - The model ID
 * @param contents - The contents
 * @returns The generated content response
 */
private async callGeminiGenerateContent(
modelId: string,
contents: ContentListUnion,
maxTokens?: number
): Promise<GenerateContentResponse> {
try {
return await this.imageSdk.models.generateContent({
model: modelId,
contents: contents,
config: {
responseModalities: ['Text', 'Image'],
responseMimeType: 'text/plain',
maxOutputTokens: maxTokens
}
})
} catch (error) {
console.error('Gemini API error:', error)
throw error
}
}
private async callGeminiGenerateContentStream(
modelId: string,
contents: ContentListUnion,
maxTokens?: number
): Promise<AsyncGenerator<GenerateContentResponse>> {
try {
return await this.imageSdk.models.generateContentStream({
model: modelId,
contents: contents,
config: {
responseModalities: ['Text', 'Image'],
responseMimeType: 'text/plain',
maxOutputTokens: maxTokens
}
})
} catch (error) {
console.error('Gemini API error:', error)
throw error
}
}
/**
 * Check whether the Gemini response is valid
 * @param response - The Gemini response
 * @returns Whether the response is valid, plus the finish reason message
 */
private isValidGeminiResponse(response: GenerateContentResponse): { isValid: boolean; message: string } {
return {
isValid: response?.candidates?.[0]?.finishReason === FinishReason.STOP ? true : false,
message: response?.candidates?.[0]?.finishReason || ''
}
}
/**
 * Process the Gemini image response
 * @param response - The Gemini response
 * @param onChunk - The onChunk callback
 */
private processGeminiImageResponse(response: any, onChunk: (chunk: ChunkCallbackData) => void): void {
const parts = response.candidates[0].content.parts
private processGeminiImageResponse(chunk: GenerateContentResponse): { type: 'base64'; images: string[] } | undefined {
const parts = chunk.candidates?.[0]?.content?.parts
if (!parts) {
return
}
@ -695,31 +661,13 @@ export default class GeminiProvider extends BaseProvider {
return null
}
const dataPrefix = `data:${part.inlineData.mimeType || 'image/png'};base64,`
return part.inlineData.data.startsWith('data:') ? part.inlineData.data : dataPrefix + part.inlineData.data
return part.inlineData.data?.startsWith('data:') ? part.inlineData.data : dataPrefix + part.inlineData.data
})
// Extract the text data
const text = parts
.filter((part: Part) => part.text !== undefined)
.map((part: Part) => part.text)
.join('')
// Return the result
onChunk({
text,
generateImage: {
type: 'base64',
images
},
usage: {
prompt_tokens: response.usageMetadata?.promptTokenCount || 0,
completion_tokens: response.usageMetadata?.candidatesTokenCount || 0,
total_tokens: response.usageMetadata?.totalTokenCount || 0
},
metrics: {
completion_tokens: response.usageMetadata?.candidatesTokenCount
}
})
return {
type: 'base64',
images: images.filter((image) => image !== null)
}
}
/**
@ -732,18 +680,16 @@ export default class GeminiProvider extends BaseProvider {
return { valid: false, error: new Error('No model found') }
}
const body = {
model: model.id,
messages: [{ role: 'user', content: 'hi' }],
max_tokens: 100,
stream: false
}
try {
const geminiModel = this.sdk.getGenerativeModel({ model: body.model }, this.requestOptions)
const result = await geminiModel.generateContent(body.messages[0].content)
const result = await this.sdk.models.generateContent({
model: model.id,
contents: [{ role: 'user', parts: [{ text: 'hi' }] }],
config: {
maxOutputTokens: 100
}
})
return {
valid: !isEmpty(result.response.text()),
valid: !isEmpty(result.text),
error: null
}
} catch (error: any) {
@ -785,7 +731,10 @@ export default class GeminiProvider extends BaseProvider {
* @returns The embedding dimensions
*/
public async getEmbeddingDimensions(model: Model): Promise<number> {
const data = await this.sdk.getGenerativeModel({ model: model.id }, this.requestOptions).embedContent('hi')
return data.embedding.values.length
const data = await this.sdk.models.embedContent({
model: model.id,
contents: [{ role: 'user', parts: [{ text: 'hi' }] }]
})
return data.embeddings?.[0]?.values?.length || 0
}
}

View File

@ -19,6 +19,7 @@ import {
filterEmptyMessages,
filterUserRoleStartMessages
} from '@renderer/services/MessagesService'
import { processReqMessages } from '@renderer/services/ModelMessageService'
import store from '@renderer/store'
import {
Assistant,
@ -45,7 +46,7 @@ import {
import { CompletionsParams } from '.'
import BaseProvider from './BaseProvider'
type ReasoningEffort = 'high' | 'medium' | 'low'
type ReasoningEffort = 'low' | 'medium' | 'high'
export default class OpenAIProvider extends BaseProvider {
private sdk: OpenAI
@ -293,7 +294,7 @@ export default class OpenAIProvider extends BaseProvider {
* @returns True if the model is an OpenAI reasoning model, false otherwise
*/
private isOpenAIReasoning(model: Model) {
return model.id.startsWith('o1') || model.id.startsWith('o3')
return model.id.startsWith('o1') || model.id.startsWith('o3') || model.id.startsWith('o4')
}
/**
@ -332,12 +333,7 @@ export default class OpenAIProvider extends BaseProvider {
userMessages.push(await this.getMessageParam(message, model))
}
const isOpenAIReasoning = this.isOpenAIReasoning(model)
const isSupportStreamOutput = () => {
if (isOpenAIReasoning) {
return false
}
return streamOutput
}
@ -378,13 +374,18 @@ export default class OpenAIProvider extends BaseProvider {
let time_first_content_millsec = 0
const start_time_millsec = new Date().getTime()
const lastUserMessage = _messages.findLast((m) => m.role === 'user')
const { abortController, cleanup, signalPromise } = this.createAbortController(lastUserMessage?.id, true)
const { signal } = abortController
await this.checkIsCopilot()
const reqMessages: ChatCompletionMessageParam[] = [systemMessage, ...userMessages].filter(
Boolean
) as ChatCompletionMessageParam[]
// Do not send systemMessage when its content is empty
let reqMessages: ChatCompletionMessageParam[]
if (!systemMessage.content) {
reqMessages = [...userMessages]
} else {
reqMessages = [systemMessage, ...userMessages].filter(Boolean) as ChatCompletionMessageParam[]
}
const toolResponses: MCPToolResponse[] = []
let firstChunk = true
@ -505,6 +506,9 @@ export default class OpenAIProvider extends BaseProvider {
await processToolUses(content, idx)
}
// console.log('[before] reqMessages', reqMessages)
reqMessages = processReqMessages(model, reqMessages)
// console.log('[after] reqMessages', reqMessages)
const stream = await this.sdk.chat.completions
// @ts-ignore key is not typed
.create(

View File

@ -1,4 +1,4 @@
import type { GroundingMetadata } from '@google/generative-ai'
import type { GroundingMetadata } from '@google/genai'
import BaseProvider from '@renderer/providers/AiProvider/BaseProvider'
import ProviderFactory from '@renderer/providers/AiProvider/ProviderFactory'
import type {
@ -11,14 +11,15 @@ import type {
Metrics,
Model,
Provider,
Suggestion
Suggestion,
Usage
} from '@renderer/types'
import OpenAI from 'openai'
export interface ChunkCallbackData {
text?: string
reasoning_content?: string
usage?: OpenAI.Completions.CompletionUsage
usage?: Usage
metrics?: Metrics
// Zhipu web search
webSearch?: any[]

View File

@ -31,11 +31,18 @@ export default class ExaProvider extends BaseWebSearchProvider {
return {
query: response.autopromptString,
results: response.results.map((result) => ({
title: result.title || 'No title',
content: result.text || '',
url: result.url || ''
}))
results: response.results.slice(0, websearch.maxResults).map((result) => {
let content = result.text || ''
if (websearch.contentLimit && content.length > websearch.contentLimit) {
content = content.slice(0, websearch.contentLimit) + '...'
}
return {
title: result.title || 'No title',
content: content,
url: result.url || ''
}
})
}
} catch (error) {
console.error('Exa search failed:', error)
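Illustrative sketch, not part of the diff: the same slice-and-ellipsis truncation now appears in the Exa, SearxNG and Tavily providers; a shared helper such as the hypothetical one below would express the rule in one place.
const truncateContent = (content: string, contentLimit?: number): string =>
  contentLimit && content.length > contentLimit ? content.slice(0, contentLimit) + '...' : content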

View File

@ -44,12 +44,8 @@ export default class LocalSearchProvider extends BaseWebSearchProvider {
const fetchPromises = validItems.map(async (item) => {
// console.log(`Fetching content for ${item.url}...`)
const result = await fetchWebContent(item.url, 'markdown', this.provider.usingBrowser)
if (
this.provider.contentLimit &&
this.provider.contentLimit != -1 &&
result.content.length > this.provider.contentLimit
) {
result.content = result.content.slice(0, this.provider.contentLimit) + '...'
if (websearch.contentLimit && result.content.length > websearch.contentLimit) {
result.content = result.content.slice(0, websearch.contentLimit) + '...'
}
return result
})

View File

@ -2,6 +2,7 @@ import { SearxngClient } from '@agentic/searxng'
import { WebSearchState } from '@renderer/store/websearch'
import { WebSearchProvider, WebSearchResponse } from '@renderer/types'
import axios from 'axios'
import ky from 'ky'
import BaseWebSearchProvider from './BaseWebSearchProvider'
@ -9,6 +10,8 @@ export default class SearxngProvider extends BaseWebSearchProvider {
private searxng: SearxngClient
private engines: string[] = []
private readonly apiHost: string
private readonly basicAuthUsername?: string
private readonly basicAuthPassword?: string
private isInitialized = false
constructor(provider: WebSearchProvider) {
@ -16,9 +19,22 @@ export default class SearxngProvider extends BaseWebSearchProvider {
if (!provider.apiHost) {
throw new Error('API host is required for SearxNG provider')
}
this.apiHost = provider.apiHost
this.basicAuthUsername = provider.basicAuthUsername
this.basicAuthPassword = provider.basicAuthPassword ? provider.basicAuthPassword : ''
try {
this.searxng = new SearxngClient({ apiBaseUrl: this.apiHost })
// `ky` does not support basic auth directly
const headers = this.basicAuthUsername
? {
Authorization: `Basic ` + btoa(`${this.basicAuthUsername}:${this.basicAuthPassword}`)
}
: undefined
this.searxng = new SearxngClient({
apiBaseUrl: this.apiHost,
ky: ky.create({ headers })
})
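Illustrative sketch, not part of the diff: the Authorization value assembled above is plain HTTP Basic auth, i.e. the base64 of "username:password". With hypothetical credentials:
const basicAuthHeader = (username: string, password: string): string =>
  'Basic ' + btoa(`${username}:${password}`)
console.log(basicAuthHeader('searx', 'secret')) // value sent in the Authorization header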
} catch (error) {
throw new Error(
`Failed to initialize SearxNG client: ${error instanceof Error ? error.message : 'Unknown error'}`
@ -29,9 +45,16 @@ export default class SearxngProvider extends BaseWebSearchProvider {
private async initEngines(): Promise<void> {
try {
console.log(`Initializing SearxNG with API host: ${this.apiHost}`)
const auth = this.basicAuthUsername
? {
username: this.basicAuthUsername,
password: this.basicAuthPassword ? this.basicAuthPassword : ''
}
: undefined
const response = await axios.get(`${this.apiHost}/config`, {
timeout: 5000,
validateStatus: (status) => status === 200 // only accept a 200 status code
validateStatus: (status) => status === 200, // only accept a 200 status code
auth
})
if (!response.data) {
@ -92,9 +115,14 @@ export default class SearxngProvider extends BaseWebSearchProvider {
return {
query: result.query,
results: result.results.slice(0, websearch.maxResults).map((result) => {
let content = result.content || ''
if (websearch.contentLimit && content.length > websearch.contentLimit) {
content = content.slice(0, websearch.contentLimit) + '...'
}
return {
title: result.title || 'No title',
content: result.content || '',
content: content,
url: result.url || ''
}
})

View File

@ -27,11 +27,18 @@ export default class TavilyProvider extends BaseWebSearchProvider {
})
return {
query: result.query,
results: result.results.map((result) => ({
title: result.title || 'No title',
content: result.content || '',
url: result.url || ''
}))
results: result.results.slice(0, websearch.maxResults).map((result) => {
let content = result.content || ''
if (websearch.contentLimit && content.length > websearch.contentLimit) {
content = content.slice(0, websearch.contentLimit) + '...'
}
return {
title: result.title || 'No title',
content: content,
url: result.url || ''
}
})
}
} catch (error) {
console.error('Tavily search failed:', error)

View File

@ -8,9 +8,18 @@ import { SEARCH_SUMMARY_PROMPT } from '@renderer/config/prompts'
import i18n from '@renderer/i18n'
import store from '@renderer/store'
import { setGenerating } from '@renderer/store/runtime'
import { Assistant, MCPTool, Message, Model, Provider, Suggestion, WebSearchResponse } from '@renderer/types'
import {
Assistant,
KnowledgeReference,
MCPTool,
Message,
Model,
Provider,
Suggestion,
WebSearchResponse
} from '@renderer/types'
import { formatMessageError, isAbortError } from '@renderer/utils/error'
import { fetchWebContents } from '@renderer/utils/fetch'
import { extractInfoFromXML, ExtractResults } from '@renderer/utils/extract'
import { withGenerateImage } from '@renderer/utils/formats'
import {
cleanLinkCommas,
@ -26,13 +35,13 @@ import { cloneDeep, findLast, isEmpty } from 'lodash'
import AiProvider from '../providers/AiProvider'
import {
getAssistantProvider,
getDefaultAssistant,
getDefaultModel,
getProviderByModel,
getTopNamingModel,
getTranslateModel
} from './AssistantService'
import { EVENT_NAMES, EventEmitter } from './EventService'
import { processKnowledgeSearch } from './KnowledgeService'
import { filterContextMessages, filterMessages, filterUsefulMessages } from './MessagesService'
import { estimateMessagesUsage } from './TokenService'
import WebSearchService from './WebSearchService'
@ -52,77 +61,99 @@ export async function fetchChatCompletion({
const webSearchProvider = WebSearchService.getWebSearchProvider()
const AI = new AiProvider(provider)
const searchTheWeb = async () => {
if (WebSearchService.isWebSearchEnabled() && assistant.enableWebSearch && assistant.model) {
let query = ''
let webSearchResponse: WebSearchResponse = {
results: []
}
const webSearchParams = getOpenAIWebSearchParams(assistant, assistant.model)
if (isEmpty(webSearchParams) && !isOpenAIWebSearch(assistant.model)) {
const lastMessage = findLast(messages, (m) => m.role === 'user')
const lastAnswer = findLast(messages, (m) => m.role === 'assistant')
const hasKnowledgeBase = !isEmpty(lastMessage?.knowledgeBaseIds)
const lastUserMessage = findLast(messages, (m) => m.role === 'user')
const lastAnswer = findLast(messages, (m) => m.role === 'assistant')
const hasKnowledgeBase = !isEmpty(lastUserMessage?.knowledgeBaseIds)
if (!lastUserMessage) {
return
}
if (lastMessage) {
if (hasKnowledgeBase) {
window.message.info({
content: i18n.t('message.ignore.knowledge.base'),
key: 'knowledge-base-no-match-info'
})
}
// Update the message status to searching
onResponse({ ...message, status: 'searching' })
try {
// Wait for keyword generation to complete
const searchSummaryAssistant = getDefaultAssistant()
searchSummaryAssistant.model = assistant.model || getDefaultModel()
searchSummaryAssistant.prompt = SEARCH_SUMMARY_PROMPT
// Use search-enhanced mode if it is enabled
if (WebSearchService.isEnhanceModeEnabled()) {
const keywords = await fetchSearchSummary({
messages: lastAnswer ? [lastAnswer, lastMessage] : [lastMessage],
assistant: searchSummaryAssistant
})
try {
const result = WebSearchService.extractInfoFromXML(keywords || '')
if (result.question === 'not_needed') {
// Return early if no search is needed
console.log('No need to search')
return
} else if (result.question === 'summarize' && result.links && result.links.length > 0) {
const contents = await fetchWebContents(result.links)
webSearchResponse = {
query: 'summaries',
results: contents
}
} else {
query = result.question
webSearchResponse = await WebSearchService.search(webSearchProvider, query)
}
} catch (error) {
console.error('Failed to extract info from XML:', error)
}
} else {
query = lastMessage.content
}
// Process the search results
message.metadata = {
...message.metadata,
webSearch: webSearchResponse
}
window.keyv.set(`web-search-${lastMessage?.id}`, webSearchResponse)
} catch (error) {
console.error('Web search failed:', error)
}
// Keyword extraction for web search / knowledge base
const extract = async () => {
const summaryAssistant = {
...assistant,
prompt: SEARCH_SUMMARY_PROMPT
}
const keywords = await fetchSearchSummary({
messages: lastAnswer ? [lastAnswer, lastUserMessage] : [lastUserMessage],
assistant: summaryAssistant
})
try {
return extractInfoFromXML(keywords || '')
} catch (e: any) {
console.error('extract error', e)
return {
websearch: {
question: [lastUserMessage.content]
},
knowledge: {
question: [lastUserMessage.content]
}
} as ExtractResults
}
}
let extractResults: ExtractResults
if (assistant.enableWebSearch || hasKnowledgeBase) {
extractResults = await extract()
}
const searchTheWeb = async () => {
// Check whether a web search is needed
const shouldSearch =
extractResults?.websearch &&
WebSearchService.isWebSearchEnabled() &&
assistant.enableWebSearch &&
assistant.model &&
extractResults.websearch.question[0] !== 'not_needed'
if (!shouldSearch) return
onResponse({ ...message, status: 'searching' })
// Check whether OpenAI's web search is being used
const webSearchParams = getOpenAIWebSearchParams(assistant, assistant.model!)
if (!isEmpty(webSearchParams) || isOpenAIWebSearch(assistant.model!)) return
try {
const webSearchResponse: WebSearchResponse = await WebSearchService.processWebsearch(
webSearchProvider,
extractResults
)
// console.log('webSearchResponse', webSearchResponse)
// Process the search results
message.metadata = {
...message.metadata,
webSearch: webSearchResponse
}
window.keyv.set(`web-search-${lastUserMessage?.id}`, webSearchResponse)
} catch (error) {
console.error('Web search failed:', error)
}
}
// --- Knowledge base search ---
const searchKnowledgeBase = async () => {
const shouldSearch =
hasKnowledgeBase && extractResults.knowledge && extractResults.knowledge.question[0] !== 'not_needed'
if (!shouldSearch) return
onResponse({ ...message, status: 'searching' })
try {
const knowledgeReferences: KnowledgeReference[] = await processKnowledgeSearch(
extractResults,
lastUserMessage.knowledgeBaseIds
)
console.log('knowledgeReferences', knowledgeReferences)
// Process the search results
message.metadata = {
...message.metadata,
knowledge: knowledgeReferences
}
window.keyv.set(`knowledge-search-${lastUserMessage?.id}`, knowledgeReferences)
} catch (error) {
console.error('Knowledge base search failed:', error)
window.keyv.set(`knowledge-search-${lastUserMessage?.id}`, [])
}
}
@ -130,10 +161,8 @@ export async function fetchChatCompletion({
let _messages: Message[] = []
let isFirstChunk = true
// Search the web and knowledge bases
await searchTheWeb()
await Promise.all([searchTheWeb(), searchKnowledgeBase()])
const lastUserMessage = findLast(messages, (m) => m.role === 'user')
// Get MCP tools
const mcpTools: MCPTool[] = []
const enabledMCPs = lastUserMessage?.enabledMCPs
@ -282,6 +311,7 @@ export async function fetchChatCompletion({
}
}
// console.log('message', message)
// Emit chat completion event
EventEmitter.emit(EVENT_NAMES.RECEIVE_MESSAGE, message)
onResponse(message)

View File

@ -82,15 +82,17 @@ export async function backupToWebdav({
store.dispatch(setWebDAVSyncState({ syncing: true, lastSyncError: null }))
const { webdavHost, webdavUser, webdavPass, webdavPath } = store.getState().settings
const { webdavHost, webdavUser, webdavPass, webdavPath, webdavMaxBackups } = store.getState().settings
let deviceType = 'unknown'
let hostname = 'unknown'
try {
deviceType = (await window.api.system.getDeviceType()) || 'unknown'
hostname = (await window.api.system.getHostname()) || 'unknown'
} catch (error) {
Logger.error('[Backup] Failed to get device type:', error)
Logger.error('[Backup] Failed to get device type or hostname:', error)
}
const timestamp = dayjs().format('YYYYMMDDHHmmss')
const backupFileName = customFileName || `cherry-studio.${timestamp}.${deviceType}.zip`
const backupFileName = customFileName || `cherry-studio.${timestamp}.${hostname}.${deviceType}.zip`
const finalFileName = backupFileName.endsWith('.zip') ? backupFileName : `${backupFileName}.zip`
const backupData = await getBackupData()
@ -112,6 +114,47 @@ export async function backupToWebdav({
if (showMessage && !autoBackupProcess) {
window.message.success({ content: i18n.t('message.backup.success'), key: 'backup' })
}
// Clean up old backup files
if (webdavMaxBackups > 0) {
try {
// Get all backup files
const files = await window.api.backup.listWebdavFiles({
webdavHost,
webdavUser,
webdavPass,
webdavPath
})
// Filter the backup files belonging to the current device
const currentDeviceFiles = files.filter((file) => {
// Check whether the file name contains the current device's identifiers
return file.fileName.includes(deviceType) && file.fileName.includes(hostname)
})
// If this device has more backups than the maximum to keep, delete the oldest files
if (currentDeviceFiles.length > webdavMaxBackups) {
// Files are already sorted by modification time in descending order, so the oldest are at the end
const filesToDelete = currentDeviceFiles.slice(webdavMaxBackups)
for (const file of filesToDelete) {
try {
await window.api.backup.deleteWebdavFile(file.fileName, {
webdavHost,
webdavUser,
webdavPass,
webdavPath
})
Logger.log(`[Backup] Deleted old backup file: ${file.fileName}`)
} catch (error) {
Logger.error(`[Backup] Failed to delete old backup file: ${file.fileName}`, error)
}
}
}
} catch (error) {
Logger.error('[Backup] Failed to clean up old backup files:', error)
}
}
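Illustrative sketch, not part of the diff: since the listed files are sorted newest-first, slice(webdavMaxBackups) selects exactly the surplus oldest backups. With made-up file names and a limit of 2:
const currentDeviceFiles = [
  'cherry-studio.20250419120000.host.mac.zip', // newest
  'cherry-studio.20250418120000.host.mac.zip',
  'cherry-studio.20250417120000.host.mac.zip',
  'cherry-studio.20250416120000.host.mac.zip' // oldest
]
const maxBackups = 2
console.log(currentDeviceFiles.slice(maxBackups)) // the two oldest files are deleted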
} else {
// if auto backup process, throw error
if (autoBackupProcess) {

View File

@ -3,8 +3,9 @@ import { DEFAULT_KNOWLEDGE_DOCUMENT_COUNT, DEFAULT_KNOWLEDGE_THRESHOLD } from '@
import { getEmbeddingMaxContext } from '@renderer/config/embedings'
import AiProvider from '@renderer/providers/AiProvider'
import store from '@renderer/store'
import { FileType, KnowledgeBase, KnowledgeBaseParams, KnowledgeReference, Message } from '@renderer/types'
import { isEmpty, take } from 'lodash'
import { FileType, KnowledgeBase, KnowledgeBaseParams, KnowledgeReference } from '@renderer/types'
import { ExtractResults } from '@renderer/utils/extract'
import { isEmpty } from 'lodash'
import { getProviderByModel } from './AssistantService'
import FileManager from './FileManager'
@ -86,66 +87,96 @@ export const getKnowledgeSourceUrl = async (item: ExtractChunkData & { file: Fil
return item.metadata.source
}
export const getKnowledgeBaseReference = async (base: KnowledgeBase, message: Message) => {
const searchResults = await window.api.knowledgeBase
.search({
search: message.content,
base: getKnowledgeBaseParams(base)
})
.then((results) =>
results.filter((item) => {
const threshold = base.threshold || DEFAULT_KNOWLEDGE_THRESHOLD
return item.score >= threshold
})
)
let rerankResults = searchResults
if (base.rerankModel) {
rerankResults = await window.api.knowledgeBase.rerank({
search: message.content,
base: getKnowledgeBaseParams(base),
results: searchResults
})
}
const processdResults = await Promise.all(
rerankResults.map(async (item) => {
const file = await getFileFromUrl(item.metadata.source)
return { ...item, file }
})
)
const documentCount = base.documentCount || DEFAULT_KNOWLEDGE_DOCUMENT_COUNT
const references = await Promise.all(
take(processdResults, documentCount).map(async (item, index) => {
const baseItem = base.items.find((i) => i.uniqueId === item.metadata.uniqueLoaderId)
return {
id: index + 1,
content: item.pageContent,
sourceUrl: await getKnowledgeSourceUrl(item),
type: baseItem?.type
} as KnowledgeReference
})
)
return references
}
export const getKnowledgeBaseReferences = async (message: Message) => {
if (isEmpty(message.knowledgeBaseIds) || isEmpty(message.content)) {
export const processKnowledgeSearch = async (
extractResults: ExtractResults,
knowledgeBaseIds: string[] | undefined
): Promise<KnowledgeReference[]> => {
if (
!extractResults.knowledge?.question ||
extractResults.knowledge.question.length === 0 ||
isEmpty(knowledgeBaseIds)
) {
console.log('No valid question found in extractResults.knowledge')
return []
}
const questions = extractResults.knowledge.question
const rewrite = extractResults.knowledge.rewrite
const bases = store.getState().knowledge.bases.filter((kb) => message.knowledgeBaseIds?.includes(kb.id))
const bases = store.getState().knowledge.bases.filter((kb) => knowledgeBaseIds?.includes(kb.id))
if (!bases || bases.length === 0) {
console.log('Skipping knowledge search: No matching knowledge bases found.')
return []
}
const referencesPromises = bases.map(async (base) => await getKnowledgeBaseReference(base, message))
const referencesPromises = bases.map(async (base) => {
try {
const baseParams = getKnowledgeBaseParams(base)
const documentCount = base.documentCount || DEFAULT_KNOWLEDGE_DOCUMENT_COUNT
const references = (await Promise.all(referencesPromises)).filter((result) => !isEmpty(result)).flat()
const allSearchResultsPromises = questions.map((question) =>
window.api.knowledgeBase
.search({
search: question,
base: baseParams
})
.then((results) =>
results.filter((item) => {
const threshold = base.threshold || DEFAULT_KNOWLEDGE_THRESHOLD
return item.score >= threshold
})
)
)
const allSearchResults = await Promise.all(allSearchResultsPromises)
const searchResults = Array.from(
new Map(allSearchResults.flat().map((item) => [item.metadata.uniqueId || item.pageContent, item])).values()
)
.sort((a, b) => b.score - a.score)
.slice(0, documentCount)
console.log(`Knowledge base ${base.name} search results:`, searchResults)
let rerankResults = searchResults
if (base.rerankModel && searchResults.length > 0) {
rerankResults = await window.api.knowledgeBase.rerank({
search: rewrite,
base: baseParams,
results: searchResults
})
}
const processdResults = await Promise.all(
rerankResults.map(async (item) => {
const file = await getFileFromUrl(item.metadata.source)
return { ...item, file }
})
)
const references = await Promise.all(
processdResults.map(async (item, index) => {
// const baseItem = base.items.find((i) => i.uniqueId === item.metadata.uniqueLoaderId)
return {
id: index + 1, // searching multiple bases can produce duplicate IDs
content: item.pageContent,
sourceUrl: await getKnowledgeSourceUrl(item),
type: 'file' // needs mapping: baseItem.type 'localPathLoader' -> 'file'
} as KnowledgeReference
})
)
return references
} catch (error) {
console.error(`Error searching knowledge base ${base.name}:`, error)
return []
}
})
const resultsPerBase = await Promise.all(referencesPromises)
const allReferencesRaw = resultsPerBase.flat().filter((ref): ref is KnowledgeReference => !!ref)
// Reassign IDs to the references
const references = allReferencesRaw.map((ref, index) => ({
...ref,
id: index + 1
}))
return references
}
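Illustrative sketch, not part of the diff: the Map-based deduplication above keeps one search result per uniqueId (a later duplicate overwrites an earlier one), then ranks by score and caps at documentCount. A standalone version with a hypothetical Hit type:
interface Hit { key: string; score: number }
const dedupTopN = (hits: Hit[], n: number): Hit[] =>
  Array.from(new Map(hits.map((h) => [h.key, h] as [string, Hit])).values())
    .sort((a, b) => b.score - a.score)
    .slice(0, n)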

View File

@ -0,0 +1,49 @@
import { Model } from '@renderer/types'
import { ChatCompletionMessageParam } from 'openai/resources'
export function processReqMessages(
model: Model,
reqMessages: ChatCompletionMessageParam[]
): ChatCompletionMessageParam[] {
if (!needStrictlyInterleaveUserAndAssistantMessages(model)) {
return reqMessages
}
return mergeSameRoleMessages(reqMessages)
}
function needStrictlyInterleaveUserAndAssistantMessages(model: Model) {
return model.id === 'deepseek-reasoner'
}
/**
* Merge successive messages with the same role
*/
function mergeSameRoleMessages(messages: ChatCompletionMessageParam[]): ChatCompletionMessageParam[] {
const split = '\n'
const processedMessages: ChatCompletionMessageParam[] = []
let currentGroup: ChatCompletionMessageParam[] = []
for (const message of messages) {
if (currentGroup.length === 0 || currentGroup[0].role === message.role) {
currentGroup.push(message)
} else {
// merge the current group and add to processed messages
processedMessages.push({
...currentGroup[0],
content: currentGroup.map((m) => m.content).join(split)
})
currentGroup = [message]
}
}
// process the last group
if (currentGroup.length > 0) {
processedMessages.push({
...currentGroup[0],
content: currentGroup.map((m) => m.content).join(split)
})
}
return processedMessages
}

View File

@ -1,6 +1,5 @@
import { Assistant, FileType, FileTypes, Message } from '@renderer/types'
import { Assistant, FileType, FileTypes, Message, Usage } from '@renderer/types'
import { flatten, takeRight } from 'lodash'
import { CompletionUsage } from 'openai/resources'
import { approximateTokenSize } from 'tokenx'
import { getAssistantSettings } from './AssistantService'
@ -52,7 +51,7 @@ export function estimateImageTokens(file: FileType) {
return Math.floor(file.size / 100)
}
export async function estimateMessageUsage(message: Message): Promise<CompletionUsage> {
export async function estimateMessageUsage(message: Message): Promise<Usage> {
let imageTokens = 0
if (message.files) {
@ -80,17 +79,17 @@ export async function estimateMessagesUsage({
}: {
assistant: Assistant
messages: Message[]
}): Promise<CompletionUsage> {
}): Promise<Usage> {
const outputMessage = messages.pop()!
const prompt_tokens = await estimateHistoryTokens(assistant, messages)
const { completion_tokens } = await estimateMessageUsage(outputMessage)
return {
prompt_tokens: await estimateHistoryTokens(assistant, messages),
prompt_tokens,
completion_tokens,
total_tokens: prompt_tokens + completion_tokens
} as CompletionUsage
} as Usage
}
export async function estimateHistoryTokens(assistant: Assistant, msgs: Message[]) {

View File

@ -1,8 +1,10 @@
import WebSearchEngineProvider from '@renderer/providers/WebSearchProvider'
import store from '@renderer/store'
import { setDefaultProvider, WebSearchState } from '@renderer/store/websearch'
import { WebSearchProvider, WebSearchResponse } from '@renderer/types'
import { WebSearchProvider, WebSearchResponse, WebSearchResult } from '@renderer/types'
import { hasObjectKey } from '@renderer/utils'
import { ExtractResults } from '@renderer/utils/extract'
import { fetchWebContents } from '@renderer/utils/fetch'
import dayjs from 'dayjs'
/**
@ -131,34 +133,46 @@ class WebSearchService {
}
}
/**
 * Extract information from text containing XML tags
 * @public
 * @param text Text containing XML tags
 * @returns The extracted question and optional links
 * @throws Throws an error if the question tag is missing
 */
public extractInfoFromXML(text: string): { question: string; links?: string[] } {
// Extract the content of the question tag
const questionMatch = text.match(/<question>([\s\S]*?)<\/question>/)
if (!questionMatch) {
throw new Error('Missing required <question> tag')
}
const question = questionMatch[1].trim()
public async processWebsearch(
webSearchProvider: WebSearchProvider,
extractResults: ExtractResults
): Promise<WebSearchResponse> {
try {
// Check whether websearch and its question list are valid
if (!extractResults.websearch?.question || extractResults.websearch.question.length === 0) {
console.log('No valid question found in extractResults.websearch')
return { results: [] }
}
// Extract the content of the links tag (optional)
const linksMatch = text.match(/<links>([\s\S]*?)<\/links>/)
const links = linksMatch
? linksMatch[1]
.trim()
.split('\n')
.map((link) => link.trim())
.filter((link) => link !== '')
: undefined
const questions = extractResults.websearch.question
const links = extractResults.websearch.links
const firstQuestion = questions[0]
return {
question,
links
if (firstQuestion === 'summarize' && links && links.length > 0) {
const contents = await fetchWebContents(links)
return {
query: 'summaries',
results: contents
}
}
const searchPromises = questions.map((q) => this.search(webSearchProvider, q))
const searchResults = await Promise.allSettled(searchPromises)
const aggregatedResults: WebSearchResult[] = []
searchResults.forEach((result) => {
if (result.status === 'fulfilled') {
if (result.value.results) {
aggregatedResults.push(...result.value.results)
}
}
})
return {
query: questions.join(' | '),
results: aggregatedResults
}
} catch (error) {
console.error('Failed to process enhanced search:', error)
return { results: [] }
}
}
}
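Illustrative sketch, not part of the diff: Promise.allSettled lets a failed provider query be skipped while the fulfilled ones are still merged, which is the aggregation used above. A minimal standalone example:
const settled = await Promise.allSettled([
  Promise.resolve(['result A']),
  Promise.reject(new Error('provider down'))
])
const merged = settled.flatMap((r) => (r.status === 'fulfilled' ? r.value : []))
console.log(merged) // ['result A']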

View File

@ -0,0 +1,124 @@
import assert from 'node:assert'
import { test } from 'node:test'
import { ChatCompletionMessageParam } from 'openai/resources'
const { processReqMessages } = require('../ModelMessageService')
test('ModelMessageService', async (t) => {
const mockMessages: ChatCompletionMessageParam[] = [
{ role: 'user', content: 'First question' },
{ role: 'user', content: 'Additional context' },
{ role: 'assistant', content: 'First answer' },
{ role: 'assistant', content: 'Additional information' },
{ role: 'user', content: 'Second question' },
{ role: 'assistant', content: 'Second answer' }
]
await t.test('should merge successive messages with same role for deepseek-reasoner model', () => {
const model = { id: 'deepseek-reasoner' }
const result = processReqMessages(model, mockMessages)
assert.strictEqual(result.length, 4)
assert.deepStrictEqual(result[0], {
role: 'user',
content: 'First question\nAdditional context'
})
assert.deepStrictEqual(result[1], {
role: 'assistant',
content: 'First answer\nAdditional information'
})
assert.deepStrictEqual(result[2], {
role: 'user',
content: 'Second question'
})
assert.deepStrictEqual(result[3], {
role: 'assistant',
content: 'Second answer'
})
})
await t.test('should not merge messages for other models', () => {
const model = { id: 'gpt-4' }
const result = processReqMessages(model, mockMessages)
assert.strictEqual(result.length, mockMessages.length)
assert.deepStrictEqual(result, mockMessages)
})
await t.test('should handle empty messages array', () => {
const model = { id: 'deepseek-reasoner' }
const result = processReqMessages(model, [])
assert.strictEqual(result.length, 0)
assert.deepStrictEqual(result, [])
})
await t.test('should handle single message', () => {
const model = { id: 'deepseek-reasoner' }
const singleMessage = [{ role: 'user', content: 'Single message' }]
const result = processReqMessages(model, singleMessage)
assert.strictEqual(result.length, 1)
assert.deepStrictEqual(result, singleMessage)
})
await t.test('should preserve other message properties when merging', () => {
const model = { id: 'deepseek-reasoner' }
const messagesWithProps = [
{
role: 'user',
content: 'First message',
name: 'user1',
function_call: { name: 'test', arguments: '{}' }
},
{
role: 'user',
content: 'Second message',
name: 'user1'
}
] as ChatCompletionMessageParam[]
const result = processReqMessages(model, messagesWithProps)
assert.strictEqual(result.length, 1)
assert.deepStrictEqual(result[0], {
role: 'user',
content: 'First message\nSecond message',
name: 'user1',
function_call: { name: 'test', arguments: '{}' }
})
})
await t.test('should handle alternating roles correctly', () => {
const model = { id: 'deepseek-reasoner' }
const alternatingMessages = [
{ role: 'user', content: 'Q1' },
{ role: 'assistant', content: 'A1' },
{ role: 'user', content: 'Q2' },
{ role: 'assistant', content: 'A2' }
] as ChatCompletionMessageParam[]
const result = processReqMessages(model, alternatingMessages)
assert.strictEqual(result.length, 4)
assert.deepStrictEqual(result, alternatingMessages)
})
await t.test('should handle messages with empty content', () => {
const model = { id: 'deepseek-reasoner' }
const messagesWithEmpty = [
{ role: 'user', content: 'Q1' },
{ role: 'user', content: '' },
{ role: 'user', content: 'Q2' }
] as ChatCompletionMessageParam[]
const result = processReqMessages(model, messagesWithEmpty)
assert.strictEqual(result.length, 1)
assert.deepStrictEqual(result[0], {
role: 'user',
content: 'Q1\n\nQ2'
})
})
})

View File

@ -42,7 +42,7 @@ const persistedReducer = persistReducer(
{
key: 'cherry-studio',
storage,
version: 96,
version: 97,
blacklist: ['runtime', 'messages'],
migrate
},

View File

@ -1228,6 +1228,21 @@ const migrateConfig = {
} catch (error) {
return state
}
},
'97': (state: RootState) => {
try {
addMiniApp(state, 'zai')
state.settings.webdavMaxBackups = 0
if (state.websearch && state.websearch.providers) {
state.websearch.providers.forEach((provider) => {
provider.basicAuthUsername = ''
provider.basicAuthPassword = ''
})
}
return state
} catch (error) {
return state
}
}
}

Some files were not shown because too many files have changed in this diff